diff --git "a/2252.jsonl" "b/2252.jsonl" new file mode 100644--- /dev/null +++ "b/2252.jsonl" @@ -0,0 +1,758 @@ +{"seq_id":"74859695","text":"import os\n\ndirs = [int(dir) for dir in os.listdir(\"Results/\")]\nif len(dirs) > 0:\n\tnext = max(dirs) + 1\nelse:\n\tnext = 0\n\npath = \"Results/\" + str(next)\n\nprint(\"Gathering results into \"+path)\n\nos.system(\"mkdir \\\"\"+path+\"\\\"\")\n\nfiles = [\"Objects/results.dict\", \"Objects/model.h5\", \"Objects/submission.csv\", \"Objects/corrs.dict\", \"Objects/imbalance.dict\", \"Out/results.txt\", \"Out/shapes.txt\", \"Out/corrs.txt\", \"Out/imbalance.txt\", \"model.py\"]\n\nfor file in files:\n\tchar_len = len(file)\n\tafter_slash = 0\n\tfor i in range(1, char_len+1):\n\t\tif file[-i] == \"/\":\n\t\t\tafter_slash = 1-i\n\t\t\tbreak\n\tname = file[after_slash:]\n\tos.system(\"cp \"+file+\" \"+path+\"/\"+name)\n","sub_path":"FTCNN/gather_results.py","file_name":"gather_results.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633127571","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport sys\nimport time\nimport zlib\nfrom watchdog.observers import Observer\nfrom watchdog.events import LoggingEventHandler\nimport os\nimport shutil\nfrom hkdf import Hkdf\nfrom Crypto.Protocol.KDF import PBKDF2\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import (SHA1, SHA256, SHA224, SHA256, SHA384,\n SHA512, HMAC)\nimport random\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nimport numpy as np\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter import *\nimport tkinter as tk\nfrom easygui import *\nimport hashlib\nimport six\n\n\ndef crc_compare(event):\n \"\"\"\n a function that compares the original crc that in the \"shadow file\" to the new crc of the modified file\n :param event: watchdog event\n :return: boolean\n \"\"\"\n origCRCFile = open(event.src_path + '' + '.crc32Dir' + '/' + 'crc32.32b', \"r+\")\n origCRC = origCRCFile.read()\n newCRC = crc(event.src_path)\n if origCRC == newCRC:\n return True\n else:\n return False\n\nclass DropboxContentHasher(object):\n \"\"\"\n Computes a hash using the same algorithm that the Dropbox API uses for the\n the \"content_hash\" metadata field.\n The digest() method returns a raw binary representation of the hash. 
The\n hexdigest() convenience method returns a hexadecimal-encoded version, which\n is what the \"content_hash\" metadata field uses.\n This class has the same interface as the hashers in the standard 'hashlib'\n package.\n Example:\n hasher = DropboxContentHasher()\n with open('some-file', 'rb') as f:\n while True:\n chunk = f.read(1024) # or whatever chunk size you want\n if len(chunk) == 0:\n break\n hasher.update(chunk)\n print(hasher.hexdigest())\n \"\"\"\n\n BLOCK_SIZE = 4 * 1024 * 1024\n\n def __init__(self):\n self._overall_hasher = hashlib.sha256()\n self._block_hasher = hashlib.sha256()\n self._block_pos = 0\n\n self.digest_size = self._overall_hasher.digest_size\n # hashlib classes also define 'block_size', but I don't know how people use that value\n\n def update(self, new_data):\n if self._overall_hasher is None:\n raise AssertionError(\n \"can't use this object anymore; you already called digest()\")\n\n assert isinstance(new_data, six.binary_type), (\n \"Expecting a byte string, got {!r}\".format(new_data))\n\n new_data_pos = 0\n while new_data_pos < len(new_data):\n if self._block_pos == self.BLOCK_SIZE:\n self._overall_hasher.update(self._block_hasher.digest())\n self._block_hasher = hashlib.sha256()\n self._block_pos = 0\n\n space_in_block = self.BLOCK_SIZE - self._block_pos\n part = new_data[new_data_pos:(new_data_pos+space_in_block)]\n self._block_hasher.update(part)\n\n self._block_pos += len(part)\n new_data_pos += len(part)\n\n def _finish(self):\n if self._overall_hasher is None:\n raise AssertionError(\n \"can't use this object anymore; you already called digest() or hexdigest()\")\n\n if self._block_pos > 0:\n self._overall_hasher.update(self._block_hasher.digest())\n self._block_hasher = None\n h = self._overall_hasher\n self._overall_hasher = None # Make sure we can't use this object anymore.\n return h\n\n def digest(self):\n return self._finish().digest()\n\n def hexdigest(self):\n return self._finish().hexdigest()\n\n def copy(self):\n c = DropboxContentHasher.__new__(DropboxContentHasher)\n c._overall_hasher = self._overall_hasher.copy()\n c._block_hasher = self._block_hasher.copy()\n c._block_pos = self._block_pos\n return c\n\ndef guiWelcomeBox():\n \"\"\"\n a graphic user interface that welcomes the user\n :return: none\n \"\"\"\n msg = \"welcome to my Dropbox project\\n \" \\\n \"now im going to show you a demo of the project\"\n title = \"Tomer Rippin Dropbox project\"\n msgbox(msg, title, ok_button=\"lets go!!\")\n\n\ndef guiPasswordBox():\n \"\"\"\n a graphic user interface function that gets a user ID and password from the user\n :return: none\n \"\"\"\n msg = \"Enter login information\"\n title = \"Please enter password\"\n fieldNames = [\"Password\"]\n fieldValues = [] # we start with blanks for the values\n fieldValues = multpasswordbox(msg, title, fieldNames)\n\n # make sure that none of the fields was left blank\n while 1:\n if fieldValues == None: break\n errmsg = \"\"\n for i in range(len(fieldNames)):\n if fieldValues[i].strip() == \"\":\n errmsg = errmsg + ('\"%s\" is a required field.\\n\\n' % fieldNames[i])\n if errmsg == \"\": break # no problems found\n fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)\n global password\n password = fieldValues[0]\n\n\ndef guiDecryptButton():\n \"\"\"\n a graphic user interface button that calls the \"choosefilefordecrypt\"\n function\n :return: none\n \"\"\"\n root = tk.Tk()\n frame = tk.Frame(root)\n frame.pack()\n\n decryptbutton = tk.Button(\n frame,\n text=\"decrypt\",\n 
command=chooseFilesForDec)\n decryptbutton.pack(side=tk.LEFT)\n\n\ndef chooseFilesForDec():\n \"\"\"\n a function that starts the decryption process\n the function starts when the user presses the \"decrypt button\n it gets the filenames the user wants to decrypt, the original and the already encrypted\n it cuts the filenames to only the part \"callDec\" function need.\n it calls the \"callDec\" function\n :return: none\n \"\"\"\n encFilenameInit = askopenfilename()\n currentDirectory = os.getcwd()\n delete = len(currentDirectory)\n lengthEnc = ((delete - len(encFilenameInit)))\n encFilename = \".\" + encFilenameInit[lengthEnc:]\n\n callDec(encFilename)\n\n\ndef callDec(encFilename):\n \"\"\"\n the function creates a salt from the original file\n it calls the \"decrypt\" function\n :param origFilename: the original file you want to decrypt\n :param encFilename: the encrypted file\n :return:\n \"\"\"\n encryptionObj.fileDecrypt(encFilename)\n\n\nclass MyDict:\n \"\"\" a class that responsible to all of the dictionary operations \"\"\"\n\n def __init__(self):\n \"\"\"\n creats a dictionary file (mydict.npy) if it doesn't exists it creates one and initialize it\n \"\"\"\n self.dictName = \"mydict.npy\"\n try:\n self.dictonary = np.load(self.dictName, allow_pickle=True).item()\n except:\n print(\"initialized dictionary\")\n self.dictonary = {}\n np.save(self.dictName, self.dictonary, allow_pickle=True)\n\n def dictPush(self, filename, iv, tag, key):\n \"\"\"\n the function decrypts the dictionary\n it pushes the iv and the tag to the dictionary with the filename as key\n it encrypts the dictionary\n it updates the \"mydict.npy\" file\n :param filename: the original name of the file that getting encrypt\n :param iv: the Initialization vector\n :param tag:\n :param key: the encryption key\n :return: none\n \"\"\"\n self.dictionary = np.load(self.dictName, allow_pickle=True).item()\n\n self.dictionary[filename] = [iv, tag, key]\n np.save(self.dictName, self.dictionary, allow_pickle=True)\n\n def dictPull(self, filename):\n \"\"\"\n it gets filename and returns the iv and tag from the dictionary\n that are necessary for the decryption process\n :param filename: the original filename of the encrypted filename\n :return: iv and tag (bytes)\n \"\"\"\n\n self.dictionary = np.load(self.dictName, allow_pickle=True).item()\n iv, tag, key = self.dictionary[filename]\n np.save(self.dictName, self.dictionary, allow_pickle=True)\n return iv, tag, key\n\n def dictPrint(self):\n \"\"\"\n prints the dictionary\n :return: none\n \"\"\"\n self.dictionary = np.load(self.dictName, allow_pickle=True).item()\n print(\"dictionary --> \")\n print(self.dictionary)\n\n def dictDelete(self, event):\n \"\"\"\n deletes the file that got deleted from the dictionary\n :param event: watchdog event\n :return: none\n \"\"\"\n self.dictionary = np.load(self.dictName, allow_pickle=True).item()\n print(type(self.dictionary))\n del self.dictionary[event.src_path]\n print(\"dictionary --> \")\n print(self.dictionary)\n\n def dictSwitchKey(self, event):\n \"\"\"\n when a file move occurs it switches the key in the dictionary to the new name of the\n moved file\n :param event: watchdog event\n :return: none\n \"\"\"\n self.dictionary = np.load(self.dictName, allow_pickle=True).item()\n self.dictionary[event.dest_path] = self.dictionary.pop(event.src_path)\n print(self.dictionary)\n np.save(self.dictName, self.dictionary, allow_pickle=True)\n\n\ndef verifyFile(event):\n \"\"\"\n checks if the event source isn't recursive or temporary\n 
:param event: watchdog event\n :return: boolean\n \"\"\"\n return event.src_path[2] != '.' and \"32b\" not in event.src_path and 'EncFile' not in event.src_path and \"EncCRC32\" \\\n not in event.src_path and 'crc32Dir' not in event.src_path and '_' not in event.src_path \\\n and \"npy\" not in event.src_path and \"DecryptedFile\" not in event.src_path \\\n and \"plaintext\" not in event.src_path and \"Demo\" not in event.src_path\n\n\nHASH_ALGOS = {\n 'sha1': SHA1,\n 'sha224': SHA224,\n 'sha256': SHA256,\n 'sha384': SHA384,\n 'sha512': SHA512\n}\nHASH_CNT = 1000 # Number of hashes to compute one SHA256 takes 15 microsec,\nSALT_LENGTH = 16 # Length for the Password salt for PBKDF\nHASH_ALGO = 'sha256' # For PBKDF HMAC\nIV_LENGTH = 12 # Length of GCM IV\nTAG_LENGTH = 16 # Length of the GCM tag, truncate if larger than this\nHASH_FUNC = HASH_ALGOS[HASH_ALGO]\n\n\ndef hash256(*args):\n \"\"\"short function for Hashing the arguments with SHA-256\"\"\"\n assert len(args) > 0, \"Should give at least 1 message\"\n assert all(isinstance(m, (bytes, basestring)) for m in args), \\\n \"All inputs should be byte string\"\n h = SHA256.new(bytes(len(args)) + bytes(args[0]) + bytes(len(args[0])))\n for m in args[1:]:\n h.update(bytes(m))\n h.update(bytes(len(m)))\n h.update(bytes(len(args)))\n return h.digest()\n\n\ndef hmac256(secret, m):\n return HMAC.new(key=secret, msg=m, digestmod=HASH_FUNC).digest()\n\n\ndef pad_pw(pw, pad_length):\n \"\"\"Pad pw to a pad_length, so that it hides the length of the password in bytes.\"\"\"\n assert 0 < pad_length < 256\n pw = bytes(pw)\n k = len(pw) / pad_length\n topad = pw[k * pad_length:]\n topad_len = pad_length - len(topad)\n if topad_len == 0:\n topad_len = pad_length\n pad = chr(topad_len) * topad_len\n return pw + pad\n # padder = padding.PKCS7(pad_length*8).padder()\n # return padder.update(bytes(pw)) + padder.finalize()\n\n\ndef unpad_pw(padded_pw, pad_length):\n \"\"\"Unpad pw\"\"\"\n padded_pw = bytes(padded_pw)\n padlen = ord(padded_pw[-1])\n assert padlen > 0, \"Malformed padding. Last byte cannot be zero.\"\n pad = padded_pw[-padlen:]\n assert all((padi == chr(padlen) for padi in pad))\n return padded_pw[:-padlen]\n\n # unpadder = padding.PKCS7(pad_length*8).unpadder()\n # unpadder.update(bytes(pw)) + unpadder.finalize()\n\n\ndef pwencrypt(pw, m):\n \"\"\"Encrypt the message m under pw using AES-GCM method (AEAD scheme).\n iv = 0 # Promise me you will never reuse the key\n c = ..>\n :hash_style: sha-256 or sha-512, scrypt\n :iteration: Number of iteration. These two are the parameters\n for PBKDF2.\n Size of the ciphertext:\n \"\"\"\n m = m.encode('ascii', errors='ignore')\n itercnt = random.randint(HASH_CNT, 2 * HASH_CNT)\n header_txt = HASH_ALGO + '.' + str(itercnt)\n sa = os.urandom(SALT_LENGTH)\n key = PBKDF2(\n pw, sa,\n dkLen=16,\n count=itercnt,\n prf=hmac256\n\n )\n iv, ctx, tag = textEncrypt(key, m, associated_data=header_txt)\n # Salt (SALT_LENGTH), IV (IV_LENGTH), TAG (TAG_LENGTH)\n ctx_b64 = urlsafe_b64encode(sa + iv + tag + ctx)\n return header_txt + '.' + ctx_b64\n\n\ndef pwdecrypt(pw, full_ctx_b64):\n \"\"\"\n Decrypt a ciphertext using pw,\n Recover, hash algo, iteration count, and salt, iv, tag, ctx from ctx_b64\n \"\"\"\n full_ctx_b64 = full_ctx_b64.encode('ascii', errors='ignore')\n hash_algo, itercnt, ctx_b64 = full_ctx_b64.split('.')\n header_txt = hash_algo + '.' 
+ itercnt\n ctx_bin = urlsafe_b64decode(ctx_b64)\n sa, ctx_bin = ctx_bin[:SALT_LENGTH], ctx_bin[SALT_LENGTH:]\n iv, ctx_bin = ctx_bin[:IV_LENGTH], ctx_bin[IV_LENGTH:]\n tag, ctx = ctx_bin[:TAG_LENGTH], ctx_bin[TAG_LENGTH:]\n hmac_tmp = lambda secret, m: HMAC.new(key=secret, msg=m, digestmod=HASH_ALGOS[hash_algo]).digest()\n key = PBKDF2(\n pw, sa,\n dkLen=16,\n count=int(itercnt),\n prf=hmac_tmp\n )\n try:\n m = textDecrypt(key, iv, ctx, tag, associated_data=header_txt)\n return m\n except Exception as e:\n raise ValueError(e)\n\n\ndef textEncrypt(key, plaintext, associated_data=''):\n # Generate a random 96-bit IV.\n iv = os.urandom(IV_LENGTH)\n # 16 (AES-128), 24 (AES-192), or 32 (AES-256)\n if len(key) not in (16, 24, 32):\n key = hash256(key) # makes it 256-bit\n # Construct an AES-GCM Cipher object with the given key and a\n # randomly generated IV.\n # create the ciphertext\n encryptor = AES.new(key=key, mode=AES.MODE_GCM, nonce=iv)\n\n # associated_data will be authenticated but not encrypted,\n # it must also be passed in on decryption.\n encryptor.update(associated_data)\n\n ctx = encryptor.encrypt(plaintext)\n # Encrypt the plaintext and get the associated ciphertext.\n # GCM does not require padding.\n tag = encryptor.digest()\n return (iv, ctx, tag)\n\n\ndef textDecrypt(key, iv, ciphertext, tag, associated_data=''):\n # Construct a Cipher object, with the key, iv, and additionally the\n # GCM tag used for authenticating the message.\n if len(key) not in (16, 24, 32):\n key = hash256(key) # makes it 256-bit\n\n decryptor = AES.new(key=key, mode=AES.MODE_GCM, nonce=iv)\n\n # We put associated_data back in or the tag will fail to verify\n # when we finalize the decryptor.\n decryptor.update(associated_data)\n plaintext = decryptor.decrypt(ciphertext)\n\n # Decryption gets us the authenticated plaintext.\n # If the tag does not match an InvalidTag exception will be raised.\n decryptor.verify(tag)\n return plaintext\n\n\nclass MyEncryption:\n \"\"\"\n a class for encryption and decryption\n \"\"\"\n\n def __init__(self, password):\n \"\"\"\n\n :param password:\n \"\"\"\n self.password = password\n self.UTF8password = password.encode('UTF-8')\n\n def fileEncrypt(self, event, salt, destdir):\n \"\"\"\n a method that calculates HKDF key based on salt and password\n reads the event file, encrypts it and stores the iv and the tag in the dictionary file\n then it writes the cipher text into an encrypted file\n it then creates a hash of the encrypted file\n\n :param event: watchdog event\n :param salt: CRC32 of the unencrypted file\n :param destdir: \"shadow file\" directory name str\n :return: none\n \"\"\"\n print(\"in fileEncrypt\")\n # destdir = destdir + \"/\" + destdir\n kdf = Hkdf(str(salt).encode('utf8'), self.UTF8password, hash=hashlib.sha512)\n key = kdf.expand(b\"context\", 16)\n # add code for encryption encrypt(event, key)\n file_in = open(event.src_path, \"r\")\n plainText = file_in.read()\n plainText = [str(x) for x in plainText]\n plainText = ''.join(plainText).encode('utf8')\n print(plainText)\n iv, ctx, tag = textEncrypt(key, plainText)\n print(\"key -->\" + str(key))\n print(\"iv -->\" + str(iv))\n print(\"tag-->\" + str(tag))\n print(\"ctx-->\" + str(ctx))\n print(type(ctx))\n # add code to writing file into destdir\n encFile = open(destdir + \"/EncFile\", \"wb+\")\n encFile.write(ctx)\n encFile.close()\n d.dictPush(event.src_path, iv, tag, key)\n encFileHash = open(destdir + \"/EncHash\", \"w+\")\n hasher = DropboxContentHasher()\n encFile = open(destdir + 
\"/EncFile\", \"rb+\")\n encryptedData = encFile.read()\n hasher.update(encryptedData)\n encFileHash.write(hasher.hexdigest())\n\n def fileDecrypt(self, filename):\n \"\"\"\n a method that calculates HKDF key based on salt and password\n decrypts file event.src_path\n collects from dictionary file the iv and tag\n writes decrypted file into \"plaintext\"\n\n :param filename: the file you want to decrypt (string)\n\n :return:\n \"\"\"\n print(\"in fileDecrypt\")\n iv, tag, key = d.dictPull(filename[:-17])\n print(\"key -->\" + str(key))\n print(\"iv -->\" + str(iv))\n print(\"tag-->\" + str(tag))\n\n # add code for decryption encrypt(event, key)\n file_in = open(filename, \"rb\")\n ctx = file_in.read()\n print(type(ctx))\n print(\"ctx-->\" + str(ctx))\n plaintext = textDecrypt(key, iv, ctx, tag)\n print(\"plaintext -->\" + str(plaintext))\n decryptedFile = open(\"./plaintext\", \"w+\")\n decryptedFile.write(plaintext.decode(\"utf8\"))\n\n\ndef crcCreate(event):\n \"\"\"\n a method that accepts a watchdog event and calculates a CRC32\n of the file in event.src_path\n creates a file and writes the CRC32 into it\n the file is placed into the \"shadow directory\" --> event.src_path + '.crc32Dir'\n :param event: watchdog event\n :return: none\n \"\"\"\n try:\n\n crcFileName = event.src_path + '' + '.crc32Dir' + '/' + 'crc32.32b'\n f = open(crcFileName, \"w+\")\n f.write(crc(event.src_path))\n print(f)\n f.close()\n except:\n print(\"could not open file %s\" % crcFileName)\n\n\ndef crc(fileName):\n \"\"\"\n a method that calculates the CRC32 of a given file fileName\n :param fileName: string containing file name\n :return: CRC32 hexadecimal\n \"\"\"\n prev = 0\n fileName.strip('_')\n for eachLine in open(fileName, \"rb\"):\n prev = zlib.crc32(eachLine, prev)\n return \"%X\" % (prev & 0xFFFFFFFF)\n\n\nclass MyEventHandler(LoggingEventHandler):\n \"\"\"\n a new eventhandler that adds Dropbox project functionality to the watchdog.EventHandler\n\n \"\"\"\n\n def on_created(self, event):\n \"\"\"\n method in MyEventHandler that gets activated whenever an 'created' event is\n detected by the watchdog observer\n\n ---\n actions:\n\n checks if the event that created this event is a directory or a file\n checks the event isn't recursive or temporary\n creates a directory named dirPath and calls the crcCreate method\n calls the encryption\n\n ---\n\n :param event: watchdog event\n :return: none\n \"\"\"\n super(MyEventHandler, self).on_created(event)\n\n if not event.is_directory:\n dirPath = event.src_path.split('_')[0] + '.crc32Dir' # the name of the shadow directory\n if verifyFile(event):\n try:\n os.mkdir(dirPath)\n except OSError:\n print(\"Creation of the directory %s failed\" % dirPath)\n salt = str(crc(event.src_path)).encode('UTF-8')\n encryptionObj.fileEncrypt(event, salt, dirPath)\n crcCreate(event)\n\n def on_moved(self, event):\n \"\"\"\n method in MyEventHandler that gets activated whenever an 'moved' event is\n detected by the watchdog observer\n\n ---\n actions\n moves the CRC32 directory of the event file from event.src_path to event.dest_path\n ---\n :param event: watchdog event\n :return: none\n \"\"\"\n super(MyEventHandler, self).on_moved(event)\n\n if len(event.src_path) > 3:\n if event.src_path[2] != '.':\n if not event.is_directory:\n if verifyFile(event):\n d.dictSwitchKey(event)\n shutil.move(event.src_path + '' + '.crc32Dir', event.dest_path + '.crc32Dir')\n\n def on_deleted(self, event):\n \"\"\"\n method in MyEventHandler that gets activated whenever an 'deleted' event is\n 
detected by the watchdog observer\n\n ---\n actions\n\n checks the event isn't recursive or temporary\n deletes the CRC32 directory\n deletes from the dictionary\n ---\n :param event: watchdog event\n :return: none\n \"\"\"\n super(MyEventHandler, self).on_deleted(event)\n delpath = event.src_path # a path to the src of the event\n if verifyFile(event):\n d.dictDelete(event)\n shutil.rmtree(delpath + '.crc32Dir')\n\n def on_modified(self, event):\n \"\"\"\n method in MyEventHandler that gets activated whenever an 'modified' event is\n detected by the watchdog observer\n\n ---\n actions\n\n checks if the event that created this event is a directory or a file\n checks the event isn't recursive or temporary\n compares the crc of the original file with the crc of the modified file\n if they are equal:\n not doing anything\n if not:\n starting the encryption process, switches the original crc with the new one\n\n ---\n :param event: watchdog event\n :return: none\n \"\"\"\n super(MyEventHandler, self).on_modified(event)\n if len(event.src_path) > 3:\n if not event.is_directory:\n if verifyFile(event):\n if not crc_compare(event):\n dirPath = event.src_path + '' + '.crc32Dir' # the name of the directory holding the CRC32 file\n salt = str(crc(event.src_path)).encode('UTF-8')\n encryptionObj.fileEncrypt(event, salt, dirPath)\n d.dictPrint()\n crcCreate(event)\n\n\nif __name__ == \"__main__\":\n master = Tk() # initilaize the GUI\n d = MyDict() # creates a MyDict object\n d.__init__()\n guiWelcomeBox()\n guiPasswordBox()\n encryptionObj = MyEncryption(password)\n guiDecryptButton()\n path = sys.argv[1] if len(sys.argv) > 1 else '.' # path is the directory of the source\n event_handler = MyEventHandler() # the observer event_handler is MyEventHandler\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True) # the observer will check all of the files in 'path'\n observer.start()\n root = mainloop()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","sub_path":"MoveFix.py","file_name":"MoveFix.py","file_ext":"py","file_size_in_byte":23201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105031631","text":"\"\"\"\nGet info about your SEO (tags)\nClass takes two arguments shop name and search tag.\n\"\"\"\n\nfrom selenium import webdriver\nimport time \nimport datetime\nfrom getProxyUSA import GetProxy\nimport csv\nimport os \n\nclass SearchShop:\n\tdef __init__(self, shop, tag):\n\t\tself.shop = shop \n\t\tself.tag = tag \n\t\tself.worker()\n\t\tself.browser.close()\n\n\tdef worker(self):\n\t\tproxy = GetProxy()\n\t\tfirefox_capabilities = webdriver.DesiredCapabilities.FIREFOX\n\t\tfirefox_capabilities['marionette'] = True\n\n\t\tfirefox_capabilities['proxy'] = {\n\t\t\t\"proxyType\": \"MANUAL\",\n\t\t\t\"httpProxy\": proxy,\n\t\t\t\"ftpProxy\": proxy,\n\t\t\t\"sslProxy\": proxy\n\t\t\t}\n\n\t\tself.browser = webdriver.Firefox(capabilities=firefox_capabilities)\n\t\tself.browser.get(f\"https://www.etsy.com/search?q={self.tag}\")\n\t\tself.make_scroll()\n\t\tself.get_pages()\n\t\tk = False\n\t\tfor page in range(self.total_pages):\n\t\t\ttime.sleep(2)\n\t\t\tself.browser.get(f\"https://www.etsy.com/search?q={self.tag}&ref=pagination&page={page+1}\")\n\t\t\titems = self.browser.find_elements_by_css_selector('.wt-grid__item-xl-3')\n\t\t\tfor i in range(len(items)):\n\t\t\t\ttry:\n\t\t\t\t\ttmp_shop = items[i].find_element_by_class_name('text-gray-lighter').text 
\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tif tmp_shop == self.shop:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.title = items[i].find_element_by_css_selector('.logged .text-body').text\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.title = 'None'\n\t\t\t\t\tself.date = datetime.datetime.now().date()\n\t\t\t\t\tself.pos = page * 48 + i \n\t\t\t\t\tself.page = page + 1\n\t\t\t\t\tself.save_csv()\n\t\t\t\t\tk = True\n\t\t\tif k:\n\t\t\t\tbreak\n\n\tdef make_scroll(self):\n\t\tfor i in range(3):\n\t\t\tself.browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\ttime.sleep(1)\n\n\tdef save_csv(self):\n\t\tpath = f\"{os.getcwd()}/{self.shop}\"\n\t\ttry: \n\t\t\tos.mkdir(path) \n\t\texcept OSError as error: \n\t\t\tprint(error) \n\t\twith open(f'{path}/{self.shop}.csv', 'a') as f:\n\t\t\t\twriter = csv.writer(f)\n\t\t\t\twriter.writerow((self.date, self.shop, self.tag, self.title, self.pos, \n\t\t\t\t\t\t\t\tself.page, self.total_pages))\n\t\tprint(self.tag)\n\n\tdef get_pages(self):\n\t\ttry:\n\t\t\ttime.sleep(3)\n\t\t\tself.total_pages = int(self.browser.find_elements_by_css_selector('.wt-action-group__item')[-2].text.split('\\n')[-1])\n\t\texcept:\n\t\t\tprint('No found')\n\t\t\tself.browser.close()\n\t\t\tself.ch_tag()\n\n\tdef ch_tag(self):\n\t\tself.tag = '+'.join(input(\"Search tag:\\n\").split())\n\t\tself.worker()\n\nif __name__ =='__main__':\n\tshop = input('Shop name:\\n')\n\ttag = '+'.join(input(\"Search tag:\\n\").split())\n\tSearchShop(shop, tag)","sub_path":"forWork/searchShopPage.py","file_name":"searchShopPage.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136390562","text":"from selenium import webdriver\n\nclass GoogleID:\n def test_google_ID(self):\n url = 'https://google.com'\n driver = webdriver.Firefox()\n driver.get(url)\n ele_gl = driver.find_element_by_id('hplogo')\n text = ele_gl.text\n if ele_gl:\n print('Hurrah!!!, Found the id name of finding lucky ' +text)\n driver.quit()\n\n\ngid = GoogleID()\ngid.test_google_ID()","sub_path":"find_elements/find_id_google.py","file_name":"find_id_google.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"580330332","text":"'''\nExamples\ninputCopy\n3\n101\noutputCopy\nYes\ninputCopy\n4\n1011\noutputCopy\nNo\ninputCopy\n5\n10001\noutputCopy\nNo\n'''\nnoe = int(input())\nins = \"0\"+input()+\"0\"\nprint(\"NO\" if (\"11\" in ins or \"000\" in ins ) else \"YES\")\n","sub_path":"ACMSGuru/484_A_row.py","file_name":"484_A_row.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482988033","text":"import time\nimport sys\n\nimport urllib.error\n\nimport goslate\n\n\nwith open('single_spintax.csv', 'r') as f:\n texts = f.readlines()\n\nlistArticles = []\nfor text in texts:\n article = text.split('\";\"')\n article[0] = article[0][1:]\n article[-1] = article[-1][:-2].strip()\n listArticles.append(article)\n\ngs = goslate.Goslate()\n\narticles = []\nfor idx, x in enumerate(listArticles, start=1):\n print('Processing...', idx)\n article = []\n for x_ in x:\n time.sleep(5)\n try:\n hasil = gs.translate(x_, 'nl')\n article.append(hasil)\n except urllib.error.HTTPError:\n print(\"503: your process have been rejected by Google... 
:(\")\n break\n print(\"translating...\")\n\n time.sleep(30)\n articles.append(article)\n\n#print(article)\n#sys.exit()\n\nwith open('50_hasil.csv', 'w') as f:\n for a_ in articles:\n #pass\n #print(a_)\n a_[3] = a_[3].replace(\"\", \"

\")\n a_[3] = a_[3].replace(\"

\", \"

\")\n a_[3] = a_[3].replace(\"\", \"

\")\n a_[3] = a_[3].replace(\"

\", \"

\")\n a_[3] = a_[3].replace(\"\", \"

\")\n a_[3] = a_[3].replace(\"

\", \"

\")\n a_ = '\"{}\";\"{}\";\"{}\";\"{}\"'.format(a_[0],a_[1],a_[2],a_[3])\n f.write(a_ + '\\n')\n\nprint(\"Program selesai\")\n","sub_path":"4_translate/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526629427","text":"import socket\nimport sys\nfrom bs4 import BeautifulSoup\n\nclass Client:\n def __init__(self):\n self.host = '192.168.100.219'\n self.port = 5000\n self.client = None\n self.size = 1024\n\n def open_socket(self):\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.client.connect((self.host,self.port))\n \n def get_response(self,response):\n \n while True:\n received = self.client.recv(1024)\n response += received\n \n if len(received) < 1024:\n break\n\n return response\n\n def get_index(self):\n request_header = b'GET / HTTP/1.0\\r\\n\\r\\n'\n self.client.send(request_header)\n response = self.get_response(b'')\n \n responses = response.rsplit(b'\\r\\n',1)\n content = responses[1].decode('utf-8')\n print(responses[0])\n\n soup = BeautifulSoup(content, 'html.parser')\n print(soup.get_text())\n\n def get_file(self,file_request):\n request_header = f'GET /{file_request} HTTP/1.0\\r\\n\\r\\n'.encode('utf-8')\n self.client.send(request_header)\n response = self.get_response(b'')\n responses = response.split(b'\\r\\n', 3)\n http_status = responses[0].split(b' ',1)\n print(responses[0])\n\n if http_status[1] == b'200 OK':\n with open(file_request,'wb') as file:\n content = responses[3]\n file.write(content)\n\n elif http_status[1] == b'404 Not found':\n content = responses[3].decode('utf-8')\n soup = BeautifulSoup(content, 'html.parser')\n print(soup.get_text())\n\n\n def run(self, file_request='/'):\n self.open_socket()\n try:\n while True:\n if file_request == '/' or file_request == 'index.html':\n self.get_index()\n else:\n self.get_file(file_request)\n \n file_request = input()\n \n \n except KeyboardInterrupt:\n self.client.close()\n sys.exit(0)\n\nif __name__ == \"__main__\" :\n client = Client()\n client.run()\n","sub_path":"Tugas UTS/no6/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156158676","text":"#py4e assignment 11\n\nimport re\nfname = input(\"Enter file name: \")\nif len(fname) < 1 : fname = \"regex_sum_42.txt\"\n\nfh = open(fname)\nfor line in fh:\n line = line.rstrip()\n num = re.findall('([0-9]+)', line)\n if len(num) > 0:\n print(num)\n","sub_path":"assignment11/assignment11.py","file_name":"assignment11.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177884896","text":"'''\r\nCreated on 29 Jul 2021\r\n\r\n@author: matth\r\n\r\nThis module contains the method that is responsible for the front end of the 2nd tab of the application. 
\r\n'''\r\n\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom methods import ML_ModelsMethods as mm # custom script\r\nfrom methods import CleanerMethods as cl\r\n\r\n\r\ndef MLPagePrintOut():\r\n '''\r\n Summary:\r\n Provides frontend elements of 'Create ML Classifier' tab using the streamlit framework.\r\n '''\r\n \r\n df_set = False\r\n spliting = False\r\n categories_valid = True\r\n bounds = {}\r\n \r\n st.title('Create Machine Learning Classifier')\r\n \r\n if 'revised_df' not in st.session_state:\r\n st.write('No cached dataset, please upload file.')\r\n dataset = st.file_uploader(\"Upload CSV File\", type=['csv'])\r\n if dataset is not None:\r\n df = pd.read_csv(dataset, na_values=\"NaN\")\r\n df_set = True \r\n else:\r\n option = st.selectbox(\r\n 'Select Dataset', [\r\n 'Cached Dataset',\r\n 'Upload New Dataset'])\r\n if option == 'Upload New Dataset':\r\n dataset = st.file_uploader(\"Upload CSV File\", type=['csv'])\r\n if dataset is not None:\r\n df = pd.read_csv(dataset, na_values=\"NaN\")\r\n df_set = True\r\n else:\r\n df = st.session_state.revised_df\r\n df_set = True\r\n \r\n if df_set == True:\r\n st.write('Dataframe:')\r\n st.write(df)\r\n headers = list(df.columns)\r\n \r\n try:\r\n st.write('Correlation Heatmap')\r\n df_corr = df.corr()\r\n sns.heatmap(df_corr, annot=True, cmap='Reds')\r\n st.pyplot()\r\n except ValueError:\r\n st.write('Dataset is not appropriate for Correlation Heatmap')\r\n \r\n method = st.selectbox(\r\n 'Select ML Method', [\r\n 'Decision Tree',\r\n 'Random Forest',\r\n 'Gaussian Naive Bayes',\r\n 'n Nearest Neighbours',\r\n 'Ada Boost',\r\n 'Quadratic Discriminant Analysis',\r\n 'Zero R'])\r\n \r\n n_neighbour = -1\r\n if method == 'n Nearest Neighbours':\r\n n_neighbour = st.selectbox('Select n', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\r\n \r\n model_class = st.selectbox('Select Class', headers)\r\n numeric_class = cl.checkListIsNum(df[model_class])\r\n \r\n if numeric_class:\r\n st.write('Split numeric class into discrete categories')\r\n num_of_categories = st.selectbox('Number of Categories', ['Don\\'t Split', '2', '3', '4'], index=1)\r\n \r\n with st.form('treatment_form'):\r\n \r\n save_classifier = st.text_input(label='Classifier Name - Enter Name to Save Classifier')\r\n \r\n if numeric_class:\r\n if num_of_categories == '2' or num_of_categories == '3' or num_of_categories == '4':\r\n spliting = True\r\n bounds = mm.splitNumericCategoriesGetBounds(num_of_categories)\r\n \r\n st.write(\"Columns to be included in model:\")\r\n \r\n column_included = {}\r\n \r\n for col in headers:\r\n if col != model_class:\r\n column_included[col] = st.checkbox(col, value=\"True\")\r\n \r\n submitted = st.form_submit_button(\"Submit\")\r\n \r\n if submitted:\r\n \r\n model_df = mm.removeColumns(df, column_included)\r\n \r\n if numeric_class and spliting:\r\n categories_valid = mm.boudaryValuesValid(bounds, num_of_categories) and mm.boudaryNamesValid(bounds, num_of_categories)\r\n \r\n if (not categories_valid) or mm.checkColumnsAllFalse(column_included):\r\n pass # not placeholder\r\n else:\r\n if spliting:\r\n if bounds[0] == 0:\r\n model_df = mm.splitNumericCategories0(bounds, num_of_categories, model_df, model_class)\r\n else:\r\n model_df = mm.splitNumericCategories1(bounds, num_of_categories, model_df, model_class)\r\n \r\n st.write('Dataset used for model:')\r\n st.write(model_df)\r\n st.markdown(cl.get_table_download_link(model_df, 'model_dataset.csv'), unsafe_allow_html=True)\r\n \r\n if submitted:\r\n \r\n if 
mm.checkDatasetClassNotEmpty(model_df[model_class]):\r\n \r\n classifier = mm.runClassifier(model_df, model_class, method, n_neighbour)\r\n \r\n if save_classifier != '':\r\n classifier_name = save_classifier + '_name'\r\n classifier_class = save_classifier + '_class'\r\n classifier_model = save_classifier + '_model'\r\n classifier_headers = save_classifier + '_headers'\r\n classifier_datatypes = save_classifier + '_datatypes'\r\n st.session_state[classifier_name] = save_classifier\r\n st.session_state[classifier_class] = model_class\r\n st.session_state[classifier_model] = classifier\r\n st.session_state[classifier_headers] = list(model_df.columns)\r\n st.session_state[classifier_datatypes] = model_df.dtypes\r\n \r\n","sub_path":"main/pages/ML_Models.py","file_name":"ML_Models.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99759867","text":"# -*- coding: utf-8 -*-\nimport os.path\nimport json\nfrom datetime import datetime\nfrom sqlite3 import dbapi2 as sqlite3\nfrom functools import wraps\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import g\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import abort\nfrom flask import render_template\nfrom flask import send_from_directory\nfrom flask import make_response\nfrom flask import Response\n\napp = Flask(__name__)\n\n\ndef connect_db():\n \"\"\"Connects to the specific database.\"\"\"\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv\n\n\ndef init_db():\n \"\"\"Creates the database tables.\"\"\"\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\ndef get_db():\n \"\"\"\n Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\ndef jsonify(function):\n \"\"\"\n Creates a response with the JSON representation of wrapped function result.\n \"\"\"\n @wraps(function)\n def inner(*args, **kwargs):\n result = function(*args, **kwargs)\n if not isinstance(result, Response):\n result = Response(json.dumps(result), mimetype='application/json')\n return result\n return inner\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef main_page():\n \"\"\"\n Shows main page from static (GET) and sets cookie with username (POST)\n \"\"\"\n if request.method == 'GET':\n return render_template('main.html')\n if request.method == 'POST':\n username = request.form.get('username', 'player1')\n if username:\n response = redirect(url_for('game_page', level_id=1))\n response.set_cookie('username', username, expires=datetime.max)\n return response\n\n\n@app.route('/level/')\ndef game_page(level_id=1):\n \"\"\"\n Sets cookie with level_id and shows main page from static.\n \"\"\"\n response = make_response(render_template('game.html'))\n response.set_cookie('level_id', str(level_id), expires=datetime.max)\n return response\n\n\n@app.route('/top-10/')\ndef top_10(level_id):\n params = {\n 'level_id': level_id,\n 'next_level_url': url_for('game_page', level_id=level_id+1),\n }\n return render_template('top-10.html', **params)\n\n\n@app.route('/api/v1/level/.')\ndef get_level(level_id, ext):\n \"\"\"\n Loads map (obj or mtl file) from static directory.\n \"\"\"\n return send_from_directory(app.config['LVL_DIR'], '%03d.%s' % (level_id, 
ext))\n\n\n@app.route('/api/v1/scores/', methods=['POST', 'GET'])\n@jsonify\ndef scores(level_id):\n \"\"\"\n Returns scores for all users (GET) or saves score (POST).\n \"\"\"\n if request.method == 'POST':\n moves = request.form.get('moves', None)\n duration = request.form.get('duration', None)\n username = request.form.get('username', None)\n if moves and duration and username:\n db = get_db()\n db.execute(\n 'INSERT INTO scores (level_id, username, moves, duration) \\\n VALUES (?, ?, ?, ?)',\n [level_id, username, moves, duration]\n )\n db.commit()\n return {'result': True}\n else:\n abort(400)\n if request.method == 'GET':\n db = get_db()\n cur = db.execute(\n 'SELECT moves, duration, username \\\n FROM scores \\\n WHERE level_id = {} \\\n ORDER BY moves, duration'.format(level_id)\n )\n entries = cur.fetchall()\n result = [dict(entries[i]) for i in range(len(entries))]\n return result\n","sub_path":"src/buckyban/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135380694","text":"from django.shortcuts import render, HttpResponse\r\nimport requests\r\nimport re\r\nfrom rest_framework.views import APIView\r\nimport json\r\nfrom vedio import forms\r\nfrom vedio import huanongwang\r\n# Create your views here.\r\n\r\n\r\ndef zhongxinwang(url):\r\n addrs = []\r\n req = requests.get(url)\r\n result = re.findall(r'http://(.+?).mp4', req.text)\r\n for item in result:\r\n addr = 'http://' + item + '.mp4'\r\n addrs.append(addr)\r\n return addrs\r\n\r\n\r\n\r\n\r\nclass XinWenView(APIView):\r\n\r\n def get(self, request, *args, **kwargs):\r\n form_obj = forms.UrlForm()\r\n return render(request, 'url.html', {'form_obj': form_obj})\r\n\r\n\r\n def post(self, request, *args, **kwargs):\r\n addrs = []\r\n form_obj = forms.UrlForm(request.POST)\r\n if form_obj.is_valid():\r\n url_type = form_obj.cleaned_data.get(\"url_type\")\r\n url = form_obj.cleaned_data.get(\"url\")\r\n if url_type == 0:\r\n addrs = zhongxinwang(url)\r\n else:\r\n mp4_name = huanongwang.main(url)\r\n addr = 'http://111.230.10.100/' + mp4_name\r\n addrs.append(addr)\r\n return HttpResponse(json.dumps(addrs, ensure_ascii=False), content_type=\"application/json\")\r\n\r\n else:\r\n return HttpResponse(form_obj.errors)\r\n\r\n\r\n\r\n\r\n","sub_path":"vedio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415402497","text":"# Definition for a binary tree node.\nclass TreeNode:\n \n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n \n def levelOrderBottom(self, root: TreeNode) -> list:\n \n depth = self.find_depth(root)\n matrix = []\n \n for i in reversed(range(depth)):\n \n matrix.append(self.get_at_index(root, i))\n \n return matrix\n \n \n def find_depth(self, node) -> int:\n \n left_val = 0\n right_val = 0\n \n if node == None:\n return 0\n else:\n if node.left is not None:\n left_val = self.find_depth(node.left)\n\n if node.right is not None:\n right_val = self.find_depth(node.right)\n \n return 1 + (left_val if left_val > right_val else right_val)\n\n \n def get_at_index(self, node, i, curr_index=0) -> list:\n \n if curr_index == i:\n return [node.val]\n else:\n \n left_val, right_val = [], []\n \n if node.left is not None:\n left_val = self.get_at_index(node.left, i, curr_index+1)\n \n if 
node.right is not None:\n right_val = self.get_at_index(node.right, i, curr_index+1)\n \n return left_val + right_val\n \n \n\ndef main():\n \n node_list = [3,9,20,None,None,15,7]\n \n node_15 = TreeNode(15)\n node_7 = TreeNode(7)\n node_20 = TreeNode(20, node_15, node_7)\n node_9 = TreeNode(9)\n node_3 = TreeNode(3, node_9, node_20)\n node_4 = TreeNode(4, node_3)\n \n s = Solution()\n \n print(f\"{s.levelOrderBottom(node_4)}\")\n \n return 0\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"2_binary_tree.py","file_name":"2_binary_tree.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627247113","text":"\"\"\"medserver URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom rest_framework.authtoken import views\nfrom rest_framework import routers\n\nfrom apps.human_resources import views as hr_views\nfrom apps.patients import views as p_views\nfrom apps.visit_planning import views as vp_views\nfrom medserver.views import schema_view\n\nrouter = routers.DefaultRouter()\nrouter.register(r'employees', hr_views.EmployeeViewSet)\nrouter.register(r'departments', hr_views.DepartmentViewSet)\nrouter.register(r'schedules', hr_views.EmployeeScheduleViewSet)\nrouter.register(r'free_times', hr_views.EmployeeFreeTimesView, base_name='free_times')\nrouter.register(r'allergy_sources', p_views.AllergySourceViewSet)\nrouter.register(r'previous_illnesses', p_views.PreviousIllnessesViewSet)\nrouter.register(r'previous_surgery', p_views.PreviousSurgeryViewSet)\nrouter.register(r'patients', p_views.PatientFullViewSet)\nrouter.register(r'patients_smart', p_views.PatientSmartViewSet)\nrouter.register(r'visit_reasons', vp_views.VisitReasonViewSet)\nrouter.register(r'patient_visits', vp_views.PatientVisitViewSet)\nrouter.register(r'patient_visit_statuses', vp_views.PatientVisitStatusesView, base_name='patient_visit_statuses')\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^api/', include(router.urls)),\n url(r'^api/me', hr_views.me),\n url(r'^api/get_token', views.obtain_auth_token),\n url(r'^schema/?', schema_view),\n]\n","sub_path":"MED.Server/medserver/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27952507","text":"import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport time, os\n\nimport tvm\nimport tvm.contrib.graph_runtime as runtime\nfrom tvm import relay\n\nimport tensorflow as tf\nimport tvm.relay.testing.tf as tf_testing\n\nfrom tensorflow import nn\nfrom tensorflow import image\n\nnp.random.seed(0)\n\n\"\"\" \n Network parameters\n\"\"\"\nIMG_WIDTH = 600\nIMG_HEIGHT = 400\n\nBATCH_SIZE 
= 32\nN = 224\nFIN = 3\nFOUT = 32\n\nK_Y = 3\nK_X = 3\n\nNB_TESTS = 101\n\n\"\"\" \n Target settings\n\"\"\"\ntarget = \"llvm -mcpu=core-avx2\"\ntarget_host = \"llvm\"\nlayout = None\n\ninput_shape = (BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, FIN)\ndtype = \"float32\"\n\n\"\"\" \n Create the graph in TensorFlow \n\"\"\"\ndef ResizeConvReluMaxPool(X, weights, bias):\n resize = image.resize(X, [N + 2, N + 2])\n \n conv = nn.conv2d(resize, weights, strides=[1, 1, 1, 1], padding=\"VALID\", data_format=\"NHWC\")\n conv_bias = nn.bias_add(conv, bias, data_format=\"NHWC\")\n\n relu = nn.relu(conv_bias)\n maxpool = nn.max_pool2d(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"VALID\", data_format=\"NHWC\", name=\"output\")\n \n return maxpool\n\nweights = np.random.rand(K_Y, K_X, FIN, FOUT)\nbias = np.random.rand(FOUT)\n\nX = tf.compat.v1.placeholder(tf.float32, [BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, FIN], name=\"X\")\nactivations = ResizeConvReluMaxPool(X, weights, bias)\n\nmodel_path = \"tf_model.pb\"\ntf.io.write_graph(tf.compat.v1.get_default_graph(), \"\", model_path, as_text=False)\n\n\"\"\" \n Create the graph in TVM and compile it \n\"\"\"\nwith tf.io.gfile.GFile(model_path, \"rb\") as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name=\"\")\n\n graph_def = tf_testing.ProcessGraphDefParam(graph_def)\n\n # Add shapes to the graph.\n with tf.Session() as sess:\n graph_def = tf_testing.AddShapesToGraphDef(sess, \"output\")\n\n# Import TF graph definition to Relay frontend\nshape_dict = {\"X\": input_shape}\nmod, parameters = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)\n\n# Compile the graph\nwith relay.build_config(opt_level=3):\n graph, lib, params = relay.build_module.build(\n mod, target=target, params=parameters)\n\n\"\"\" Execute and evaluate the graph \"\"\"\nctx = tvm.cpu()\ndata_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\nmodule = runtime.create(graph, lib, ctx)\nmodule.set_input(\"X\", data_tvm)\nmodule.set_input(**params)\n\n# evaluate\nprint(\"Evaluate inference time cost...\")\nftimer = module.module.time_evaluator(\"run\", ctx, number=NB_TESTS, repeat=1)\nprof_res = np.array(ftimer().results) * 1000 # convert to millisecond\n\nprint(\"Network execution time : \", np.median(prof_res))","sub_path":"benchmarks/DNN/blocks/Resize-Conv-ReLU-MaxPool/cpu/dense/resize_conv_relu_maxpool_tvm.py","file_name":"resize_conv_relu_maxpool_tvm.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"455845650","text":"\"\"\"Cover \"Manhattan skyline\" using the minimum number of rectangles.\"\"\"\nimport getopt\nimport sys\nimport re\ndef stone_wall(arr_h):\n \"\"\"Cover \"Manhattan skyline\" using the minimum number of rectangles.\"\"\"\n # define N as length of arr_h\n len_n = len(arr_h)\n # start block count at length of arr_h\n blocks = len(arr_h)\n # iterate through arr_h to compare blocks\n for ndx_n in range(len_n):\n # create a lookback variable to crawl backwards from current loop location\n lookback = ndx_n\n # only crawl backwards while height is high enough to share stones\n while (arr_h[ndx_n] <= arr_h[lookback]) & (lookback > 0):\n lookback -= 1\n # if a shared height is reached,\n if arr_h[lookback] == arr_h[ndx_n]:\n # subtract from block count\n blocks -= 1\n # and stop while loop\n lookback = 0\n # return the block count\n return blocks\n# Bash 
Testing:\nARGS = list(getopt.getopt(sys.argv, \"ho:v\"))[1][1]\nARGS = re.sub(r\"^\\[|\\]$\", \"\", ARGS).split(\", \")\nARGS = [int(x) for x in ARGS]\n# time1 = datetime.datetime.now()\nRESULT = stone_wall(ARGS)\n# time2 = datetime.datetime.now()\n# timeChange = time2-time1\nprint [\"result: \"]+[RESULT] #+[\" ; milliseconds: \"]+[timeChange.total_seconds()*1000])\n# Codility Testing:\n## https://app.codility.com/demo/results/trainingQYZX6P-S8W/ ; trainingK59HWD-FN2\n## Correctness: 100%\n## Performance: 77%\n## Difficulty: Painless\n","sub_path":"source/7-stacks-and-queues/2/stone_wall.py","file_name":"stone_wall.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"339636116","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('member', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='SponsorItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False, db_column='created')),\n ('updated_at', models.DateTimeField(default=django.utils.timezone.now, editable=False, db_column='updated')),\n ('code', models.CharField(max_length=25)),\n ('name', models.CharField(max_length=255)),\n ('type', models.CharField(max_length=25)),\n ('description', models.CharField(max_length=523)),\n ('amount', models.FloatField()),\n ('photo', models.CharField(max_length=255, null=True, blank=True)),\n ('created_by', models.ForeignKey(related_name='sponsoritem_created_by', db_column='createdBy', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n options={\n 'db_table': 'sponsorItems',\n },\n ),\n migrations.AddField(\n model_name='seva',\n name='title',\n field=models.CharField(max_length=55, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='sevacategory',\n name='duration',\n field=models.CharField(max_length=11, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='sevacategory',\n name='duration_type',\n field=models.CharField(max_length=11, null=True, db_column='durationType', blank=True),\n ),\n migrations.AddField(\n model_name='sevacategory',\n name='recurrence',\n field=models.CharField(max_length=25, null=True, blank=True),\n ),\n migrations.AddField(\n model_name='sevacategory',\n name='show_start_date',\n field=models.BooleanField(default=True, db_column='showStartDate'),\n ),\n migrations.AddField(\n model_name='sevacategory',\n name='sponsor_item_type',\n field=models.CharField(max_length=55, null=True, db_column='sponsorItemType', blank=True),\n ),\n migrations.AlterField(\n model_name='seva',\n name='enddate',\n field=models.DateField(null=True, db_column='endDate', blank=True),\n ),\n migrations.AlterField(\n model_name='seva',\n name='startdate',\n field=models.DateField(null=True, db_column='startDate', blank=True),\n ),\n migrations.AddField(\n model_name='sponsoritem',\n name='seva_id',\n field=models.ForeignKey(db_column='sevaId', to='member.Seva', unique=True),\n ),\n migrations.AddField(\n model_name='sponsoritem',\n name='updated_by',\n field=models.ForeignKey(related_name='sponsoritem_updated_by', db_column='updatedBy', 
blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True),\n ),\n ]\n","sub_path":"Django/Django_Old/disa-py/member/migrations/0003_auto_20160315_0631.py","file_name":"0003_auto_20160315_0631.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311171277","text":"\"\"\"add error to GeoFileStatus\n\nRevision ID: 4231989b78d5\nRevises: ed182660c635\nCreate Date: 2020-09-18 11:17:54.345395\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = \"4231989b78d5\"\ndown_revision = \"ed182660c635\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.execute(\"COMMIT\")\n op.execute(\"ALTER TYPE geofilestatus ADD VALUE 'error'\")\n\n\ndef downgrade():\n op.execute(\"UPDATE geofile SET status='importing' WHERE status='error'\")\n op.execute(\n \"DELETE FROM pg_enum WHERE enumtypid='geofilestatus'::regtype AND enumlabel='error'\"\n )\n","sub_path":"backend/app/alembic/versions/4231989b78d5_add_error_to_geofilestatus.py","file_name":"4231989b78d5_add_error_to_geofilestatus.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591481018","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\n\ndef get_title(url):\n\n try:\n html=urlopen(url) #서버에 응답이 없을경우(500) 또는 파일이 없을경우 (404)에 HTTPERROR가 발생하여 None을 리턴\n except HTTPError as e:\n return None\n try:\n bsobj = BeautifulSoup(html.read(),\"html.parser\") #html 파일을 객체로 읽어온뒤에 bs4객체로 변환한다.\n title = bsobj.head.title # head값이 없��경우에는 None 리턴, None리턴 값의 attribute를 호출하면 AttributeError가 발생한다\n except AttributeError as e:\n return None\n return title\n\n\nresult = get_title(\"http://www.naver.com\")\nif result == None:\n print(\"this site does not use tags.\")\nprint(result)","sub_path":"crawler_1chapter.py","file_name":"crawler_1chapter.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"564022619","text":"import sys\nfrom math import prod\n\nlines = []\nfor i in sys.stdin:\n lines.append(i.strip(\"\\n\"))\n\nslopes = [(1, -1), (3, -1), (5, -1), (7, -1), (1, -2)]\n\n\ndef encounters(lines, slope):\n position = (0, 0)\n multiplier = 1\n trees = 0\n count = abs(slope[1]) - 1\n\n for i in lines:\n if count != abs(slope[1]) - 1:\n count += 1\n continue\n\n string = i * multiplier\n\n if position[0] > (len(string) - 1):\n multiplier += 1\n string = i * multiplier\n\n if string[position[0]] == \"#\":\n trees += 1\n\n position = (position[0] + slope[0], position[1] + slope[1])\n count = 0\n return trees\n\n\nresults = []\nfor i in slopes:\n results.append(encounters(lines, i))\n\nprint(prod(results))\n","sub_path":"day-3/python/day-3-2.py","file_name":"day-3-2.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"521035753","text":"\"\"\"\n\"\"\"\n\nimport sys\nimport unittest\n\nfrom minds.safe_config import cfg as testcfg\nfrom minds import lucene_logic\n\n\nclass TestLuceneLogic(unittest.TestCase):\n\n def setUp(self):\n self.indexpath = testcfg.getpath('archiveindex')\n self.cleanup()\n\n\n def tearDown(self):\n self.cleanup()\n\n\n def cleanup(self):\n self.assertEqual('testdata/archive/index', self.indexpath) # make sure don't delete wrong data\n if 
self.indexpath.exists():\n self.indexpath.rmtree()\n\n\n def _test_index_and_search(self, **args):\n\n writer = lucene_logic.Writer(**args)\n try:\n if len(args) == 0: # special case for RAM directory\n args = { 'directory': writer.directory } # pass the RAM directory for subsequence searching\n\n if writer.docCount() <= 1:\n writer.addDocument(u'1', {'uri': u'http://a', 'date': '2004'}, u'content1')\n writer.addDocument(u'2', {'uri': u'http://a', 'date': '2005'}, u'content2')\n finally:\n writer.close()\n\n reader = lucene_logic.Reader(**args)\n try:\n self.assertEqual(reader.numDocs(), 3) # 2 document added + 1 version document\n self.assert_(reader.hasDocument(u'1'))\n self.assert_(not reader.hasDocument(u'3'))\n finally:\n reader.close()\n\n searcher = lucene_logic.Searcher(**args)\n try:\n hits = searcher.searchLast(u'http://a')\n self.assertEqual(len(hits), 2)\n self.assertEqual(hits.doc(0).get('docid'), u'2')\n self.assertEqual(hits.doc(0).get('date'), u'2005')\n self.assertEqual(hits.doc(1).get('docid'), u'1')\n self.assertEqual(hits.doc(1).get('date'), u'2004')\n\n hits = searcher.searchLast(u'not exist')\n self.assertEqual(len(hits), 0)\n finally:\n searcher.close()\n\n\n def test_RAM(self):\n self.assert_(not self.indexpath.exists())\n self._test_index_and_search()\n self.assert_(not self.indexpath.exists())\n\n\n def test_FSDirectory(self):\n # iteration 1: start with empty diretory\n self.assert_(not self.indexpath.exists())\n self._test_index_and_search(pathname=self.indexpath)\n\n # iteration 2: with existing index\n self.assert_(self.indexpath.exists())\n self._test_index_and_search(pathname=self.indexpath)\n\n\n def test_version(self):\n # check for the version document added to new index\n reader = lucene_logic.Reader()\n version = testcfg.get('version.number', '?')\n self.assertEqual(1, reader.numDocs())\n self.assertEqual(version, reader.getVersion())\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tags/release-0.8.0/minds/test/test_lucene_logic.py","file_name":"test_lucene_logic.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520949015","text":"from keras.layers import Input, Conv2D, MaxPool2D, AvgPool2D, Flatten, Dense, GlobalAveragePooling2D\r\nfrom keras.layers import Dropout, BatchNormalization, Activation, concatenate\r\nfrom keras import regularizers\r\nfrom keras.models import Model\r\nimport keras.activations as activation\r\nimport numpy as np\r\n\r\n\r\n\r\nclass inception_V1():\r\n def __init__(self, batch_size=32,\r\n n_classes=1000):\r\n # 5\r\n\r\n self.data_size = (batch_size, 224, 224, 3)\r\n\r\n self.n_classes = n_classes\r\n self.decay_weight = 0.5e-3\r\n self.layer_lst = ['Conv2d_0a_1x1',\r\n 'Conv2d_0a_1x1', 'Conv2d_0b_3x3',\r\n 'Conv2d_0a_1x1', 'Conv2d_0b_3x3'\r\n , 'MaxPool_0a_3x3', 'Conv2d_0b_1x1']\r\n @property\r\n def input_size(self):\r\n return self.data_size[1:]\r\n\r\n def block(self, layer_name, net, out_lst):\r\n assert isinstance(out_lst, list), TypeError\r\n assert len(out_lst) == 6, ValueError\r\n\r\n net0 = Conv2D(out_lst[0], (1, 1), strides=(1, 1), padding='same',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv1')(net)\r\n\r\n net1 = Conv2D(out_lst[1], (1, 1), strides=(1, 1), padding='same',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv2_1')(net)\r\n net1 = Conv2D(out_lst[2], (3, 3), strides=(1, 1), padding='same',\r\n 
kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv2_2')(net1)\r\n\r\n net2 = Conv2D(out_lst[3], (1, 1), strides=(1, 1), padding='same',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv3_1')(net)\r\n net2 = Conv2D(out_lst[4], (5, 5), strides=(1, 1), padding='same',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv3_2')(net2)\r\n\r\n net3 = MaxPool2D((3, 3), strides=(1, 1),padding='same', name=layer_name +\"maxpool4_1\")(net)\r\n net3 = Conv2D(out_lst[5], (1, 1), strides=(1, 1), padding='same',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv4_2')(net3)\r\n\r\n return concatenate([net0, net1, net2, net3], axis=3)\r\n\r\n def subbranch(self, layer_name,net):\r\n net = AvgPool2D((5, 5), strides=(3, 3), padding='valid', name=layer_name + 'avgpool')(net)\r\n net = Conv2D(128, (1, 1), strides=(1, 1), padding='same',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=layer_name + 'conv1')(net)\r\n net = Flatten(name=layer_name + 'Flatten1')(net)\r\n net = Dense(1024, activation='relu', name=layer_name + 'fc1')(net)\r\n net = Dropout(0.7,name=layer_name + 'dropout1')(net)\r\n net = Dense(self.n_classes, activation='relu', name=layer_name + 'fc2')(net)\r\n return Activation('softmax',name=layer_name + 'softmax')(net)\r\n\r\n\r\n def build(self):\r\n data_input = Input(batch_shape=self.data_size)\r\n net = Conv2D(64, (7, 7), strides=(2, 2),\r\n padding=\"same\", activation=\"relu\",\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=\"conv1\")(data_input)\r\n net = MaxPool2D((3, 3), strides=(2, 2),padding='same', name=\"maxpool1\")(net)\r\n net = BatchNormalization()(net)\r\n\r\n net = Conv2D(192, (1, 1), strides=(1, 1), padding='valid',\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name='conv2_1')(net)\r\n net = Conv2D(192, (3, 3), strides=(1, 1),\r\n activation=\"relu\", padding=\"same\",\r\n kernel_regularizer=regularizers.l2(self.decay_weight),\r\n name=\"conv2_2\")(net)\r\n net = BatchNormalization()(net)\r\n net = MaxPool2D((2, 2), strides=(2, 2),padding='same', name=\"maxpool2\")(net)\r\n\r\n net = self.block('inception_3a_', net, [64, 96, 128, 16, 32, 32])\r\n net = self.block('inception_3b_', net, [128, 128, 192, 32, 96, 64])\r\n\r\n net = MaxPool2D((3, 3), strides=(2, 2),padding='same', name=\"maxpool3\")(net)\r\n\r\n net = self.block('inception_4a_', net, [192, 96, 208, 16, 48, 64])\r\n loss01 = self.subbranch('sub_loss3_1_',net)\r\n net = self.block('inception_4b_', net, [160, 112, 224, 24, 64, 64])\r\n net = self.block('inception_4c_', net, [128, 128, 256, 24, 64, 64])\r\n net = self.block('inception_4d_', net, [112, 144, 288, 32, 64, 64])\r\n loss02 = self.subbranch('sub_loss3_2_', net)\r\n net = self.block('inception_4e_', net, [256, 160, 320, 32, 128, 128])\r\n\r\n net = MaxPool2D((3, 3), strides=(2, 2),padding='same', name=\"maxpool4\")(net)\r\n\r\n net = self.block('inception_5a_', net, [256, 160, 320, 32, 128, 128])\r\n net = self.block('inception_5b_', net, [384, 192, 384, 48, 128, 128])\r\n\r\n net = AvgPool2D((7, 7), strides=(1, 1), padding='valid', name='avgpool3')(net)\r\n net = Dropout(0.4)(net)\r\n net = Flatten()(net)\r\n net = Dense(self.n_classes, name=\"fc1\", activation=\"relu\")(net)\r\n loss03 = Activation('softmax',name='loss3_3')(net)\r\n loss = 0.3*(loss01+loss02)+0.7*loss03\r\n model = Model(inputs=data_input, outputs=[loss01,loss02,loss03])\r\n from keras.utils import 
plot_model\r\n import os\r\n plot_model(model, to_file=os.path.join('./imgs', \"005_inceptionv2.png\"), show_shapes=True)\r\n model.summary()\r\n return loss\r\n\r\n\r\nmodel = inception_V1()\r\nmodel.build()\r\n","sub_path":"01 NetWork/nets_strut/005_inception_v2.py","file_name":"005_inception_v2.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106408307","text":"from pywebcopy import save_website\n\ndir_name = 'login'\nsite_url = 'http://brandio.io/envato/iofrm/html/'\nkwargs = {'project_name': dir_name}\nsave_website(\n url=site_url,\n project_folder=dir_name,\n **kwargs\n)\n","sub_path":"utils/httrack.py","file_name":"httrack.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434791938","text":"\"\"\"Support for sensors through the SmartThings cloud API.\"\"\"\nfrom collections import namedtuple\nfrom typing import Optional, Sequence\n\nfrom homeassistant.const import (\n DEVICE_CLASS_BATTERY, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE,\n DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_TIMESTAMP, MASS_KILOGRAMS,\n ENERGY_KILO_WATT_HOUR, POWER_WATT, TEMP_CELSIUS, TEMP_FAHRENHEIT)\n\nfrom . import SmartThingsEntity\nfrom .const import DATA_BROKERS, DOMAIN\n\nMap = namedtuple(\"map\", \"attribute name default_unit device_class\")\n\nCAPABILITY_TO_SENSORS = {\n 'activityLightingMode': [\n Map('lightingMode', \"Activity Lighting Mode\", None, None)],\n 'airConditionerMode': [\n Map('airConditionerMode', \"Air Conditioner Mode\", None, None)],\n 'airQualitySensor': [\n Map('airQuality', \"Air Quality\", 'CAQI', None)],\n 'alarm': [\n Map('alarm', \"Alarm\", None, None)],\n 'audioVolume': [\n Map('volume', \"Volume\", \"%\", None)],\n 'battery': [\n Map('battery', \"Battery\", \"%\", DEVICE_CLASS_BATTERY)],\n 'bodyMassIndexMeasurement': [\n Map('bmiMeasurement', \"Body Mass Index\", \"kg/m^2\", None)],\n 'bodyWeightMeasurement': [\n Map('bodyWeightMeasurement', \"Body Weight\", MASS_KILOGRAMS, None)],\n 'carbonDioxideMeasurement': [\n Map('carbonDioxide', \"Carbon Dioxide Measurement\", \"ppm\", None)],\n 'carbonMonoxideDetector': [\n Map('carbonMonoxide', \"Carbon Monoxide Detector\", None, None)],\n 'carbonMonoxideMeasurement': [\n Map('carbonMonoxideLevel', \"Carbon Monoxide Measurement\", \"ppm\",\n None)],\n 'dishwasherOperatingState': [\n Map('machineState', \"Dishwasher Machine State\", None, None),\n Map('dishwasherJobState', \"Dishwasher Job State\", None, None),\n Map('completionTime', \"Dishwasher Completion Time\", None,\n DEVICE_CLASS_TIMESTAMP)],\n 'dryerMode': [\n Map('dryerMode', \"Dryer Mode\", None, None)],\n 'dryerOperatingState': [\n Map('machineState', \"Dryer Machine State\", None, None),\n Map('dryerJobState', \"Dryer Job State\", None, None),\n Map('completionTime', \"Dryer Completion Time\", None,\n DEVICE_CLASS_TIMESTAMP)],\n 'dustSensor': [\n Map('fineDustLevel', \"Fine Dust Level\", None, None),\n Map('dustLevel', \"Dust Level\", None, None)],\n 'energyMeter': [\n Map('energy', \"Energy Meter\", ENERGY_KILO_WATT_HOUR, None)],\n 'equivalentCarbonDioxideMeasurement': [\n Map('equivalentCarbonDioxideMeasurement',\n 'Equivalent Carbon Dioxide Measurement', 'ppm', None)],\n 'formaldehydeMeasurement': [\n Map('formaldehydeLevel', 'Formaldehyde Measurement', 'ppm', None)],\n 'illuminanceMeasurement': [\n Map('illuminance', \"Illuminance\", 'lux', DEVICE_CLASS_ILLUMINANCE)],\n 
'infraredLevel': [\n Map('infraredLevel', \"Infrared Level\", '%', None)],\n 'lock': [\n Map('lock', \"Lock\", None, None)],\n 'mediaInputSource': [\n Map('inputSource', \"Media Input Source\", None, None)],\n 'mediaPlaybackRepeat': [\n Map('playbackRepeatMode', \"Media Playback Repeat\", None, None)],\n 'mediaPlaybackShuffle': [\n Map('playbackShuffle', \"Media Playback Shuffle\", None, None)],\n 'mediaPlayback': [\n Map('playbackStatus', \"Media Playback Status\", None, None)],\n 'odorSensor': [\n Map('odorLevel', \"Odor Sensor\", None, None)],\n 'ovenMode': [\n Map('ovenMode', \"Oven Mode\", None, None)],\n 'ovenOperatingState': [\n Map('machineState', \"Oven Machine State\", None, None),\n Map('ovenJobState', \"Oven Job State\", None, None),\n Map('completionTime', \"Oven Completion Time\", None, None)],\n 'ovenSetpoint': [\n Map('ovenSetpoint', \"Oven Set Point\", None, None)],\n 'powerMeter': [\n Map('power', \"Power Meter\", POWER_WATT, None)],\n 'powerSource': [\n Map('powerSource', \"Power Source\", None, None)],\n 'refrigerationSetpoint': [\n Map('refrigerationSetpoint', \"Refrigeration Setpoint\", None,\n DEVICE_CLASS_TEMPERATURE)],\n 'relativeHumidityMeasurement': [\n Map('humidity', \"Relative Humidity Measurement\", '%',\n DEVICE_CLASS_HUMIDITY)],\n 'robotCleanerCleaningMode': [\n Map('robotCleanerCleaningMode', \"Robot Cleaner Cleaning Mode\",\n None, None)],\n 'robotCleanerMovement': [\n Map('robotCleanerMovement', \"Robot Cleaner Movement\", None, None)],\n 'robotCleanerTurboMode': [\n Map('robotCleanerTurboMode', \"Robot Cleaner Turbo Mode\", None, None)],\n 'signalStrength': [\n Map('lqi', \"LQI Signal Strength\", None, None),\n Map('rssi', \"RSSI Signal Strength\", None, None)],\n 'smokeDetector': [\n Map('smoke', \"Smoke Detector\", None, None)],\n 'temperatureMeasurement': [\n Map('temperature', \"Temperature Measurement\", None,\n DEVICE_CLASS_TEMPERATURE)],\n 'thermostatCoolingSetpoint': [\n Map('coolingSetpoint', \"Thermostat Cooling Setpoint\", None,\n DEVICE_CLASS_TEMPERATURE)],\n 'thermostatFanMode': [\n Map('thermostatFanMode', \"Thermostat Fan Mode\", None, None)],\n 'thermostatHeatingSetpoint': [\n Map('heatingSetpoint', \"Thermostat Heating Setpoint\", None,\n DEVICE_CLASS_TEMPERATURE)],\n 'thermostatMode': [\n Map('thermostatMode', \"Thermostat Mode\", None, None)],\n 'thermostatOperatingState': [\n Map('thermostatOperatingState', \"Thermostat Operating State\",\n None, None)],\n 'thermostatSetpoint': [\n Map('thermostatSetpoint', \"Thermostat Setpoint\", None,\n DEVICE_CLASS_TEMPERATURE)],\n 'threeAxis': [\n Map('threeAxis', \"Three Axis\", None, None)],\n 'tvChannel': [\n Map('tvChannel', \"Tv Channel\", None, None)],\n 'tvocMeasurement': [\n Map('tvocLevel', \"Tvoc Measurement\", 'ppm', None)],\n 'ultravioletIndex': [\n Map('ultravioletIndex', \"Ultraviolet Index\", None, None)],\n 'voltageMeasurement': [\n Map('voltage', \"Voltage Measurement\", 'V', None)],\n 'washerMode': [\n Map('washerMode', \"Washer Mode\", None, None)],\n 'washerOperatingState': [\n Map('machineState', \"Washer Machine State\", None, None),\n Map('washerJobState', \"Washer Job State\", None, None),\n Map('completionTime', \"Washer Completion Time\", None,\n DEVICE_CLASS_TIMESTAMP)]\n}\n\nUNITS = {\n 'C': TEMP_CELSIUS,\n 'F': TEMP_FAHRENHEIT\n}\n\nTHREE_AXIS_NAMES = ['X Coordinate', 'Y Coordinate', 'Z Coordinate']\n\n\nasync def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n \"\"\"Platform uses config entry setup.\"\"\"\n pass\n\n\nasync def 
async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Add binary sensors for a config entry.\"\"\"\n from pysmartthings import Capability\n broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]\n sensors = []\n for device in broker.devices.values():\n for capability in broker.get_assigned(device.device_id, 'sensor'):\n if capability == Capability.three_axis:\n sensors.extend(\n [SmartThingsThreeAxisSensor(device, index)\n for index in range(len(THREE_AXIS_NAMES))])\n else:\n maps = CAPABILITY_TO_SENSORS[capability]\n sensors.extend([\n SmartThingsSensor(\n device, m.attribute, m.name, m.default_unit,\n m.device_class)\n for m in maps])\n async_add_entities(sensors)\n\n\ndef get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:\n \"\"\"Return all capabilities supported if minimum required are present.\"\"\"\n return [capability for capability in CAPABILITY_TO_SENSORS\n if capability in capabilities]\n\n\nclass SmartThingsSensor(SmartThingsEntity):\n \"\"\"Define a SmartThings Sensor.\"\"\"\n\n def __init__(self, device, attribute: str, name: str,\n default_unit: str, device_class: str):\n \"\"\"Init the class.\"\"\"\n super().__init__(device)\n self._attribute = attribute\n self._name = name\n self._device_class = device_class\n self._default_unit = default_unit\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of the binary sensor.\"\"\"\n return '{} {}'.format(self._device.label, self._name)\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return a unique ID.\"\"\"\n return '{}.{}'.format(self._device.device_id, self._attribute)\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._device.status.attributes[self._attribute].value\n\n @property\n def device_class(self):\n \"\"\"Return the device class of the sensor.\"\"\"\n return self._device_class\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit this state is expressed in.\"\"\"\n unit = self._device.status.attributes[self._attribute].unit\n return UNITS.get(unit, unit) if unit else self._default_unit\n\n\nclass SmartThingsThreeAxisSensor(SmartThingsEntity):\n \"\"\"Define a SmartThings Three Axis Sensor.\"\"\"\n\n def __init__(self, device, index):\n \"\"\"Init the class.\"\"\"\n super().__init__(device)\n self._index = index\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of the binary sensor.\"\"\"\n return '{} {}'.format(\n self._device.label, THREE_AXIS_NAMES[self._index])\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return a unique ID.\"\"\"\n return '{}.{}'.format(\n self._device.device_id, THREE_AXIS_NAMES[self._index])\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n from pysmartthings import Attribute\n three_axis = self._device.status.attributes[Attribute.three_axis].value\n try:\n return three_axis[self._index]\n except (TypeError, IndexError):\n return None\n","sub_path":"homeassistant/components/smartthings/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":9972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504397136","text":"from sklearn import preprocessing\nimport numpy as np\nimport pandas as pd\n\n\ndef create_df(data_path):\n # input: the path of the csv file\n # output: data frame\n return pd.read_csv(data_path)\n\n\ndef nan_columns(df):\n # input: data frame\n # output: a list of names of columns that contain nan values in the data frame\n return 
df.columns[df.isnull().any()].tolist()\n\n\ndef categorical_columns(df):\n # input: data frame\n # output: a list of column names that contain categorical values in the data frame\n return df.select_dtypes(include=['object', 'category']).columns.tolist()\n\n\ndef replace_missing_features(df, nancolumns):\n # input: data frame, list of column names that contain nan values\n # output: data frame\n new_df1 = df.copy()\n for nancolumn in nancolumns:\n new_df1[nancolumn] = new_df1[nancolumn].fillna(new_df1[nancolumn].median())\n return new_df1\n\n\ndef cat_to_num(new_df1, catcolumns):\n # input: data frame, list of categorical feature column names\n # output: data frame\n new_df2 = new_df1.copy()\n return pd.get_dummies(new_df2, columns=catcolumns)\n\n\ndef standardization(new_df2, labelcol):\n # input: data frame and name of the label column\n # output: scaled data frame\n new_df3 = new_df2.drop(labelcol, axis=1)\n scaler = preprocessing.StandardScaler()\n scaled = scaler.fit_transform(new_df3)\n new_df3 = pd.DataFrame(scaled, columns=new_df3.columns.values)\n new_df3.loc[:, labelcol] = new_df2.loc[:, labelcol].copy()\n return new_df3\n\n\ndef my_train_test_split(new_df3, labelcol, test_ratio):\n # input: data frame, name of the label column and test data percentage\n # output: X_train, X_test, y_train, y_test\n np.random.seed(0) # DON'T ERASE THIS LINE\n\n indices = np.random.permutation(len(new_df3))\n test_size = int(len(new_df3) * test_ratio)\n train_i, test_i = indices[test_size:], indices[:test_size]\n columns = new_df3.columns.values\n label_index = np.argwhere(columns == labelcol)\n wo_labelcol = np.delete(columns, label_index)\n return new_df3.loc[train_i, wo_labelcol].values, new_df3.loc[test_i, wo_labelcol].values, \\\n new_df3.loc[train_i, labelcol].values, new_df3.loc[test_i, labelcol].values\n\n\ndef main(dataPath, testRatio, labelColumn):\n # input: the path of the csv file, test data percentage and name of the label column\n # output: X_train, X_test, y_train, y_test as numpy arrays\n df = create_df(dataPath)\n nans = nan_columns(df)\n cats = categorical_columns(df)\n cleaned = replace_missing_features(df, nans)\n encoded = cat_to_num(cleaned, cats)\n standardized = standardization(encoded, labelColumn)\n return my_train_test_split(standardized, labelColumn, testRatio)\n","sub_path":"hw1/analysis_and_preprocessing.py","file_name":"analysis_and_preprocessing.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533794581","text":"#!/usr/bin/env python\nimport os\nimport json\nimport logging\nimport threading\n\nfrom modules.telegram_bot_webhook_api import TelegramWebhookBot\nfrom modules.client_telethon import MyTelegramClient\nfrom modules.bittrex_api import load_api_key_for_the_first_run\n\nlogging.getLogger().setLevel(logging.INFO)\n\nbot = TelegramWebhookBot()\nclient = MyTelegramClient(bot)\n\nINIT_KEY=False\n\ndef run_bot():\n global bot\n bot.run()\n\ndef run_client():\n global client\n client.run()\n\nif __name__ == \"__main__\":\n if INIT_KEY:\n load_api_key_for_the_first_run()\n else:\n b = threading.Thread(name='bot', target=run_bot)\n c = threading.Thread(name='client', target=run_client)\n\n b.start()\n c.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286479596","text":"import random\nimport time\n\n\ndef solver(a, b):\n 
return (a + b) // 2\n\n\nprint(\"Welcome to Guess the Number!\")\n# name = input(\"Enter your name: \\n\")\n# print(f\"{name}, I'm thinking about number between 1 and 100.\")\na = 1\nb = 1000\nnumber = random.randint(a, b)\nguesses = 0\n\nwhile True:\n    print(\"Take a guess.\")\n    guesses += 1\n\n    guess = solver(a, b)\n    time.sleep(1)\n    print(guess)\n\n    if guess > number:\n        print(\"Too high.\")\n        b = guess - 1\n    elif guess < number:\n        print(\"Too low.\")\n        a = guess + 1\n    else:\n        print(f\"Correct! You guessed in {guesses} guesses!\")\n        break\n\n","sub_path":"number_guess.py","file_name":"number_guess.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"242055329","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: ..\\fteikpy\\layered_model.py\n# Compiled at: 2018-02-19 18:21:00\n# Size of source mod 2**32: 4096 bytes\n\"\"\"\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\nimport numpy as np\nfrom ._fteik2d import fteik2d\nfrom ._lay2vel import lay2vel as l2vf\n__all__ = [\n 'lay2vel', 'lay2tt']\n\ndef lay2vel(lay, dz, grid_shape):\n    \"\"\"\n    Convert a layered model to a continuous velocity model.\n    \n    Parameters\n    ----------\n    lay : ndarray\n        Layer velocities (first column) and interface depth (second column).\n    dz : float\n        Grid size in Z coordinate in meters.\n    grid_shape : tuple (nz, nx[, ny])\n        Grid shape.\n    \n    Returns\n    -------\n    vel : ndarray\n        Velocity model grid in m/s.\n    \"\"\"\n    if not isinstance(lay, np.ndarray) or lay.ndim not in (1, 2):\n        raise ValueError('grid must be a 1-D or 2-D ndarray')\n    if lay.ndim == 1 and lay[0] < 0.0 or lay.ndim == 2 and lay[:, 0].min() < 0.0:\n        raise ValueError('velocities must be positive')\n    if not isinstance(dz, (float, int)) or dz < 0.0:\n        raise ValueError('dz must be positive')\n    zmax = dz * grid_shape[0]\n    if lay.ndim == 1 and lay[1] > zmax or lay.ndim == 2 and lay[:, 1].max() > zmax:\n        raise ValueError('last layer depth must be %.2f' % zmax)\n    if not np.all([isinstance(n, int) for n in grid_shape]) or len(grid_shape) not in (1, 2, 3):\n        raise ValueError('grid_shape must be a tuple of integers of size 1, 2 or 3')\n    if len(grid_shape) == 1:\n        return l2vf.lay2vel1(lay, dz, *grid_shape)\n    elif len(grid_shape) == 2:\n        return l2vf.lay2vel2(lay, dz, *grid_shape)\n    elif len(grid_shape) == 3:\n        return l2vf.lay2vel3(lay, dz, *grid_shape)\n\n\ndef lay2tt(velocity_model, grid_size, sources, receivers, n_sweep=1, n_threads=1):\n    \"\"\"\n    Given a layered velocity model, compute the first arrival traveltime for\n    each source and each receiver. Only useful when working in 3-D, as a 2-D\n    eikonal solver is used for traveltime computation.\n    \n    Parameters\n    ----------\n    velocity_model : ndarray of shape (nz, nx)\n        Velocity model grid in m/s.\n    grid_size : tuple (dz, dx)\n        Grid size in meters.\n    sources : ndarray\n        Source coordinates (Z, X[, Y]).\n    receivers : ndarray\n        Receiver coordinates (Z, X[, Y]).\n    n_sweep : int, default 1\n        Number of sweeps.\n    n_threads : int, default 1\n        Number of threads to pass to OpenMP.\n    \n    Returns\n    -------\n    tcalc : ndarray of shape (nrcv, nsrc)\n        Traveltimes for each source and each receiver.\n    \"\"\"\n    if not isinstance(velocity_model, np.ndarray) or velocity_model.ndim != 2:\n        raise ValueError('velocity_model must be a 2-D ndarray')\n    if np.any(velocity_model <= 0.0):\n        raise ValueError('velocity_model must be positive')\n    if not isinstance(grid_size, (list, tuple, np.ndarray)):\n        raise ValueError('grid_size must be a list, tuple or ndarray')\n    if len(grid_size) != 2:\n        raise ValueError('grid_size should be of length 2, got %d' % len(grid_size))\n    if np.any(np.array(grid_size) <= 0.0):\n        raise ValueError('elements in grid_size must be positive')\n    if not isinstance(sources, np.ndarray) or sources.shape[1] != 3:\n        raise ValueError('sources must be ndarray with 3 columns')\n    if not isinstance(receivers, np.ndarray) or receivers.shape[1] != 3:\n        raise ValueError('receivers must be ndarray with 3 columns')\n    if not isinstance(n_sweep, int) or n_sweep <= 0:\n        raise ValueError('n_sweep must be a positive integer, got %s' % n_sweep)\n    if not isinstance(n_threads, int) or n_threads < 1:\n        raise ValueError('n_threads must be at least 1, got %s' % n_threads)\n    dz, dx = grid_size\n    tcalc = fteik2d.lay2tt(1.0 / velocity_model, dz, dx, sources[:, 0], sources[:, 1], sources[:, 2], receivers[:, 0], receivers[:, 1], receivers[:, 2], n_sweep, n_threads=n_threads)\n    return tcalc","sub_path":"pycfiles/fteikpy-1.5.0.tar/layered_model.cpython-36.py","file_name":"layered_model.cpython-36.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"614519712","text":"\"\"\"\nSO term\tSO description\tSO accession\tDisplay term\tIMPACT\tRANK\ntranscript_ablation\tA feature ablation whereby the deleted region includes a transcript feature\tSO:0001893\tTranscript ablation\tHIGH\t1\nsplice_acceptor_variant\tA splice variant that changes the 2 base region at the 3' end of an intron\tSO:0001574\tSplice acceptor variant\tHIGH\t3\nsplice_donor_variant\tA splice variant that changes the 2 base region at the 5' end of an intron\tSO:0001575\tSplice donor variant\tHIGH\t3\nstop_gained\tA sequence variant whereby at least one base of a codon is changed, resulting in a premature stop codon, leading to a shortened transcript\tSO:0001587\tStop gained\tHIGH\t4\nframeshift_variant\tA sequence variant which causes a disruption of the translational reading frame, because the number of nucleotides inserted or deleted is not a multiple of three\tSO:0001589\tFrameshift variant\tHIGH\t5\nstop_lost\tA sequence variant where at least one base of the terminator codon (stop) is changed, resulting in an elongated transcript\tSO:0001578\tStop lost\tHIGH\t6\nstart_lost\tA codon variant that changes at least one base of the canonical start codon\tSO:0002012\tStart lost\tHIGH\t7\ntranscript_amplification\tA feature amplification of a region containing a transcript\tSO:0001889\tTranscript 
amplification\tHIGH\t8\ninframe_insertion\tAn inframe non synonymous variant that inserts bases into in the coding sequence\tSO:0001821\tInframe insertion\tMODERATE\t10\ninframe_deletion\tAn inframe non synonymous variant that deletes bases from the coding sequence\tSO:0001822\tInframe deletion\tMODERATE\t11\nmissense_variant\tA sequence variant, that changes one or more bases, resulting in a different amino acid sequence but where the length is preserved\tSO:0001583\tMissense variant\tMODERATE\t12\nprotein_altering_variant\tA sequence_variant which is predicted to change the protein encoded in the coding sequence\tSO:0001818\tProtein altering variant\tMODERATE\t12\nsplice_region_variant\tA sequence variant in which a change has occurred within the region of the splice site, either within 1-3 bases of the exon or 3-8 bases of the intron\tSO:0001630\tSplice region variant\tLOW\t13\nincomplete_terminal_codon_variant\tA sequence variant where at least one base of the final codon of an incompletely annotated transcript is changed\tSO:0001626\tIncomplete terminal codon variant\tLOW\t14\nstart_retained_variant\tA sequence variant where at least one base in the start codon is changed, but the start remains\tSO:0002019\tStart retained variant\tLOW\t15\nstop_retained_variant\tA sequence variant where at least one base in the terminator codon is changed, but the terminator remains\tSO:0001567\tStop retained variant\tLOW\t15\nsynonymous_variant\tA sequence variant where there is no resulting change to the encoded amino acid\tSO:0001819\tSynonymous variant\tLOW\t15\ncoding_sequence_variant\tA sequence variant that changes the coding sequence\tSO:0001580\tCoding sequence variant\tMODIFIER\t16\nmature_miRNA_variant\tA transcript variant located with the sequence of the mature miRNA\tSO:0001620\tMature miRNA variant\tMODIFIER\t17\n5_prime_UTR_variant\tA UTR variant of the 5' UTR\tSO:0001623\t5 prime UTR variant\tMODIFIER\t18\n3_prime_UTR_variant\tA UTR variant of the 3' UTR\tSO:0001624\t3 prime UTR variant\tMODIFIER\t19\nnon_coding_transcript_exon_variant\tA sequence variant that changes non-coding exon sequence in a non-coding transcript\tSO:0001792\tNon coding transcript exon variant\tMODIFIER\t20\nintron_variant\tA transcript variant occurring within an intron\tSO:0001627\tIntron variant\tMODIFIER\t21\nNMD_transcript_variant\tA variant in a transcript that is the target of NMD\tSO:0001621\tNMD transcript variant\tMODIFIER\t22\nnon_coding_transcript_variant\tA transcript variant of a non coding RNA gene\tSO:0001619\tNon coding transcript variant\tMODIFIER\t23\nupstream_gene_variant\tA sequence variant located 5' of a gene\tSO:0001631\tUpstream gene variant\tMODIFIER\t24\ndownstream_gene_variant\tA sequence variant located 3' of a gene\tSO:0001632\tDownstream gene variant\tMODIFIER\t25\nTFBS_ablation\tA feature ablation whereby the deleted region includes a transcription factor binding site\tSO:0001895\tTFBS ablation\tMODIFIER\t26\nTFBS_amplification\tA feature amplification of a region containing a transcription factor binding site\tSO:0001892\tTFBS amplification\tMODIFIER\t28\nTF_binding_site_variant\tA sequence variant located within a transcription factor binding site\tSO:0001782\tTF binding site variant\tMODIFIER\t30\nregulatory_region_ablation\tA feature ablation whereby the deleted region includes a regulatory region\tSO:0001894\tRegulatory region ablation\tMODERATE\t29\nregulatory_region_amplification\tA feature amplification of a region containing a regulatory region\tSO:0001891\tRegulatory 
region amplification\tMODIFIER\t30\nfeature_elongation\tA sequence variant that causes the extension of a genomic feature, with regard to the reference sequence\tSO:0001907\tFeature elongation\tMODIFIER\t31\nregulatory_region_variant\tA sequence variant located within a regulatory region\tSO:0001566\tRegulatory region variant\tMODIFIER\t33\nfeature_truncation\tA sequence variant that causes the reduction of a genomic feature, with regard to the reference sequence\tSO:0001906\tFeature truncation\tMODIFIER\t36\nintergenic_variant\tA sequence variant located in the intergenic region, between genes\tSO:0001628\tIntergenic variant\tMODIFIER\t38\nsequence_variant\t\t\t\t\t39\n\nhttps://uswest.ensembl.org/info/genome/variation/predicted_data.html\n\"\"\"\n\n\nclass Bio_EnsEMBL_Variation_Utils_Constants(object):\n \"\"\"\n Ensembl variant effect consequences\n See https://uswest.ensembl.org/info/docs/Doxygen/variation-api/Utils_2Constants_8pm_source.html\n \"\"\"\n ATTRIB_TYPE_SO_ACCESSION = 'SO_accession'\n ATTRIB_TYPE_SO_TERM = 'SO_term'\n ATTRIB_TYPE_DISPLAY_TERM = 'display_term'\n ATTRIB_TYPE_NCBI_TERM = 'NCBI_term'\n ATTRIB_TYPE_FEATURE_SO_TERM = 'feature_SO_term'\n ATTRIB_TYPE_RANK = 'rank'\n ATTRIB_TYPE_POLYPHEN_PREDICTION = 'polyphen_prediction'\n ATTRIB_TYPE_SIFT_PREDICTION = 'sift_prediction'\n ATTRIB_TYPE_SHORT_NAME = 'short_name'\n ATTRIB_TYPE_DBSNP_CLIN_SIG = 'dbsnp_clin_sig'\n ATTRIB_TYPE_DGVA_CLIN_SIG = 'dgva_clin_sig'\n ATTRIB_TYPE_CLINVAR_CLIN_SIG = 'clinvar_clin_sig'\n ATTRIB_TYPE_PROT_FUNC_ANALYSIS = 'prot_func_analysis'\n ATTRIB_TYPE_ASSOCIATED_GENE = 'associated_gene'\n ATTRIB_TYPE_RISK_ALLELE = 'risk_allele'\n ATTRIB_TYPE_P_VALUE = 'p_value'\n ATTRIB_TYPE_VARIATION_NAMES = 'variation_names'\n ATTRIB_TYPE_SAMPLE_ID = 'sample_id'\n ATTRIB_TYPE_STRAIN_ID = 'strain_id'\n ATTRIB_TYPE_LOD_SCORE = 'lod_score'\n ATTRIB_TYPE_VARIANCE = 'variance'\n ATTRIB_TYPE_INHERITANCE_TYPE = 'inheritance_type'\n ATTRIB_TYPE_EXTERNAL_ID = 'external_id'\n ATTRIB_TYPE_ODDS_RATIO = 'odds_ratio'\n ATTRIB_TYPE_BETA_COEF = 'beta_coef'\n ATTRIB_TYPE_ALLELE_SYMBOL = 'allele_symbol'\n ATTRIB_TYPE_ALLELE_ACCESSION_ID = 'allele_accession_id'\n ATTRIB_TYPE_MARKER_ACCESSION_ID = 'marker_accession_id'\n ATTRIB_TYPE_EVIDENCE = 'evidence'\n ATTRIB_TYPE_SEQUENCE_NUMBER = 'sequence_number'\n ATTRIB_TYPE_BASED_ON = 'based_on'\n ATTRIB_TYPE_CONSERVATION_SCORE = 'conservation_score'\n ATTRIB_TYPE_REVIEW_STATUS = 'review_status'\n\n SO_TERM_SNV = 'SNV'\n SO_TERM_SUBSTITUTION = 'substitution'\n SO_TERM_INSERTION = 'insertion'\n SO_TERM_DELETION = 'deletion'\n SO_TERM_INDEL = 'indel'\n SO_TERM_TANDEM_REPEAT = 'tandem_repeat'\n SO_TERM_SEQUENCE_ALTERATION = 'sequence_alteration'\n SO_TERM_GENETIC_MARKER = 'genetic_marker'\n SO_TERM_STRUCTURAL_VARIANT = 'structural_variant'\n SO_TERM_COPY_NUMBER_VARIATION = 'copy_number_variation'\n SO_TERM_PROBE = 'probe'\n SO_TERM_COPY_NUMBER_GAIN = 'copy_number_gain'\n SO_TERM_COPY_NUMBER_LOSS = 'copy_number_loss'\n SO_TERM_INVERSION = 'inversion'\n SO_TERM_COMPLEX_STRUCTURAL_ALTERATION = 'complex_structural_alteration'\n SO_TERM_TANDEM_DUPLICATION = 'tandem_duplication'\n SO_TERM_MOBILE_ELEMENT_INSERTION = 'mobile_element_insertion'\n SO_TERM_MOBILE_ELEMENT_DELETION = 'mobile_element_deletion'\n SO_TERM_INTERCHROMOSOMAL_BREAKPOINT = 'interchromosomal_breakpoint'\n SO_TERM_INTRACHROMOSOMAL_BREAKPOINT = 'intrachromosomal_breakpoint'\n SO_TERM_TRANSLOCATION = 'translocation'\n SO_TERM_DUPLICATION = 'duplication'\n SO_TERM_NOVEL_SEQUENCE_INSERTION = 'novel_sequence_insertion'\n 
SO_TERM_INTERCHROMOSOMAL_TRANSLOCATION = 'interchromosomal_translocation'\n SO_TERM_INTRACHROMOSOMAL_TRANSLOCATION = 'intrachromosomal_translocation'\n SO_TERM_ALU_INSERTION = 'Alu_insertion'\n SO_TERM_COMPLEX_SUBSTITUTION = 'complex_substitution'\n SO_TERM_SHORT_TANDEM_REPEAT_VARIATION = 'short_tandem_repeat_variation'\n SO_TERM_LOSS_OF_HETEROZYGOSITY = 'loss_of_heterozygosity'\n SO_TERM_INTERGENIC_VARIANT = 'intergenic_variant'\n SO_TERM_UPSTREAM_GENE_VARIANT = 'upstream_gene_variant'\n SO_TERM_DOWNSTREAM_GENE_VARIANT = 'downstream_gene_variant'\n SO_TERM_SPLICE_DONOR_VARIANT = 'splice_donor_variant'\n SO_TERM_SPLICE_ACCEPTOR_VARIANT = 'splice_acceptor_variant'\n SO_TERM_SPLICE_REGION_VARIANT = 'splice_region_variant'\n SO_TERM_INTRON_VARIANT = 'intron_variant'\n SO_TERM_5_PRIME_UTR_VARIANT = '5_prime_UTR_variant'\n SO_TERM_3_PRIME_UTR_VARIANT = '3_prime_UTR_variant'\n SO_TERM_SYNONYMOUS_VARIANT = 'synonymous_variant'\n SO_TERM_MISSENSE_VARIANT = 'missense_variant'\n SO_TERM_INFRAME_INSERTION = 'inframe_insertion'\n SO_TERM_INFRAME_DELETION = 'inframe_deletion'\n SO_TERM_STOP_GAINED = 'stop_gained'\n SO_TERM_STOP_LOST = 'stop_lost'\n SO_TERM_STOP_RETAINED_VARIANT = 'stop_retained_variant'\n SO_TERM_START_LOST = 'start_lost'\n SO_TERM_START_RETAINED_VARIANT = 'start_retained_variant'\n SO_TERM_FRAMESHIFT_VARIANT = 'frameshift_variant'\n SO_TERM_INCOMPLETE_TERMINAL_CODON_VARIANT = 'incomplete_terminal_codon_variant'\n SO_TERM_NMD_TRANSCRIPT_VARIANT = 'NMD_transcript_variant'\n SO_TERM_NON_CODING_TRANSCRIPT_VARIANT = 'non_coding_transcript_variant'\n SO_TERM_NON_CODING_TRANSCRIPT_EXON_VARIANT = 'non_coding_transcript_exon_variant'\n SO_TERM_MATURE_MIRNA_VARIANT = 'mature_miRNA_variant'\n SO_TERM_CODING_SEQUENCE_VARIANT = 'coding_sequence_variant'\n SO_TERM_REGULATORY_REGION_VARIANT = 'regulatory_region_variant'\n SO_TERM_TF_BINDING_SITE_VARIANT = 'TF_binding_site_variant'\n SO_TERM_TRANSCRIPT_ABLATION = 'transcript_ablation'\n SO_TERM_TRANSCRIPT_AMPLIFICATION = 'transcript_amplification'\n SO_TERM_TFBS_ABLATION = 'TFBS_ablation'\n SO_TERM_TFBS_AMPLIFICATION = 'TFBS_amplification'\n SO_TERM_REGULATORY_REGION_ABLATION = 'regulatory_region_ablation'\n SO_TERM_REGULATORY_REGION_AMPLIFICATION = 'regulatory_region_amplification'\n SO_TERM_FEATURE_ELONGATION = 'feature_elongation'\n SO_TERM_FEATURE_TRUNCATION = 'feature_truncation'\n SO_TERM_PROTEIN_ALTERING_VARIANT = 'protein_altering_variant'\n\n variation_classes = {\n 'SNV': {\n 'somatic_display_term': 'somatic SNV',\n 'SO_accession': 'SO:0001483',\n 'display_term': 'SNP'\n },\n 'substitution': {\n 'somatic_display_term': 'somatic substitution',\n 'SO_accession': 'SO:1000002',\n 'display_term': 'substitution'\n },\n 'insertion': {\n 'somatic_display_term': 'somatic insertion',\n 'SO_accession': 'SO:0000667',\n 'display_term': 'insertion'\n },\n 'deletion': {\n 'somatic_display_term': 'somatic deletion',\n 'SO_accession': 'SO:0000159',\n 'display_term': 'deletion'\n },\n 'indel': {\n 'somatic_display_term': 'somatic indel',\n 'SO_accession': 'SO:1000032',\n 'display_term': 'indel'\n },\n 'tandem_repeat': {\n 'somatic_display_term': 'somatic tandem repeat',\n 'SO_accession': 'SO:0000705',\n 'display_term': 'tandem repeat'\n },\n 'sequence_alteration': {\n 'somatic_display_term': 'somatic sequence alteration',\n 'SO_accession': 'SO:0001059',\n 'display_term': 'sequence alteration'\n },\n 'genetic_marker': {\n 'somatic_display_term': 'somatic genetic marker',\n 'SO_accession': 'SO:0001645',\n 'display_term': 'genetic marker'\n },\n 
'structural_variant': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic SV',\n 'SO_accession': 'SO:0001537',\n 'display_term': 'SV'\n },\n 'copy_number_variation': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic CNV',\n 'SO_accession': 'SO:0001019',\n 'display_term': 'CNV'\n },\n 'probe': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic CNV_PROBE',\n 'SO_accession': 'SO:0000051',\n 'display_term': 'CNV_PROBE'\n },\n 'copy_number_gain': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic gain',\n 'SO_accession': 'SO:0001742',\n 'display_term': 'gain'\n },\n 'copy_number_loss': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic loss',\n 'SO_accession': 'SO:0001743',\n 'display_term': 'loss'\n },\n 'inversion': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic inversion',\n 'SO_accession': 'SO:1000036',\n 'display_term': 'inversion'\n },\n 'complex_structural_alteration': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic complex alteration',\n 'SO_accession': 'SO:0001784',\n 'display_term': 'complex alteration'\n },\n 'tandem_duplication': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic tandem duplication',\n 'SO_accession': 'SO:1000173',\n 'display_term': 'tandem duplication'\n },\n 'mobile_element_insertion': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic mobile element insertion',\n 'SO_accession': 'SO:0001837',\n 'display_term': 'mobile element insertion'\n },\n 'mobile_element_deletion': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic mobile element deletion',\n 'SO_accession': 'SO:0002066',\n 'display_term': 'mobile element deletion'\n },\n 'interchromosomal_breakpoint': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic interchromosomal breakpoint',\n 'SO_accession': 'SO:0001873',\n 'display_term': 'interchromosomal breakpoint'\n },\n 'intrachromosomal_breakpoint': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic intrachromosomal breakpoint',\n 'SO_accession': 'SO:0001874',\n 'display_term': 'intrachromosomal breakpoint'\n },\n 'translocation': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic translocation',\n 'SO_accession': 'SO:0000199',\n 'display_term': 'translocation'\n },\n 'duplication': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic duplication',\n 'SO_accession': 'SO:1000035',\n 'display_term': 'duplication'\n },\n 'novel_sequence_insertion': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic novel sequence insertion',\n 'SO_accession': 'SO:0001838',\n 'display_term': 'novel sequence insertion'\n },\n 'interchromosomal_translocation': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic interchromosomal translocation',\n 'SO_accession': 'SO:0002060',\n 'display_term': 'interchromosomal translocation'\n },\n 'intrachromosomal_translocation': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic intrachromosomal translocation',\n 'SO_accession': 'SO:0002061',\n 'display_term': 'intrachromosomal translocation'\n },\n 'Alu_insertion': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic alu insertion',\n 'SO_accession': 'SO:0002063',\n 'display_term': 'Alu insertion'\n },\n 'complex_substitution': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic complex substitution',\n 'SO_accession': 'SO:1000005',\n 'display_term': 'complex substitution'\n },\n 'short_tandem_repeat_variation': {\n 'type': 'sv',\n 'somatic_display_term': 'somatic short tandem repeat variation',\n 'SO_accession': 'SO:0002096',\n 'display_term': 'short tandem repeat variation'\n },\n 'loss_of_heterozygosity': {\n 'type': 'sv',\n 'somatic_display_term': 
'somatic loss of heterozygosity',\n 'SO_accession': 'SO:0001786',\n 'display_term': 'loss of heterozygosity'\n },\n }\n\n default_overlap_consequence = {\n 'is_default': 1,\n 'include': {\n 'within_feature': 0\n },\n 'description': 'A sequence variant located in the intergenic region, between genes',\n 'SO_accession': 'SO:0001628',\n 'SO_term': 'intergenic_variant',\n 'tier': '4',\n 'label': 'intergenic variant',\n 'rank': '38',\n 'impact': 'MODIFIER',\n 'display_term': 'INTERGENIC'\n }\n\n overlap_consequences = {\n 'intergenic_variant': default_overlap_consequence,\n\n 'sequence_variant': {\n 'include': {\n 'within_feature': 0\n },\n 'description': 'A sequence_variant is a non exact copy of a sequence_feature or genome exhibiting one or more sequence_alteration',\n 'SO_accession': 'SO:0001060',\n 'SO_term': 'sequence_variant',\n 'tier': '4',\n 'label': 'sequence variant',\n 'rank': '39',\n 'impact': 'MODIFIER',\n 'display_term': 'SEQUENCE_VARIANT',\n },\n 'upstream_gene_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'within_feature': 0\n },\n 'feature_SO_term': 'transcript',\n 'description': 'A sequence variant located 5\\' of a gene',\n 'SO_accession': 'SO:0001631',\n 'SO_term': 'upstream_gene_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::upstream',\n 'label': 'upstream gene variant',\n 'rank': '24',\n 'impact': 'MODIFIER',\n 'display_term': 'UPSTREAM',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'downstream_gene_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'within_feature': 0\n },\n 'feature_SO_term': 'transcript',\n 'description': 'A sequence variant located 3\\' of a gene',\n 'SO_accession': 'SO:0001632',\n 'SO_term': 'downstream_gene_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::downstream',\n 'label': 'downstream gene variant',\n 'rank': '25',\n 'impact': 'MODIFIER',\n 'display_term': 'DOWNSTREAM',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'splice_donor_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'intron_boundary': 1\n },\n 'NCBI_term': 'splice-5',\n 'feature_SO_term': 'primary_transcript',\n 'description': 'A splice variant that changes the 2 base region at the 5\\' end of an intron',\n 'SO_accession': 'SO:0001575',\n 'tier': '3',\n 'SO_term': 'splice_donor_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::donor_splice_site',\n 'label': 'splice donor variant',\n 'rank': '3',\n 'impact': 'HIGH',\n 'display_term': 'ESSENTIAL_SPLICE_SITE',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'splice_acceptor_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'intron_boundary': 1\n },\n 'NCBI_term': 'splice-3',\n 'feature_SO_term': 'primary_transcript',\n 'description': 'A splice variant that changes the 2 base region at the 3\\' end of an intron',\n 'SO_accession': 'SO:0001574',\n 'tier': '3',\n 'SO_term': 'splice_acceptor_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::acceptor_splice_site',\n 'label': 'splice acceptor variant',\n 'rank': '3',\n 'impact': 'HIGH',\n 'display_term': 'ESSENTIAL_SPLICE_SITE',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'splice_region_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'intron_boundary': 1\n },\n 
'feature_SO_term': 'primary_transcript',\n 'description': 'A sequence variant in which a change has occurred within the region of the splice site, either within 1-3 bases of the exon or 3-8 bases of the intron',\n 'SO_accession': 'SO:0001630',\n 'SO_term': 'splice_region_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::splice_region',\n 'label': 'splice region variant',\n 'rank': '13',\n 'impact': 'LOW',\n 'display_term': 'SPLICE_SITE',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'intron_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'intron': 1\n },\n 'NCBI_term': 'intron',\n 'feature_SO_term': 'primary_transcript',\n 'description': 'A transcript variant occurring within an intron',\n 'SO_accession': 'SO:0001627',\n 'tier': '3',\n 'SO_term': 'intron_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_intron',\n 'label': 'intron variant',\n 'rank': '21',\n 'impact': 'MODIFIER',\n 'display_term': 'INTRONIC',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n '5_prime_UTR_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'utr': 1,\n 'exon': 1\n },\n 'NCBI_term': 'untranslated_5',\n 'feature_SO_term': 'mRNA',\n 'description': 'A UTR variant of the 5\\' UTR',\n 'SO_accession': 'SO:0001623',\n 'tier': '3',\n 'SO_term': '5_prime_UTR_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_5_prime_utr',\n 'label': '5 prime UTR variant',\n 'rank': '18',\n 'impact': 'MODIFIER',\n 'display_term': '5PRIME_UTR',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n '3_prime_UTR_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'utr': 1,\n 'exon': 1\n },\n 'NCBI_term': 'untranslated_3',\n 'feature_SO_term': 'mRNA',\n 'description': 'A UTR variant of the 3\\' UTR',\n 'SO_accession': 'SO:0001624',\n 'tier': '3',\n 'SO_term': '3_prime_UTR_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_3_prime_utr',\n 'label': '3 prime UTR variant',\n 'rank': '19',\n 'impact': 'MODIFIER',\n 'display_term': '3PRIME_UTR',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'synonymous_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'coding': 1\n },\n 'NCBI_term': 'cds-synon',\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant where there is no resulting change to the encoded amino acid',\n 'SO_accession': 'SO:0001819',\n 'tier': '3',\n 'SO_term': 'synonymous_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::synonymous_variant',\n 'label': 'synonymous variant',\n 'rank': '15',\n 'impact': 'LOW',\n 'display_term': 'SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'missense_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'increase_length': 0,\n 'decrease_length': 0,\n 'coding': 1\n },\n 'NCBI_term': 'missense',\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant, that changes one or more bases, resulting in a different amino acid sequence but where the length is preserved',\n 'SO_accession': 'SO:0001583',\n 'tier': '3',\n 'SO_term': 'missense_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::missense_variant',\n 'label': 'missense variant',\n 'rank': '12',\n 'impact': 'MODERATE',\n 'display_term': 
'NON_SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'inframe_insertion': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'insertion': 1,\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'An inframe non synonymous variant that inserts bases into in the coding sequence',\n 'SO_accession': 'SO:0001821',\n 'SO_term': 'inframe_insertion',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::inframe_insertion',\n 'label': 'inframe insertion',\n 'rank': '10',\n 'impact': 'MODERATE',\n 'display_term': 'NON_SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'inframe_deletion': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'deletion': 1,\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'An inframe non synonymous variant that deletes bases from the coding sequence',\n 'SO_accession': 'SO:0001822',\n 'SO_term': 'inframe_deletion',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::inframe_deletion',\n 'label': 'inframe deletion',\n 'rank': '11',\n 'impact': 'MODERATE',\n 'display_term': 'NON_SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'stop_gained': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'coding': 1\n },\n 'NCBI_term': 'nonsense',\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant whereby at least one base of a codon is changed, resulting in a premature stop codon, leading to a shortened transcript',\n 'SO_accession': 'SO:0001587',\n 'tier': '3',\n 'SO_term': 'stop_gained',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::stop_gained',\n 'label': 'stop gained',\n 'rank': '4',\n 'impact': 'HIGH',\n 'display_term': 'STOP_GAINED',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'stop_lost': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant where at least one base of the terminator codon (stop) is changed, resulting in an elongated transcript',\n 'SO_accession': 'SO:0001578',\n 'SO_term': 'stop_lost',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::stop_lost',\n 'label': 'stop lost',\n 'rank': '6',\n 'impact': 'HIGH',\n 'display_term': 'STOP_LOST',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'stop_retained_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant where at least one base in the terminator codon is changed, but the terminator remains',\n 'SO_accession': 'SO:0001567',\n 'SO_term': 'stop_retained_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::stop_retained',\n 'label': 'stop retained variant',\n 'rank': '15',\n 'impact': 'LOW',\n 'display_term': 'SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'start_lost': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A codon variant that changes at least one base of the canonical start codon',\n 'SO_accession': 'SO:0002012',\n 'SO_term': 'start_lost',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::start_lost',\n 
'label': 'start lost',\n 'rank': '7',\n 'impact': 'HIGH',\n 'display_term': 'NON_SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'start_retained_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant where at least one base in the start codon is changed, but the start remains',\n 'SO_accession': 'SO:0002019',\n 'SO_term': 'start_retained_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::start_retained_variant',\n 'label': 'start retained variant',\n 'rank': '15',\n 'impact': 'LOW',\n 'display_term': 'SYNONYMOUS_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'frameshift_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'snp': 0,\n 'coding': 1\n },\n 'NCBI_term': 'frameshift',\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant which causes a disruption of the translational reading frame, because the number of nucleotides inserted or deleted is not a multiple of three',\n 'SO_accession': 'SO:0001589',\n 'tier': '3',\n 'SO_term': 'frameshift_variant',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::frameshift',\n 'label': 'frameshift variant',\n 'rank': '5',\n 'impact': 'HIGH',\n 'display_term': 'FRAMESHIFT_CODING',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'incomplete_terminal_codon_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant where at least one base of the final codon of an incompletely annotated transcript is changed',\n 'SO_accession': 'SO:0001626',\n 'SO_term': 'incomplete_terminal_codon_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::partial_codon',\n 'label': 'incomplete terminal codon variant',\n 'rank': '14',\n 'impact': 'LOW',\n 'display_term': 'PARTIAL_CODON',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'NMD_transcript_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'within_feature': 1,\n 'nonsense_mediated_decay': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A variant in a transcript that is the target of NMD',\n 'SO_accession': 'SO:0001621',\n 'SO_term': 'NMD_transcript_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_nmd_transcript',\n 'label': 'NMD transcript variant',\n 'rank': '22',\n 'impact': 'MODIFIER',\n 'display_term': 'NMD_TRANSCRIPT',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'non_coding_transcript_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'within_feature': 1,\n 'protein_coding': 0\n },\n 'feature_SO_term': 'ncRNA',\n 'description': 'A transcript variant of a non coding RNA gene',\n 'SO_accession': 'SO:0001619',\n 'SO_term': 'non_coding_transcript_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_non_coding_gene',\n 'label': 'non coding transcript variant',\n 'rank': '23',\n 'impact': 'MODIFIER',\n 'display_term': 'WITHIN_NON_CODING_GENE',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'non_coding_transcript_exon_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'within_feature': 1,\n 
'exon': 1,\n 'protein_coding': 0\n },\n 'feature_SO_term': 'ncRNA',\n 'description': 'A sequence variant that changes non-coding exon sequence in a non-coding transcript',\n 'SO_accession': 'SO:0001792',\n 'SO_term': 'non_coding_transcript_exon_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::non_coding_exon_variant',\n 'label': 'non coding transcript exon variant',\n 'rank': '20',\n 'impact': 'MODIFIER',\n 'display_term': 'WITHIN_NON_CODING_GENE',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'mature_miRNA_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'within_feature': 1,\n 'nonsense_mediated_decay': 0,\n 'protein_coding': 0\n },\n 'feature_SO_term': 'miRNA',\n 'description': 'A transcript variant located with the sequence of the mature miRNA',\n 'SO_accession': 'SO:0001620',\n 'SO_term': 'mature_miRNA_variant',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_mature_miRNA',\n 'label': 'mature miRNA variant',\n 'rank': '17',\n 'impact': 'MODIFIER',\n 'display_term': 'WITHIN_MATURE_miRNA',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'coding_sequence_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence variant that changes the coding sequence',\n 'SO_accession': 'SO:0001580',\n 'SO_term': 'coding_sequence_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::coding_unknown',\n 'label': 'coding sequence variant',\n 'rank': '16',\n 'impact': 'MODIFIER',\n 'display_term': 'CODING_UNKNOWN',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'regulatory_region_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'feature_SO_term': 'regulatory_region',\n 'description': 'A sequence variant located within a regulatory region',\n 'SO_accession': 'SO:0001566',\n 'SO_term': 'regulatory_region_variant',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_regulatory_feature',\n 'label': 'regulatory region variant',\n 'rank': '36',\n 'impact': 'MODIFIER',\n 'display_term': 'REGULATORY_REGION',\n 'feature_class': 'Bio::EnsEMBL::Funcgen::RegulatoryFeature'\n },\n 'TF_binding_site_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'feature_SO_term': 'TF_binding_site',\n 'description': 'A sequence variant located within a transcription factor binding site',\n 'SO_accession': 'SO:0001782',\n 'SO_term': 'TF_binding_site_variant',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::within_motif_feature',\n 'label': 'TF binding site',\n 'rank': '30',\n 'impact': 'MODIFIER',\n 'display_term': 'REGULATORY_REGION',\n 'feature_class': 'Bio::EnsEMBL::Funcgen::MotifFeature'\n },\n 'transcript_ablation': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'complete_overlap': 1,\n 'deletion': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A feature ablation whereby the deleted region includes a transcript feature',\n 'SO_accession': 'SO:0001893',\n 'SO_term': 'transcript_ablation',\n 'tier': '1',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_ablation',\n 'label': 'transcript ablation',\n 'rank': '1',\n 'impact': 'HIGH',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 
'transcript_amplification': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'complete_overlap': 1,\n 'increase_length': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A feature amplification of a region containing a transcript',\n 'SO_accession': 'SO:0001889',\n 'SO_term': 'transcript_amplification',\n 'tier': '1',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_amplification',\n 'label': 'transcript amplification',\n 'rank': '8',\n 'impact': 'HIGH',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n 'TFBS_ablation': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'complete_overlap': 1,\n 'deletion': 1\n },\n 'feature_SO_term': 'TF_binding_site',\n 'description': 'A feature ablation whereby the deleted region includes a transcription factor binding site',\n 'SO_accession': 'SO:0001895',\n 'SO_term': 'TFBS_ablation',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_ablation',\n 'label': 'TFBS ablation',\n 'rank': '26',\n 'impact': 'MODERATE',\n 'feature_class': 'Bio::EnsEMBL::Funcgen::MotifFeature'\n },\n 'TFBS_amplification': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'complete_overlap': 1,\n 'increase_length': 1\n },\n 'feature_SO_term': 'TF_binding_site',\n 'description': 'A feature amplification of a region containing a transcription factor binding site',\n 'SO_accession': 'SO:0001892',\n 'SO_term': 'TFBS_amplification',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_amplification',\n 'label': 'TFBS amplification',\n 'rank': '28',\n 'impact': 'MODIFIER',\n 'feature_class': 'Bio::EnsEMBL::Funcgen::MotifFeature'\n },\n 'regulatory_region_ablation': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'complete_overlap': 1,\n 'deletion': 1\n },\n 'feature_SO_term': 'TF_binding_site',\n 'description': 'A feature ablation whereby the deleted region includes a regulatory region',\n 'SO_accession': 'SO:0001894',\n 'SO_term': 'regulatory_region_ablation',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_ablation',\n 'label': 'regulatory region ablation',\n 'rank': '31',\n 'impact': 'MODERATE',\n 'feature_class': 'Bio::EnsEMBL::Funcgen::RegulatoryFeature'\n },\n 'regulatory_region_amplification': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'complete_overlap': 1,\n 'increase_length': 1\n },\n 'feature_SO_term': 'TF_binding_site',\n 'description': 'A feature amplification of a region containing a regulatory region',\n 'SO_accession': 'SO:0001891',\n 'SO_term': 'regulatory_region_amplification',\n 'tier': '2',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_amplification',\n 'label': 'regulatory region amplification',\n 'rank': '33',\n 'impact': 'MODIFIER',\n 'feature_class': 'Bio::EnsEMBL::Funcgen::RegulatoryFeature'\n },\n 'feature_elongation': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'sv': 1,\n 'increase_length': 1\n },\n 'feature_SO_term': 'sequence_feature',\n 'description': 'A sequence variant that causes the extension of a genomic feature, with regard to the reference sequence',\n 'SO_accession': 'SO:0001907',\n 'SO_term': 'feature_elongation',\n 'tier': '3',\n 'predicate': 
'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_elongation',\n 'label': 'feature elongation',\n 'rank': '36',\n 'impact': 'MODIFIER',\n 'feature_class': 'Bio::EnsEMBL::Feature'\n },\n 'feature_truncation': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::BaseVariationFeature',\n 'include': {\n 'sv': 1,\n 'decrease_length': 1\n },\n 'feature_SO_term': 'sequence_feature',\n 'description': 'A sequence variant that causes the reduction of a genomic feature, with regard to the reference sequence',\n 'SO_accession': 'SO:0001906',\n 'SO_term': 'feature_truncation',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::feature_truncation',\n 'label': 'feature truncation',\n 'rank': '37',\n 'impact': 'MODIFIER',\n 'feature_class': 'Bio::EnsEMBL::Feature'\n },\n 'protein_altering_variant': {\n 'variant_feature_class': 'Bio::EnsEMBL::Variation::VariationFeature',\n 'include': {\n 'coding': 1\n },\n 'feature_SO_term': 'mRNA',\n 'description': 'A sequence_variant which is predicted to change the protein encoded in the coding sequence',\n 'SO_accession': 'SO:0001818',\n 'SO_term': 'protein_altering_variant',\n 'tier': '3',\n 'predicate': 'Bio::EnsEMBL::Variation::Utils::VariationEffect::protein_altering_variant',\n 'label': 'protein altering variant',\n 'rank': '12',\n 'impact': 'MODERATE',\n 'feature_class': 'Bio::EnsEMBL::Transcript'\n },\n }\n","sub_path":"Bio_EnsEMBL_Variation_Utils_Constants/Bio_EnsEMBL_Variation_Utils_Constants.py","file_name":"Bio_EnsEMBL_Variation_Utils_Constants.py","file_ext":"py","file_size_in_byte":45732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"182585383","text":"#Software:FaviconDownloader\n#Script:DgUwLtB\n#You can choose either wget or urllib. Just uncomment which one you want.\n
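# [Editorial aside] A hedged sketch, not part of the original script: the same\n# favicon fetch wrapped in a reusable function, using the Google s2 endpoint that\n# appears in the code below. The function name, defaults and error handling are\n# illustrative assumptions.\ndef fetch_favicon(domain, out_path=None):\n\timport urllib.request\n\tout_path = out_path or domain.split('.')[0] + '.png'\n\turl = 'https://www.google.com/s2/favicons?domain=' + domain\n\ttry:\n\t\turllib.request.urlretrieve(url, out_path)\n\texcept OSError as err:  # urllib's URLError is a subclass of OSError\n\t\tprint('Could not fetch favicon for ' + domain + ': ' + str(err))\n\t\treturn None\n\treturn out_path\n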
\n\n#import wget\nimport urllib.request\nprint('Enter the website here please')\nUrl = input(':').lower()\nCleanup = Url\n# Strip the scheme first, then any leading www., so an address like\n# https://www.google.com reduces to the bare domain before the final split.\nif Cleanup.startswith('https://') or Cleanup.startswith('http://'):\n\tCleanup = Cleanup.split('://', 1)[1]\nif Cleanup.startswith('www.'):\n\tCleanup = Cleanup.split('www.', 1)[1]\n\nCleanup = Cleanup.split('.')[0]\n#wget.download('https://www.google.com/s2/favicons?domain=' + str(Url),str(Cleanup+'.png'))\nurllib.request.urlretrieve('https://www.google.com/s2/favicons?domain=' + str(Url),str(Cleanup+'.png'))\nprint('Favicon ' + Cleanup + ' downloaded')\n","sub_path":"Favicondownloader.py","file_name":"Favicondownloader.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"196083483","text":"#!/usr/bin/python3\nimport time\nimport socket\n\nhote = \"localhost\"\n\nport = 12801\n\n\nconnexion_avec_serveur = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nconnected = False\nwhile not connected:\n\ttry:\n\t\tconnexion_avec_serveur.connect((hote, port))\n\t\tconnected = True\n\texcept ConnectionRefusedError:\n\t\ttime.sleep(0.1)\nprint(\"Connexion établie avec le serveur sur le port {}\".format(port))\n\n\nmsg_a_envoyer = b\"\"\n\nwhile msg_a_envoyer != b\"fin\":\n\n msg_a_envoyer = input(\"> \")\n\n # May crash if you type special characters\n\n msg_a_envoyer = msg_a_envoyer.encode()\n\n # Send the message\n\n connexion_avec_serveur.send(msg_a_envoyer)\n\n msg_recu = connexion_avec_serveur.recv(1024)\n\n print(msg_recu.decode()) # Again, this can crash if there are accented characters\n\n\nprint(\"Fermeture de la connexion\")\n\nconnexion_avec_serveur.close()\n","sub_path":"Street pyghter/src2/bot/testClient2.py","file_name":"testClient2.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"130053962","text":"# coding:utf8\nimport math\ndef isPrimeNumber(num):\n i = 2\n x = math.sqrt(num)\n while i <= x: # was 'i < x', which let perfect squares such as 4, 9 and 25 pass as primes\n if num%i == 0:\n return False\n i += 1\n return True\n \ndef Reverse(num):\n rNum = 0\n while num:\n rNum = rNum*10 + num%10\n num //= 10\n return rNum\n \ndef RPrimeNumber(num):\n arr = []\n i = 2\n while i < num:\n if isPrimeNumber(i) and i == Reverse(i):\n arr.append(i)\n i += 1\n return arr\n \nprint (RPrimeNumber(1000))","sub_path":"answer/ex92.py","file_name":"ex92.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"381459977","text":"import structures\n\ndef bfs(grid, start, target):\n\n\t# TODO: placeholder that returns the grid unchanged; a real BFS would track a visited set and a frontier queue\n\treturn grid\n\n\nif __name__ == '__main__':\n\n\tgrid = [\n\t\t\t['S','0','0'],\n\t\t\t['0','0','0'],\n\t\t\t['0','0','F']\n\t\t\t]\n\n\tfor line in grid:\n\n\t\tfor cell in line:\n\n\t\t\tprint(cell, end=' ')\n\n\t\tprint()","sub_path":"lib/better_than_google.py","file_name":"better_than_google.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"104358588","text":"\"\"\"\nThis class listens for tts/say messages and triggers a sequence of messages\nthat result in the text message being converted to wav audio and played through the speaker service\nTODO Where the text is very long, it is split into parts and sent sequentially.\nThe speaker service sends start and end messages.\nThis service iterates each part, waiting for each speaker/started and speaker/finished message\nand finally sends a tts/finished 
message when all parts have finished playing\nDepends on os pico2wav install with path in config.yaml\n\"\"\"\n\nimport json\nimport os\nimport aiofiles\nimport concurrent.futures\nimport asyncio\nfrom random import seed\nfrom random import randint\nfrom MqttService import MqttService\nimport unicodedata\nimport string\n\nfrom google.cloud import texttospeech\n\nfrom pathlib import Path\n\nvalid_filename_chars = \"-_() %s%s\" % (string.ascii_letters, string.digits)\nchar_limit = 240\n\n\n\n# seed random number generator\nseed(1)\n\ndef write_speech(text,file_name,config):\n print('WRITE SPEECH')\n print([text,file_name,config])\n # Instantiates a client\n client = texttospeech.TextToSpeechClient()\n\n # Set the text input to be synthesized\n synthesis_input = texttospeech.SynthesisInput(text=text)\n input_text = texttospeech.SynthesisInput(text=text)\n # Build the voice request, select the language code (\"en-US\") and the ssml\n # voice gender (\"neutral\")\n voice = texttospeech.VoiceSelectionParams(\n language_code=config.get('language','en-US'),\n ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL)\n\n # Select the type of audio file you want returned\n audio_config = texttospeech.AudioConfig(\n audio_encoding=texttospeech.AudioEncoding.MP3)\n\n # Perform the text-to-speech request on the text input with the selected\n # voice parameters and audio file type\n response = client.synthesize_speech(\n request={\"input\": input_text, \"voice\": voice, \"audio_config\": audio_config}\n )\n print('GOT GOO SPEECH REQ')\n \n return response.audio_content\n\n\ndef clean_filename(filename, whitelist=valid_filename_chars, replace=' '):\n # replace spaces\n for r in replace:\n filename = filename.replace(r,'_')\n \n # keep only valid ascii chars\n cleaned_filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode()\n \n # keep only whitelisted chars\n cleaned_filename = ''.join(c for c in cleaned_filename if c in whitelist)\n # if len(cleaned_filename)>char_limit:\n # print(\"Warning, filename truncated because it was over {}. 
Filenames may no longer be unique\".format(char_limit))\n return cleaned_filename[:char_limit] \n \n\ndef my_run_in_executor(executor, f, *args):\n return asyncio.wrap_future(executor.submit(f, *args))\n\nclass GoogleTtsService(MqttService):\n \"\"\" Text to Speech Service Class \"\"\"\n\n def __init__(\n self,\n config,\n loop\n ):\n super(\n GoogleTtsService,\n self).__init__(config,loop)\n self.config = config\n # subscribe to all sites\n self.subscribe_to = 'hermod/+/tts/say'\n cache_path = self.config['services']['GoogleTtsService'].get('cache_path','/tmp/tts_cache')\n Path(cache_path).mkdir(parents=True, exist_ok=True)\n\n\n async def on_message(self, msg):\n self.log('message {}'.format(msg))\n topic = \"{}\".format(msg.topic)\n parts = topic.split('/')\n site = parts[1]\n payload = {}\n try:\n payload = json.loads(msg.payload)\n except BaseException:\n pass\n # self.log('TTS payload')\n # self.log(payload)\n text = payload.get('text','')\n #self.log(text)\n if topic == 'hermod/' + site + '/tts/say' and len(text) > 0:\n # self.log('TTS start gen')\n \n await self.generate_audio(site, text, payload)\n elif topic == 'hermod/' + site + '/speaker/finished':\n # self.log('SPEAKER FINISHED')\n # self.log(payload)\n #self.play_requests[payload.get('id')] = value;\n \n message = {\"id\": payload.get('id')}\n await asyncio.sleep(0.5)\n await self.client.publish(\n 'hermod/{}/tts/finished'.format(site),\n json.dumps(message))\n await self.client.unsubscribe('hermod/{}/speaker/finished'.format(site))\n\n \n \"\"\" Use system binary pico2wav to generate audio file from text then send audio as mqtt\"\"\"\n async def generate_audio(self, site, text, payload):\n cache_path = self.config['services']['GoogleTtsService'].get('cache_path','/tmp/tts_cache')\n value = payload.get('id','no_id')\n # self.log('TTS GEN '+cache_path)\n \n if len(text) > 0:\n # self.log('TTS havetext')\n \n # filename limits\n short_text = text[0:200].replace(' ','_')\n # speakable and limited\n say_text = text[0:300].replace('(','').replace(')','')\n file_name = os.path.join(cache_path, clean_filename('tts-' + str(short_text)) + '.wav')\n # self.log('TTS file '+file_name)\n \n # generate if file doesn't exist in cache\n audio_file = None\n if not os.path.isfile(file_name):\n self.log('TTS exec')\n with concurrent.futures.ProcessPoolExecutor() as executor:\n #audio_file = await self.loop.run_in_executor(None,write_speech,text, file_name, self.config)\n audio_file = await my_run_in_executor(executor,write_speech,say_text , file_name, self.config)\n async with aiofiles.open(file_name, mode='wb') as f:\n await f.write(audio_file)\n # The response's audio_content is binary.\n # with open(file_name, 'wb') as out:\n # out.write(response.audio_content)\n self.log('TTS DONE write')\n else: \n self.log('TTS read')\n \n async with aiofiles.open(file_name, mode='rb') as f:\n audio_file = await f.read()\n self.log('TTS now send {}'.format(len(audio_file)))\n \n await self.client.subscribe('hermod/{}/speaker/finished'.format(site))\n \n slice_length = 16000\n def chunker(seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n\n lc = 0\n ts = 0\n for slice in chunker(audio_file, slice_length):\n lc = lc + 1\n ts = ts + len(slice)\n await self.client.publish('hermod/{}/speaker/cache/{}'.format(site, value), payload=bytes(slice), qos=0)\n \n self.log(lc)\n self.log(ts)\n # finally send play message with empty payload\n await self.client.publish(\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=None, 
qos=0)\n \n self.log('TTS sent ')\n # cache short texts\n if len(short_text) > self.config.get('cache_max_letters',100):\n # self.log('TTS remove file')\n os.remove(file_name)\n","sub_path":"hermod-python/src/GoogleTtsService.py","file_name":"GoogleTtsService.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610310980","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 21 15:48:52 2020\n\n@author: Peter Corke\n\"\"\"\n\nfrom collections import UserList\nimport numpy as np\nimport math\n\nfrom spatialmath.base import argcheck\nimport spatialmath.base as tr\nfrom spatialmath import super_pose as sp\n\n\nclass SO2(sp.SMPose):\n\n # SO2() identity matrix\n # SO2(angle, unit)\n # SO2( obj ) # deep copy\n # SO2( np ) # make numpy object\n # SO2( nplist ) # make from list of numpy objects\n\n # constructor needs to take ndarray -> SO2, or list of ndarray -> SO2\n def __init__(self, arg=None, *, unit='rad'):\n super().__init__() # activate the UserList semantics\n\n if arg is None:\n # empty constructor\n if type(self) is SO2:\n self.data = [np.eye(2)]\n\n elif argcheck.isvector(arg):\n # SO2(value)\n # SO2(list of values)\n self.data = [tr.rot2(x, unit) for x in argcheck.getvector(arg)]\n\n elif isinstance(arg, np.ndarray) and arg.shape == (2,2):\n self.data = [arg]\n else:\n super().arghandler(arg)\n\n @classmethod\n def rand(cls, *, range=[0, 2 * math.pi], unit='rad', N=1):\n rand = np.random.uniform(low=range[0], high=range[1], size=N) # random values in the range\n return cls([tr.rot2(x) for x in argcheck.getunit(rand, unit)])\n\n @staticmethod\n def isvalid(x):\n return tr.isrot2(x, check=True)\n\n @property\n def T(self):\n return SO2(self.A.T)\n\n @property\n def inv(self):\n if len(self) == 1:\n return SO2(self.A.T)\n else:\n return SO2([x.T for x in self.A])\n\n @property\n def R(self):\n return self.A[:2, :2]\n\n @property\n def theta(self):\n \"\"\"Returns angle of SO2 object matrices in unit radians\"\"\"\n if len(self) == 1:\n return math.atan2(self.A[1,0], self.A[0,0])\n else:\n return [math.atan2(x.A[1,0], x.A[0,0]) for x in self] \n\n\nclass SE2(SO2):\n # constructor needs to take ndarray -> SO2, or list of ndarray -> SO2\n def __init__(self, x=None, y=None, theta=None, *, unit='rad'):\n super().__init__() # activate the UserList semantics\n\n if x is None and y is None and theta is None:\n # SE2()\n # empty constructor\n self.data = [np.eye(3)]\n\n elif x is not None:\n if y is not None and theta is not None:\n # SE2(x, y, theta)\n self.data = [tr.trot2(theta, t=[x, y], unit=unit)]\n\n elif y is None and theta is None:\n if argcheck.isvector(x, 3):\n # SE2( [x,y,theta])\n self.data = [tr.trot2(x[2], t=x[:2], unit=unit)]\n elif isinstance(x, np.ndarray):\n if x.shape == (3,3):\n # SE2( 3x3 matrix )\n self.data = [x]\n elif x.shape[1] == 3:\n # SE2( Nx3 )\n self.data = [tr.trot2(T.theta, t=T.t) for T in x]\n else:\n super().arghandler(x)\n else:\n raise ValueError('bad arguments to constructor')\n\n @property\n def T(self):\n raise NotImplemented('transpose is not meaningful for SE3 object')\n\n @classmethod\n def rand(cls, *, xrange=[-1, 1], yrange=[-1, 1], trange=[0, 2 * math.pi], unit='rad', N=1):\n x = np.random.uniform(low=xrange[0], high=xrange[1], size=N) # random values in the range\n y = np.random.uniform(low=yrange[0], high=yrange[1], size=N) # random values in the range\n theta = np.random.uniform(low=trange[0], high=trange[1], size=N) # 
random values in the range\n return cls([tr.trot2(t, t=[x, y]) for (t, x, y) in zip(x, y, argcheck.getunit(theta, unit))])\n\n @staticmethod\n def isvalid(x):\n return tr.ishom2(x, check=True)\n\n @property\n def t(self):\n return self.A[:2, 2]\n\n @property\n def xyt(self):\n if len(self) == 1:\n return np.r_[self.t, self.theta]\n else:\n return [np.r_[x.t, x.theta] for x in self]\n\n @property\n def inv(self):\n if len(self) == 1:\n return SE2(tr.rt2tr(self.R.T, -self.R.T @ self.t))\n else:\n return SE2([tr.rt2tr(x.R.T, -x.R.T @ x.t) for x in self])\n\n\nif __name__ == '__main__': # pragma: no cover\n\n import pathlib\n import os.path\n\n exec(open(os.path.join(pathlib.Path(__file__).parent.absolute(), \"test_pose2d.py\")).read())\n","sub_path":"spatialmath/pose2d.py","file_name":"pose2d.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29472352","text":"#! /usr/bin/env python\n\n\"\"\"\nThis module defines an interface for commanding a LinkLabs module\nvia the host interface over a serial connection.\n\"\"\"\n\nfrom serial import Serial\nfrom serial.tools import list_ports\nfrom time import time, sleep\nfrom contextlib import contextmanager\nfrom binascii import hexlify\nimport logging\nimport struct\n\nLOG = logging.getLogger(__name__)\n\nIRQ_FLAGS = {\n 'WDOG_RESET': 0x00000001,\n 'RESET': 0x00000002,\n 'TX_QUEUE_EMPTY': 0x00000010,\n 'TX_ERROR': 0x00000020,\n 'RX_DONE': 0x00000100,\n 'CONNECTED': 0x00001000,\n 'DISCONNECTED': 0x00002000,\n 'CRYPTO_ESTABLISHED': 0x00010000,\n 'APP_TOKEN_CONFIRMED': 0x00020000,\n 'DOWNLINK_REQUEST_ACK': 0x00040000,\n 'CRYPTO_ERROR': 0x00100000,\n 'APP_TOKEN_ERROR': 0x00200000,\n 'ASSERT': 0x80000000,\n}\n\nIFC_ACK_CODES = {\n 'ACK': 0,\n 'NACK_CMD_NOT_SUPPORTED': 1,\n 'NACK_INCORRECT_CHKSUM': 2,\n 'NACK_PAYLOAD_LEN_OOR': 3,\n 'NACK_PAYLOAD_OOR': 4,\n 'NACK_BOOTUP_IN_PROGRESS': 5,\n 'NACK_BUSY_TRY_AGAIN': 6,\n 'NACK_APP_TOKEN_REG': 7,\n 'NACK_PAYLOAD_LEN_EXCEEDED': 8,\n 'NACK_OTHER': 255,\n}\n\nOPCODES = {\n 'VERSION': 0,\n 'IFC_VERSION': 1,\n 'FREQUENCY': 6,\n 'TX_POWER': 7,\n 'RESET_SETTINGS': 8,\n 'GET_RADIO_PARAMS': 9,\n 'SET_RADIO_PARAMS': 10,\n 'PKT_SEND_QUEUE': 11,\n 'IRQ_FLAGS': 15,\n 'IRQ_FLAGS_MASK': 16,\n 'SLEEP': 20,\n 'SLEEP_BLOCK': 21,\n 'PKT_SEND': 30,\n 'PKT_ECHO': 31,\n 'PKT_RECV': 40,\n 'PKT_RECV_RSSI': 41,\n 'PKT_RECV_CONT': 42,\n 'MODULE_ID': 50,\n 'STORE_SETTINGS': 51,\n 'DELETE_SETTINGS': 52,\n 'RESET_MCU': 60,\n 'TRIGGER_BOOTLOADER': 61,\n 'MAC_MODE_SET': 70,\n 'MAC_MODE_GET': 71,\n 'SET_KEY': 80,\n 'PKT_SEND_ACK': 90,\n 'PKT_SEND_UNACK': 91,\n 'TX_CW': 98,\n 'RX_MODE_SET': 110,\n 'RX_MODE_GET': 111,\n 'QOS_REQUEST': 112,\n 'QOS_GET': 113,\n 'ANTENNA_SET': 114,\n 'ANTENNA_GET': 115,\n 'NET_TOKEN_SET': 116,\n 'NET_TOKEN_GET': 117,\n 'NET_INFO_GET': 118,\n 'STATS_GET': 119,\n 'RSSI_SET': 120,\n 'RSSI_GET': 121,\n 'DL_BAND_CFG_GET': 122,\n 'DL_BAND_CFG_SET': 123,\n 'APP_TOKEN_SET': 124,\n 'APP_TOKEN_GET': 125,\n 'APP_TOKEN_REG_GET': 126,\n 'CRYPTO_KEY_XCHG_REQ': 128,\n 'CRYPTO_KEY_SET': 129,\n 'HARDWARE_TYPE': 254,\n 'FIRMWARE_TYPE': 255,\n}\n\nOPEN_NET_TOKEN = hexlify(b'OPEN')\n\nclass ModuleConnection(object):\n \"\"\"\n The interface to a LinkLabs module. The `device` parameter should be a path\n to the module. 
If none is specified, then the constructor will attempt\n to find one.\n \"\"\"\n def __init__(self, device=None):\n device = device if device else find_module_device()\n LOG.info(\"Connecting to %s\", device)\n self.sdev = Serial(port=device, baudrate=115200, timeout=1.0)\n if not self.sdev.isOpen():\n raise IOError(\"Cannot open device %s\", device)\n self.frame_start_byte = 0xc4\n self.dummy_byte = 0xff\n self.num_dummy_bytes = 4\n self.message_counter = 0\n self.response_header_length = 5\n self.frame_start_timeout = 1.0\n\n def close(self):\n \"\"\" Closes the serial port owned by this object. \"\"\"\n self.sdev.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n self.close()\n\n def __str__(self):\n return self.get_unique_id()\n\n def __repr__(self):\n unique_id = self.get_unique_id()\n device_port = self.sdev.getPort()\n return self.__class__.__name__ + \"('{}') -> {}\".format(device_port, unique_id)\n\n def _send_command(self, opcode, send_buff=None):\n \"\"\"\n Sends a command to the module, waits for the response, and\n returns the response payload.\n \"\"\"\n self._send_packet(opcode, send_buff if send_buff else [])\n response = self._receive_packet(opcode, self.message_counter)\n self.message_counter = (self.message_counter + 1) % 256\n\n return response\n\n def _send_packet(self, opcode, send_buff):\n \"\"\" Sends a framed uart transmission to the module. \"\"\"\n buff = bytearray()\n\n buff.append(self.frame_start_byte)\n buff.append(opcode)\n buff.append(self.message_counter)\n\n len_msb = (len(send_buff) >> 8) & 0xFF\n len_lsb = (len(send_buff) >> 0) & 0xFF\n buff.append(len_msb)\n buff.append(len_lsb)\n\n buff = buff + bytearray(send_buff)\n\n checksum = compute_checksum(buff)\n buff.append((checksum >> 8) & 0xFF)\n buff.append((checksum >> 0) & 0xFF)\n\n # Start the buffer with several dummy bytes\n dummy = bytearray([self.dummy_byte] * self.num_dummy_bytes)\n buff = dummy + buff\n\n LOG.debug(\"Sending frame %s to %s\", hexlify(buff), self.sdev.getPort())\n written_size = self.sdev.write(buff)\n if written_size != len(buff):\n raise IOError(\"Not enough bytes written.\")\n self.sdev.flush()\n\n def _receive_packet(self, opcode, message_counter):\n \"\"\"\n Receive a framed uart transmission from the module. 
Will return\n the packet payload (without any framing header or CRC).\n \"\"\"\n start = time()\n while True:\n if time() - start > self.frame_start_timeout:\n raise IOError(\"Did not get frame start within timeout.\")\n byte = self.sdev.read()\n if byte:\n if ord(byte) == self.frame_start_byte:\n break\n else:\n LOG.warning(\"Bad frame start byte: %r\", byte)\n\n resp_header = bytearray(self.sdev.read(self.response_header_length))\n resp_opcode = resp_header[0]\n resp_message_counter = resp_header[1]\n resp_ack = resp_header[2]\n resp_payload_len = (resp_header[3] << 8) + resp_header[4]\n LOG.debug(\"Received frame header %s from %s\", hexlify(resp_header), self.sdev.getPort())\n\n if resp_opcode != opcode:\n raise IOError(\"Did not get the same opcode we sent:\\\n Received %s not %s\" % (resp_opcode, opcode))\n if resp_message_counter != message_counter:\n raise IOError(\"Did not get the same message counter we sent.\")\n\n if resp_ack != IFC_ACK_CODES['ACK']:\n nack = next(nack for nack, val in IFC_ACK_CODES.items() if val == resp_ack)\n # Read checksum bytes before raising the exception\n resp_checksum_buff = bytearray(self.sdev.read(2))\n raise IOError(resp_ack, \"Received NACK from module: %s\" % nack)\n\n resp_payload = bytearray(self.sdev.read(resp_payload_len))\n LOG.debug(\"Received frame payload %s from %s\", hexlify(resp_payload), self.sdev.getPort())\n if len(resp_payload) != resp_payload_len:\n raise IOError(\"Could not read the number of bytes promised by the module.\")\n\n resp_checksum_buff = bytearray(self.sdev.read(2))\n resp_checksum = (resp_checksum_buff[0] << 8) + resp_checksum_buff[1]\n checksum = compute_checksum(bytearray([self.frame_start_byte]) + resp_header + resp_payload)\n if resp_checksum != checksum:\n raise IOError(\"Checksum mismatch.\")\n\n LOG.debug(\"Received checksum bytes %s from %s\",\n hexlify(resp_checksum_buff), self.sdev.getPort())\n self.sdev.flush()\n\n return resp_payload\n\n def get_version(self):\n \"\"\" Returns the module's firmware version as a tuple of (major, minor, tag). \"\"\"\n resp_payload = self._send_command(OPCODES['VERSION'])\n return resp_payload[0], resp_payload[1], (resp_payload[2] << 8) + resp_payload[3]\n\n def set_mac_mode(self, mac):\n \"\"\"\n Sets the MAC mode of the module. Valid values of the parameter `mac` are\n 'Symphony' or 'NoMac'.\n \"\"\"\n if mac == 'NoMac':\n self._send_command(OPCODES['MAC_MODE_SET'], [0])\n elif mac == 'Symphony':\n self._send_command(OPCODES['MAC_MODE_SET'], [3])\n else:\n raise ValueError(\"Unknown MAC mode: %s\", mac)\n\n def get_mac_mode(self):\n \"\"\" Returns either 'Symphony' or 'NoMac'. \"\"\"\n resp = self._send_command(OPCODES['MAC_MODE_GET'])\n if resp[0] == 0:\n return 'NoMac'\n elif resp[0] == 3:\n return 'Symphony'\n else:\n raise ValueError(\"Unknown MAC mode: %s\", resp[0])\n\n def send_message(self, message, ack=False):\n \"\"\" Sends an uplink message to the gateway. \"\"\"\n if len(message) > 256:\n raise ValueError(\"Message too long. Max message size is 256 bytes.\")\n opcode = 'PKT_SEND_ACK' if ack else 'PKT_SEND_UNACK'\n self._send_command(OPCODES[opcode], message)\n\n def get_irq_flags(self):\n \"\"\" Returns a list of irq flags (as strings). \"\"\"\n resp = self._send_command(OPCODES['IRQ_FLAGS'], [0] * 4)\n flags_int = (resp[0] << 24) + (resp[1] << 16) + (resp[2] << 8) + resp[3]\n return [f for f in IRQ_FLAGS if IRQ_FLAGS[f] & flags_int]\n\n def clear_irq_flags(self, flags='all'):\n \"\"\"\n Clears the irq flags. `flags` is a list of irq flag strings. 
If\n the parameter is not given, then all flags are cleared.\n \"\"\"\n flag_dict = IRQ_FLAGS if flags == 'all' else {f: IRQ_FLAGS[f] for f in flags}\n flag_int = 0\n for _, flag_val in flag_dict.items():\n flag_int |= flag_val\n flag_buff = [0xFF & (flag_int >> n) for n in [24, 16, 8, 0]]\n self._send_command(OPCODES['IRQ_FLAGS'], flag_buff)\n\n def get_unique_id(self):\n \"\"\" Returns the UUID of the module. \"\"\"\n uuid = self._send_command(OPCODES['MODULE_ID'])\n return \"$301$0-0-0-\" + hexlify(uuid)[-9:].decode(\"utf-8\")\n\n def delete_settings(self):\n \"\"\" Returs the module to factory defaults. \"\"\"\n self._send_command(OPCODES['DELETE_SETTINGS'])\n\n def reset_mcu(self):\n \"\"\" Reset the module \"\"\"\n self._send_command(OPCODES['RESET_MCU'])\n\n def reboot_into_bootloader(self):\n \"\"\" Reboots the module into bootloader mode. \"\"\"\n # Use the _send_packet method because the module\n # reboots before sending the response.\n self._send_packet(OPCODES['TRIGGER_BOOTLOADER'], [])\n\n def set_network_token(self, token):\n \"\"\" Sets the network token for the module. The token should be a hex string. \"\"\"\n network_token = bytearray.fromhex(token)\n self._send_command(OPCODES['NET_TOKEN_SET'], network_token)\n\n def get_network_token(self):\n \"\"\" Sets the network token for the module. The token should be a hex string. \"\"\"\n network_token = self._send_command(OPCODES['NET_TOKEN_GET'])\n return hexlify(network_token)\n\n def set_app_token(self, token):\n \"\"\" Sets the application token for the module. The token should be a hex string. \"\"\"\n app_token = bytearray.fromhex(token)\n self._send_command(OPCODES['APP_TOKEN_SET'], app_token)\n\n def get_app_token(self):\n \"\"\" Sets the application token for the module. The token should be a hex string. \"\"\"\n app_token = self._send_command(OPCODES['APP_TOKEN_GET'])\n return hexlify(app_token)\n\n def get_rssi(self):\n \"\"\" Sets the application token for the module. The token should be a hex string. \"\"\"\n rssi = self._send_command(OPCODES['RSSI_GET'])\n return rssi\n\n def is_app_token_registered(self):\n \"\"\" Returns whether this module's app token has been confirmed by the gateway. \"\"\"\n return bool(self._send_command(OPCODES['APP_TOKEN_REG_GET'])[0])\n\n def set_qos(self, qos):\n \"\"\"\n Requests a quality of service level from the gateway. `qos` can be\n an integer from 0 through 15.\n \"\"\"\n self._send_command(OPCODES['QOS_REQUEST'], [qos])\n\n def get_qos(self):\n \"\"\" Returns the module's quality of service level. \"\"\"\n return int(self._send_command(OPCODES['QOS_GET'])[0])\n\n def set_downlink_mode(self, downlink_on=True):\n \"\"\"\n Sets the downlink mode of the module (either listening each frame or not listening at\n all) based on the boolean argument.\n \"\"\"\n self._send_command(OPCODES['RX_MODE_SET'], [1 if downlink_on else 0])\n\n def get_downlink_mode(self):\n \"\"\" Returns a boolean indicating whether or not the module is in downlink mode. 
\"\"\"\n return bool(self._send_command(OPCODES['RX_MODE_GET']))\n\n def retrieve_packet(self):\n \"\"\"\n Get a downlink packet from the module.\n Returns the packet, as well as RSSI and SNR values.\n \"\"\"\n buff = self._send_command(OPCODES['PKT_RECV_RSSI'], [0, 0])\n if buff:\n (rssi, ) = struct.unpack_from('<h', buff[:2])\n snr = buff[2]\n message = buff[3:]\n return message, rssi, snr\n\n\nclass ModuleDriver(ModuleConnection):\n \"\"\"\n This class extends the ModuleConnection class to provide higher level\n functionality.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ModuleDriver, self).__init__(*args, **kwargs)\n self.connection_timeout_s = 2 * 60.0\n self.app_token_confirm_timeout = 60.0\n self.transmit_timeout = 60.0\n\n def wait_for_flags(self, flags, timeout, bad_flags=None):\n \"\"\" Waits for all flags in `flags` to show up. \"\"\"\n start = time()\n while time() - start < timeout:\n mod_flags = self.get_irq_flags()\n if bad_flags:\n for flag in bad_flags:\n if flag in mod_flags:\n raise BadFlagError(flag)\n if all(f in mod_flags for f in flags):\n break\n else:\n sleep(0.1)\n else:\n raise RuntimeError(\"Timeout waiting for flags {}\".format(flags))\n\n def set_up(self, app_token, network_token=OPEN_NET_TOKEN, qos=0, factory_reset=False):\n \"\"\"\n Sets up the module so that it's ready to uplink or downlink.\n Throws an exception if it can't connect to a gateway with the provided network\n and application tokens.\n \"\"\"\n\n if factory_reset:\n LOG.info(\"Resetting module %s.\", self)\n self.delete_settings()\n sleep(3)\n\n self.set_mac_mode('Symphony')\n\n LOG.info(\"Setting network token for module %s.\", self)\n old_net_token = self.get_network_token()\n self.set_network_token(network_token)\n if old_net_token != network_token:\n LOG.debug(\"Waiting for connection and crypto flags for module %s.\", self)\n self.wait_for_flags(['CONNECTED', 'CRYPTO_ESTABLISHED'], self.connection_timeout_s,\n bad_flags=['CRYPTO_ERROR', 'DISCONNECTED'])\n\n LOG.info(\"Setting application token for module %s\", self)\n self.set_app_token(app_token)\n if not self.is_app_token_registered():\n LOG.debug(\"Waiting for app token confirmation for module %s.\", self)\n self.wait_for_flags(['APP_TOKEN_CONFIRMED'], self.app_token_confirm_timeout,\n bad_flags=['APP_TOKEN_ERROR'])\n\n LOG.info(\"Setting QoS %s for module %s\", qos, self)\n self.set_qos(qos)\n assert self.get_qos() == qos, \"Failed to set QoS\"\n\n def send_message_checked(self, message):\n \"\"\"\n Sends an ACK'd message, and waits for the status of that message.\n Throws an exception if the message was not successful.\n \"\"\"\n self.clear_irq_flags(['TX_QUEUE_EMPTY', 'TX_ERROR'])\n self.send_message(message, ack=True)\n\n LOG.debug(\"Waiting for ACK for module %s.\", self)\n self.wait_for_flags(['TX_QUEUE_EMPTY'], self.transmit_timeout, bad_flags=['TX_ERROR'])\n\n def get_received_message(self):\n \"\"\"\n Returns a downlink message if there is one, else returns None\n \"\"\"\n if 'RX_DONE' in self.get_irq_flags():\n self.clear_irq_flags(['RX_DONE'])\n pkt = self.retrieve_packet()\n if pkt is None:\n LOG.warning(\"RX_DONE flag set but no packet available.\")\n return pkt\n\n\ndef compute_checksum(buff):\n \"\"\" Computes the 16-bit CRC of the buffer. Returns the checksum as an integer. 
\"\"\"\n checksum = 0\n for byte in buff:\n checksum = ((checksum >> 8)|(checksum << 8)) & 0xFFFF\n checksum = checksum ^ byte\n checksum = checksum ^ ((checksum & 0xFF) >> 4)\n checksum = checksum ^ ((checksum << 12) & 0xFFFF)\n checksum = checksum ^ ((checksum & 0xFF) << 5)\n return checksum\n\n\nclass BadFlagError(BaseException):\n pass\n\n\n@contextmanager\ndef get_all_modules():\n \"\"\"\n Finds all attached modules, and returns a list of ModuleDriver objects\n for each one.\n\n This function uses contextmanager to automatically close each module's serial connection\n after you're done with it. This means you'll have to use the function with the `with`\n keyword. For example:\n\n with get_all_modules() as mods:\n for mod in mods:\n print mod.get_version()\n \"\"\"\n mods = [ModuleDriver(dev) for [dev, _, _] in list_ports.grep('CP210.')]\n yield mods\n for mod in mods:\n mod.close()\n\n\ndef find_module_device():\n \"\"\" Finds the first CP210x device. \"\"\"\n port = next(list_ports.grep('CP210.'), None)\n if port:\n [name, _, _] = port\n return name\n else:\n raise RuntimeError('LinkLabs module not found')\n","sub_path":"client/command_module.py","file_name":"command_module.py","file_ext":"py","file_size_in_byte":17434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489273436","text":"#coding:utf-8\r\n__author__ = 'Nick'\r\n'''\r\npython爬虫入门一之综述\r\n\r\n首先爬虫是什么?\r\n\r\n网络爬虫,是一种按照一定的规则,自动的抓取万维网信息的程序或者脚本。\r\n根据我的经验,要学习Python爬虫,我们要学习的共有以下几点:\r\n\r\nPython基础知识\r\nPython中urllib和urllib2库的用法\r\nPython正则表达式\r\nPython爬虫框架Scrapy\r\nPython爬虫更高级的功能\r\n\r\n1.Python基础学习\r\n\r\n首先,我们要用Python写爬虫,肯定要了解Python的基础吧,万丈高楼平地起,不能忘啦那地基,哈哈,那么我就分享一下自己曾经看过的一些Python教程,小伙伴们可以作为参考。\r\n\r\n1) 慕课网Python教程\r\n曾经有一些基础的语法是在慕课网上看的,上面附有一些练习,学习完之后可以作为练习,感觉效果还是蛮不错的,不过稍微遗憾的是内容基本上都是最基础的,入门开始的话,就这个吧\r\n\r\n学习网址:慕课网Python教程\r\n\r\n2) 廖雪峰Python教程\r\n后来,我发现了廖老师的Python教程,讲的那是非常通俗易懂哪,感觉也是非常不错,大家如果想进一步了解Python就看一下这个吧。\r\n\r\n学习网址:廖雪峰Python教程\r\n\r\n3) 简明Python教程\r\n还有一个我看过的,简明Python教程,感觉讲的也不错\r\n\r\n学习网址:简明Python教程\r\n\r\n2.Python urllib和urllib2 库的用法\r\n\r\nurllib和urllib2库是学习Python爬虫最基本的库,利用这个库我们可以得到网页的内容,并对内容用正则表达式提取分析,得到我们想要的结果。这个在学习过程中我会和大家分享的。\r\n\r\n3.Python 正则表达式\r\n\r\nPython正则表达式是一种用来匹配字符串的强有力的武器。它的设计思想是用一种描述性的语言来给字符串定义一个规则,凡是符合规则的字符串,我们就认为它“匹配”了,否则,该字符串就是不合法的。这个在后面的博文会分享的。\r\n\r\n4.爬虫框架Scrapy\r\n\r\n如果你是一个Python高手,基本的爬虫知识都已经掌握了,那么就寻觅一下Python框架吧,我选择的框架是Scrapy框架。这个框架有什么强大的功能呢?下面是它的官方介绍:\r\n\r\nHTML, XML源数据 选择及提取 的内置支持\r\n提供了一系列在spider之间共享的可复用的过滤器(即 Item Loaders),对智能处理爬取数据提供了内置支持。\r\n通过 feed导出 提供了多格式(JSON、CSV、XML),多存储后端(FTP、S3、本地文件系统)的内置支持\r\n提供了media pipeline,可以 自动下载 爬取到的数据中的图片(或者其他资源)。\r\n高扩展性。您可以通过使用 signals ,设计好的API(中间件, extensions, pipelines)来定制实现您的功能。\r\n内置的中间件及扩展为下列功能提供了支持:\r\ncookies and session 处理\r\nHTTP 压缩\r\nHTTP 认证\r\nHTTP 缓存\r\nuser-agent模拟\r\nrobots.txt\r\n爬取深度限制\r\n针对非英语语系中不标准或者错误的编码声明, 提供了自动检测以及健壮的编码支持。\r\n支持根据模板生成爬虫。在加速爬虫创建的同时,保持在大型项目中的代码更为一致。详细内容请参阅 genspider 命令。\r\n针对多爬虫下性能评估、失败检测,提供了可扩展的 状态收集工具 。\r\n提供 交互式shell终端 , 为您测试XPath表达式,编写和调试爬虫提供了极大的方便\r\n提供 System service, 简化在生产环境的部署及运行\r\n内置 Web service, 使您可以监视及控制您的机器\r\n内置 Telnet终端 ,通过在Scrapy进程中钩入Python终端,使您可以查看并且调试爬虫\r\nLogging 为您在爬取过程中捕捉错误提供了方便\r\n支持 Sitemaps 爬取\r\n具有缓存的DNS解析器\r\n官方文档:http://doc.scrapy.org/en/latest/\r\n\r\n等我们掌握了基础的知识,再用这个 Scrapy 
框架吧!\r\n\r\n\r\n下面开始我们正式进入爬虫之旅吧!\r\n\r\n'''\r\n#---------------------------------------------------------------------------------------------------------------\r\n\r\n'''\r\nPython爬虫入门二之爬虫基础了解\r\n\r\n1.什么是爬虫\r\n\r\n爬虫,即网络爬虫,大家可以理解为在网络上爬行的一直蜘蛛,互联网就比作一张大网,而爬虫便是在这张网上爬来爬去的蜘蛛咯,如果它遇到资源,那么它就会抓取下来。想抓取什么?这个由你来控制它咯。\r\n\r\n比如它在抓取一个网页,在这个网中他发现了一条道路,其实就是指向网页的超链接,那么它就可以爬到另一张网上来获取数据。这样,整个连在一起的大网对这之蜘蛛来说触手可及,分分钟爬下来不是事儿。\r\n\r\n2.浏览网页的过程\r\n\r\n在用户浏览网页的过程中,我们可能会看到许多好看的图片,比如 http://image.baidu.com/ ,我们会看到几张的图片以及百度搜索框,这个过程其实就是用户输入网址之后,经过DNS服务器,找到服务器主机,向服务器发出一个请求,服务器经过解析之后,发送给用户的浏览器 HTML、JS、CSS 等文件,浏览器解析出来,用户便可以看到形形色色的图片了。\r\n\r\n因此,用户看到的网页实质是由 HTML 代码构成的,爬虫爬来的便是这些内容,通过分析和过滤这些 HTML 代码,实现对图片、文字等资源的获取。\r\n\r\n3.URL的含义\r\n\r\nURL,即统一资源定位符,也就是我们说的网址,统一资源定位符是对可以从互联网上得到的资源的位置和访问方法的一种简洁的表示,是互联网上标准资源的地址。互联网上的每个文件都有一个唯一的URL,它包含的信息指出文件的位置以及浏览器应该怎么处理它。\r\n\r\nURL的格式由三部分组成:\r\n①第一部分是协议(或称为服务方式)。\r\n②第二部分是存有该资源的主机IP地址(有时也包括端口号)。\r\n③第三部分是主机资源的具体地址,如目录和文件名等。\r\n爬虫爬取数据时必须要有一个目标的URL才可以获取数据,因此,它是爬虫获取数据的基本依据,准确理解它的含义对爬虫学习有很大帮助。\r\n\r\n4. 环境的配置\r\n\r\n学习Python,当然少不了环境的配置,在Windows下我用了PyCharm\r\n下一节,我们就正式步入 Python 爬虫学习的殿堂了,小伙伴准备好了嘛?\r\n\r\n\r\n'''","sub_path":"Exercises/jingmi_web_robot/rumen/rumen1.py","file_name":"rumen1.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"310336989","text":"from Account import Account\nfrom Utils import dollar\n\n\nclass Customer:\n \"\"\"Represents a customer.\"\"\"\n def __init__(self, pin, name, account_list):\n \"\"\"\n :param pin: The customer's unique PIN\n :type pin: ``str``\n :param name: The customer's full name\n :type name: ``str``\n :param account_list: A list of small dictionaries specifying the type\n (checking, savings, money market) and balance of each account under\n the customer's name. 
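Each dict is expected to carry ``account_type`` and ``balance`` keys, which the constructor loop below reads directly.\n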
This is has already been decoded from the\n raw JSON.\n :type account_list: ``list`` of ``dict``\n \"\"\"\n self.pin = pin\n self.name = name\n self.accounts = {} # Account objects stored by type (checking, etc.)\n self.summary = [] # A list of dictionaries\n self.num_transactions = 0\n\n # Instantiate `Account.Account` objects with the balance information\n # attached to the customer's name in the raw data.\n for item in account_list:\n self.accounts[item[\"account_type\"]] = Account(\n item[\"account_type\"], item[\"balance\"])\n\n def display_account_choices(self):\n \"\"\"Generates a prompt so the customer can choose which account to\n work in.\n\n :returns: A multiple choice set that gets fed to a `raw_input()`\n function later.\n :return type: ``str``\n \"\"\"\n message = \"\"\n for key, value in self.accounts.iteritems():\n instruction = \"To access your {} account, enter {}.\\n\".format(\n key, value.code)\n message += instruction\n return message\n\n def update_account_summary(self):\n \"\"\"For each account, adds a line stating the balance to the\n `Customer`'s summary list for later processing.\n\n :returns: None\n \"\"\"\n # First clear out any outdated information before proceeding.\n self.summary = []\n for account in self.accounts.values():\n funds_available_statement = \"{}: {} available\".format(\n account.account_type, dollar(account.balance))\n self.summary.append(funds_available_statement)\n\n def display_account_summary(self):\n \"\"\"Prints every line in the `Customer`'s summary list.\n\n :returns: A screen-ready summary of all account balances.\n :return type: ``str``\n \"\"\"\n message = \"Here is your account summary: \\n\"\n for statement in self.summary:\n message = message + statement + \"\\n\"\n return message\n","sub_path":"src/Customer.py","file_name":"Customer.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500481219","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom datetime import datetime\nfrom util.consts import *\nfrom util.utils import get_start_end_time\nfrom util.mysql import MysqlClient\nfrom channel.comm import save, get_all_values_by_four_field\nfrom base.config import Config\n\nlogger = logging.getLogger(__name__)\n\n\ndef pingce(start_time=None, end_time=None):\n\t\"\"\"\n\t\tdesc: 计算同步的开始,结束时间。 若输入开始和结束时间则为输入时间; 否则默认开始时间为6小时前,结束时间为当前时间\n\t\t:param start_time:\n\t\t:param end_time:\n\t\t:return:\n\t\"\"\"\n\tstart_time, end_time = get_start_end_time(PC_SYNC_LAST_TIME_PATH, start_time, end_time)\n\t\n\tif start_time == end_time:\n\t\tlogger.info(\"pingce start time equal end time return.\")\n\t\treturn\n\tlimit = ROW_COUNT\n\toffset = 0\n\t# 新增数据同步\n\tinsert(start_time, end_time, limit, offset)\n\t\n\t# 更新数据同步\n\tupdate(start_time, end_time, limit, offset)\n\n\ndef insert(start_time, end_time, limit, offset):\n\t\"\"\"\n\tdesc: 评测新增数据同步\n\t:param start_time:\n\t:param end_time:\n\t:param limit:\n\t:param offset:\n\t:return:\n\t\"\"\"\n\tincr_comm_sql = \"\"\"select id, publishtime,brand_id, title, comment_count, collection_count, brand, mall\n\t\t\t\t\t\tfrom zdmdb_probreport where publishtime >= '{start_time}'\n\t\t\t\t\t\tand publishtime < '{end_time}' and type = 3 and is_delete = 0 limit {l} offset {o}\"\"\"\n\tincr_sql = incr_comm_sql.format(start_time=start_time, end_time=end_time, l=limit, o=offset)\n\t\n\tselectMysqlClient = MysqlClient()\n\tresult = selectMysqlClient.getMany(incr_sql, ROW_COUNT, None, False)\n\t\n\twhile 
result:\n\t\toffset += limit\n\t\tvalue_list = []\n\t\t\n\t\tfor (t_article_id, t_publish_time, t_brand_id, t_title, t_comment_count, t_collection_count, t_brand, t_mall) in result:\n\t\t\tsync_home = 0\n\t\t\tis_top = 0\n\t\t\tmachine_report = 0\n\t\t\tpublish_time = t_publish_time\n\t\t\tsync_home_time = ''\n\t\t\tsync_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\tstatus = 0\n\t\t\tlogger.info(\"article_id=%s, publish_time=%s\", t_article_id, publish_time)\n\t\t\t\n\t\t\t# 品类,未上线,暂时不处理\n\t\t\t# TODO 需要从zdmdb_probreport_category树表表提取四级分类,暂时不处理\n\t\t\tcate_sql = None\n\t\t\t(level1_ids, level2_ids, level3_ids, level4_ids) = get_all_values_by_four_field(cate_sql)\n\t\t\t\n\t\t\t# 品牌\n\t\t\tbrand_ids = t_brand_id\n\t\t\t\n\t\t\t# 一个文章有多个标签id\n\t\t\t# TODO 评测库没有smzdm_tag_type_item 表,稍候处理\n\t\t\t# tag_sql = \"\"\"select tag_id from smzdm_tag_type_item where blog_id={aid} and type =7\"\"\".format(\n\t\t\t# \taid=t_article_id)\n\t\t\t# tag_ids = get_all_values_by_one_field(tag_sql)\n\t\t\ttag_ids = 0\n\t\t\tt_digital_price = ''\n\t\t\tt_worthy = 0\n\t\t\tt_unworthy = 0\n\t\t\tt_mall_id = 0\n\t\t\tt_praise = 0\n\t\t\tt_sum_collect_comment = 0\n\t\t\tt_title = t_title if t_title else ''\n\t\t\tt_comment_count = t_comment_count if t_comment_count else 0\n\t\t\tt_collection_count = t_collection_count if t_collection_count else 0\n\t\t\tt_brand = t_brand if t_brand else ''\n\t\t\tt_mall = t_mall if t_mall else ''\n\t\t\t\n\t\t\t# 查看是否为编辑同步到首页\n\t\t\t# 此部分只同步数据,同步的首页的状态在home.py中\n\t\t\t# home_sql = \"\"\"select is_write_post,set_auto_sync,is_write_post_time,is_home_top from zdmdb_probreport where id={aid}\"\"\".format(\n\t\t\t# \t\taid=t_article_id)\n\t\t\t# home_result = selectMysqlClient.getAll(home_sql, None, False)\n\t\t\t# if home_result:\n\t\t\t# \tif home_result[0][0] > 0: # 立即同步主站标识\n\t\t\t# \t\tsync_home = home_result[0][0]\n\t\t\t# \telif home_result[0][1] > 0: # 自动同步主站标识\n\t\t\t# \t\tsync_home = home_result[0][1]\n\t\t\t# \tsync_home_time = home_result[0][2]\n\t\t\t# \tis_top = home_result[0][3]\n\t\t\t\n\t\t\tvalue_list.append((t_article_id, PINGCE_CHANNEL_ID, \"%s:%s\" % (t_article_id, PINGCE_CHANNEL_ID),\n\t\t\t PINGCE_CHANNEL, level1_ids, level2_ids, level3_ids, level4_ids,\n\t\t\t tag_ids, brand_ids,\n\t\t\t sync_home, is_top, machine_report,\n\t\t\t publish_time if publish_time else DEFAULT_TIME,\n\t\t\t sync_time,\n\t\t\t sync_time,\n\t\t\t status,\n\t\t\t t_title, t_comment_count, t_collection_count, t_praise, t_sum_collect_comment, t_mall,\n\t\t\t t_brand, t_digital_price, t_worthy, t_unworthy, t_mall_id\n\t\t\t )\n\t\t\t )\n\t\t\n\t\tlogger.info(\"values_list: %s\", value_list)\n\t\t# 写入数据\n\t\tsave(value_list)\n\t\t\n\t\tnew_sql = incr_comm_sql.format(start_time=start_time, end_time=end_time, l=limit, o=offset)\n\t\tresult = selectMysqlClient.getMany(new_sql, ROW_COUNT, None, False)\n\t\n\tselectMysqlClient.close()\n\n\ndef update(start_time, end_time, limit, offset):\n\t\"\"\"\n\tdesc: 评测变化数据同步, 评测只同步首页的数据,非首页的数据不同步\n\t:param start_time:\n\t:param end_time:\n\t:param limit:\n\t:param offset:\n\t:return:\n\t\"\"\"\n\ttbl = Config[\"master.recommend_tbl\"]\n\tselectMysqlClient = MysqlClient()\n\tup_comm = \"\"\"select id,is_write_post,set_auto_sync,is_write_post_time,is_home_top,brand_id, title,\n\t\t\t\t\tcomment_count, collection_count, brand, mall from zdmdb_probreport\n\t\t\t\t\twhere is_write_post_time >= '{start_time}' and is_write_post_time < '{end_time}'\n\t\t\t\t\tand type = 3 and is_delete= 0\n\t\t\t\t\t limit {l} offset {o}\"\"\"\n\tup_sql = 
up_comm.format(start_time=start_time, end_time=end_time, l=limit, o=offset)\n\tlogger.info(\"up_sql: %s\", up_sql)\n\tresult = selectMysqlClient.getMany(up_sql, ROW_COUNT, None, False)\n\twhile result:\n\t\toffset += limit\n\t\tfor (t_article_id, t_write_post, t_auto_sync, t_post_time, t_is_home_top, t_brand_id, t_title, t_comment_count, t_collection_count, t_brand, t_mall) in result:\n\t\t\tlogger.info(\"update t_article_id=%s, t_write_post=%s, t_auto_sync=%s, t_post_time=%s, t_is_home_top= %s, t_brand_id=%s\",\n\t\t\t t_article_id, t_write_post, t_auto_sync, t_post_time, t_is_home_top, t_brand_id)\n\t\t\t\n\t\t\tsync_home = 0\n\t\t\tif t_write_post > 0:\n\t\t\t\tsync_home = t_write_post\n\t\t\telif t_auto_sync > 0:\n\t\t\t\tsync_home = t_auto_sync\n\t\t\t\n\t\t\tis_top = t_is_home_top\n\t\t\tsync_home_time = t_post_time if t_post_time else DEFAULT_TIME\n\t\t\t\n\t\t\t# 品类,未上线,暂时不处理\n\t\t\t# TODO 需要从zdmdb_probreport_category树表表提取四级分类,暂时不处理\n\t\t\tcate_sql = None\n\t\t\t(level1_ids, level2_ids, level3_ids, level4_ids) = get_all_values_by_four_field(cate_sql)\n\t\t\t\n\t\t\t# 品牌\n\t\t\tbrand_ids = t_brand_id\n\t\t\t\n\t\t\t# 一个文章有多个标签id\n\t\t\t# TODO 评测库没有smzdm_tag_type_item 表,稍候处理\n\t\t\t# tag_sql = \"\"\"select tag_id from smzdm_tag_type_item where blog_id={aid} and type =7\"\"\".format(\n\t\t\t# \t\taid=t_article_id)\n\t\t\t# tag_ids = get_all_values_by_one_field(tag_sql)\n\t\t\ttag_ids = 0\n\t\t\t\n\t\t\t# 这里只更新标签,品牌,品类属性,同步到首页的状态放到了home.py中\n\t\t\t# if level1_ids or level2_ids or level3_ids or level4_ids or tag_ids or brand_ids or sync_home or is_top or sync_home_time:\n\t\t\t# \thome_article_up_sql = \"\"\"update home_article set level1_ids='{l1}', level2_ids='{l2}', level3_ids = '{l3}',\n\t\t\t# \t\t\t\t\t\t\t\t\t level4_ids = '{l4}', tag_ids = '{ti}', brand_ids = '{bi}', sync_home = '{sh}',\n\t\t\t# \t\t\t\t\t\t\t\t\t is_top = '{it}', sync_home_time= '{sht}' where article_id = {aid}\n\t\t\t# \t\t\t\t\t\t\t\t\t and channel='{c}'\"\"\".format(l1=level1_ids, l2=level2_ids,\n\t\t\t# \t l3=level3_ids,\n\t\t\t# \t l4=level4_ids, ti=tag_ids, bi=brand_ids,\n\t\t\t# \t sh=sync_home, it=is_top,\n\t\t\t# \t sht=sync_home_time, aid=t_article_id,\n\t\t\t# \t c=PINGCE_CHANNEL)\n\t\t\t#\n\t\t\t# \tMysqlClient(mode=\"master\").update(home_article_up_sql)\n\t\t\n\t\t\t# if level1_ids or level2_ids or level3_ids or level4_ids or tag_ids or brand_ids:\n\t\t\thome_article_up_sql = u\"\"\"update {tbl} set level1_ids='{l1}', level2_ids='{l2}', level3_ids = '{l3}',\n\t\t\t\t\t\t\t\t\t\t\t level4_ids = '{l4}', tag_ids = '{ti}', brand_ids = '{bi}',\n\t\t\t\t\t\t\t\t\t\t\t title='{title}',\n\t\t\t\t\t\t\t\t\t\t\t\tcomment_count={comment_count},\n\t\t\t\t\t\t\t\t\t\t\t\tcollection_count={collection_count},\n\t\t\t\t\t\t\t\t\t\t\t\tmall='{mall}',\n\t\t\t\t\t\t\t\t\t\t\t\tbrand='{brand}'\n\t\t\t\t\t\t\t\t\t\t\t where article_id = {aid}\n\t\t\t\t\t\t\t\t\t\t\t and channel='{c}'\"\"\".format(tbl=tbl, l1=level1_ids, l2=level2_ids,\n\t\t\t l3=level3_ids,\n\t\t\t l4=level4_ids, ti=tag_ids, bi=brand_ids,\n\t\t\t title=t_title if t_title else '',\n\t\t\t comment_count=t_comment_count if t_comment_count else 0,\n\t\t\t collection_count=t_collection_count if t_collection_count else 0,\n\t\t\t mall=t_mall if t_mall else '',\n\t\t\t brand=t_brand if t_brand else '',\n\t\t\t aid=t_article_id, c=PINGCE_CHANNEL)\n\t\t\ttry:\n\t\t\t\tMysqlClient(mode=\"master\").update(home_article_up_sql)\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.warn(\"home_article_up_sql: %s, err: %s\", home_article_up_sql, str(e))\n\t\t\n\t\tup_sql = 
up_comm.format(start_time=start_time, end_time=end_time, l=limit, o=offset)\n\t\tresult = selectMysqlClient.getMany(up_sql, ROW_COUNT, None, False)\n\t\n\tselectMysqlClient.close()\n","sub_path":"DM/article_sync/channel/pingce.py","file_name":"pingce.py","file_ext":"py","file_size_in_byte":9502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12586866","text":"import datetime\nimport decimal\nimport json\nimport re\n\nfrom django.core.serializers import serialize\nfrom django.utils.safestring import mark_safe\nfrom django.views.generic import CreateView, DeleteView, ListView, UpdateView\n\nfrom authnapp.models import User\nfrom mainapp.models import (\n HeaderData,\n UK,\n Appartament,\n ConstantPayments,\n CurrentCounter,\n HistoryCounter,\n PersonalAccountStatus,\n Privileges,\n Recalculations,\n Services,\n Standart,\n Subsidies,\n VariablePayments,\n)\n\n\ndef main(request):\n pass\n\n\nclass InvoiceViews(ListView):\n model = User\n context_object_name = \"user\"\n template_name = \"invoice/invoice.html\"\n\n def get_queryset(self):\n return User.objects.filter(pk = self.request.user.id)\n\n def get_context_data(self, **kwargs):\n user = self.request.user\n self.wrapper()\n context = super().get_context_data(**kwargs)\n context[\"header\"] = mark_safe(serialize(\"json\", HeaderData.objects.filter(user=user)))\n context[\"constant\"] = mark_safe(serialize(\"json\", ConstantPayments.objects.filter(user=user)))\n context[\"variable\"] = mark_safe(serialize(\"json\", VariablePayments.get_last_val(user.id)))\n context[\"status\"] = mark_safe(serialize(\"json\", PersonalAccountStatus.get_item(user)))\n return context\n\n def wrapper(self):\n get_calc_const()\n get_calc_variable()\n get_head_data()\n \n\n# Расчет КОНСТАНТНЫХ платежей (по сигналу когда идут изменения в таблице Services)\ndef get_calc_const():\n users = User.objects.select_related().filter(is_staff=False)\n rate = Services.get_const_payments(1)\n\n for user in users:\n data = []\n total = 0\n pre_total = 0\n\n user_id = User.objects.get(id=user.id)\n appart = Appartament.objects.get(user=user)\n for el in rate:\n element = dict()\n element[\"service\"] = el.name\n element[\"unit\"] = el.unit\n element[\"rate\"] = el.rate\n\n if el.unit.name == \"м2\":\n element[\"accured\"] = el.rate * appart.sq_appart\n elif el.unit.name == \"чел\":\n element[\"accured\"] = el.rate * appart.num_owner\n else:\n element[\"accured\"] = el.rate\n\n element[\"standart\"] = \"\"\n element[\"volume\"] = \"\"\n element[\"coefficient\"] = el.factor if el.factor >= 0 else \"\"\n element[\"subsidies\"] = 0\n element[\"privileges\"] = 0\n element[\"recalculation\"] = 0\n element[\"total\"] = element[\"accured\"]\n element[\"pre_total\"] = element[\"accured\"]\n pre_total += element[\"pre_total\"]\n total += element[\"total\"]\n\n data.append(element)\n\n update_values = {\n \"data\": json.dumps(data, ensure_ascii=False, default=str),\n \"total\": decimal.Decimal(total),\n \"pre_total\": decimal.Decimal(total)\n }\n obj, created = ConstantPayments.objects.update_or_create(\n user=user_id, defaults=update_values\n )\n return (data, total)\n\n# Расчет ПЕРЕМЕННЫХ платежей (по сигналу)\n#TODO Какой сигнал? 30 число? 
или же после внесения счетчиков?\ndef get_calc_variable():\n users = User.objects.filter(is_staff=False)\n rate = Services.get_varybose_payments(1)\n\n for user in users:\n data = []\n total = 0 \n pre_total = 0\n period = datetime.datetime.now().replace(day=1)\n user_id = User.objects.get(id=user.id)\n appa = Appartament.get_item(user.id)[0]\n stand = Standart.get_last_val(appa.house_id)[0]\n sq_appa = appa.sq_appart\n hist = HistoryCounter.get_last_val(user.id)[0]\n\n try:\n #Если счетчики введены, считаем объем\n object_curr = CurrentCounter.get_last_val(user.id)[0]\n #TODO Почему тип list???\n volume_col = object_curr.col_water - hist.col_water,\n volume_hot = object_curr.hot_water - hist.hot_water,\n volume_sewage = volume_col[0] + volume_hot[0]\n curr = {\n \"standart\": False, \n \"volume_col\": volume_col[0], \n \"volume_hot\": volume_hot[0],\n \"volume_sewage\": volume_sewage,\n \"period\": object_curr.period\n }\n except:\n #Если счетчики не введены, берем общедомовой средний объем\n curr = {\n \"standart\": True,\n \"volume_col\": stand.col_water, \n \"volume_hot\": stand.col_water,\n \"volume_sewage\": (stand.col_water + stand.col_water) * sq_appa,\n \"period\": (period - datetime.timedelta(days=1))\n }\n\n subs = Subsidies.get_items(user.id)\n priv = Privileges.get_items(user.id)\n recl = Recalculations.get_last_val(user.id)\n\n for el in rate:\n calc = get_calc_service(el, curr, sq_appa, subs, priv, recl)\n data.append(calc)\n total += calc[\"total\"]\n pre_total += calc[\"pre_total\"]\n \n update_values = {\n \"data\": json.dumps(data, ensure_ascii=False, default=str),\n \"total\": decimal.Decimal(total),\n \"pre_total\": decimal.Decimal(pre_total)\n }\n obj, created = VariablePayments.objects.update_or_create(\n user=user_id, period=period, defaults=update_values\n )\n return (data, total, pre_total)\n\n# Делает расчет всех полей по Услуге\ndef get_calc_service(el, curr, sq_appa, subs, priv, recl):\n element = dict()\n water = False\n element[\"service\"] = el.name\n element[\"unit\"] = el.unit\n element[\"standart\"] = 0\n element[\"rate\"] = el.rate\n if re.search(r'холодная', el.name.lower()):\n element[\"volume\"] = curr[\"volume_col\"]\n water = True\n elif re.search(r'горячая', el.name.lower()):\n element[\"volume\"] = curr[\"volume_hot\"]\n water = True\n if not curr[\"standart\"] and water:\n element[\"accured\"] = el.rate * element[\"volume\"]\n elif curr[\"standart\"] and water:\n element[\"accured\"] = el.rate * element[\"volume\"] * sq_appa\n if re.search(r'водоотведение', el.name.lower()):\n element[\"volume\"] = curr[\"volume_sewage\"]\n element[\"accured\"] = el.rate * curr[\"volume_sewage\"]\n #TODO Электирчество кончилось... 
Кина не будет\n # if el.name == \"Электроэнергия (день)\" and prof.type_electric_meter == 2:\n # accured = el.rate * (curr.electric_day - hist.hist_electric_day)\n # elif el.name == \"Электроэнергия (ночь)\" and prof.type_electric_meter == 2:\n # accured = el.rate * (curr.electric_night - hist.hist_electric_night)\n # elif el.name == \"Электроэнергия\" and prof.type_electric_meter == 1:\n # accured = el.rate * curr.electric_single\n element[\"coefficient\"] = el.factor if el.factor > 0 else 1\n element[\"pre_total\"] = (element[\"accured\"] * element[\"coefficient\"])\n element[\"subsidies\"] = element[\"pre_total\"] * decimal.Decimal(get_sale(el.name, subs)/ 100)\n element[\"privileges\"] = element[\"pre_total\"] * decimal.Decimal(get_sale(el.name, priv)/ 100)\n element[\"recalculation\"] = get_recl(el.name, recl)\n element[\"total\"] = (element[\"accured\"] * element[\"coefficient\"]) - (element[\"subsidies\"] + element[\"privileges\"]) + element[\"recalculation\"]\n return (element)\n\n# Prepares the data for the invoice header (personal details and company requisites)\n#TODO hook model signals up so this data refreshes whenever the source records change\ndef get_head_data():\n users = User.objects.filter(is_staff=False)\n\n for user in users:\n data = dict()\n appa = Appartament.get_item(user.id)[0]\n uk = UK.get_item(appa.house.uk_id)\n\n data[\"payer\"] = user.name # Payer\n data[\"address\"] = appa\n data[\"sq_appart\"] = appa.sq_appart # Apartment area\n data[\"num_living\"] = appa.num_owner # Number of residents\n data[\"name_uk\"] = UK.get_full_name(uk.id) # Name, address, phone etc. of the management company\n data[\"requisites\"] = UK.get_requisites(uk.id) # Requisites (bank details etc.) of the management company\n data[\"personal_account\"] = user.personal_account # Personal account number\n\n update_values = {\n \"data\": json.dumps(data, ensure_ascii=False, default=str),\n }\n obj, created = HeaderData.objects.update_or_create(\n user=user, defaults=update_values\n )\n\n# Returns the subsidy or privilege discount if one exists for the service, else 0\n# (fixed: an early else-return used to stop the scan at the first non-matching entry)\ndef get_sale(name, arr):\n for el in arr:\n if el.service.name == name:\n return el.sale\n return 0\n\n# Returns the recalculation amount if one exists for the service, else 0\n# (fixed: same early else-return as in get_sale)\ndef get_recl(name, arr):\n for el in arr:\n if el.service.name == name:\n return el.recalc\n return 0\n","sub_path":"invoice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"630798502","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 12 10:42:57 2020\n\n@author: simiyu\n\"\"\"\n\nfrom mysql.connector import connect\nimport pandas.io.sql as sql\n\n\ndb_name = \"sample_db\"\ntable_name = \"emplyee\"\ndb_user = \"root\"\ndb_user_pass = \"\"\n\n\ndef is_mysql_connection_available(connection):\n print('Attempting to connect to the database...')\n if connection.is_connected():\n print(\"Connection to the database established successfully\")\n return True\n else:\n print(\"No Connection to the database !!!\")\n return False\n\n\ndef display_all_emplyee(connection, table_name):\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM \"+str(table_name)+\";\")\n records = cursor.fetchall()\n\n heading = f\"Total registered {table_name}s in the system: {cursor.rowcount}\"\n print(heading)\n print (\"-\" * len(heading))\n for row in records:\n print(f\"Name: {row[0]}\")\n print(f\"Age: {row[1]}\\n\")\n\n cursor.close()\n connection.close()\n print(\"MySQL connection is closed\")\n
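# [Editorial aside] A hedged sketch, not part of the original script: search_emplyee\n# further down builds its LIKE clause by pasting raw user input into the SQL text,\n# which invites SQL injection. mysql-connector can bind values with %s placeholders\n# (table names cannot be bound, so table_name must still come from trusted code).\n# The function name is an illustrative assumption.\ndef search_employee_safe(connection, table_name, query):\n    sql_stmt = \"SELECT * FROM \" + str(table_name) + \" WHERE name LIKE %s\"\n    cursor = connection.cursor()\n    cursor.execute(sql_stmt, ('%' + query + '%',))  # the driver escapes the bound value\n    records = cursor.fetchall()\n    cursor.close()\n    return records\n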
\ndef export_to_excel(connection, table_name):\n # read the data\n df = sql.read_sql(\"select * from \"+str(table_name)+\";\", connection)\n # export the data into the excel sheet\n df.to_excel('employees.xlsx')\n print(\"Employees exported to employees.xlsx\")\n\n\ndef add_new_emplyee(connection, table_name,emplyee):\n sql_stmt = \"\"\"INSERT INTO \"\"\"+str(table_name)+\"\"\" (name,age) VALUES (%s,%s)\"\"\"\n\n cursor = connection.cursor(prepared=True)\n cursor.execute(sql_stmt,emplyee)\n connection.commit()\n cursor.close()\n connection.close()\n\n print(\"Record successfully inserted into the database using prepared statement\")\n\n\ndef search_emplyee(connection, table_name,query):\n # NOTE: built by string concatenation and injection-prone; see the parameterized search_employee_safe sketch above\n sql_stmt = \"SELECT * FROM \" + str(table_name) + \" WHERE name LIKE '%\" + str(query) + \"%'\"\n\n cursor = connection.cursor()\n cursor.execute(sql_stmt)\n records = cursor.fetchall()\n\n heading = f\"search for '{query}' returned: {cursor.rowcount} rows\"\n print(heading)\n print (\"-\" * len(heading))\n for row in records:\n print(f\"Name: {row[0]}\")\n print(f\"Age: {row[1]}\\n\")\n\n cursor.close()\n connection.close()\n print(\"MySQL connection is closed\")\n\n\ndef update_emplyee(connection, table_name, emplyee):\n sql_stmt = \"UPDATE \"+str(table_name)+\" SET name = %s,age = %s WHERE id = %s\"\n\n cursor = connection.cursor(prepared=True)\n cursor.execute(sql_stmt,emplyee)\n connection.commit()\n cursor.close()\n connection.close()\n\n print(\"Record successfully updated in the database using prepared statement\")\n\ndef delete_emplyee(connection, table_name,id):\n sql_stmt = \"DELETE FROM \"+str(table_name)+\" WHERE id = \"+str(id)\n cursor = connection.cursor(prepared=True)\n cursor.execute(sql_stmt)\n connection.commit()\n cursor.close()\n connection.close()\n\n print(\"Record successfully deleted from the database using prepared statement\")\n\n\n\ndef main():\n print(\"**** Follow the prompts carefully ****\")\n\n\n mydb = connect(host=\"localhost\", user=db_user, passwd=db_user_pass)\n\n if is_mysql_connection_available(mydb):\n mycursor = mydb.cursor()\n mycursor.execute(\"CREATE DATABASE IF NOT EXISTS \"+str(db_name))\n\n mydbtable = connect(host=\"localhost\", user=db_user, passwd=db_user_pass, database=db_name)\n if is_mysql_connection_available(mydbtable):\n mycursortb = mydbtable.cursor()\n mycursortb.execute(\"CREATE TABLE IF NOT EXISTS \"+str(table_name)+\" (name VARCHAR(255), age INT,id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))\")\n\n choice = str(input(\"Enter 'C' to add, 'R' to view single, 'L' to list, 'E' to export to excel, 'U' to update, 'D' to delete and 'Q' to quit : \"))\n if(choice == 'C'):\n name = str(input(\"Enter Employee Full Name : \"))\n age = int(input(\"Enter Employee Age : \"))\n employee = (name, age)\n add_new_emplyee(mydbtable, table_name,employee)\n main()\n\n elif(choice == 'R'):\n name = str(input(\"Enter Employee Name or a part of name : \"))\n search_emplyee(mydbtable, table_name,name)\n main()\n\n elif(choice == 'L'):\n display_all_emplyee(mydbtable, table_name)\n main()\n\n elif(choice == 'E'):\n export_to_excel(mydbtable, table_name)\n main()\n elif(choice == 'U'):\n emplid = int(input(\"Enter Employee Id : \"))\n name = str(input(\"Enter Updated Employee Full Name : \"))\n age = int(input(\"Enter Updated Employee Age : \"))\n employee = (name, age, emplid)\n update_emplyee(mydbtable, table_name,employee)\n main()\n\n elif(choice == 'D'):\n emplid = int(input(\"Enter Employee Id : \"))\n delete_emplyee(mydbtable, table_name,emplid)\n main()\n\n elif (choice == 'Q'):\n raise SystemExit\n\n else:\n 
main()\n else:\n print(\"**** Please Provide valid database connection ****\")\n else:\n print(\"**** Please Provide valid database connection ****\")\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"scripting & automation/mysql_python_crud/mysql_crud.py","file_name":"mysql_crud.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346428465","text":"\"\"\"查找显示项目模板.\"\"\"\nimport json\nfrom itertools import groupby\n#from functools import partial\nfrom collections.abc import Mapping\nfrom pathlib import Path\nfrom typing import Optional, List\n\nfrom pmfp.const import PMFP_TEMPLATES_HOME\nfrom ._find_path import find_path\n\n\ndef _find_all()->Optional[List[Path]]:\n \"\"\"查找所有模板.\"\"\"\n print(\"展示所有模板列表\")\n result = find_path(\n PMFP_TEMPLATES_HOME,\n lambda path, depth: path.suffix == \".json\" and path.is_file()\n )\n return result\n\n\ndef _find_all_language(language: str)->Optional[List[Path]]:\n \"\"\"从指定语言中查找模板.\"\"\"\n print(f\"展示{language}的模板列表\")\n p = PMFP_TEMPLATES_HOME.joinpath(language.lower())\n if p.is_dir():\n result = find_path(\n p,\n lambda path, depth: path.suffix == \".json\" and path.is_file()\n )\n else:\n print(f\"找不到{language}语言对应的模板\")\n result = None\n return result\n\n\ndef _find_all_language_category(language: str, category: str)->Optional[List[Path]]:\n \"\"\"从特定语言的特定类型中查找模板.\"\"\"\n print(f\"展示{language}中的{category}分类的模板列表\")\n p = PMFP_TEMPLATES_HOME.joinpath(language.lower()).joinpath(category.lower())\n if p.is_dir():\n result = find_path(\n p,\n lambda path, depth: path.suffix == \".json\" and path.is_file()\n )\n else:\n print(f\"找不到{language}语言下{category}分类对应的模板\")\n result = None\n return result\n\n\ndef _show_template_list(template_list: List[Path])->bool:\n \"\"\"展示模板列表.\"\"\"\n to_show = []\n for path in template_list:\n result = path.parts[-3:]\n to_show.append(result)\n gp_language = groupby(sorted(to_show, key=lambda x: x[0]), key=lambda x: x[0])\n ret = []\n for i, ite in gp_language:\n print(f\"=====语言:{i}========\")\n gp_c = groupby(sorted(ite, key=lambda x: x[1]), key=lambda x: x[1])\n for j, jte in gp_c:\n print(f\"-----分类:{j}------\")\n for k in jte:\n temp_name = k[-2] + \"-\" + k[-1].split(\".\")[0]\n print(temp_name)\n ret.append(temp_name)\n return ret\n\n\ndef find_template_detail(name: str, path: Path)->bool:\n \"\"\"查找模板具体信息.\n\n Args:\n name (str): 模板名\n path (Path): 模板地址\n\n Returns:\n bool: 是否找到符合条件的模板\n\n \"\"\"\n if path.suffix == \".json\":\n with open(str(path), encoding=\"utf-8\") as f:\n content = json.load(f)\n result = True if content[\"name\"] == name else False\n else:\n result = False\n return result\n\n\ndef _find_module_from_all(name: str)->Optional[List[Path]]:\n \"\"\"从全部模板中找出某一名字的模板.\"\"\"\n print(f\"从全局查找模板{name}\")\n result = find_path(\n PMFP_TEMPLATES_HOME,\n lambda path, depth: find_template_detail(name, path)\n )\n return result\n\n\ndef _find_module_from_language(name: str, language: str)->Optional[List[Path]]:\n \"\"\"从某种语言的全部模板中找出某一名字的模板.\"\"\"\n print(f\"从{language}的组件中查找模板{name}\")\n p = PMFP_TEMPLATES_HOME.joinpath(language.lower())\n if p.is_dir():\n result = find_path(p, lambda path, depth: find_template_detail(name, path))\n else:\n print(f\"{language}语言不存在\")\n result = None\n return result\n\n\ndef _find_module_from_language_category(\n name: str,\n language: str,\n category: str)->Optional[List[Path]]:\n \"\"\"从某种语言的某个分类中的全部模板中找出某一名字的模板.\"\"\"\n 
print(f\"从{language}的{category}模板中查找模板{name}\")\n p = PMFP_TEMPLATES_HOME.joinpath(language.lower()).joinpath(category.lower())\n if p.is_dir():\n result = find_path(p, lambda path, depth: find_template_detail(name, path))\n else:\n print(f\"{language}语言的{category}分类不存在\")\n result = None\n return result\n\n\ndef _show_template_list_detail(template_list: List[Path])->bool:\n \"\"\"展示模板列表及各个模板细节.\"\"\"\n for path in template_list:\n parts = \"/\".join(path.parts[-3:])\n with open(str(path), encoding=\"utf-8\") as f:\n content = json.load(f)\n print(f\"-----{parts}---------------------\")\n for k, v in content.items():\n print(f\"{k}:\")\n if isinstance(v, Mapping):\n for i, j in v.items():\n print(f'- {i}: {j}')\n else:\n print(f'- {v}')\n return True\n\n\ndef _show_template(\n language: Optional[str] = None,\n category: Optional[str] = None)->bool:\n \"\"\"查找并展示模板列表.\n\n Args:\n language (Optional[str], optional): Defaults to None. 模板使用的语言.\n category (Optional[str], optional): Defaults to None. 模板的分类.\n\n Returns:\n bool: 正确展示返回True,否则返回False.\n\n \"\"\"\n if language is None:\n template_list = _find_all()\n else:\n if category is None:\n template_list = _find_all_language(language)\n else:\n template_list = _find_all_language_category(language, category)\n if not template_list:\n print(\"空的模板分类\")\n result = False\n else:\n result = _show_template_list(template_list)\n return result\n\n\ndef _show_target_template(\n name: str,\n language: Optional[str] = None,\n category: Optional[str] = None)->bool:\n \"\"\"查找并展示模板列表.\n\n Args:\n name (str):模板的名字.\n language (Optional[str], optional): Defaults to None. 模板使用的语言.\n category (Optional[str], optional): Defaults to None. 模板的分类.\n\n Returns:\n bool: 正确展示返回True,否则返回False.\n\n \"\"\"\n if language is None:\n template_list = _find_module_from_all(name)\n else:\n if category is None:\n template_list = _find_module_from_language(name, language)\n else:\n template_list = _find_module_from_language_category(name, language, category)\n if not template_list:\n print(f\"找不到对应的模板{name}\")\n result = False\n else:\n result = _show_template_list_detail(template_list)\n return result\n\n\ndef show(\n name: Optional[str] = None,\n language: Optional[str] = None,\n category: Optional[str] = None)->bool:\n \"\"\"展示模板.\n\n Args:\n name (Optional[str], optional): Defaults to None. 模板的名字,如果有的话则会展示同名模板的详细信息.\n language (Optional[str], optional): Defaults to None. 模板使用的语言.\n category (Optional[str], optional): Defaults to None. 模板的分类.\n\n Returns:\n bool: 正确展示返回True,否则返回False.\n\n \"\"\"\n if name is None:\n result = _show_template(language, category)\n else:\n result = _show_target_template(name, language, category)\n return result\n","sub_path":"pmfp/show/_show_template.py","file_name":"_show_template.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282608214","text":"import unittest\nfrom typing import List\nfrom collections import Counter\n\nclass Solution(unittest.TestCase):\n def totalFruit(self, tree: List[int]) -> int:\n \"\"\"\nIn a row of trees, the i-th tree produces fruit with type tree[i].\n\nYou start at any tree of your choice, then repeatedly perform the following steps:\n\nAdd one piece of fruit from this tree to your baskets. If you cannot, stop.\nMove to the next tree to the right of the current tree. 
If there is no tree to the right, stop.\nNote that you do not have any choice after the initial choice of starting tree: you must perform step 1, then step 2, then back to step 1, then step 2, and so on until you stop.\n\nYou have two baskets, and each basket can carry any quantity of fruit, but you want each basket to only carry one type of fruit each.\n\nWhat is the total amount of fruit you can collect with this procedure?\n\n\n\nExample 1:\n\nInput: [1,2,1]\nOutput: 3\nExplanation: We can collect [1,2,1].\nExample 2:\n\nInput: [0,1,2,2]\nOutput: 3\nExplanation: We can collect [1,2,2].\nIf we started at the first tree, we would only collect [0, 1].\nExample 3:\n\nInput: [1,2,3,2,2]\nOutput: 4\nExplanation: We can collect [2,3,2,2].\nIf we started at the first tree, we would only collect [1, 2].\nExample 4:\n\nInput: [3,3,3,1,2,1,1,2,3,3,4]\nOutput: 5\nExplanation: We can collect [1,2,1,1,2].\nIf we started at the first tree or the eighth tree, we would only collect 4 fruits.\n\n\nNote:\n\n1 <= tree.length <= 40000\n0 <= tree[i] < tree.length\n\n--\nBasic Idea: windowing, valid: meet only 2 types\n\"\"\"\n if not tree:\n return 0\n\n l = r = ret = 0\n meet = Counter()\n while r < len(tree):\n meet[tree[r]] += 1\n r += 1\n\n while len(meet) > 2:\n if meet[tree[l]] == 1:\n del meet[tree[l]]\n else:\n meet[tree[l]] -= 1\n l += 1\n\n ret = max(ret, r-l)\n\n return ret\n\n def testFruit(self):\n self.assertEqual(3, self.totalFruit([1,2,1]))\n self.assertEqual(3, self.totalFruit([0,1,2,2]))\n self.assertEqual(4, self.totalFruit([1,2,3,2,2]))\n self.assertEqual(5, self.totalFruit([3,3,3,1,2,1,1,2,3,3,4]))\n","sub_path":"src/main/python/fruit_into_baskets.py","file_name":"fruit_into_baskets.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"430738154","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom ceidg_api import api\nfrom .mainWindow import Window\nfrom config_parser import config\n\n\nclass Auth(Window):\n def __init__(self, title):\n super().__init__(title)\n self.createWidgets()\n self.centerWindow(self.root)\n\n def createWidgets(self):\n '''Create Frames, labels, entries, buttons etc.'''\n # Frames\n main_frame = tk.Frame(self.root)\n # Labels\n token_label = tk.Label(main_frame, text='Token:')\n # Entries\n self.token_entry = tk.Entry(\n main_frame, width=30)\n # Buttons\n submit = tk.Button(main_frame, text='Sprawdź token', width=10,\n padx=10, font=Window.useFont(12),\n command=lambda: self.checkToken(\n self.token_entry.get().strip()))\n\n # Pack\n token_label.pack()\n self.token_entry.pack()\n submit.pack(pady=(10, 0))\n main_frame.pack(padx=10, pady=(5, 15))\n\n def checkToken(self, token):\n '''Check if given token is correct.'''\n if api.validateToken(token):\n config.createConfig(token)\n messagebox.showinfo('Sukces', 'Poprawny token.')\n self.root.destroy()\n else:\n # Here some message BOX\n messagebox.showinfo('Error', 'Niepoprawny token.')\n","sub_path":"window_modules/authWindow.py","file_name":"authWindow.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650094160","text":"import asyncio\nimport os.path\n\nfrom .plugin_mount import PluginMount\n\nfrom random import choice\nfrom subprocess import check_output\n\n\nclass CommandPlugin(metaclass=PluginMount):\n \"\"\"\n Class that stores the instances for each command.\n Every command must contain a command 
string property and a func function\n \"\"\"\n\n def func(self):\n raise NotImplementedError(\"Please implement this method.\")\n\n def __init__(self):\n raise NotImplementedError(\n \"Please implement a command variable and a helpstring variable.\")\n\n\nclass CommandObey(CommandPlugin):\n\n def __init__(self):\n self.command = \"!obey\"\n\n self.helpstring = \"!obey: \" \\\n \"This only works if you are one of the chosen ones.\"\n\n self.obey_dict = {\n # neosloth\n \"120767447681728512\": \"I obey.\",\n # Average Dragon\n \"182268688559374336\": \"Eat a dick, dragon.\"\n }\n\n def func(self, parent, message):\n if message.author.id in self.obey_dict.keys():\n return self.obey_dict[message.author.id]\n else:\n return \"I will not obey.\"\n\n\nclass CommandPing(CommandPlugin):\n\n def __init__(self):\n self.command = \"!pingme\"\n self.helpstring = \"!pingme: Pings the message.author.\"\n\n def func(self, parent, message):\n return message.author.mention\n\n\nclass CommandAddMe(CommandPlugin):\n \"\"\"!addme: The link to add Bolas to your Discord room.\"\"\"\n\n def __init__(self):\n self.command = \"!addme\"\n self.helpstring = \"!addme: \" \\\n \"The link to add Bolas to your Discord server.\"\n\n def func(self, parent, message):\n return \"https://discordapp.com/oauth2/authorize?client_id=245372541915365377&scope=bot&permissions=0\"\n\n\nclass CommandCoin(CommandPlugin):\n\n def __init__(self):\n self.command = \"!coin\"\n self.helpstring = \"!coin: Flips a coin.\"\n\n def func(self, parent, message):\n return choice([\"Heads\", \"Tails\"])\n\n\nclass CommandChoice(CommandPlugin):\n\n def __init__(self):\n self.command = \"!choose\"\n self.helpstring = \"!choose: Chooses an option. \"\\\n \"Example: !choose apples or oranges\"\n\n def func(self, parent, message):\n return \"I choose: {0}\".format(\n choice(\" \".join(message.content.split(\" \")[1:]).split(\" or \")))\n\n\nclass CommandGit(CommandPlugin):\n\n def __init__(self):\n self.command = \"!git\"\n self.helpstring = \"!git: Repo link and changelog.\"\n\n def func(self, parent, message):\n return \"{0}\\n{1}\\n```{2}```\".format(\n \"https://gitlab.com/neosloth/bolas\",\n \"https://github.com/superstepa/bolas\",\n check_output(\"git log --oneline -3\", shell=True).decode(\"utf-8\"))\n\n\nclass CommandStats(CommandPlugin):\n\n def __init__(self):\n self.command = \"!stats\"\n self.helpstring = \"!stats: Return the number of users and servers served.\"\n\n def func(self, parent, message):\n num_servers = len(parent.servers)\n #parents.server.members\n num_users = sum([len(server.members) for server in parent.servers])\n return \"Fetching cards for {} servers and {} users\".format(\n num_servers,\n num_users\n )\n\n\nclass CommandCockatrice(CommandPlugin):\n\n def __init__(self):\n self.command = \"!cockatrice\"\n self.helpstring = \"!cockatrice: Add yourself to the cockatrice role.\"\n self.role_name = \"Cockatrice\"\n\n def func(self, parent, message):\n\n if message.server is None:\n return \"Sorry, I can't set roles in PMs.\"\n\n # The discord bot Client only stores the user,\n # so we have to manually get the Member object\n client_member = message.server.get_member(parent.user.id)\n\n sufficient_permissions = message.channel.permissions_for(\n client_member).manage_roles\n\n if not sufficient_permissions:\n return \"I do not have sufficient permissions to set roles.\"\n\n cockatrice_role = None\n\n # Find the appropriate role object\n for role in message.server.roles:\n if role.name == self.role_name:\n cockatrice_role = 
role\n\n # Can't do anything if the role doesn't exist\n if cockatrice_role is None:\n return \"Sorry, this server does not have a cockatrice role.\"\n\n if cockatrice_role in message.author.roles:\n # The remove role method is a coroutine so we have to wrap it in an asyncio call\n asyncio.ensure_future(\n parent.remove_roles(message.author, cockatrice_role)\n )\n return \"Removed {0.name} from the Cockatrice role.\".format(\n message.author\n )\n else:\n # The add role method is a coroutine so we have to wrap it in an asyncio call\n asyncio.ensure_future(\n parent.add_roles(message.author, cockatrice_role)\n )\n return \"Added {0.name} to the Cockatrice role.\".format(\n message.author\n )\n\n\nclass CommandRule(CommandPlugin):\n def __init__(self):\n self.command = \"!rule\"\n self.helpstring = \"!rule {rule number or set of keywords.}: Cite a mtg rule.\"\n self.ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n # Move 1 directory up and into misc\n self.FILE_NAME = os.path.realpath(os.path.join(self.ROOT_DIR, \"../misc/MagicCompRules_20170707.txt\"))\n self.RULE_LIMIT = 10\n\n def get_rule(self, args):\n # First argument (presumably the rule number)\n num = args[1]\n # All the words after the command\n tokens = args[1:]\n result = \"\"\n rule_count = 0\n\n try:\n with open(self.FILE_NAME, \"r\", encoding=\"utf-8\") as f:\n # Using enumerate so the file is read sequentially and is not stored in memory\n for i, line in enumerate(f):\n if (line.startswith(str(num))):\n return line\n # Append the rule number if all the words are in that substring.\n # Only check the lines that start with a number.\n # Also check if we've gone over our rule count\n if (line[0].isdigit() and all(word.lower() in line.lower() for word in tokens) and rule_count < self.RULE_LIMIT):\n result = \"{}* {}\\n\".format(result, line.split(\" \")[0])\n rule_count += 1\n\n if rule_count >= self.RULE_LIMIT:\n result += \"The query returned too many results, so some of the results were omitted. 
Please provide more keywords to narrow the search down.\"\n\n return result or \"Could not find the matching rule.\"\n except FileNotFoundError:\n return \"Could not find the magic comprehensive rules file.\"\n\n def func(self, parent, message):\n args = message.content.split()\n if len(args) > 1:\n # Surround the result with markdown code tags (for nice bullets)\n return \"```markdown\\n{}```\".format(self.get_rule(args))\n else:\n return \"Please provide a rule number or a set of keywords.\"\\\n \" See the full list of rules here: http://magic.wizards.com/en/game-info/gameplay/rules-and-formats/rules\"\n","sub_path":"src/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369061407","text":"#!/usr/bin/env python3\n\nimport psycopg2\n\n# Create article_view\n#\n# create view article_view as\n# select author,title,count(*) as views\n# from articles,log\n# where log.path like concat('%',articles.slug)\n# group by articles.title,articles.author\n# order by views desc;\n\nquestion1 = (\"What are the three most popular articles of all time?\")\nquery1 = (\"select title,views from article_view limit 3;\")\n\nquestion2 = (\"Who are the most popular article authors of all time?\")\nquery2 = (\"\"\"\n select authors.name,sum(article_view.views) as views\n from article_view,authors\n where authors.id = article_view.author\n group by authors.name\n order by views desc;\n \"\"\")\n\n# Create new_log_view\n#\n# create view new_log_view as\n# select date(time),round(100.0*sum(case\n# log.status when '200 OK' then 0 else 1 end)/\n# count(log.status),2) as \"error rate\"\n# from log\n# group by date(time)\n# order by \"error rate\" desc;\n\nquestion3 = (\"On which days did more than 1% of requests lead to errors?\")\nquery3 = (\"select * from new_log_view where \\\"error rate\\\" > 1;\")\n\n\ndef connect(database=\"news\"):\n \"\"\"\n Connects to 'news' database via pyscopg2\n \"\"\"\n try:\n connection = psycopg2.connect(\"dbname={}\".format(database))\n cursor = connection.cursor()\n return connection, cursor\n except (Exception, psycopg2.Error) as error:\n print(\"Database connection failed: \", error)\n connection.close()\n\n\ndef results(query):\n \"\"\"\n Fetches results from PostgreSQL queries\n \"\"\"\n connection, cursor = connect()\n cursor.execute(query)\n return cursor.fetchall()\n connection.close()\n\n\ndef print_results(query_results):\n \"\"\"\n Prints results for query1 and query2\n \"\"\"\n print(query_results[1])\n for index, results in enumerate(query_results[0]):\n print(str(results[0] + \" - \" + str(results[1]) + \" views.\"))\n print(\"\\n\")\n\n\ndef print_request_errors(query_results):\n \"\"\"\n Prints results for query3\n \"\"\"\n print(query_results[1])\n for results in query_results[0]:\n print(str(str(results[0]) + \" - \"\n + str(results[1]) + \"% request errors.\"))\n\n\nif __name__ == '__main__':\n popular_articles = results(query1), question1\n popular_authors = results(query2), question2\n request_errors = results(query3), question3\n print_results(popular_articles)\n print_results(popular_authors)\n print_request_errors(request_errors)\n","sub_path":"vagrant/logs-analysis/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482029206","text":"import PySimpleGUI as sg\nimport pickle\nfrom device import 
Device\nfrom groups import Group\n\nsg.theme('BrownBlue')\n# Groups/Topics\n# groups = {'Group 1': ['Light 1', 'Light 2', 'Controller 1'],\n#           'Group 2': ['Temperature', 'RGB', 'Controller 2']}\ngroups = {}\n# groups_test = {}\n# groups = {'Group 1': []}\ndevices = {}\n\n# Input and Output Peripherals\nInput = ['Button', 'Potentiometer', 'Temperature Sensor']\nOutput = ['LED', 'RGB', 'Buzzer']\n\n\ndef load():\n    \"\"\" Load the pickled groups dict from disk; fall back to empty if the file is missing or empty. \"\"\"\n    try:\n        with open('groups.pkl', \"rb\") as f:\n            return pickle.load(f)\n    except (FileNotFoundError, EOFError):\n        return {}\n\n\ndef save():\n    with open('groups.pkl', 'wb') as output:  # Overwrites any existing file.\n        pickle.dump(groups, output, pickle.HIGHEST_PROTOCOL)\n\n\ndef new_device():\n    i_col = [[]]\n    for i in Input:\n        i_col.append([sg.Checkbox(i, size=(20, 1), key='-in' + i + '-')])\n\n    o_col = [[]]\n    for i in Output:\n        o_col.append([sg.Checkbox(i, size=(20, 1), key='-out' + i + '-')])\n\n    k = []\n    for keys, values in groups.items():\n        k.append(keys)\n\n    layout = [[sg.Text('Name:', pad=(0, 10)), sg.InputText('', key='-name-')],\n              [sg.Text('Group:', pad=(0, 10)), sg.InputCombo(k, key='-group-')],\n              [sg.Column([[sg.Frame('Input', i_col), sg.Frame('Output', o_col)]])],\n              [sg.Button('Add', pad=(20, 10)), sg.Cancel(pad=(20, 10))]]\n\n    window = sg.Window(\"New Device\", auto_size_text=True, auto_size_buttons=True, resizable=True,\n                       layout=layout, default_element_size=(12, 1), default_button_element_size=(12, 1),\n                       font=(\"Helvetica\", 20))\n    while True:\n        event, values = window.read()\n        if event == 'Cancel' or event == sg.WIN_CLOSED:\n            break\n        if event == 'Add':\n            # Make sure that neither Name nor Group is empty\n            if str(values['-name-']) == '' or str(values['-group-']) == '':\n                sg.popup('You must fill in all fields!')\n                continue\n\n            name = str(values['-name-'])\n            input = []\n            output = []\n            for i in Input:\n                if values['-in' + i + '-'] is True:\n                    input.append(i)\n\n            for i in Output:\n                if values['-out' + i + '-'] is True:\n                    output.append(i)\n\n            # print(name, group, input, output)\n            new_dev = Device(name, input, output)\n            devices[new_dev.name] = new_dev\n            # append to the chosen group, creating the group's device list on first use\n            groups.setdefault(str(values['-group-']), []).append(new_dev.name)\n\n            # groups_test[str(values['-group-'])].add(new_dev)  # Add class Device to class Group\n            save()\n            break\n\n    window.close()\n\n\ndef new_group():\n    device_col = [[]]\n    for i in devices:\n        device_col.append([sg.Checkbox(i, size=(20, len(devices)), key='-d' + i + '-')])\n\n    layout = [[sg.Text('Name:', pad=(0, 10)), sg.InputText('', key='-name-')],\n              [sg.Column([[sg.Frame('Devices', device_col)]])],\n              [sg.Button('Add', pad=(20, 10)), sg.Cancel(pad=(20, 10))]]\n\n    window = sg.Window(\"New Group\", auto_size_text=True, auto_size_buttons=True, resizable=True,\n                       layout=layout, default_element_size=(12, 1), default_button_element_size=(12, 1),\n                       font=(\"Helvetica\", 20))\n    while True:\n        event, values = window.read()\n        if event == 'Cancel' or event == sg.WIN_CLOSED:\n            break\n        if event == 'Add':\n            if values['-name-'] not in groups.keys():  # if the name is not already taken\n                d_col = []\n                for i in devices:  # loops through all devices\n                    if values['-d' + i + '-'] is True:  # if a device is checked\n                        if not any(d_col):  # if it's the first device\n                            d_col = [i]  # add it as the primary\n                        else:  # if not\n                            d_col.append(i)  # add it to the group\n\n                groups[values['-name-']] = d_col  # add the group to the groups dictionary\n                print(groups)\n
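                # NOTE: at this point `groups` maps each group name to a list of\n                # device-name strings whose first entry is treated as the primary\n                # device, e.g. {'Group 1': ['Light 1', 'Light 2']} (names borrowed\n                # from the sample data at the top of this file).\n                # groups_test[values['-name-']] = 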
Group(values['-name-'], d_col)\n break\n else:\n sg.popup('This Group Name is already being used.')\n\n window.close()\n\n\ndef group_column():\n column = [[]]\n for i in groups:\n column.append([sg.Text(str(i + ':'), key=str(i))])\n column.append([sg.Listbox(values=groups[i], size=(20, len(groups[i])), key='-g' + i + '-')])\n return column\n\n\ndef rename_group():\n # Get group names\n k = []\n for keys, values in groups.items():\n k.append(keys)\n\n # Make a window to rename the group\n event, values = sg.Window(str('Rename Group'), [\n [sg.Text('Select Group:'), sg.InputCombo(k, key='-group-')],\n [sg.Text('Enter new name:'), sg.InputText('', key='-new-')],\n [sg.Button('Submit'), sg.Button('Cancel')]], font=(\"Helvetica\", 20),\n default_element_size=(12, 1), default_button_element_size=(12, 1)).read(\n close=True)\n # If submitted, change names and refresh window\n if event == 'Submit':\n groups[values['-new-']] = groups.pop(values['-group-'])\n\n\ndef del_group():\n # Get group names\n k = []\n for keys, values in groups.items():\n k.append(keys)\n\n # Make a window to rename the group\n event, values = sg.Window(str('Delete Group'), [\n [sg.Text('Select Group:'), sg.InputCombo(k, key='-group-')],\n [sg.Button('Submit'), sg.Button('Cancel')]], font=(\"Helvetica\", 20),\n default_element_size=(12, 1), default_button_element_size=(12, 1)).read(\n close=True)\n # If submitted, change names and refresh window\n if event == 'Submit':\n sure = sg.popup_yes_no('Are you sure you would like to delete ' + values['-group-'] + '?')\n if sure == 'Yes':\n groups.pop(values['-group-'], None)\n\n\ndef rename_dev():\n # Get device names\n k = []\n for keys, values in devices.items():\n k.append(keys)\n\n # Make a window to rename the device\n event, values = sg.Window(str('Rename Device'), [\n [sg.Text('Select Device:'), sg.InputCombo(k, key='-devices-')],\n [sg.Text('Enter new name:'), sg.InputText('', key='-new-')],\n [sg.Button('Submit'), sg.Button('Cancel')]], font=(\"Helvetica\", 20),\n default_element_size=(12, 1), default_button_element_size=(12, 1)).read(\n close=True)\n # If submitted, change names and refresh window\n if event == 'Submit':\n devices[values['-new-']] = devices.pop(values['-devices-'])\n for key, val in groups.items():\n if values['-devices-'] in val:\n val.remove(values['-devices-'])\n val.append(values['-new-'])\n groups[key] = val\n\n\ndef del_dev():\n # Get group names\n k = []\n for keys, values in devices.items():\n k.append(keys)\n\n # Make a window to rename the group\n event, values = sg.Window(str('Delete Device'), [\n [sg.Text('Select Device:'), sg.InputCombo(k, key='-device-')],\n [sg.Button('Submit'), sg.Button('Cancel')]], font=(\"Helvetica\", 20),\n default_element_size=(12, 1), default_button_element_size=(12, 1)).read(\n close=True)\n # If submitted, change names and refresh window\n if event == 'Submit':\n sure = sg.popup_yes_no('Are you sure you would like to delete ' + values['-device-'] + '?')\n if sure == 'Yes':\n devices.pop(values['-devices-'])\n for key, val in groups.items():\n if values['-devices-'] in val:\n val.remove(values['-devices-'])\n groups[key] = val\n\n\ndef main():\n # Menu Definition\n menu_def = [['New', ['New Device', 'New Group']], ['Edit', ['Rename', ['Group::R', 'Device::R'],\n 'Delete', ['Group::D', 'Device::D']]]]\n #groups = load()\n if not groups:\n layout = [[sg.Menu(menu_def, tearoff=False, key='-Menu-', font=(\"Helvetica\", 12))],\n [sg.Text('No Groups or Devices have been added.')],\n [sg.Text('Add a Group or Device through 
the Menu')]]\n else:\n # Make the column containing the Groups and Devices\n column_1 = group_column()\n\n layout = [[sg.Menu(menu_def, tearoff=False, key='-Menu-', font=(\"Helvetica\", 12))],\n [sg.Column([[sg.Frame('', column_1, key='-Groups-')]])],\n [sg.Button(\"OK\", pad=(0, 10))]]\n\n # Create the window\n window = sg.Window(\"Main Interface\", auto_size_text=True, auto_size_buttons=True, resizable=True,\n layout=layout, default_element_size=(12, 1), default_button_element_size=(12, 1),\n size=(800, 500), font=(\"Helvetica\", 20))\n\n # Create an event loop\n while True:\n event, values = window.read()\n # End program if user closes window or\n # presses the OK button\n if event == \"OK\" or event == sg.WIN_CLOSED:\n save()\n break\n\n if event == 'New Device':\n new_device()\n for i in groups:\n window.Element('-g' + i + '-').Update(values=groups[i])\n main()\n\n if event == 'New Group':\n new_group()\n main() # Refresh the interface\n break\n\n if event == 'Group::R':\n rename_group()\n main()\n break\n\n if event == 'Group::D':\n del_group()\n main()\n break\n\n if event == 'Device::R':\n rename_dev()\n main()\n break\n if event == 'Device::D':\n del_dev()\n main()\n break\n\n save()\n window.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Interface/MQTT/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":10175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650294903","text":"import sys\n\nif __name__ == \"__main__\":\n T = int(input())\n \n for i in range(T):\n c, p = map(int, input().split())\n \n print('You get {0} piece(s) and your dad gets {1} piece(s).'.format(c//p, c%p))\n ","sub_path":"python/10178.py","file_name":"10178.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"249754118","text":"import sqlite3\n\nclass dbopener():\n def __init__(self):\n self.conn = sqlite3.connect('scores.db')\n self.c = self.conn.cursor()\n\nclass menu():\n def __init__(self, db):\n self.db = db\n self.choice = ''\n\n def chooseScoreboard(self):\n self.choice = input('Do you want to\\n[1] See all scores ordered by average\\n[2] See all scores ordered by minimum time\\n[3] See top10 best averages\\n[4] See top10 fastest single scores\\n$ ')\n print('\\n\\n\\n########################################################################\\n')\n if self.choice == '1':\n self.allScoresAvg()\n elif self.choice == '3':\n self.bestAverage()\n elif self.choice == '4':\n self.bestMinimum()\n elif self.choice == '2':\n self.allScoresMin()\n else:\n print('Invalid option.')\n return\n print('\\n########################################################################\\n\\n\\n')\n\n def allScoresAvg(self):\n self.db.c.execute('SELECT * FROM highscores ORDER BY avgscore')\n data = self.db.c.fetchall()\n j = 0\n for i in data:\n print('#{3}\\tName: {0}\\tAverage: {1:.5f}\\tMinimum: {2:.5f}'.format(i[0], i[1], i[2], j+1))\n j+=1\n\n def allScoresMin(self):\n self.db.c.execute('SELECT * FROM highscores ORDER BY minscore')\n data = self.db.c.fetchall()\n j = 0\n for i in data:\n print('#{3}\\tName: {0}\\tAverage: {1:.5f}\\tMinimum: {2:.5f}'.format(i[0], i[1], i[2], j+1))\n j+=1\n\n def bestAverage(self):\n self.db.c.execute('SELECT * FROM highscores ORDER BY avgscore')\n data = self.db.c.fetchall()\n j = 0\n for i in range(10):\n if i < len(data):\n print('#{3}\\tName: {0}\\tAverage: {1:.5f}\\tMinimum: 
{2:.5f}'.format(data[i][0], data[i][1], data[i][2], i+1))\n\n    def bestMinimum(self):\n        self.db.c.execute('SELECT * FROM highscores ORDER BY minscore')\n        data = self.db.c.fetchall()\n        for i in range(10):\n            if i < len(data):\n                print('#{3}\\tName: {0}\\tAverage: {1:.5f}\\tMinimum: {2:.5f}'.format(data[i][0], data[i][1], data[i][2], i+1))\n\nMyDb = dbopener()\nMyMenu = menu(MyDb)\n\nMyMenu.chooseScoreboard()\n","sub_path":"reactiongame/highscores.py","file_name":"highscores.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"639965140","text":"\nimport os\nimport openpyxl as xl\nfrom InvoiceGenerator.pdf import SimpleInvoice\nfrom InvoiceGenerator.api import Invoice, Item, Client, Provider, Creator\n\nwb = xl.load_workbook('CustomerInfo.xlsx')\nsheet = wb['Sheet1']\n\n\n# choose English as the language\nos.environ[\"INVOICE_LANG\"] = \"en\"\n# generate a PDF invoice for every customer row that is not marked as paid\nfor row in range(2, sheet.max_row + 1):\n    paid = sheet.cell(row, 3).value\n    if not paid:\n        client = Client(summary=f'Name: {sheet.cell(row, 1).value}', address=sheet.cell(row, 5).value, zip_code=sheet.cell(row, 8).value, city=sheet.cell(row, 6).value, vat_id=f'tax id: {sheet.cell(row, 4).value}', email=sheet.cell(row, 2).value)\n        provider = Provider('Shira Teichman', bank_account='2600420569', bank_code='2010')\n        creator = Creator('John Doe')\n        invoice = Invoice(client, provider, creator)\n        invoice.currency_locale = 'en_US.UTF-8'\n        invoice.add_item(Item(32, 600, description=\"Item 1\"))\n        invoice.add_item(Item(60, 50, description=\"Item 2\", tax=21))\n        invoice.add_item(Item(50, 60, description=\"Item 3\", tax=0))\n        invoice.add_item(Item(5, 600, description=\"Item 4\", tax=15))\n        pdf = SimpleInvoice(invoice)\n        pdf.gen(\"invoice.pdf\", generate_qr_code=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"531739576","text":"# -*- coding: utf-8 -*-\n\nimport os.path\nimport datetime\nimport posixpath\nimport time\n\nfrom django.utils.http import http_date\n\nimport djcelery\n\ngettext = lambda s: s\n\ndjcelery.setup_loader()\n\nPROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\n\nDEBUG = False\nTEMPLATE_DEBUG = True\n\nSERVE_MEDIA = DEBUG\n
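\n# NOTE: SERVE_MEDIA mirrors DEBUG so that Django itself serves static/media\n# files only during local development; in production a front-end web server\n# is expected to handle them.\n\n# django-compressor is turned off by default due to deployment overhead for\n# most users. 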
See <URL> for more information\nCOMPRESS = False\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": \"crate\",\n }\n}\n\nTIME_ZONE = \"UTC\"\nLANGUAGE_CODE = \"en-us\"\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [\n os.path.join(PROJECT_ROOT, os.pardir, \"locale\"),\n]\n\nLANGUAGES = (\n (\"en\", u\"English\"),\n (\"es\", u\"Español\"),\n (\"fr\", u\"Français\"),\n (\"de\", u\"Deutsch\"),\n (\"pt-br\", u\"Português (Br)\"),\n (\"ru\", u\"Русский\"),\n # (\"sv\", u\"Svenska\"),\n)\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, \"site_media\", \"media\")\nMEDIA_URL = \"/site_media/media/\"\n\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, \"site_media\", \"static\")\nSTATIC_URL = \"/site_media/static/\"\n\nADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, \"admin/\")\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_ROOT, \"static\"),\n]\n\nSTATICFILES_FINDERS = [\n \"staticfiles.finders.FileSystemFinder\",\n \"staticfiles.finders.AppDirectoriesFinder\",\n \"staticfiles.finders.LegacyAppDirectoriesFinder\",\n \"compressor.finders.CompressorFinder\",\n]\n\nCOMPRESS_OUTPUT_DIR = \"cache\"\n\nTEMPLATE_LOADERS = [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n]\n\nMIDDLEWARE_CLASSES = [\n \"django_hosts.middleware.HostsMiddleware\",\n \"djangosecure.middleware.SecurityMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django_openid.consumer.SessionConsumer\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n\n \"pagination.middleware.PaginationMiddleware\",\n\n \"pinax.apps.account.middleware.LocaleMiddleware\",\n \"pinax.middleware.security.HideSensistiveFieldsMiddleware\",\n]\n\nROOT_URLCONF = \"crate_project.urls\"\nROOT_HOSTCONF = \"crate_project.hosts\"\n\nDEFAULT_HOST = \"default\"\n\nWSGI_APPLICATION = \"crate_project.wsgi.application\"\n\nTEMPLATE_DIRS = [\n os.path.join(PROJECT_ROOT, \"templates\"),\n]\n\nJINJA_TEMPLATE_DIRS = [\n os.path.join(PROJECT_ROOT, \"templates\", \"_jinja2\"),\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = [\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n\n \"staticfiles.context_processors.static\",\n\n \"pinax.core.context_processors.pinax_settings\",\n\n \"pinax.apps.account.context_processors.account\",\n\n # \"notification.context_processors.notification\",\n \"announcements.context_processors.site_wide_announcements\",\n]\n\nINSTALLED_APPS = [\n # Admin Dashboard\n \"admin_tools\",\n \"admin_tools.theming\",\n \"admin_tools.menu\",\n \"admin_tools.dashboard\",\n\n # Django\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"django.contrib.humanize\",\n \"django.contrib.markup\",\n\n \"pinax.templatetags\",\n\n # theme\n \"pinax_theme_bootstrap\",\n\n # external (Pinax)\n # \"notification\", # must be first\n \"staticfiles\",\n \"pagination\",\n \"compressor\",\n \"django_openid\",\n 
\"timezones\",\n \"emailconfirmation\",\n \"announcements\",\n \"idios\",\n \"metron\",\n\n # Pinax\n \"pinax.apps.account\",\n \"pinax.apps.signup_codes\",\n\n # external (Project)\n \"south\",\n \"djcelery\",\n \"django_hosts\",\n \"haystack\",\n \"storages\",\n \"intercom\",\n \"celery_haystack\",\n \"tastypie\",\n \"djangosecure\",\n\n # project\n \"about\",\n \"aws_stats\",\n \"profiles\",\n \"packages\",\n \"pypi\",\n \"search\",\n \"crate\",\n \"evaluator\",\n \"favorites\",\n \"history\",\n \"helpdocs\",\n]\n\nFIXTURE_DIRS = [\n os.path.join(PROJECT_ROOT, \"fixtures\"),\n]\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\nABSOLUTE_URL_OVERRIDES = {\n \"auth.user\": lambda o: \"/profiles/profile/%s/\" % o.username,\n}\n\nAUTH_PROFILE_MODULE = \"profiles.Profile\"\nNOTIFICATION_LANGUAGE_MODULE = \"account.Account\"\n\nCONTACT_EMAIL = \"support@crate.io\"\n\nACCOUNT_OPEN_SIGNUP = True\nACCOUNT_USE_OPENID = True\nACCOUNT_REQUIRED_EMAIL = True\nACCOUNT_EMAIL_VERIFICATION = True\nACCOUNT_EMAIL_AUTHENTICATION = False\nACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = True\n\nAUTHENTICATION_BACKENDS = [\n \"pinax.apps.account.auth_backends.AuthenticationBackend\",\n]\n\nPASSWORD_HASHERS = (\n \"django.contrib.auth.hashers.BCryptPasswordHasher\",\n \"django.contrib.auth.hashers.PBKDF2PasswordHasher\",\n \"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.MD5PasswordHasher\",\n \"django.contrib.auth.hashers.CryptPasswordHasher\",\n)\n\nLOGIN_URL = \"/account/login/\"\nLOGIN_REDIRECT_URLNAME = \"what_next\"\nLOGOUT_REDIRECT_URLNAME = \"search\"\n\nEMAIL_CONFIRMATION_DAYS = 2\nEMAIL_DEBUG = DEBUG\n\nDEBUG_TOOLBAR_CONFIG = {\n \"INTERCEPT_REDIRECTS\": False,\n}\n\nCELERY_SEND_TASK_ERROR_EMAILS = True\nCELERY_DISABLE_RATE_LIMITS = True\nCELERY_TASK_PUBLISH_RETRY = True\n\nCELERYD_MAX_TASKS_PER_CHILD = 10000\n\nCELERY_IGNORE_RESULT = True\n\nCELERY_TASK_RESULT_EXPIRES = 7 * 24 * 60 * 60 # 7 Days\n\nCELERYD_HIJACK_ROOT_LOGGER = False\n\nCELERYBEAT_SCHEDULER = \"djcelery.schedulers.DatabaseScheduler\"\n\nHAYSTACK_SEARCH_RESULTS_PER_PAGE = 15\n\nAWS_QUERYSTRING_AUTH = False\nAWS_S3_SECURE_URLS = False\n\nAWS_HEADERS = {\n \"Expires\": lambda: http_date(time.mktime((datetime.datetime.now() + datetime.timedelta(days=365)).timetuple())),\n \"Cache-Control\": \"max-age=31556926\",\n}\n\n\nMETRON_SETTINGS = {\n \"google\": {3: \"UA-28759418-1\"},\n \"gauges\": {3: \"4f1e4cd0613f5d7003000002\"}\n}\n\nADMIN_TOOLS_INDEX_DASHBOARD = \"crate.dashboard.CrateIndexDashboard\"\n","sub_path":"crate_project/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90236706","text":"# coding=UTF-8\nimport pygame\n\nclass Pont:\n\tdef __init__(self, koord):\n\t\tself.koord = tuple(map(int, koord))\n\t\tself.ert = float(\"inf\")\n\t\tself.szomszedok = []\n\t\t\n\tdef __str__(self):\n\t\treturn str(self.koord)\n\n# Beolvasás\n\nwith open(\"graf-0.csv\") as be:\n\tszel, mag, pontdb, eldb = tuple(map(int, be.readline().split(\";\")))\n\t\n\tpontok = []\n\tfor _ in range(pontdb):\n\t\tpontok.append(Pont(be.readline().split(\";\")[1:3]))\n\t\t\n\telek = []\n\tfor _ in range(eldb):\n\t\telek.append(list(map(lambda x: int(x)-1, be.readline().split(\";\")[:3])))\n\t\telek[-1][2] += 1\n\t\tpontok[elek[-1][0]].szomszedok.append((elek[-1][1], 
elek[-1][2]))\n\t\tpontok[elek[-1][1]].szomszedok.append((elek[-1][0], elek[-1][2]))\n\t\t\n\t#print(pontok)\n\t#print(elek)\n\n# Dijkstra's shortest-path search from the chosen start node\n\nkezd = int(input(\"Index of the start node: \"))\ncel = int(input(\"Index of the target node: \"))\n\npontok[kezd].ert = 0\nismert = {kezd}\n\nfor i, ert in pontok[kezd].szomszedok:\n\tpontok[i].ert = ert\n\t\nperem = {(pontok[ssz].ert, ssz) for ssz, _ in pontok[kezd].szomszedok}\n\nwhile len(perem) > 0:\n\tminert, minssz = min(perem)\n\t\n\tperem.remove((minert, minssz))\n\tismert.add(minssz)\n\tif minssz == cel:\n\t\tbreak\n\t\n\tfor i, ert in pontok[minssz].szomszedok:\n\t\tif i not in ismert:\n\t\t\tif pontok[i].ert > minert + ert:\n\t\t\t\tpontok[i].ert = minert + ert\n\t\t\tperem.add((pontok[i].ert, i))\n\nprint(pontok[cel].ert)\n\n# Load pygame, draw the background\n\npygame.init()\nscreen = pygame.display.set_mode((szel, mag))\npygame.draw.rect(screen, (255, 255, 255), (0, 0, szel, mag))\n\npygame.font.init()\nbetu = pygame.font.SysFont(\"calibri\", 16)\nszoveg = betu.render(\"Graph\", False, (0, 0, 0))\nscreen.blit(szoveg, (10, 10))\n\n# main loop\n\nvege = False\nwhile not vege:\n\t# Check whether the program should quit\n\t\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tvege = True\n\t\n\t# Draw the nodes and edges\n\t\n\tfor pont in pontok:\n\t\tpygame.draw.circle(screen, (0, 0, 0), pont.koord, 5)\n\t\t\n\tfor el in elek:\n\t\tpygame.draw.line(screen, (0, 0, 0), pontok[el[0]].koord, pontok[el[1]].koord, 1)\n\t\t\n\t# End of drawing\n\t\t\n\tpygame.display.flip()\n","sub_path":"graf-0.py","file_name":"graf-0.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"461822436","text":"'''\nCreated on Jan 31, 2016\n\n@author: johannes niederhauser\n\ntrying python for the first time\n'''\n\nsecret = 100\nguess = 0\ncounter = 0\n\nwhile guess != secret:\n    guess = int(input(\"Guess the number: \"))\n    \n    if guess < secret:\n        print(\"too low\")\n    \n    if guess > secret:\n        print(\"too high\")\n    \n    counter = counter + 1\n\nprint(\"You have guessed the number within\", counter, \"attempts.\")","sub_path":"GuessNumbers/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"418226388","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport collections.abc\nimport copy\nimport numpy as np\nfrom gammapy.maps import Map\nfrom gammapy.modeling.models import Models, ProperModels\nfrom gammapy.utils.scripts import make_name, make_path, read_yaml, write_yaml\nfrom gammapy.utils.table import table_from_row_data\n\n__all__ = [\"Dataset\", \"Datasets\"]\n\n\nclass Dataset(abc.ABC):\n    \"\"\"Dataset abstract base class.\n\n    TODO: add a tutorial on how to create your own dataset types.\n\n    For now, see the existing examples in Gammapy for how this works:\n\n    - `gammapy.cube.MapDataset`\n    - `gammapy.spectrum.SpectrumDataset`\n    - `gammapy.spectrum.FluxPointsDataset`\n    \"\"\"\n\n    _residuals_labels = {\n        \"diff\": \"data - model\",\n        \"diff/model\": \"(data - model) / model\",\n        \"diff/sqrt(model)\": \"(data - model) / sqrt(model)\",\n    }\n\n    @property\n    def mask(self):\n        \"\"\"Combined fit and safe mask\"\"\"\n        mask_safe = (\n            self.mask_safe.data if isinstance(self.mask_safe, Map) else self.mask_safe\n        )\n        mask_fit = (\n            self.mask_fit.data if isinstance(self.mask_fit, Map) else self.mask_fit\n        )\n
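        # The masks may be stored as plain boolean arrays or as gammapy Map\n        # objects, so both were normalised to arrays above; combine them with\n        # a logical AND where both exist, fall back to whichever one is set,\n        # and return None when neither mask is defined.\n        if mask_safe is not None and mask_fit is not None:\n            mask = mask_safe 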
& mask_fit\n elif mask_fit is not None:\n mask = mask_fit\n elif mask_safe is not None:\n mask = mask_safe\n else:\n mask = None\n return mask\n\n def stat_sum(self):\n \"\"\"Total statistic given the current model parameters.\"\"\"\n stat = self.stat_array()\n\n if self.mask is not None:\n stat = stat[self.mask]\n\n return np.sum(stat, dtype=np.float64)\n\n @abc.abstractmethod\n def stat_array(self):\n \"\"\"Statistic array, one value per data point.\"\"\"\n\n def copy(self, name=None):\n \"\"\"A deep copy.\"\"\"\n new = copy.deepcopy(self)\n name = make_name(name)\n new._name = name\n # propagate new dataset name\n if new._models is not None:\n for m in new._models:\n if m.datasets_names is not None:\n for k, d in enumerate(m.datasets_names):\n if d == self.name:\n m.datasets_names[k] = name\n if hasattr(new, \"background_model\") and m == new.background_model:\n m._name = name + \"-bkg\"\n return new\n\n @staticmethod\n def _compute_residuals(data, model, method=\"diff\"):\n with np.errstate(invalid=\"ignore\"):\n if method == \"diff\":\n residuals = data - model\n elif method == \"diff/model\":\n residuals = (data - model) / model\n elif method == \"diff/sqrt(model)\":\n residuals = (data - model) / np.sqrt(model)\n else:\n raise AttributeError(\n f\"Invalid method: {method!r}. Choose between 'diff',\"\n \" 'diff/model' and 'diff/sqrt(model)'\"\n )\n return residuals\n\n\nclass Datasets(collections.abc.MutableSequence):\n \"\"\"Dataset collection.\n\n Parameters\n ----------\n datasets : `Dataset` or list of `Dataset`\n Datasets\n \"\"\"\n\n def __init__(self, datasets=None):\n if datasets is None:\n datasets = []\n\n if isinstance(datasets, Datasets):\n datasets = datasets._datasets\n elif isinstance(datasets, Dataset):\n datasets = [datasets]\n elif not isinstance(datasets, list):\n raise TypeError(f\"Invalid type: {datasets!r}\")\n\n unique_names = []\n for dataset in datasets:\n if dataset.name in unique_names:\n raise (ValueError(\"Dataset names must be unique\"))\n unique_names.append(dataset.name)\n\n self._datasets = datasets\n\n @property\n def parameters(self):\n \"\"\"Unique parameters (`~gammapy.modeling.Parameters`).\n\n Duplicate parameter objects have been removed.\n The order of the unique parameters remains.\n \"\"\"\n return self.models.parameters.unique_parameters\n\n @property\n def models(self):\n \"\"\"Unique models (`~gammapy.modeling.Models`).\n\n Duplicate model objects have been removed.\n The order of the unique models remains.\n \"\"\"\n return ProperModels(self)\n\n @property\n def names(self):\n return [d.name for d in self._datasets]\n\n @property\n def is_all_same_type(self):\n \"\"\"Whether all contained datasets are of the same type\"\"\"\n return len(set(_.__class__ for _ in self)) == 1\n\n @property\n def is_all_same_shape(self):\n \"\"\"Whether all contained datasets have the same data shape\"\"\"\n return len(set(_.data_shape for _ in self)) == 1\n\n @property\n def is_all_same_energy_shape(self):\n \"\"\"Whether all contained datasets have the same data shape\"\"\"\n return len(set(_.data_shape[0] for _ in self)) == 1\n\n def stat_sum(self):\n \"\"\"Compute joint likelihood\"\"\"\n stat_sum = 0\n # TODO: add parallel evaluation of likelihoods\n for dataset in self:\n stat_sum += dataset.stat_sum()\n return stat_sum\n\n def __str__(self):\n str_ = self.__class__.__name__ + \"\\n\"\n str_ += \"--------\\n\"\n\n for idx, dataset in enumerate(self):\n str_ += f\"idx={idx}, id={hex(id(dataset))!r}, name={dataset.name!r}\\n\"\n\n return str_\n\n def 
copy(self):\n \"\"\"A deep copy.\"\"\"\n return copy.deepcopy(self)\n\n @classmethod\n def read(cls, path, filedata=\"_datasets.yaml\", filemodel=\"_models.yaml\"):\n \"\"\"De-serialize datasets from YAML and FITS files.\n\n Parameters\n ----------\n path : str, Path\n Base directory of the datasets files.\n filedata : str\n file path or name of yaml datasets file\n filemodel : str\n file path or name of yaml models file\n\n Returns\n -------\n dataset : 'gammapy.modeling.Datasets'\n Datasets\n \"\"\"\n from . import DATASET_REGISTRY\n\n path = make_path(path)\n\n if (path / filedata).exists():\n filedata = path / filedata\n else:\n filedata = make_path(filedata)\n if (path / filemodel).exists():\n filemodel = path / filemodel\n else:\n filemodel = make_path(filemodel)\n\n models = Models.read(filemodel)\n data_list = read_yaml(filedata)\n\n datasets = []\n for data in data_list[\"datasets\"]:\n if (path / data[\"filename\"]).exists():\n data[\"filename\"] = str(make_path(path / data[\"filename\"]))\n dataset = DATASET_REGISTRY.get_cls(data[\"type\"]).from_dict(data, models)\n datasets.append(dataset)\n return cls(datasets)\n\n def write(self, path, prefix=\"\", overwrite=False, write_covariance=True):\n \"\"\"Serialize datasets to YAML and FITS files.\n\n Parameters\n ----------\n path : `pathlib.Path`\n path to write files\n prefix : str\n common prefix of file names\n overwrite : bool\n overwrite datasets FITS files\n write_covariance : bool\n save covariance or not\n \"\"\"\n\n path = make_path(path).resolve()\n datasets_dictlist = []\n for dataset in self._datasets:\n filename = f\"{prefix}_data_{dataset.name}.fits\"\n dataset.write(path / filename, overwrite)\n datasets_dictlist.append(dataset.to_dict(filename=filename))\n datasets_dict = {\"datasets\": datasets_dictlist}\n\n write_yaml(datasets_dict, path / f\"{prefix}_datasets.yaml\", sort_keys=False)\n self.models.write(\n path / f\"{prefix}_models.yaml\",\n overwrite=overwrite,\n write_covariance=write_covariance,\n )\n\n def stack_reduce(self, name=None):\n \"\"\"Reduce the Datasets to a unique Dataset by stacking them together.\n\n This works only if all Dataset are of the same type and if a proper\n in-place stack method exists for the Dataset type.\n\n Returns\n -------\n dataset : ~gammapy.utils.Dataset\n the stacked dataset\n \"\"\"\n if not self.is_all_same_type:\n raise ValueError(\n \"Stacking impossible: all Datasets contained are not of a unique type.\"\n )\n\n dataset = self[0].copy(name=name)\n for ds in self[1:]:\n dataset.stack(ds)\n return dataset\n\n def info_table(self, cumulative=False, region=None):\n \"\"\"Get info table for datasets.\n\n Parameters\n ----------\n cumulative : bool\n Cumulate info across all observations\n\n Returns\n -------\n info_table : `~astropy.table.Table`\n Info table.\n \"\"\"\n if not self.is_all_same_type:\n raise ValueError(\"Info table not supported for mixed dataset type.\")\n\n stacked = self[0].copy(name=self[0].name)\n\n rows = [stacked.info_dict()]\n\n for dataset in self[1:]:\n if cumulative:\n stacked.stack(dataset)\n row = stacked.info_dict()\n else:\n row = dataset.info_dict()\n\n rows.append(row)\n\n return table_from_row_data(rows=rows)\n\n def __getitem__(self, key):\n return self._datasets[self.index(key)]\n\n def __delitem__(self, key):\n del self._datasets[self.index(key)]\n\n def __setitem__(self, key, dataset):\n if isinstance(dataset, Dataset):\n if dataset.name in self.names:\n raise (ValueError(\"Dataset names must be unique\"))\n 
self._datasets[self.index(key)] = dataset\n else:\n raise TypeError(f\"Invalid type: {type(dataset)!r}\")\n\n def insert(self, idx, dataset):\n if isinstance(dataset, Dataset):\n if dataset.name in self.names:\n raise (ValueError(\"Dataset names must be unique\"))\n self._datasets.insert(idx, dataset)\n else:\n raise TypeError(f\"Invalid type: {type(dataset)!r}\")\n\n def index(self, key):\n if isinstance(key, (int, slice)):\n return key\n elif isinstance(key, str):\n return self.names.index(key)\n elif isinstance(key, Dataset):\n return self._datasets.index(key)\n else:\n raise TypeError(f\"Invalid type: {type(key)!r}\")\n\n def __len__(self):\n return len(self._datasets)\n","sub_path":"gammapy/datasets/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95110946","text":"import numpy as np\nimport os\n\nimport tvm\nfrom tvm import relay, auto_scheduler\nfrom tvm.relay import data_dep_optimization as ddo\nimport tvm.relay.testing\nfrom tvm.contrib import graph_executor\nfrom tvm.contrib.utils import tempdir\n\nimport argparse\n\nimport ml_collections\nfrom tt_mixer import TTMixer\nimport torch\n\nfrom tvm.relay.dataflow_pattern import *\nfrom tvm.relay.testing import run_opt_pass\nfrom tvm.relay.op.op import register_injective_schedule\n\ndef get_mixer_b16_tt_config(args):\n \"\"\"Returns TTMixer-B/16 configuration\"\"\"\n config = ml_collections.ConfigDict()\n config = ml_collections.ConfigDict()\n config.name = 'Mixer-B_16'\n config.patches = ml_collections.ConfigDict({'size': (16, 16)})\n config.hidden_dim = 768\n config.hidden_shape = args.hidden_tt_shape\n config.num_blocks = 12\n config.tokens_mlp_dim = 384\n config.channels_mlp_dim = 3072\n config.channels_mlp_shape = args.channels_tt_shape\n config.tt_ranks = args.tt_ranks\n return config\n\ndef set_configs(args):\n\n args.save_path = \"saved_models/B_16_cifar_10.pt\"\n args.img_size = 224\n args.num_classes = 10\n args.tt_ranks = [int(i) for i in args.tt_ranks.split(',')]\n args.hidden_tt_shape = [int(i) for i in args.hidden_tt_shape.split(',')]\n args.channels_tt_shape = [int(i) for i in args.channels_tt_shape.split(',')]\n args.target_layer = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n\n return args\n\nparser = argparse.ArgumentParser()\n# TT-format Configuration\n\n# ranks config [64, 64], [32, 32], [16, 16], [8, 8]\nparser.add_argument(\"--tt-ranks\", default=\"16, 16\",\n type=str,\n help=\"Ranks for TT-Format\")\n# 768 factorize (e.g. 768 = 8 x 8 x 12)\nparser.add_argument(\"--hidden-tt-shape\", default=\"8, 8, 12\",\n type=str,\n help=\"Factorized hidden dimension for TT-format\")\n# 3072 factorize (e.g. 
3072 = 12 x 12 x 16)\nparser.add_argument(\"--channels-tt-shape\", default=\"12, 16, 16\",\n type=str,\n help=\"Factorized channel dimension for TT-format\")\nargs = parser.parse_args()\n\nargs = set_configs(args)\n\nimport logging\n#logging.getLogger('auto_scheduler').setLevel(logging.DEBUG)\n\n#def compute_tt_linear(data, w1, w2, w3):\n@relay.op.register_compute(\"tt_linear\")\ndef compute_tt_linear(attrs, inputs):\n out = inputs[0] + 1\n return [out]\n\nrelay.op.op.register_injective_schedule(\"tt_linear\")\nrelay.op.op.register_pattern(\"tt_linear\", relay.op.op.OpPattern.INJECTIVE)\n\ndef get_network(name, batch_size, layout=\"NHWC\", dtype=\"float32\", use_sparse=True):\n \"\"\"Get the symbol definition and random weight of a network\"\"\"\n\n config = get_mixer_b16_tt_config(args)\n model = TTMixer(config,\n args.img_size,\n num_classes=args.num_classes,\n patch_size=16,\n zero_head=False,\n target_layer=args.target_layer)\n\n input_shape = (1, 3, 224, 224)\n output_shape = (1, 10)\n input_data = torch.randn(input_shape)\n scripted_model = torch.jit.trace(model, input_data).eval()\n\n input_name = \"input0\"\n shape_list = [(input_name, input_shape)]\n\n mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)\n\n def make_tt_linear_pattern():\n w1 = is_op(\"transpose\")(wildcard())\n\n x1 = is_op(\"nn.dense\")(wildcard(), w1)\n x1 = is_op(\"reshape\")(x1)\n #x1 = is_op(\"reshape\")(x1)\n x1 = is_op(\"transpose\")(x1)\n x1 = is_op(\"reshape\")(x1)\n #x1 = is_op(\"reshape\")(x1)\n\n w2 = is_op(\"transpose\")(wildcard())\n\n x2 = is_op(\"nn.dense\")(x1, w2)\n x2 = is_op(\"reshape\")(x2)\n #x2 = is_op(\"reshape\")(x2)\n x2 = is_op(\"transpose\")(x2)\n x2 = is_op(\"reshape\")(x2)\n #x2 = is_op(\"reshape\")(x2)\n\n w3 = is_op(\"transpose\")(wildcard())\n\n x3 = is_op(\"nn.dense\")(x2, w3)\n return x3\n\n pattern_table = [(\"tt_linear\", make_tt_linear_pattern())]\n\n\n class TTLinearCallback(DFPatternCallback):\n def __init__(self, require_type=False):\n super().__init__(require_type)\n self.x1 = wildcard()\n self.w1 = wildcard()\n self.w2 = wildcard()\n self.w3 = wildcard()\n self.b = wildcard()\n self.pattern = is_op(\"reshape\")(self.x1)\n self.pattern = is_op(\"transpose\")(self.pattern)\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"nn.dense\")(self.pattern, is_op(\"transpose\")(self.w1))\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"transpose\")(self.pattern)\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"nn.dense\")(self.pattern, is_op(\"transpose\")(self.w2))\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"transpose\")(self.pattern)\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"nn.dense\")(self.pattern, is_op(\"transpose\")(self.w3))\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"add\")(self.b, self.pattern)\n\n self.x2 = self.pattern\n\n self.pattern = is_op(\"multiply\")(self.pattern, wildcard())\n self.pattern = is_op(\"erf\")(self.pattern)\n self.pattern = is_op(\"multiply\")(self.pattern, wildcard())\n self.pattern = is_op(\"add\")(wildcard(), self.pattern)\n self.pattern = is_op(\"multiply\")(self.x2, self.pattern)\n self.w4 = wildcard()\n self.w5 = wildcard()\n self.w6 = wildcard()\n self.b2 = wildcard()\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"transpose\")(self.pattern)\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = 
is_op(\"nn.dense\")(self.pattern, is_op(\"transpose\")(self.w4))\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"transpose\")(self.pattern)\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"nn.dense\")(self.pattern, is_op(\"transpose\")(self.w5))\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"transpose\")(self.pattern)\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"nn.dense\")(self.pattern, is_op(\"transpose\")(self.w6))\n self.pattern = is_op(\"reshape\")(self.pattern)\n self.pattern = is_op(\"add\")(self.b2, self.pattern)\n\n def callback(self, pre, post, node_map):\n print(\"haha\")\n x1 = node_map[self.x1][0]\n w1 = node_map[self.w1][0]\n w2 = node_map[self.w2][0]\n w3 = node_map[self.w3][0]\n return x1\n\n\n expr = run_opt_pass(mod[\"main\"], relay.transform.SimplifyExpr())\n mod = tvm.IRModule.from_expr(expr)\n\n #expr = run_opt_pass(mod[\"main\"], relay.transform.MergeComposite(pattern_table), import_prelude=False)\n #mod = tvm.IRModule.from_expr(expr)\n\n #expr = rewrite(TTLinearCallback(), mod[\"main\"])\n #mod = tvm.IRModule.from_expr(expr)\n\n print(mod)\n\n return mod, params, input_shape, output_shape\n\n#### DEVICE CONFIG ####\n\n# Replace \"aarch64-linux-gnu\" with the correct target of your board.\n# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.\n# FIXME(tmoreau89, merrymercy): We leave '-device=arm_cpu' out of the target string\n# because we're sharing x86 op strategy.\ntarget = tvm.target.Target(\"llvm -mtriple=arm-linux-gnueabihf -mattr=+neon\")\n\n# Also replace this with the device key, rpc host and rpc port in your tracker\ndevice_key = \"xu4\"\nrpc_host = \"10.201.135.165\"\nrpc_port = 8109\n\n# Set this to True if you use ndk tools for cross compiling\n# And also set the environment variable below to point to the cross compiler\nuse_ndk = False\n# os.environ[\"TVM_NDK_CC\"] = \"/usr/bin/aarch64-linux-gnu-g++\"\n\n#### TUNING OPTION ####\nnetwork = \"mobilenet\"\nuse_sparse = True\nbatch_size = 1\n#layout = args.layout\ndtype = \"float32\"\nlog_file = \"R%d.json\" % (args.tt_ranks[0])\n#log_file = \"test.json\"\n\n#################################################################\n# Extract Search Tasks\n# --------------------\n# Next, we extract the search tasks and their weights from a network.\n# The weight of a task is the number of appearances of the task's subgraph\n# in the whole network.\n# By using the weight, we can approximate the end-to-end latency of the network\n# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the\n# latency of a task and :code:`weight[t]` is the weight of the task.\n# The task scheduler will just optimize this objective.\n\n# Extract tasks from the network\nprint(\"Get model...\")\nmod, params, input_shape, output_shape = get_network(\n network, batch_size, \"NCHW\", dtype=dtype, use_sparse=use_sparse\n)\nprint(\"Extract tasks...\")\ntasks, task_weights = auto_scheduler.extract_tasks(mod[\"main\"], params, target)\n\nfor idx, task in enumerate(tasks):\n print(\"========== Task %d (workload key: %s) ==========\" % (idx, task.workload_key))\n print(task.compute_dag)\n\n\n#################################################################\n# Tuning and Evaluation\n# ---------------------\n# Now, we set some options for tuning and launch the search tasks\n#\n# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.\n# You can set 
it to a small number (e.g., 200) for a fast demonstrative run.\n# In practice, we recommend setting it around :code:`800 * len(tasks)`,\n# which is typically enough for the search to converge.\n# For example, there are 29 tasks in resnet-50, so we can set it as 20000.\n# You can adjust this parameter according to your time budget.\n# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,\n# The measurement records can be used to query the history best, resume the search,\n# and do more analyses later.\n# * see :any:`auto_scheduler.TuningOptions`,\n# :any:`auto_scheduler.LocalRunner` for more parameters.\n#\n# After auto-tuning, we can compile the network with the best schedules we found.\n# All measurement records are dumped into the log file during auto-tuning,\n# so we can read the log file and load the best schedules.\n\n\ndef tune_and_evaluate():\n print(\"Begin tuning...\")\n '''\n tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=2000, # change this to 20000 to achieve the best performance\n builder=auto_scheduler.LocalBuilder(build_func=\"ndk\" if use_ndk else \"default\"),\n runner=auto_scheduler.RPCRunner(\n device_key,\n host=rpc_host,\n port=rpc_port,\n timeout=3000,\n repeat=1,\n min_repeat_ms=200,\n enable_cpu_cache_flush=True,\n ),\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n )\n\n tuner.tune(tune_option)\n #'''\n\n # Compile with the history best\n print(\"Compile...\")\n with auto_scheduler.ApplyHistoryBest(log_file):\n with tvm.transform.PassContext(\n opt_level=3, config={\"relay.backend.use_auto_scheduler\": True}\n ):\n lib = relay.build(mod, target=target, params=params)\n\n # Export library\n tmp = tempdir()\n if use_ndk:\n from tvm.contrib import ndk\n\n filename = \"net.so\"\n lib.export_library(tmp.relpath(filename), ndk.create_shared)\n else:\n filename = \"net.tar\"\n lib.export_library(tmp.relpath(filename))\n\n # Upload module to device\n print(\"Upload...\")\n remote = auto_scheduler.utils.request_remote(device_key, rpc_host, rpc_port, timeout=10000)\n remote.upload(tmp.relpath(filename))\n rlib = remote.load_module(filename)\n\n # Create graph executor\n dev = remote.cpu()\n module = graph_executor.GraphModule(rlib[\"default\"](dev))\n data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n module.set_input(\"input0\", data_tvm)\n\n # Evaluate\n print(\"Evaluate inference time cost...\")\n print(module.benchmark(dev, repeat=3, min_repeat_ms=500))\n\n\n# We do not run the tuning in our webpage server since the server doesn't have a Raspberry Pi,\n# or device tracker running.\n# Uncomment the following line to run it by yourself.\n\ntune_and_evaluate()\n\n\n######################################################################\n# .. note:: Explaining the printed information during tuning\n#\n# During the tuning, a lot of information will be printed on the console.\n# They are used for debugging purposes. The most important info is the output\n# of the task scheduler. The following table is a sample output.\n#\n# .. 
code-block:: c\n#\n# ----------------------------------------------------------------------\n# ------------------------------  [ Task Scheduler ]\n# ----------------------------------------------------------------------\n# |  ID  | Latency (ms) | Speed (GFLOPS) | Trials |\n# -------------------------------------------------\n# |    0 |        0.013 |           0.31 |     64 |\n# |    1 |        0.845 |           2.43 |    448 |\n# |    2 |        0.046 |          -0.00 |     64 |\n# |    3 |        4.194 |          24.53 |   2112 |\n# |    4 |        0.109 |           9.21 |     64 |\n# |    5 |        1.759 |          29.27 |    896 |\n# |    6 |        0.083 |           6.01 |     64 |\n# |    7 |        3.084 |          33.38 |   7680 |\n# |    8 |        0.136 |          14.78 |    384 |\n# |    9 |        1.349 |          38.23 |    768 |\n# |   10 |        0.133 |           7.55 |    128 |\n# |   11 |        2.747 |          37.56 |   1536 |\n# |   12 |        0.338 |          11.87 |    192 |\n# |   13 |        1.295 |          40.00 |    704 |\n# |   14 |        0.482 |           4.16 |    256 |\n# |   15 |        2.686 |          38.56 |   1344 |\n# |   16 |        0.884 |           9.08 |    448 |\n# |   17 |        1.332 |          39.18 |    704 |\n# |   18 |        1.045 |           3.84 |    576 |\n# |   19 |        1.391 |          38.09 |    704 |\n# |   20 |        0.777 |          10.34 |    448 |\n# |   21 |        0.739 |          30.97 |    448 |\n# -------------------------------------------------\n# Estimated total latency: 38.347 ms  Trials: 19992  Used time: 19260 s  Next ID: 3\n#\n# This table lists the latency and (estimated) speed of all tasks.\n# It also lists the allocation of measurement trials for all tasks.\n# The last line prints the total weighted latency of these tasks,\n# which can be a rough estimation of the end-to-end execution time\n# of the network.\n# The last line also prints the total number of measurement trials,\n# total time spent on auto-tuning and the ID of the next task to tune.\n#\n# There will also be some \"dmlc::Error\"s, because the\n# auto-scheduler will try some invalid schedules.\n# You can safely ignore them if the tuning can continue, because these\n# errors are isolated from the main process.\n#\n\n######################################################################\n# .. note:: Terminate the tuning earlier\n#\n# You can terminate the tuning earlier by forcibly killing this process.\n# As long as you get at least one valid schedule for each task in the log file,\n# you should be able to do the compilation (the section below).\n#\n\n#################################################################\n# Other Tips\n# ----------\n# 1. During the tuning, the auto-scheduler needs to compile many programs and\n# extract features from them. This part is CPU-intensive,\n# so a high-performance CPU with many cores is recommended for faster search.\n# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`\n# to distill the large log file and only save the best useful records.\n# 3. You can resume a search from the previous log file. You just need to\n# add a new argument :code:`load_log_file` when creating the task scheduler\n# in function :code:`run_tuning`. Say,\n# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`\n# 4. If you have multiple target CPUs, you can use all of them for measurements to\n# parallelize the measurements. 
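For reference, a typical tracker/server setup\n# (reusing the device_key / rpc_host / rpc_port values defined near the top of\n# this script; adjust them for your own machines) looks like::\n#\n#   python3 -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=8109\n#   python3 -m tvm.exec.rpc_server --tracker=10.201.135.165:8109 --key=xu4\n#\n# 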
Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`\n# to learn how to use the RPC Tracker and RPC Server.\n# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`\n# with :any:`auto_scheduler.RPCRunner`.\n","sub_path":"kernel/fused_tt/tune_tt_fused.py","file_name":"tune_tt_fused.py","file_ext":"py","file_size_in_byte":16746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"315163984","text":"from __future__ import print_function, division\nimport pandas as pd\nimport numpy as np\nfrom os.path import join, isdir, isfile, dirname, abspath\nfrom os import getcwd\nfrom sys import getfilesystemencoding\nfrom nilmtk.datastore import Key\nfrom nilmtk.measurement import LEVEL_NAMES\nfrom nilmtk.utils import check_directory_exists, get_datastore\nfrom nilm_metadata import convert_yaml_to_hdf5\nfrom inspect import currentframe, getfile, getsourcefile\n\n\ncolumn_mapping = {\n    'frequency': ('frequency', \"\"),\n    'voltage': ('voltage', \"\"),\n    'W': ('power', 'active'),\n    'energy': ('energy', 'apparent'),\n    'A': ('current', ''),\n    'reactive_power': ('power', 'reactive'),\n    'apparent_power': ('power', 'apparent'),\n    'power_factor': ('pf', ''),\n    'PF': ('pf', ''),\n    'phase_angle': ('phi', ''),\n    'VA': ('power', 'apparent'),\n    'VAR': ('power', 'reactive'),\n    'VLN': ('voltage', \"\"),\n    'V': ('voltage', \"\"),\n    'f': ('frequency', \"\")\n}\n\nTIMESTAMP_COLUMN_NAME = \"timestamp\"\nTIMEZONE = \"Asia/Kolkata\"\n\ndef convert_iawe(iawe_path, output_filename, format=\"HDF\"):\n    \"\"\"\n    Parameters\n    ----------\n    iawe_path : str\n        The root path of the iawe dataset.\n    output_filename : str\n        The destination filename (including path and suffix).\n    \"\"\"\n\n    check_directory_exists(iawe_path)\n\n    # Open data store\n    store = get_datastore(output_filename, format, mode='w')\n    electricity_path = join(iawe_path, \"electricity\")\n\n    # Mains data\n    for chan in range(1, 13):\n        key = Key(building=1, meter=chan)\n        filename = join(electricity_path, \"%d.csv\" % chan)\n        print('Loading ', chan)\n        df = pd.read_csv(filename)\n        df.drop_duplicates(subset=[\"timestamp\"], inplace=True)\n        df.index = pd.to_datetime(df.timestamp.values, unit='s', utc=True)\n        df = df.tz_convert(TIMEZONE)\n        df = df.drop(TIMESTAMP_COLUMN_NAME, axis=1)\n        df.rename(columns=lambda x: column_mapping[x], inplace=True)\n        df.columns.set_names(LEVEL_NAMES, inplace=True)\n        # coerce every column to a numeric dtype where possible\n        df = df.apply(pd.to_numeric, errors='ignore')\n        df = df.dropna()\n        df = df.astype(np.float32)\n        df = df.sort_index()\n        store.put(str(key), df)\n    store.close()\n    convert_yaml_to_hdf5(join(_get_module_directory(), 'metadata'),\n                         output_filename)\n\n    print(\"Done converting iAWE to HDF5!\")\n\n\ndef _get_module_directory():\n    # Taken from http://stackoverflow.com/a/6098238/732596\n    path_to_this_file = dirname(getfile(currentframe()))\n    if not isdir(path_to_this_file):\n        encoding = getfilesystemencoding()\n        path_to_this_file = dirname(unicode(__file__, encoding))\n    if not isdir(path_to_this_file):\n        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))\n    if not isdir(path_to_this_file):\n        path_to_this_file = getcwd()\n    assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'\n    return path_to_this_file\n","sub_path":"nilmtk/dataset_converters/iawe/convert_iawe.py","file_name":"convert_iawe.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"214304913","text":"import os\nimport sys\nfrom 
classes.CSVParser import CSVParser\nfrom classes.ComputerVision import ComputerVision\n\n\ndef main():\n    # Read user input and check if file exists\n    csv_input = None\n    while csv_input != \"q\":\n        print(\"\\n Input Path:\")\n        csv_input = raw_input(\"Please enter or copy/paste the full path of a csv file (or type 'q' to exit): \\n\")\n        if csv_input == \"q\":\n            sys.exit()\n        if not os.path.isfile(csv_input):\n            print(\"\\n Error: \\n File not found. \\n\")\n        else:\n            break\n    print(\"\\n Using file: \" + csv_input + \"\\n\")\n\n    # collect image pairs from csv into a dictionary and add each dictionary to a list 'images'\n    # images = [ { \"image1\" : \"string\", \"image2\": \"string\"}, ...]\n    csv = CSVParser()\n    images = csv.get_images(csv_input)\n\n    # Compute results for each image pair\n    print(\"\\n Computing comparisons... \\n\")\n    computer_vision = ComputerVision()\n    result_pairs = []\n    for pair in images:\n        if not os.path.isfile(pair[\"image1\"]):\n            print(\"FileNotFoundException: \\n \" + pair[\"image1\"] + \" does not exist! Please check the file path.\")\n            continue\n        if not os.path.isfile(pair[\"image2\"]):\n            print(\"FileNotFoundException: \\n \" + pair[\"image2\"] + \" does not exist! Please check the file path.\")\n            continue\n        result_pairs.append(computer_vision.find_similarity(pair))\n\n    # get an output path to write results file to\n    csv_output = None\n    while csv_output != \"q\":\n        print(\"\\n Output Path:\")\n        csv_output = raw_input(\"Please enter or copy/paste an existing full path (or type 'q' to exit): \\n\")\n        if csv_output == \"q\":\n            sys.exit()\n        if not os.path.isdir(csv_output):\n            print(\"\\n Error: \\n Path not found. \\n\")\n        else:\n            break\n\n    csv.write_results_file(result_pairs, csv_output)\n    print(\"\\n Success: \\n Output has been saved to 'results.csv' \")\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"CompareImages.py","file_name":"CompareImages.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"477592412","text":"def panCakes(filename):\r\n    f=open(filename,'rU')\r\n    tc=int(f.readline())\r\n    g=open('panCakesLarge.out','w')\r\n    for i in range(tc):\r\n        s=f.readline()\r\n        count=0\r\n        last=s.rfind('-')\r\n        if last==-1:\r\n            g.write(('Case #%d: %d\\n')%(i+1,count))\r\n            continue\r\n        while s[last]=='-':\r\n            if s[0]=='+':\r\n                upto=s.find('-')\r\n                if upto!=-1:\r\n                    s='-'*upto+s[upto:]\r\n                    count+=1\r\n                else:\r\n                    break\r\n            if s[0]=='-':\r\n                upto=s.find('+')\r\n                if upto!=-1:\r\n                    s='+'*upto+s[upto:]\r\n                    count+=1\r\n                else:\r\n                    count+=1\r\n                    break\r\n        g.write(('Case #%d: %d\\n')%(i+1,count))\r\npanCakes('B-large.in')\r\n","sub_path":"codes/CodeJamCrawler/16_0_2/sri.ram/perfectPanCake.py","file_name":"perfectPanCake.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"597333488","text":"import numpy as np\nimport scipy\nfrom scipy import optimize\n\ndef load_csv_to_ndarray(filepath):\n    # Read using numpy lib and return all rows, no header in this case\n    return np.genfromtxt(filepath, dtype=float, delimiter=',')\n\n# Transform 1d feature to 256d using the trigonometric feature map/expansion\ndef transform_feature(one_dim_feature_vector, weighted=False):\n    transformed_feature_vector = np.zeros((len(one_dim_feature_vector), 2*128))\n\n    for i in range(0, len(one_dim_feature_vector)):\n        feature = one_dim_feature_vector[i]\n        transformed_feature = np.zeros(2*128)\n\n        for j in range(0, 
128):\n            transformed_feature[2*j] = np.cos((j+1) * feature)\n            if weighted:\n                transformed_feature[2*j] = transformed_feature[2*j] * np.ceil(((2*j)+1)/2)\n\n            transformed_feature[(2*j)+1] = np.sin((j+1) * feature)\n            if weighted:\n                transformed_feature[(2*j)+1] = transformed_feature[(2*j)+1] * np.ceil(((2*j)+2)/2)\n\n        transformed_feature_vector[i] = transformed_feature\n\n    return transformed_feature_vector\n\n# Use either np.linalg.pinv or np.linalg.lstsq to compute the minimum Euclidean norm solution\n# Both yield the same results\n# The default method used is the Moore-Penrose pseudoinverse, as it can accommodate all types of matrices,\n# including those that don't have full row rank\ndef compute_min_euclidean_norm_solution(feature_matrix, label_vector, use_pinv=True):\n    if use_pinv:\n        print('USE PINV')\n        pseudo_inverse = np.linalg.pinv(feature_matrix)\n        return np.dot(pseudo_inverse, label_vector)\n\n    print('USE LSTSQ')\n    # Get X'\n    feature_matrix_transposed = feature_matrix.T\n\n    # Compute X'X (covariance matrix)\n    feature_matrix_matmul = np.dot(feature_matrix_transposed, feature_matrix)\n\n    # Basically, lstsq(M_1, M_2) will return a solution, let's say M_sol\n    # M_sol has the smallest l2 norm amongst all solutions (there can also be no solution)\n    # M_sol fulfills the equation: np.dot(M_1, M_sol) = M_2\n    # In our case, M_1 would be X'X, M_2 would be X'y and M_sol would be the weights we seek\n    return np.linalg.lstsq(feature_matrix_matmul, np.dot(feature_matrix_transposed, label_vector), rcond=None)[0]\n\n# Create weights of size 256 as described in the problem: [ceil(1/2), ceil(2/2), ceil(3/2), ceil(4/2), ...]\n# and a diagonal matrix that represents the weights\ndef create_weights_diagonal_matrix():\n    weights = np.zeros(2*128)\n\n    for j in range(0, 2*128):\n        weights[j] = np.ceil((j+1)/2)\n\n    return weights, np.diag(weights)\n\n# Count nonzero entries in the solution with threshold = 1.e-15,\n# i.e. 
values that are less than the threshold are treated as zero\ndef count_nonzero_entries(weights, threshold=1.e-15):\n    count = 0\n\n    for weight in weights:\n        if abs(weight) >= threshold:\n            count = count + 1\n\n    return count\n\n# Calculate squared loss for debugging purposes\ndef calculate_squared_loss(prediction_values, label_vector, mean=True):\n    # Calculate sigma (y_hat - y)^2\n    total_squared_loss = np.sum((prediction_values - label_vector) ** 2)\n\n    # Divide error by total observations to get mean squared loss\n    if mean:\n        return total_squared_loss / len(label_vector)\n\n    return total_squared_loss\n\n# Main function for experiment 1a\ndef compute_w_euclid(use_pinv=True):\n    print('Experiment 1a: ')\n    print()\n\n    train_df = load_csv_to_ndarray('hw3p1_train.csv')\n    test_df = load_csv_to_ndarray('hw3p1_test.csv')\n\n    one_dim_feature_vector = train_df[:,0]\n    label_vector = train_df[:,1]\n\n    feature_matrix = transform_feature(one_dim_feature_vector)\n\n    sol = compute_min_euclidean_norm_solution(feature_matrix, label_vector, use_pinv)\n    print('First 16 elements: ')\n    print(sol[0:16])\n    print()\n    print('Last 16 elements: ')\n    print(sol[240:256])\n    print()\n\n    print('Norm:')\n    print(np.linalg.norm(sol))\n    print()\n\n    test_feature_matrix = transform_feature(test_df)\n    test_pred_result = np.dot(test_feature_matrix, sol)\n\n    print('Mean squared loss: ')\n    print(calculate_squared_loss(np.dot(feature_matrix, sol), label_vector))\n    print()\n\n    print('Number of nonzero entries:')\n    print(count_nonzero_entries(sol))\n    print()\n\n    print('--------------------------------')\n\n    # # By default, Mac OS X can't directly render matplotlib\n    # # 
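(one workaround, assuming the Tk\n    # # backend is installed, is to call import matplotlib; matplotlib.use('TkAgg')\n    # # before importing pyplot.)\n    # # 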
To use matplotlib, please use Jupyter Notebook or set the backend properly (https://stackoverflow.com/questions/21784641/installation-issue-with-matplotlib-python)\n # import matplotlib.pyplot as plt\n #\n # fig = plt.figure(figsize=(10, 10))\n #\n # plt.title('Minimum Weighted Euclidean Plot')\n # plt.plot(test_df, test_pred_result, color='pink', linewidth=1, marker='o', markersize=3, mfc='white', mec='black')\n #\n # plt.savefig('1b.png', bbox_inches='tight')\n #\n # plt.show()\n\ndef compute_w_dantzig():\n print('Experiment 1c: ')\n print()\n\n train_df = load_csv_to_ndarray('hw3p1_train.csv')\n test_df = load_csv_to_ndarray('hw3p1_test.csv')\n\n one_dim_feature_vector = train_df[:,0]\n label_vector = train_df[:,1]\n\n feature_matrix = transform_feature(one_dim_feature_vector)\n\n # weights: [ceil(1/2), ceil(2,2), ...], size: 256\n weights, _ = create_weights_diagonal_matrix()\n\n # Size: 512\n appended_weights = np.zeros(2*2*128)\n\n # This is the linear objective function\n # First 256 is for w, last 256 for v\n # Give zero weights to w (first 256) because\n # we care only about sum(abs(w)), i.e. sum(v)\n appended_weights[2*128:2*2*128] = weights\n\n # Equality constraint\n\n # A^tA\n A_eq = np.dot(feature_matrix.transpose(), feature_matrix)\n # Add 256 more zero columns to fit for v\n A_eq = np.hstack((A_eq, np.zeros((2*128, 2*128))))\n\n # Label vector stay as is, A^tb will have shape 256\n b_eq = np.dot(feature_matrix.transpose(), label_vector)\n\n # Inequality constraint\n # Each pair of v and w needs to follow: - w - v <= 0 and w - v <= 0\n A_ub = np.zeros((512, 512))\n for i in range(0, int(len(A_ub)/2)):\n # -w_j - v_j\n A_ub[2*i][i] = -1\n A_ub[2*i][i+256] = -1\n\n # w_j - v_j\n A_ub[(2*i)+1][i] = 1\n A_ub[(2*i)+1][i+256] = -1\n\n # Upper bound for w and v\n b_ub = np.zeros(512)\n\n # Set bound for w from -inf to inf (None, None)\n # Set bound for v from 0 to inf (0, None) -> absolute value, non-negative\n bounds = []\n for i in range(0, 256):\n bounds.append((None, None))\n for i in range(0, 256):\n bounds.append((0, None))\n\n # Call the linear programming solver with method revised simplex\n res = scipy.optimize.linprog(appended_weights, A_eq=A_eq, b_eq=b_eq, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='revised simplex')\n\n # Get the solution from result object\n dantzig_sol = res.x[0:256]\n\n print('First 16 elements: ')\n print(dantzig_sol[0:16])\n print()\n print('Last 16 elements: ')\n print(dantzig_sol[240:256])\n print()\n\n print('Norm:')\n print(np.linalg.norm(dantzig_sol))\n print()\n\n print('Mean squared loss: ')\n print(calculate_squared_loss(np.dot(feature_matrix, dantzig_sol), label_vector))\n print()\n\n print('Number of nonzero entries:')\n print(count_nonzero_entries(dantzig_sol))\n print()\n\n print('--------------------------------')\n\n # # By default, Mac OS X can't directly render matplotlib\n # # To use matplotlib, please use Jupyter Notebook or set the backend properly (https://stackoverflow.com/questions/21784641/installation-issue-with-matplotlib-python)\n # import matplotlib.pyplot as plt\n #\n # test_feature_matrix = transform_feature(test_df)\n # test_pred_result = np.dot(test_feature_matrix, dantzig_sol)\n #\n # fig = plt.figure(figsize=(10, 10))\n #\n # plt.title('Dantzig Selector Euclidean Plot')\n # plt.plot(test_df, test_pred_result, color='pink', linewidth=1, marker='o', markersize=3, mfc='white', mec='black')\n #\n # plt.savefig('1c.png', bbox_inches='tight')\n #\n # plt.show()\n\nif __name__ == '__main__':\n compute_w_euclid()\n\n 
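# Each of these experiment functions re-reads hw3p1_train.csv / hw3p1_test.csv\n    # from the working directory and prints its own summary (weights, norm, loss,\n    # nonzero count).\n    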
compute_w_weighted()\n\n    compute_w_dantzig()\n","sub_path":"hw_3/submission/gd2551_hw3/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":10139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"121447907","text":"import requests\nimport json\nimport time\nimport datetime\nimport random\n\nfrom fake_useragent import UserAgent\nfrom dateutil.relativedelta import relativedelta\n\n\nclass HuiTun(object):\n    def __init__(self, phone, passwd, keyword):\n        \"\"\"\n        HuiTun Data client\n        @param phone: phone number used to log in\n        @param passwd: login password\n        @param keyword: keyword to search for\n        \"\"\"\n        self.user_agent = UserAgent(verify_ssl=False).random\n        self.phone = phone\n        self.passwd = passwd\n        self.keyword = keyword\n        self.cookie = ''\n\n    def login_huitun(self):\n        \"\"\"\n        Log in to HuiTun Data\n        @return: None; the post-login cookie value is stored in self.cookie\n        \"\"\"\n        login_url_first = 'https://login.huitun.com/weChat/userLogin'\n        headers_first = {\n            'Content-Type': 'application/json',\n            'Host': 'login.huitun.com',\n            'Origin': 'https://dy.huitun.com',\n            'User-Agent': self.user_agent,\n        }\n        data = {\n            'mobile': self.phone,\n            'password': self.passwd,\n        }\n        requests.post(login_url_first, headers=headers_first, data=json.dumps(data))\n\n        headers_second = {\n            'Content-Type': 'application/json',\n            'Origin': 'https://dy.huitun.com',\n            'User-Agent': self.user_agent,\n        }\n        login_url_second = f'https://dyapi.huitun.com/userLogin?_t={int(time.time() * 1000)}'\n        res = requests.post(login_url_second, headers=headers_second, data=json.dumps(data))\n        cookies = res.cookies.items()\n        cookie = ''\n        for name, value in cookies:\n            cookie += '{0}={1}'.format(name, value)\n        self.cookie = cookie\n\n    def search_live_user(self):\n        \"\"\"\n        Search for the user and fetch the uid (required), like count and other stats\n        @return: dict with the fetched data (empty dict on failure)\n        \"\"\"\n        search_url = 'https://dyapi.huitun.com/search/user'\n        headers = {\n            'Cookie': self.cookie,\n            'Host': 'dyapi.huitun.com',\n            'Origin': 'https://dy.huitun.com',\n            'User-Agent': self.user_agent,\n        }\n        params = {\n            '_t': int(time.time() * 1000),\n            'cids': '',\n            'tagList': '',\n            'followerRange': '',\n            'diggRange': '',\n            'ageRange': '',\n            'scoreRange': '',\n            'gender': '',\n            'province': '',\n            'region': '',\n            'maxGender': '',\n            'maxAge': '',\n            'maxArea': '',\n            'maxCity': '',\n            'customVerify': '',\n            'verify': 'false',\n            'blueV': 'false',\n            'mcn': 'false',\n            'fusionShopEnter': 'false',\n            'promotionUser': 'false',\n            'contact': 'false',\n            'goodsSource': '',\n            'sales': '',\n            'visitorRange': '',\n            'prices': '',\n            'goodsKeyword': '',\n            'keywordMatch': '',\n            'goodsCates': '',\n            'subIds': '',\n            'leafIds': '',\n            'keyword': self.keyword,\n            'from': '1',\n            'sortField': '',\n            'tag': '0',\n            'fusionShopFlag': 'false',\n        }\n        try:\n            response = requests.get(search_url, headers=headers, params=params)\n            if response.status_code != 200:\n                self.login_huitun()\n                response = requests.get(search_url, headers=headers, params=params)\n\n            datas = response.json().get('data')[0]\n            item = dict()\n            item['uid'] = datas.get('uid', '')\n            item['nickname'] = datas.get('nickname', '')\n            item['douyin_id'] = datas.get('authorId', '')\n            item['fans_count'] = datas.get('followerCountTotal', '')\n            item['like_count'] = datas.get('totalFavorited', '')\n            item['video_count'] = datas.get('awemeCount', '')\n            return item\n        except:\n            return {}\n\n    def get_fans_add(self, uid):\n        \"\"\"\n        Fetch the author's detail record and return the number of works added\n        in the last 30 days (stored as 'video_added' by the caller)\n        @param uid: unique identifier of the user\n        @return: the 30-day video count, or 0 on failure\n        \"\"\"\n        fans_add_url = 'https://dyapi.huitun.com/user/detail'\n        headers = {\n            'Cookie': self.cookie,\n            'Host': 'dyapi.huitun.com',\n            'Origin': 'https://dy.huitun.com',\n            'User-Agent': self.user_agent,\n        }\n        params = 
{\n            '_t': int(time.time() * 1000),\n            'uid': uid,\n            'example': uid,\n        }\n        try:\n            response = requests.get(fans_add_url, headers=headers, params=params).json()\n            datas_fan = response.get('data')\n            video_added = datas_fan.get('awewe30Count', 0)\n            return video_added\n        except:\n            return 0\n\n    def get_goods_num(self, uid):\n        \"\"\"\n        Fetch the live-stream product-sales records for the last month and\n        total up the number of goods carried\n        @param uid: unique identifier of the user\n        @return: the accumulated goods count\n        \"\"\"\n        goods_url = 'https://dyapi.huitun.com/live/record'\n        headers = {\n            'Cookie': self.cookie,\n            'Host': 'dyapi.huitun.com',\n            'Origin': 'https://dy.huitun.com',\n            'User-Agent': self.user_agent,\n        }\n        page_num = 1\n        now_date = datetime.datetime.today()\n        end = f'{now_date.year}-{now_date.month}-{now_date.day}'\n        last_month = now_date - relativedelta(months=1)\n        start = f'{last_month.year}-{last_month.month}-{last_month.day}'\n        goods_num = 0\n        while True:\n            params = {\n                '_t': int(time.time() * 1000),\n                'from': page_num,\n                'time': '',\n                'has': 'HAS_GOODS',\n                'keyword': '',\n                'mod': 'DESC',\n                'sort': '',\n                'start': start,\n                'end': end,\n                'filterMap': '',\n                'uid': uid,\n                'example': uid,\n            }\n            try:\n                response = requests.get(goods_url, headers=headers, params=params).json()\n                datas = response.get('data')\n                # exit the loop once a page comes back empty\n                if not datas:\n                    return goods_num\n\n                for i in datas:\n                    goods_num += i.get('goodsNum', 0)\n                page_num += 1\n                time.sleep(random.uniform(1, 3))\n            except:\n                return goods_num\n\n    def main(self):\n        self.login_huitun()\n        if not self.cookie:\n            return {}\n        res = self.search_live_user()\n        uid = res.get('uid', 0)\n        if uid:\n            video_add = self.get_fans_add(uid)\n            bring_goods = self.get_goods_num(uid)\n            res.update({\n                'video_added': video_add,\n                'bring_goods': bring_goods,\n            })\n        return res\n\n\nif __name__ == '__main__':\n    login_phone = ''\n    login_passwd = ''\n    keyword_search = ''\n    print(HuiTun(login_phone, login_passwd, keyword_search).main())\n","sub_path":"第三方平台/抖音/huitun.py","file_name":"huitun.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"378852004","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport sys\nimport gtk\nfrom NDImplementationDecorator import NDImplementationDecorator\n\n\nclass VulnerabilityNodeDialog:\n    def __init__(self,objt,environmentName,dupProperty,overridingEnvironment,builder):\n        self.window = builder.get_object(\"VulnerabilityNodeDialog\")\n        self.decorator = NDImplementationDecorator(builder)\n        self.decorator.updateTextCtrl(\"vulnerabilityNameCtrl\",objt.name())\n        self.decorator.updateTextCtrl(\"vulnerabilityTypeCtrl\",objt.type())\n        self.decorator.updateTextCtrl(\"vulnerabilitySeverityCtrl\",objt.severity(environmentName,dupProperty,overridingEnvironment))\n        self.decorator.updateMLTextCtrl(\"vulnerabilityDescriptionCtrl\",objt.description())\n        assets = []\n        for asset in objt.assets(environmentName,dupProperty):\n            assets.append([asset])\n        self.decorator.updateListCtrl(\"vulnerabilityAssetsCtrl\",['Asset'],gtk.ListStore(str),assets)\n        self.window.resize(300,325)\n\n    def on_vulnerabilityOkButton_clicked(self,callback_data):\n        self.window.destroy()\n\n    def show(self):\n        self.window.show()\n","sub_path":"cairis/cairis/VulnerabilityNodeDialog.py","file_name":"VulnerabilityNodeDialog.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"396109631","text":"# iter.py\n# Plain recursion: the number of stack frames consumed grows with the call depth\ndef fact(n):\n\tif n == 1:\n\t\treturn 1\n\treturn n*fact(n-1)\n\nprint(fact(10))\n\n'''\nTail-recursion style: return the bare recursive call, without applying any\nfurther computation to its result. In an interpreter that optimizes tail calls\nthis needs only a single stack frame (note that CPython does not perform this\noptimization, so the version below illustrates the style rather than saving memory).\ndef fact(n):\n    return fact_iter(n, 1)\n\ndef fact_iter(num, product):\n    if num == 1:\n        return product\n    return fact_iter(num - 1, num * product)\n'''\n","sub_path":"iter.py","file_name":"iter.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"465428488","text":"import pandas as pd\nfrom scipy.io.arff import loadarff\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\n\ndef read_in_dataset():\n    raw_data = loadarff('iris.arff')\n    iris_df = pd.DataFrame(raw_data[0])\n    return iris_df\n\ndef create_mean_matrix(iris_df, class_names, features_to_search):\n\n    mean_matrix = []\n    mean_dict_list = {}\n\n    for class_name in class_names:\n        mean_dict_list[class_name] = []\n\n    for class_name in class_names:\n        for feature in features_to_search:\n            mean_feature_val_of_class = iris_df.loc[(iris_df['class'] == class_name)][feature].mean()\n            mean_dict_list[class_name].append(mean_feature_val_of_class)\n\n        mean_matrix.append(mean_dict_list[class_name])\n\n    return mean_matrix\n\ndef compute_scatter_within_matrix(iris_df, class_names, features_to_search, mean_matrix):\n    # init scatter matrix\n    Scatter_Within = np.zeros((4, 4))\n    # index of the class currently being processed\n    count = 0\n\n    for class_name in class_names:\n        # init scatter matrix for this current class\n        scatter_matrix_for_class = np.zeros((4, 4))\n        # get the mean matrix\n        mean_vector_for_class = mean_matrix[count]\n        # get data for this class\n        class_feature_df = iris_df.loc[(iris_df['class'] == class_name)]\n        # get the feature values for this class\n        feature_vals_df = class_feature_df[features_to_search]\n        # iterate through the rows; each row holds the four feature values of one sample\n        for index, row in feature_vals_df.iterrows():\n            # convert to np array so we can do other operations like transpose\n            np_feature_row = 
np.array([row.sepallength, row.sepalwidth, row.petallength, row.petalwidth])\n            # convert to np array so we can do other operations like transpose\n            np_mean_vec_for_class = np.array(mean_vector_for_class)\n            # reshape it to be a column so we can do the transpose after\n            np_column_feature_vec = np_feature_row.reshape(4, 1) # getting it as column\n            # reshape it to be a column so we can do the transpose after\n            np_mean_feature_vec = np_mean_vec_for_class.reshape(4, 1) # getting it as column\n            # calc the distance from mean\n            row_instance_minus_mean = np_column_feature_vec - np_mean_feature_vec # xi−mi\n            # transpose\n            row_instance_minus_mean_T = row_instance_minus_mean.T # Transpose of xi−mi\n            # (4x1) dot (1x4) gives the 4x4 outer product of the centered sample\n            multiply_result = row_instance_minus_mean.dot(row_instance_minus_mean_T) # multiplication part\n            # add up the results\n            scatter_matrix_for_class += multiply_result\n        # go to next class\n        count += 1\n        # accumulate this class's scatter into the within-class total\n        Scatter_Within += scatter_matrix_for_class\n\n    return Scatter_Within\n\ndef compute_scatter_between_matrix(iris_df, class_names, features_to_search, mean_matrix):\n\n    # we want the overall mean of just the classes we are looking for\n    iris_df_target_classes = iris_df.loc[(iris_df['class'].isin(class_names))]\n    overall_mean = np.array(np.mean(iris_df_target_classes, axis=0).to_numpy())\n    overall_mean_reshape = overall_mean.reshape(4,1)\n\n    Scatter_Between = np.zeros((4, 4))\n    count = 0\n    for class_name in class_names:\n        # get the features for the class we're looking at\n        class_feature_df = iris_df.loc[(iris_df['class'] == class_name)]\n        # get the number of instances for this class\n        num_of_instances_of_class= len(class_feature_df.index)\n        # reshape mean vector to be a column\n        mean_vector_for_class = np.array(mean_matrix[count]).reshape(4,1)\n        # subtract the overall mean from this class's mean\n        mean_instance_minus_overall_mean = mean_vector_for_class - overall_mean_reshape\n        # Transpose\n        mean_instance_minus_overall_mean_T = mean_instance_minus_overall_mean.T\n        # dot product is the multiply step\n        multiply_result = mean_instance_minus_overall_mean.dot(mean_instance_minus_overall_mean_T)\n        # times the number of instances\n        multiply_result_times_num_instances = multiply_result * num_of_instances_of_class\n        # summation\n        Scatter_Between += multiply_result_times_num_instances\n        # go to next class\n        count += 1\n\n    return Scatter_Between\n\ndef LDA(class_names, title):\n\n    features_to_search = ['sepallength', 'sepalwidth', 'petallength', 'petalwidth']\n    # class_names = [b'Iris-setosa', b'Iris-versicolor', b'Iris-virginica']\n    class_dict = {}\n    iris_df = read_in_dataset()\n    mean_matrix = create_mean_matrix(iris_df, class_names, features_to_search)\n    Scatter_Within = compute_scatter_within_matrix(iris_df, class_names, features_to_search, mean_matrix)\n    Scatter_Between = compute_scatter_between_matrix(iris_df, class_names, features_to_search, mean_matrix)\n    eigen_values, eigen_vectors = np.linalg.eig(np.linalg.inv(Scatter_Within).dot(Scatter_Between))\n\n    list_eigen_vals = list(eigen_values)\n    sorted_index = []\n    # use a copy: slicing with [:] returns a numpy view, and sorting a view\n    # would reorder eigen_values itself and break the np.where lookups below\n    sorted_eigen_vals = eigen_values.copy()\n    # Find the sort of the eigenvectors from highest to least\n    sorted_eigen_vals.sort()\n    sorted_eigen_vals = list(sorted_eigen_vals)\n    sorted_eigen_vals.reverse()\n    for sorted_eigen in sorted_eigen_vals:\n        i = np.where(eigen_values == sorted_eigen)\n        sorted_index.append(i[0][0])\n\n    eigen_vector_interest1 = eigen_vectors[:, sorted_index[0]].reshape(4, 1)\n    
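# np.linalg.eig returns eigenvectors as the columns of its second output,\n    # so each discriminant direction is read from the column whose eigenvalue\n    # ranks next in the sorted order above\n    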
eigen_vector_interest2 = eigen_vectors[:, sorted_index[1]].reshape(4, 1)\n\n    # We're joining the two eigenvectors of interest together\n    W = np.hstack((eigen_vector_interest1, eigen_vector_interest2))\n\n    iris_df_only_classes_we_want = iris_df.loc[iris_df['class'].isin(class_names)]\n    iris_data_we_want = np.array(iris_df_only_classes_we_want[features_to_search])\n    X_lda = iris_data_we_want.dot(W)\n\n    le = LabelEncoder()\n    # since we have multiple classes in the data\n    y = le.fit_transform(iris_df_only_classes_we_want['class'])\n\n    plt.title(title)\n    plt.xlabel('LD1')\n    plt.ylabel('LD2')\n    plt.scatter(\n        X_lda[:, 0],\n        X_lda[:, 1],\n        c=y, # This is our classes\n        cmap='rainbow',\n        alpha=1, # just plugged in 1\n        edgecolors='b'\n    )\n    plt.show()\n\ndef problem2_a():\n\n    #[b'Iris-setosa', b'Iris-versicolor', b'Iris-virginica']\n    class_names = [b'Iris-setosa', b'Iris-versicolor']\n    LDA(class_names, \"iris-setosa and iris-versicolor\")\n    class_names = [b'Iris-setosa', b'Iris-virginica']\n    LDA(class_names, \"iris-setosa and iris-virginica\")\n    class_names = [b'Iris-versicolor', b'Iris-virginica']\n    LDA(class_names, \"iris-versicolor and iris-virginica\")\n\ndef main():\n    problem2_a()\n\nif __name__ == '__main__':\n    main()","sub_path":"app/Problem2_a.py","file_name":"Problem2_a.py","file_ext":"py","file_size_in_byte":6981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"520653108","text":"import logging\r\nlogger = logging.getLogger(\"Listener.modules.delete\")\r\n\r\ndef remove(caminho,parametro):\r\n\r\n\toutput = []\r\n\ttry:\r\n\t\tf = open (caminho, \"r\")\r\n\t\tarquivo = f.read()\r\n\t\tfor line in arquivo.split(';'):\r\n\t\t\tif not parametro in line:\r\n\t\t\t\toutput.append(line+';')\r\n\t\tf.close()\r\n\r\n\t\tf = open(caminho, 'w')\r\n\t\tf.writelines(output)\t\t\t\r\n\t\tf.close()\r\n\r\n\texcept FileNotFoundError as error:\r\n\t\tlogger.error('<ERROR: %s>'%error)\r\n","sub_path":"modules/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"4931350","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport codecs\n\n\nclass AboutWindow(object):\n\n    # Window parameters\n    def setup_ui(self, dialog_about_us):\n        dialog_about_us.setObjectName(\"DialogAboutUs\")\n        dialog_about_us.resize(240, 320)\n        icon = QtGui.QIcon()\n        icon.addPixmap(QtGui.QPixmap(\"images/handyman.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n        dialog_about_us.setWindowIcon(icon)\n        self.text_browser = QtWidgets.QTextBrowser(dialog_about_us)\n        self.text_browser.setGeometry(QtCore.QRect(0, 0, 241, 321))\n        self.text_browser.setObjectName(\"textBrowser\")\n        self.retranslate_ui(dialog_about_us)\n        QtCore.QMetaObject.connectSlotsByName(dialog_about_us)\n\n    def retranslate_ui(self, dialog_about_us):\n        dialog_about_us.setWindowTitle(QtWidgets.QApplication.translate(\"DialogAboutUs\", \"About\", None))\n        f = codecs.open('../resources/About.html', 'r', encoding='utf-8')\n        self.text_browser.setHtml(QtWidgets.QApplication.translate(\"DialogAboutUs\", f.read(), None))\n","sub_path":"scanner/gui/AboutWindow.py","file_name":"AboutWindow.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"544411358","text":"# coding:utf8\nfrom django.db import models\nfrom OA.models import Project\n\n\n# Create your models here.\nclass Employee(models.Model):\n\n    class Meta:\n        verbose_name_plural = 
'员工信息'\n\n position_list = (\n ('SD', '校长'),\n ('ASD', '助理校长'),\n ('DOS', '教学副校长'),\n ('CCM', '咨询经理'),\n ('CRM', '客服经理'),\n ('CCS', '咨询主管'),\n ('CRS', '客服主管'),\n ('TRM', '学科组长'),\n ('TRS', '学科带头人'),\n ('CC', '教育顾问'),\n ('CR', '班主任'),\n ('TR', '学科教师'),\n ('YWTR', '语文教师'),\n ('SXTR', '数学教师'),\n ('YYTR', '英语教师'),\n ('WLTR', '物理教师'),\n ('HXTR', '化学教师'),\n ('KXTR', '科学教师'),\n ('ZSDTR', '政史地教师'),\n ('QTTR', '其他教师'),\n ('TL', '出纳'),\n ('CP', '教务专员'),\n ('AD', '行政'),\n ('CL', '保洁'),\n ('LMS', '市场主管')\n )\n\n gender = (\n ('M', '男'),\n ('F', '女'),\n )\n\n diploma_list = (\n ('Dr', '博士'),\n ('Ma', '硕士'),\n ('Ba', '本科'),\n ('Sp', '专科'),\n ('ot', '其他'),\n )\n\n\n # user_index = models.SmallIntegerField('序号', )\n user_name = models.CharField('姓名', max_length=10)\n user_position = models.CharField('职位', choices=position_list, max_length=10)\n user_mail = models.EmailField('邮箱')\n user_workcode = models.CharField('工号', unique=True, default=0,max_length=12)\n user_school = models.ForeignKey(Project, verbose_name='校区', null=True)\n user_phonenumber = models.CharField('联系方式', max_length=11)\n user_salary = models.IntegerField('薪资', default=0, blank=True)\n user_arrivetime = models.DateField('到岗时间', null=True)\n user_ID = models.CharField('身份证号', unique=True, max_length=20, null=True)\n user_gender = models.CharField('性别', choices=gender, max_length=2)\n user_birthday = models.DateField('出生年月', null=True)\n user_diploma = models.CharField('最高学历', choices=diploma_list, default='Ba', max_length=10)\n user_graduate_colledge = models.CharField('毕业院校', max_length= 40)\n user_major = models.CharField('专业', max_length=20)\n\n def __unicode__(self):\n return self.user_name\n","sub_path":"hr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"262004404","text":"from .models import Host,Operatingsystem,Model,Environment,Hostgroup,Puppetclass,Manufacturer,Report\nfrom .foremanapi import foreman_get\nimport math\nimport re\nimport datetime\n\ndef update_fact(id):\n try:\n host = Host.objects.get(host_id=id)\n facts = foreman_get('/hosts/%s/facts' % (id))[host.name]\n host.ip = facts['ipaddress']\n host.processors = facts['processorcount']\n host.processor_type = facts['processor0']\n host.os_name = facts['operatingsystem']\n host.os_major = facts['os::release::major']\n host.os_minor = facts['os::release::minor']\n host.os_release = facts['operatingsystemrelease']\n if \"M\" in facts['memorysize']:\n host.memory = math.ceil(float(facts['memorysize'].split()[0]) * 0.001)\n else:\n host.memory = math.ceil(float(facts['memorysize'].split()[0]))\n if facts.has_key('disks::sda::size'):\n host.disk = facts['disks::sda::size'].split()[0]\n host.uptime = facts['uptime_days']\n if facts.has_key('serialnumber'):\n host.sn = facts['serialnumber']\n if facts.has_key('is_virtual'):\n host.is_virtual = facts['is_virtual'].capitalize()\n host.manufacturer = Manufacturer.objects.get_or_create(name=facts['manufacturer'])[0]\n host.model = Model.objects.get_or_create(name=facts['productname'])[0]\n host.save()\n \n except Exception as e:\n pass\n #raise e\n\ndef update_host():\n results = foreman_get('/hosts')\n for result in results:\n try:\n last_report = datetime.datetime.strptime(result['last_report'][:-4],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=8)\n host, created = Host.objects.update_or_create(host_id=result['id'],defaults={'name': result['name'],'hostgroup_id': 
result['hostgroup_id'],'environment_id': result['environment_id'],'created_at': result['created_at'][:-4],'last_report': last_report,'status': result['global_status_label']})\n if created:\n update_fact(host.host_id)\n except Exception as e:\n continue \n\ndef update_facts():\n hosts = Host.objects.all()\n for host in hosts:\n update_fact(host.host_id)\n # try:\n # host_id = host.host_id\n # facts = foreman_get('/hosts/%s/facts' % (host_id))[host.name]\n # host.ip = facts['ipaddress']\n # host.processors = facts['processorcount']\n # host.processor_type = facts['processor0']\n # host.os_name = facts['operatingsystem']\n # host.os_major = facts['os::release::major']\n # host.os_minor = facts['os::release::minor']\n # host.os_release = facts['operatingsystemrelease']\n # if \"M\" in facts['memorysize']:\n # host.memory = math.ceil(float(facts['memorysize'].split()[0]) * 0.001)\n # else:\n # host.memory = math.ceil(float(facts['memorysize'].split()[0]))\n # host.disk = facts['disks::sda::size'].split()[0]\n # host.uptime = facts['uptime_seconds']\n # host.save()\n # except Exception as e:\n # raise e\n # continue\n\ndef update_operatingsystem():\n results = foreman_get('/operatingsystems')\n for result in results:\n try:\n Operatingsystem.objects.get_or_create(operatingsystem_id=result['id'],defaults={'name': result['name'],'major': result['major'],'minor': result['minor']})\n except Exception as e:\n continue\n\n# def update_model():\n# results = foreman_get('/models')\n# for result in results:\n# try:\n# Model.objects.get_or_create(model_id=result['id'],defaults={'name': result['name']})\n# except Exception as e:\n# continue\n\ndef update_environment():\n results = foreman_get('/environments')\n for result in results:\n try:\n Environment.objects.update_or_create(environment_id=result['id'],defaults={'name': result['name']})\n except Exception as e:\n continue\n\ndef update_hostgroup():\n results = foreman_get('/hostgroups')\n for result in results:\n try:\n Hostgroup.objects.update_or_create(hostgroup_id=result['id'],defaults={'name': result['name'],'environment_id': result['environment_id']})\n except Exception as e:\n continue\n\ndef update_class():\n results = foreman_get('/puppetclasses')\n for k,v in results.items():\n for pclass in v:\n try:\n Puppetclass.objects.get_or_create(class_id=pclass['id'],defaults={'name': pclass['name']})\n except Exception as e:\n # raise e\n continue \n\ndef update_reports():\n search = \"?search=reported+%3D+Today++and++eventful++%3D++true\"\n results = foreman_get(\"/config_reports\",search=search)\n for result in results:\n try:\n reported_at = datetime.datetime.strptime(result['reported_at'][:-4],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=8)\n report,created = Report.objects.get_or_create(r_id=result['id'],defaults ={'host_id': result['host_id'],'reported_at': reported_at,'applied': result['status']['applied'],'restarted': result['status']['restarted'],'failed': result['status']['failed'],'failed_restarts': result['status']['failed_restarts'],'skipped': result['status']['skipped']})\n except Exception as e:\n # raise e\n continue\n\ndef inventory():\n hosts = Host.objects.all()\n f = open('/etc/ansible/hosts','r+')\n ips = f.read()\n for host in hosts:\n try:\n if re.match(host.ip,ips):\n pass\n else:\n f.write('%s\\n'%(host.ip))\n except Exception as e:\n continue \n 
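# NOTE: f.read() above left the file position at EOF, so the f.write calls\n    # append any missing host IPs to the end of /etc/ansible/hosts\n    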
f.close()\n\n\n","sub_path":"cmdb/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"592545228","text":"\nfrom joblib import dump, load\nimport pandas as pd, numpy as np, os, argparse\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\n\n# Argument parser\ndef _parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--filepath', type=str, default='/opt/ml/processing/input/')\n    parser.add_argument('--filename', type=str, default='data.csv')\n    parser.add_argument('--outputpath', type=str, default='/opt/ml/processing/output/')\n    # Parse the arguments\n    return parser.parse_known_args()\n\n# Main Training Loop\nif __name__==\"__main__\":\n    # Process arguments\n    args, _ = _parse_args()\n    # Load the dataset\n    df = pd.read_csv(os.path.join(args.filepath, args.filename))\n    X,y = df.drop('y', axis=1), df.y\n    # Define the pipeline and train it\n    pipe = Pipeline([('scaler', StandardScaler())])\n    transformed = pipe.fit_transform(X)\n    # Generate the output files - train and test\n    output = pd.concat([pd.DataFrame(transformed), y], axis=1)\n    train, test = train_test_split(output, random_state=42)\n    train.to_csv(os.path.join(args.outputpath, 'train/train.csv'), index=False)\n    test.to_csv(os.path.join(args.outputpath, 'test/test.csv'), index=False)\n    # Store the pipeline\n    dump(pipe, os.path.join(args.outputpath, 'pipeline/preproc-pipeline.joblib'))\n","sub_path":"sagemaker-pipelines/tabular/sklearn-scheduled-training-and-inference/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"582494774","text":"#A single-layer network with 32*32 = 1024 hidden nodes. The first layer used a ReLU\n#activation function, with the final softmax layer containing one node per class (10 nodes).\n#With a batch size of 32 and 32 epochs, we were able to achieve a testing\n#accuracy of 0.5456 (54.56%), taking a total of 1,541 seconds or 25.68 minutes, with an epoch\n#taking an average of 48.1563 seconds or 0.80 minutes, and a loss of\n#1.30503999999999989 (categorical cross-entropy, optimized with SGD).\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.utils import np_utils\n\n#The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,\n#with 6000 images per class. 
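Each image carries 3 colour channels, so the\n#reshaping below flattens each one into a 32*32*3 = 3072-dimensional vector.\n#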
There are 50000 training images and 10000 test images.\nbatch_size = 32 #Number of images used in each optimization step\nnum_classes = 10 #One class per digit\nepochs = 32 #Number of times the whole data is used to learn\ntest_img_count = 10000 #number of images in test data set\ntrain_img_count = 50000 #number of images in training dataset\nnum_pixles = 32*32*3 #32x32 imgs with 3 channel color\n\n(X_train, y_train), (X_test, y_test) = cifar10.load_data() #load data from mnist\nX_test = X_test.reshape(test_img_count, num_pixles)\nX_train = X_train.reshape(train_img_count, num_pixles)\n\n#normalize X values\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n# convert class vectors to binary class matrices (ie one-hot vectors)\nY_train = np_utils.to_categorical(y_train, num_classes)\nY_test = np_utils.to_categorical(y_test, num_classes)\n\n#Define the model achitecture\nmodel = Sequential()\n#layer has num_pixles nodes with an input of num_pixles inputs\nmodel.add(Dense(32*32,\n activation = 'relu',\n input_shape=(num_pixles,)))\n#Dropout consists in randomly setting a fraction rate of input units\n#to 0 at each update during training time, which helps prevent overfitting.\nmodel.add(Dropout(0.5))\n#final layer used to catagorize the output. Each output is from\n#a specific catagory is the probability that the\n#that the training image is a specific catagory\nmodel.add(Dense(10,\n activation = 'softmax'))\n\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=\"sgd\",\n metrics=[\"accuracy\"])\n\n#let the architecture learn the correct params needed to be accurate to labels\nmodel.fit(X_train,\n Y_train,\n batch_size,\n epochs,\n verbose=1,\n validation_data=(X_test, Y_test))\n\n#Evaluate how the model does on the test set\nscore = model.evaluate(X_test,\n Y_test,\n verbose=1)\nprint(score)","sub_path":"CIFAR-NN.py","file_name":"CIFAR-NN.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"371650622","text":"# -*- coding: utf-8 -*-\n\"\"\"Test suite for axonapi.api.assets.\"\"\"\nimport copy\nimport datetime\nimport json\n\nimport pytest\nfrom axonius_api_client.api import json_api\nfrom axonius_api_client.constants.api import GUI_PAGE_SIZES\nfrom axonius_api_client.constants.general import SIMPLE\nfrom axonius_api_client.exceptions import ApiError, NotFoundError\n\nfrom ...meta import QUERIES\nfrom ...utils import get_schema\n\n\nclass SavedQueryPrivate:\n def test_private_get(self, apiobj):\n result = apiobj.saved_query._get()\n assert isinstance(result, list)\n for item in result:\n assert isinstance(item, json_api.saved_queries.SavedQuery)\n validate_sq(item.to_dict())\n\n\nclass SavedQueryPublic:\n def test_get_no_generator(self, apiobj):\n rows = apiobj.saved_query.get(generator=False)\n assert not rows.__class__.__name__ == \"generator\"\n assert isinstance(rows, list)\n for row in rows:\n assert isinstance(row, dict)\n validate_sq(row)\n\n def test_get_generator(self, apiobj):\n gen = apiobj.saved_query.get(generator=True)\n assert gen.__class__.__name__ == \"generator\"\n assert not isinstance(gen, list)\n\n rows = [x for x in gen]\n for row in rows:\n assert isinstance(row, dict)\n validate_sq(row)\n\n def test_get_tags(self, apiobj):\n tags = apiobj.saved_query.get_tags()\n assert isinstance(tags, list)\n for tag in tags:\n assert isinstance(tag, str)\n\n def test_get_by_name(self, apiobj):\n sq = 
apiobj.saved_query.get()[0]\n value = sq[\"name\"]\n row = apiobj.saved_query.get_by_name(value=value)\n assert isinstance(row, dict)\n assert row[\"name\"] == value\n\n def test_get_by_name_error(self, apiobj):\n value = \"badwolf_yyyyyyyyyyyy\"\n with pytest.raises(NotFoundError):\n apiobj.saved_query.get_by_name(value=value)\n\n def test_get_by_uuid(self, apiobj):\n sq = apiobj.saved_query.get()[0]\n value = sq[\"uuid\"]\n row = apiobj.saved_query.get_by_uuid(value=value)\n assert isinstance(row, dict)\n assert row[\"uuid\"] == value\n\n def test_get_by_uuid_error(self, apiobj):\n value = \"badwolf_xxxxxxxxxxxxx\"\n with pytest.raises(NotFoundError):\n apiobj.saved_query.get_by_uuid(value=value)\n\n def test_get_by_tags(self, apiobj):\n tags = [y for x in apiobj.saved_query.get() for y in x.get(\"tags\", [])]\n value = tags[0]\n rows = apiobj.saved_query.get_by_tags(value=value)\n assert isinstance(rows, list)\n for row in rows:\n assert isinstance(row, dict)\n assert value in row[\"tags\"]\n\n def test_get_by_tags_error(self, apiobj):\n value = \"badwolf_wwwwwwww\"\n with pytest.raises(NotFoundError):\n apiobj.saved_query.get_by_tags(value=value)\n\n @pytest.fixture(scope=\"class\")\n def sq_fixture(self, apiobj):\n get_schema(apiobj=apiobj, field=\"specific_data.data.last_seen\")\n field_simple = apiobj.FIELD_SIMPLE\n\n name = \"badwolf torked\"\n fields = [\"adapters\", \"last_seen\", \"id\", field_simple]\n\n sort_field = field_simple\n # colfilters = {field_simple: \"a\"}\n sort_desc = False\n gui_page_size = GUI_PAGE_SIZES[-1]\n tags = [\"badwolf1\", \"badwolf2\"]\n description = \"badwolf torked\"\n query = QUERIES[\"not_last_seen_day\"]\n\n try:\n apiobj.saved_query.delete_by_name(value=name)\n except NotFoundError:\n pass\n\n row = apiobj.saved_query.add(\n name=name,\n fields=fields,\n sort_field=sort_field,\n sort_descending=sort_desc,\n # column_filters=colfilters,\n gui_page_size=gui_page_size,\n tags=tags,\n description=description,\n query=query,\n )\n validate_sq(row)\n\n assert row[\"name\"] == name\n assert row[\"query_type\"] == \"saved\"\n assert row[\"tags\"] == tags\n assert row[\"description\"] == description\n assert row[\"private\"] is False\n assert row[\"view\"][\"query\"][\"filter\"] == query\n assert row[\"view\"][\"query\"][\"onlyExpressionsFilter\"] == query\n assert row[\"view\"][\"query\"][\"expressions\"] == []\n assert row[\"view\"][\"pageSize\"] == gui_page_size\n # assert row[\"view\"][\"colFilters\"] == colfilters\n assert row[\"view\"][\"sort\"][\"field\"] == sort_field\n assert row[\"view\"][\"sort\"][\"desc\"] == sort_desc\n\n yield row\n\n try:\n apiobj.saved_query.delete_by_name(value=name)\n except NotFoundError:\n pass\n\n def test_add_remove(self, apiobj, sq_fixture):\n row = apiobj.saved_query.delete_by_name(value=sq_fixture[\"name\"])\n assert isinstance(row, dict)\n assert row[\"uuid\"] == sq_fixture[\"uuid\"]\n\n with pytest.raises(NotFoundError):\n apiobj.saved_query.get_by_name(value=sq_fixture[\"name\"])\n\n def test_add_error_no_fields(self, apiobj):\n name = \"badwolf_nnnnnnnnnnnnn\"\n with pytest.raises(ApiError):\n apiobj.saved_query.add(name=name, fields_default=False)\n\n def test_add_error_bad_sort_field(self, apiobj):\n name = \"badwolf_sssssssssssss\"\n fields = \"last_seen\"\n sort_field = \"badwolf\"\n with pytest.raises(ApiError):\n apiobj.saved_query.add(name=name, fields=fields, sort_field=sort_field)\n\n def test_add_error_bad_colfilter(self, apiobj):\n name = \"badwolf_ttttttttttt\"\n fields = \"last_seen\"\n 
colfilters = {\"badwolf\": \"badwolf\"}\n with pytest.raises(ApiError):\n apiobj.saved_query.add(name=name, fields=fields, column_filters=colfilters)\n\n\nclass TestSavedQueryDevices(SavedQueryPrivate, SavedQueryPublic):\n @pytest.fixture(scope=\"class\")\n def apiobj(self, api_devices):\n return api_devices\n\n\nclass TestSavedQueryUsers(SavedQueryPrivate, SavedQueryPublic):\n @pytest.fixture(scope=\"class\")\n def apiobj(self, api_users):\n return api_users\n\n\ndef validate_qexpr(qexpr, asset):\n assert isinstance(qexpr, dict)\n\n compop = qexpr.pop(\"compOp\")\n assert isinstance(compop, str)\n\n field = qexpr.pop(\"field\")\n assert isinstance(field, str)\n\n idx = qexpr.pop(\"i\", 0)\n assert isinstance(idx, int)\n\n leftbracket = qexpr.pop(\"leftBracket\", 0)\n assert isinstance(leftbracket, (int, bool))\n\n rightbracket = qexpr.pop(\"rightBracket\", 0)\n assert isinstance(rightbracket, (int, bool))\n\n logicop = qexpr.pop(\"logicOp\")\n assert isinstance(logicop, str)\n\n notflag = qexpr.pop(\"not\")\n assert isinstance(notflag, bool)\n\n value = qexpr.pop(\"value\")\n assert isinstance(value, SIMPLE) or value is None\n\n obj = qexpr.pop(\"obj\", False)\n assert isinstance(obj, bool)\n\n nesteds = qexpr.pop(\"nested\", [])\n assert isinstance(nesteds, list)\n\n fieldtype = qexpr.pop(\"fieldType\", \"\")\n assert isinstance(fieldtype, str)\n\n children = qexpr.pop(\"children\", []) # new in 2.15\n assert isinstance(children, list) # new in 2.15\n\n filtered_adapters = qexpr.pop(\"filteredAdapters\", {})\n assert isinstance(filtered_adapters, dict) or filtered_adapters is None\n\n context = qexpr.pop(\"context\", \"\") # new in 2.15\n assert isinstance(context, str)\n\n timestamp = qexpr.pop(\"timestamp\", \"\")\n assert isinstance(timestamp, str)\n\n brackweight = qexpr.pop(\"bracketWeight\", 0)\n assert isinstance(brackweight, int)\n\n qfilter = qexpr.pop(\"filter\", \"\")\n assert isinstance(qfilter, str)\n\n for nested in nesteds:\n validate_nested(nested, asset)\n\n for child in children:\n validate_nested(child, asset)\n\n assert not qexpr, list(qexpr)\n\n\ndef validate_nested(nested, asset):\n assert isinstance(nested, dict)\n\n nfiltered_adapters = nested.pop(\"filteredAdapters\", {})\n assert isinstance(nfiltered_adapters, dict) or nfiltered_adapters is None\n\n ncondition = nested.pop(\"condition\")\n assert isinstance(ncondition, str)\n\n nexpr = nested.pop(\"expression\")\n assert isinstance(nexpr, dict)\n\n nidx = nested.pop(\"i\")\n assert isinstance(nidx, int)\n\n assert not nested, list(nested)\n\n\ndef validate_sq(asset):\n asset = copy.deepcopy(asset)\n assert isinstance(asset, dict)\n\n original = copy.deepcopy(asset)\n assert original == asset\n\n assert asset[\"query_type\"] in [\"saved\"]\n\n date_fetched = asset.pop(\"date_fetched\")\n assert isinstance(date_fetched, str)\n\n last_updated = asset.pop(\"last_updated\", None)\n assert isinstance(last_updated, (str, datetime.datetime, type(None)))\n\n name = asset.pop(\"name\")\n assert isinstance(name, str)\n\n query_type = asset.pop(\"query_type\")\n assert isinstance(query_type, str)\n\n user_id = asset.pop(\"user_id\")\n assert isinstance(user_id, str)\n\n uuid = asset.pop(\"uuid\")\n assert isinstance(uuid, str)\n\n description = asset.pop(\"description\")\n assert isinstance(description, str) or description is None\n\n timestamp = asset.pop(\"timestamp\", \"\")\n assert isinstance(timestamp, (str, type(None)))\n\n archived = asset.pop(\"archived\", False) # added in 2.15\n assert isinstance(archived, 
bool)\n\n updated_by_str = asset.pop(\"updated_by\")\n assert isinstance(updated_by_str, str)\n\n updated_by = json.loads(updated_by_str)\n assert isinstance(updated_by, dict)\n\n updated_by_deleted = updated_by.pop(\"deleted\")\n assert isinstance(updated_by_deleted, bool)\n\n # 4.5\n updated_by_is_first_login = updated_by.pop(\"is_first_login\", False)\n assert isinstance(updated_by_is_first_login, bool)\n\n # 4.5\n updated_by_permanent = updated_by.pop(\"permanent\", False)\n assert isinstance(updated_by_permanent, bool)\n\n updated_str_keys_req = [\n \"first_name\",\n \"last_name\",\n \"source\",\n \"user_name\",\n ]\n for updated_str_key in updated_str_keys_req:\n val = updated_by.pop(updated_str_key)\n assert isinstance(val, (str, int, float)) or val is None\n\n updated_str_keys_opt = [\n \"_id\",\n \"last_updated\",\n \"password\",\n \"pic_name\",\n \"role_id\",\n \"salt\",\n ]\n for updated_str_key in updated_str_keys_opt:\n val = updated_by.pop(updated_str_key, None)\n assert isinstance(val, (str, int, float)) or val is None\n\n assert not updated_by\n\n tags = asset.pop(\"tags\", [])\n assert isinstance(tags, list)\n for tag in tags:\n assert isinstance(tag, str)\n\n predefined = asset.pop(\"predefined\", False)\n assert isinstance(predefined, bool)\n\n private = asset.pop(\"private\", False)\n assert isinstance(private, bool)\n\n view = asset.pop(\"view\")\n assert isinstance(view, dict)\n\n colsizes = view.pop(\"coloumnSizes\", [])\n assert isinstance(colsizes, list)\n\n for x in colsizes:\n assert isinstance(x, int)\n\n fields = view.pop(\"fields\")\n assert isinstance(fields, list)\n\n for x in fields:\n assert isinstance(x, str)\n\n page = view.pop(\"page\", 0)\n assert isinstance(page, int)\n\n pagesize = view.pop(\"pageSize\", 0)\n assert isinstance(pagesize, int)\n\n sort = view.pop(\"sort\")\n assert isinstance(sort, dict)\n\n sort_desc = sort.pop(\"desc\")\n assert isinstance(sort_desc, bool)\n\n sort_field = sort.pop(\"field\")\n assert isinstance(sort_field, str)\n\n query = view.pop(\"query\")\n assert isinstance(query, dict)\n\n \"\"\" changed in 4.5\n colfilters = view.pop(\"colFilters\", {})\n assert isinstance(colfilters, dict)\n for k, v in colfilters.items():\n assert isinstance(k, str)\n assert isinstance(v, str)\n \"\"\"\n\n # 4.5\n \"\"\" structure:\n [\n {\n \"columnFilter\": {\n \"aqlExpression\": '(\"specific_data.data.name\" == ' 'regexMatch(\"a\", \"i\"))',\n \"arrayFields\": [],\n \"complexNestedFields\": [],\n \"complexParentToUnwind\": None,\n \"fieldPath\": \"specific_data.data.name\",\n \"fieldType\": \"string\",\n \"filterExpressions\": [\n {\n \"bracketWeight\": 0,\n \"children\": [],\n \"compOp\": \"columnFilterContains\",\n \"field\": \"specific_data.data.name\",\n \"fieldType\": \"axonius\",\n \"filter\": '(\"specific_data.data.name\" ' '== regexMatch(\"a\", \"i\"))',\n \"leftBracket\": 0,\n \"logicOp\": \"\",\n \"not\": False,\n \"rightBracket\": 0,\n \"value\": \"a\",\n }\n ],\n \"isComplexField\": False,\n \"isComplexNestedField\": False,\n \"nestedFilteredFields\": [],\n },\n \"fieldPath\": \"specific_data.data.name\",\n }\n ]\n \"\"\"\n colfilters = view.pop(\"colFilters\", [])\n assert isinstance(colfilters, list)\n for colfilter in colfilters:\n assert isinstance(colfilter, dict)\n\n qfilter = query.pop(\"filter\")\n assert isinstance(qfilter, str) or qfilter is None\n\n qexprs = query.pop(\"expressions\", [])\n assert isinstance(qexprs, list)\n\n qmeta = query.pop(\"meta\", {})\n assert isinstance(qmeta, dict)\n\n qonlyexprfilter = 
query.pop(\"onlyExpressionsFilter\", \"\")\n assert isinstance(qonlyexprfilter, str)\n\n qsearch = query.pop(\"search\", None)\n assert qsearch is None or isinstance(qsearch, str)\n\n historical = view.pop(\"historical\", None)\n assert historical is None or isinstance(historical, SIMPLE)\n\n \"\"\" changed in 4.5\n # 3.6+\n excluded_adapters = view.pop(\"colExcludedAdapters\", {})\n assert isinstance(excluded_adapters, dict)\n \"\"\"\n # 4.5\n \"\"\" structure\n [{\"exclude\": [\"chef_adapter\"], \"fieldPath\": \"specific_data.data.name\"}]\n \"\"\"\n excluded_adapters = view.pop(\"colExcludedAdapters\", [])\n assert isinstance(excluded_adapters, list)\n for excluded_adapter in excluded_adapters:\n assert isinstance(excluded_adapter, dict)\n\n # 4.0\n always_cached = asset.pop(\"always_cached\")\n assert isinstance(always_cached, bool)\n asset_scope = asset.pop(\"asset_scope\")\n assert isinstance(asset_scope, bool)\n is_asset_scope_query_ready = asset.pop(\"is_asset_scope_query_ready\")\n assert isinstance(is_asset_scope_query_ready, bool)\n is_referenced = asset.pop(\"is_referenced\")\n assert isinstance(is_referenced, bool)\n _id = asset.pop(\"id\")\n assert isinstance(_id, str) and _id\n\n for qexpr in qexprs:\n validate_qexpr(qexpr, asset)\n\n assert not query, list(query)\n assert not sort, list(sort)\n assert not view, list(view)\n assert not asset, list(asset)\n","sub_path":"axonius_api_client/tests/tests_api/tests_assets/test_saved_query.py","file_name":"test_saved_query.py","file_ext":"py","file_size_in_byte":14735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241457277","text":"from flask import abort, Blueprint, make_response, request, redirect, json\r\n\r\nfrom libs.admin_authentication import authenticate_researcher_study_access\r\nfrom libs.json_logic import do_validate_survey\r\nfrom database.study_models import Survey\r\n\r\nsurvey_api = Blueprint('survey_api', __name__)\r\n\r\n################################################################################\r\n############################## Creation/Deletion ###############################\r\n################################################################################\r\n\r\n\r\n@survey_api.route('/create_survey/<string:study_id>/<string:survey_type>', methods=['GET', 'POST'])\r\n@authenticate_researcher_study_access\r\ndef create_survey(study_id=None, survey_type='tracking_survey'):\r\n new_survey = Survey.create_with_settings(study_id=study_id, survey_type=survey_type)\r\n return redirect('/edit_survey/{:d}'.format(new_survey.id))\r\n\r\n\r\n@survey_api.route('/delete_survey/<string:survey_id>', methods=['GET', 'POST'])\r\n@authenticate_researcher_study_access\r\ndef delete_survey(survey_id=None):\r\n try:\r\n survey = Survey.objects.get(pk=survey_id)\r\n except Survey.DoesNotExist:\r\n return abort(404)\r\n\r\n study_id = survey.study_id\r\n survey.mark_deleted()\r\n return redirect('/view_study/{:d}'.format(study_id))\r\n\r\n################################################################################\r\n############################# Setters and Editors ##############################\r\n################################################################################\r\n\r\n\r\n@survey_api.route('/update_survey/<string:survey_id>', methods=['GET', 'POST'])\r\n@authenticate_researcher_study_access\r\ndef update_survey(survey_id=None):\r\n try:\r\n survey = Survey.objects.get(pk=survey_id)\r\n except Survey.DoesNotExist:\r\n return abort(404)\r\n\r\n # 
BUG: There is an unknown situation where the frontend sends a string requiring an extra\r\n # deserialization operation, causing 'content' to be a string containing a json string\r\n # containing a json list, instead of just a string containing a json list.\r\n # print(request.values.get('content', ''))\r\n json_content = request.values.get('content')\r\n content = None\r\n\r\n # Weird corner case: the Image survey does not have any content associated with it. Therefore,\r\n # when you try and make a post request to save any settings you have, it gives you a 500 error\r\n # because the request.values.get('content') returns a json item of \"\". The recursive_survey_content_json_decode\r\n # function is not able to decode 2 double quotations marks. This is why retrieving the json_content from the post\r\n # request is put outside of the decode statement. HOWEVER, evaluating json_content == \"\" returns false, since the\r\n # LITERAL value of the json_content is 2 quotation marks, NOT an empty string. Thus, we need to compare the\r\n # json_content to a string of 2 quotation marks (ie. '\"\"')\r\n if json_content != '\"\"':\r\n content = recursive_survey_content_json_decode(json_content)\r\n content = make_slider_min_max_values_strings(content)\r\n \r\n if survey.survey_type == Survey.TRACKING_SURVEY:\r\n errors = do_validate_survey(content)\r\n if len(errors) > 1:\r\n return make_response(json.dumps(errors), 400)\r\n \r\n # These three all stay JSON when added to survey\r\n content = json.dumps(content)\r\n timings = request.values['timings']\r\n settings = request.values['settings']\r\n survey.update(content=content, timings=timings, settings=settings)\r\n \r\n return make_response(\"\", 201)\r\n\r\n\r\ndef recursive_survey_content_json_decode(json_entity):\r\n \"\"\" Decodes through up to 100 attempts a json entity until it has deserialized to a list. \"\"\"\r\n count = 100\r\n decoded_json = None\r\n while not isinstance(decoded_json, list):\r\n count -= 1\r\n if count < 0:\r\n raise Exception(\"could not decode json entity to list\")\r\n decoded_json = json.loads(json_entity)\r\n return decoded_json\r\n\r\n\r\ndef make_slider_min_max_values_strings(json_content):\r\n \"\"\" Turns min/max int values into strings, because the iOS app expects strings. This is for\r\n backwards compatibility; when all the iOS apps involved in studies can handle ints,\r\n we can remove this function. \"\"\"\r\n for question in json_content:\r\n if 'max' in question:\r\n question['max'] = str(question['max'])\r\n if 'min' in question:\r\n question['min'] = str(question['min'])\r\n return json_content\r\n","sub_path":"api/survey_api.py","file_name":"survey_api.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595306366","text":"import sys\nimport click\nfrom . 
import _cli as cli\nfrom astropy.io import fits\nimport glob\nfrom tkinter.filedialog import askdirectory\n\ndef read(directory):\n \"\"\"\n Takes a directory containing fits files and returns them as a list\n \"\"\"\n paths = glob.glob(\"{}/*.fits*\".format(directory))\n hduls = [fits.open(p) for p in paths]\n return hduls\n\ndef write(hduls, directory, format_):\n \"\"\"\n Writes all given hduls into the directory specified\n \"\"\"\n import os\n for i, h in enumerate(hduls):\n path = os.path.join(directory, format_.format(number=i))\n click.echo(f\"writing hdul to {path}\")\n h.writeto(path)\n return hduls\n\n@cli.cli.command(\"write\")\n@click.option('-d', '--directory', type=str, help=\"Specify path to directory to save fitsfiles.\", default=\"./\")\n@click.option('-f', '--format', \"format_\", type=str, help=\"Specify string format for filename.\", default=\"{number}.fits\")\n@cli.operator\n## write function wrapper\ndef write_cmd(hduls, directory, format_):\n \"\"\"\n Writes all given hduls into the directory specified\n \"\"\"\n return write(hduls, directory, format_)\n\n@cli.cli.command(\"read\")\n@click.option('-d', '--directory', type=str, help=\"Specify path to directory of fitsfiles.\", required=False)\n@cli.generator\n## read function wrapper\ndef read_cmd(directory):\n \"\"\"\n Takes a directory containing fits files and returns them as a list\n \"\"\"\n # add try (evaluate if the try is efficient)\n if directory is None:\n try:\n directory = askdirectory()\n except:\n click.echo(\"Visual file dialog does not exist, please use option -d and specify path to directory to read fitsfiles.\", err=True)\n sys.exit()\n hduls = read(directory)\n if hduls:\n return hduls\n else:\n sys.exit(f\"Could not open fitsfiles from directory {directory}\")\n","sub_path":"sdi/fitsio.py","file_name":"fitsio.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46888370","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 11 16:14:51 2019\n@author: tiago\n\"\"\"\n\n#%% Lecture 001\nrunfile('dip.py')\n\nfilename = os.path.join(folder, \"baboon.png\")\n\n#%% Read a grayscale image\nimg = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\n\nif type(img) is not np.ndarray:\n print(\"Error: Image not found!\")\n\nplt.imshow(img)\nplt.show()\n# cv2.namedWindow(\"img\", cv2.WINDOW_KEEPRATIO)\n# cv2.imshow(\"img\", img)\n# #cv2.waitKey(0)\n# while True:\n# if 0xFF & cv2.waitKey(1) == ord('q'):\n# break\n# cv2.destroyAllWindows()\n\n#%% Read a color image\nimg = cv2.imread(filename, cv2.IMREAD_COLOR)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nplt.imshow(img)\nplt.show()\n\n#%%\nbgr = cv2.imread(filename, cv2.IMREAD_COLOR)\nrgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\nplt.imshow(rgb)\nplt.show()\n\n#%% Splitting RGB channels\nbgr = cv2.imread(filename, cv2.IMREAD_COLOR)\n\nrgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\nrgb_lst = cv2.split(rgb)\n\nplt.figure(1, figsize=(18,9)), plt.clf\nplt.subplot(232), plt.imshow(rgb), plt.title('RGB')\nplt.subplot(234), plt.imshow(rgb_lst[0], cmap='gray'), plt.title('R')\nplt.subplot(235), plt.imshow(rgb_lst[1], cmap='gray'), plt.title('G')\nplt.subplot(236), plt.imshow(rgb_lst[2], cmap='gray'), plt.title('B')\nplt.show()\n\n#%% Using a function handle\nbgr2rgb = lambda x : cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\nbgr = cv2.imread(filename, cv2.IMREAD_COLOR)\nplt.figure(2)\nplt.imshow(bgr2rgb(bgr))\nplt.show()\n\n#%% Splitting RGB channels\nbgr = 
cv2.imread(filename, cv2.IMREAD_COLOR)\nrgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\nr, g, b = cv2.split(rgb)\n\nplt.figure(1), plt.clf\nplt.subplot(232), plt.imshow(rgb), plt.title('RGB')\nplt.subplot(234), plt.imshow(r, cmap='gray'), plt.title('R')\nplt.subplot(235), plt.imshow(g, cmap='gray'), plt.title('G')\nplt.subplot(236), plt.imshow(b, cmap='gray'), plt.title('B')\nplt.show()\n","sub_path":"db/aula01.py","file_name":"aula01.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"344669086","text":"word = input(\"Enter the word\")\n#print(type(word))\n#print(word)\nbasic_mark = '1'\ni = 0\nstatus = []\nj = 0\nfor a in word:\n if a != basic_mark and i == 0: #Warunek wykona sie tylko dla pierwszego a i ustanawia nasze b\n status.append(a)\n b = a\n i+=1\n #print(\"Wykonal sie if\")\n elif a == b:\n j+=1\n #print(\"Wykonal sie elif 1\")\n elif a != b:\n if j != 0:\n status.append(j+1)\n b = a\n j = 0\n #print(\"Wykonal sie elif 2\", b)\n status.append(b)\n else:\n status.append(a)\n #print(\"Wykonal sie else\")\n#print(status)\nstatus.append(j+1)\n#print(status)\nfor b in status:\n print(b, end ='')","sub_path":"Letters.py","file_name":"Letters.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591236898","text":"# coding: utf8 \nimport json\nfrom datetime import datetime, timedelta\nfrom base_request_handler import BaseRequestHandler\nfrom buyer_model import BuyerModel\nfrom bee_errors import *\n\n\nclass BuyerRequestHandler(BaseRequestHandler):\n \"\"\"处理来自买家的请求\"\"\"\n def __init__(self, *args, **kwargs):\n super(BuyerRequestHandler, self).__init__(*args, **kwargs)\n self.model = BuyerModel()\n\n def _pre_do(self, option):\n user_id = self.get_param('user_id')\n if user_id:\n self.model.set_user_id(user_id)\n\n def _do(self, option):\n if option == '_get_mp_session':\n self._get_mp_session()\n elif option == '_wx_enroll':\n self._wx_enroll()\n elif option == '_get_waiters':\n self._get_waiters()\n elif option == '_get_user_info':\n self._get_user_info()\n elif option == '_enroll':\n self._enroll()\n elif option == '_get_categories':\n self._get_categories()\n elif option == '_get_items':\n self._get_items()\n elif option == '_commit_orders':\n self._commit_orders()\n elif option == '_get_orders':\n self._get_orders()\n elif option == '_cancel_order':\n self._cancel_order()\n else:\n self.write_fail(ERR_INVALID_OPTION)\n\n def _get_mp_session(self):\n \"\"\"获取微信用户session\"\"\"\n js_code = self.get_param('js_code', essential=True)\n # fixme\n self.write_success({'openid': 'foo', 'unionid': 'bar', \n 'session_key': 'foobar'})\n \n def _wx_enroll(self):\n \"\"\"获取微信用户手机号码\"\"\"\n session_key = self.get_param('session_key', essential=True)\n iv = self.get_param('iv', essential=True)\n encrypted_data = self.get_param('encrypted_data', essential=True)\n # fixme\n self.write_success({'phone_number': '18520191011'})\n\n def _get_waiters(self):\n \"\"\"获取服务点列表\"\"\"\n region = self.get_param('region', essential=True)\n waiters = self.model.get_waiters(region)\n self.write_success({'waiters': waiters}) \n \n def _get_verification_code(self):\n \"\"\"获取短信验证码\"\"\"\n phone_number = self.get_param('phone_number', essential=True)\n self.sms_login_manager.send_verification_code(phone_number)\n self.write_success()\n\n def _get_user_info(self):\n \"\"\"获取用户信息\"\"\"\n mp_union_id = self.get_param('mp_union_id', 
essential=True)\n info = self.model.get_user_info(mp_union_id)\n if not info:\n return self.write_fail(ERR_USER_NOT_EXIST)\n self.write_success({'user': info})\n\n def _enroll(self):\n \"\"\"注册\"\"\"\n mp_union_id = self.get_param('mp_union_id', essential=True)\n number = self.get_param('phone_number', essential=True)\n code = self.get_param('verification_code', essential=True)\n name = self.get_param('name', essential=True)\n avatar = self.get_param('avatar', essential=True)\n #if not self.sms_login_manager.check_verification_code(number, code):\n # return self.write_fail(ERR_WRONG_VERIFICATION_CODE)\n user = self.model.enroll(mp_union_id, number, name, avatar)\n if not user:\n return self.write_fail(ERR_ENROLL_FAILED)\n info = self.model.get_user_info(mp_union_id)\n if not info:\n return self.write_fail(ERR_USER_NOT_EXIST)\n self.write_success({'user': info})\n\n def _get_categories(self): \n \"\"\"获取商品类目\"\"\"\n waiter_id = self.get_param('waiter_id', essential=True, rtype=int)\n categories = self.model.get_categories(waiter_id)\n self.write_success({'categories': categories}) \n \n def _get_items(self): \n \"\"\"获取类目下的商品\"\"\"\n waiter_id = self.get_param('waiter_id', essential=True, rtype=int)\n category_id = self.get_param('category_id', essential=True, rtype=int)\n items = self.model.get_items(waiter_id, category_id)\n self.write_success({'items': items})\n\n def _commit_orders(self): \n \"\"\"提交新订单\"\"\"\n waiter_id = self.get_param('waiter_id', essential=True, rtype=int)\n orders = self.get_param('orders', essential=True)\n orders = json.loads(orders)\n err = self.model.commit_orders(waiter_id, orders)\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n\n def _get_orders(self): \n \"\"\"获取已下单订单\"\"\"\n orders = self.model.get_orders()\n self.write_success({'orders': orders})\n \n def _cancel_order(self):\n \"\"\"取消订单\"\"\"\n order_id = self.get_param('order_id', essential=True, rtype=int)\n err = self.model.cancel_order(order_id)\n if err:\n self.write_fail(err[0], err[1])\n else:\n self.write_success()\n \n","sub_path":"app/server/buyer_request_handler.py","file_name":"buyer_request_handler.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573924838","text":"import urllib\nfrom bs4 import BeautifulSoup\nfrom logzero import logger\n\ndef get_urldict(url_str):\n html_str = urllib.request.urlopen(url_str).read() # urlopen这个方法返回的是bytes对象,需要解码为str\n soup = BeautifulSoup(html_str,'lxml') # 传入一个字符串或者文件句柄初始化靓汤\n #url_lists = soup.find_all(\"table\")1\n url_lists = soup.select('.tbspan')[0]\n return url_lists\n\n\nurls = 'http://www.dytt8.net/html/gndy/dyzz/index.html'\naa = get_urldict(urls)\n\nprint(aa)\n\n\n\n\n","sub_path":"FilmHeaven/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"274117175","text":"from apscheduler.schedulers.blocking import BlockingScheduler\n\nsched = BlockingScheduler(timezone=\"Europe/Istanbul\")\n\n@sched.scheduled_job('cron', day_of_week='mon-fr', hour=10)\ndef scheduled_job():\n print('Exec 1000...')\n exec(open('HerGun50CentiTurkLirasi.py').read())\n\n@sched.scheduled_job('cron', day_of_week='mon-fr', hour=18)\ndef scheduled_job():\n print('Exec 1800...')\n 
exec(open('HerGun50CentiTurkLirasi.py').read())\n\nsched.start()\n","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"624771324","text":"\"\"\"\nCopyright (c) 2004-Present Pivotal Software, Inc.\n\nThis program and the accompanying materials are made available under\nthe terms of the under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport time\n\nimport unittest2 as unittest\n\nfrom mpp.lib.gplog import GpLog, GpLogException, _DEFAULT_OUT_FILE\nfrom mpp.lib.PSQL import PSQL\n\nclass GpLogRegressionTests(unittest.TestCase):\n\n def test_gather_log_to_default_file(self):\n if os.path.exists(_DEFAULT_OUT_FILE):\n os.remove(_DEFAULT_OUT_FILE)\n self.assertFalse(os.path.exists(_DEFAULT_OUT_FILE))\n start_time = time.time()\n PSQL.run_sql_command(\"select pg_sleep(2)\")\n end_time = time.time()\n GpLog.gather_log(start_time=start_time, end_time=end_time)\n self.assertTrue(os.path.exists(_DEFAULT_OUT_FILE))\n self.assertTrue(os.path.getsize(_DEFAULT_OUT_FILE) > 0)\n\n def test_gather_log_out_file(self):\n out_file = '/tmp/cluster2.logs'\n if os.path.exists(out_file):\n os.remove(out_file)\n self.assertFalse(os.path.exists(out_file))\n start_time = time.time()\n time.sleep(2)\n end_time = time.time()\n GpLog.gather_log(start_time=start_time, end_time=end_time, out_file=out_file)\n self.assertTrue(os.path.exists(out_file))\n self.assertTrue(os.path.getsize(out_file) > 0)\n\n def test_check_log(self):\n start_time = time.time()\n PSQL.run_sql_command(\"SELECT * from some_table_that_does_not_exist_to_generate_errors_in_logs\")\n time.sleep(2)\n end_time = time.time()\n self.assertTrue(GpLog.check_log_for_errors(start_time, end_time))\n \n \n \n \n","sub_path":"src/test/tinc/tincrepo/mpp/lib/regress/regress_gplog.py","file_name":"regress_gplog.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269150148","text":"'''\nhttps://docs.python.org/3/library/argparse.html#module-argparse\n'''\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('Greeting', type=str,\n help='This is the string that greets the program!')\nparser.add_argument('integers', metavar='N', type=int, nargs='+',\n help='an integer for the accumulator')\nparser.add_argument('-letters', metavar='L', type=str, nargs='+',\n help='a set of letters')\nparser.add_argument('-names', type=str, nargs='+',\n help='Names of people')\nparser.add_argument('--sum', dest='accumulate', action='store_const',\n const=sum, default=max,\n help='sum the integers (default: find the max)')\n\nargs = 
parser.parse_args()\nprint(args.accumulate(args.integers))\nprint(args.letters)\nprint(args.names)","sub_path":"tutorials/terminal_program/terminal_program_3.py","file_name":"terminal_program_3.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88015215","text":"import sys\nimport os\nimport argparse\nimport collections\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport tensorflow.keras.backend as K\n\nuer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, uer_dir)\n\nfrom scripts.convert_bert_from_original_tf_to_uer import tensors_to_transopse\n\n\ndef assign_tf_var(tensor: np.ndarray, name: str):\n tf_var = tf.get_variable(dtype=tensor.dtype, shape=tensor.shape, name=name)\n tf.keras.backend.set_value(tf_var, tensor)\n return tf_var\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--layers_num\", type=int, default=12,\n help=\".\")\n parser.add_argument(\"--input_model_path\", type=str, default=\"models/input_model.bin\",\n help=\".\")\n parser.add_argument(\"--output_model_path\", type=str, default=\"models/output_model.ckpt\",\n help=\".\")\n parser.add_argument(\"--type\", choices=[\"bert\", \"mlm\"], default=\"bert\",\n help=\"The training target of the pretraining model.\")\n\n args = parser.parse_args()\n\n input_model = torch.load(args.input_model_path, map_location=\"cpu\")\n\n session = tf.Session()\n K.set_session(session)\n\n output_model = collections.OrderedDict()\n\n output_model[\"bert/embeddings/word_embeddings\"] = input_model[\"embedding.word.embedding.weight\"]\n output_model[\"bert/embeddings/position_embeddings\"] = input_model[\"embedding.pos.embedding.weight\"]\n output_model[\"bert/embeddings/token_type_embeddings\"] = input_model[\"embedding.seg.embedding.weight\"][1:, :]\n output_model[\"bert/embeddings/LayerNorm/gamma\"] = input_model[\"embedding.layer_norm.gamma\"]\n output_model[\"bert/embeddings/LayerNorm/beta\"] = input_model[\"embedding.layer_norm.beta\"]\n\n for i in range(args.layers_num):\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/self/query/kernel\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.linear_layers.0.weight\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/self/query/bias\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.linear_layers.0.bias\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/self/key/kernel\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.linear_layers.1.weight\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/self/key/bias\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.linear_layers.1.bias\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/self/value/kernel\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.linear_layers.2.weight\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/self/value/bias\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.linear_layers.2.bias\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/output/dense/kernel\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".self_attn.final_linear.weight\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/output/dense/bias\"] = \\\n input_model[\"encoder.transformer.\" + 
str(i) + \".self_attn.final_linear.bias\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/output/LayerNorm/gamma\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".layer_norm_1.gamma\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/attention/output/LayerNorm/beta\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".layer_norm_1.beta\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/intermediate/dense/kernel\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".feed_forward.linear_1.weight\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/intermediate/dense/bias\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".feed_forward.linear_1.bias\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/output/dense/kernel\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".feed_forward.linear_2.weight\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/output/dense/bias\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".feed_forward.linear_2.bias\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/output/LayerNorm/gamma\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".layer_norm_2.gamma\"]\n output_model[\"bert/encoder/layer_\" + str(i) + \"/output/LayerNorm/beta\"] = \\\n input_model[\"encoder.transformer.\" + str(i) + \".layer_norm_2.beta\"]\n \n if args.type == \"bert\":\n output_model[\"bert/pooler/dense/kernel\"] = input_model[\"target.sp.linear_1.weight\"]\n output_model[\"bert/pooler/dense/bias\"] = input_model[\"target.sp.linear_1.bias\"]\n output_model[\"cls/seq_relationship/output_weights\"] = input_model[\"target.sp.linear_2.weight\"]\n output_model[\"cls/seq_relationship/output_bias\"] = input_model[\"target.sp.linear_2.bias\"]\n output_model[\"cls/predictions/transform/dense/kernel\"] = input_model[\"target.mlm.linear_1.weight\"]\n output_model[\"cls/predictions/transform/dense/bias\"] = input_model[\"target.mlm.linear_1.bias\"]\n output_model[\"cls/predictions/transform/LayerNorm/gamma\"] = input_model[\"target.layer_norm.gamma\"]\n output_model[\"cls/predictions/transform/LayerNorm/beta\"] = input_model[\"target.layer_norm.beta\"]\n output_model[\"cls/predictions/output_bias\"] = input_model[\"target.mlm.linear_2.bias\"]\n\n tf_vars = []\n\n for k, v in output_model.items():\n tf_name = k\n torch_tensor = v.cpu().numpy()\n if any([x in k for x in tensors_to_transopse]):\n torch_tensor = torch_tensor.T\n tf_tensor = assign_tf_var(tensor=torch_tensor, name=tf_name)\n tf_vars.append(tf_tensor)\n print(\"{0}{1}initialized\".format(tf_name, \" \" * (60 - len(tf_name))))\n\n saver = tf.train.Saver(tf_vars)\n saver.save(session, args.output_model_path)\n K.clear_session()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/convert_bert_from_uer_to_original_tf.py","file_name":"convert_bert_from_uer_to_original_tf.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"43689666","text":"# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>\n\n\"\"\"Test should_create_event_for method.\"\"\"\n\nfrom datetime import date\nimport ddt\nfrom freezegun import freeze_time\n\nfrom ggrc.models import all_models\nfrom ggrc.gcalendar import calendar_event_builder\nfrom integration.ggrc_workflows.models import factories as wf_factories\nfrom integration.ggrc import TestCase\n\n\n# pylint: disable=protected-access\n@ddt.ddt\nclass 
TestShouldCreateEventForTask(TestCase):\n \"\"\"Tests for should_create_event_for method.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test.\"\"\"\n super(TestShouldCreateEventForTask, self).setUp()\n self.client.get(\"/login\")\n self.builder = calendar_event_builder.CalendarEventBuilder()\n\n @ddt.data((u\"Deprecated\", False, False),\n (u\"In Progress\", False, True),\n (u\"Assigned\", False, True),\n (u\"Finished\", False, False),\n\n (u\"Declined\", True, True),\n (u\"Verified\", True, False),\n (u\"Deprecated\", True, False),\n (u\"In Progress\", True, True),\n (u\"Finished\", True, True),\n (u\"Assigned\", True, True))\n @ddt.unpack\n def test_task_status(\n self, task_status, is_verification_needed, should_create_event\n ):\n \"\"\"Check that the event should be created for specified task statuses.\"\"\"\n with freeze_time(\"2015-01-1 12:00:00\"):\n cycle = wf_factories.CycleFactory(\n is_verification_needed=is_verification_needed,\n )\n task = wf_factories.CycleTaskGroupObjectTaskFactory(\n status=task_status,\n end_date=date(2015, 1, 5),\n cycle=cycle,\n )\n self.assertEquals(self.builder._should_create_event_for(task),\n should_create_event)\n\n def test_overdue_task(self):\n \"\"\"Check that the event should not be created overdue tasks.\"\"\"\n with freeze_time(\"2015-01-05 12:00:00\"):\n task = wf_factories.CycleTaskGroupObjectTaskFactory(\n status=u\"In Progress\",\n end_date=date(2015, 1, 1),\n )\n self.assertEquals(self.builder._should_create_event_for(task), False)\n\n def test_is_in_history_task(self):\n \"\"\"Check that the event should not be created is_in_history tasks.\"\"\"\n with freeze_time(\"2015-01-01 12:00:00\"):\n cycle = wf_factories.CycleFactory(is_current=False)\n task = wf_factories.CycleTaskGroupObjectTaskFactory(\n status=u\"In Progress\",\n end_date=date(2015, 1, 5),\n cycle=cycle,\n )\n self.assertEquals(self.builder._should_create_event_for(task), False)\n\n @ddt.data((False, False), (True, True))\n @ddt.unpack\n def test_task_archived(self, recurrence, should_create_event):\n \"\"\"Check creation of event based on workflow archived states.\"\"\"\n with freeze_time(\"2015-01-01 12:00:00\"):\n workflow = wf_factories.WorkflowFactory(\n unit=all_models.Workflow.WEEK_UNIT,\n recurrences=recurrence,\n next_cycle_start_date=date(2015, 1, 5),\n )\n cycle = wf_factories.CycleFactory(workflow=workflow)\n task = wf_factories.CycleTaskGroupObjectTaskFactory(\n status=u\"In Progress\",\n end_date=date(2015, 1, 5),\n cycle=cycle,\n )\n self.assertEquals(self.builder._should_create_event_for(task),\n should_create_event)\n","sub_path":"test/integration/ggrc/gcalendar/test_should_create_event_for_task.py","file_name":"test_should_create_event_for_task.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463399289","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom artan.testing.sql_utils import ReusedSparkTestCase\nfrom artan.filter import RecursiveLeastSquaresFilter, LinearKalmanFilter, LeastMeanSquaresFilter\nfrom pyspark.ml.linalg import Vectors, Matrices\nimport numpy as np\n\n\nclass RLSTests(ReusedSparkTestCase):\n\n np.random.seed(0)\n\n def test_simple_rls(self):\n df = self.spark.createDataFrame(\n [(1.0, Vectors.dense(0.0, 5.0)),\n (0.0, Vectors.dense(1.0, 2.0)),\n (1.0, Vectors.dense(2.0, 1.0)),\n (0.0, Vectors.dense(3.0, 3.0)), ], [\"label\", \"features\"])\n\n rls = RecursiveLeastSquaresFilter(2)\n\n model = rls.transform(df).filter(\"stateIndex=4\").collect()\n state = model[0].state.values\n\n expected = np.array([5.31071176e-09, 1.53846148e-01])\n np.testing.assert_array_almost_equal(state, expected)\n\n def test_ols_equivalence(self):\n # Simple ols problem\n # y = a * x + b + r\n # Where r ~ N(0, 1)\n n = 40\n a = 0.5\n b = 2\n x = np.arange(0, n)\n r = np.random.normal(0, 1, n)\n y = a * x + b + r\n features = x.reshape(n, 1)\n features = np.concatenate([features, np.ones_like(features)], axis=1)\n\n df = self.spark.createDataFrame(\n [(float(y[i]), Vectors.dense(features[i])) for i in range(n)], [\"label\", \"features\"])\n\n # set high regularization matrix factor to get close to OLS solution\n rls = RecursiveLeastSquaresFilter(2)\\\n .setInitialEstimate(Vectors.dense([1.0, 1.0]))\\\n .setRegularizationMatrixFactor(10E6)\n\n model = rls.transform(df)\n state = model.filter(\"stateIndex = {}\".format(n)).collect()[0].state.values\n\n # Check equivalence with least squares solution with numpy\n expected, _, _, _ = np.linalg.lstsq(features, y, rcond=None)\n np.testing.assert_array_almost_equal(state, expected)\n\n\nclass LMSTests(ReusedSparkTestCase):\n\n np.random.seed(0)\n\n def test_filter_trend(self):\n # y = a * x + N(0, 1)\n n = 40\n a = 0.2\n x = np.arange(0, n)\n r = np.random.normal(0, 1, n)\n y = a * x + r\n features = x.reshape(n, 1)\n\n df = self.spark.createDataFrame(\n [(float(y[i]), Vectors.dense(features[i])) for i in range(n)], [\"l\", \"f\"])\n\n lms = LeastMeanSquaresFilter(1)\\\n .setInitialEstimate(Vectors.dense([10.0]))\\\n .setRegularizationConstant(1.0)\\\n .setLearningRate(1.0)\\\n .setLabelCol(\"l\")\\\n .setFeaturesCol(\"f\")\n\n model = lms.transform(df)\n state = model.filter(\"stateIndex = {}\".format(n)).collect()[0].state.values\n\n np.testing.assert_array_almost_equal(state, np.array([0.2]), 2)\n\n\nclass LinearKalmanFilterTests(ReusedSparkTestCase):\n\n np.random.seed(0)\n\n def test_ols_equivalence(self):\n # Simple ols problem\n # y = a * x + b + r\n # Where r ~ N(0, 1)\n n = 40\n a = 0.27\n b = 1.2\n x = np.arange(0, n)\n r = np.random.normal(0, 1, n)\n y = (a * x + b + r).reshape(n, 1)\n features = x.reshape(n, 1)\n features = np.concatenate([features, np.ones_like(features)], axis=1)\n df = self.spark.createDataFrame(\n [(Vectors.dense(y[i]), Matrices.dense(1, 2, features[i])) for i in range(n)],\n [\"measurement\", \"measurementModel\"])\n lkf = LinearKalmanFilter(2, 1)\\\n .setMeasurementModelCol(\"measurementModel\")\\\n .setMeasurementCol(\"measurement\")\\\n 
.setInitialCovariance(Matrices.dense(2, 2, (np.eye(2)*10).reshape(4, 1)))\\\n .setProcessModel(Matrices.dense(2, 2, np.eye(2).reshape(4, 1)))\\\n .setProcessNoise(Matrices.dense(2, 2, np.zeros(4)))\\\n .setMeasurementNoise(Matrices.dense(1, 1, [10E-5]))\n\n model = lkf.transform(df)\n state = model.filter(\"stateIndex = {}\".format(n)).collect()[0].state.values\n\n # Check equivalence with least squares solution with numpy\n expected, _, _, _ = np.linalg.lstsq(features, y, rcond=None)\n np.testing.assert_array_almost_equal(state, expected.reshape(2), decimal=5)\n\n def test_multiple_model_adaptive_filter(self):\n n = 100\n a = 0.27\n b = 1.2\n x = np.concatenate([np.arange(0, n), np.arange(0, n)])\n r = np.random.normal(0, 1, n * 2)\n y = (a * x + b + r).reshape(n * 2, 1)\n features = x.reshape(n * 2, 1)\n features = np.concatenate([features, np.ones_like(features)], axis=1)\n state_keys = [\"1\"] * n + [\"2\"] * n\n df = self.spark.createDataFrame(\n [(state_keys[i], Vectors.dense(y[i]), Matrices.dense(1, 2, features[i])) for i in range(n*2)],\n [\"state_key\",\"measurement\", \"measurementModel\"])\n\n mmaeFilter = LinearKalmanFilter(2, 1)\\\n .setStateKeyCol(\"state_key\")\\\n .setMeasurementModelCol(\"measurementModel\")\\\n .setMeasurementCol(\"measurement\")\\\n .setInitialCovariance(Matrices.dense(2, 2, (np.eye(2)*10).reshape(4, 1)))\\\n .setProcessModel(Matrices.dense(2, 2, np.eye(2).reshape(4, 1)))\\\n .setProcessNoise(Matrices.dense(2, 2, np.zeros(4)))\\\n .setMeasurementNoise(Matrices.dense(1, 1, [1.0]))\\\n .setSlidingLikelihoodWindow(5)\\\n .setEnableMultipleModelAdaptiveEstimation()\n\n model = mmaeFilter.transform(df)\n state = model.filter(\"stateIndex = {}\".format(n)).collect()[0].state.values\n\n expected, _, _, _ = np.linalg.lstsq(features, y, rcond=None)\n np.testing.assert_array_almost_equal(state, expected.reshape(2), decimal=0)\n","sub_path":"python/artan/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"151406431","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass QiushibaikeSpider(CrawlSpider):\n name = 'qiushibaike'\n allowed_domains = ['qiushibaike.com']\n start_urls = ['https://www.qiushibaike.com/']\n\n rules = (\n Rule(LinkExtractor(allow=r'.*?/$'), callback='parse_item'),\n )\n\n def parse_item(self, response):\n print(response.text)\n item = {}\n #item['domain_id'] = response.xpath('//input[@id=\"sid\"]/@value').get()\n #item['name'] = response.xpath('//div[@id=\"name\"]').get()\n #item['description'] = response.xpath('//div[@id=\"description\"]').get()\n return item\n","sub_path":"job_gaoxiao/spiders/qiushibaike.py","file_name":"qiushibaike.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341408073","text":"from django.test import TestCase\n\nfrom airMonitor.models.Chart import Chart\n\n\nclass TestChartWrapper(TestCase):\n def test_file_load(self):\n chart = Chart()\n self.assertIn(\"type\", chart.dict())\n self.assertIn(\"data\", chart.dict())\n self.assertIn(\"options\", chart.dict())\n\n def test_empty_data(self):\n chart = Chart()\n self.assertEqual(len(chart._datasets), 0)\n\n def test_add_data_same_key(self):\n chart = Chart()\n for i in range(10):\n chart.add_data(\"key\", i)\n 
self.assertEqual(len(chart._datasets), 1)\n\n def test_add_data_multiple_keys(self):\n chart = Chart()\n for key in [\"key1\", \"key2\", \"key3\", \"key4\"]:\n for i in range(10):\n chart.add_data(key, i)\n self.assertEqual(len(chart._datasets), 4)\n","sub_path":"airMonitor/test/test_chart.py","file_name":"test_chart.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545902705","text":"# -*- coding: utf-8 -*-\n# @Time : 2022/11/18 13:55\n# @Author : HHB\n# @FileName: SohuSuzhou.py\n# @Software: PyCharm\n\"\"\"\n Name: 搜狐 苏州\n Description: http://sz.sohu.com/news/yaowen.shtml\n \n\"\"\"\n\n\n\nimport requests\nfrom lxml import etree\nimport pymysql\n\n\n\n\nclass Sohu(object):\n\n def __init__(self):\n\n self.url = \"http://sz.sohu.com/news/yaowen.shtml\"\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/91.0.4472.124 Safari/537.36\"}\n self.connect = pymysql.connect(host='192.168.3.21', port=3306, db='policeproject',\n user='root', passwd='123456', charset='utf8', )\n self.cursor = self.connect.cursor()\n\n def time_sc(self, punlish_time):\n # 2014年08月08日06:34\n time_sc_new = punlish_time.replace(\"年\", \"-\").replace(\"月\", \"-\").replace(\"日\", \" \")\n return time_sc_new + \":00\"\n\n def get_data(self):\n try:\n response = requests.get(url=self.url, headers=self.headers)\n response.encoding = response.apparent_encoding\n html = etree.HTML(response.text)\n data_list = html.xpath('//div[@class=\"list14\"]/ul/li')\n\n for i in data_list:\n\n title = i.xpath('./a/text()')\n url = i.xpath('./a/@href')\n\n if not title:\n continue\n else:\n print(title)\n print(url)\n\n self.get_detail(title[0], url[0])\n except Exception as e:\n print(e)\n\n def get_detail(self, title, url):\n\n response = requests.get(url=url, headers=self.headers)\n response.encoding = response.apparent_encoding\n html = etree.HTML(response.text)\n\n content = html.xpath('//div[@id=\"contentText\"]/p//text()')\n\n if not content:\n content = html.xpath('//div[@id=\"contentText\"]/text()')\n else:\n content = content\n content = \"\".join(content).replace(\"\\n\", \"\").replace(\"\\r\", \"\").strip()\n print(\"正文:\", content)\n\n publish_time = html.xpath('//div[@class=\"time\"]/text()')[0]\n time_sc_new = self.time_sc(publish_time)\n print(\"时间:\", time_sc_new)\n\n sql = \"insert into news_data(`title`,\" \\\n \"`content`,\" \\\n \"`url`,\" \\\n \"`source`,\" \\\n \"`publish_time`) value (%s,%s,%s,%s,%s)\"\n\n self.cursor.execute(sql, (\n title, content,\n url, '搜狐苏州', time_sc_new))\n self.connect.commit()\n\n\n\nif __name__ == '__main__':\n sohu = Sohu()\n sohu.get_data()","sub_path":"PoliceProject/SohuSuzhou.py","file_name":"SohuSuzhou.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"15811370","text":"#!/usr/bin/env python\n\nimport asyncio\nimport websockets\n\n\nasync def hello():\n uri = \"ws://localhost:8765\"\n async with websockets.connect(uri) as websocket:\n print(\"Receiving data\")\n await websocket.send(\"Hello world!\")\n while True:\n res = await websocket.recv()\n print(res)\n\n a = input()\n\n\nasyncio.get_event_loop().run_until_complete(hello())\n","sub_path":"p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"336145902","text":"from flask import request, jsonify\nfrom flask import Blueprint, Response\n\nfrom app.service.person import PersonService\n\nperson_bp = Blueprint('person', __name__)\n\n\n@person_bp.route('/person/', methods=['POST'])\ndef create_person():\n person_service = PersonService()\n try:\n data = request.json\n name = data['name']\n primary_email = data['email']\n person_service.create(name=name, primary_email=primary_email)\n return Response(\"Accepted\", status=202)\n except Exception as e:\n return Response(str(e), status=422)\n\n\n@person_bp.route('/person/<int:id>', methods=['GET'])\ndef get_person_by_id(id):\n person_service = PersonService()\n try:\n data = person_service.retrieve_one(id)\n result = {'name': data.name,\n 'primary email': data.primary_email}\n return jsonify(result)\n except Exception as e:\n return Response(str(e), status=422)\n\n\n@person_bp.route('/person/', methods=['GET'])\ndef get_persons():\n person_service = PersonService()\n try:\n data = person_service.retrieve()\n result = formatter(data)\n return jsonify(result)\n except Exception as e:\n return Response(str(e), status=422)\n\n\n@person_bp.errorhandler(404)\ndef api_not_found(e):\n return \"Invalid API request\"\n\n\ndef formatter(person):\n result = []\n for obj in person:\n result.append({\n 'id': obj.id,\n 'name': obj.name,\n 'primary email': obj.primary_email\n })\n return result\n","sub_path":"2021-02-05-meetup-18-flask-app-testing-part2/app/api/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427704505","text":"#!/usr/bin/env python\n\"\"\"\n*\n* copyright: 2012 Leif Theden <leif.theden@gmail.com>\n* license: GPL-3\n*\n* This file is part of pyrikura/purikura.\n*\n* pyrikura is free software: you can redistribute it and/or modify\n* it under the terms of the GNU General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n*\n* pyrikura is distributed in the hope that it will be useful,\n* but WITHOUT ANY WARRANTY; without even the implied warranty of\n* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n* GNU General Public License for more details.\n*\n* You should have received a copy of the GNU General Public License\n* along with pyrikura. 
If not, see <http://www.gnu.org/licenses/>.\n*\n\"\"\"\n\n\"\"\"\nTethered Camera Controller for Photo Booth\n\nVery basic script that triggers a sequence of images on a tethered camera\nand saves the output as jpeg images.\n\nThis script is intended to be used in conjunction with other software that\ncollectively creates a seemless photo booth experience.\n\"\"\"\n\nfrom yapsy.PluginManager import PluginManager\n\nfrom pyrikura.workflow import Node\nimport logging\nimport argparse\nimport os\nimport itertools\nimport re\nimport pickle\nimport time\n\n\n\nevent_name = 'test'\n\nsettings = {}\nsettings['shutter_sound'] = os.path.join('sounds', 'bell.wav')\nsettings['printsrv'] = os.path.join('/', 'home', 'mjolnir', 'smb-printsrv')\n#settings['template'] = os.path.join('templates', 'polaroid0.template')\nsettings['template'] = os.path.join('templates', '2x6vintage.template')\nsettings['originals'] = os.path.join('/', 'home', 'mjolnir', 'events', event_name, 'originals')\nsettings['temp_image'] = 'capture.jpg'\n\n\ndef build():\n arduino = Node('Arduino', '/dev/ttyACM0', 9600)\n repeater = Node('Repeater', 4, 5)\n tether = Node('Tether')\n composer = Node('Composer', template=settings['template'])\n stdout = Node('ConsolePrinter')\n archiver = Node('FileCopy', dest=settings['originals'])\n spooler = Node('FileCopy', dest=settings['printsrv'])\n twitter = Node('Twitter', 'twitter.secrets')\n beep = Node('Beeper', settings['shutter_sound'])\n\n beep.subscribe(repeater)\n repeater.subscribe(arduino)\n tether.subscribe(repeater)\n composer.subscribe(tether)\n archiver.subscribe(tether)\n spooler.subscribe(composer)\n #twitter.subscribe(composer)\n\n return [arduino, stdout, composer, spooler, archiver, tether, twitter,\n repeater]\n\n\ndef run():\n logging.basicConfig(level=logging.INFO)\n\n os.chdir('/home/mjolnir/git/PURIKURA')\n\n # H A N D L E P L U G I N S\n pm = PluginManager()\n pm.setPluginPlaces(['./pyrikura/plugins'])\n pm.collectPlugins()\n\n for pi in pm.getAllPlugins():\n logging.info('loading plugin %s', pi.name)\n pm.activatePluginByName(pi.name)\n\n brokers = {}\n nodes = build()\n head = nodes[0]\n\n for node in nodes:\n brokers[node] = node.load(pm)\n\n for node, broker in brokers.items():\n for other in node._listening:\n broker.subscribe(brokers[other])\n\n start = time.time()\n last_time = 0\n shots = 0\n last_trigger = 0\n for broker in itertools.cycle(brokers.values()):\n broker.update()\n\n #eyefi = Watcher(eyefi_incoming, re.compile('.*\\.jpg$', re.I))\n\n\nprofile = False\nif __name__ == '__main__':\n if profile:\n import cProfile\n import pstats\n\n try:\n cProfile.run('run()', 'results.prof')\n except KeyboardInterrupt:\n pass\n\n p = pstats.Stats(\"results.prof\")\n p.strip_dirs()\n p.sort_stats('time').print_stats(20)\n \n else:\n run()\n","sub_path":"pbtether.py","file_name":"pbtether.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380318803","text":"'''\n\nThe prime 41, can be written as the sum of six consecutive primes:\n\n41 = 2 + 3 + 5 + 7 + 11 + 13\nThis is the longest sum of consecutive primes that adds to a prime below one-hundred.\n\nThe longest sum of consecutive primes below one-thousand that adds to a prime, contains 21 terms, and is equal to 953.\n\nWhich prime, below one-million, can be written as the sum of the most consecutive primes?\n\n'''\nimport math, datetime\n\ndef prime_sieve(n):\n sieve = [True] * (n//2)\n for i in range(3, 
int(math.sqrt(n))+1, 2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1, n//2) if sieve[i]]\n\ndef is_prime(num):\n if num < 3 or num % 2 == 0:\n return (num == 2)\n else:\n return all(num % i != 0 for i in range(3, int(num**0.5 + 2), 2))\n\ndef p50(maximo):\n primes = prime_sieve(maximo // 4)\n dict_results = {}\n max_terms = 0\n max_consecutive = 0\n for i in range(0, len(primes)):\n aux = None\n maxi = 0\n suma = 0\n terms = 0\n aux_terms = None\n for prime in primes[i:]:\n suma += prime\n terms += 1\n if suma > maximo:\n break\n if is_prime(suma):\n aux_terms = terms\n maxi = suma\n if aux_terms > max_terms:\n max_terms = aux_terms\n max_consecutive = maxi\n\n return max_consecutive\n\n\n\ntt1 = datetime.datetime.now()\n\nprint('Answer: ', p50(1000000)) # 997651\n\ntt2 = datetime.datetime.now()\n\nprint('Time: ', tt2 - tt1) # 4.3 seconds\n\n","sub_path":"50.py","file_name":"50.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4955811","text":"# -------программа для сортировки слов из текста, слова не повторяются\r\ndef obrabotka_stroki(enter_word_isodrabotka):\r\n enter_word_list_isobrabotka = []\r\n enter_word_list_1_isobrabotka = []\r\n digit_str = '1234567890'\r\n punctuation_str = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_{|}~'\r\n\r\n for i in punctuation_str:\r\n enter_word_isodrabotka = enter_word_isodrabotka.replace(i, ' ')\r\n enter_word_list_isobrabotka = enter_word_isodrabotka.split()\r\n for i in range(len(enter_word_list_isobrabotka)):\r\n y = 0\r\n for a in digit_str:\r\n if a in enter_word_list_isobrabotka[i]:\r\n y = 1\r\n break\r\n if y == 0:\r\n enter_word_list_1_isobrabotka.append(enter_word_list_isobrabotka[i])\r\n return enter_word_list_1_isobrabotka\r\n\r\ndef statistika(enter_word_list_isstatistika):\r\n dict1 = {}\r\n for i in enter_word_list_isstatistika:\r\n if i in dict1:\r\n dict1[i] += 1\r\n else:\r\n dict1[i] = 1\r\n return dict1\r\n\r\nprint('''Здравствуйте.\r\nДанная программа для сортировки слов из текста, где слова не повторяются. \r\nПользователь вводит с клавиатуры строки, состоящие из слов.\r\nПустая строка означает прекратить ввод текста. Программа выводит на экран слова из текста,\r\nотсортированные по алфавиту.''')\r\n\r\nwhile True:\r\n vubor_menu = 0\r\n print('''\r\nДля продолжения нажмите: 1\r\nДля выхода нажмите: 2\r\n''')\r\n enter_word = ''\r\n enter_word_1 = ''\r\n enter_word_list = []\r\n enter_word_list_1 = []\r\n\r\n try:\r\n vubor_menu = int(input())\r\n except:\r\n continue\r\n if vubor_menu == 2:\r\n break\r\n elif vubor_menu == 1:\r\n while True:\r\n\r\n print('''Введите с клавиатуры строки, состоящие из слов. 
Вы можете нажимать Enter для ввода новой строки.\r\nПустая строка означает прекратить ввод текста.''')\r\n\r\n while True:\r\n enter_word = ''\r\n enter_word = input('Введите строку')\r\n if enter_word == '':\r\n break\r\n else:\r\n enter_word_1 = enter_word_1 + ' ' + enter_word\r\n enter_word_list_1 = obrabotka_stroki(enter_word_1)\r\n if len(enter_word_list_1) == 0:\r\n print('Извините, нету слов')\r\n else:\r\n print('Сортировка слов а алфавитном порядке (в данном варианте - вывод в столбик):')\r\n enter_word_list_1 = sorted(enter_word_list_1)\r\n dict2 = {}\r\n dict2 = statistika(enter_word_list_1)\r\n for i in dict2:\r\n print(i)\r\n break\r\n else:\r\n continue\r\n\r\nprint('''\r\nВы вышли из программы\r\n''')","sub_path":"hw_3_3_v1.py","file_name":"hw_3_3_v1.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126219652","text":"from flask import Flask, request\nfrom timeit import default_timer as timer\n\nimport json\nimport math\nimport random\n\napp = Flask(__name__)\nprint(\"Serving now\")\n\n\n@app.route('/random', methods=['GET'])\ndef entry():\n start = timer()\n num = int(request.args.get('num', '10'))\n randoms = [\"%06d\" % math.floor(random.random() * 999999) for _ in\n range(0, num)]\n ret = json.dumps(randoms, separators=(',', ':'))\n stop = timer()\n print('{0:0.3f}ms'.format((stop - start) * 1e3))\n return ret\n","sub_path":"servers/flask+uwsgi-python3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"17687741","text":"'''\nlimited版に編集してある\n'''\n\n\nclass IdCreater():\n\n def __init__(self, target_file, read_file, file_type):\n self.target_file = target_file\n self.read_file = read_file\n self.file_type = file_type\n\n def creat_file(self):\n abst_list = self.read()\n self.write(abst_list)\n\n def read(self):\n with open(self.read_file , 'r') as r:\n abst_list = [abst for abst in r]\n return abst_list\n\n def write(self, abst_list):\n write_file = self.target_file + 'id_data/id_' + self.file_type + '.txt'\n with open(write_file, 'w') as w:\n for abst in abst_list:\n split_list = abst.split('\\t')\n abst_id_str = split_list[0]\n abst_title_str = split_list[1]\n abst_abst_str = split_list[2]\n written_str = abst_id_str + '\\t' + abst_abst_str\n w.write(written_str)\n\n\nclass AnnoCreater():\n\n def __init__(self, target_file, read_file, file_type):\n self.target_file = target_file\n self.read_file = read_file\n self.file_type = file_type\n\n def creat_file(self):\n ne_list = self.read()\n self.write(ne_list)\n\n def read(self):\n with open(self.read_file , 'r') as r:\n ne_list = [ne for ne in r]\n return ne_list\n\n def write(self, ne_list):\n write_file = self.target_file + 'anno_data/anno_' + self.file_type + '.txt'\n with open(write_file, 'w') as w:\n for ne in ne_list:\n #ne: \n #21826085\tA\t946\t957\thaloperidol\tTRIVIAL\n split_list = ne.split('\\t')\n abst_id_str = split_list[0]\n title_or_abst_tag = split_list[1]\n s_index = split_list[2]\n e_index = split_list[3]\n ne_name = split_list[4]\n ne_type = split_list[5]\n # タイトルは除外\n if title_or_abst_tag == 'A':\n #written_str = abst_id_str + '\\t' + ne_name + '\\t' + s_index + '\\t' + e_index + '\\t' + ne_type\n if ne_type.strip() in ['SYSTEMATIC', 'FAMILY', 'TRIVIAL']:\n written_str = abst_id_str + '\\t' + ne_name + '\\t' + s_index + '\\t' + e_index + '\\t' + 'CHEMICAL\\n'\n 
w.write(written_str)\n\n\n\ndef main():\n TARGET_FILE = '/cl/work/shusuke-t/ds_ner/orig_data/chemdner/data/limited_type_data/'\n make_id_corpus = True\n if make_id_corpus:\n TOY_FILE = TARGET_FILE + 'orig/toy.abstracts.txt' \n TRAIN_FILE = TARGET_FILE + 'orig/training.abstracts.txt' \n DEV_FILE = TARGET_FILE + 'orig/development.abstracts.txt'\n EVAL_FILE = TARGET_FILE + 'orig/evaluation.abstracts.txt'\n\n id_toy_creater = IdCreater(TARGET_FILE, TOY_FILE, 'toy')\n id_toy_creater.creat_file()\n\n id_toy_creater = IdCreater(TARGET_FILE, TRAIN_FILE, 'train')\n id_toy_creater.creat_file()\n id_valid_creater = IdCreater(TARGET_FILE, DEV_FILE, 'valid')\n id_valid_creater.creat_file()\n id_test_creater = IdCreater(TARGET_FILE, EVAL_FILE, 'test')\n id_test_creater.creat_file()\n\n\n make_anno_corpus = True\n if make_anno_corpus:\n TOY_FILE = TARGET_FILE + 'orig/toy.annotations.txt'\n TRAIN_FILE = TARGET_FILE + 'orig/training.annotations.txt'\n DEV_FILE = TARGET_FILE + 'orig/development.annotations.txt'\n EVAL_FILE = TARGET_FILE + 'orig/evaluation.annotations.txt'\n\n anno_toy_creater = AnnoCreater(TARGET_FILE, TOY_FILE, 'toy')\n anno_toy_creater.creat_file()\n\n anno_train_creater = AnnoCreater(TARGET_FILE, TRAIN_FILE, 'train')\n anno_train_creater.creat_file()\n anno_valid_creater = AnnoCreater(TARGET_FILE, DEV_FILE, 'valid')\n anno_valid_creater.creat_file()\n anno_test_creater = AnnoCreater(TARGET_FILE, EVAL_FILE, 'test')\n anno_test_creater.creat_file()\n\nif __name__ == '__main__':\n main()\n","sub_path":"orig_data/chemdner/annotator/creat_id_anno_data.py","file_name":"creat_id_anno_data.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"360322256","text":"from turtle import *\r\n\r\nht()\r\nbgcolor(\"black\")\r\ntitle(\"Ein Quadrat auf Reise\")\r\ncolormode(255)\r\nschoenefarbe=(255,185,65)\r\nphi=3\r\npensize(1)\r\nspeed(0)\r\n\r\ndef quadrat():\r\n for i in range(4):\r\n forward(200)\r\n right(90)\r\n\r\ndef meine_farbe():\r\n pencolor(255,22,225)\r\n\r\nfor i in range(360//phi):\r\n if 0==i%2:\r\n pencolor(schoenefarbe)\r\n else:\r\n meine_farbe()\r\n quadrat()\r\n right(phi)\r\n\r\n\r\nmainloop()\r\n \r\n","sub_path":"Quellen/python/8 Allerlei Q-Gruppenmitgliedern/Urs/Quadrat_auf_Reisen.py","file_name":"Quadrat_auf_Reisen.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191021705","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n count=0\n ptr=head\n while ptr:\n if count==n:\n break\n count+=1\n ptr=ptr.next\n \n \n if not ptr:\n return head.next\n else:\n ptr2=head\n while ptr.next:\n ptr=ptr.next\n ptr2=ptr2.next\n \n next=ptr2.next;\n if next:\n ptr2.next=ptr2.next.next\n else:\n ptr2.next=None\n return head","sub_path":"19 Remove Nth Node From End of List.py","file_name":"19 Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499237891","text":"import re\n\nimport requests\n\nmaster_version_file = 'taskcat/master_version'\ndevelop_version_file = 'taskcat/develop_version'\n\ndef get_pip_version(pkginfo_url):\n pkginfo = 
requests.get(pkginfo_url).text\n for record in pkginfo.split('\\n'):\n if record.startswith('Version'):\n current_version = str(record).split(':', 1)\n return (current_version[1]).strip()\n\n\n#current_develop_version = get_pip_version('https://testpypi.python.org/pypi?name=taskcat&:action=display_pkginfo')\ncurrent_develop_version = '0.dev373.dev3'\nprint(\"PyPi Develop Version is [{}]\".format(current_develop_version))\n#current_master_version = get_pip_version('https://pypi.python.org/pypi?name=taskcat&:action=display_pkginfo')\ncurrent_master_version = '730.73.73'\nprint(\"PyPi Master Version is [{}]\".format(current_master_version))\n\nnew_production_release = int(re.findall(r'\\d+', current_master_version)[-1])\nnew_development_release = int(re.findall(r'\\d+', current_develop_version)[-1])\n\nproduction_version = re.sub(r'\\d$', lambda x: str(int(x.group(0)) + 1), current_master_version)\ndevelopment_version = re.sub(r'\\d$', lambda x: str(int(x.group(0)) + 1), current_develop_version)\n\nprint(\"next development version:\")\nprint(development_version)\nprint(\"next production version:\")\nprint(production_version)\n\nwith open(master_version_file, 'w') as m:\n m.write(str(current_master_version))\n\nwith open(develop_version_file, 'w') as d:\n d.write(str(current_develop_version))","sub_path":"getpipversion.py","file_name":"getpipversion.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"416194325","text":"import os\r\nimport pandas as pd\r\nimport shutil\r\nimport errno\r\nimport win32com.client\r\n\r\n\r\n\r\n#Set the proper print window dimensions\r\npd.set_option('display.height', 1000)\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 1000)\r\n\r\ndef copyDirectory(src,dest):\r\n try:\r\n shutil.copytree(src, dest)\r\n #Directories are the same\r\n except shutil.Error as e:\r\n print('Directory not copied. Error: %s' % e)\r\n #Any error saying that the directory doesn't exist\r\n except OSError as e:\r\n print('Directory not copied. Error: %s' %e)\r\n\r\ndef copy(src,dest):\r\n try:\r\n shutil.copytree(src, dest)\r\n #Any error saying that the directory doesn't exist\r\n except OSError as e:\r\n if e.errno == errno.ENOTDIR:\r\n shutil.copy(src,dest)\r\n else:\r\n print('Directory not copied. 
Error: %s' %e)\r\n \r\n#filepath = 'C:/Users/jyoo/Desktop/'\r\n#filepath = 'C:/Users/Justin/Dropbox/Walgreens/'\r\n\r\n#======================================================\r\n#filepath = 'C:/Users/Justin/Desktop/Walgreens Folders/' #Home\r\nfilepath = 'C:/Users/jyoo/Desktop/App Folders/' #Work\r\n#======================================================\r\n\r\n#======================================================\r\n#df = pd.read_csv('C:/Users/Justin/Dropbox/Walgreens/Site List.csv',header = 0) #Home\r\ndf = pd.read_csv('C:/Users/jyoo/Desktop/Site List.csv',header = 0) #Work\r\n#======================================================\r\n\r\n#print(list(df.columns.values))\r\n#print(df)\r\n\r\n#print(df.iloc[0,0])\r\n#print(df.iloc[0,1])\r\n#print(df.iloc[1,1])\r\n#print(testcase.zfill(10))\r\n\r\n#for i in range(0,5):\r\nfor i in range(0,len(df.index)):\r\n #======================================================\r\n #src = 'C:/Users/Justin/Desktop/template' #Home\r\n src = 'C:/Users/jyoo/Desktop/template' #Work\r\n #======================================================\r\n dest = filepath + \"FD \" + str(df.iloc[i,0]).zfill(4) + ' - ' + str(df.iloc[i,1]) + ', ' + str(df.iloc[i,2]) + '/'\r\n copy(src, dest)\r\n os.rename(dest + '/1 - Pre-Application/PY9 Assessment Workbook v4.5.xlsm', dest + '/1 - Pre-Application/' + 'FD ' + str(df.iloc[i,0]).zfill(4) + ' PY9 Assessment Workbook.xlsm')\r\n print(\"Finished Copying \" + str(i) + \" out of \" + str(len(df.index)) + \".\")\r\n #if not os.path.exists(filepath + str(df.iloc[i,0]).zfill(4) + ' - ' + str(df.iloc[i,1]) + ', ' + str(df.iloc[i,2]) + '/' ):\r\n # os.mkdir(filepath + str(df.iloc[i,0]).zfill(4) + ' - ' + str(df.iloc[i,1]) + ', ' + str(df.iloc[i,2]) + '/' )\r\n\r\n#if not os.path.exists(filepath + foldername):\r\n# os.mkdir(filepath + foldername)\r\n#print(type(folders))\r\n\r\nprint(\"All Done\")\r\n\r\n#%%\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"_Python - Create Multiple Folders - DT.py","file_name":"_Python - Create Multiple Folders - DT.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"179106219","text":"# Example local settings file\r\n\r\n# Leave in debug until sure this is instance running correctly\r\nDEBUG = True\r\nDEBUG_ALIAS = 'release'\r\n\r\n# Identify transactions in Braintree as coming from this instance\r\nBRAINTREE_PREFIX = 'rel'\r\n\r\nBASE_URL = 'test.huddle.com.au'\r\n\r\nDUCKCREEK_STORE_XML = {\r\n 'RSP': True,\r\n 'ZIP': False,\r\n 'REQ': True,\r\n}\r\n\r\nSERVICES = {\r\n 'motorweb': True,\r\n 'car_quote': True,\r\n 'travel_quote': True,\r\n}\r\n\r\n# Nginx domains that are allowed to run the site itself\r\nALLOWED_HOSTS = [\r\n 'localhost:3000',\r\n 'localhost:8002',\r\n '*',\r\n]\r\n\r\n# Nginx domains will be allowed to make API calls\r\nCORS_ORIGIN_WHITELIST = [\r\n 'localhost:3000',\r\n 'localhost:8002',\r\n '*',\r\n]\r\n\r\n# Redfine the urls which will be sent to the Angular app\r\nFRONTEND_URL = \"http://%s\" % BASE_URL\r\nAPI_URL = \"http://api.%s\" % BASE_URL\r\nADMIN_URL = \"http://admin.%s\" % BASE_URL\r\n#FRONTEND_URL = \"http://localhost:3000\"\r\n#API_URL = \"http://localhost:8000\"\r\n#ADMIN_URL = \"http://localhost:8001\"\r\n\r\n# Database instance details\r\nDATABASES = {\r\n 'default': {\r\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\r\n 'NAME': 'huddlemoneytestdb',\r\n 'HOST': 'localhost',\r\n 'USER': 'huddlemoney-test-db-user',\r\n 'PASSWORD': 'huddle123',\r\n 
}\r\n}\r\n\r\nDUCKCREEK_MODE = 'sandbox'\r\nBRAINTREE_MODE = 'sandbox'\r\nVEDA_MODE = 'sandbox'\r\nSLACK_MODE = 'sandbox'\r\n","sub_path":"huddlemoney/roles/test-deploy-code/files/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"169053521","text":"# SISTEMA PRINCIPAL\nfrom lib.arquivo.metodos import *\nfrom lib.interface.menus import *\nimport time\n\n\n# Cores para o terminal\ncor = {\n 'vermelho': '\\033[1;31m',\n 'verde': '\\033[1;32m',\n 'azul': '\\033[1;34m',\n 'ciano': '\\033[1;36m',\n 'magenta': '\\033[1;35m',\n 'amarelo': '\\033[1;33m',\n 'preto': '\\033[1;30m',\n 'branco': '\\033[1;37m',\n 'reset': '\\033[1;0;0m',\n 'reverso': '\\033[1;2m',\n 'bgpreto': '\\033[1;40m',\n 'bgvermelho': '\\033[1;41m',\n 'bgverde': '\\033[1;42m',\n 'bgamarelo': '\\033[1;43m',\n 'bgazul': '\\033[1;44m',\n 'bgmagenta': '\\033[1;45m',\n 'bgciano': '\\033[1;46m',\n 'bgbranco': '\\033[1;47m'}\n\n\n# Cria aquivos com a data atual para manipulação\ndataAtual = datetime.today()\ndataArquivo = str(f'{dataAtual.day}.{dataAtual.month}.{dataAtual.year}')\n# data atual para criar arquivo\narqPacSol = (dataArquivo + '-LISTA-TRANSPORTE-PACIENTES-SOLICITADOS' + '.txt')\narqPacEmTransp = (dataArquivo + '-LISTA-TRANSPORTE-PACIENTES-TRANSPORTANDO' + '.txt')\narqPacTranspFinal = (dataArquivo + '-LISTA-TRANSPORTE-PACIENTES-FINALIZADOS' + '.txt')\niniciaArquivosRegistro(arqPacSol)\niniciaArquivosRegistro(arqPacEmTransp)\niniciaArquivosRegistro(arqPacTranspFinal)\n\n\n# Chama menu\nlistMenus = ['Lista de pacientes',\n 'Solicitar transporte', \n 'Buscar pacientes',\n 'Finalizar transporte',\n 'Sair do programa']\nwhile True:\n # 1 - Lista de pacientes\n # 2 - Solicitar transporte\n # 3 - Buscar paciente\n # 4 - Finalizar transporte\n # 5 - Sair do sistema\n opt = menu(listMenus, 'CENTRAL DE TRANSPORTE')\n if opt == 1:\n mostraCadastro(arqPacSol, 'PAINEL - SOLICITADOS')\n mostraCadastro(arqPacEmTransp, 'PAINEL - TRANSPORTANDO')\n mostraCadastro(arqPacTranspFinal, 'PAINEL - FINALIZADOS')\n elif opt == 2:\n cadastraPac(arqPacSol)\n elif opt == 3:\n mudaDePainel(arqPacSol, arqPacEmTransp, 'BUSCAR PACIENTE')\n elif opt == 4:\n # Implantar a finalização de transporte\n mudaDePainel(arqPacEmTransp, arqPacTranspFinal, 'FINALIZAR TRANSPORTE')\n elif opt == 5:\n titulo('FIM DO PROGRAMA', txtCor=\"bgpreto\")\n break\n else:\n print(f\"{cor['vermelho']}ERRO: Digite uma opção válida!{cor['reset']}\")\n time.sleep(1.3)\n","sub_path":"central_transporte_leforte/central_transp/sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526353993","text":"### Install and import dependencies ###\n\nimport tensorflow as tf\n#import tensorflow datasets\nimport tensorflow_datasets as tfds\n\ntfds.disable_progress_bar()\n\n#helper libraries\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport logging\nlogger = tf.get_logger()\nlogger.setLevel(logging.ERROR)\n\n### Import the Fashion MNIST dataset ###\n\ndataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)\ntrain_dataset, test_dataset = dataset['train'], dataset['test']\n\n\"\"\"\n0 \tT-shirt/top\n1 \tTrouser\n2 \tPullover\n3 \tDress\n4 \tCoat\n5 \tSandal\n6 \tShirt\n7 \tSneaker\n8 \tBag\n9 \tAnkle boot\n\"\"\"\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 
'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n### Explore the data ###\n\nnum_train_examples = metadata.splits['train'].num_examples\nnum_test_examples = metadata.splits['test'].num_examples\nprint(\"Number of training examples: {}\".format(num_train_examples))\nprint(\"Number of test examples: {}\".format(num_test_examples))\n\n### Preprocess the data ###\n\ndef normalize(images, labels):\n images = tf.cast(images, tf.float32)\n images /= 255\n return images, labels\n\n# The map function applies the normalize function to each element in the train and test datasets\ntrain_dataset = train_dataset.map(normalize)\ntest_dataset = test_dataset.map(normalize)\n\n# The first time you use the dataset, the images will be loaded from disk\n# Caching will keep them in memory, making training faster\ntrain_dataset = train_dataset.cache()\ntest_dataset = test_dataset.cache()\n\n### Explore the processed data ###\n\n# Display the first 25 images from the training set and display the class name below each image.\nplt.figure(figsize=(10,10))\ni = 0\nfor (image, label) in test_dataset.take(25):\n image = image.numpy().reshape((28,28))\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(image, cmap=plt.cm.binary)\n plt.xlabel(class_names[label])\n i += 1\nplt.show()\n\n### Build the model ###\n### Setup the layers ###\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu, input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D((2, 2), strides=2),\n tf.keras.layers.Conv2D(64, (3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D((2, 2), strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(10)\n])\n\n### Compile the model ###\n\nmodel.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n)\n\n### Train the model ###\n\nBATCH_SIZE = 32\ntrain_dataset = train_dataset.cache().repeat().shuffle(num_train_examples).batch(BATCH_SIZE)\ntest_dataset = test_dataset.cache().batch(BATCH_SIZE)\n\nmodel.fit(train_dataset, epochs=10, steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE))\n\n### Evaluate accuracy ###\nprint(\"Starting testing\")\ntest_loss, test_accuracy = model.evaluate(test_dataset, steps=math.ceil(num_test_examples/32))\n\n### Make predictions and explore ###\nfor test_images, test_labels in test_dataset.take(1):\n test_images = test_images.numpy()\n test_labels = test_labels.numpy()\n predictions = model.predict(test_images)\n\n# predictions.shape = (32, 10) ( because one batch is 32)\n\nprint(predictions[0])\n\nprint(np.argmax(predictions[0]))\n\nprint(test_labels[0])\n\n# We can graph this to look at the full set of 10 class predictions\n\ndef plot_image(i, predictions_array, true_labels, images):\n predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img[..., 0], cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n predictions_array = [i for i in predictions_array if i >= 0]\n print(\"predictions array = \", predictions_array)\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100 * np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, 
true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\n# Let's look at the 0th image, predictions, and prediction array.\ni = 0\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions, test_labels, test_images)\n\n# Finally, use the trained model to make a prediction about a single image.\n# Grab an image from the test dataset\nimg = test_images[0]\n\n# image shape = (28, 28, 1)\n\n# NOTE : tf.keras models are optimized to make predictions on a batch, or collection, of examples at once.\n# So even though we're using a single image, we need to add it to a list:\n\n# Add the image to a batch where it's the only member.\nimg = np.array([img])\n\n# img.shape = (1, 28, 28, 1)\n# Now predict the image:\n\npredictions_single = model.predict(img)\n\nprint(\"predictions single : \",predictions_single)\n\nplot_value_array(0, predictions_single, test_labels)\n_ = plt.xticks(range(10), class_names, rotation=45)\n\n#model.predict returns a list of lists, one for each image in the batch of data.\nprint(np.argmax(predictions_single[0]))","sub_path":"MNIST_fashion_CNN.py","file_name":"MNIST_fashion_CNN.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"173564890","text":"import cv2\r\nimport lungc\r\nimport pickle\r\n\r\nnum=input(\"Enter test image number: \")\r\nimg=cv2.imread(\"test/test (\"+num+\").jpg\",0)\r\nleft_feature,right_feature,img_left,img_right=lungc.process_lung_test(img)\r\n\r\n\r\nfilename = 'finalized_model.sav'\r\nloaded_model = pickle.load(open(filename, 'rb'))\r\nresult = loaded_model.predict([left_feature])\r\nprint(result[0])\r\nif(result[0]):\r\n\tprint(\"Tumour present\")\r\nelse:\r\n\tprint(\"Normal Lung\")\r\n","sub_path":"PredictCancer.py","file_name":"PredictCancer.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504699818","text":"\"\"\"\nScript to solve Day 25\n\"\"\"\nimport re\n\ndef read_content(content) -> dict:\n info = {'start': None, 'steps': None}\n active = None\n cur_val = None\n for c in content:\n if info['start'] is None:\n info['start'] = safe_regex(r'state ([A-Z]).', c)\n elif info['steps'] is None:\n val = safe_regex(r'(\\d+)', c)\n info['steps'] = int(val)\n elif not c:\n active = None\n cur_val = None\n elif active is None:\n active = safe_regex(r'state ([A-Z]):', c)\n info[active] = {0: {}, 1: {}}\n elif cur_val is None:\n val = safe_regex(r'value is ([0-1])', c)\n cur_val = int(val)\n else:\n reset = False\n try:\n val = safe_regex(r'Write the value ([0-1]).', c)\n val = int(val)\n cat = 'wr'\n except ValueError:\n try:\n val = safe_regex(r'Move one slot to the ([a-z]+).', c)\n cat = 'dir'\n except ValueError:\n val = safe_regex(r'state ([A-Z]).', c)\n cat = 'nxt'\n reset = True\n info[active][cur_val][cat] = val\n if reset:\n cur_val = None\n return info\n\n\ndef safe_regex(regex, string) -> str:\n ma = re.search(regex, string)\n if ma:\n return ma.group(1)\n else:\n raise ValueError(f'No match for {regex} in {string}')\n\n\ndef part1(content):\n info = read_content(content)\n ones = {}\n cnt = 0\n state = info['start']\n pos = 0\n while cnt < info['steps']:\n if pos not 
in ones:\n ones[pos] = 0\n cur_read = ones[pos]\n wr = info[state][cur_read]['wr']\n #if wr == 1 and cur_read == 0:\n # ones.append(pos)\n #elif wr == 0 and cur_read == 1:\n # ones.remove(pos)\n ones[pos] = wr\n direc = info[state][cur_read]['dir']\n if direc == 'right':\n pos += 1\n else:\n pos -= 1\n state = info[state][cur_read]['nxt']\n cnt += 1\n return sum(ones.values())\n\n\ndef part2(content):\n pass\n\n\nif __name__ == '__main__':\n test = ['Begin in state A.',\n 'Perform a diagnostic checksum after 6 steps.',\n '',\n 'In state A:',\n 'If the current value is 0:',\n ' - Write the value 1.',\n ' - Move one slot to the right.',\n ' - Continue with state B.',\n 'If the current value is 1:',\n ' - Write the value 0.',\n ' - Move one slot to the left.',\n ' - Continue with state B.',\n '',\n 'In state B:',\n 'If the current value is 0:',\n ' - Write the value 1.',\n ' - Move one slot to the left.',\n ' - Continue with state A.',\n 'If the current value is 1:',\n ' - Write the value 1.',\n ' - Move one slot to the right.',\n ' - Continue with state A.']\n assert part1(test) == 3\n","sub_path":"sol2017/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354972835","text":"# -*- Encoding: UTF-8 -*-\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\n\n# Create a U-Net architecture for semantic segmentation\nclass Block(nn.Module):\n \"\"\"Block at each level of the U\"\"\"\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, padding_mode=\"zeros\")\n self.relu = nn.ReLU()\n self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1, padding_mode=\"zeros\")\n\n def forward(self, x):\n return self.relu(self.conv2(self.relu(self.conv1(x))))\n\nclass Encoder(nn.Module):\n \"\"\"Left part of the U, a number of Blocks\"\"\"\n def __init__(self, ch_list=(1, 4, 8, 16)):\n super().__init__()\n num_ch = len(ch_list)\n self.encoder_blocks = nn.ModuleList([Block(ch_list[i], ch_list[i+1]) for i in range(num_ch-1)])\n self.pool = nn.MaxPool2d(2)\n\n def forward(self, x):\n ftrs = []\n for block in self.encoder_blocks:\n x = block(x)\n ftrs.append(x)\n x = self.pool(x)\n return ftrs\n\n\nclass Decoder(nn.Module):\n def __init__(self, ch_list=(16, 8, 4)):\n super().__init__()\n self.ch_list = ch_list\n num_ch = len(ch_list)\n # Up-convolutions\n self.upconvs = nn.ModuleList([nn.ConvTranspose2d(self.ch_list[i], self.ch_list[i+1], 2, 2)\n for i in range(num_ch-1)])\n self.dec_blocks = nn.ModuleList([Block(ch_list[i], ch_list[i+1]) for i in range(num_ch-1)])\n\n def forward(self, x, encoder_features):\n for i in range(len(self.ch_list) - 1):\n # Up-convolution\n x = self.upconvs[i](x)\n # Concatenate with corresponding encoder-feature map\n enc_ftrs = self.crop(encoder_features[i], x)\n x = torch.cat([x, enc_ftrs], dim=1)\n # Perform convolutions\n x = self.dec_blocks[i](x)\n return x\n\n def crop(self, enc_features, x):\n _, _, H, W = x.shape\n enc_features = torchvision.transforms.CenterCrop([H, W])(enc_features)\n return enc_features\n\nclass UNet(nn.Module):\n def __init__(self, enc_chs=(1, 4, 8, 16), dec_chs=(16, 8, 4), num_class=5, retain_dim=False, out_size=(24,8)):\n super().__init__()\n self.encoder = Encoder(enc_chs)\n self.decoder = Decoder(dec_chs)\n self.head = nn.Conv2d(dec_chs[-1], num_class, 1)\n self.retain_dim = retain_dim\n\n def forward(self, 
x):\n enc_ftrs = self.encoder(x)\n out = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])\n out = self.head(out)\n if self.retain_dim:\n out = F.interpolate(out, out_size)\n return out\n\n\n# End of file models.py\n","sub_path":"utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633106907","text":"import cv2 as cv\r\nimport numpy as np\r\nimport time\r\nimport boto3\r\nfrom pathlib import Path\r\nimport os\r\nimport PIL\r\nimport logging\r\n\r\n# create logger\r\nlogger1 = logging.getLogger('YOLO Detection and OCR')\r\nlogger1.setLevel(logging.DEBUG)\r\n\r\n# create console handler and set level to debug\r\nch1 = logging.StreamHandler()\r\nch1.setLevel(logging.DEBUG)\r\n\r\n# create formatter\r\nformatter1 = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n\r\n# add formatter to ch\r\nch1.setFormatter(formatter1)\r\n\r\n# add ch to logger\r\nlogger1.addHandler(ch1)\r\n\r\n# Initialize aws credentials\r\n\r\nACCESS_KEY = 'XXXXXXXXXXXXXXXXX'\r\nSECRET_KEY = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\r\ns3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\r\n\r\n# Initialize the parameters\r\nvehicle_threshold = .5\r\nvehicle_weights = 'vehicle-detector/yolo-voc.weights'\r\nvehicle_netcfg = 'vehicle-detector/yolo-voc.cfg'\r\n\r\n# Load the model\r\nvehicle_net = cv.dnn.readNetFromDarknet(vehicle_netcfg, vehicle_weights)\r\nvehicle_net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\r\nvehicle_net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\r\n\r\nclass Label:\r\n\r\n\tdef __init__(self,cl=-1,tl=np.array([0.,0.]),br=np.array([0.,0.]),prob=None):\r\n\t\tself.__tl \t= tl\r\n\t\tself.__br \t= br\r\n\t\tself.__cl \t= cl\r\n\t\tself.__prob = prob\r\n\r\n\tdef __str__(self):\r\n\t\treturn 'Class: %d, top_left(x:%f,y:%f), bottom_right(x:%f,y:%f)' % (self.__cl, self.__tl[0], self.__tl[1], self.__br[0], self.__br[1])\r\n\r\n\tdef copy(self):\r\n\t\treturn Label(self.__cl,self.__tl,self.__br)\r\n\r\n\tdef wh(self): return self.__br-self.__tl\r\n\r\n\tdef cc(self): return self.__tl + self.wh()/2\r\n\r\n\tdef tl(self): return self.__tl\r\n \r\n\tdef br(self): return self.__br\r\n\r\n\tdef tr(self): return np.array([self.__br[0],self.__tl[1]])\r\n\r\n\tdef bl(self): return np.array([self.__tl[0],self.__br[1]])\r\n\r\n\tdef cl(self): return self.__cl\r\n\r\n\tdef area(self): return np.prod(self.wh())\r\n\r\n\tdef prob(self): return self.__prob\r\n\r\n\tdef set_class(self,cl):\r\n\t\tself.__cl = cl\r\n\r\n\tdef set_tl(self,tl):\r\n\t\tself.__tl = tl\r\n\r\n\tdef set_br(self,br):\r\n\t\tself.__br = br\r\n\r\n\tdef set_wh(self,wh):\r\n\t\tcc = self.cc()\r\n\t\tself.__tl = cc - .5*wh\r\n\t\tself.__br = cc + .5*wh\r\n\r\n\tdef set_prob(self,prob):\r\n\t\tself.__prob = prob\r\n\r\ndef getOutputsNames(net):\r\n \"\"\" Get the names of the output layers.\r\n \r\n Generally in a sequential CNN network there will be \r\n only one output layer at the end. In the YOLOv3 \r\n architecture, there are multiple output layers giving\r\n out predictions. This function gives the names of the \r\n output layers. An output layer is not connected to \r\n any next layer.\r\n \r\n Args\r\n net : neural network\r\n \"\"\"\r\n # Get the names of all the layers in the network\r\n layersNames = net.getLayerNames()\r\n # Get the names of the output layers, i.e. 
the layers with unconnected outputs\r\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\n\r\ndef postprocess(frame, outs, confThreshold, nmsThreshold=0.4):\r\n frameHeight = frame.shape[0]\r\n frameWidth = frame.shape[1]\r\n\r\n classIds = []\r\n confidences = []\r\n boxes = []\r\n # Scan through all the bounding boxes output from the network and keep only the\r\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\r\n classIds = []\r\n confidences = []\r\n boxes = []\r\n predictions = []\r\n\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n classId = np.argmax(scores)\r\n confidence = scores[classId]\r\n if confidence > confThreshold:\r\n center_x = int(detection[0] * frameWidth)\r\n center_y = int(detection[1] * frameHeight)\r\n width = int(detection[2] * frameWidth)\r\n height = int(detection[3] * frameHeight)\r\n left = int(center_x - width / 2)\r\n top = int(center_y - height / 2)\r\n classIds.append(classId)\r\n confidences.append(float(confidence))\r\n boxes.append([left, top, width, height])\r\n\r\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\r\n # lower confidences.\r\n if nmsThreshold:\r\n indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\r\n else:\r\n indices = [[x] for x in range(len(boxes))]\r\n\r\n for i in indices:\r\n i = i[0]\r\n box = boxes[i]\r\n left = box[0]\r\n top = box[1]\r\n width = box[2]\r\n height = box[3]\r\n predictions.append([classIds[i], confidences[i], [left, top, left + width, top + height]])\r\n\r\n return predictions\r\n\r\ndef crop_region(I,label,bg=0.5):\r\n\r\n wh = np.array(I.shape[1::-1])\r\n\r\n ch = I.shape[2] if len(I.shape) == 3 else 1\r\n tl = np.floor(label.tl()*wh).astype(int)\r\n br = np.ceil (label.br()*wh).astype(int)\r\n outwh = br-tl\r\n\r\n if np.prod(outwh) == 0.:\r\n return None\r\n\r\n outsize = (outwh[1],outwh[0],ch) if ch > 1 else (outwh[1],outwh[0])\r\n if (np.array(outsize) < 0).any():\r\n pause()\r\n Iout = np.zeros(outsize,dtype=I.dtype) + bg\r\n\r\n offset = np.minimum(tl,0)*(-1)\r\n tl = np.maximum(tl,0)\r\n br = np.minimum(br,wh)\r\n wh = br - tl\r\n\r\n Iout[offset[1]:(offset[1] + wh[1]),offset[0]:(offset[0] + wh[0])] = I[tl[1]:br[1],tl[0]:br[0]]\r\n\r\n return Iout\r\n\r\ndef vehicle_detection(frame):\r\n\r\n # Create a 4D blob from a frame.\r\n blob = cv.dnn.blobFromImage(frame, 1/255, (416, 416), [0,0,0], 1, crop=False)\r\n\r\n # Sets the input to the network\r\n vehicle_net.setInput(blob)\r\n\r\n # Runs the forward pass to get output of the output layers\r\n outs = vehicle_net.forward(getOutputsNames(vehicle_net))\r\n\r\n # Remove the bounding boxes with low confidence\r\n R = postprocess(frame, outs, vehicle_threshold)\r\n \r\n Icars = []\r\n Lcars = []\r\n\r\n if len(R):\r\n WH = np.array(frame.shape[1::-1], dtype=float)\r\n for i, r in enumerate(R):\r\n # if classId in ['car', 'bus'] and confidence > vehicle_threshold\r\n if r[0] in [6, 7] and r[1] > vehicle_threshold:\r\n box = r[2]\r\n x1,y1,x2,y2 = (np.array(r[2])/np.concatenate((WH,WH))).tolist()\r\n tl = np.array([x1, y1])\r\n br = np.array([x2, y2])\r\n label = Label(0,tl,br)\r\n Lcars.append(label)\r\n Icar = crop_region(frame,label)\r\n Icars.append(Icar.astype(np.uint8))\r\n\r\n return Icars, Lcars\r\n\r\ndef im2single(I):\r\n assert(I.dtype == 'uint8')\r\n return I.astype('float32')/255.\r\n\r\nclass DLabel (Label):\r\n\r\n\tdef __init__(self,cl,pts,prob):\r\n\t\tself.pts = pts\r\n\t\ttl = 
np.amin(pts,1)\r\n\t\tbr = np.amax(pts,1)\r\n\t\tLabel.__init__(self,cl,tl,br,prob)\r\n\r\ndef getWH(shape):\r\n return np.array(shape[1::-1]).astype(float)\r\n\r\n\r\ndef IOU(tl1,br1,tl2,br2):\r\n wh1,wh2 = br1-tl1,br2-tl2\r\n assert((wh1>=.0).all() and (wh2>=.0).all())\r\n \r\n intersection_wh = np.maximum(np.minimum(br1,br2) - np.maximum(tl1,tl2),0.)\r\n intersection_area = np.prod(intersection_wh)\r\n area1,area2 = (np.prod(wh1),np.prod(wh2))\r\n union_area = area1 + area2 - intersection_area;\r\n return intersection_area/union_area\r\n\r\ndef IOU_labels(l1,l2):\r\n return IOU(l1.tl(),l1.br(),l2.tl(),l2.br())\r\n\r\ndef nms(Labels,iou_threshold=.5):\r\n\r\n SelectedLabels = []\r\n Labels.sort(key=lambda l: l.prob(),reverse=True)\r\n \r\n for label in Labels:\r\n non_overlap = True\r\n for sel_label in SelectedLabels:\r\n if IOU_labels(label,sel_label) > iou_threshold:\r\n non_overlap = False\r\n break\r\n if non_overlap:\r\n SelectedLabels.append(label)\r\n\r\n return SelectedLabels\r\n\r\ndef getRectPts(tlx,tly,brx,bry):\r\n\treturn np.matrix([[tlx,brx,brx,tlx],[tly,tly,bry,bry],[1.,1.,1.,1.]],dtype=float)\r\n\r\ndef find_T_matrix(pts,t_pts):\r\n\tA = np.zeros((8,9))\r\n\tfor i in range(0,4):\r\n\t\txi = pts[:,i];\r\n\t\txil = t_pts[:,i];\r\n\t\txi = xi.T\r\n\t\t\r\n\t\tA[i*2, 3:6] = -xil[2]*xi\r\n\t\tA[i*2, 6: ] = xil[1]*xi\r\n\t\tA[i*2+1, :3] = xil[2]*xi\r\n\t\tA[i*2+1, 6: ] = -xil[0]*xi\r\n\r\n\t\r\n\t[U,S,V] = np.linalg.svd(A)\r\n\tH = V[-1,:].reshape((3,3))\r\n\r\n\treturn H\r\n\r\ndef reconstruct(Iorig,I,Y,out_size,threshold=.9):\r\n \r\n\tnet_stride \t= 2**4\r\n\tside \t\t= ((208. + 40.)/2.)/net_stride # 7.75\r\n\r\n\tProbs = Y[...,0]\r\n\t# print Probs\r\n\tAffines = Y[...,2:]\r\n\trx,ry = Y.shape[:2]\r\n\t# print Y.shape\r\n\tywh = Y.shape[1::-1]\r\n\t# print ywh\r\n\tiwh = np.array(I.shape[1::-1],dtype=float).reshape((2,1))\r\n\t# print iwh\r\n\r\n\txx,yy = np.where(Probs>threshold)\r\n\t# print xx,yy\r\n\r\n\tWH = getWH(I.shape)\r\n\tMN = WH/net_stride\r\n\r\n\t# print MN\r\n\r\n\tvxx = vyy = 0.5 #alpha\r\n\r\n\tbase = lambda vx,vy: np.matrix([[-vx,-vy,1.],[vx,-vy,1.],[vx,vy,1.],[-vx,vy,1.]]).T\r\n\tlabels = []\r\n\r\n\tfor i in range(len(xx)):\r\n\t\ty,x = xx[i],yy[i]\r\n\t\taffine = Affines[y,x]\r\n\t\tprob = Probs[y,x]\r\n\r\n\t\tmn = np.array([float(x) + .5,float(y) + .5])\r\n\r\n\t\tA = np.reshape(affine,(2,3))\r\n\t\tA[0,0] = max(A[0,0],0.)\r\n\t\tA[1,1] = max(A[1,1],0.)\r\n\t\t# print A\r\n\t\tpts = np.array(A*base(vxx,vyy)) #*alpha\r\n\t\t# print pts\r\n\t\tpts_MN_center_mn = pts*side\r\n\t\tpts_MN = pts_MN_center_mn + mn.reshape((2,1))\r\n\r\n\t\tpts_prop = pts_MN/MN.reshape((2,1))\r\n\r\n\t\tlabels.append(DLabel(0,pts_prop,prob))\r\n\r\n\t# print(labels)\r\n\tfinal_labels = nms(labels,.1)\r\n\tTLps = []\r\n\r\n\tif len(final_labels):\r\n\t\tfinal_labels.sort(key=lambda x: x.prob(), reverse=True)\r\n\t\tfor i,label in enumerate(final_labels):\r\n\r\n\t\t\tt_ptsh \t= getRectPts(0,0,out_size[0],out_size[1])\r\n\t\t\tptsh \t= np.concatenate((label.pts*getWH(Iorig.shape).reshape((2,1)),np.ones((1,4))))\r\n\t\t\tH \t\t= find_T_matrix(ptsh,t_ptsh)\r\n\t\t\tIlp \t= cv.warpPerspective(Iorig,H,out_size,borderValue=.0)\r\n\t\t\t# cv.imshow(\"frame\", Iorig)\r\n\t\t\t# cv.waitKey(0)\r\n\r\n\t\t\tTLps.append(Ilp)\r\n\r\n\treturn final_labels,TLps\r\n\r\ndef detect_lp(model,I,max_dim,net_step,out_size,threshold):\r\n\r\n\tmin_dim_img = min(I.shape[:2])\r\n\tfactor \t\t= float(max_dim)/min_dim_img\r\n\t# print I.shape[:2]\r\n\r\n\tw,h = 
(np.array(I.shape[1::-1],dtype=float)*factor).astype(int).tolist()\r\n\tw += (w%net_step!=0)*(net_step - w%net_step)\r\n\th += (h%net_step!=0)*(net_step - h%net_step)\r\n\t# print w\r\n\t# print h\r\n\tIresized = cv.resize(I,(w,h))\r\n\r\n\tT = Iresized.copy()\r\n\tT = T.reshape((1,T.shape[0],T.shape[1],T.shape[2]))\r\n\r\n\tstart \t= time.time()\r\n\tYr \t\t= model.predict(T)\r\n\tYr \t\t= np.squeeze(Yr)\r\n\telapsed = time.time() - start\r\n\t# print(Yr)\r\n\tL,TLps = reconstruct(I,Iresized,Yr,out_size,threshold)\r\n\r\n\treturn L,TLps,elapsed\r\n\r\n\r\n# Import necessary modules\r\nfrom keras.models import model_from_json\r\n\r\n# Initialize the parameters\r\nlp_threshold = .6\r\nwpod_lp_weights_path = 'lp-detector/wpod-net_update1.h5'\r\nwpod_lp_json_path = 'lp-detector/wpod-net_update1.json'\r\n\r\n# Load the model\r\nwith open(wpod_lp_json_path,'r') as json_file:\r\n wpod_json = json_file.read()\r\nlp_net = model_from_json(wpod_json)\r\nlp_net.load_weights(wpod_lp_weights_path)\r\n\r\ndef lp_detection(vehicle_img):\r\n\r\n ratio = float(max(vehicle_img.shape[:2]))/min(vehicle_img.shape[:2])\r\n side = int(ratio * 288.)\r\n bound_dim = min(side + (side % (2**4) ), 608)\r\n Llps, LlpImgs, elapsed = detect_lp(lp_net,im2single(vehicle_img),bound_dim,2**4,(240,80),lp_threshold)\r\n \r\n Ilps = []\r\n for LlpImg in LlpImgs:\r\n Ilp = LlpImg * 255.\r\n Ilps.append(Ilp.astype(np.uint8))\r\n\r\n return Llps, Ilps, elapsed\r\n\r\n# Initialize the parameters\r\nocr_threshold = .4\r\nocr_weights_path = 'ocr/ocr-net.weights'\r\nocr_confi_path = 'ocr/ocr-net.cfg'\r\nocr_classes_path = 'ocr/ocr-net.names'\r\nocr_classes = None\r\n\r\nwith open(ocr_classes_path, 'rt') as f:\r\n ocr_classes = f.read().rstrip('\\n').split('\\n')\r\n\r\n# load the network\r\nocr_net = cv.dnn.readNetFromDarknet(ocr_confi_path, ocr_weights_path)\r\nocr_net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\r\nocr_net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\r\n\r\ndef dknet_label_conversion(R,img_width,img_height, classes):\r\n\tWH = np.array([img_width,img_height],dtype=float)\r\n\tL = []\r\n\tfor r in R:\r\n\t\tcenter = np.array(r[2][:2])/WH\r\n\t\twh2 = (np.array(r[2][2:])/WH)*.5\r\n\t\tL.append(Label(ord(classes[r[0]]),tl=center-wh2,br=center+wh2,prob=r[1]))\r\n\treturn L\r\n\r\ndef lp_ocr(lp_img):\r\n h,w,_ = lp_img.shape\r\n # Create a 4D blob from a frame\r\n blob = cv.dnn.blobFromImage(lp_img, 1/255, (240, 80), [0,0,0], 1, crop=False)\r\n # Sets the input to the network\r\n ocr_net.setInput(blob)\r\n # Runs the forward pass to get output of the output layers\r\n outs = ocr_net.forward(getOutputsNames(ocr_net))\r\n # Remove the bounding boxes with low confidence\r\n R = postprocess(lp_img, outs, ocr_threshold, None)\r\n\r\n lp_str = ''\r\n if len(R):\r\n L = dknet_label_conversion(R, w, h, ocr_classes)\r\n L.sort(key=lambda x: x.tl()[0])\r\n lp_str = ''.join([chr(l.cl()) for l in L])\r\n\r\n return lp_str\r\n\r\ndef draw_label(I,l,color=(255,0,0),thickness=1):\r\n\twh = np.array(I.shape[1::-1]).astype(float)\r\n\ttl = tuple((l.tl()*wh).astype(int).tolist())\r\n\tbr = tuple((l.br()*wh).astype(int).tolist())\r\n\tcv.rectangle(I,tl,br,color,thickness=thickness)\r\n\r\ndef draw_losangle(I,pts,color=(1.,1.,1.),thickness=1):\r\n\tassert(pts.shape[0] == 2 and pts.shape[1] == 4)\r\n\r\n\tfor i in range(4):\r\n\t\tpt1 = tuple(pts[:,i].astype(int).tolist())\r\n\t\tpt2 = tuple(pts[:,(i+1)%4].astype(int).tolist())\r\n\t\tcv.line(I,pt1,pt2,color,thickness)\r\n\r\ndef 
write2img(Img,label,strg,txt_color=(0,0,0),bg_color=(255,255,255),font_size=1):\r\n\twh_img = np.array(Img.shape[1::-1])\r\n\r\n\tfont = cv.FONT_HERSHEY_SIMPLEX\r\n\r\n\twh_text,v = cv.getTextSize(strg, font, font_size, 3)\r\n\tbl_corner = label.tl()*wh_img\r\n\r\n\ttl_corner = np.array([bl_corner[0],bl_corner[1]-wh_text[1]])/wh_img\r\n\tbr_corner = np.array([bl_corner[0]+wh_text[0],bl_corner[1]])/wh_img\r\n\tbl_corner /= wh_img\r\n\r\n\tif (tl_corner < 0.).any():\r\n\t\tdelta = 0. - np.minimum(tl_corner,0.)\r\n\telif (br_corner > 1.).any():\r\n\t\tdelta = 1. - np.maximum(br_corner,1.)\r\n\telse:\r\n\t\tdelta = 0.\r\n\r\n\ttl_corner += delta\r\n\tbr_corner += delta\r\n\tbl_corner += delta\r\n\r\n\ttpl = lambda x: tuple((x*wh_img).astype(int).tolist())\r\n\r\n\tcv.rectangle(Img, tpl(tl_corner), tpl(br_corner), bg_color, -1)\t\r\n\tcv.putText(Img,strg,tpl(bl_corner),font,font_size,txt_color,3)\r\n\r\n\r\ndef processimage(spath, addr, vehstr):\r\n lp_str_list = []\r\n im1 = PIL.Image.open(spath)\r\n p = Path(spath)\r\n fpath = p.parent\r\n jpegpath = os.path.join(fpath,'incoming.jpg')\r\n im1.save(jpegpath)\r\n # read incoming image\r\n org_img = cv.imread(jpegpath)\r\n test_img = org_img.copy()\r\n # detect cars\r\n Icars, Lcars = vehicle_detection(test_img)\r\n #print('# vehicle detected: {}'.format(len(Icars)))\r\n logger1.info('vehicle detected: %d - for the client - %s', len(Icars), addr)\r\n # for each detected car in incoming image\r\n for Icar, Lcar in zip(Icars, Lcars):\r\n # draw car bounding box on incoming image\r\n draw_label(test_img,Lcar,color=(0,255,255),thickness=3)\r\n # detect LP in detected car\r\n Llps, Ilps, elapsed = lp_detection(Icar) \r\n # for each detected LP in detected car image\r\n for Llp, Ilp in zip(Llps, Ilps):\r\n # draw LP bounding box on incoming image\r\n pts = Llp.pts*Lcar.wh().reshape(2,1) + Lcar.tl().reshape(2,1)\r\n ptspx = pts*np.array(test_img.shape[1::-1],dtype=float).reshape(2,1)\r\n draw_losangle(test_img,ptspx,color=(0,0,255),thickness=3)\r\n # Recognize characters\r\n lp_str = lp_ocr(Ilp)\r\n lp_str_list.append(lp_str)\r\n #print(f'Detected number plate is- {lp_str}')\r\n logger1.info('License plate detected - %s - for the client - %s', lp_str, addr)\r\n # write text on incoming image\r\n llp = Label(0,tl=pts.min(1),br=pts.max(1))\r\n write2img(test_img,llp,lp_str) \r\n filepath = os.path.join(fpath , 'detected-image.jpg')\r\n cv.imwrite(filepath, test_img)\r\n s3file = addr.replace('.','-',3) + '.jpg'\r\n if vehstr.upper() in lp_str_list:\r\n s3.upload_file(filepath, \"raj-5g-bucket\", s3file)\r\n return 0\r\n else:\r\n return 1\r\n \r\n\r\n","sub_path":"server/vehiclesearch.py","file_name":"vehiclesearch.py","file_ext":"py","file_size_in_byte":15404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237948254","text":"import logging\nimport subprocess\nimport time\nimport os\n\nfrom haystack import Finder\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.indexing.cleaning import clean_wiki_text\nfrom haystack.indexing.utils import convert_files_to_dicts\nfrom haystack.reader.farm import FARMReader\nfrom haystack.reader.transformers import TransformersReader\nfrom haystack.utils import print_answers\nfrom haystack.retriever.elasticsearch import ElasticsearchRetriever\n\n# windows workaround to prevent endless recursion\nif __name__ == '__main__':\n programstart = time.time()\n # Start new server or connect to a running one. 
true and false respectively\n LAUNCH_ELASTICSEARCH = False\n # Determines whether the Elasticsearch Server has to be populated with data\n # or not\n POPULATE_DOCUMENT_STORE = False\n # Start an Elasticsearch server\n if LAUNCH_ELASTICSEARCH:\n logging.info(\"Starting Elasticsearch ...\")\n status = subprocess.run(\n ['docker run -d -p 9200:9200 -e \"discovery.type=single-node\" elasticsearch:7.6.2'],\n shell=True\n )\n if status.returncode:\n raise Exception(\"Failed to launch Elasticsearch. If you want to \"\n \"connect to an existing Elasticsearch instance\"\n \"then set LAUNCH_ELASTICSEARCH in the script to False.\")\n time.sleep(15)\n\n # Connect to Elasticsearch\n document_store = ElasticsearchDocumentStore(\n host=\"localhost\", username=\"\", password=\"\", index=\"document\")\n\n # ## Cleaning & indexing documents\n\n # Initialize Elasticsearch with docs\n if POPULATE_DOCUMENT_STORE:\n # set path to directory containing the text files\n doc_dir = os.getcwd() + \"\\\\kbQA\\\\data\\\\tesla\"\n # convert files to dicts containing documents that can be indexed to our\n # datastore\n dicts = convert_files_to_dicts(\n dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)\n\n # write the docs to the elasticsearch database\n document_store.write_documents(dicts)\n\n # ## Initalize Retriever, Reader, & Finder\n # ### Retriever\n # Retrievers help narrowing down the scope for the Reader to smaller units\n # of text where a given question\n # could be answered.\n # We use Elasticsearch's default BM25 algorithm\n retriever = ElasticsearchRetriever(document_store=document_store)\n # ### Reader\n # A Reader scans the texts returned by retrievers in detail and extracts\n # the k best answers. It is based on a powerful, but slower deep learning model.\n reader = TransformersReader(\n model=\"dbmdz/bert-base-german-uncased\", tokenizer=\"dbmdz/bert-base-german-uncased\", use_gpu=-1)\n # ### Finder\n # The Finder sticks together reader and retriever in a pipeline to answer\n # our actual questions.\n finder = Finder(reader, retriever)\n initend = time.time()\n questions = [\n \"worauf sollte man auf Fähren achten?\",\n \"wird die verkehrschilderkennung für alle kommen?\",\n \"was beinhaltet der Autopilot?\",\n \"wie viel verbaucht das Model 3?\",\n \"fährt das auto wenn der stecker steckt?\",\n \"Welche dimension haben die kleinen Sommerreifen?\",\n \"wie viel zoll haben die Sommerreifen?\",\n \"Werden UV-Strahlen beim Tesla geblockt?\",\n\n \"Ich habe bei Tesla 500€ pro Rad bezahlt.\",\n \"Tempomat Geschwindigkeit ändern.\",\n \"die batterie sollte mindestens 50% haben.\"\n ]\n # auch hier wieder: Kleinschreibung zwingend notwendig!\n # question = question.lower()\n times = []\n\n for question in questions:\n start_time = time.process_time()\n prediction = finder.get_answers(\n question, top_k_reader=5, top_k_retriever=5)\n end_time = time.process_time()\n times.append(end_time - start_time)\n print_answers(prediction, details=\"minimal\")\n\n total = 0\n for zeit in times:\n total = total + zeit\n print(total / len(times))\n\n print(\"init_time:\")\n print(initend-programstart)\n","sub_path":"kbQA/kbQA_german_elasticsearch.py","file_name":"kbQA_german_elasticsearch.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278696435","text":"from flask import Flask, render_template, jsonify\nfrom pymongo import MongoClient\nimport sys, json\nfrom bson import json_util\nfrom bson.json_util 
import dumps\nimport pandas as pd\nimport time\nimport requests as req\nimport time\nimport datetime\nfrom flask_pymongo import PyMongo\nimport os \n\nMONGODB_URI = os.environ.get('MONGODB_URI')\nif not MONGODB_URI:\n MONGODB_URI = \"mongodb://localhost:27017/austinDB\"\n\napp = Flask(__name__)\napp.config['MONGO_URI'] = MONGODB_URI\nmongo = PyMongo(app)\n\n##########################################################################\n#################### Creating the Database #############################\n##########################################################################\nurl = \"https://data.austintexas.gov/resource/r3af-2r8x.json?$limit=50000&$offset=0\"\n\n# def get_data():\n # get the json from austin data api\ntraffic_response = req.get(url)\ntraffic_json = traffic_response.json()\ntime.sleep(1)\n\n# append json into DataFrame\ndf = pd.DataFrame(traffic_json)\ntime.sleep(1)\n\n# Selecting specific columns needed\nclean_df = df[['address','issue_reported','location_latitude', 'location_longitude', 'published_date']]\n\n# Drop any NAN values and append to a list\ndropped = clean_df.dropna()\nnewdate = dropped.loc[0:, 'published_date']\nlisty = []\nfor i in newdate:\n listy.append(i[0:10])\ntime.sleep(1)\n\n# delete old values for published_date and pass in new list to published_date column\nxyz = dropped.drop('published_date', axis=1)\nxyz['published_date'] = listy\ntime.sleep(1)\n\n\nMONGODB_HOST = 'localhost'\nMONGODB_PORT = 27017\nDBS_NAME = 'austinDB'\nCOLLECTION_NAME = 'austinData'\ndata = json_util.loads(xyz.to_json(orient='records'))\nconnection = MongoClient(MONGODB_HOST, MONGODB_PORT)\n# blah = connection.austinDB.dropDatabase()\ndb = connection.austinDB\ndb.austinData.remove()\ntime.sleep(1)\n# db.dropDatabase()\naustinData = db.austinData\nposts_id = austinData.insert_many(data)\n\nMONGODB_HOST = 'localhost'\nMONGODB_PORT = 27017\nDBS_NAME = 'austinDB'\nCOLLECTION_NAME = 'austinData'\napp = Flask(__name__)\n\n\nFIELDS = {'_id': False, 'address': True, 'issue_reported': True, 'location_latitude': True, 'location_longitude': True, 'published_date': True}\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/austin/data\")\ndef austin_incidents():\n connection = MongoClient(MONGODB_HOST, MONGODB_PORT)\n collection = connection[DBS_NAME][COLLECTION_NAME]\n incidents = collection.find(projection=FIELDS)\n json_incidents = []\n for incident in incidents:\n json_incidents.append(incident)\n json_incidents = json.dumps(json_incidents, default=json_util.default)\n connection.close()\n return json_incidents\n\n@app.route(\"/incident_types\")\ndef incident_types():\n connection = MongoClient(MONGODB_HOST, MONGODB_PORT)\n collection = connection[DBS_NAME][COLLECTION_NAME]\n incidents = collection.distinct( \"issue_reported\" )\n json_incidents = []\n for incident in incidents:\n json_incidents.append(incident)\n json_incidents = json.dumps(json_incidents, default=json_util.default)\n connection.close()\n return json_incidents\n\n@app.route(\"/dates\")\ndef dates():\n connection = MongoClient(MONGODB_HOST, MONGODB_PORT)\n collection = connection[DBS_NAME][COLLECTION_NAME]\n dates = collection.distinct( \"published_date\" )\n json_dates = []\n for date in dates: \n json_dates.append(date)\n json_dates = json.dumps(json_dates, default=json_util.default)\n connection.close()\n return json_dates \n\n@app.route(\"/api/v1.1/pie/\")\ndef pieChartData():\n connection = MongoClient(MONGODB_HOST, MONGODB_PORT)\n collection = 
connection[DBS_NAME][COLLECTION_NAME]\n issues = collection.find(projection=FIELDS)\n df = pd.DataFrame(list(issues))\n top10=df[['issue_reported','location_latitude']].groupby(['issue_reported']).count().sort_values('location_latitude',ascending=False)[:10].reset_index().rename(columns={'Location':'Num Incidents'})\n json = top10.reset_index(drop=True)\n dictionary = json.to_dict(orient='records')\n connection.close()\n return jsonify(dictionary)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000, debug=True)","sub_path":"austin_traffic/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"183659352","text":"#\n# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)\n# Ivannikov Institute for System Programming of the Russian Academy of Sciences\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport re\n\nfrom core.utils import unique_file_name\nfrom core.vtg.emg.common import get_conf_property, get_necessary_conf_property\nfrom core.vtg.emg.common.c import Function\nfrom core.vtg.emg.common.c.types import Pointer, Primitive\nfrom core.vtg.emg.modelTranslator.fsa_translator.common import initialize_automaton_variables\n\n\nclass Aspect(Function):\n \"\"\"\n Representation of the aspect file pointcuts for source functions which calls should be modified or replaced by\n models. 
This is an aspect-oriented extension of the C language which is supported by CIF.\n \"\"\"\n\n def __init__(self, name, declaration, aspect_type=\"after\"):\n super(Aspect, self).__init__(name, declaration)\n self.aspect_type = aspect_type\n\n def define(self):\n \"\"\"\n Print description of the replacement that should be made to the source funtion calls.\n\n :return: List of strings.\n \"\"\"\n lines = list()\n lines.append(\"around: call({}) \".format(\"$ {}(..)\".format(self.name)) +\n \" {\\n\")\n lines.extend(['\\t{}\\n'.format(stm) for stm in self.body])\n lines.append(\"}\\n\")\n return lines\n\n\nclass CModel:\n \"\"\"Representation of the environment model in the C language (with extensions).\"\"\"\n\n mem_function_map = {\n \"ALLOC\": \"ldv_xmalloc\",\n \"UALLOC\": \"ldv_xmalloc_unknown_size\",\n \"ZALLOC\": \"ldv_zalloc\"\n }\n free_function_map = {\n \"FREE\": \"ldv_free\"\n }\n irq_function_map = {\n \"IN_INTERRUPT_CONTEXT\": 'ldv_in_interrupt_context',\n \"SWITCH_TO_IRQ_CONTEXT\": 'ldv_switch_to_interrupt_context',\n \"SWITCH_TO_PROCESS_CONTEXT\": 'ldv_switch_to_process_context'\n }\n\n def __init__(self, logger, conf, workdir, files, entry_point_name, entry_file):\n self.entry_file = entry_file\n self.entry_name = entry_point_name\n self.files = files\n self.types = list()\n self._logger = logger\n self._conf = conf\n self._workdir = workdir\n self._variables_declarations = dict()\n self._variables_initializations = dict()\n self._function_definitions = dict()\n self._function_declarations = dict()\n self._headers = dict()\n self._before_aspects = dict()\n self._call_aspects = dict()\n self.__external_allocated = dict()\n\n def add_headers(self, file, headers):\n \"\"\"\n Add headers include directives to the particular file.\n\n :param file: C file.\n :param headers: List of header files.\n :return: None.\n \"\"\"\n if file not in self._headers:\n self._headers[file] = [headers]\n else:\n # This is to avoid dependencies broken\n self._headers[file].append(headers)\n\n def add_function_definition(self, func):\n \"\"\"\n Add a function definition to the main environment model file.\n\n :param func: Function object.\n :return: None.\n \"\"\"\n if not func.definition_file:\n raise RuntimeError('Always expect file to place function definition')\n if func.definition_file not in self._function_definitions:\n self._function_definitions[func.definition_file] = dict()\n if self.entry_file not in self._function_definitions:\n self._function_definitions[self.entry_file] = dict()\n\n self._function_definitions[func.definition_file][func.name] = func.define()\n self.add_function_declaration(func.definition_file, func, extern=False)\n\n def add_function_declaration(self, file, func, extern=False):\n \"\"\"\n Add a function declaration to the file.\n\n :param file: File name.\n :param func: Function object.\n :param extern: Add it as an extern function.\n :return: None.\n \"\"\"\n if file not in self._function_declarations:\n self._function_declarations[file] = dict()\n\n if extern and func.name in self._function_declarations[file]:\n return\n self._function_declarations[file][func.name] = func.declare(extern=extern)\n\n def add_global_variable(self, variable, file, extern=False, initialize=True):\n \"\"\"\n Add a global variable declararation or/and initalization to the target file.\n\n :param variable: Variable object.\n :param file: File name.\n :param extern: Add it as an extern variable.\n :param initialize: Add also the global variable initialization.\n :return: None.\n \"\"\"\n if 
not file and variable.file:\n file = variable.file\n elif not file:\n file = self.entry_file\n\n if file not in self._variables_declarations:\n self._variables_declarations[file] = dict()\n if file not in self._variables_initializations:\n self._variables_initializations[file] = dict()\n\n if extern and variable.name not in self._variables_declarations[file]:\n self._variables_declarations[file][variable.name] = variable.declare(extern=extern) + \";\\n\"\n elif not extern:\n self._variables_declarations[file][variable.name] = variable.declare(extern=extern) + \";\\n\"\n if initialize:\n if variable.value and \\\n ((isinstance(variable.declaration, Pointer) and isinstance(variable.declaration.points, Function))\n or isinstance(variable.declaration, Primitive)):\n self._variables_initializations[file][variable.name] = variable.declare_with_init() + \";\\n\"\n elif not variable.value and isinstance(variable.declaration, Pointer):\n if file not in self.__external_allocated:\n self.__external_allocated[file] = []\n self.__external_allocated[file].append(variable)\n\n def text_processor(self, automaton, statement):\n \"\"\"\n Analyze given C code statement and replace all found EMG extensions with the clean C code.\n\n :param automaton: Automaton object.\n :param statement: Statement string.\n :return: Refined C statements list.\n \"\"\"\n models = FunctionModels(self._logger, self._conf, self.mem_function_map, self.free_function_map,\n self.irq_function_map)\n return models.text_processor(automaton, statement)\n\n def add_function_model(self, func, body):\n \"\"\"\n Add function model to the environment model.\n\n :param func: Function object to model.\n :param body: List of C statements which should replace function calls.\n :return: None.\n \"\"\"\n new_aspect = Aspect(func.name, func.declaration)\n new_aspect.body = body\n files = set()\n files.update(func.files_called_at)\n files.update(func.declaration_files)\n for file in files:\n if file not in self._call_aspects:\n self._call_aspects[file] = list()\n self._call_aspects[file].append(new_aspect)\n\n def print_source_code(self, additional_lines):\n \"\"\"\n Generate an environment model as a C code. 
The code is distributed across aspect addictions for original\n source files and the main environment model C code.\n\n :param additional_lines: Dictionary with the user-defined C code:\n {'file name': {'definitions': [...], 'declarations': []}}\n :return: Dictionary {'file': Path to generated file with the Code}\n \"\"\"\n aspect_dir = \"aspects\"\n self._logger.info(\"Create directory for aspect files {}\".format(\"aspects\"))\n os.makedirs(aspect_dir.encode('utf8'), exist_ok=True)\n\n if get_conf_property(self._conf[\"translation options\"], \"propogate headers to instrumented files\"):\n for file in (f for f in self.files if f in additional_lines):\n self.add_headers(file,\n get_necessary_conf_property(self._conf[\"translation options\"], \"additional headers\"))\n\n addictions = dict()\n # Write aspects\n for file in self.files:\n lines = list()\n\n # Check headers\n if file == self.entry_file:\n if self.entry_file in self._headers:\n lines.extend(['#include <{}>\\n'.format(h) for h in\n self._collapse_headers_sets(self._headers[self.entry_file])])\n lines.append(\"\\n\")\n\n for tp in self.types:\n lines.append(tp.to_string('') + \" {\\n\")\n for field in list(tp.fields.keys()):\n lines.append(\"\\t{};\\n\".format(tp.fields[field].to_string(field, typedef='complex_and_params'),\n scope={self.entry_file}))\n lines.append(\"};\\n\")\n lines.append(\"\\n\")\n else:\n # Generate function declarations\n self._logger.info('Add aspects to a file {!r}'.format(file))\n\n # Add headers\n if file in self._headers and self._headers[file]:\n lines.append('before: file (\"$this\")\\n')\n lines.append('{\\n')\n lines.extend(['#include <{}>\\n'.format(h) for h in\n self._collapse_headers_sets(self._headers[file])])\n lines.append(\"\\n\")\n lines.append(\"}\\n\\n\")\n\n # Add model itself\n lines.append('after: file (\"$this\")\\n')\n lines.append('{\\n')\n\n if file in additional_lines and 'declarations' in additional_lines[file] and \\\n len(additional_lines[file]['declarations']) > 0:\n lines.append(\"\\n\")\n lines.append(\"/* EMG aliases */\\n\")\n lines.extend(additional_lines[file]['declarations'])\n\n if file in self._function_declarations:\n lines.append(\"\\n\")\n lines.append(\"/* EMG Function declarations */\\n\")\n for func in self._function_declarations[file].keys():\n lines.extend(self._function_declarations[file][func])\n\n if file in self._variables_declarations:\n lines.append(\"\\n\")\n lines.append(\"/* EMG variable declarations */\\n\")\n for variable in self._variables_declarations[file].keys():\n lines.extend(self._variables_declarations[file][variable])\n\n if file in self._variables_initializations and len(self._variables_initializations[file]) > 0:\n lines.append(\"\\n\")\n lines.append(\"/* EMG variable initialization */\\n\")\n for variable in self._variables_initializations[file].keys():\n lines.extend(self._variables_initializations[file][variable])\n\n if file in additional_lines and 'definitions' in additional_lines[file] and \\\n len(additional_lines[file]['definitions']) > 0:\n lines.append(\"\\n\")\n lines.append(\"/* EMG aliases for functions */\\n\")\n lines.extend(additional_lines[file]['definitions'])\n\n if file in self._function_definitions and len(self._function_definitions[file]) > 0:\n lines.append(\"\\n\")\n lines.append(\"/* EMG function definitions */\\n\")\n for func in self._function_definitions[file].keys():\n lines.extend(self._function_definitions[file][func])\n lines.append(\"\\n\")\n\n if file != self.entry_file:\n lines.append(\"}\\n\\n\")\n\n 
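# Editorial sketch (assumed overall shape, pieced together from the append
# calls above and from Aspect.define(); the replaced function name below is
# hypothetical): for a non-entry file the assembled aspect roughly reads
#   before: file ("$this") { #include <...> }
#   after:  file ("$this") { /* EMG declarations, variables, definitions */ }
# followed by one replacement advice per modeled function, e.g.
#   around: call($ some_kernel_func(..))  {
#       /* model body statements passed to add_function_model() */
#   }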
if file in self._call_aspects and len(self._call_aspects[file]) > 0:\n lines.append(\"/* EMG kernel function models */\\n\")\n for aspect in self._call_aspects[file]:\n lines.extend(aspect.define())\n lines.append(\"\\n\")\n\n if file != self.entry_file:\n name = \"{}.aspect\".format(unique_file_name(\"aspects/ldv_\" + os.path.splitext(os.path.basename(file))[0],\n '.aspect'))\n path = os.path.relpath(name, self._workdir)\n self._logger.info(\"Add aspect file {!r}\".format(path))\n addictions[file] = path\n else:\n name = self.entry_file\n with open(name, \"w\", encoding=\"utf8\") as fh:\n fh.writelines(lines)\n\n return addictions\n\n def compose_entry_point(self, given_body):\n \"\"\"\n Generate an entry point function for the environment model.\n\n :param given_body: Body of the main function provided by a translator.\n :return: List of C statements of the generated function body.\n \"\"\"\n ep = Function(self.entry_name, \"int {}(void)\".format(self.entry_name))\n ep.definition_file = self.entry_file\n body = ['/* LDV {' + '\"thread\": 1, \"type\": \"CONTROL_FUNCTION_BEGIN\", \"comment\": \"Entry point \\'{0}\\'\", '\n '\"function\": \"{0}\"'.format(self.entry_name) + '} */']\n\n # Init external allocated pointers\n cnt = 0\n functions = []\n if len(self.__external_allocated.keys()) > 0:\n for file in sorted([f for f in self.__external_allocated.keys() if len(self.__external_allocated[f]) > 0]):\n func = Function('ldv_allocate_external_{}'.format(cnt),\n \"void ldv_allocate_external_{}(void)\".format(cnt))\n func.declaration_files.add(file)\n func.definition_file = file\n\n init = [\"{} = {}();\".format(var.name, 'external_allocated_data') for\n var in self.__external_allocated[file]]\n func.body = init\n\n self.add_function_definition(func)\n self.add_function_declaration(self.entry_file, func, extern=True)\n functions.append(func)\n cnt += 1\n\n gl_init = Function('ldv_initialize_external_data',\n 'void ldv_initialize_external_data(void)')\n gl_init.declaration_files.add(self.entry_file)\n gl_init.definition_file = self.entry_file\n init_body = ['{}();'.format(func.name) for func in functions]\n gl_init.body = init_body\n self.add_function_definition(gl_init)\n body.extend([\n '/* Initialize external data */',\n 'ldv_initialize_external_data();'\n ])\n\n if get_conf_property(self._conf, \"initialize requirements\"):\n body += [\n '/* LDV {\"action\": \"INIT\", \"type\": \"CALL_BEGIN\", \"callback\": true, '\n '\"comment\": \"Initialize requirement models.\"} */',\n 'ldv_initialize();',\n '/* LDV {\"action\": \"INIT\", \"type\": \"CALL_END\"} */'\n ]\n\n body += ['/* LDV {\"action\": \"SCENARIOS\", \"type\": \"CONDITION_BEGIN\", '\n '\"comment\": \"Begin Environment model scenarios.\"} */'] + given_body + \\\n ['/* LDV {\"action\": \"SCENARIOS\", \"type\": \"CONDITION_END\"} */']\n\n if get_conf_property(self._conf, \"check final state\"):\n body += [\n '/* LDV {\"action\": \"FINAL\", \"callback\": true, \"type\": \"CALL_BEGIN\", '\n '\"comment\": \"Check requirement model final state at the exit if required.\"} */',\n 'ldv_check_final_state();',\n '/* LDV {\"action\": \"FINAL\", \"type\": \"CALL_END\"} */'\n ]\n\n body.append('return 0;')\n body.append('/* LDV {' + '\"comment\": \"Exit entry point \\'{0}\\'\", \"type\": \"CONTROL_FUNCTION_END\",'\n ' \"function\": \"{0}\"'.format(self.entry_name) + '} */')\n\n ep.body = body\n self.add_function_definition(ep)\n\n return body\n\n def _collapse_headers_sets(self, sets):\n final_list = []\n sortd = sorted(sets, key=lambda f: len(f))\n 
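\n        # Merge header sets from largest to smallest, inserting unseen headers so each set's relative order is preserved\n        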
while len(sortd) > 0:\n            data = sortd.pop()\n            difference = set(data).difference(set(final_list))\n            if len(difference) > 0 and len(difference) == len(data):\n                # All headers are new\n                final_list.extend(data)\n            elif len(difference) > 0:\n                position = len(final_list)\n                for header in reversed(data):\n                    if header not in difference:\n                        position = final_list.index(header)\n                    else:\n                        final_list.insert(position, header)\n        return final_list\n\n\nclass FunctionModels:\n    \"\"\"Class that represents common C extensions for simplifying environment model C code generation.\"\"\"\n\n    mem_function_template = r\"\\$({})\\(%({})%(?:,\\s?(\\w+))?\\)\"\n    simple_function_template = r\"\\$({})\\(\"\n    access_template = r'\\w+(?:(?:[.]|->)\\w+)*'\n    mem_function_re = re.compile(mem_function_template.format(r'\\w+', access_template))\n    simple_function_re = re.compile(simple_function_template.format(r'\\w+'))\n    access_re = re.compile('(%{}%)'.format(access_template))\n    arg_re = re.compile(r'\\$ARG(\\d+)')\n\n    def __init__(self, logger, conf, mem_function_map, free_function_map, irq_function_map):\n        self._logger = logger\n        self._conf = conf\n        self.mem_function_map = mem_function_map\n        self.free_function_map = free_function_map\n        self.irq_function_map = irq_function_map\n        self.signature = None\n        self.ualloc_flag = None\n\n    def text_processor(self, automaton, statement):\n        \"\"\"\n        Analyze given C code statement and replace all found EMG extensions with the clean C code.\n\n        :param automaton: Automaton object.\n        :param statement: C statement string.\n        :return: New statements list.\n        \"\"\"\n        # Replace function names\n        stms = []\n        matched = False\n\n        # Find state reinitialization\n        if re.compile(r'\\$REINITIALIZE_STATE;').search(statement):\n            statements = initialize_automaton_variables(self._conf, automaton)\n            stms.extend(statements)\n\n        # First replace simple argument references\n        for number in self.arg_re.findall(statement):\n            new_number = int(number) - 1\n            statement = statement.replace('$ARG{}'.format(number), 'arg{}'.format(new_number))\n\n        # Replace function calls\n        for fn in self.simple_function_re.findall(statement):\n            matched = True\n\n            # Bracket is required to ignore CIF expressions like $res or $arg1\n            if fn in self.mem_function_map or fn in self.free_function_map:\n                access = self.mem_function_re.search(statement).group(2)\n                if fn in self.mem_function_map:\n                    replacement = self._replace_mem_call\n                else:\n                    replacement = self._replace_free_call\n\n                accesses = automaton.process.resolve_access('%{}%'.format(access))\n                for access in accesses:\n                    signature = access.label.declaration\n                    if signature:\n                        var = automaton.determine_variable(access.label)\n                        if isinstance(var.declaration, Pointer):\n                            self.signature = var.declaration\n                            self.ualloc_flag = True\n                            new = self.mem_function_re.sub(replacement, statement)\n                            stms.append(new)\n                    else:\n                        self._logger.warning(\"Cannot get signature for the label {!r}\".format(access.label.name))\n            elif fn in self.irq_function_map:\n                statement = self.simple_function_re.sub(self.irq_function_map[fn] + '(', statement)\n                stms.append(statement)\n            else:\n                raise NotImplementedError(\"Model function '${}' is not supported\".format(fn))\n\n        if not matched:\n            stms = [statement]\n\n        # Replace remaining accesses\n        final = []\n        for original_stm in stms:\n            # Collect duplicates\n            stm_set = {original_stm}\n\n            while len(stm_set) > 0:\n                stm = stm_set.pop()\n                match = self.access_re.search(stm)\n                if match:\n                    expression = match.group(1)\n                    accesses = automaton.process.resolve_access(expression)\n                    for access in accesses:\n
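                        # Replace the matched %label% access expression with the name of the automaton's variable for that label\n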
                        var = automaton.determine_variable(access.label)\n                        stm = stm.replace(expression, var.name)\n                        stm_set.add(stm)\n                else:\n                    final.append(stm)\n\n        return final\n\n    def _replace_mem_call(self, match):\n        func, label_name, flag = match.groups()\n        size = '0'\n\n        if func not in self.mem_function_map:\n            raise NotImplementedError(\"Model of {} is not supported\".format(func))\n        elif not self.mem_function_map[func]:\n            raise NotImplementedError(\"No implementation is set for the function {}\".format(func))\n\n        if isinstance(self.signature, Pointer):\n            if func == 'ALLOC' and self.ualloc_flag:\n                # Do not allocate memory for unknown resources to avoid incomplete type errors\n                func = 'UALLOC'\n            if get_conf_property(self._conf, 'disable ualloc') and func == 'UALLOC':\n                func = 'ALLOC'\n            if func != 'UALLOC' and get_conf_property(self._conf, 'allocate with sizeof'):\n                size = 'sizeof({})'.format(self.signature.points.to_string('', typedef='complex_and_params'))\n\n            return \"{}({})\".format(self.mem_function_map[func], size)\n        else:\n            raise ValueError('This is not a pointer')\n\n    def _replace_free_call(self, match):\n        func, label_name, flag = match.groups()\n        if func not in self.free_function_map:\n            raise NotImplementedError(\"Model of {} is not supported\".format(func))\n        elif not self.free_function_map[func]:\n            raise NotImplementedError(\"No implementation is set for the function {}\".format(func))\n\n        # Create function call\n        if isinstance(self.signature, Pointer):\n            return \"{}(%{}%)\".format(self.free_function_map[func], label_name)\n        else:\n            raise ValueError('This is not a pointer')\n","sub_path":"core/core/vtg/emg/modelTranslator/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":23066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460664537","text":"\"\"\"\nHTML - FORM APP\nNotes:\nform method POST is:\n    -more secure\n    -allows for more characters to be sent at one time\n    -is hidden\nform method GET is:\n    -friendlier to work with\n    -visible\n    -part of the url\n    -has character limitations\n\nusing the name attribute in an input allows the browser to distinguish between fields: <input type=\"\" value=\"\" NAME=\"\" />\nthis information (name and its value) will show up in the url\n\n---------------------------------------------------------------------------------------\nthe server sending information to the client is a .response\nto print items (server to client)\n    self.response.write()\n\nthe information that is sent from the client to the server is a request\nto access items (client to server)\n    self.request.GET\n---------------------------------------------------------------------------------------\n\nCONDITIONALS IN FORMS\nuse a conditional statement to only send the information when a user inputs it\nyou can't access the information from a form unless you have already sent the information\na conditional statement will tell the page to only do this if it has received information from a form\n\"\"\"\nimport webapp2\n\n#the entire application is done from within one class\nclass MainHandler(webapp2.RequestHandler):\n    def get(self):\n\n#------------------------------------------------------------multi-line string\n#the page head attribute holds the html's head information and the opening body tag\n        page_head = \"\"\"\n<!DOCTYPE HTML>\n<html>\n    <head>\n        <title>Simple Login Form</title>\n    </head>\n    <body>\n\"\"\"\n#------------------------------------------------------------multi-line string\n\n#------------------------------------------------------------multi-line string
\n#the page_body attribute holds the page content (in this case a form)\n        page_body =\"\"\"\n    <form method=\"GET\">\n        User: <input type=\"text\" name=\"user\" />\n        Email: <input type=\"text\" name=\"email\" />\n        <input type=\"submit\" value=\"submit\" />\n    </form>\n\"\"\"\n#------------------------------------------------------------multi-line string\n\n#------------------------------------------------------------multi-line string\n#the page_close attribute holds the closing tags for the html elements of the body\n        page_close = \"\"\"\n
    </body>\n</html>\n\"\"\"\n#------------------------------------------------------------multi-line string\n\n#------------------------------------------------------------Request and Response Conditional\n        #this conditional handles 2 different views: the page load, and the user input\n        if self.request.GET:\n            #stores the information we get from the form\n            user = self.request.GET['user']   #store the information in a variable named user\n            email = self.request.GET['email']   #store the information in a variable named email\n\n            #place the user and the user's email just before the form in the page body\n            #will print out if there is a response from the user\n            #including the page body again lets the user reload the page and resubmit their inputs\n            self.response.write(page_head + user + ' ' + email + page_body + page_close)\n        else:\n            #if there is no request data in the url\n            #the server sending information to the client is a .response to the request.GET\n            #the response then publishes to the page\n            self.response.write(page_head + page_body + page_close)\n#------------------------------------------------------------Request and Response Conditional\n\"\"\"\nSELF.REQUEST.GET\nsend information with GET\nthe information that is sent from the client to the server is a request\nitems that the browser sends to the server\nsee the response with the self.request\nself.request.GET is an associative array\nput in the key associated with the value that you want into the array\nnames in the array must match the values of the names in the form inputs\nthis will return the values in the console / log\n\nSELF.RESPONSE.WRITE\n\"\"\"\n\napp = webapp2.WSGIApplication([\n    ('/', MainHandler)\n], debug=True)\n","sub_path":"_python basics/html-login-form/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591099321","text":"# Copyright (c) 2018- Xilinx, Inc (Alessandro Pappalardo)\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright\n#    notice, this list of conditions and the following disclaimer.\n\n# 2. Redistributions in binary form must reproduce the above copyright\n#    notice, this list of conditions and the following disclaimer in the\n#    documentation and/or other materials provided with the distribution.\n\n# 3. 
Neither the names of Xilinx, Facebook, Deepmind Technologies, NYU,\n# NEC Laboratories America and IDIAP Research Institute nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom typing import Tuple, Optional, List, Union\n\nimport torch\nfrom torch.nn import Parameter, Module\n\nimport brevitas\nimport brevitas.config as config\nfrom brevitas.core.function_wrapper import Identity\n\nfrom brevitas.core.stats import _ParameterListStats, _RuntimeStats, DEFAULT_MOMENTUM\nfrom brevitas.core.restrict_val import _RestrictClampValue\n\n\nclass StatsFromParameterScaling(brevitas.jit.ScriptModule):\n\n def __init__(\n self,\n scaling_stats_impl: Module,\n scaling_stats_input_view_shape_impl: Module,\n scaling_stats_input_concat_dim: int,\n tracked_parameter_list: List[torch.nn.Parameter],\n restrict_scaling_impl: Module,\n scaling_shape: Tuple[int, ...],\n affine_rescaling: bool = False,\n scaling_min_val: Optional[float] = None) -> None:\n super(StatsFromParameterScaling, self).__init__()\n self.parameter_list_stats = _ParameterListStats(\n scaling_stats_impl,\n scaling_shape,\n scaling_stats_input_view_shape_impl,\n scaling_stats_input_concat_dim,\n tracked_parameter_list)\n self.stats_scaling_impl = _StatsScaling(\n restrict_scaling_impl,\n scaling_shape,\n scaling_min_val,\n affine_rescaling)\n\n @brevitas.jit.script_method\n def forward(self, ignored: torch.Tensor) -> torch.Tensor:\n stats = self.parameter_list_stats()\n return self.stats_scaling_impl(stats)\n\n\nclass _StatsScaling(brevitas.jit.ScriptModule):\n\n def __init__(\n self,\n restrict_scaling_impl: Module,\n scaling_shape: Tuple[int, ...],\n scaling_min_val: Optional[float] = None,\n affine_rescaling: bool = False) -> None:\n super(_StatsScaling, self).__init__()\n\n if affine_rescaling:\n self.affine_rescaling = _AffineRescaling(scaling_shape)\n else:\n self.affine_rescaling = Identity()\n self.restrict_clamp_scaling = _RestrictClampValue(scaling_min_val, restrict_scaling_impl)\n self.restrict_scaling_pre = restrict_scaling_impl.restrict_init_module()\n\n @brevitas.jit.script_method\n def forward(self, stats: torch.Tensor) -> torch.Tensor:\n stats = self.restrict_scaling_pre(stats)\n stats = self.affine_rescaling(stats)\n stats = self.restrict_clamp_scaling(stats)\n return stats\n\n\nclass RuntimeStatsScaling(brevitas.jit.ScriptModule):\n\n def __init__(\n self,\n scaling_stats_impl: Module,\n scaling_stats_input_view_shape_impl: Module,\n scaling_stats_permute_dims: Tuple[int, ...],\n restrict_scaling_impl: Module,\n scaling_shape: Tuple[int, ...],\n affine_rescaling: bool,\n scaling_stats_momentum: float = DEFAULT_MOMENTUM,\n scaling_min_val: 
Optional[float] = None) -> None:\n        super(RuntimeStatsScaling, self).__init__()\n\n        self.runtime_stats = _RuntimeStats(\n            scaling_stats_impl,\n            scaling_shape,\n            scaling_stats_input_view_shape_impl,\n            scaling_stats_permute_dims,\n            scaling_stats_momentum)\n        self.stats_scaling_impl = _StatsScaling(\n            restrict_scaling_impl,\n            scaling_shape,\n            scaling_min_val,\n            affine_rescaling)\n\n    @brevitas.jit.script_method\n    def forward(self, x: torch.Tensor):\n        stats = self.runtime_stats(x)\n        return self.stats_scaling_impl(stats)\n\n\nclass _AffineRescaling(brevitas.jit.ScriptModule):\n\n    def __init__(self, scaling_shape):\n        super(_AffineRescaling, self).__init__()\n        self.affine_weight = Parameter(torch.ones(scaling_shape))\n        self.affine_bias = Parameter(torch.zeros(scaling_shape))\n\n    @brevitas.jit.script_method\n    def forward(self, x):\n        out = x * self.affine_weight + self.affine_bias\n        out = torch.abs(out)\n        return out\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        super(_AffineRescaling, self)._load_from_state_dict(\n            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)\n        affine_weight_key = prefix + 'affine_weight'\n        affine_bias_key = prefix + 'affine_bias'\n        if config.IGNORE_MISSING_KEYS and affine_weight_key in missing_keys:\n            missing_keys.remove(affine_weight_key)\n        if config.IGNORE_MISSING_KEYS and affine_bias_key in missing_keys:\n            missing_keys.remove(affine_bias_key)","sub_path":"brevitas/core/scaling/runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":6994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"56503473","text":"\n'''\n    heroes.py\n\n    a collection of functions to read and write to\n    a text file containing ...\n'''\n\n\n_hero_path = \"./data/heroes.txt\"\n\n\ndef get_heroes(starts_with=\"\"):\n    '''\n    get_heroes lists the heroes in a file\n\n    Parameters:\n        starts_with: a string that can filter the heroes returned\n\n    Output:\n        a list of heroes\n\n    Exceptions\n    ---\n'''\n    with open(_hero_path) as hero_file:\n        # comprehension\n        hero_list = [\n            line.strip()\n            for line in hero_file\n            if line.startswith(starts_with)\n        ]\n    return hero_list\n\n\ndef add_hero(name):\n    '''\n    add_hero adds a hero to the file listed above\n    The hero will be sorted into the list so it may not be at the end\n\n    Parameters:\n        name: a string that holds the name of the hero to be added\n\n    Output:\n        none\n\n    Exceptions\n    ---\n    '''\n    existing_heroes = get_heroes()\n    if name not in existing_heroes:\n        existing_heroes.append(name)\n        sorted_heroes = sorted(existing_heroes)\n        with open(_hero_path, \"w\") as output_file:\n            for h in sorted_heroes:\n                output_file.write(h + \"\\n\")\n\n\nclass Hero:\n\n    hero_path = \"./data/heroes.txt\"\n\n    def __init__(self, name, home):\n        self.name = name\n        self.home = home\n\n    def describe(self):\n        print(self.name + \" is a hero.\")\n\n    @classmethod\n    def reset(cls):\n        # clear out the contents of the hero file\n        open(cls.hero_path, \"w\").close()\n\n\nclass SuperHero(Hero):\n\n    def __init__(self, name, home, power, weakness):\n\n        super().__init__(name, home)\n        self.power = power\n        self.weakness = weakness\n\n\ndef main():\n    h = Hero(\"Wonder Woman\", \"avalon\")\n    h.describe()\n\n    Hero.reset()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"heroes.py","file_name":"heroes.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"356464400","text":"'''\nThis file is part of MSR Ensemble (MSRE-X).\n\nMSRE-X is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nMSRE-X is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with MSRE-X.  If not, see <http://www.gnu.org/licenses/>.\n\nMSR Ensemble (MSRE-X) Version 0.5, Prototype Alpha\n\nAuthors:\nEdmund S. L. Lam      sllam@qatar.cmu.edu\nIliano Cervesato      iliano@cmu.edu\n\n* This implementation was made possible by an NPRP grant (NPRP 09-667-1-100, Effective Programming \nfor Large Distributed Ensembles) from the Qatar National Research Fund (a member of the Qatar \nFoundation). The statements made herein are solely the responsibility of the authors.\n'''\n\nimport z3\n\nfrom msrex.misc.infix import Infix\n\nfrom msrex.misc.smt_utils import SMTSolver, SMTConstraint, SAT, UNSAT, UNKNOWN, SMTTypeTerm, get_new_var\n\n# MSRE type representation in z3\n\nsmt_type = None\n\nTY_LOC = 'loc'\nTY_INT = 'int'\nTY_CHAR = 'char'\nTY_FLOAT = 'float'\nTY_BOOL = 'bool'\nTY_STR = 'string'\nTY_DEST = 'dest'\nTY_LIST = 'list'\nTY_MSET = 'mset'\nTY_TUPLE = 'tuple'\nTY_UNIT = 'unit'\nTY_ARROW = 'arrow'\n\ndef init_default_type():\n\tType = z3.Datatype('Type')\n\tType.declare(TY_LOC)\n\tType.declare(TY_INT)\n\tType.declare(TY_CHAR)\n\tType.declare(TY_FLOAT)\n\tType.declare(TY_BOOL)\n\tType.declare(TY_STR)\n\tType.declare(TY_DEST)\n\tType.declare(TY_LIST, ('type',Type))\n\tType.declare(TY_MSET, ('type',Type))\n\tType.declare(TY_TUPLE, ('left',Type), ('right',Type))\n\tType.declare(TY_UNIT)\n\tType.declare(TY_ARROW, ('in',Type), ('out',Type))\n\n\t# Insert new types\n\n\tType = Type.create()\n\t\n\tglobal smt_type\n\tsmt_type = Type\n\ninit_default_type()\n\n'''\n@Infix\ndef or_cons(c1, c2):\n\tc = SMTConstraint(z3.Or(c1.cons,c2.cons))\n\tc.justify = c1.justify + c2.justify\n\treturn c\n'''\n\ndef tyVar(id=None):\n\tif id is None:\n\t\tid = get_new_var(\"t\")\n\treturn z3.Const(id, smt_type)\n\ntyLoc = smt_type.loc\ntyInt = smt_type.int\ntyFloat = smt_type.float\ntyBool = smt_type.bool\ntyChar = smt_type.char\ntyStr = smt_type.string\ntyDest = smt_type.dest\n\ndef tyList(ty_term):\n\treturn smt_type.list(ty_term)\n\ndef tyMSet(ty_term):\n\treturn smt_type.mset(ty_term)\n\ndef tyTuple(left, right):\n\treturn smt_type.tuple(left, right)\n\ntyUnit = smt_type.unit\n\ndef tyArrow(inpt, outpt):\n\treturn smt_type.arrow(inpt, outpt)\n\n\n\n","sub_path":"msrex/misc/z3_utils/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189716817","text":"def solve():\n    n,k=map(int,input().split())\n    A=list(map(int,input().split()))\n    A=[(a,i) for i,a in enumerate(A)]\n    B=[0]*n\n    while True:\n        for coach in range(1,3):\n            #print(A,B)\n            A_len=len(A)\n            if A_len==0:\n                return ''.join(list(map(str,B)))\n            m=max(A)\n            mi=A.index(m)\n            for i in range(max(0,mi-k),min(A_len,mi+k+1)):\n                #print(A[i])\n                B[A[i][1]]=coach\n            #del A[max(0,mi-k):min(A_len,mi+k+1)]\n            A = A[:max(0,mi-k)] + 
A[min(A_len,mi+k+1):]\n\nprint(solve())","sub_path":"codeforces/cr552_3/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444853007","text":"import pandas\n\nfrom modin.data_management.utils import length_fn_pandas, width_fn_pandas\nfrom modin.engines.base.frame.partition import BaseFramePartition\n\n\nclass PandasOnPythonFramePartition(BaseFramePartition):\n \"\"\"This abstract class holds the data and metadata for a single partition.\n The methods required for implementing this abstract class are listed in\n the section immediately following this.\n\n The API exposed by the children of this object is used in\n `BaseFrameManager`.\n\n Note: These objects are treated as immutable by `BaseFrameManager`\n subclasses. There is no logic for updating inplace.\n \"\"\"\n\n def __init__(self, data, length=None, width=None, call_queue=None):\n self.data = data\n if call_queue is None:\n call_queue = []\n self.call_queue = call_queue\n self._length_cache = length\n self._width_cache = width\n\n def get(self):\n \"\"\"Flushes the call_queue and returns the data.\n\n Note: Since this object is a simple wrapper, just return the data.\n\n Returns:\n The object that was `put`.\n \"\"\"\n self.drain_call_queue()\n return self.data.copy()\n\n def apply(self, func, **kwargs):\n \"\"\"Apply some callable function to the data in this partition.\n\n Note: It is up to the implementation how kwargs are handled. They are\n an important part of many implementations. As of right now, they\n are not serialized.\n\n Args:\n func: The lambda to apply (may already be correctly formatted)\n\n Returns:\n A new `BaseFramePartition` containing the object that has had `func`\n applied to it.\n \"\"\"\n\n def call_queue_closure(data, call_queues):\n result = data.copy()\n for func, kwargs in call_queues:\n try:\n result = func(result, **kwargs)\n except Exception as e:\n self.call_queue = []\n raise e\n return result\n\n self.data = call_queue_closure(self.data, self.call_queue)\n self.call_queue = []\n return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))\n\n def add_to_apply_calls(self, func, **kwargs):\n return PandasOnPythonFramePartition(\n self.data.copy(), call_queue=self.call_queue + [(func, kwargs)]\n )\n\n def drain_call_queue(self):\n if len(self.call_queue) == 0:\n return\n self.apply(lambda x: x)\n\n def mask(self, row_indices=None, col_indices=None):\n new_obj = self.add_to_apply_calls(\n lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])\n )\n new_obj._length_cache = (\n len(row_indices)\n if not isinstance(row_indices, slice)\n else self._length_cache\n )\n new_obj._width_cache = (\n len(col_indices)\n if not isinstance(col_indices, slice)\n else self._width_cache\n )\n return new_obj\n\n def to_pandas(self):\n \"\"\"Convert the object stored in this partition to a Pandas DataFrame.\n\n Note: If the underlying object is a Pandas DataFrame, this will likely\n only need to call `get`\n\n Returns:\n A Pandas DataFrame.\n \"\"\"\n dataframe = self.get()\n assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series\n\n return dataframe\n\n def to_numpy(self):\n \"\"\"Convert the object stored in this partition to a NumPy Array.\n\n Returns:\n A NumPy Array.\n \"\"\"\n return self.apply(lambda df: df.values).get()\n\n @classmethod\n def put(cls, obj):\n \"\"\"A factory classmethod to format a given object.\n\n Args:\n obj: An object.\n\n Returns:\n A 
`PandasOnPythonFramePartition` object.\n        \"\"\"\n        return cls(obj)\n\n    @classmethod\n    def preprocess_func(cls, func):\n        \"\"\"Preprocess a function before an `apply` call.\n\n        Note: This is a classmethod because the definition of how to preprocess\n            should be class-wide. Also, we may want to use this before we\n            deploy a preprocessed function to multiple `BaseFramePartition`\n            objects.\n\n        Args:\n            func: The function to preprocess.\n\n        Returns:\n            An object that can be accepted by `apply`.\n        \"\"\"\n        return func\n\n    @classmethod\n    def length_extraction_fn(cls):\n        \"\"\"The function to compute the length of the object in this partition.\n\n        Returns:\n            A callable function.\n        \"\"\"\n        return length_fn_pandas\n\n    @classmethod\n    def width_extraction_fn(cls):\n        \"\"\"The function to compute the width of the object in this partition.\n\n        Returns:\n            A callable function.\n        \"\"\"\n        return width_fn_pandas\n\n    _length_cache = None\n    _width_cache = None\n\n    def length(self):\n        if self._length_cache is None:\n            self._length_cache = self.apply(self.length_extraction_fn()).data\n        return self._length_cache\n\n    def width(self):\n        if self._width_cache is None:\n            self._width_cache = self.apply(self.width_extraction_fn()).data\n        return self._width_cache\n\n    @classmethod\n    def empty(cls):\n        return cls(pandas.DataFrame())\n","sub_path":"modin/engines/python/pandas_on_python/frame/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"538136749","text":"#!/usr/bin/python\r\n\r\n# Copyright 2016, Gurobi Optimization, Inc.\r\n\r\n# This example formulates and solves the following simple QP model:\r\n#\r\n#     minimize    x + y + x^2 + x*y + y^2 + y*z + z^2\r\n#     subject to  x + 2 y + 3 z >= 4\r\n#                 x + y >= 1\r\n#\r\n# The example illustrates the use of dense matrices to store A and Q\r\n# (and dense vectors for the other relevant data). 
We don't recommend\r\n# that you use dense matrices, but this example may be helpful if you\r\n# already have your data in this format.\r\n\r\nimport sys\r\nfrom gurobipy import *\r\n\r\ndef lp_optimize(rows, cols, c, obj_sense, A, sense, rhs, lb, ub, vtype,\r\n                solution):\r\n\r\n    model = Model()\r\n\r\n    # Add variables to model\r\n    for j in range(cols):\r\n        model.addVar(lb=lb[j], ub=ub[j], vtype=vtype[j])\r\n    model.update()\r\n    vars = model.getVars()\r\n\r\n    # Populate A matrix\r\n    for i in range(rows):\r\n        expr = LinExpr()\r\n        for j in range(cols):\r\n            if A[i][j] != 0:\r\n                expr += A[i][j]*vars[j]\r\n        model.addConstr(expr, sense[i], rhs[i])\r\n\r\n    # Populate objective\r\n    obj = QuadExpr()\r\n#    for i in range(cols):\r\n#        for j in range(cols):\r\n#            if Q[i][j] != 0:\r\n#                obj += Q[i][j]*vars[i]*vars[j]\r\n    for j in range(cols):\r\n        if c[j] != 0:\r\n            obj += c[j]*vars[j]\r\n    model.setObjective(obj, obj_sense)\r\n\r\n    # Write model to a file\r\n    model.update()\r\n    model.write('autoGenerated.lp')\r\n\r\n    # Solve\r\n    model.optimize()\r\n\r\n    print(\"Obj Value: \", model.objVal)\r\n    for v in model.getVars():\r\n        print(v.varName, v.x, \"\\n\")\r\n\r\n    # if model.status == GRB.Status.OPTIMAL:\r\n    #     x = model.getAttr('x', vars)\r\n    #     for i in range(cols):\r\n    #         solution[i] = x[i]\r\n    #     return True\r\n    # else:\r\n    #     return False\r\n\r\n\r\n","sub_path":"zac/PA1/myMatrixLpSolver.py","file_name":"myMatrixLpSolver.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607945301","text":"\"\"\"\r\n@description: tests code in Task_3 with test cases, prints input, output and expected\r\n@author: David Lei\r\n@since: 15/4/2015\r\n@modified: 15/4/2015\r\n@:param : valid test cases\r\n@:return: the binary representation of n is returned by binaryrevlist, the result is returned by powermodified\r\n@pre-condition: test cases valid i.e. it won't crash due to strange test input\r\n@post-condition: test cases valid i.e. 
it won't crash due to strange test input\r\n@complexity: Worst-Case: O(N)\r\n@complexity: Best-Case: O(N)\r\n\"\"\"\r\n\r\n# test that powermodified does what it is meant to do\r\n\r\ndef binaryrevlist (n):\r\n    rev_list = [0]*n\r\n    counter = 0\r\n    while n > 0:\r\n        if n%2 == 0:\r\n            rev_list[counter] = 0\r\n        else:\r\n            rev_list[counter] = 1\r\n        n = n//2\r\n        counter += 1\r\n    rev_list = rev_list[:counter]\r\n    return rev_list\r\n\r\n\r\ndef powermodified(b,e):\r\n    rev_binary = binaryrevlist(e)\r\n    result = 1\r\n    idx = len(rev_binary) - 1\r\n    while idx >= 0:    # goes through all items in the list, idx from n-1 to 0 inclusive\r\n        # print(idx)\r\n        result = result * result\r\n        if rev_binary[idx]:    #\r\n            result = result * b\r\n        idx -= 1\r\n    # print(rev_binary)\r\n    return result\r\n\r\ndef test(base, exp):\r\n    if exp < 0:\r\n        print(\"Error, the exponent you entered is negative\")\r\n    else:\r\n        result = powermodified(base,exp)\r\n        return result\r\n\r\ndef test_cases():\r\n    testcase1 = [3,7]\r\n    testcase2 = [-2,19]\r\n    testcase3 = [7,-4]\r\n    testcase4 = [0,3]\r\n    # testcase5 = ['ab','cd'], auto validates against non-numerical inputs\r\n    print(\"testcase1: base = %s, exp = %s, output = %s, expected = %s\" % (\r\n        str(testcase1[0]), str(testcase1[1]), str(test(testcase1[0], testcase1[1])), '2187'))\r\n    print(\"testcase2: base = %s, exp = %s, output = %s, expected = %s\" % (\r\n        str(testcase2[0]), str(testcase2[1]), str(test(testcase2[0], testcase2[1])), '-524288'))\r\n    print(\"testcase3: base = %s, exp = %s, output = %s, expected = %s\" % (\r\n        str(testcase3[0]), str(testcase3[1]), str(test(testcase3[0], testcase3[1])), 'None, will print error message \\\"Error, the exponent you entered is negative\\\"'))\r\n    print(\"testcase4: base = %s, exp = %s, output = %s, expected = %s\" % (\r\n        str(testcase4[0]), str(testcase4[1]), str(test(testcase4[0], testcase4[1])), '0'))\r\n\r\n\r\ntest_cases()\r\n\r\n","sub_path":"FIT1008-Intro-To-Computer-Science/Pracs/26029391_Prac5/New_Power.py","file_name":"New_Power.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226449964","text":"import os\n\nfrom setuptools import setup\n\n\nNAME = \"RMTfunc\"\nREQUIREMENTS = [\n    \"colorama\",\n    # \"curses\",\n    \"EMD-signal\",\n    \"empyricalRMT\",\n    \"multiprocess\",\n    \"nibabel\",\n    \"nilearn\",\n    \"numba\",\n    \"numpy\",\n    \"progressbar\",\n    \"scipy\",\n]\n\n\ndef read_file(path):\n    with open(os.path.join(os.path.dirname(__file__), path)) as fp:\n        return fp.read()\n\n\nsetup(\n    name=NAME,\n    packages=[NAME],\n    author=\"Derek Berger\",\n    maintainer=\"Derek Berger\",\n    author_email=\"dmberger.dev@gmail.com\",\n    version=\"0.1dev\",\n    description=\"Eigenvalue unfolding and spectral observable computation\",\n    url=\"https://github.com/stfxecutables/empyricalRMT\",\n    license=\"MIT\",\n    long_description=read_file(\"README.md\"),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n        \"Development Status :: 3 - Alpha\",\n    ],\n    install_requires=REQUIREMENTS,\n    python_requires=\">=3\",\n    keywords=\"RMT RandomMatrixTheory spectral observables eigenvalues unfolding\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45859794","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom gitrepo.items import GitrepoItem\n\nclass 
RepoSpider(scrapy.Spider):\n name = 'repo'\n# allowed_domains = ['github.com']\n @property\n def start_urls(self):\n url_tmpl = 'https://github.com/shiyanlou?page={}&tab=repositories'\n return (url_tmpl.format(i) for i in range(1,5))\n\n def parse(self, response):\n for repository in response.css('li.col-12'):\n yield ({\n 'name':repository.xpath('.//div/h3/a/text()').extract_first().strip(),\n 'update_time':repository.xpath('.//relative-time/@datetime').extract_first()\n })\n\n","sub_path":"gitrepo/gitrepo/spiders/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"625252442","text":"import cv2\nimport numpy as np\n\nfilename = 'sudokuPaper.jpeg'\nimg = cv2.imread(filename)\n# th2 = cv2.adaptiveThreshold(\n# img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ngray = np.float32(gray)\ndst = cv2.cornerHarris(gray, 2, 3, 0.04)\n\n# result is dilated for marking the corners, not important\ndst = cv2.dilate(dst, None)\n\n# Threshold for an optimal value, it may vary depending on the image.\nimg[dst > 0.01 * dst.max()] = [255, 0, 0]\n\n# cv2.imshow('dst', img)\ncv2.imwrite('harriscorner.jpeg', img)\n\n# if cv2.waitKey(0) & 0xff == 27:\n# cv2.destroyAllWindows()\n","sub_path":"harrisCorner.py","file_name":"harrisCorner.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60603864","text":"\"\"\"Micro moments.\"\"\"\n\nimport abc\nimport collections\nfrom typing import Any, Dict, Hashable, List, Optional, Sequence, TYPE_CHECKING\n\nimport numpy as np\n\nfrom . import options\nfrom .utilities.basics import Array, StringRepresentation, format_number, format_table\n\n\n# only import objects that create import cycles when checking types\nif TYPE_CHECKING:\n from .economies.economy import Economy # noqa\n\n\nclass Moment(StringRepresentation):\n \"\"\"Information about a single micro moment.\"\"\"\n\n value: Array\n market_ids: Optional[Array]\n\n def __init__(self, value: float, market_ids: Optional[Sequence] = None) -> None:\n \"\"\"Validate information about the moment to the greatest extent possible without an economy instance.\"\"\"\n self.value = np.asarray(value, options.dtype)\n if self.value.size != 1:\n raise ValueError(\"The micro moment value must be a scalar.\")\n\n self.market_ids = None\n if market_ids is not None:\n self.market_ids = np.asarray(market_ids, np.object)\n unique, counts = np.unique(self.market_ids, return_counts=True)\n duplicates = unique[counts > 1]\n if duplicates.size > 0:\n raise ValueError(f\"The following market IDs are duplicated in market_ids: {duplicates}.\")\n\n def __str__(self) -> str:\n \"\"\"Format information about the micro moment as a string.\"\"\"\n return f\"{self._format_markets()}: {self._format_moment()}\"\n\n def _format_markets(self) -> str:\n \"\"\"Format information about the markets associated with the micro moment as a string.\"\"\"\n if self.market_ids is None:\n return \"All\"\n if len(self.market_ids) == 1:\n return self.market_ids[0]\n return \", \".join(str(t) for t in self.market_ids)\n\n def _format_moment(self) -> str:\n \"\"\"Construct a string expression for the micro moment.\"\"\"\n formatted = self._format_value()\n if self.value < 0:\n formatted = f\"{formatted} + {format_number(float(self.value))[1:]}\"\n elif self.value > 0:\n formatted = f\"{formatted} 
- {format_number(float(self.value))[1:]}\"\n return formatted\n\n @abc.abstractmethod\n def _format_value(self) -> str:\n \"\"\"Construct a string expression for the micro moment value.\"\"\"\n\n def _validate(self, economy: 'Economy') -> None:\n \"\"\"Check that all market IDs associated with this moment are in the economy.\"\"\"\n if self.market_ids is not None:\n extra_ids = set(self.market_ids) - set(economy.unique_market_ids)\n if extra_ids:\n raise ValueError(f\"market_ids contains the following extra IDs: {sorted(extra_ids)}.\")\n\n\nclass DemographicExpectationMoment(Moment):\n r\"\"\"Configuration for micro moments that match expectations of demographics for agents who choose certain products.\n\n For example, micro data can sometimes be used to compute the mean of a demographic such as income, :math:`y_{it}`,\n for agents who choose product :math:`j`. With the value :math:`v_m` of this mean, a micro moment :math:`m` in market\n :math:`t` for agent :math:`i` can be defined by\n\n .. math:: g_{M,imt} = \\frac{y_{it} s_{ijt}}{s_{jt}} - v_m.\n\n Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which\n the micro data used to compute :math:`v_m` are relevant, which gives :math:`\\bar{g}_{M,m}` in\n :eq:`averaged_micro_moments`.\n\n Parameters\n ----------\n product_id : `object`\n ID of the product :math:`j` or ``None`` to denote the outside option :math:`j = 0`. If not ``None``, there must\n be exactly one of this ID in the ``product_ids`` field of ``product_data`` in :class:`Problem` or\n :class:`Simulation` for each market over which this micro moment will be averaged.\n demographics_index : `int`\n Column index of the demographic :math:`y_{it}` (which can be any demographic, not just income) in the matrix of\n agent demographics, :math:`d`. This should be between zero and :math:`D - 1`, inclusive.\n value : `float`\n Value :math:`v_m` of the mean estimated from micro data.\n market_ids : `array-like, optional`\n Distinct market IDs over which the micro moments will be averaged to get :math:`\\bar{g}_{M,m}`. These are also\n the only markets in which the moments will be computed. By default, the moments are computed for and averaged\n across all markets. 
That is, by default, it is assumed that the specified ``value`` is relevant for and on\n average the same for all markets.\n\n Examples\n --------\n - :doc:`Tutorial `\n\n \"\"\"\n\n product_id: Optional[Any]\n demographics_index: int\n\n def __init__(\n self, product_id: Optional[Any], demographics_index: int, value: float,\n market_ids: Optional[Sequence] = None) -> None:\n \"\"\"Validate information about the moment to the greatest extent possible without an economy instance.\"\"\"\n super().__init__(value, market_ids)\n if not isinstance(demographics_index, int) or demographics_index < 0:\n raise ValueError(\"demographics_index must be a positive int.\")\n self.product_id = product_id\n self.demographics_index = demographics_index\n\n def _format_value(self) -> str:\n \"\"\"Construct a string expression for the covariance moment.\"\"\"\n product = \"Outside\" if self.product_id is None else f\"'{self.product_id}'\"\n return f\"E[Demographic Column {self.demographics_index} | {product}]\"\n\n def _validate(self, economy: 'Economy') -> None:\n \"\"\"Check that matrix indices are valid in the economy.\"\"\"\n super()._validate(economy)\n economy._validate_product_id(self.product_id, self.market_ids)\n if self.demographics_index >= economy.D:\n raise ValueError(f\"demographics_index must be between 0 and D = {economy.D}, inclusive.\")\n\n\nclass DemographicCovarianceMoment(Moment):\n r\"\"\"Configuration for micro moments that match covariances between product characteristics and demographics.\n\n For example, micro data can sometimes be used to compute the sample covariance between a product characteristic\n :math:`x_{jt}` of an agent's choice :math:`j`, and a demographic such as income, :math:`y_{it}`, amongst those\n agents who purchase an inside good. With the value :math:`v_m` of this sample covariance, a micro moment :math:`m`\n in market :math:`t` for agent :math:`i` can be defined by\n\n .. math:: g_{M,imt} = (z_{it} - \\bar{z}_t)(y_{it} - \\bar{y}_t) - v_m\n\n where :math:`\\bar{z}_t = \\sum_i w_{it} z_{it}`, :math:`\\bar{y}_t = \\sum_i w_{it} y_{it}`, and conditional on\n choosing an inside good, the expected value of :math:`x_{jt}` for agent :math:`i` is\n\n .. math:: z_{it} = \\sum_{j \\in J_t} x_{jt}s_{ij(-0)t}\n\n where :math:`s_{ij(-0)t} = s_{ijt} / (1 - s_{i0t})` is the probability of :math:`i` choosing :math:`j` when the\n outside option is removed from the choice set.\n\n Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which\n the micro data used to compute :math:`v_m` are relevant, which gives :math:`\\bar{g}_{M,m}` in\n :eq:`averaged_micro_moments`.\n\n Parameters\n ----------\n X2_index : `int`\n Column index of :math:`x_{jt}` in the matrix of demand-side nonlinear product characteristics, :math:`X_2`. This\n should be between zero and :math:`K_2 - 1`, inclusive.\n demographics_index : `int`\n Column index of the demographic :math:`y_{it}` (which can be any demographic, not just income) in the matrix of\n agent demographics, :math:`d`. This should be between zero and :math:`D - 1`, inclusive.\n value : `float`\n Value :math:`v_m` of the sample covariance estimated from micro data.\n market_ids : `array-like, optional`\n Distinct market IDs over which the micro moments will be averaged to get :math:`\\bar{g}_{M,m}`. These are also\n the only markets in which the moments will be computed. By default, the moments are computed for and averaged\n across all markets. 
That is, by default, it is assumed that the specified ``value`` is relevant for and on\n average the same for all markets.\n\n Examples\n --------\n - :doc:`Tutorial `\n\n \"\"\"\n\n X2_index: int\n demographics_index: int\n\n def __init__(\n self, X2_index: int, demographics_index: int, value: float, market_ids: Optional[Sequence] = None) -> None:\n \"\"\"Validate information about the moment to the greatest extent possible without an economy instance.\"\"\"\n super().__init__(value, market_ids)\n if not isinstance(X2_index, int) or X2_index < 0:\n raise ValueError(\"X2_index must be a positive int.\")\n if not isinstance(demographics_index, int) or demographics_index < 0:\n raise ValueError(\"demographics_index must be a positive int.\")\n self.X2_index = X2_index\n self.demographics_index = demographics_index\n\n def _format_value(self) -> str:\n \"\"\"Construct a string expression for the covariance moment.\"\"\"\n return f\"Cov(X2 Column {self.X2_index}, Demographic Column {self.demographics_index} | Inside)\"\n\n def _validate(self, economy: 'Economy') -> None:\n \"\"\"Check that matrix indices are valid in the economy.\"\"\"\n super()._validate(economy)\n if self.X2_index >= economy.K2:\n raise ValueError(f\"X2_index must be between 0 and K2 = {economy.K2}, inclusive.\")\n if self.demographics_index >= economy.D:\n raise ValueError(f\"demographics_index must be between 0 and D = {economy.D}, inclusive.\")\n\n\nclass DiversionProbabilityMoment(Moment):\n r\"\"\"Configuration for micro moments that match second choice probabilities of certain products for agents whose\n first choices are certain other products.\n\n For example, micro data can sometimes be used to compute the share of agents who would choose product :math:`k` if\n :math:`j` were removed from the choice set, out of those agents whose first choice is :math:`j`. With the value\n :math:`v_m` of this share, a micro moment :math:`m` in market :math:`t` for agent :math:`i` can be defined by\n\n .. math:: g_{M,imt} = \\frac{s_{ik(-j)t} s_{ijt}}{s_{jt}} - v_m\n\n where :math:`s_{ik(-j)t} = s_{ijt} / (1 - s_{ijt})` is the probability of :math:`i` choosing :math:`k` when\n :math:`j` is removed from the choice set. Rearranging terms gives the equivalent definition\n\n .. math:: g_{M,imt} = \\frac{s_{ik(-j)t} - s_{ikt}}{s_{jt}} - v_m,\n\n which is more reminiscent of the long-run diversion ratios :math:`\\bar{\\mathscr{D}}_{jk}` computed by\n :meth:`ProblemResults.compute_long_run_diversion_ratios`.\n\n Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which\n the micro data used to compute :math:`v_m` are relevant, which gives :math:`\\bar{g}_{M,m}` in\n :eq:`averaged_micro_moments`.\n\n Parameters\n ----------\n product_id1 : `object`\n ID(s) of the first choice products :math:`j` or ``None`` to denote the outside option :math:`j = 0`. There must be\n exactly one of each of these ID(s) in the ``product_ids`` field of ``product_data`` in :class:`Problem` or\n :class:`Simulation` for each market over which this micro moment will be averaged.\n product_id2 : `object`\n ID of the second choice product :math:`k` or ``None`` to denote the outside option :math:`j = 0`. 
If not\n ``None``, there must be exactly one of this ID for each market over which this micro moment will be averaged.\n value : `float`\n Value :math:`v_m` of the share estimated from micro data.\n market_ids : `array-like, optional`\n Distinct market IDs over which the micro moments will be averaged to get :math:`\\bar{g}_{M,m}`. These are also\n the only markets in which the moments will be computed. By default, the moments are computed for and averaged\n across all markets. That is, by default, it is assumed that the specified ``value`` is relevant for and on\n average the same for all markets.\n\n Examples\n --------\n - :doc:`Tutorial `\n\n \"\"\"\n\n product_id1: Optional[Any]\n product_id2: Optional[Any]\n\n def __init__(\n self, product_id1: Any, product_id2: Optional[Any], value: Sequence,\n market_ids: Optional[Sequence] = None) -> None:\n \"\"\"Validate information about the moment to the greatest extent possible without an economy instance.\"\"\"\n super().__init__(value, market_ids)\n if product_id1 is None and product_id2 is None:\n raise ValueError(\"At least one of product_id1 or product_id2 must be not None.\")\n self.product_id1 = product_id1\n self.product_id2 = product_id2\n\n def _format_value(self) -> str:\n \"\"\"Construct a string expression for the covariance moment.\"\"\"\n product1 = \"Outside\" if self.product_id1 is None else f\"'{self.product_id1}'\"\n product2 = \"Outside\" if self.product_id2 is None else f\"'{self.product_id2}'\"\n return f\"P({product1} First, {product2} Second)\"\n\n def _validate(self, economy: 'Economy') -> None:\n \"\"\"Check that matrix indices are valid in the economy.\"\"\"\n super()._validate(economy)\n\n # if self.market_ids is None:\n # these_market_ids = economy.unique_market_ids\n # else:\n # these_market_ids = self.market_ids\n #\n # for t in these_market_ids:\n # count = 0\n # if self.product_id1 is not None:\n # for this_product in self.product_id1:\n # count += (self.products.product_ids[self._product_market_indices[t]] == this_product).sum()\n # if count == 0:\n # raise ValueError(\n # f\"None of the producs are in market '{t}': \"\n # f\"{list(sorted(self.products.product_ids[self._product_market_indices[t]]))}.\"\n # )\n\n for this_product in self.product_id1:\n economy._validate_product_id(this_product, self.market_ids)\n\n \n economy._validate_product_id(self.product_id2, self.market_ids)\n\n\nclass DiversionCovarianceMoment(Moment):\n r\"\"\"Configuration for micro moments that match covariances between product characteristics of first and second\n choices.\n\n For example, survey data can sometimes be used to compute the sample covariance between a product characteristic\n :math:`x_{jt}^{(1)}` of an agent's first choice :math:`j` and either the same or a different product characteristic\n :math:`x_{kt}^{(2)}` of the agent's second choice :math:`k` if :math:`j` were removed from the choice set, amongst\n those agents whose first and second choices are both inside goods. With the value :math:`v_m` of this sample\n covariance, a micro moment :math:`m` in market :math:`t` for agent :math:`i` can be defined by\n\n .. math:: g_{M,imt} = (z_{it}^{(1)} - \\bar{z}_t^{(1)})(z_{it}^{(2)} - \\bar{z}_t^{(2)}) - v_m\n\n where :math:`\\bar{z}_t^{(1)} = \\sum_i w_{it} z_{it}^{(1)}`, :math:`\\bar{z}_t^{(2)} = \\sum_i w_{it} z_{it}^{(2)}`,\n and conditional on purchasing inside goods, the expected values of :math:`x_{jt}^{(1)}` and :math:`x_{kt}^{(2)}` for\n agent :math:`i` are\n\n .. 
math::\n\n z_{it}^{(1)} = \\sum_{j \\in J_t} x_{jt}^{(1)} s_{ij(-0)t}, \\quad\n z_{it}^{(2)} = \\sum_{j, k \\in J_t} x_{kt}^{(2)} s_{ik(-0,j)t} s_{ij(-0)t}\n\n where :math:`s_{ij(-0)t}` is the probability of choosing :math:`j` when the outside option is removed from the\n choice set and :math:`s_{ik(-0,j)t}` is the probability of choosing :math:`k` when both the outside option and\n :math:`j` are removed from the choice set.\n\n Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which\n the micro data used to compute :math:`v_m` are relevant, which gives :math:`\\bar{g}_{M,m}` in\n :eq:`averaged_micro_moments`.\n\n Parameters\n ----------\n X2_index1 : `int`\n Column index of :math:`x_{jt}^{(1)}` in the matrix of demand-side nonlinear product characteristics,\n :math:`X_2`. This should be between zero and :math:`K_2 - 1`, inclusive.\n X2_index2 : `int`\n Column index of :math:`x_{kt}^{(2)}` in the matrix of demand-side nonlinear product characteristics,\n :math:`X_2`. This should be between zero and :math:`K_2 - 1`, inclusive.\n value : `float`\n Value :math:`v_m` of the sample covariance estimated from micro data.\n market_ids : `array-like, optional`\n Distinct market IDs over which the micro moments will be averaged to get :math:`\\bar{g}_{M,m}`. These are also\n the only markets in which the moments will be computed. By default, the moments are computed for and averaged\n across all markets. That is, by default, it is assumed that the specified ``value`` is relevant for and on\n average the same for all markets.\n\n Examples\n --------\n - :doc:`Tutorial `\n\n \"\"\"\n\n X2_index1: int\n X2_index2: int\n\n def __init__(self, X2_index1: int, X2_index2: int, value: float, market_ids: Optional[Sequence] = None) -> None:\n \"\"\"Validate information about the moment to the greatest extent possible without an economy instance.\"\"\"\n super().__init__(value, market_ids)\n if not isinstance(X2_index1, int) or X2_index1 < 0:\n raise ValueError(\"X2_index1 must be a positive int.\")\n if not isinstance(X2_index2, int) or X2_index2 < 0:\n raise ValueError(\"X2_index2 must be a positive int.\")\n self.X2_index1 = X2_index1\n self.X2_index2 = X2_index2\n\n def _format_value(self) -> str:\n \"\"\"Construct a string expression for the covariance moment.\"\"\"\n return f\"Cov(X2 Column {self.X2_index1} First, X2 Column {self.X2_index2} Second | Inside)\"\n\n def _validate(self, economy: 'Economy') -> None:\n \"\"\"Check that matrix indices are valid in the economy.\"\"\"\n super()._validate(economy)\n if self.X2_index1 >= economy.K2:\n raise ValueError(f\"X2_index1 must be between 0 and K2 = {economy.K2}, inclusive.\")\n if self.X2_index2 >= economy.K2:\n raise ValueError(f\"X2_index2 must be between 0 and K2 = {economy.K2}, inclusive.\")\n\n\nclass Moments(object):\n \"\"\"Information about a sequence of micro moments.\"\"\"\n\n micro_moments: Sequence[Moment]\n MM: int\n\n def __init__(self, micro_moments: Sequence[Moment]) -> None:\n \"\"\"Store information about a sequence of micro moment instances.\"\"\"\n self.micro_moments = micro_moments\n self.MM = len(micro_moments)\n\n def format(self, title: str, values: Optional[Array] = None) -> str:\n \"\"\"Format micro moments (and optionally their values) as a string.\"\"\"\n\n # construct the leftmost part of the table that always shows up\n header = [\"Index\", \"Markets\", \"Moment\"]\n data: List[List[str]] = []\n for m, moment in enumerate(self.micro_moments):\n data.append([str(m), 
moment._format_markets(), moment._format_moment()])\n\n # add moment values\n if values is not None:\n header.append(\"Value\")\n for m, value in enumerate(values):\n data[m].append(format_number(value))\n\n return format_table(header, *data, title=title)\n\n\nclass EconomyMoments(Moments):\n \"\"\"Information about a sequence of micro moments in an economy.\"\"\"\n\n market_indices: Dict[Hashable, Array]\n market_counts: Array\n pairwise_market_counts: Array\n\n def __init__(self, economy: 'Economy', micro_moments: Sequence[Moment]) -> None:\n \"\"\"Validate and store information about a sequence of micro moment instances in the context of an economy.\"\"\"\n\n # validate the moments\n if not isinstance(micro_moments, collections.abc.Sequence):\n raise TypeError(\"micro_moments must be a sequence of micro moment instances.\")\n for moment in micro_moments:\n if not isinstance(moment, Moment):\n raise TypeError(\"micro_moments must consist only of micro moment instances.\")\n try:\n moment._validate(economy)\n except Exception as exception:\n raise ValueError(f\"The micro moment '{moment}' is invalid.\") from exception\n\n # store basic moment information\n super().__init__(micro_moments)\n\n # identify market indices\n self.market_indices: Dict[Hashable, Array] = {}\n for t in economy.unique_market_ids:\n market_index_t = np.array([m.market_ids is None or t in m.market_ids for m in self.micro_moments])\n self.market_indices[t] = np.flatnonzero(market_index_t)\n\n # count the number of markets associated with moments\n self.market_counts = np.zeros((self.MM, 1), np.int)\n self.pairwise_market_counts = np.zeros((self.MM, self.MM), np.int)\n for m, moment_m in enumerate(self.micro_moments):\n market_ids_m = set(economy.unique_market_ids if moment_m.market_ids is None else moment_m.market_ids)\n self.market_counts[m] = len(market_ids_m)\n for n, moment_n in enumerate(self.micro_moments):\n market_ids_n = set(economy.unique_market_ids if moment_n.market_ids is None else moment_n.market_ids)\n self.pairwise_market_counts[m, n] = len(market_ids_m & market_ids_n)\n\n\nclass MarketMoments(Moments):\n \"\"\"Information about a sequence of micro moments in a market.\"\"\"\n\n def __init__(self, economy_moments: EconomyMoments, t: Hashable) -> None:\n \"\"\"Select only those micro moment instances that will be computed for this market.\"\"\"\n super().__init__([m for m in economy_moments.micro_moments if m.market_ids is None or t in m.market_ids])\n","sub_path":"pyblp/moments.py","file_name":"moments.py","file_ext":"py","file_size_in_byte":21952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497848227","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nimport os.path\nimport subprocess\nimport tempfile\nimport urllib.request\nimport PIL.Image\n\nDG_VERSION = '1.0.3'\nDG_URL_TEMPLATE = 'http://gungho-pqdm.s3.amazonaws.com/download/900_ReleaseAndroid/v{}/'\nDG_URL = DG_URL_TEMPLATE.format(DG_VERSION)\n\nCOMPONENTS = ('IconPackUnit', 'IconPackPanel', 'IconPackArea')\n\nDISUNITY_JAR_FILE = os.path.join('disunity', 'disunity.jar')\n\ntry:\n os.makedirs('icons', exist_ok=True)\n\n with tempfile.TemporaryDirectory() as d:\n for component in COMPONENTS:\n filename = component + '.unity3d'\n print('Downloading the latest', filename)\n\n unity3d_path = os.path.join(d, filename)\n urllib.request.urlretrieve(DG_URL + filename, unity3d_path)\n\n print('Unpacking unity3d...')\n subprocess.check_call(['java', '-jar', DISUNITY_JAR_FILE,\n '-c', 
'extract', unity3d_path])\n\n# subprocess.check_call(['java', '-cp',\n# '/Users/kennytm/XCodeProjects/ata/disunity/lib/commons-cli-1.2/commons-cli-1.2.jar:/Users/kennytm/XCodeProjects/ata/disunity/lib/commons-io-2.4/commons-io-2.4.jar:/Users/kennytm/XCodeProjects/ata/disunity/lib/commons-lang3-3.1/commons-lang3-3.1.jar:/Users/kennytm/XCodeProjects/ata/ioutils/dist/ioutils.jar:/Users/kennytm/XCodeProjects/ata/disunity/dist/disunity.jar',\n# 'info.ata4.unity.cli.DisUnityCli',\n# '-c', 'extract', unity3d_path])\n\n text_assets = os.path.join(d, component, 'TextAsset')\n texture2ds = os.path.join(d, component, 'Texture2D')\n\n for texture2d_filename in os.listdir(texture2ds):\n print(texture2d_filename, '...')\n png_filename = os.path.splitext(texture2d_filename)[0] + '.png'\n png_filename = os.path.join(texture2ds, png_filename)\n subprocess.check_call(['etc1tool',\n os.path.join(texture2ds, texture2d_filename),\n '--decode', '-o', png_filename])\n map_filename = os.path.splitext(texture2d_filename)[0] + '.txt'\n with open(os.path.join(text_assets, map_filename), 'r') as f:\n map_js = json.load(f, strict=False)\n\n image = PIL.Image.open(png_filename).transpose(PIL.Image.FLIP_TOP_BOTTOM)\n for fn, content in map_js['frames'].items():\n frame = content['frame']\n left = frame['x']\n top = frame['y']\n right = left + frame['w']\n bottom = top + frame['h']\n cropped_image = image.crop((left, top, right, bottom))\n cropped_image.save(os.path.join('icons', fn))\n\n print('Done')\n\nfinally:\n urllib.request.urlcleanup()\n\n","sub_path":"dg/download-icon-packs.py","file_name":"download-icon-packs.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"153267038","text":"from __future__ import absolute_import\nimport re\n\nimport schematics\nfrom schematics.exceptions import ModelValidationError, ModelConversionError\n\nfrom spidermon.contrib.validation.validator import Validator\nfrom .translator import SchematicsMessageTranslator\nfrom . 
import monkeypatches\n\n\nclass SchematicsValidator(Validator):\n default_translator = SchematicsMessageTranslator()\n name = \"Schematics\"\n\n def __init__(self, model, translator=None, use_default_translator=True):\n super(SchematicsValidator, self).__init__(\n translator=translator, use_default_translator=use_default_translator\n )\n self._model = model\n self._fields_required = {}\n self._save_required_fields()\n self._data = {}\n\n def _validate(self, data, strict=False):\n self._set_data(data)\n model = self._get_model_instance(strict=strict)\n try:\n model.validate()\n except ModelValidationError as e:\n self._add_errors(e.messages)\n self._restore_required_fields()\n\n def _reset(self):\n super(SchematicsValidator, self)._reset()\n self._data = {}\n\n def _set_data(self, data):\n self._data = dict(data)\n\n def _get_model_instance(self, strict):\n try:\n return self._model(raw_data=self._data, strict=strict)\n except ModelConversionError as e:\n self._add_errors(e.messages)\n for field_name in e.messages.keys():\n self._set_field_as_not_required(field_name)\n self._data.pop(field_name)\n return self._get_model_instance(strict=strict)\n\n def _save_required_fields(self):\n for field_name, field in self._model._fields.items():\n self._fields_required[field_name] = field.required\n\n def _restore_required_fields(self):\n for field_name, required in self._fields_required.items():\n self._model._fields[field_name].required = required\n\n def _set_field_as_not_required(self, field_name):\n if field_name in self._model._fields:\n self._model._fields[field_name].required = False\n\n def _add_errors(self, errors):\n if schematics.__version__.startswith(\"1.\"):\n for field_name, messages in errors.items():\n if isinstance(messages, dict):\n transformed_errors = self._get_transformed_child_errors(\n field_name, messages\n )\n self._add_errors(transformed_errors)\n else:\n self._errors[field_name] += (\n messages if isinstance(messages, list) else [messages]\n )\n else:\n from schematics.datastructures import FrozenDict\n\n for field_name, messages in errors.items():\n if isinstance(messages, (dict, FrozenDict)):\n transformed_errors = self._get_transformed_child_errors(\n field_name, messages\n )\n self._add_errors(transformed_errors)\n else:\n messages = self._clean_messages(messages)\n self._errors[field_name] += messages\n\n def _get_transformed_child_errors(self, field_name, errors):\n return dict([(\"%s.%s\" % (field_name, k), v) for k, v in errors.items()])\n\n def _clean_messages(self, messages):\n \"\"\"\n This is necessary when using Schematics 2.*, because it encapsulates\n the validation error messages in a different way.\n \"\"\"\n from schematics.exceptions import BaseError, ErrorMessage\n from schematics.datastructures import FrozenList\n\n if type(messages) not in (list, FrozenList):\n messages = [messages]\n\n clean_messages = []\n for message in messages:\n if isinstance(message, BaseError):\n message = message.messages\n\n if isinstance(message, ErrorMessage):\n clean_messages.append(message.summary)\n elif isinstance(message, FrozenList):\n for err in message:\n # err is an ErrorMessage object\n clean_messages.append(err.summary)\n else:\n clean_messages.append(message)\n\n return clean_messages\n","sub_path":"spidermon/contrib/validation/schematics/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112140152","text":"import 
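The SchematicsValidator record above re-keys nested field errors with dotted paths, one level per recursive _add_errors call. A compact standalone equivalent of that flattening, done fully recursively here:

```python
# Flatten nested validation errors into dotted keys, e.g.
# {"address": {"city": ["bad"]}} -> {"address.city": ["bad"]}.
def flatten_errors(errors, prefix=""):
    flat = {}
    for field, messages in errors.items():
        key = "%s.%s" % (prefix, field) if prefix else field
        if isinstance(messages, dict):
            flat.update(flatten_errors(messages, key))
        else:
            flat[key] = messages if isinstance(messages, list) else [messages]
    return flat

print(flatten_errors({"address": {"city": ["Rogue value"], "zip": "Bad zip"}}))
# {'address.city': ['Rogue value'], 'address.zip': ['Bad zip']}
```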
jieba\r\nimport math\r\n#import cut\r\nclass NoWordError(Exception):\r\n def __init__(self):\r\n print(\"This file is an empty text file\")\r\n\r\n\r\ndef tokenization(s1,s2):\r\n stopwords_position=open('stop_words','r',encoding=\"UTF-8\")\r\n stopwords = stopwords_position.read()\r\n stopwords_position.close()\r\n s1_cut = []\r\n data1 = jieba.cut(s1)\r\n for i in data1:\r\n if (i not in stopwords) and i != '':\r\n s1_cut.append(i)\r\n s2_cut = []\r\n data2 = jieba.cut(s2)\r\n for j in data2:\r\n if (j not in stopwords) and j != '': # words not in the stopword list are kept\r\n s2_cut.append(j)\r\n # print('s1_cut:',s1_cut)\r\n # print('s2_cut:',s2_cut)\r\n\r\n word_set = set(s1_cut).union(set(s2_cut))\r\n # print('word_set',word_set)# s1_cut is the token list of s1, s2_cut is the token list of s2; word_set is the union of the two lists, with shared tokens collapsed\r\n return word_set,s1_cut,s2_cut\r\n#import dict\r\ndef dictionary(word_set,s1_cut,s2_cut):\r\n # use a dictionary to store every word that appears in the two documents and number each one\r\n #from cut import word_set, s1_cut, s2_cut\r\n\r\n word_dict = dict() # create the dictionary; it is empty for now and is configured below; word_dict[word] is the value mapped to word\r\n\r\n i = 0\r\n for word in word_set:\r\n word_dict[word] = i\r\n i += 1\r\n #\r\n # print(word_dict)\r\n\r\n ##dict['Age'] returns the value mapped to 'Age'\r\n\r\n # count how often each word occurs in each document under the bag-of-words model, forming vectors\r\n s1_cut_code = [0] * len(word_dict)\r\n\r\n for word in s1_cut:\r\n s1_cut_code[word_dict[word]] += 1\r\n # print(\"s1_cut_code:\",s1_cut_code)\r\n s2_cut_code = [0] * len(word_dict)\r\n for word in s2_cut:\r\n s2_cut_code[word_dict[word]] += 1\r\n return s1_cut_code,s2_cut_code\r\n#import sim_ans\r\ndef sim_ans(s1_cut_code,s2_cut_code):\r\n #sum=0.0\r\n\r\n sum = 0\r\n sq1 = 0\r\n sq2 = 0\r\n for i in range(len(s1_cut_code)):\r\n sum += s1_cut_code[i] * s2_cut_code[i]\r\n sq1 += pow(s1_cut_code[i], 2)\r\n sq2 += pow(s2_cut_code[i], 2)\r\n try:\r\n result = round(float(sum) / (math.sqrt(sq1) * math.sqrt(sq2)), 3)\r\n except ZeroDivisionError:\r\n result = 0.0\r\n print(\"This is an empty text, or the two texts are completely unrelated; similarity:\",result)\r\n print(\"Cosine similarity: %.2f\" % result)\r\n return result\r\ndef write_ans(result):\r\n ans_txt = open(\"./ans.txt\", 'w', encoding=\"UTF-8\")\r\n sim = str('%.2f' % result)\r\n ans_txt.write(sim)\r\n ans_txt.close()\r\n\r\ndef main_test(orig_position,orig_sim_position,ans_position):\r\n s1_position=open(orig_position,'r',encoding=\"UTF-8\")\r\n s1= s1_position.read()\r\n s1_position.close()\r\n s2_position=open(orig_sim_position,'r',encoding=\"UTF-8\")\r\n s2 = s2_position.read()\r\n s2_position.close()\r\n # empty-text exception check\r\n if s1 == '':\r\n print(\"error:\")\r\n raise NoWordError\r\n if s2 == '':\r\n print(\"error:\")\r\n raise NoWordError\r\n\r\n word_set, s1_cut, s2_cut = tokenization(s1, s2)\r\n s1_cut_code, s2_cut_code = dictionary(word_set, s1_cut, s2_cut)\r\n result = sim_ans(s1_cut_code, s2_cut_code)\r\n write_ans(result)\r\n\r\n\r\n\r\n","sub_path":"031802518/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"460112494","text":"from itertools import combinations\n\n\ndef read_input(path):\n with open(path, 'r') as f:\n output = [int(i) for i in f.read().splitlines()]\n return output\n\n\nif __name__ == '__main__':\n stream = read_input('input.txt')\n preamble_len = 25\n invalid_nbr = None\n for i in range(preamble_len, len(stream)):\n valid_sum = False\n preamble = stream[i - preamble_len:i]\n for int_1, int_2 in combinations(preamble, 2):\n if int_1 + int_2 == stream[i]:\n valid_sum = True\n break\n if not valid_sum:\n invalid_nbr = stream[i]\n break\n 
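The test_function.py record above builds bag-of-words count vectors and scores them with cosine similarity. A small worked example of the same formula:

```python
import math

# dot(v1, v2) / (|v1| * |v2|) over two word-count vectors.
v1 = [1, 2, 0, 1]
v2 = [1, 1, 1, 0]
dot = sum(a * b for a, b in zip(v1, v2))  # 1 + 2 + 0 + 0 = 3
norm = math.sqrt(sum(a * a for a in v1)) * math.sqrt(sum(b * b for b in v2))
print(round(dot / norm, 3))  # 3 / (sqrt(6) * sqrt(3)) = 0.707
```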
print(invalid_nbr)\n\n\n","sub_path":"task_09/task_9_1.py","file_name":"task_9_1.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44507972","text":"import math\n\nfin = open(\"maxnumber.in\", 'r')\nfout = open(\"maxnumber.out\", 'w')\n\nn = int(fin.readline())\na = list(map(int, fin.readline().split()))\n\nd = {}\ni = 0\nwhile True:\n if i >= len(a):\n break\n if a[i] % 2 == 0:\n a.append(a[i] // 2)\n a.append(a[i] // 2)\n else:\n d[a[i]] = d.get(a[i], 0) + 1\n i += 1\nd = list(d.items())\nans = []\n#print(d)\nfor i in range(len(d)):\n t = d[i]\n c = t[1]\n res = t[0]\n while c > 1:\n c //= 2\n res *= 2\n ans.append(res)\nprint(max(ans), file=fout)\nfin.close()\nfout.close()\n","sub_path":"anichkov-camp-2018/DAY14/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85679043","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom re import sub\r\nfrom xml.dom import minidom\r\n\r\n\r\ndef cleanIUPAC(IUPAC):\r\n newIUPAC = sub(\"[~{}]\", \"\", IUPAC)\r\n return newIUPAC\r\n\r\n\r\ndef cleanIUPACInChI(IUPACInChI):\r\n newIUPACInChI = sub(\"InChI=\", \"\", IUPACInChI)\r\n return newIUPACInChI\r\n\r\n\r\ndef prettifyFormula(formula):\r\n newFormula = []\r\n for char in formula:\r\n if char.isalpha():\r\n newFormula.append(\"%s\" % (char))\r\n elif char.isdigit():\r\n newFormula.append(\"%s\" % (char))\r\n return \"\".join(newFormula)\r\n\r\n\r\ndef getDescribtorSoup(CID, attribute):\r\n URL = \"https://pubchem.ncbi.nlm.nih.gov/rest/rdf/descriptor/CID%s.html\" % (CID + attribute)\r\n Page = requests.get(URL)\r\n Soup = BeautifulSoup(Page.text, \"html.parser\")\r\n return Soup\r\n\r\n\r\ndef getDescribtorValue(CID, attribute):\r\n try:\r\n URL = \"https://pubchem.ncbi.nlm.nih.gov/rest/rdf/descriptor/CID%s.html\" % (CID + attribute)\r\n Page = requests.get(URL)\r\n Soup = BeautifulSoup(Page.text, \"html.parser\")\r\n Value = Soup.find(\"span\", {\"class\": \"value\"}).string\r\n\r\n except AttributeError as e:\r\n Value = \"no Value\"\r\n\r\n return Value\r\n\r\n\r\ndef getCompound(compound):\r\n imageName = \"%s.png\" % (compound)\r\n pageURL = \"https://pubchem.ncbi.nlm.nih.gov/compound/%s\" % (compound)\r\n page = requests.get(pageURL)\r\n soup = BeautifulSoup(page.text, \"html.parser\")\r\n imageSource = soup.find(\"meta\", {\"property\": \"og:image\"})[\"content\"]\r\n\r\n CID = soup.find(\"meta\", {\"name\": \"pubchem_uid_value\"})[\"content\"]\r\n\r\n iupacSoup = getDescribtorSoup(CID, \"_Preferred_IUPAC_Name\")\r\n\r\n try:\r\n IUPAC = iupacSoup.find(\"span\", {\"class\": \"value\"}).string\r\n preferred = True\r\n except AttributeError as e:\r\n IUPAC = soup.find(\"meta\", {\"property\": \"og:title\"})[\"content\"]\r\n preferred = False\r\n\r\n IUPAC = cleanIUPAC(IUPAC)\r\n\r\n return imageSource, pageURL, IUPAC, preferred, CID\r\n\r\n\r\ndef getInfo(CID):\r\n info = []\r\n xmldoc = minidom.parse('attribute.xml')\r\n attributeList = xmldoc.getElementsByTagName('item')\r\n\r\n for i in attributeList:\r\n info.append(getDescribtorValue(CID, i.attributes['name'].value))\r\n\r\n info[0] = prettifyFormula(info[0])\r\n info[2] = cleanIUPACInChI(info[2])\r\n\r\n return info\r\n","sub_path":"bunshi/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
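The task_9_1.py record above scans a number stream for the first entry that is not the sum of any two numbers in the preceding window. A self-contained demo of the same check, shrunk to a preamble of 5 and using what appears to be the Advent of Code 2020 day 9 example sequence (an assumption about this script's origin):

```python
from itertools import combinations

# Same sliding-window validity check as in the record, with a preamble of 5.
stream = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127]
preamble_len = 5
for i in range(preamble_len, len(stream)):
    window = stream[i - preamble_len:i]
    if not any(a + b == stream[i] for a, b in combinations(window, 2)):
        print(stream[i])  # prints 127, the first number with no pair sum
        break
```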
+{"seq_id":"440973747","text":"#This is Catherine Experiment\n\nimport os #handy system and path functions\nfrom psychopy import core, visual, data, event, sound, gui, parallel #these are the psychopy libraries\nimport random, math\nimport visualExtra\n\nsaveImage = False #a flag for the saving of static images\n\n#parallel.PParallelInpOut32\n#parallel.setPortAddress(address=0xE010)\n#parallel.setData(0)\n\n#store info about the experiment\nif not os.path.isdir('data'): #folder data\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nexpInfo={'participant':'','number':0}\ndlg=gui.DlgFromDict(dictionary=expInfo, title='Rotation')\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']='rotation'\n\nif saveImage:\n myWin = visual.Window(allowGUI=False, units='pix', size =(800,800), fullscr=False)#creates a window using pixels as units\n orderTrials = 'sequential'\nelse:\n myWin = visual.Window(allowGUI=False, units='pix', size =(800,800), fullscr=False)#creates a window using pixels as units\n orderTrials = 'random'\n\ncolPoly =[-.4,-.4,-.4]\ncolTop =[-.3,-.4,-.4]\n#these lines are very powerful, they create all stimuli using the special functions of psychopy\n#fixation=visual.TextStim(myWin, text='+', pos=[0, 0], height=30, color='red')#this is a gray cross for the fixation\nstim1 = visual.ImageStim(myWin, image='symm.png') #import an image from a file\npolyL = visual.ShapeStim(myWin, lineColor=None, fillColor=None)\npolyR = visual.ShapeStim(myWin, lineColor=None, fillColor=None)\npolyL2 = visual.ShapeStim(myWin, lineColor=None, fillColor=colPoly)\npolyR2 = visual.ShapeStim(myWin, lineColor=None, fillColor=colPoly)\npolyTop = visual.ShapeStim(myWin, lineColor=None, fillColor=colTop)\ncontour =visual.ShapeStim(myWin, lineColor='black', fillColor=None)\ncontourFill =visual.ShapeStim(myWin, lineColor=None, fillColor=colPoly)\nframe = visual.Rect(myWin, lineColor='blue', fillColor=None, width = 240, height=400)\nfixation = visual.Circle(myWin, lineColor=None, fillColor='red', radius = 10)\ndot = visual.Circle(myWin, lineColor='red', fillColor=None, radius = 12)\noccluder = visual.ShapeStim(myWin, fillColor='white', lineColor=None, vertices=[[0,300],[200,300],[200,-300],[0,-300]])\noccluderTop = visual.ShapeStim(myWin, fillColor=[1,.9,.9], lineColor=None, vertices=[[0,300],[200,300],[200,0],[0,0],[0,300]])\ngaborSize = 32\ngabor = visual.GratingStim(myWin, tex=\"sin\", mask=\"gauss\", texRes=256, pos=[0,-300], units='pix', size=[gaborSize,gaborSize], contrast=0.5, phase=0.5, sf=[0.125,0.125], ori=0)\ngaborBig = visual.GratingStim(myWin, tex=\"sin\", mask=\"gauss\", texRes=256, pos=[0,0], units='pix', size=[160,160], contrast=0.5, phase=0.5, sf=[0.125,0.125], ori=0)\nmyClock = core.Clock()#this creates adn starts a clock which we can later read\nresponseScreen=visual.TextStim(myWin, ori=0, pos=[0,0], height=24, color='white', units= 'pix') #font='Times New Roman')\nwarn = sound.Sound(value='C',secs=0.6,sampleRate=44100)#, bits=16\n\nnsteps = 16\nheight = 200\nwidth =120\nystep = 2. * height / nsteps # 25\njitter =46\n\ndef message(type, nBlocksToGo = 1):\n if type == 'Practice1':\n message = 'Welcome to the Practice. You will see a rotating pattern. Your task is to decide whether the grey object has a reflection'\n if type == 'Practice2':\n message = 'A bit more practice, now with just two presentations. 
Your task is again to decide whether the grey object has a reflection'\n elif type == 'Experiment':\n message = 'Now for the real thing, its the same, but much longer. Try not to blink, and keep your eyes on the central fixation point'\n elif type == 'Goodbye':\n message ='Thank you for taking part in the Experiment'\n elif type == 'Break':\n message = '%d Blocks to go: Wait for experimenter to check electrodes!' %nBlocksToGo\n# if not type == 'Break':\n myWin.flip()\n Instructions=visual.TextStim(myWin, ori=0, text=message, pos=[0, 0], height=30, color='white', units= 'pix')\n Instructions.draw()\n myWin.flip()\n event.waitKeys(keyList=['g'])\n\ndef collectResponse(regularity, responseScreenVersion, startPos, practice):\n event.clearEvents()\n if startPos=='left':\n responseScreen.setPos([0,100])\n elif startPos=='right':\n responseScreen.setPos([0,-100])\n \n if responseScreenVersion == 1:\n responseScreen.setText('reflection random')\n responseScreen.draw()\n myWin.flip() # \n\n responseKey = event.waitKeys(keyList=['a', 'l','escape'])[0]\n if responseKey == 'a':\n choice = 'ref'\n elif responseKey == 'l':\n choice = 'ran'\n elif responseKey =='escape':\n myWin.close()\n core.quit()\n if regularity== choice:\n respCorr = 1\n else:\n respCorr = 0\n elif responseScreenVersion == 2:\n responseScreen.setText('random reflection')\n responseScreen.draw()\n myWin.flip()\n \n responseKey = event.waitKeys(keyList=['a', 'l','escape'])[0]\n if responseKey == 'l':\n choice = 'ref'\n elif responseKey == 'a':\n choice = 'ran'\n elif responseKey =='escape':\n myWin.close()\n core.quit()\n if regularity== choice:\n respCorr = 1\n else:\n respCorr = 0\n \n if respCorr==0 and practice==True:\n warn.play()\n return respCorr, choice\n \ndef rotateObject(obj, deg):\n \n v = obj.vertices\n visualExtra.rotateVertices(vertices= v, a=deg)\n obj.vertices = v\n\ndef drawGabors(coordsGL, coordsGR, oriGL, oriGR, direction):\n\n gaborBig.setOri(direction)\n coordsGR = visualExtra.rotateVertices(vertices= coordsGR, a=direction)\n coordsGL = visualExtra.rotateVertices(vertices= coordsGL, a=direction)\n \n gaborBig.draw()\n\n# for i in range(len(coordsGR)):\n# gabor.setPos(coordsGR[i])\n# gabor.setOri(oriGR[i][0])\n# gabor.draw()\n# gabor.setPos([-40,0],\"+\")\n# gabor.setOri(oriGR[i][0])\n# gabor.draw()\n# for i in range(len(coordsGL)):\n# gabor.setPos(coordsGL[i])\n# gabor.setOri(oriGL[i][0])\n# gabor.draw()\n# gabor.setPos([+40,0],\"+\")\n# gabor.setOri(oriGL[i][0])\n# gabor.draw()\n\n\ndef prepareOri(regularity):\n \n oriGR = []\n oriGL = []\n \n for i in range(1, nsteps):\n oriGR.append([random.uniform(0,180),random.uniform(0,180)])\n# oriG.append(random.uniform(0,180))\n# oriG.append(random.uniform(0,180))\n \n if regularity =='ref':\n oriGL= [[-i[0],-i[1]] for i in oriGR]\n# oriG.append(-oriG[10 -i])\n# oriG.append(oriG[10 -i])\n elif regularity=='ran':\n for i in range(1, nsteps):\n oriGL.append([random.uniform(0,180),random.uniform(0,180)])\n# oriG.append(random.uniform(0,180))\n# oriG.append(random.uniform(0,180))\n\n return oriGR, oriGL\n \ndef prepareCoords(regularity):\n\n coordsL =[]\n coordsR =[]\n coordsGL =[]\n coordsGR =[]\n \n coordsR.append([0,-height])\n y = -height\n for i in range(nsteps+1):\n# if i < \n x = width + random.uniform(-jitter,jitter)\n coordsR.append([x,y])\n y = y + ystep\n coordsR.append([0,height])\n coordsR.append([0,ystep*2])\n \n coordsL.append([0,-height])\n y = -height\n if regularity =='ref':\n for i in range(1,nsteps+2):\n x = -coordsR[i][0]\n coordsL.append([x,y])\n y = y 
+ ystep\n elif regularity=='ran':\n for i in range(1, nsteps +2):\n x = -width + random.uniform(-jitter,jitter)\n coordsL.append([x,y])\n y = y + ystep\n coordsL.append([0,height])\n coordsL.append([0,ystep*2])\n \n for i in range(2, nsteps+ 1):\n coordsGR.append([coordsR[i][0] -20, coordsL[i][1]])\n# coordsGR.append([coordsR[i][0] -20-gaborSize, coordsR[i][1]])\n# coordsG.append([coords[i][0] -20-gaborSize*2, coords[i][1]])\n for i in range(2, nsteps+ 1): \n coordsGL.append([coordsL[i][0] +20, coordsL[i][1]])\n# coordsGL.append([coordsL[i][0] +20+gaborSize, coordsL[i][1]])\n# coordsG.append([coords[i][0] +20+gaborSize*2, coords[i][1]])\n\n return coordsL, coordsR, coordsGL, coordsGR\n\ndef drawGreyPolys(v):\n\n c = []\n for i in range(1,17):\n c.append(v[i])\n c.append(v[i+1])\n c.append(v[36-i-1])\n c.append(v[36-i])\n c.append(v[i])\n contourFill.setVertices(c)\n if i==12:\n contourFill.setFillColor(colTop)\n contourFill.draw()\n c =[]\n\n contourFill.setFillColor(colPoly)\n\ndef drawPolys(coordsL, coordsR, direction):\n\n coordsR = visualExtra.rotateVertices(vertices= coordsR, a=direction)\n coordsL = visualExtra.rotateVertices(vertices= coordsL, a=direction)\n\n polyR.setVertices(coordsR)\n polyL.setVertices(coordsL)\n \n# polyR.draw()\n# polyL.draw()\n\n# position =nsteps /2 + 3 # for nsteps 10 this is 7 for nsteps 16 this is 10\n# v = list(coordsL[position:])\n# polyTop.setVertices(v)\n# polyTop.draw()\n# v = list(coordsR[position:])\n# polyTop.setVertices(v)\n# polyTop.draw()\n \n position = nsteps +2 # if nsteps is 10 this shoudl be 12 for nsteps 16 this is 18\n v1 = list(coordsR[:position]) \n v2 = coordsL[::-1]\n v = v1+v2[1:position+1]\n #print len(v1), len(v2), len(v)\n contour.setVertices(v)\n\n drawGreyPolys(v)\n contour.draw()\n \n# coordsR = [[i*.9,j*.9] for [i,j] in coordsR]\n# coordsL =[[i*.9,j*.9] for [i,j] in coordsL]\n#\n# polyR.setVertices(coordsR)\n# polyL.setVertices(coordsL)\n# \n# polyR.draw() # these are the smaller versions\n# polyL.draw()\n#\n# v1 = list(coordsR[:12]) \n# v2 = coordsL[::-1]\n# v = v1+v2[1:13]\n# contour.setVertices(v)\n# contour.draw()\n# \n \n# dot.setPos(coordsL[0])\n# dot.draw()\n# dot.setPos(coordsL[1]) \n# dot.draw()\n# dot.setPos(coordsL[2])\n# dot.draw()\n# dot.setPos(coordsR[10]) \n# dot.draw()\n# dot.setPos(coordsR[11])\n# dot.draw()\n# dot.setPos(coordsR[16]) \n# dot.draw()\n# dot.setPos(coordsL[16]) \n# dot.draw()\n\n#def drawPolysTop(coordsL, coordsR, direction):\n# \n# coordsR = visualExtra.rotateVertices(vertices= coordsR, a=direction)\n# coordsL = visualExtra.rotateVertices(vertices= coordsL, a=direction)\n#\n# polyR.setVertices(coordsR)\n# polyL.setVertices(coordsL)\n# \n# polyR.draw()\n# polyL.draw()\n\n\ndef runBlock(trialbook=\"\", practice=True, repeti=1):\n \n trials=data.TrialHandler(nReps=repeti, method=orderTrials, trialList=data.importConditions(trialbook)) \n blockDuration = 24\n nBlocksToGo = trials.nTotal / blockDuration #336/24=14\n counter =0\n \n # This long loop runs through the trials\n for thisTrial in trials:\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec('{} = thisTrial[paramName]'.format(paramName), locals(), globals())\n #exec(paramName+'=thisTrial.'+paramName)\n\n if counter == blockDuration and practice=='exp':\n nBlocksToGo = nBlocksToGo - 1\n message('Break', nBlocksToGo = nBlocksToGo)\n counter = 0\n counter = counter +1\n \n fixation.draw()#draws fixation\n myWin.flip() # fixation and nothing else\n\n occluder.vertices =([[0,300],[200,300],[200,-300],[0,-300],[0,300]])\n 
occluderTop.vertices =([[0,300],[200,300],[200,100],[0,100],[0,300]])\n v = occluder.vertices\n vTop = occluderTop.vertices\n if startPos =='left':\n v = [[-i[0],i[1]] for i in v]\n vTop = [[-i[0],i[1]] for i in vTop] \n occluder.vertices = visualExtra.rotateVertices(vertices= v, pos=[0,0], a=direction)\n occluderTop.vertices = visualExtra.rotateVertices(vertices= vTop, pos=[0,0], a=direction)\n \n # regularity ='ref' # debug\n # direction = 45 # debug\n \n# t = myClock.getTime() + 0.5 # fixation for this much time\n# while myClock.getTime() < t:\n# pass #do nothing\n\n occluder.draw()\n occluderTop.draw()\n fixation.draw() #draws fixation\n myWin.flip() # fixation and occluder\n \n t = myClock.getTime() + 1.5 # occluder for this much time\n while myClock.getTime() < t:\n pass #do nothing\n \n if saveImage:\n myWin.getMovieFrame()\n myWin.saveMovieFrames(\"images/baseline\"+str(direction)+startPos+\"2.png\")\n \n coordsL, coordsR, coordsGL, coordsGR =prepareCoords(regularity)\n oriGL, oriGR = prepareOri(regularity)\n \n drawPolys(coordsL, coordsR, direction)\n# drawGabors(coordsGL, coordsGR, oriGL, oriGR, direction)\n \n occluder.draw()\n occluderTop.draw()\n fixation.draw()#draws fixation\n myWin.flip() # firts interval\n #parallel.setData(trigger)\n t = myClock.getTime() + .02 # first image for this much time\n while myClock.getTime() < t:\n pass #do nothing\n #parallel.setData(0)\n \n if saveImage:\n myWin.getMovieFrame()\n myWin.saveMovieFrames(\"images/\"+regularity+str(direction)+startPos+\".png\")\n key = event.waitKeys(keyList=['g'])\n \n t = myClock.getTime() + firstHalfDuration -0.02 # first image for this much time\n while myClock.getTime() < t:\n pass #do nothing\n\n if practice =='practice1':\n # gaborBig.setOri(direction)\n drawPolys(coordsL, coordsR, direction-90)\n # drawGabors(coordsGL, coordsGR, oriGL, oriGR, direction+90)\n# v = occluder.vertices\n# vTop = occluderTop.vertices\n v2 = visualExtra.rotateVertices(vertices= v, pos=[0,0], a=-45) #occluder.vertices\n vTop2 = visualExtra.rotateVertices(vertices= vTop, pos=[0,0], a=-45) #occluderTop.vertices\n# v2 = [[-i[0],i[1]] for i in v2]\n# vTop2 = [[-i[0],i[1]] for i in vTop2] \n occluder.vertices =v2\n occluderTop.vertices = vTop2\n occluder.draw()\n occluderTop.draw()\n fixation.draw()#draws fixation\n myWin.flip() # extra interval for the practice\n key = event.waitKeys(keyList=['g'])\n core.wait(0.06)\n\n drawPolys(coordsL, coordsR, direction-90)\n v3 = visualExtra.rotateVertices(vertices= v, pos=[0,0], a=direction) #occluder.vertices\n vTop3 = visualExtra.rotateVertices(vertices= vTop, pos=[0,0], a=direction) #occluderTop.vertices\n v3 = [[-i[0],i[1]] for i in v3]\n vTop3 = [[-i[0],i[1]] for i in vTop3] \n occluder.vertices =v3\n occluderTop.vertices = vTop3\n# occluder.vertices = visualExtra.rotateVertices(vertices= v, pos=[0,0], a=0)\n# occluderTop.vertices = visualExtra.rotateVertices(vertices= vTop, pos=[0,0], a=0)\n\n occluder.draw()\n occluderTop.draw()\n fixation.draw()#draws fixation\n myWin.flip() # second interval\n \n if saveImage:\n myWin.getMovieFrame()\n myWin.saveMovieFrames(\"images/\"+regularity+str(direction)+startPos+\"2.png\")\n \n t = myClock.getTime() + secondHalfDuration # second image for this much time\n while myClock.getTime() < t:\n pass #do nothing\n\n drawPolys(coordsL, coordsR, direction-90)\n occluder.draw()\n occluderTop.draw()\n fixation.draw()#draws fixation\n \n timenow = myClock.getTime()\n correct, choice = collectResponse(regularity, responseScreenVersion, startPos, 
practice!='exp')\n# correct =1 #debug\n# choice ='ref' #debug\n respRT = myClock.getTime()-timenow #time taken to respond\n \n trials.addData('correct', correct)\n trials.addData('choice', choice)\n trials.addData('respRt', respRT)\n \n #save all data\n trials.extraInfo =expInfo\n if practice=='exp':\n trials.saveAsWideText('data/rotation.txt')\n trials.saveAsExcel('data/rotation.xlsx', sheetName= 'HorizontalVertical', dataOut=['all_raw'])\n else:\n trials.saveAsWideText('data/practice.txt')\n trials.saveAsExcel('data/practice.xlsx', sheetName= 'HorizontalVertical', dataOut=['all_raw'])\n\nmessage('Practice1')\nrunBlock(trialbook='rotationBook.xlsx' , practice='practice1', repeti=1) # (16 trials: 8 conditions: 2 repetitions) .\nmessage('Practice2')\nrunBlock(trialbook='rotationBook.xlsx' , practice='practice2', repeti=2) # (8 trials: 8 conditions: 1 repetitions) .\nmessage('Experiment')\nrunBlock(trialbook='rotationBook.xlsx', practice='exp', repeti=42) # (336 trials: 8 conditions: 42 repetitions) . \nmessage('Goodbye')\nmyWin.close()\ncore.quit()\n\n\n\n\n","sub_path":"MATLAB Installed/BIDS_resources/P028_EX2/Code/RotationV3.py","file_name":"RotationV3.py","file_ext":"py","file_size_in_byte":16996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"620912235","text":"from flask import Flask, render_template\nfrom datetime import datetime, timedelta\nimport json\nimport random\nfrom timeit import default_timer as timer \n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n \"\"\"\n Method that returns the current server time\n \"\"\"\n request_time = timer()\n server_time_now = datetime.now()\n response_time = timer()\n return json.dumps(\n {\n 'server_time_now': str(server_time_now),\n 'next_bus': str(server_time_now + timedelta(minutes=random.randint(1, 45))),\n 'process_time': str(response_time - request_time)\n }\n )\n\nif __name__ == \"__main__\":\n \"\"\"\n Initialize server\n \"\"\"\n print('Server starting')\n app.run(host='0.0.0.0', port=5001)","sub_path":"University/UNIP/APS/#7/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"374016820","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)\n#\n# (1) Kamaelia Contributors are listed in the AUTHORS file and at\n# http://www.kamaelia.org/AUTHORS - please extend this file,\n# not this notice.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport Kamaelia.Protocol.Framing as Framing\n\nfrom Axon.Ipc import producerFinished\nimport Axon\n\nclass Framing_Tests(unittest.TestCase):\n\n def test_tuple2string_1(self):\n \"SimpleFrame.__str__ - (1,'1') results in '1 1\\\\n1'\"\n Frame = Framing.SimpleFrame(1,'1')\n result = str(Frame)\n self.assertEqual(result, '1 1\\n1')\n\n def test_tuple2string_2(self):\n \"SimpleFrame.__str__ - (1,'11') results in '1 
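The Framing tests that begin above exercise a frame consisting of an id and a payload length on a header line, followed by the payload itself. A standalone sketch of that wire format (an illustration, not Kamaelia's SimpleFrame):

```python
# "tag length\npayload" framing, matching the strings used in the tests.
def frame(tag, data):
    return "%s %s\n%s" % (tag, len(data), data)

def deframe(s):
    header, _, rest = s.partition("\n")
    tag, length = header.split()
    return int(tag), rest[:int(length)]

assert frame(1, "1") == "1 1\n1"
assert deframe("50 4\n1111") == (50, "1111")
assert deframe("1 2\n1111") == (1, "11")  # bytes beyond the length are dropped
```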
2\\\\n11'\"\n Frame = Framing.SimpleFrame(1,'11')\n result = str(Frame)\n self.assertEqual(result, '1 2\\n11')\n\n def test_tuple2string_3(self):\n \"SimpleFrame.__str__ - (1,'1111') results in '1 4\\\\n1111'\"\n Frame = Framing.SimpleFrame(1,'1111')\n result = str(Frame)\n self.assertEqual(result, '1 4\\n1111')\n\n def test_tuple2string_4(self):\n \"SimpleFrame.__str__ - (50,'1111') results in '50 4\\\\n1111'\"\n Frame = Framing.SimpleFrame(50,'1111')\n result = str(Frame)\n self.assertEqual(result, '50 4\\n1111')\n\n def test_tuple2string_5(self):\n \"SimpleFrame.__str__ - Random data as the data part of the tuple succeeds\"\n import random\n length = 100\n r = []\n for x in range(length):\n r.append( chr(random.randrange(0,256)) )\n data = \"\".join(r)\n \n Frame = Framing.SimpleFrame(50,data)\n result = str(Frame)\n self.assertEqual(result, \"50 100\\n\"+str(data))\n\n def test_tuple2string_6(self):\n \"SimpleFrame.__str__ - Random data of random length as the data part of the tuple succeeds\"\n import random\n length = random.randrange(100,200)\n r = []\n for x in range(length):\n r.append( chr(random.randrange(0,256)) )\n data = \"\".join(r)\n \n Frame = Framing.SimpleFrame(50,data)\n result = str(Frame)\n self.assertEqual(result, \"50 \"+str(length)+\"\\n\"+str(data))\n\n def test_tuple2string_7(self):\n \"SimpleFrame.__str__ - Completely random frame\"\n import random\n length = random.randrange(100,200)\n r = []\n for x in range(length):\n r.append( chr(random.randrange(0,256)) )\n data = \"\".join(r)\n\n stamp = random.randrange(0,200)\n \n Frame = Framing.SimpleFrame(stamp,data)\n result = str(Frame)\n self.assertEqual(result, str(stamp)+ \" \"+str(length)+\"\\n\"+str(data))\n\n def test_string2tuple(self):\n \"SimpleFrame.fromString - '1 1\\\\n1' is parsed as (1,'1')\"\n result = Framing.SimpleFrame.fromString('1 1\\n1')\n self.assertEqual(result, (1,'1') )\n\n def test_string2tuple_1(self):\n \"SimpleFrame.fromString - '1 2\\\\n11' is parsed as (1,'11')\"\n result = Framing.SimpleFrame.fromString('1 2\\n11')\n self.assertEqual(result, (1,'11') )\n\n def test_string2tuple_2(self):\n \"SimpleFrame.fromString - '1 2\\\\n1111' is parsed as (1,'11') -- Corrupt frame!\"\n result = Framing.SimpleFrame.fromString('1 2\\n1111')\n self.assertEqual(result, (1,'11') )\n\n def test_string2tuple_4(self):\n \"SimpleFrame.fromString - '1 2\\\\n' causes exception to be thrown - Corrupt frame!\"\n try:\n result = Framing.SimpleFrame.fromString('1 2\\n')\n self.fail(\"Should have died\")\n except Framing.ShortFrame:\n # Success\n pass\n\n def test_tuple2string_string2tuple_roundtrip_1(self):\n \"SimpleFrame.__str__, Framing.SimpleFrame.fromString - roundtrip for (1,'1') succeeds\"\n original = (1,'1')\n Frame = Framing.SimpleFrame(*original)\n FrameForWire = str(Frame)\n result = Framing.SimpleFrame.fromString(FrameForWire)\n self.assertEqual(original, result, \"passed through unchanged\")\n\n def test_tuple2string_string2tuple_roundtrip_2(self):\n \"SimpleFrame.__str__, Framing.SimpleFrame.fromString - roundtrip for (50,'1111') succeeds\"\n original = (50,'1111')\n Frame = Framing.SimpleFrame(*original)\n FrameForWire = str(Frame)\n result = Framing.SimpleFrame.fromString(FrameForWire)\n self.assertEqual(original, result, \"passed through unchanged\")\n\n def test_tuple2string_string2tuple_roundtrip_3(self):\n \"SimpleFrame.__str__, Framing.SimpleFrame.fromString - roundtrip for random data, frame id 50 succeeds\"\n import random\n length = random.randrange(100,200)\n r = []\n for x in 
range(length):\n r.append( chr(random.randrange(0,256)) )\n data = \"\".join(r)\n original = (50,data)\n Frame = Framing.SimpleFrame(*original)\n FrameForWire = str(Frame)\n result = Framing.SimpleFrame.fromString(FrameForWire)\n self.assertEqual(original, result, \"passed through unchanged\")\n\n def test_tuple2string_string2tuple_roundtrip_4(self):\n \"SimpleFrame.__str__, Framing.SimpleFrame.fromString - roundtrip for random data, random frame id succeeds\"\n import random\n length = random.randrange(100,200)\n r = []\n for x in range(length):\n r.append( chr(random.randrange(0,256)) )\n data = \"\".join(r)\n stamp = random.randrange(0,200)\n \n original = (stamp,data)\n\n Frame = Framing.SimpleFrame(*original)\n FrameForWire = str(Frame)\n result = Framing.SimpleFrame.fromString(FrameForWire)\n self.assertEqual(original, result, \"passed through unchanged\")\n\ndef makeTestCase(klass):\n class Component_ShutdownTests(unittest.TestCase):\n def test_smokeTest2(self):\n import random\n X = klass()\n X.activate()\n for i in range(random.randrange(0,2000)):\n try:\n next(X)\n except StopIteration:\n self.fail(\"Component should run until told to stop\")\n\n def test_smokeTest3(self):\n # NOTE: This test is actually satisfied if no body to the component is put in place\n import random\n X = klass()\n X.activate()\n X._deliver(producerFinished(),\"control\")\n componentExit = False\n for i in range(random.randrange(0,2000)):\n try:\n next(X)\n except StopIteration:\n componentExit = True\n break\n if not componentExit:\n self.fail(\"When sent a shutdown message, the component should shutdown\")\n\n def test_smokeTest4(self):\n import random\n X = klass()\n X.activate()\n X._deliver(\"BINGLE\",\"control\")\n componentExit = False\n for i in range(random.randrange(0,2000)):\n try:\n next(X)\n except StopIteration:\n componentExit = True\n break\n if componentExit:\n self.fail(\"Sending a random message to the control box should not cause a shutdown\")\n\n def test_smokeTest5(self):\n import random\n X = klass()\n\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n\n X.activate()\n shutdown_message = producerFinished()\n X._deliver(shutdown_message,\"control\")\n\n componentExit = False\n for i in range(random.randrange(0,2000)):\n try:\n next(X)\n except StopIteration:\n componentExit = True\n break\n try:\n# Y = X._collect(\"signal\")\n Y = Dummy.recv(\"control\")\n\n self.assertEqual(shutdown_message, Y)\n except IndexError:\n self.fail(\"Shutdown Message should be passed on\")\n return Component_ShutdownTests\n\n\nclass FramingComponent_Tests(unittest.TestCase):\n def test_smokeTest(self):\n X = Framing.Framer()\n X.activate()\n\n def test_marshalling(self):\n message = (1,'1')\n expect = str(Framing.SimpleFrame(*message))\n X = Framing.Framer()\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(expect, result)\n\n def test_marshalling_sequence(self):\n X = Framing.Framer()\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n for i in range(100):\n message = (i,str(i))\n expect = str(Framing.SimpleFrame(*message))\n 
X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(expect, result)\n\nclass DeFramingComponent_Tests(unittest.TestCase):\n def test_smokeTest(self):\n X = Framing.DeFramer()\n X.activate()\n\n def test_demarshalling(self):\n original = (1,'1')\n message = str(Framing.SimpleFrame(*original))\n X = Framing.DeFramer()\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X.activate()\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(original, result)\n\n def test_demarshalling_sequence(self):\n X = Framing.DeFramer()\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X.activate()\n for i in range(100):\n original = (i,str(i))\n message = str(Framing.SimpleFrame(*original))\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(original, result)\n\n def test_demarshalling_frame_missing_end(self):\n \"\"\"DeFramer, will not pass on a frame if a chunk is missing out of it\"\"\"\n X = Framing.DeFramer()\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X.activate()\n originals = []\n for i in range(1,10):\n original = (i, \"a\" * i)\n originals.append(original)\n message = str(Framing.SimpleFrame(*original))\n if i==5:\n message = message[:-2]\n X._deliver(message, \"inbox\")\n for i in range(200):\n next(X)\n for i in range(1,10):\n if i!=5:\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n# print result\n self.assertEqual(originals[0], result)\n del originals[0]\n\n \nFramerMarshall = makeTestCase(Framing.Framer)\nFramerDeMarshall = makeTestCase(Framing.DeFramer)\nDataChunkerBasics = makeTestCase(Framing.DataChunker)\nDataDeChunkerBasics = makeTestCase(Framing.DataDeChunker)\n\nclass DataChunker_test(unittest.TestCase):\n def test_smokeTest(self):\n X = Framing.DataChunker()\n X.activate()\n \n def test_makeChunk(self):\n message = \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*20\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n X = Framing.DataChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n result_start = result[:len(syncmessage)]\n result_message = result[len(syncmessage):]\n\n self.assertEqual(message, result_message)\n self.assertEqual(syncmessage, result_start)\n\n def test_makeChunk_oneSync(self):\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n message = \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*10\n message += syncmessage\n message += \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*10\n X = Framing.DataChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, 
\"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n\n result_message = result[len(syncmessage):]\n index = result_message.find(syncmessage)\n self.assertEqual(-1, index, \"Should not be able to find the syncmessage in the chunked version\")\n\n\nclass DataDeChunker_Basictest(unittest.TestCase):\n def makeBasicChunk(self, message, syncmessage):\n X = Framing.DataChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n return result\n\n def test_DeChunkFullChunk(self):\n message = \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*20\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n chunk = self.makeBasicChunk(message, syncmessage)\n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(chunk, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n X._deliver(\"junk\", \"flush\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(result, message)\n\n def test_DeChunkFullChunk_1(self):\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n message = \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*10\n message += syncmessage\n message += \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*10\n chunk = self.makeBasicChunk(message, syncmessage)\n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(chunk, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n X._deliver(\"junk\", \"flush\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(result, message)\n\n def test_DeChunkFullChunk_2(self):\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n message = \"1234567890%25qwertyuiopa\\Ssdfghjklzxcvbnm\\n\"*10\n message += syncmessage\n message += \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*10\n chunk = self.makeBasicChunk(message, syncmessage)\n \n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(chunk, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n X._deliver(\"junk\", \"flush\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(result, message)\n\n def test_DeChunkFullChunk_3(self):\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n message = \"1234567890qwertyu\\iopa\\Ssdfghjklzxcvbnm\\n\"*10\n message += syncmessage\n message += \"1234567890qwertyuiopasdfghjklzxcvbnm\\n\"*10\n chunk = 
self.makeBasicChunk(message, syncmessage)\n \n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(chunk, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n X._deliver(\"junk\", \"flush\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n self.assertEqual(result, message)\n\nclass DataDeChunker_BlocksTest(unittest.TestCase):\n\n def makeBasicChunk(self, message, syncmessage):\n X = Framing.DataChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n X._deliver(message, \"inbox\")\n for i in range(20): # More than sufficient cycles (should be lots less!)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n return result\n\n def blocks(self, someData, blocksize=20):\n data = someData\n while len(data) > blocksize:\n yield data[:blocksize]\n data = data[blocksize:]\n yield data\n\n def test_DeChunk_SingleChunk_ManyBlocks(self):\n \"The dechunker handles taking a chunk that's in many blocks and putting it back together\"\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n message = \"123\\\\S\\4567\\\\890qwer\\\\\\tyuiopasdfg\\\\\\\\hjklzxcvbnm\\n\"*10\n message += syncmessage\n message += \"1234567890q\\\\Swertyuiopasdfghjklzxcvbnm\\n\"*10\n \n chunk = self.makeBasicChunk(message, syncmessage)\n\n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n for block in self.blocks(chunk):\n X._deliver(block, \"inbox\")\n\n try:\n for i in range(200): # More than sufficient cycles (should be lots less...)\n next(X)\n X._deliver(\"junk\", \"flush\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n except Framing.IncompleteChunk:\n self.fail(\"IncompleteChunk exception should not propogate\")\n self.assertEqual(result, message)\n\n def test_DeChunk_RandomChunk_ManyBlocks(self):\n \"The dechunker handles taking a chunk that's in many blocks and putting it back together\"\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n message = (\"\".join([str(x) for x in range(10,50)]) + \"\\n\")*10\n chunk = self.makeBasicChunk(message, syncmessage)\n\n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n for block in self.blocks(chunk,blocksize=1):\n X._deliver(block, \"inbox\")\n\n try:\n for i in range(2000): # More than sufficient cycles (should be lots less...)\n next(X)\n X._deliver(\"junk\", \"flush\")\n for i in range(20): # More than sufficient cycles (should be lots less...)\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n except Framing.IncompleteChunk:\n self.fail(\"IncompleteChunk exception should not propogate\")\n self.assertEqual(result, message)\n\n def test_DeChunk_MultipleChunks_ManyBlocks(self):\n \"The dechunker handles taking many chunks that are in many blocks and putting it back together\"\n syncmessage = 
\"XXXXXXXXXXXXXXXXXXXXXXX\"\n \n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n\n for base in range(10,1000,50):\n message = (\"\".join([str(x) for x in range(base,base+50)]) + \"\\n\")*10\n chunk = self.makeBasicChunk(message, syncmessage)\n\n for block in self.blocks(chunk,blocksize=1):\n X._deliver(block, \"inbox\")\n\n try:\n for i in range(2000): # Run for a significant time period! (Chunk might be big)\n next(X)\n X._deliver(\"junk\", \"flush\")\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n except Framing.IncompleteChunk:\n self.fail(\"IncompleteChunk exception should not propogate\")\n self.assertEqual(result, message)\n\n\n def test_non_aligned_chunk_blocks(self):\n \"The dechunker handles taking many chunks that are in many blocks and putting it back together\"\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n \n X = Framing.DataDeChunker(syncmessage=syncmessage)\n Dummy = Axon.Component.component()\n X.link((X, \"outbox\"),(Dummy, \"inbox\"))\n X.link((X, \"signal\"),(Dummy, \"control\"))\n X.activate()\n message = (\"\".join([str(x) for x in range(10,60)]) + \"\\n\")*10\n chunk = self.makeBasicChunk(message, syncmessage)\n\n blockgen = self.blocks(chunk,blocksize=20)\n for _ in range(5):\n next(blockgen) # throw away first 5 blocks of a chunk\n\n for block in blockgen: # This chunk, since it lacks a start should be ignored\n X._deliver(block, \"inbox\")\n \n # The next section assumes by the way the testing happens that the partial\n # chunk above sent to the component is ignored - which is the desired\n # behaviour we're testing\n \n for base in range(10,1000,50):\n message = (\"\".join([str(x) for x in range(base,base+50)]) + \"\\n\")*10\n chunk = self.makeBasicChunk(message, syncmessage)\n\n for block in self.blocks(chunk,blocksize=20):\n X._deliver(block, \"inbox\")\n\n try:\n for i in range(2000): # Run for a significant time period! 
(Chunk might be big)\n next(X)\n X._deliver(\"junk\", \"flush\")\n next(X)\n# result = X._collect(\"outbox\")\n result = Dummy.recv(\"inbox\")\n except Framing.IncompleteChunk:\n self.fail(\"IncompleteChunk exception should not propogate\")\n self.assertEqual(result, message)\n\n\n\n def test_Message_chunkDeChunk_remainsintact(self):\n from Kamaelia.Chassis.Pipeline import Pipeline\n syncmessage = \"XXXXXXXXXXXXXXXXXXXXXXX\"\n File = open(\"../../Examples/SimpleGraphicalApps/Ticker/Ulysses\").read()\n chunks = [File[y:y+20] for y in range(0,len(File),20) ]\n \n chunker = Framing.DataChunker(syncmessage=syncmessage)\n dechunker = Framing.DataDeChunker(syncmessage=syncmessage)\n system = Pipeline(\n chunker, \n dechunker, \n ).activate()\n Dummy = Axon.Component.component()\n system.link((system, \"outbox\"),(Dummy, \"inbox\"))\n system.link((system, \"signal\"),(Dummy, \"control\"))\n \n for chunk in chunks:\n system._deliver(chunk, \"inbox\")\n \n activeScheduler = system.schedulerClass.run.main()\n for _ in range(2000):\n next(activeScheduler)\n\n resultchunks = []\n try:\n while 1:\n# chunk = system._collect(\"outbox\")\n chunk = Dummy.recv(\"inbox\")\n resultchunks.append(chunk)\n except IndexError:\n pass # We collect all items in the outbox\n \n result = \"\".join(resultchunks)\n self.assertEqual(File[:20],result[:20])\n\n\nif 0:\n class default_test(unittest.TestCase):\n def test_marshalling(self):\n X = Framing.Framer()\n self.fail(\"Test Not Implemented\")\n\nif __name__==\"__main__\":\n unittest.main()\n","sub_path":"Test/Protocol/test_framing.py","file_name":"test_framing.py","file_ext":"py","file_size_in_byte":26146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"603512358","text":"import collections\nimport logging\n\nfrom django import forms\nfrom govuk_forms.widgets import NumberInput\n\nfrom application.business_logic import dbs_matches_childminder_dbs, find_dbs_status, DBSStatus\nfrom application.forms.PITH_forms.PITH_base_forms.PITH_childminder_form_retrofit import PITHChildminderFormAdapter\nfrom application.models import Application\n\nlog = logging.getLogger(__name__)\n\n\nclass PITHDBSCheckForm(PITHChildminderFormAdapter):\n \"\"\"\n GOV.UK form for the People in the Home: Non generic form for the DBSCheckView.\n \"\"\"\n field_label_classes = 'form-label-bold'\n error_summary_title = 'There was a problem on this page'\n error_summary_template_name = 'PITH_templates/PITH_error_summary.html'\n auto_replace_widgets = True\n\n def __init__(self, *args, **kwargs):\n\n self.application_id = kwargs.pop('id')\n self.adult = kwargs.pop('adult')\n self.dbs_field = kwargs.pop('dbs_field')\n self.pk = self.adult.pk\n\n self.dbs_field_name = self.dbs_field + str(self.adult.pk)\n\n self.base_fields = collections.OrderedDict([\n (self.dbs_field_name, self.get_dbs_field_data()),\n ])\n\n super().__init__(*args, **kwargs)\n\n self.field_list = [*self.fields]\n\n def get_dbs_field_data(self):\n dbs_certificate_number_widget = NumberInput()\n dbs_certificate_number_widget.input_classes = 'inline form-control form-control-1-4'\n\n return forms.IntegerField(label='DBS certificate number',\n help_text='12-digit number on their certificate',\n required=False,\n error_messages={'required': 'Please enter their DBS certificate number'},\n widget=dbs_certificate_number_widget)\n\n def clean(self):\n \"\"\"\n Nullify fields\n DBS field validation\n Fetch DBS record from capita list\n :return: cleaned data\n \"\"\"\n super().clean()\n\n 
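The DataChunker/DataDeChunker tests above require that the sync marker introduce each chunk and never occur inside an encoded payload. Kamaelia's real escaping differs in detail (the test payloads hint at a backslash scheme), so this is only a minimal illustration of the idea:

```python
SYNC = "XXXXXXXXXXXXXXXXXXXXXXX"  # 23-byte marker, as in the tests

def chunk(data):
    # Escape backslashes first, then every marker byte, so a raw run of
    # uppercase X's (and hence the marker) cannot appear in the payload.
    return SYNC + data.replace("\\", "\\\\").replace("X", "\\x")

def dechunk(chunked):
    assert chunked.startswith(SYNC)
    body, out, i = chunked[len(SYNC):], [], 0
    while i < len(body):
        if body[i] == "\\":          # unescape pairwise, left to right
            out.append("X" if body[i + 1] == "x" else body[i + 1])
            i += 2
        else:
            out.append(body[i])
            i += 1
    return "".join(out)

msg = "payload " + SYNC + " with an embedded marker"
assert SYNC not in chunk(msg)[len(SYNC):]
assert dechunk(chunk(msg)) == msg
```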
cleaned_dbs_field = self.data[self.dbs_field_name] \\\n if self.data[self.dbs_field_name] != \"\" \\\n else None\n application = Application.objects.get(application_id=self.application_id)\n\n self.clean_dbs(cleaned_dbs_field, self.dbs_field, application)\n\n log.debug(self.errors.get(self.dbs_field_name, ()))\n # if dbs number looks ok, fetch dbs record for further checking\n if len(self.errors.get(self.dbs_field_name, ())) == 0:\n\n # find status will perform dbs lookup and store result\n dbs_status = find_dbs_status(self.adult, self.adult, dbs_certificate_number=cleaned_dbs_field)\n\n if dbs_status == DBSStatus.DOB_MISMATCH:\n self.add_error(self.dbs_field,\n \"\"\"\n Check your DBS certificate. The number you entered does not match your number held by DBS.\n \"\"\")\n\n return self.cleaned_data\n\n def clean_dbs(self, cleaned_dbs_value, field_name, application):\n\n if cleaned_dbs_value is None:\n self.add_error(field_name, 'Please enter their DBS certificate number')\n elif len(str(cleaned_dbs_value)) != 12:\n self.add_error(field_name, 'Check the certificate: the number should be 12 digits long')\n elif dbs_matches_childminder_dbs(application, cleaned_dbs_value):\n self.add_error(field_name, 'Please enter a different DBS number. '\n 'You entered this number for someone in your childcare location')\n # check for duplicate dbs numbers amongst adults in home is done in the view's validate_form_list function\n","sub_path":"application/forms/PITH_forms/PITH_DBS_check_form.py","file_name":"PITH_DBS_check_form.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191139164","text":"#\n# @lc app=leetcode id=75 lang=python3\n#\n# [75] Sort Colors\n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n def sortColors(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n index = [0, 0, 0]\n for i in nums:\n if(i == 0):\n index[0] += 1\n elif(i == 1):\n index[1] += 1\n else:\n index[2] += 1\n first = index[0]\n second = index[0] + index[1]\n for i in range(len(nums)):\n if(i < first):\n nums[i] = 0\n elif(i < second):\n nums[i] = 1\n else:\n nums[i] = 2\n# @lc code=end\nprint(Solution().sortColors([2,0,2,1,1,0]))\n","sub_path":"heregreat/python/75.sort-colors.py","file_name":"75.sort-colors.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"358342160","text":"from flask_restful import Resource, reqparse\n\nfrom models.store import StoreModel\n\nclass Store(Resource):\n \"\"\" Resource for particular store \"\"\"\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"name\", type=str, required=True, help=\"name of the store\")\n\n def get(self, name):\n \"\"\" endpoint for getting one store \"\"\"\n\n # try to find the store\n try:\n store = StoreModel.find_by_name(name)\n except Exception as e:\n return {\"message\" : \"error occured when finding store\"}, 500\n # if found, return it\n if store:\n return store.json(), 200 \n # i not found, return 404\n else:\n return {\"message\" : \"store not found\"}, 404\n\n def post(self, name):\n \"\"\" endpoint for creating new store \"\"\"\n\n # parse the data sent\n # data = self.parser.parse_args() # {\"name\" : \"WallMart\"}\n\n # check if the store exists already\n store = StoreModel.find_by_name(name)\n if store:\n return {\"message\" : \"store with name already exists\"}, 400\n\n # if not 
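The sort-colors record above sorts with two counting passes; the classic single-pass alternative is the Dutch national flag partition, sketched here:

```python
from typing import List

def sort_colors(nums: List[int]) -> None:
    """One pass: 0s are swapped to the front, 2s to the back, 1s stay put."""
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        if nums[mid] == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif nums[mid] == 1:
            mid += 1
        else:  # nums[mid] == 2
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1

nums = [2, 0, 2, 1, 1, 0]
sort_colors(nums)
print(nums)  # [0, 0, 1, 1, 2, 2]
```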
create new one\n        new_store = StoreModel(name)\n        # save to db\n        try:\n            new_store.save_to_db()\n        except Exception as e:\n            return {\"message\" : \"error when adding to db\"}, 500\n        # return result\n        return new_store.json(), 201\n\n    def delete(self, name):\n        \"\"\" endpoint for deleting item from db \"\"\"\n\n        # try to find it\n        try:\n            store = StoreModel.find_by_name(name)\n        except Exception as e:\n            return {\"message\" : \"error when finding the store\"}, 500\n        # if found, delete it\n        if store:\n            store.delete_from_db()\n        else:\n            return {\"message\" : \"store not found\"}, 404\n\n        return {\"message\" : \"item deleted\"}, 200\n\n    def put(self, name):\n        \"\"\" endpoint for upserting item \"\"\"\n        \n        # parse the data sent\n        data = self.parser.parse_args() # {\"name\" : \"Walmart\"}\n\n        # try to find the store\n        try:\n            store = StoreModel.find_by_name(name)\n        except Exception as e:\n            return {\"message\" : f\"error {e} when finding the store\"}, 500\n        # if found, update\n        if store:\n            store.name = data[\"name\"]\n        # if not found, create\n        else:\n            store = StoreModel(name)\n        # save to db\n        try:\n            store.save_to_db()\n        except Exception as e:\n            return {\"message\" : f\"error {e} when saving to db\"}, 500\n        # return result\n        return store.json(), 200\n\n    \nclass Stores(Resource):\n    \"\"\" Resource for all stores \"\"\"\n\n    def get(self):\n        \"\"\" endpoint for getting all stores - return ID and name \"\"\"\n\n        stores = StoreModel.query.all()\n        print(stores)\n        return {\"stores\": [store.json() for store in StoreModel.query.all()]}, 200","sub_path":"section10/resources/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260744471","text":"from airflow import DAG\nfrom airflow.operators.datafy_spark_plugin import DatafySparkSubmitOperator\nfrom datetime import datetime, timedelta\n\n\ndefault_args = {\n    \"owner\": \"Datafy\",\n    \"depends_on_past\": False,\n    \"start_date\": datetime(year=2020, month=9, day=15),\n    \"email\": [],\n    \"email_on_failure\": False,\n    \"email_on_retry\": False,\n    \"retries\": 0,\n    \"retry_delay\": timedelta(minutes=5),\n}\n\n\nimage = \"{{ macros.image('airquality') }}\"\nrole = \"datafy-dp-{{ macros.env() }}/spark-iam-role-glue-{{ macros.env() }}\"\n\ndag = DAG(\n    \"airquality\", default_args=default_args, schedule_interval=\"@daily\", max_active_runs=1\n)\n\nexecutor_memory = \"1G\"\ningest_task = DatafySparkSubmitOperator(\n    dag=dag,\n    task_id=\"ingest\",\n    num_executors=\"1\",\n    executor_memory=executor_memory,\n    driver_memory=\"512M\",\n    env_vars={\"AWS_REGION\": \"eu-west-1\"},\n    conf={\n        \"spark.kubernetes.container.image\": image,\n        \"spark.kubernetes.driver.annotation.iam.amazonaws.com/role\": role,\n        \"spark.kubernetes.executor.annotation.iam.amazonaws.com/role\": role,\n    },\n    spark_main_version=2,\n    application=\"/opt/spark/work-dir/src/airquality/app.py\",\n    application_args=[\"--date\", \"{{ ds }}\", \"--jobs\", \"ingest\", \"--env\", \"{{ macros.env() }}\"],\n)\n\nclean_task = DatafySparkSubmitOperator(\n    dag=dag,\n    task_id=\"clean\",\n    num_executors=\"1\",\n    executor_memory=executor_memory,\n    driver_memory=\"512M\",\n    env_vars={\"AWS_REGION\": \"eu-west-1\"},\n    conf={\n        \"spark.kubernetes.container.image\": image,\n        \"spark.kubernetes.driver.annotation.iam.amazonaws.com/role\": role,\n        \"spark.kubernetes.executor.annotation.iam.amazonaws.com/role\": role,\n    },\n    spark_main_version=2,\n    application=\"/opt/spark/work-dir/src/airquality/app.py\",\n
application_args=[\"--date\", \"{{ ds }}\", \"--jobs\", \"clean\", \"--env\", \"{{ macros.env() }}\"],\n)\n\nfilter_belgium_task = DatafySparkSubmitOperator(\n dag=dag,\n task_id=\"filter_belgium\",\n num_executors=\"1\",\n executor_memory=executor_memory,\n driver_memory=\"512M\",\n env_vars={\"AWS_REGION\": \"eu-west-1\"},\n conf={\n \"spark.kubernetes.container.image\": image,\n \"spark.kubernetes.driver.annotation.iam.amazonaws.com/role\": role,\n \"spark.kubernetes.executor.annotation.iam.amazonaws.com/role\": role,\n },\n spark_main_version=2,\n application=\"/opt/spark/work-dir/src/airquality/app.py\",\n application_args=[\"--date\", \"{{ ds }}\", \"--jobs\", \"filter_belgium\", \"--env\", \"{{ macros.env() }}\"],\n)\n\ningest_task >> clean_task >> filter_belgium_task","sub_path":"airquality/dags/airquality.py","file_name":"airquality.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326518009","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 18 07:05:41 2018\n\n@author: aruna\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Importing the dataset\ndataset = pd.read_csv(\"E:/AI_ML_DA/R_Python_Code_Datasets/DataSets/Machine Learning A-Z Template Folder/Part 2 - Regression/Section 4 - Simple Linear Regression/Salary_Data.csv\")\ndataset.head()\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,1].values\n\n\n#Splitting the dataset into Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 1/3,random_state = 0)\n\n# Feature Scalng\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test) #We don not need to fit the sc_X object to the test set coz it is already fitted to the Training Set\n\"\"\"\n\n# Fitting simple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train,y_train)\n\n# Predicting the Test set result\n# Vector of predicted values\n# y_pred will contain all the predicted values for salaries\ny_pred = regressor.predict(X_test)\n\n# Visualising the Training set results\nplt.scatter(X_train,y_train, color = 'red')\nplt.plot(X_train,regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experience (Training Set)')\nplt.xlabel('Year of Experience')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualising the Test set results\nplt.scatter(X_test,y_test, color = 'red')\nplt.plot(X_train,regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experience (Test Set)')\nplt.xlabel('Year of Experience')\nplt.ylabel('Salary')\nplt.show()\n","sub_path":"Simple_Linear_Regression_Udemy.py","file_name":"Simple_Linear_Regression_Udemy.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243928750","text":"import time\nimport json\nimport logging\nimport warnings\nimport os\nimport requests\nfrom typing import Any, List, Optional, Text, Dict\n\n\nfrom rasa.constants import DOCS_URL_COMPONENTS\nfrom rasa.nlu.constants import ENTITIES\n\nfrom rasa.nlu.config import RasaNLUModelConfig\n\nfrom rasa.nlu.model import Metadata\nfrom rasa.nlu.training_data import Message\n\nimport recognizers_suite as Recognizers\nfrom recognizers_suite import Culture, ModelResult\n\nimport 
traceback\n\nfrom rasa.nlu.components import Component\nfrom rasa.nlu import utils\nfrom rasa.nlu.model import Metadata\nfrom rasa.nlu.extractors.extractor import EntityExtractor\n\n\nlogger = logging.getLogger(__name__)\n\nclass MSRTExtractor(EntityExtractor):\n \"\"\"\n Entity Extractor built using Microsoft's Recognizers Text Package.\n \n It provides robust recognition and resolution of entities like numbers,\n units, and date/time;\n \n for more info. check here: https://github.com/microsoft/Recognizers-Text\n \"\"\"\n\n def __init__(self, component_config=None):\n super(MSRTExtractor, self).__init__(component_config)\n\n def add_extractor_name(\n self, entities: List[Dict[Text, Any]]\n ) -> List[Dict[Text, Any]]:\n \"\"\"\n Adds the Extractor name to the Message class during the prediction.\n \"\"\"\n for entity in entities:\n entity[\"extractor\"] = self.name\n return entities\n\n\n @staticmethod\n def convert_to_rasa(\n matches: List[Dict[Text, Any]]\n ) -> List[Dict[Text, Any]]:\n \"\"\"\n Method to convert the extracted entities from Recognizer Text to Rasa Format.\n\n Once the recognizer model has parsed all the entities,\n then we can convert the extracted entities to rasa format.\n \"\"\"\n extracted = []\n for match in matches:\n entity={}\n entity[\"start\"]= match[\"start\"] \n entity[\"end\"]= match[\"end\"]\n entity[\"text\"]=match[\"text\"]\n entity[\"entity\"]= match[\"type_name\"]\n\n if \"values\" in match[\"resolution\"]:\n \n \n if match[\"type_name\"]==\"datetimeV2.datetime\":\n entity[\"entity\"]=match[\"resolution\"][\"values\"][-1][\"type\"]\n entity[\"value\"]=match[\"resolution\"][\"values\"][-1][\"value\"]\n\n elif match[\"type_name\"]==\"datetimeV2.date\":\n entity[\"entity\"]=match[\"resolution\"][\"values\"][-1][\"type\"]\n entity[\"value\"]=match[\"resolution\"][\"values\"][-1][\"value\"]\n\n elif match[\"type_name\"]==\"datetimeV2.time\":\n entity[\"entity\"]=match[\"resolution\"][\"values\"][-1][\"type\"]\n entity[\"value\"]=match[\"resolution\"][\"values\"][-1][\"value\"] \n \n elif match[\"type_name\"]==\"datetimeV2.timerange\" or match[\"type_name\"]==\"datetimeV2.datetimerange\":\n entity[\"entity\"]=match[\"resolution\"][\"values\"][-1][\"type\"]\n entity[\"value\"]={\n \"start_time\":match[\"resolution\"][\"values\"][-1][\"start\"],\n \"end_time\":match[\"resolution\"][\"values\"][-1][\"end\"]\n }\n\n elif match[\"type_name\"]==\"datetimeV2.daterange\":\n entity[\"entity\"]=match[\"resolution\"][\"values\"][-1][\"type\"]\n entity[\"value\"]={}\n if \"start\" in match[\"resolution\"][\"values\"][-1]:\n entity[\"value\"].update({\"start_date\":match[\"resolution\"][\"values\"][-1][\"start\"]})\n\n if \"end\" in match[\"resolution\"][\"values\"][-1]:\n entity[\"value\"].update({\"end_date\":match[\"resolution\"][\"values\"][-1][\"end\"]})\n \n if \"start\" not in match[\"resolution\"][\"values\"][-1] and \"end\" not in match[\"resolution\"][\"values\"][-1]:\n entity[\"value\"]=match[\"resolution\"][\"values\"][-1]\n \n else:\n entity[\"value\"]=match[\"resolution\"][\"values\"]\n else:\n entity[\"value\"]=match[\"resolution\"][\"value\"]\n\n extracted.append(entity)\n\n return extracted \n\n @staticmethod\n def _parse_all_entities(user_input: str, \n culture: str) -> List[Dict[Text, Any]]:\n \"\"\"\n This is the main method that does the entity extraction work.\n\n For more details: https://github.com/Microsoft/Recognizers-Text/tree/master/Python#api-documentation\n \"\"\"\n\n return [\n # Number recognizer - This function will find any number from 
the input\n # E.g \"I have two apples\" will return \"2\".\n Recognizers.recognize_number(user_input, culture),\n\n # Ordinal number recognizer - This function will find any ordinal number\n # E.g \"eleventh\" will return \"11\".\n Recognizers.recognize_ordinal(user_input, culture),\n\n # Percentage recognizer - This function will find any number presented as percentage\n # E.g \"one hundred percents\" will return \"100%\"\n Recognizers.recognize_percentage(user_input, culture),\n\n # Age recognizer - This function will find any age number presented\n # E.g \"After ninety five years of age, perspectives change\" will return\n # \"95 Year\"\n Recognizers.recognize_age(user_input, culture),\n\n # Currency recognizer - This function will find any currency presented\n # E.g \"Interest expense in the 1988 third quarter was $ 75.3 million\"\n # will return \"75300000 Dollar\"\n Recognizers.recognize_currency(user_input, culture),\n\n\n # Temperature recognizer - This function will find any temperature presented\n # E.g \"Set the temperature to 30 degrees celsius\" will return \"30 C\"\n Recognizers.recognize_temperature(user_input, culture),\n\n # DateTime recognizer - This function will find any Date even if its write in colloquial language -\n # E.g \"I'll go back 8pm today\" will return \"2017-10-04 20:00:00\"\n Recognizers.recognize_datetime(user_input, culture),\n\n # PhoneNumber recognizer will find any phone number presented\n # E.g \"My phone number is ( 19 ) 38294427.\"\n Recognizers.recognize_phone_number(user_input, culture),\n\n # Email recognizer will find any phone number presented\n # E.g \"Please write to me at Dave@abc.com for more information on task\n # #A1\"\n Recognizers.recognize_email(user_input, culture),\n ]\n\n @staticmethod\n def _parse_entiities(self,text: Text,language: str) -> List[Dict[Text, Any]]:\n \"\"\"pass the user input to the recognizer model to parse the entities.\n \n Required Parameter: \n\n @user_input -> user entered text,\n \n @lanugage -> lanugage for prediction\n \"\"\"\n\n try:\n results = self._parse_all_entities(text, language)\n # Flatten results\n results = [item for sublist in results for item in sublist]\n results=json.dumps( results, default=lambda o: o.__dict__, indent='\\t', ensure_ascii=False)\n return self.convert_to_rasa(json.loads(results))\n \n except:\n logger.error(\n \"Failed to parse entities from recognizer model.\"\n \"Error: {}\".format(traceback.format_exc())\n )\n return []\n\n def process(self, message, **kwargs):\n \"\"\"Retrieve the text message, pass it to the text recognizer\n and append the extracted entities to the message class.\"\"\"\n\n\n language = Culture.English\n extracted_entities = self._parse_entiities(self,message.text,language)\n extracted_entities = self.add_extractor_name(extracted_entities)\n\n message.set(\n ENTITIES,\n message.get(ENTITIES, []) + extracted_entities,\n add_to_output=True,\n )\n","sub_path":"MSRTEntityExtractor.py","file_name":"MSRTEntityExtractor.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"628788631","text":"###########################################################\n# CIS 117 Internet Programming: PYTHON Lab #5\n#\n# Programming with Files; Dictionaries\n###########################################################\nimport re\n\nindexEntries = []\ndictionary = {}\nlineNumber = 0\n\ndef main():\n filename = input(\"Enter a filename: \")\n openFile(filename)\n for key, value in 
dictionary.items():\n        print(key, value)\n\ndef openFile(filename):\n    openedFile = open(filename, 'r')\n    for line in openedFile:\n        line = line.rstrip('\\n')\n        addToDictionary(line)\n    openedFile.close()\n\ndef addToDictionary(line):\n    global lineNumber\n    pattern = re.compile(\"^[A-Za-z0-9_]*$\")\n    if re.match(pattern, line) is not None:\n        if line in dictionary:\n            #add line to dictionary\n            lineSet = dictionary[line]\n            lineSet.add(lineNumber)\n            lineNumber += 1\n        else:\n            lineSet = set([lineNumber])\n            dictionary[line] = lineSet\n            lineNumber += 1\nmain()\n\n######################################################################\n#Enter a filename: t5.py\n#carrot {9}\n#1 {1, 2}\n#ant {11, 12, 13}\n#rat {6}\n#pen {16, 5}\n#stick {15}\n#orange {10}\n#_ {17}\n#art {4}\n#goodbye {18}\n#apple {0, 8}\n#4 {7}\n#ball {3, 14}\n######################################################################\n","sub_path":"Python/WilliamKimLab5.py","file_name":"WilliamKimLab5.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164305520","text":"from typing import Optional\n\nimport singlish.builtin\nimport singlish.exceptions\n\nclass Person:\n\n    def __init__(self, name: str, age: int):\n        self.name = name\n        self.age = age\n\n\nclass Singaporean(Person):\n\n    # See https://en.wikipedia.org/wiki/Five_Cs_of_Singapore\n    MUST_HAVES = ['car', 'cash', 'condominium', 'country club', 'credit card']\n\n    def __init__(self, name: str, age: int, properties: Optional[dict]):\n        # use parent class's constructor via `singlish.builtin.limpeh`.\n        # steady, right?\n        singlish.builtin.limpeh(Singaporean, self).__init__(name, age)\n        if properties:\n            self.properties = properties\n\n    def _rating(self) -> int:\n        # returns sum (total count) of the MUST_HAVE properties s/he owns\n        # where `singlish.builtin.total()` is sum()\n        return singlish.builtin.total(\n            [\n                bool(item in self.properties)\n                for item in Singaporean.MUST_HAVES\n            ]\n        )\n\n    def is_eligible(self) -> bool:\n        # check if key is in dict via `singlish.builtin.got_anot`\n        if not singlish.builtin.got_anot(self, 'properties'):\n            # this one does not own anything; how to marry liddat?\n            return False\n\n        # check len via `singlish.builtin.how_many`\n        if singlish.builtin.how_many(self.properties) >= 10:\n            # Ho' say ah! Don't wait oready\n            return True\n\n        # only eligible if s/he owns at least 3 of the MUST_HAVES\n        return self._rating() >= 3\n\n    @property\n    def atas(self) -> bool:\n        if self._rating() >= len(Singaporean.MUST_HAVES):\n            return True\n\n        condominium = self.properties.get('condominium')\n        if not condominium:\n            return False\n        return condominium.startswith('One')\n\n    # the annotation is quoted: the class name is not bound yet inside its own body\n    def marry(self, partner: 'Singaporean'):\n        try:\n            assert self.properties['ring'] == partner.properties['ring']\n        except KeyError as ke:\n            # wedding ring leh, how to marry?\n            raise singlish.exceptions.CannotFindLeh(ke)\n        except AssertionError as ae:\n            # no same ring!
how liddat?\n raise singlish.exceptions.ZhunBoh(ae)\n\n if not self.is_eligible():\n # don't understand the logic; why would the partner marry?\n raise singlish.exceptions.CatchNoBall()\n\n if (partner.atas and self.atas) or (partner.is_eligible() and self.is_eligible()):\n\n diff = abs(self._rating() - partner._rating())\n if diff >= 3:\n # different class; you sure anot?\n raise singlish.exceptions.YouGotStudyOneAnot()\n\n # Government sure allow; proceed to purchase your BTO flat, can?\n return\n","sub_path":"examples/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311206123","text":"\nimport requests\nfrom lxml import etree\n\nBASS_DOMAIN = 'https://www.dytt8.net'\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3679.0 Safari/537.36'\n}\ndef get_detail_urls(url):\n # url = \"https://www.dytt8.net/html/gndy/dyzz/list_23_1.html\"\n response = requests.get(url, headers=HEADERS)\n text = response.text\n # print(response.content.decode('gbk'))\n html = etree.HTML(text)\n detail_urls = html.xpath(\"//table[@class='tbspan']//a/@href\")#是一个列表\n\n # def asd(url)\n # return BASS_DOMAIN+url\n #\n # index = 0\n # for detail_url in detail_urls:\n # detail_url = asd(detail_url)\n # detail_urls[index] = detail_url\n # index += 1\n #18-26行相当于第29行\n\n #map(一个函数,一个序列)\n # 把列表中的每个值依次经过前面的函数计算,再传出\n #lambda相当于出传入url,传出BASS_DOMAIN+url\n detail_urls = map(lambda url:BASS_DOMAIN+url,detail_urls)\n return detail_urls\n\n\ndef parse_detail_page(url):\n movie = {}\n response = requests.get(url,headers=HEADERS)\n text = response.content.decode('gbk')\n html = etree.HTML(text)\n # print(html)\n title = html.xpath(\"//div[@class='title_all']//font[@color='#07519a']/text()\")[0]\n movie['标题']= title\n zoom = html.xpath(\"//div[@id='Zoom']\")[0]\n img = zoom.xpath(\".//img/@src\")\n # //img[@src=\"???\"] 是给定img下属性src的值\n # //img/@src 是获取img下属性src的值\n # @后加属性\n cover = img[0]#第一个值\n movie['图片']= cover\n screenshot = img[1]#第二个值\n movie['截图']= screenshot\n infos = zoom.xpath(\".//text()\")\n for index,info in enumerate(infos):\n if info.startswith(\"◎年  代\"):\n info = info.replace(\"◎年  代\",\"\").strip()\n movie['年代'] = info\n elif info.startswith(\"◎类  别\"):\n info = info.replace(\"◎类  别\", \"\").strip()\n movie['类别'] = info\n elif info.startswith(\"◎产  地\"):\n info = info.replace(\"◎产  地\", \"\").strip()\n movie['产地'] = info\n elif info.startswith(\"◎豆瓣评分\"):\n info = info.replace(\"◎豆瓣评分\", \"\").strip()\n movie['豆瓣评分'] = info\n elif info.startswith(\"◎片  长\"):\n info = info.replace(\"◎片  长\", \"\").strip()\n movie['片长'] = info\n elif info.startswith(\"◎主  演\"):\n info = info.replace(\"◎主  演\", \"\").strip()\n actors = [info]\n for x in range(index+1,len(infos)):\n actor = infos[x].strip()\n if actor.startswith(\"◎\"):\n break\n actors.append(actor)\n movie['主演'] = actors\n download_url =zoom.xpath(\".//p/a/@href\")\n movie['磁力链接'] = download_url\n # print(movie)\n return movie\n\n\ndef spider():\n base_url = \"https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html\"\n movies = []\n for x in range(1,2):\n #一共有7页\n url = base_url.format(x)\n detail_urls = get_detail_urls(url)\n for detail_url in detail_urls:\n #遍历每一页中的电影详情url\n movie = parse_detail_page(detail_url)\n movies.append(movie)\n # break\n # break\n print(movie)\n\nif __name__ == '__main__':\n 
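# entry point: crawl each listing page, parse every movie detail page, print the result\n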
spider()\n\n\n\n","sub_path":"数据提取/lxml/dytt8_spider.py","file_name":"dytt8_spider.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"453473949","text":"from __future__ import division\nimport sys, argparse, math, json, os, colorsys\nfrom PIL import Image\n\n# scale image\ndef scale_image(image, palette, blockSize, mode='rgb'):\n    # PIL reports size as (width, height); pixelate the image in place, block by block\n    (orginal_with, orginal_height) = image.size\n    rgb = image.load()\n    blocksX = int(math.ceil(orginal_with / blockSize))\n    blocksY = int(math.ceil(orginal_height / blockSize))\n    hexdict = {}\n\n    for x in range(blocksX):\n        xOffset = x * blockSize\n        for y in range(blocksY):\n            yOffset = y * blockSize\n\n            container = []\n            for xi in range(blockSize):\n                if (xi + xOffset) >= orginal_with: break\n                for yi in range(blockSize):\n                    if (yi + yOffset) >= orginal_height: break\n                    container.append(rgb[xi+xOffset,yi+yOffset])\n\n\n            avg_alpha = int(round(sum(list(zip(*container))[3]) / len(container)))\n\n            if 'hsv' == mode:\n                container = list(map(lambda co: rbg_to_hsv(*(co[:3])), container))\n            if 'hls' == mode:\n                container = list(map(lambda co: rbg_to_hls(*(co[:3])), container))\n\n            color = averagePixel(container, mode)\n\n            if palette: color = getClosestColor(color, palette, hexdict, mode)\n\n            if 'hsv' == mode:\n                color = list(hsv_to_rbg(*color))\n            if 'hls' == mode:\n                color = list(hls_to_rbg(*color))\n\n            color.append(avg_alpha)\n            color = tuple(map(lambda co: int(round(co)), color))\n\n            for xi in range(blockSize):\n                if (xi + xOffset) >= orginal_with: break\n                for yi in range(blockSize):\n                    if (yi + yOffset) >= orginal_height: break\n                    rgb[xi + xOffset, yi + yOffset] = color\n\n    # every pixel was rewritten through the pixel access object\n    return image\n\n'''Convert from decimal back to 0-255 mode'''\ndef hls_to_rbg(h, l, s):\n    return map(lambda x: int(x * 255.0), colorsys.hls_to_rgb(h, l, s))\ndef hsv_to_rbg(h, s, v):\n    return map(lambda x: int(x * 255.0), colorsys.hsv_to_rgb(h, s, v))\ndef rbg_to_hsv(r, b, g):\n    return colorsys.rgb_to_hsv(r/255, b/255, g/255)\ndef rbg_to_hls(r, b, g):\n    return colorsys.rgb_to_hls(r/255, b/255, g/255)\n\ndef getHex(color, mode='rbg'):\n    if 'hsv' == mode:\n        rgb = hsv_to_rbg(*(color[:3]))\n    elif 'hls' == mode:\n        rgb = hls_to_rbg(*(color[:3]))\n    else:\n        rgb = color[:3]\n    return ''.join(map(lambda t: hex(int(t)).split('x', 1)[1], rgb))\n\ndef colorDiff(c1, c2): # Calculates difference betwixt two colors\n    return sum(map(lambda x: (x[0] - x[1])**2, list(zip(c1[:3], c2[:3]))))\n\ndef colorDiffWeighted(c1, c2, mode='hsv'):\n    diff_pix = list(map(lambda x: abs(x[0] - x[1]), list(zip(c1[:3], c2[:3]))))\n    return (diff_pix[0] * 10) + (diff_pix[1] * 10) + (diff_pix[2] * 10)\n\ndef averagePixel(data, mode='rgb'):\n    if 'rgb' == mode:\n        return [int(round(sum(x) / len(data))) for x in list(zip(*data))[:3]]\n    else:\n        return [sum(x) / len(data) for x in list(zip(*data))[:3]]\n\ndef getClosestColor(color, palette, hexdict, mode='rgb'):\n    hexval = getHex(color, mode)\n    if hexval not in hexdict:\n        if mode != 'rgb': diff_func = colorDiffWeighted\n        else: diff_func = colorDiff\n        hexdict[hexval] = min(palette, key=lambda c: diff_func(color, c))\n    return list(hexdict[hexval])\n\ndef generatePalette(image, mode='rgb'):\n    if 'hsv' == mode:\n        transform = lambda _, rgb: list(rbg_to_hsv(*rgb))\n    elif 'hls' == mode:\n        transform = lambda _, rgb: list(rbg_to_hls(*rgb))\n    else:\n        transform = lambda _, rgb: list(rgb)\n    # getcolors() yields (count, color) pairs; keep only the transformed colors\n    return json.dumps([transform(count, rgb) for count, rgb in image.getcolors(image.size[0]*image.size[1])])\n\ndef exitScript(args, code):\n    args.infile.close()\n    args.outfile.close()\n    sys.exit(code)\n\ndef pixelCrop(image, block_size, orientation='tl'):\n    # PIL size is (width, height); crop box is (left, upper, right, lower)\n    (orginal_with, orginal_height) = image.size\n    blockWith = int((orginal_with // block_size) * block_size)\n    blockHeight = int((orginal_height // block_size) * block_size)\n    if 'tl' == orientation: cropsize = (0, 0, blockWith, blockHeight)\n    elif 'tr' == orientation: cropsize = (orginal_with - blockWith, 0, orginal_with, blockHeight)\n    elif 'bl' == orientation: cropsize = (0, orginal_height - blockHeight, blockWith, orginal_height)\n    elif 'br' == orientation: cropsize = (orginal_with - blockWith, orginal_height - blockHeight, orginal_with, orginal_height)\n    return image.crop(cropsize)\n\nif __name__==\"__main__\":\n    parse = argparse.ArgumentParser( \\\n        description='Create \"pixel art\" from a photo', prog='phixelgator', \\\n        epilog=\"Disclaimer: this does not *really* make pixel art, it just reduces the image resolution with preset color palettes.\")\n    parse.add_argument('-b', '--block', type=int, default=8, \\\n        help=\"Block size for phixelization. Default is 8 pixels.\")\n    parse.add_argument('-p', '--palette', \\\n        choices=['mario','hyrule','kungfu','tetris','contra','appleii', \\\n        'atari2600','commodore64','gameboy','grayscale','intellivision','nes','sega'], \\\n        help=\"The color palette to use.\")\n    parse.add_argument('-c', '--custom', type=argparse.FileType('r'), \\\n        help=\"A custom palette file to use instead of the defaults. Should be plain JSON file with a single array of color triplets.\")\n    parse.add_argument('-d', '--dimensions', \\\n        help=\"The dimensions of the new image (format: 10x10)\")\n    parse.add_argument('-t', '--type', choices=['png','jpeg','gif','bmp'], default='png', \\\n        help=\"Output file type. Default is 'png'.\")\n    parse.add_argument('-x', '--crop', choices=['tl','tr','bl','br'], \\\n        help=\"If this flag is set, the image will be cropped to conform to the Block Size. \\\n        The argument passed describes what corner to crop from.\")\n    parse.add_argument('-m', '--mode', choices=['rgb','hsv','hls'], default='rgb', \\\n        help=\"The color mode to use. hsv or hls may produce more desirable results than the default rgb \\\n        but the process will take longer.\")\n    parse.add_argument('-g', '--generate', action='store_true', \\\n        help=\"This flag overrides the default behaviour of infile and outfile options -- instead \\\n        of converting the input to a new image, a custom palette file will be generated from all colors \\\n        used in the infile photo. Other options are ignored.\")\n    parse.add_argument('infile', nargs='?', type=argparse.FileType('rb'), default=sys.stdin, \\\n        help=\"the input file (defaults to stdin)\")\n    parse.add_argument('outfile', nargs='?', type=argparse.FileType('wb'), default=sys.stdout, \\\n        help=\"the output file (defaults to stdout)\")\n    args = parse.parse_args()\n\n    \"\"\" If the -g flag is set, the behaviour of the utility is\n        completely altered -- instead of generating a new image,\n        a new color-palette json file is generated from the colors\n        of the input file. \"\"\"\n    if args.generate is True:\n        img = Image.open(args.infile).convert('RGB')\n        palette = generatePalette(img)\n        args.outfile.write(palette)\n        exitScript(args, 0)\n\n    \"\"\" Try to load the custom palette if provided:\n        Should be formatted as json similar to the\n        default palette definitions in this script. \"\"\"\n    palette = False\n    if args.custom is not None:\n        palette = json.loads(args.custom.read())\n        args.custom.close()\n        # To simplify things, the custom palette generator only makes rgb files,\n        # so it's fairly safe to assume that's what we're getting.\n        if 'hsv' == args.mode:\n            palette = list(map(lambda rgb: rbg_to_hsv(*rgb), palette))\n        elif 'hls' == args.mode:\n            palette = list(map(lambda rgb: rbg_to_hls(*rgb), palette))\n    elif args.palette is not None:\n        try:\n            path = os.sep.join([os.path.dirname(os.path.realpath(__file__)),'palettes',args.mode,args.palette])\n            with open(path + '.json', 'r') as f:\n                palette = json.loads(f.read())\n        except Exception as e:\n            sys.stderr.write(\"No palette loaded\")\n            palette = False\n\n    img = Image.open(args.infile).convert('RGBA')\n\n    if args.crop:\n        img = pixelCrop(img, args.block, args.crop)\n\n    scale_image(img, palette, args.block, args.mode)\n\n    \"\"\" Try to resize the image and fail gracefully \"\"\"\n    if args.dimensions:\n        try:\n            imgWidth, imgHeight = map(int, args.dimensions.split('x',1))\n            resized_img = img.resize((imgWidth, imgHeight))\n            resized_img.save(args.outfile, args.type)\n        except Exception as e:\n            sys.stderr.write(\"Failed to resize image\")\n            img.save(args.outfile, args.type)\n    else:\n        img.save(args.outfile, args.type)\n\n    exitScript(args, 0)","sub_path":"pixel_art.py","file_name":"pixel_art.py","file_ext":"py","file_size_in_byte":8599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644357497","text":"\nimport re\n\nfrom django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import logout\nfrom django.urls import reverse\n\n\n\nEXEMPT_URLS = [re.compile(settings.LOGIN_URL.lstrip('/'))]\nif hasattr(settings,'LOGIN_EXEMPT_URLS'):\n    EXEMPT_URLS += [re.compile(url) for url in settings.LOGIN_EXEMPT_URLS]\n\n\nclass LoginRequiredMiddleware:\n\n    def __init__(self,get_response):\n        self.get_response = get_response\n\n    def __call__(self,request):\n        response = self.get_response(request)\n        return response\n\n\n\n    def process_view(self,request,view_func,view_args,view_kwargs):\n        \"\"\"\n        https://docs.djangoproject.com/en/2.1/topics/http/middleware/#process-view\n        process_view()¶\n        process_view(request, view_func, view_args, view_kwargs)¶\n        request is an HttpRequest object. view_func is the Python function that Django is about to use. (It’s the actual\n        function object, not the name of the function as a string.) view_args is a list of positional arguments that will\n        be passed to the view, and view_kwargs is a dictionary of keyword arguments that will be passed to the view. Neither\n        view_args nor view_kwargs include the first view argument (request).\n\n        process_view() is called just before Django calls the view.\n\n        It should return either None or an HttpResponse object. If it returns None, Django will continue processing\n        this request, executing any other process_view() middleware and, then, the appropriate view.
If it returns an HttpResponse\n object, Django won’t bother calling the appropriate view; it’ll apply response middleware to that HttpResponse and return\n the result.\n\n \"\"\"\n assert hasattr(request,'user')\n path = request.path_info.lstrip('/')\n print(path)\n url_is_exempt = any(url.match(path) for url in EXEMPT_URLS)\n\n if path == reverse('accounts:logout').lstrip('/'):#reverse('accounts:logout') 'accounts' is the namespace of accouts app\n logout(request)\n\n\n\n # if not request.user.is_authenticated:\n # if not any(url.match(path) for url in EXEMPT_URLS):\n # return redirect(settings.LOGIN_URL)\n if request.user.is_authenticated and url_is_exempt:\n return redirect(settings.LOGIN_REDIRECT_URL)\n elif request.user.is_authenticated or url_is_exempt:\n return None\n else:\n return redirect(settings.LOGIN_URL)\n\n","sub_path":"sitenew/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"51546184","text":"# Number spiral diagonals\nimport time\n\nstart = time.time()\n\n\ndef e28():\n summ = 1\n point = 1\n for n in range(2, 1001, 2):\n for p in range(4):\n point += n\n summ += point\n return summ\n\n\nprint('diag sum =', e28()) # 669171001\nend = time.time() - start\nprint(\"Runtime =\", end)\n","sub_path":"euler28_Number_spiral_diagonals.py","file_name":"euler28_Number_spiral_diagonals.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"432065916","text":"#!/usr/bin/python3\nfrom tkinter import *\nroot = Tk()\nroot.title(\"Simple Calculator\")\ne=Entry(root,width=30)\ne.grid(row=0,column=0,columnspan=3,padx=10,pady=10)\ndef button_click(number):\n #e.delete(0,END)\n current=e.get()\n e.delete(0,END)\n e.insert(0,str(current)+str(number))\ndef button_clear():\n e.delete(0,END)\ndef button_add():\n first_num=e.get()\n global fnum\n global math\n math=\"addition\"\n fnum=int(first_num)\n e.delete(0,END)\ndef button_equal():\n second_number=e.get()\n e.delete(0,END)\n if math==\"addition\":\n e.insert(0,fnum+int(second_number))\n if math==\"substract\":\n e.insert(0,fnum-int(second_number))\n if math==\"multiplication\":\n e.insert(0,fnum*int(second_number))\n if math==\"division\":\n e.insert(0,fnum/int(second_number))\n\ndef button_sub():\n first_num = e.get()\n global fnum\n global math\n math = \"substract\"\n fnum = int(first_num)\n e.delete(0, END)\ndef button_mul():\n first_num = e.get()\n global fnum\n global math\n math = \"multiplication\"\n fnum = int(first_num)\n e.delete(0, END)\ndef button_div():\n first_num = e.get()\n global fnum\n global math\n math = \"division\"\n fnum = int(first_num)\n e.delete(0, END)\nbutton_1 = Button(root, text=' 1 ',\n\t\t\t\t\tcommand=lambda: button_click(1),padx=40,pady=20)\nbutton_2=Button(root,text='2',command=lambda : button_click(2),padx=40,pady=20)\nbutton_3=Button(root,text='3',command=lambda : button_click(3),padx=40,pady=20)\nbutton_4=Button(root,text='4',command=lambda : button_click(4),padx=40,pady=20)\nbutton_5=Button(root,text='5',command=lambda : button_click(5),padx=40,pady=20)\nbutton_6=Button(root,text='6',command=lambda : button_click(6),padx=40,pady=20)\nbutton_7=Button(root,text='7',command=lambda : button_click(7),padx=40,pady=20)\nbutton_8=Button(root,text='8',command=lambda : button_click(8),padx=40,pady=20)\nbutton_9=Button(root,text='9',command=lambda : 
button_click(9),padx=40,pady=20)\nbutton_0=Button(root,text='0',command=lambda : button_click(0),padx=44,pady=20)\nbutton_add=Button(root,text='+',command=button_add,padx=43,pady=20)\nbutton_clear=Button(root,text='C',command=button_clear,padx=88,pady=20)\nbutton_sub=Button(root,text='-',command=button_sub,padx=44,pady=20)\nbutton_mult=Button(root,text='*',command=button_mul,padx=42,pady=20)\nbutton_div=Button(root,text='/',command=button_div,padx=42,pady=20)\nbutton_equal=Button(root,text='=',command=button_equal,padx=90 ,pady=20)\nbutton_1.grid(row=3,column=0)\nbutton_2.grid(row=3,column=1)\nbutton_3.grid(row=3,column=2)\nbutton_4.grid(row=2,column=0)\nbutton_5.grid(row=2,column=1)\nbutton_6.grid(row=2,column=2)\nbutton_7.grid(row=1,column=0)\nbutton_8.grid(row=1,column=1)\nbutton_9.grid(row=1,column=2)\nbutton_0.grid(row=4,column=0)\nbutton_clear.grid(row=4,column=1,columnspan=2)\nbutton_add.grid(row=5,column=0)\nbutton_equal.grid(row=5,column=1,columnspan=2)\nbutton_sub.grid(row=6,column=0)\nbutton_mult.grid(row=6,column=1)\nbutton_div.grid(row=6,column=2)\nroot.mainloop()","sub_path":"python_min_project/graphical_calculator.py","file_name":"graphical_calculator.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112171660","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 17 18:56:59 2018\n\n@author: UO238186\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n#%%\ndef KernelRBF(X,Y,g):\n    m = X.shape[0]\n    n = Y.shape[0]\n    K = np.zeros((m,n))\n    \n    for i in range(m):\n        for j in range(n):\n            dif = np.linalg.norm(X[i,:]-Y[j,:])\n            K[i,j] = np.exp(-dif**2/g)\n    \n    return K \n\n#%%\nfolder = 'characters/'\ndata = np.loadtxt(folder + 'data_char.txt')\nlabels = np.loadtxt(folder + 'labels_char.txt')\n\ntrainPercentage = 0.9\n\n# cast to int: numpy rejects float slice indices\ndata_train = data[:int(len(data)*trainPercentage),:]\nlabels_train = labels[:int(len(labels)*trainPercentage)]\n\ndata_test = data[int(len(data)*trainPercentage):,:]\nlabels_test = labels[int(len(labels)*trainPercentage):]\n#%%\nC = 1\nsigma = 100\n\nnumber_classes = 26 # number of character classes\n\nn = data_train.shape[0] # number of rows (images) in training matrix\nm = data_test.shape[0] # number of rows (images) in testing matrix \nI = np.identity(n)\n#%%\nH = np.float_(data_train)/255 #the images are in [0,255]\nH_te = np.float_(data_test)/255\n#%%\nY = np.zeros((n, number_classes))\nfor i in range(0, n):\n    Y[i, int(labels_train[i])] = 1\n#%%\nlabels_te=np.zeros((m, number_classes))\nfor i in range(0, m):\n    labels_te[i, int(labels_test[i])] = 1\n#%%\n# Omega's \nOmegaP = KernelRBF(H, H, sigma) \nW = np.linalg.solve(I/C + OmegaP, Y)\n\nOmegaP_te = KernelRBF(H_te, H, sigma)\nYP_te = np.dot(OmegaP_te, W)\n\n# prediction\npredictedP_test = YP_te.argmax(axis=1)\n\n# success percentage\npercent = np.sum(predictedP_test == labels_test)/float(m)*100.\nprint('C = '+str(C)+' sigma = '+str(sigma)+'\\nSuccess rate = '+str(percent)+'%')\n\n# confusion matrix\nmc = confusion_matrix(labels_test, predictedP_test)\n\n# plot\nplt.figure(figsize=(6,6))\nticks = range(number_classes)\nplt.xticks(ticks)\nplt.yticks(ticks)\nplt.imshow(mc,cmap=plt.cm.Blues)\nplt.colorbar(shrink=0.8)\nw, h = mc.shape\nfor i in range(w):\n    for j in range(h):\n        plt.annotate(str(mc[i][j]), xy=(j, i), \n                     horizontalalignment='center',\n                     verticalalignment='center')\nplt.xlabel('Predicted label')\nplt.ylabel('Actual label')\nplt.title('Confusion
matrix')\nplt.show()","sub_path":"ImageClassification/ImageClassificationExercise02.py","file_name":"ImageClassificationExercise02.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"254132230","text":"import logging\nlogger = logging.getLogger(__name__)\n\nimport requests\n\nclass WebClient():\n headers = {'Content-Type': 'application/json'}\n\n def send_msg(self, _type, url, message, **kwargs):\n logger.debug('sending msg %s to url %s', _type, url)\n logger.debug('sending msg data \\n%s', message)\n response = None\n try:\n if _type == 'post':\n response = requests.post(url, headers=WebClient.headers, data=message, **kwargs)\n elif _type == 'put':\n response = requests.put(url, headers=WebClient.headers, data=message, **kwargs)\n elif _type == 'get':\n response = requests.get(url, headers=WebClient.headers, data=message, **kwargs)\n else:\n response = requests.delete(url, headers=WebClient.headers, data=message, **kwargs)\n except requests.RequestException as exception:\n logger.info('Requests fail - exception %s', exception)\n response = None\n finally:\n reply = self.__process_msg_response(response)\n logger.info('Requests - response %s', response)\n if reply:\n return reply.text\n return reply\n\n def __process_msg_response(self, response):\n try:\n if response:\n response.raise_for_status()\n else:\n response = None\n except Exception as exception:\n logging.info(\"Response exception %s\", exception)\n response = None\n finally:\n return response\n\n\nif __name__ == '__main__':\n pass","sub_path":"build/lib/exchange/rest/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427826534","text":"from requests import get\nfrom base64 import b64encode\nfrom os import getcwd\nfrom os.path import dirname, exists\nfrom re import search, sub\nfrom time import localtime, strftime\nfrom aria2 import addUri\n\n\ndef get_replay_list_to_c(term_id):\n return get('https://ke.qq.com/cgi-proxy/agency/exp/get_replay_list_to_c', headers={\n 'referer': f'https://ke.qq.com/webcourse/index.html?term_id={term_id}'\n }, params={\n 'tid': term_id,\n 'need_recording': '0',\n 'page_idx': '0',\n 'page_size': '0',\n 'need_all': '1',\n 'role_type': '2',\n # 'bkn': '988170367',\n # 'r': '0.2353',\n }).json()\n\n\ndef get_token(term_id, fileId):\n return get('https://ke.qq.com/cgi-bin/qcloud/get_token', params={\n 'term_id': term_id,\n 'fileId': fileId,\n # 'bkn': '988170367',\n # 't': '0.6172',\n }).json()\n\n\ndef getplayinfo(term_id, fileId):\n return get(f'https://playvideo.qcloud.com/getplayinfo/v2/1258712167/{fileId}', params=get_token(term_id, fileId)['result']).json()\n\n\ndef getDLUrl(term_id, fileId):\n tcl = getplayinfo(term_id, fileId)['videoInfo']['transcodeList']\n main = sorted(tcl, key=lambda ii: ii['bitrate'])[-1]['url']\n dlu = dku = None\n with get(main) as res:\n for ii in res.text.split('\\n')[::-1]:\n if ii and not ii.startswith('#'):\n dlu = dirname(main) + '/' + sub('start=\\\\d+', 'start=0', ii)\n if ii.startswith('#EXT-X-KEY'):\n dku = ii.split('\",IV')[0].split('URI=\"')[-1]\n if dlu and dku:\n return dlu, dku\n\n\ndef get_dk_token(cid, term_id):\n cookie = open('cookie.txt').read()\n uin = search('\"uin\":(\\\\d+),', cookie).group(1)\n plskey = search('p_lskey=(.*?);', cookie).group(1)\n pskey = search('p_skey=(.*?);', cookie).group(1)\n return 
b64encode(f'uin={uin};vod_type=0;cid={cid};term_id={term_id};plskey={plskey};pskey={pskey}'.encode()).decode()\n\n\ndef dlAll(cid, term_id, skip_exist=True, skip_list=None):\n _dir = getcwd()\n for ii in get_replay_list_to_c(term_id)['result']['replay_info_list']:\n task_name = ii['task_name']\n bg_time = ii['bg_time']\n file_id = ii['file']['file_id']\n duration = ii['file']['duration']\n name = f'{task_name} {strftime(\"%Y-%m-%d %H-%M-%S\", localtime(bg_time))} ({(duration + 59) // 60}min)'\n if skip_list and name in skip_list:\n continue\n if skip_exist and exists(name + '.ts'):\n continue\n dlu, dku = getDLUrl(term_id, file_id)\n addUri(dlu, {'out': name + '.ts', 'dir': _dir})\n addUri(f'{dku}&token={get_dk_token(cid, term_id)}', {'out': name + '.key', 'dir': _dir})\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269247106","text":"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao (Bin.Xiao@microsoft.com)\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.nn as nn\nimport torch\n\nclass JointsMSELoss(nn.Module):\n def __init__(self):\n super(JointsMSELoss, self).__init__()\n self.criterion = nn.MSELoss(size_average=True)\n\n def forward(self, output, target):\n batch_size = output.size(0)\n num_joints = output.size(1)\n # \n heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1)\n heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)\n loss = 0\n #print(heatmaps_gt)\n for idx in range(num_joints):\n # batch x (w*h)\n heatmap_pred = heatmaps_pred[idx].squeeze()\n heatmap_gt = heatmaps_gt[idx].squeeze()\n \n target_weight = (torch.max(heatmap_gt, dim=1)[0] > 0).float().reshape(-1, 1)\n #print(\"red\", heatmap_pred)\n #print(heatmap_gt) \n #loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)\n loss += 0.5 * self.criterion(heatmap_pred*target_weight, heatmap_gt)\n return loss / num_joints\n\nclass MWMSELoss(nn.Module):\n def __init__(self):\n super(MWMSELoss, self).__init__()\n self.criterion = nn.MSELoss(size_average=False, reduce=False)\n self.k = 1\n self.b = 1\n print(\"use MW Weight for train k = {}, b= {}\".format(self.k, self.b))\n\n def forward(self, output, target):\n batch_size = output.size(0)\n num_joints = output.size(1)\n #print(output.reshape((batch_size, num_joints, -1)).shape)\n heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, dim=1) # 13 x (batch_size, (h*w))\n heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, dim=1)\n loss = 0\n #print(heatmaps_gt)\n for idx in range(num_joints):\n # batch x (w*h)\n heatmap_pred = heatmaps_pred[idx].squeeze()\n heatmap_gt = heatmaps_gt[idx].squeeze()\n # if gt is zero_heatmap -> loss == 0\n k = heatmap_gt * self.k\n b = torch.where(heatmap_gt >= 0, 1, -1) * self.b\n mw = k+b\n '''\n for i in range(60, 70):\n #print(torch.max(heatmap_gt[0][120*i:120*(i+1)]))\n if torch.max(heatmap_gt[0][120*i:120*(i+1)]) > 0:\n print(heatmap_gt[0][120*i:120*(i+1)])\n #print(k[0][120*i:120*(i+1)])\n print(b[0][120*i:120*(i+1)])\n print(mw[0][120*i:120*(i+1)])\n '''\n target_weight = (torch.max(heatmap_gt, dim=1)[0] > 0).float().reshape(-1, 1)\n\n #print(\"w/o 
mw\",self.criterion(heatmap_pred*target_weight, heatmap_gt).mean())\n #print(\"w mw\", (mw * self.criterion(heatmap_pred*target_weight, heatmap_gt)).mean())\n #print(\"w mw\", self.criterion(torch.mul(mw, heatmap_pred)*target_weight, heatmap_gt))\n #print(heatmap_gt) \n #loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)\n loss += 0.5 * (mw *self.criterion(heatmap_pred*target_weight, heatmap_gt)).mean()\n return loss / num_joints\n\nif __name__ == '__main__':\n a = JointsMSELoss()\n b = torch.arange(24).reshape(2, 3, 2, 2)\n print(b)\n c = torch.zeros((2, 3, 2, 2))\n c[0][0][0][0] = 1\n d = a(b, c)\n print(d)\n","sub_path":"uwbpose/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369387902","text":"# This acts as the C server that contains the routing table information for the rest of\n# the program to function properly. No loop is required as once it is sent to the R router,\n# the connection will be terminated.\n#\n# Created By: Jason Crandall u0726408\n\nfrom socket import *\nimport json\nimport sys\n\n# Set up static host and port number\nHOST = 'localhost'\nPORT = 2345\n\n# Initialize the flow table to be sent to the router\nflowTable = {\n \"table\": [\n {\n \"match\": \"sra <= 20 and dsa <= 20 and srp > 10 and dsp > 10\",\n \"action\": \"sra = 21;srp = 41\",\n \"statistics\": 0\n },\n {\n \"match\": \"sra > 40 and dsa > 40 and srp > 10 and dsp > 10\",\n \"action\": \"forward\",\n \"statistics\": 0\n },\n {\n \"match\": \"srp <= 10 or dsp <= 10\",\n \"action\": \"drop\",\n \"statistics\": 0\n },\n {\n \"match\": \"'No Match Found'\",\n \"action\": \"drop\",\n \"statistics\": 0\n }\n ]\n}\n\n# Main function that waits for a connection from the router, then\n# sends the flow table and closes the connection\ndef main():\n # Open a connection and wait for input from the router\n with socket(AF_INET,SOCK_STREAM) as s:\n try:\n s.bind((HOST, PORT))\n except:\n print(\"OS Error 98: Address already in use\")\n s.close()\n return\n s.listen(2)\n connection, addr = s.accept()\n with connection:\n print(\"Connected by \", addr)\n data = connection.recv(1024)\n print(\"Received Data: \", data.decode())\n command = ''\n\n # Ensure the correct format is received\n try:\n jsonData = json.loads(data.decode())\n command = jsonData['command']\n except:\n error = \"Unrecognized Format\"\n connection.send(error.encode())\n \n # Send flow table to router\n if(command == \"requestTable\"):\n tableData = json.dumps(flowTable)\n print(\"Sending flow table\")\n connection.send(tableData.encode())\n\nif __name__ == \"__main__\":\n main()","sub_path":"PA_2/part1/C_server.py","file_name":"C_server.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"432792387","text":"#!/usr/bin/python3\n# 3/28/2019 Skeetzo\n# OnlySnarf.py menu system\n\n### doesn't work:\n# upload & backup (requires upload via local added to main script)\n# settings menu -> \"Incorrect Index\"\n\nimport random\nimport os\nimport shutil\nimport datetime\nimport json\nimport sys\nimport pathlib\nimport pkg_resources\nfrom OnlySnarf.settings import SETTINGS as settings\nfrom OnlySnarf import onlysnarf as OnlySnarf\nfrom OnlySnarf import google as Google\nfrom OnlySnarf import cron as Cron\n\n###################\n##### Globals #####\n###################\n\nversion = 
str(pkg_resources.get_distribution(\"onlysnarf\").version)\nheader = \"\\n ________ .__ _________ _____ \\n \\\n\\\\_____ \\\\ ____ | | ___.__./ _____/ ____ _____ ________/ ____\\\\\\n \\\n / | \\\\ / \\\\| |< | |\\\\_____ \\\\ / \\\\\\\\__ \\\\\\\\_ _ \\\\ __\\\\ \\n \\\n/ | \\\\ | \\\\ |_\\\\___ |/ \\\\ | \\\\/ __ \\\\ | |\\\\/| | \\n \\\n\\\\_______ /___| /____/ ____/_______ /___| (____ \\\\\\\\__| |_| \\n \\\n \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\\\/ \\n\"\n\nUPDATED = False\nUPDATED_TO = False\nINITIALIZED = False\n\ncolors = {\n 'blue': '\\033[94m',\n 'header': '\\033[48;1;34m',\n 'teal': '\\033[96m',\n 'pink': '\\033[95m',\n 'green': '\\033[92m',\n 'yellow': '\\033[93m',\n 'menu': '\\033[48;1;44m'\n }\n\nmenuItems = []\nactionItems = []\nmessageItems = []\nfileItems = []\nlocationItems = []\npromotionItems = []\nsettingItems = []\nmethodItems = []\n\ndef initialize():\n # print(\"Initializing Menu\")\n global INITIALIZED\n if INITIALIZED:\n # print(\"Already Initialized, Skipping\")\n return\n global settingItems\n # Settings Menu\n settingItems = [\n [ \"Verbose\", settings.VERBOSE, [\"True\",\"False\"],True],\n [ \"Debug\", settings.DEBUG, [\"True\",\"False\"],False],\n [ \"Backup\", settings.BACKUP, [\"True\",\"False\"],True],\n [ \"Show Window\", settings.SHOW_WINDOW, [\"True\",\"False\"],False],\n [ \"Delete Google\", settings.DELETE_GOOGLE, [\"True\",\"False\"],False],\n [ \"Skip Delete\", settings.SKIP_DELETE, [\"True\",\"False\"],False],\n [ \"Tweeting\", settings.TWEETING, [\"True\",\"False\"],True],\n [ \"Image Limit\", settings.IMAGE_UPLOAD_LIMIT,None,True],\n ]\n if str(settings.VERBOSE) == \"True\":\n settingItems.append([ \"Skip Delete\", settings.SKIP_DELETE, [\"True\",\"False\"],False])\n settingItems.append([ \"Mount Path\", settings.MOUNT_PATH,None,False])\n settingItems.append([ \"Drive Path\", settings.DRIVE_PATH,None,False])\n settingItems.append([ \"Users Path\", settings.USERS_PATH,None,False])\n settingItems.append([ \"Google Root\", settings.ROOT_FOLDER,None,False])\n settingItems.append([ \"Drive Folder\", settings.DRIVE_FOLDERS,None,False])\n settingItems.append([ \"Create Drive\", settings.CREATE_DRIVE, [\"True\",\"False\"],False])\n if str(settings.DEBUG) == \"True\":\n settingItems.append([ \"Force Delete\", settings.FORCE_DELETE, [\"True\",\"False\"],False])\n settingItems.append([ \"Force Backup\", settings.FORCE_BACKUP, [\"True\",\"False\"],False])\n settingItems.append([ \"Force Upload\", settings.FORCE_UPLOAD, [\"True\",\"False\"],False])\n settingItems.append([ \"Skip Download\", settings.SKIP_DOWNLOAD, [\"True\",\"False\"],False])\n settingItems.append([ \"Image Max\", settings.IMAGE_UPLOAD_MAX,None,False])\n settingItems.append([ \"Text\", settings.TEXT,None,False])\n settingItems.append([ \"Local\", settings.INPUT,None,False])\n settingItems.append([ \"Image\", settings.IMAGE,None,False])\n settingItems.append([ \"Prefer Local\", settings.PREFER_LOCAL,[\"True\",\"False\"],True])\n # settingItems.append([ \"Overwrite Local\", settings.OVERWRITE_LOCAL,[\"True\",\"False\"],True])\n settingItems = sorted(settingItems)\n settingItems.insert(0,[ \"Back\", \"main\"])\n\n global menuItems\n # Main Menu\n menuItems = [\n [ \"Actions\", \"action\"],\n [ \"Settings\", \"set_settings\"],\n [ \"Exit\", \"exit\"]\n ]\n\n global actionItems\n # Actions Menu\n actionItems = [\n [ \"Upload\", \"release\" ],\n [ \"Download\", \"download\" ],\n # [ \"Promotion\", \"promotion\" ],\n [ \"Message\", \"message\" ],\n [ \"Discount\", \"discount\" ],\n [ \"Post\", \"post\" 
],\n [ \"Reset\", \"reset\" ]\n ]\n if str(settings.DEBUG) == \"True\":\n actionItems.append([ \"Test\", \"test\"])\n actionItems.append([ \"Promotion\", \"promotion\" ])\n actionItems.append([ \"Cron\", \"cron\" ])\n actionItems = sorted(actionItems)\n actionItems.insert(0,[ \"Back\", \"main\"])\n\n global messageItems\n # Message Menu\n messageItems = [\n [ \"All\", \"all\"],\n # [ \"New\", \"new\"],\n [ \"Recent\", \"recent\"],\n # [ \"Favorite\", \"favorite\"],\n [ \"User by Username\", \"user\"],\n [ \"Select User\", \"select\"]\n ]\n if str(settings.DEBUG) == \"True\":\n messageItems.append([ \"New\", \"new\"])\n messageItems.append([ \"Favorite\", \"favorite\"])\n messageItems = sorted(messageItems)\n messageItems.insert(0,[ \"Back\", \"main\"])\n\n global fileItems\n # File Type Menu\n fileItems = [\n [ \"Image\", \"image\"],\n [ \"Gallery\", \"gallery\"],\n [ \"Performer\", \"performer\"],\n # [ \"Scene\", \"scene\"],\n [ \"Video\", \"video\"],\n ]\n if str(settings.DEBUG) == \"True\":\n fileItems.append([ \"Scene\", \"scene\"])\n fileItems = sorted(fileItems)\n fileItems.insert(0,[ \"Back\", \"main\"])\n\n global locationItems\n # File Location Menu\n locationItems = sorted([\n [ \"Local\", \"local\"],\n [ \"Google Drive\", \"google\"]\n ])\n locationItems.insert(0,[ \"Back\", \"main\"])\n\n global promotionItems\n if str(settings.DEBUG) == \"True\":\n promotionItems = sorted([\n [ \"Enter Email\", \"email\" ],\n [ \"Select User\", \"select\" ]\n ])\n promotionItems.insert(0,[ \"Back\", \"main\"])\n\n global methodItems\n methodItems = sorted([\n [ \"Choose\", \"choose\" ],\n [ \"Random\", \"random\" ]\n ])\n methodItems.insert(0,[ \"Back\", \"main\"])\n\n global postItems\n postItems = sorted([\n [ \"Enter\", \"enter\" ],\n [ \"Select\", \"select\" ]\n ])\n postItems.insert(0,[ \"Back\", \"main\"])\n\n global cronItems\n cronItems = sorted([\n [ \"Add\", \"add\" ],\n [ \"List\", \"list\" ],\n [ \"Delete\", \"delete\" ],\n [ \"Delete All\", \"deleteall\" ]\n ])\n cronItems.insert(0,[ \"Back\", \"main\"])\n\n # print(\"Initialized Menu\")\n INITIALIZED = True\n\n#####################\n##### Functions #####\n#####################\n\n### Action Menu - file type\ndef action():\n for item in actionItems:\n print(colorize(\"[\" + str(actionItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) >= len(actionItems): raise ValueError\n if str(actionItems[int(choice)][1]) == \"main\":\n return main()\n elif str(actionItems[int(choice)][1]) == \"reset\":\n OnlySnarf.remove_local()\n elif str(actionItems[int(choice)][1]) == \"message\":\n actionChoice = list(actionItems[int(choice)])[1]\n return finalizeMessage(actionChoice)\n elif str(actionItems[int(choice)][1]) == \"discount\":\n actionChoice = list(actionItems[int(choice)])[1]\n return finalizeDiscount(actionChoice)\n elif str(actionItems[int(choice)][1]) == \"promotion\":\n actionChoice = list(actionItems[int(choice)])[1]\n return finalizePromotion(actionChoice)\n elif str(actionItems[int(choice)][1]) == \"post\":\n actionChoice = list(actionItems[int(choice)])[1]\n return finalizePost(actionChoice)\n elif str(actionItems[int(choice)][1]) == \"cron\":\n actionChoice = list(actionItems[int(choice)])[1]\n return finalizeCron(actionChoice)\n else:\n actionChoice = list(actionItems[int(choice)])[1]\n return finalizeAction(actionChoice)\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n 
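# unexpected action name: log the exception when verbose, then fall back to a generic error\n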
print(\"Error: Missing Method\") \n\n### Action Menu - finalize\ndef finalizeAction(actionChoice):\n for item in fileItems:\n print(colorize(\"[\" + str(fileItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n fileChoice = input(\">> \")\n try:\n if int(fileChoice) < 0 or int(fileChoice) >= len(fileItems): raise ValueError\n if str(fileItems[int(fileChoice)][1]) == \"main\":\n return action()\n # Call the matching function\n fileChoice = list(fileItems[int(fileChoice)])[1]\n return selectMethod(actionChoice, fileChoice)\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n print(\"Error: Missing Method\") \n\ndef selectMethod(actionChoice, fileChoice):\n # if settings.INPUT and methodItems doesn't include Input option already\n if [ \"Local\", \"local\" ] not in methodItems and actionChoice == \"release\":\n methodItems.append([ \"Local\", \"local\" ])\n for item in methodItems:\n print(colorize(\"[\" + str(methodItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n methodChoice = input(\">> \")\n try:\n if int(methodChoice) < 0 or int(methodChoice) >= len(methodItems): raise ValueError\n methodChoice_ = list(methodItems[int(methodChoice)])[1]\n if str(methodItems[int(methodChoice)][1]) == \"main\":\n return action()\n elif str(methodItems[int(methodChoice)][1]) == \"choose\":\n if str(fileChoice) == \"gallery\":\n choices = displayFolders(\"galleries\")\n elif str(fileChoice) == \"video\":\n choices = displayFolders(\"videos\")\n elif str(fileChoice) == \"image\":\n choices = displayFolders(\"images\")\n elif str(fileChoice) == \"performer\":\n choices = displayFolders(\"performers\")\n elif str(fileChoice) == \"scene\":\n choices = displayFolders(\"scenes\")\n seeking = True\n while seeking:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) > len(choices): raise ValueError\n if int(choice) == 0:\n return selectMethod(actionChoice, fileChoice)\n file = choices[int(choice)-1]\n parent = file\n seeking = False\n if str(fileChoice) == \"gallery\" or str(fileChoice) == \"image\" or str(fileChoice) == \"video\" or str(fileChoice) == \"performer\":\n if str(fileChoice) == \"gallery\":\n choices_ = displayFolders(file['title'], parent=\"galleries\")\n elif str(fileChoice) == \"image\":\n choices_ = displayFiles(file['title'], parent=\"images\")\n elif str(fileChoice) == \"video\":\n choices_ = displayFiles(file['title'], parent=\"videos\")\n elif str(fileChoice) == \"performer\":\n choices_ = displayFolders(file['title'], parent=\"performers\")\n seeking_ = True\n while seeking_:\n choice_ = input(\">> \")\n try:\n if int(choice_) < 0 or int(choice_) > len(choices_): raise ValueError\n if int(choice_) == 0:\n return selectMethod(actionChoice, fileChoice)\n file = choices_[int(choice_)-1]\n seeking_ = False\n folderName = file['title']\n if str(fileChoice) == \"performer\":\n # parent = file\n # choices_ = displayFiles(file['title'], parent=parent)\n choices_ = displayBoth(file['title'], parent=parent)\n seeking__ = True\n while seeking__:\n choice_ = input(\">> \")\n try:\n if int(choice_) < 0 or int(choice_) > len(choices_): raise ValueError\n if int(choice_) == 0:\n return selectMethod(actionChoice, fileChoice)\n file = choices_[int(choice_)-1]\n seeking__ = False\n return performAction(actionChoice, fileChoice, methodChoice_, file=file, folderName=folderName, parent=parent)\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n 
return finalizeAction(actionChoice)\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n return finalizeAction(actionChoice)\n return performAction(actionChoice, fileChoice, methodChoice_, file=file, folderName=folderName, parent=parent)\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n return finalizeAction(actionChoice)\n return performAction(actionChoice, fileChoice, methodChoice_)\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n print(\"Error: Missing Method\") \n\n### Action Menu - perform\ndef performAction(actionChoice, fileChoice, methodChoice, file=None, folderName=None, parent=None):\n try:\n method = getattr(OnlySnarf, str(actionChoice))\n response = method(fileChoice, methodChoice=methodChoice, file=file, folderName=folderName, parent=parent)\n if response:\n if str(actionChoice) == \"download\":\n settings.update_value(\"input\",response.get(\"path\"))\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n print(\"Error: Missing Method\") \n mainMenu()\n\n# Message Menu - finalize\ndef finalizeMessage(actionChoice):\n for item in messageItems:\n print(colorize(\"[\" + str(messageItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n choice = int(choice)\n if int(choice) < 0 or int(choice) >= len(messageItems): raise ValueError\n if str(messageItems[int(choice)][1]) == \"main\":\n return action()\n return performMessage(actionChoice, messageItems[int(choice)][1])\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n\n# Message Menu - perform\ndef performMessage(actionChoice, messageChoice):\n username = None\n if str(messageChoice) == \"select\":\n messageChoice = \"user\"\n users = displayUsers()\n seeking = True\n while seeking:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) > len(users): raise ValueError\n if int(choice) == 0:\n return finalizeMessage(actionChoice)\n username = users[int(choice)-1].username\n seeking = False\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n return mainMenu()\n elif str(messageChoice) == \"user\":\n print(\"Username:\")\n username = input(\">> \")\n images = selectImage(messageChoice)\n # [folder , image_file]\n # print(\"len: \" + str(len(images)))\n while True:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) > len(images): raise ValueError\n if int(choice) == 0:\n return finalizeMessage(actionChoice)\n try:\n image = images[int(choice)-1]\n message(messageChoice, [image[1],image[0]], username)\n return mainMenu()\n # except (ValueError, IndexError):\n # print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n print(\"Error: Missing Method\")\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n # mainMenu() \n\n# {\n# image: file\n# folder: folder\n# }\ndef message(choice, image=None, username=None):\n message = input(\"Message: \")\n waiting = True\n while waiting:\n try:\n price = input(\"Price: \")\n \"{:.2f}\".format(float(price))\n waiting = False\n except ValueError:\n print(\"Enter a currency amount!\")\n if not image or not image[0] or image[0] == None:\n print(\"Error: Missing Image\")\n return \n OnlySnarf.remove_local()\n try: \n image = 
Google.download_file(image[0]).get(\"path\")\n except Exception as e:\n OnlySnarf.remove_local()\n try:\n image = Google.download_gallery(image[0]).get(\"path\")\n except Exception as e:\n print(\"Error: Missing Image(s)\")\n image = None\n # pass\n OnlySnarf.message(choice, message=message, image=image, price=price, username=username)\n\n# Promotion Menu - finalize\ndef finalizePromotion(actionChoice):\n for item in promotionItems:\n print(colorize(\"[\" + str(promotionItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n choice = int(choice)\n if int(choice) < 0 or int(choice) > len(promotionItems): raise ValueError\n if str(promotionItems[int(choice)][1]) == \"main\":\n return action()\n choice = list(promotionItems[int(choice)])[1]\n return performPromotion(actionChoice, choice)\n except (ValueError, IndexError):\n settings.maybePrint(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n\ndef performPromotion(actionChoice, promotionChoice):\n def promote(username):\n if username == None:\n print(\"Warning: No user found\")\n else:\n OnlySnarf.give_trial(username)\n mainMenu() \n try:\n username = None\n if str(promotionChoice) == \"email\":\n # prompt\n choice = input(\"Email: \")\n username = str(choice)\n return promote(username)\n elif str(promotionChoice) == \"select\":\n users = displayUsers() \n while True:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) > len(users): raise ValueError\n if int(choice) == 0:\n return finalizePromotion(actionChoice)\n return promote(str(users[int(choice)-1].username))\n except (ValueError, IndexError):\n settings.maybePrint(sys.exc_info()[0])\n print(\"Error: Incorrect Index\") \n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n print(\"Error: Missing Method\") \n mainMenu()\n\ndef finalizeDiscount(actionChoice):\n for item in messageItems:\n print(colorize(\"[\" + str(messageItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n choice = int(choice)\n if int(choice) < 0 or int(choice) >= len(messageItems): raise ValueError\n if str(messageItems[int(choice)][1]) == \"main\":\n return action()\n return performDiscount(actionChoice, messageItems[int(choice)][1])\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n\ndef performDiscount(actionChoice, discountChoice):\n username = None\n if str(discountChoice) == \"user\":\n user = input(\"Username: \")\n OnlySnarf.discount(user, depth=int(choice))\n mainMenu()\n elif str(discountChoice) == \"select\":\n users = displayUsers()\n seeking = True\n while seeking:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) > len(users): raise ValueError\n if int(choice) == 0:\n return finalizeDiscount(actionChoice)\n OnlySnarf.discount(users[int(choice)-1], depth=int(choice))\n mainMenu()\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n return mainMenu()\n OnlySnarf.discount(discountChoice)\n mainMenu() \n\ndef finalizePost(actionChoice):\n for item in postItems:\n print(colorize(\"[\" + str(postItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n choice = int(choice)\n if int(choice) < 0 or int(choice) >= len(postItems): raise ValueError\n if str(postItems[int(choice)][1]) == \"main\":\n return action()\n elif str(postItems[int(choice)][1]) == \"enter\":\n 
OnlySnarf.post()\n else:\n selectPost()\n return mainMenu()\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n\ndef selectPost():\n postMenu = []\n for key in settings.POSTS:\n postMenu.append([ key.title().replace(\"_\",\" \"), settings.POSTS[key]])\n postMenu.insert(0,[ \"Back\", \"main\"])\n for item in postMenu:\n print(colorize(\"[\" + str(postMenu.index(item)) + \"] \", 'teal') + list(item)[0] + \" - {}\".format(list(item)[1][:50]))\n while True:\n choice = input(\">> \")\n try:\n choice = int(choice)\n if int(choice) < 0 or int(choice) >= len(postMenu): raise ValueError\n if str(postMenu[int(choice)][1]) == \"main\":\n return action()\n text = postMenu[int(choice)][1]\n OnlySnarf.post(text=text)\n return mainMenu()\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n\ndef finalizeCron(actionChoice):\n for item in cronItems:\n print(colorize(\"[\" + str(cronItems.index(item)) + \"] \", 'teal') + list(item)[0])\n while True:\n cronChoice = input(\">> \")\n try:\n if int(cronChoice) < 0 or int(cronChoice) >= len(cronItems): raise ValueError\n if str(cronItems[int(cronChoice)][1]) == \"main\":\n return action()\n cronChoice = list(cronItems[int(cronChoice)])[1]\n return performCron(actionChoice, cronChoice)\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n print(\"Error: Missing Method\") \n\ndef performCron(actionChoice, cronChoice):\n if str(cronChoice) == \"add\":\n print(\"Comment:\")\n comment = input(\">> \")\n print(\"Args:\")\n args = input(\">> \")\n args = args.split(\",\")\n print(\"Minute:\")\n minute = input(\">> \")\n print(\"Hours:\")\n hour = input(\">> \")\n Cron.create(comment, args=args, minute=minute, hour=hour)\n elif str(cronChoice) == \"list\":\n Cron.list()\n elif str(cronChoice) == \"delete\":\n jobs = Cron.getAll()\n print(colorize(\"[0] \", 'teal') + \"Back\")\n jobs_ = []\n for job in jobs:\n jobs_.append(str(job.comment))\n print(colorize(\"[\" + str(jobs.index(job)+1) + \"] \", 'teal') + str(job))\n while True:\n choice = input(\">> \")\n try:\n choice = int(choice)\n if int(choice) < 0 or int(choice) > len(jobs): raise ValueError\n if int(choice) == 0: return finalizeCron(actionChoice)\n Cron.delete(jobs_[int(choice)-1])\n return mainMenu()\n except (ValueError, IndexError):\n print(sys.exc_info()[0])\n print(\"Error: Incorrect Index\")\n \n elif str(cronChoice) == \"deleteall\":\n Cron.deleteAll()\n else:\n print(\"Error: Missing Cron Action\")\n mainMenu() \n\ndef displayBoth(folderName, parent=None):\n files = Google.get_files_of_folder(folderName, parent=parent)\n folders = Google.get_folders_of_folder(folderName, parent=parent)\n files_both = []\n for f in files: files_both.append(f)\n for f in folders: files_both.append(f)\n print(colorize(\"[0] \", 'blue') + \"Back\")\n i = 1\n for file in files_both:\n print(colorize(\"[\" + str(i) + \"] \", 'blue') + str(file['title']))\n i = i+1\n return files_both\n\ndef displayFiles(folderName, parent=None):\n files = Google.get_files_of_folder(folderName, parent=parent)\n print(colorize(\"[0] \", 'blue') + \"Back\")\n i = 1\n for file in files:\n print(colorize(\"[\" + str(i) + \"] \", 'blue') + str(file['title']))\n i = i+1\n return files\n\ndef displayFolders(folderName, parent=None):\n folders = Google.get_folders_of_folder(folderName, parent=parent)\n print(colorize(\"[0] \", 'blue') + \"Back\")\n i = 1\n for folder in folders:\n 
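    # Aside (annotation, not part of the original file): Cron.create/Cron.list/Cron.delete\n
    # in performCron above are project wrappers; assuming the python-crontab package,\n
    # the same jobs could be managed directly (command string is illustrative):\n
    #\n
    #     from crontab import CronTab\n
    #     cron = CronTab(user=True)                    # current user's crontab\n
    #     job = cron.new(command=\"onlysnarf\", comment=\"daily post\")\n
    #     job.setall(\"30 5 * * *\")                     # minute hour dom month dow\n
    #     cron.write()                                 # persist the change\n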
print(colorize(\"[\" + str(i) + \"] \", 'blue') + str(folder['title']))\n i = i+1\n return folders\n\n# displays and returns users\ndef displayUsers():\n users = OnlySnarf.get_users()\n print(colorize(\"[0] \", 'blue') + \"Back\")\n # show list\n i = 1\n for user in users:\n print(colorize(\"[\" + str(i) + \"] \", 'blue') + str(user.username))\n i = i+1\n return users\n\ndef selectImage(folderName):\n images = Google.get_images()\n print(colorize(\"[0] \", 'blue') + \"Back\")\n i = 1\n for image in images:\n print(colorize(\"[\" + str(i) + \"] \", 'blue') + str(image[0][\"title\"]) + \" - \" + str(image[1]['title']))\n i = i+1\n return images\n # performMessage -> [folder , image_file] -> performMessage\n\n###########################\n\n### Settings Menu\ndef set_settings():\n showHeader()\n print(colorize(\"Set:\",'menu'))\n global settingItems\n for item in settingItems:\n print(colorize(\"[\" + str(settingItems.index(item)) + \"] \", 'blue') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) >= len(settingItems): raise ValueError\n settingChoice = list(settingItems[int(choice)])[0]\n settingValue = list(settingItems[int(choice)])[1]\n if str(settingChoice) == \"Back\":\n return main()\n elif str(settingChoice) == \"File Name\":\n settingValue = input(\"Enter the file name: \")\n elif str(settingChoice) == \"File Path\":\n settingValue = input(\"Enter the file path: \")\n elif str(settingChoice) == \"Text\":\n settingValue = input(\"Enter the upload text: \")\n elif str(settingChoice) == \"Mount Path\":\n settingValue = input(\"Enter the mount path: \")\n elif str(settingChoice) == \"Google: Root Folder Path\":\n settingValue = input(\"Enter the drive path (folderName/folderName/...): \")\n elif str(settingChoice) == \"Image\":\n settingValue = input(\"Enter the image path: \")\n elif str(settingChoice) == \"Google: Root Folder Name\":\n settingValue = input(\"Enter the Google root folder name: \")\n elif str(settingChoice) == \"Google: Drive Folders\":\n settingValue = input(\"Enter the Google drive folders (separated by ',', no spaces): \")\n settingValue = settingValue.split(\",\")\n elif str(settingChoice) == \"Image Limit\":\n settingValue = input(\"Enter the image upload limit: \")\n elif str(settingChoice) == \"Image Max\":\n settingValue = input(\"Enter the image upload max: \")\n elif str(settingChoice) == \"Local\":\n settingValue = input(\"Enter the local path: \")\n else:\n list_ = list(settingItems[int(choice)][2])\n print(colorize(str(settingChoice)+\" =\", 'blue'))\n for item in list_:\n print(colorize(\"[\" + str(list_.index(item)) + \"] \", 'pink') + str(item))\n while True:\n updateChoice = input(\">> \")\n try:\n if int(updateChoice) < 0 or int(updateChoice) >= len(list(settingItems[int(choice)][2])): raise ValueError\n settingValue = list_[int(updateChoice)]\n break\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n break\n global UPDATED\n UPDATED = settingChoice\n global UPDATED_TO\n UPDATED_TO = settingValue\n settingItems[int(choice)][1] = settingValue\n settings.update_value(settingChoice, settingValue)\n return set_settings()\n except (ValueError, IndexError):\n print(\"Error: Incorrect Index\")\n except Exception as e:\n settings.maybePrint(e)\n return main()\n\n###########################\n\ndef colorize(string, color):\n if not color in colors: return string\n return colors[color] + string + '\\033[0m'\n \ndef exit():\n 
print(\"Shnarrf?\")\n sys.exit(0)\n\n###########################\nimport atexit\ndef exit_handler():\n print('Shnnarrrff!')\n exit()\natexit.register(exit_handler)\n\nimport signal\ndef signal_handler(sig, frame):\n print('Shnnnarf?')\n exit()\nsignal.signal(signal.SIGINT, signal_handler)\n###########################\n\ndef main():\n showHeader()\n mainMenu()\n\ndef mainMenu():\n ### Main Menu\n print(colorize(\"Select an option:\", 'menu'))\n for item in menuItems:\n print(colorize(\"[\" + str(menuItems.index(item)) + \"] \", 'blue') + list(item)[0])\n while True:\n choice = input(\">> \")\n try:\n if int(choice) < 0 or int(choice) >= len(menuItems): raise ValueError\n # Call the matching function\n method_name = list(menuItems[int(choice)])[1]\n possibles = globals().copy()\n possibles.update(locals())\n method = possibles.get(method_name)\n if method is not None:\n return method()\n else:\n print(\"Error: Missing Option\") \n except (ValueError, IndexError, KeyboardInterrupt):\n print(\"Error: Incorrect Index\")\n pass\n\ndef showHeader():\n os.system('clear')\n # Print some badass ascii art header here !\n print(colorize(header, 'header'))\n print(colorize('version '+version+'\\n', 'green'))\n showUser()\n showSettings()\n\ndef showSettings():\n print('Settings:')\n for setting in settingItems:\n if str(setting[0]) == \"Image Limit\" and setting[3]:\n print(\" - {} = {}/{}\".format(setting[0],setting[1],settings.IMAGE_UPLOAD_MAX))\n elif str(setting[0]) != \"Back\" and str(settings.DEBUG) == \"True\":\n print(\" - {} = {}\".format(setting[0],setting[1]))\n elif str(setting[0]) != \"Back\" and setting[3]:\n print(\" - {} = {}\".format(setting[0],setting[1]))\n global UPDATED\n global UPDATED_TO\n if str(UPDATED) != \"False\":\n print('\\nUpdated: '+str(UPDATED)+' -> '+str(UPDATED_TO))\n UPDATED = False\n print('\\r')\n\ndef showUser():\n print(\"User:\")\n print(\" - Username = {}\".format(settings.USERNAME))\n if settings.PASSWORD and str(settings.PASSWORD) != \"\":\n pass_ = \"******\"\n else:\n pass_ = \"\"\n print(\" - Password = {}\".format(pass_))\n print('\\r')\n\n###########################\n\nif __name__ == \"__main__\":\n try:\n main_other()\n except:\n # print(sys.exc_info()[0])\n print(\"Shhhhhnnnnnarf!\")\n finally:\n sys.exit(0)\n\ndef main_other():\n settings.initialize()\n initialize()\n main()","sub_path":"OnlySnarf/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":33296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427381814","text":"# 문제 설명\n# 이 문제에는 표준 입력으로 두 개의 정수 n과 m이 주어집니다.\n# 별(*) 문자를 이용해 가로의 길이가 n, 세로의 길이가 m인 직사각형 형태를 출력해보세요.\n#\n# 제한 조건\n# n과 m은 각각 1000 이하인 자연수입니다.\n\na, b = (5, 3)\n\n# a, b = map(int, input().strip().split(' '))\nprint(a + b)\n\nz = []\nfor _ in range(b):\n s = ''\n for _ in range(a):\n s += '*'\n z.append(s)\nprint('\\n'.join(z))\n\n# answer = ('*'*a +'\\n')*b\n\n# 5 min","sub_path":"programmers/2019_2_10/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"288997582","text":"import sys\nimport requests\nimport csv\nimport io\nimport pandas as pd\nfrom pandas.core.groupby.groupby import DataError\nimport numpy as np\nfrom .utils import store_data, stoi\n\nfrom collections import defaultdict\nfrom .utils import store_data, sorted_date, stoi\n\n# ------------------------------------------------------------------------\n# 
+{"seq_id":"288997582","text":"import sys\n
import requests\n
import csv\n
import io\n
import pandas as pd\n
from pandas.core.groupby.groupby import DataError\n
import numpy as np\n
\n
from collections import defaultdict\n
from .utils import store_data, sorted_date, stoi\n
\n
# ------------------------------------------------------------------------\n
# Globals\n
\n
rs_region_codes = {\n
    \"SM\": \"Santa Maria (R01 e R02)\",\n
    \"UR\": \"Uruguaiana (R03)\",\n
    \"CC\": \"Capão da Canoa (R04 e R05)\",\n
    \"TQ\": \"Taquara (R06)\",\n
    \"NH\": \"Novo Hamburgo (R07)\",\n
    \"CA\": \"Canoas (R08)\",\n
    \"PA\": \"Porto Alegre (R09 e R10)\",\n
    \"SA\": \"Santo Ângelo (R11)\",\n
    \"CA\": \"Cruz Alta (R12)\",  # NOTE: duplicate key; this entry silently overwrites \"Canoas (R08)\" above\n
    \"IJ\": \"Ijuí (R13)\",\n
    \"SR\": \"Santa Rosa (R14)\",\n
    \"PM\": \"Palmeira das Missões (R15 e R20)\",\n
    \"ER\": \"Erechim (R16)\",\n
    \"PF\": \"Passo Fundo (R17, R18 e R19)\",\n
    \"PE\": \"Pelotas (R21)\",\n
    \"BG\": \"Bagé (R22)\",\n
    \"CX\": \"Caxias do Sul (R23, R24, R25 e R26)\",\n
    \"CS\": \"Cachoeira do Sul (R27)\",\n
    \"SC\": \"Santa Cruz do Sul (R28)\",\n
    \"LJ\": \"Lajeado (R29 e R30)\"\n
}\n
\n
\n
# BR/RS state source format\n
# time, regions, cases, deaths, hospitalized, icu, recovered\n
URL_RS = \"https://raw.githubusercontent.com/seplagses/Covid-RS/master/data/Data_Regions_RS.csv\"\n
cols = ['time', 'cases', 'deaths', 'hospitalized', 'icu', 'recovered']\n
\n
def parse():\n
    r_rs = requests.get(URL_RS)\n
    if not r_rs.ok:\n
        print(f\"Failed to fetch {URL_RS}\", file=sys.stderr)\n
        exit(1)\n
    else:\n
        print(f\"Connected to {URL_RS}\", file=sys.stderr)\n
    r_rs.close()\n
\n
    dataframe = pd.read_csv(URL_RS, quoting=csv.QUOTE_NONE, header=0, skiprows=2)\n
    print(f\"data {dataframe.head()}\", file=sys.stderr)\n
    print(list(dataframe), file=sys.stderr)\n
\n
    try:\n
        dummy = pd.get_dummies(dataframe['regions'])\n
        dataframe = pd.concat([dataframe, dummy], axis=1)\n
        dataframe = dataframe.fillna(0)\n
        regions_name = dataframe.iloc[:, 7:].columns\n
        print(regions_name)\n
        dataframe_region = dataframe[['time', 'cases', 'deaths', 'hospitalized', 'icu', 'recovered']].copy()\n
        dataframe_region['cases'] = dataframe_region['cases'].astype(int)\n
        dataframe_region['deaths'] = dataframe_region['deaths'].astype(int)\n
        dataframe_region['hospitalized'] = dataframe_region['hospitalized'].astype(int)\n
        dataframe_region['icu'] = dataframe_region['icu'].astype(int)\n
        dataframe_region['recovered'] = dataframe_region['recovered'].astype(int)\n
        print(f\"data: {dataframe_region.head()}\", file=sys.stderr)\n
    except DataError:\n
        print(f\"error: {DataError}\", file=sys.stderr)\n
\n
    region_tables = {}\n
    for region in regions_name:\n
        region_tables['-'.join(['RS', region])] = dataframe_region[dataframe[region] == 1].values.tolist()\n
\n
    store_data(region_tables, 'brazil_rs', cols)\n
","sub_path":"data/parsers/brazil_rs.py","file_name":"brazil_rs.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"12578460","text":"from lib.fibheap import FibonacciHeap\n
from lib.SimpleNode import SimpleNode\n
import json\n
\n
\n
def peeling(node_dict, total_C_degree, total_q_degree, fib_heap, q, lambda1, lambda2):\n
    n = len(node_dict)\n
    avg_degree = float(total_C_degree) / n\n
    q_avg = total_q_degree / n\n
    # outputs we want\n
    max_C_avg = avg_degree\n
    S_size = n\n
\n
    for i in range(n - 1):\n
\n
        if i % 50000 == 0:\n
            print(i, max_C_avg, q_avg)\n
\n
        # find min node from graph (remove from heap)\n
        node_to_remove = fib_heap.extract_min().value\n
        for neighbor in node_dict[node_to_remove].neighbor_dict.keys():\n
\n
            # get dictionary that has all edges between two nodes\n
            C_degree_loss = node_dict[node_to_remove].neighbor_dict[neighbor][0]\n
            node_dict[neighbor].Cdegree -= C_degree_loss\n
            q_degree_loss = node_dict[node_to_remove].neighbor_dict[neighbor][1]\n
            node_dict[neighbor].qdegree -= q_degree_loss\n
\n
            # here the key can actually be increased\n
            if neighbor !=
node_to_remove:\n fib_heap.decrease_key(node_dict[neighbor].fib_node, node_dict[neighbor].Cdegree)\n del node_dict[neighbor].neighbor_dict[node_to_remove]\n total_C_degree -= C_degree_loss\n total_q_degree -= q_degree_loss\n\n del node_dict[node_to_remove]\n avg_degree = (float)(total_C_degree) / (n - i - 1)\n q_avg = total_q_degree / (n - i - 1)\n S_size = n - i - 1\n if q_avg > q * lambda2 - lambda1:\n if len(node_dict)<100:\n print(list(node_dict))\n return True, avg_degree, q_avg, S_size\n\n return False, max_C_avg, q_avg, S_size\n\n\nclass RiskNode:\n degree = None\n total_degree = None\n neighbor_dict = None\n papercount = None\n\n def __init__(self, n):\n self.degree = [0] * n\n self.neighbor_dict = {}\n self.total_degree = 0\n self.papercount = 0\n\n # type is int from 0 to len(degree)-1\n def increase_neighbor(self, name, type, degree):\n if name not in self.neighbor_dict:\n self.neighbor_dict[name] = {type: degree}\n else:\n if type not in self.neighbor_dict[name]:\n self.neighbor_dict[name][type] = degree\n else:\n self.neighbor_dict[name][type] += degree\n self.degree[type] += degree\n\n def set_neighbor_risk(self, name, degree):\n self.neighbor_dict[name][1] = degree\n self.degree[1] += degree\n\n\ndef process_tmdb_file(file_path):\n actor_dict = {}\n relation_list = json.load(open(file_path))\n for relation in relation_list:\n # print(len(relation_list))\n weight = relation['popularity'] * relation['possibility']\n risk = relation['popularity'] * relation['possibility'] * (1 - relation['possibility'])\n if risk == 0:\n continue\n if relation['actors'][0] not in actor_dict:\n actor_dict[relation['actors'][0]] = RiskNode(2)\n if relation['actors'][1] not in actor_dict:\n actor_dict[relation['actors'][1]] = RiskNode(2)\n actor_dict[relation['actors'][0]].increase_neighbor(relation['actors'][1],0,weight)\n actor_dict[relation['actors'][1]].increase_neighbor(relation['actors'][0],0,weight)\n actor_dict[relation['actors'][0]].set_neighbor_risk(relation['actors'][1],-risk)\n actor_dict[relation['actors'][1]].set_neighbor_risk(relation['actors'][0],-risk)\n return actor_dict\n\ndef process_dblp_file(file_path):\n author_dict = {}\n relation_list = json.load(open(file_path))\n for relation in relation_list:\n\n weight = relation['popularity'] * relation['possibility']\n risk = relation['popularity'] * relation['possibility'] * (1 - relation['possibility'])\n if risk == 0:\n continue\n if relation['actors'][0] not in author_dict:\n author_dict[relation['actors'][0]] = RiskNode(2)\n if relation['actors'][1] not in author_dict:\n author_dict[relation['actors'][1]] = RiskNode(2)\n author_dict[relation['actors'][0]].increase_neighbor(relation['actors'][1],0,weight)\n author_dict[relation['actors'][1]].increase_neighbor(relation['actors'][0],0,weight)\n author_dict[relation['actors'][0]].set_neighbor_risk(relation['actors'][1],-risk)\n author_dict[relation['actors'][1]].set_neighbor_risk(relation['actors'][0],-risk)\n return author_dict\n\ndef process_PPI_file(file_path):\n author_dict = {}\n relation_list = json.load(open(file_path))\n for relation in relation_list:\n\n weight = relation['weight'] * relation['possibility']\n risk = relation['weight'] * relation['possibility'] * (1 - relation['possibility'])\n if risk == 0:\n continue\n if relation['protein'][0] not in author_dict:\n author_dict[relation['protein'][0]] = RiskNode(2)\n if relation['protein'][1] not in author_dict:\n author_dict[relation['protein'][1]] = RiskNode(2)\n 
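        # Aside (annotation, not part of the original file): for an uncertain edge\n
        # with payoff w and existence probability p, the two quantities computed\n
        # above are\n
        #     weight = w * p            (expected payoff)\n
        #     risk   = w * p * (1 - p)  (variance-like penalty: maximal at p = 0.5,\n
        #                                zero at p = 0 and p = 1)\n
        # e.g. w = 10, p = 0.8 gives weight = 8.0 and risk = 1.6; the calls just\n
        # below store risk negated so the peeling phase treats it as a cost.\n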
        author_dict[relation['protein'][0]].increase_neighbor(relation['protein'][1],0,weight)\n
        author_dict[relation['protein'][1]].increase_neighbor(relation['protein'][0],0,weight)\n
        author_dict[relation['protein'][0]].set_neighbor_risk(relation['protein'][1],-risk)\n
        author_dict[relation['protein'][1]].set_neighbor_risk(relation['protein'][0],-risk)\n
    return author_dict\n
\n
\n
def risk_averse_peel(uncertain_file='tmdb', filepath='../datasets/tmdb/tmdb_2017.json', precision=0.02):\n
    # peeling preprocess for Tmdb\n
    if uncertain_file == 'tmdb':\n
        node_dict = process_tmdb_file(filepath)\n
\n
    # peeling preprocess for DBLP\n
    elif uncertain_file == 'dblp':\n
        node_dict = process_dblp_file(filepath)\n
\n
    # peeling preprocess for PPI datasets\n
    elif uncertain_file == 'ppi':\n
        node_dict = process_PPI_file(filepath)\n
\n
    else:\n
        raise Exception('uncertain dataset file type not expected!')\n
\n
    result = {'risk':{},'weight':{},'size':{}}\n
\n
    pos_count = 0\n
    n = len(node_dict)\n
    print(\"initially the graph will have \" + str(n) + \" nodes\")\n
    lambda2 = 1\n
    for node in node_dict.keys():\n
        for neighbor in node_dict[node].neighbor_dict.keys():\n
            # if 0 in node_dict[node].neighbor_dict[neighbor]:\n
            pos_count += node_dict[node].neighbor_dict[neighbor][0]\n
    rho_list = [0.5, 1, 2, 10]\n
    C_list = [0.25,0.5,1,2,3,4,5,6]\n
    for rho in rho_list:\n
        print(\"!!!!!\")\n
        print(\"now we have rho as \", rho)\n
        print(\"!!!!!!\")\n
        for C in C_list:\n
            print(\"!!!!!\")\n
            print(\"now we have C as \", C)\n
            print(\"!!!!!!\")\n
            lambda1 = rho * lambda2\n
            lowbound = 0\n
            upbound = 20\n
            # upbound = (pos_count + lambda1 * n) / lambda2\n
\n
            accelerate_flag = True\n
            while True:\n
                # peeling with edge = pos - q * neg; find if there exists a subgraph whose density > q * lambda2 - lambda1\n
                # first build the fib heap based on q\n
                # (both branches below compute the same midpoint)\n
                if accelerate_flag:\n
                    q = lowbound + (upbound - lowbound) / 2\n
                else:\n
                    q = (upbound + lowbound) / 2\n
                node_dict_q = {}\n
                total_C_degree = 0\n
                total_q_degree = 0\n
                fib_heap = FibonacciHeap()\n
                for node in node_dict.keys():\n
                    node_dict_q[node] = SimpleNode()\n
                    for neighbor in node_dict[node].neighbor_dict.keys():\n
                        C_temp_degree_each = 0\n
                        q_temp_degree_each = 0\n
                        # here we already store disabled interactions as negative values\n
                        C_temp_degree_each += node_dict[node].neighbor_dict[neighbor][1]\n
                        q_temp_degree_each += q * node_dict[node].neighbor_dict[neighbor][1]\n
                        C_temp_degree_each += C * node_dict[node].neighbor_dict[neighbor][0]\n
                        q_temp_degree_each += node_dict[node].neighbor_dict[neighbor][0]\n
                        node_dict_q[node].increase_neighbor(neighbor, C_temp_degree_each, q_temp_degree_each)\n
                        # to avoid influence from a self-loop\n
                        if node == neighbor:\n
                            total_C_degree += C_temp_degree_each\n
                            total_q_degree += q_temp_degree_each\n
                    node_dict_q[node].fib_node = fib_heap.insert(node_dict_q[node].Cdegree, node)\n
                    total_C_degree += node_dict_q[node].Cdegree\n
                    total_q_degree += node_dict_q[node].qdegree\n
                # print(total_C_degree, total_q_degree)\n
                total_q_degree = total_q_degree / 2\n
                total_C_degree = total_C_degree / 2\n
                print(total_C_degree, total_q_degree)\n
                exist_flag, max_avg, q_avg, S_size = peeling(node_dict_q, total_C_degree, total_q_degree, fib_heap, q,\n
                                                             lambda1, lambda2)\n
                print(q, exist_flag, max_avg, q_avg, S_size)\n
                if exist_flag:\n
                    if accelerate_flag:\n
                        accelerate_flag = False\n
                    if q - lowbound < precision and upbound - q < precision:\n
                        weight = (max_avg-q_avg/q)/(C-1/q)\n
                        risk = C*weight - max_avg\n
                        print(\"~~~~~~~~~~~~~\")\n
                        print(\"rho, C, result q, corresponding (max density) subgraph's size, density, average risk:\")\n
                        print(rho, C, q, S_size, weight, risk)\n
                        if C not in result['size']:\n
                            result['size'][C] = {}\n
                        result['size'][C][rho] = S_size\n
                        if C not in result['risk']:\n
                            result['risk'][C] = {}\n
                        result['risk'][C][rho] = risk\n
                        if C not in result['weight']:\n
                            result['weight'][C] = {}\n
                        result['weight'][C][rho] = weight\n
                        print(\"~~~~~~~~~~~~~\")\n
                        break\n
                    else:\n
                        lowbound = q\n
                else:\n
                    upbound = q\n
\n
    print(result)\n
\n
\n
if __name__ == \"__main__\":\n
    risk_averse_peel()\n
","sub_path":"risk_averse/peel_risk_averse.py","file_name":"peel_risk_averse.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"421256170","text":"#!/usr/bin/env python3\n
import os\n
import sys\n
import gzip\n
\n
self_name = os.path.basename(__file__)\n
usage_mesg = 'Usage: %s ' % self_name\n
\n
if len(sys.argv) != 3:\n
    sys.stderr.write('%s\\n' % usage_mesg)\n
    sys.exit(1)\n
\n
filename_fa = sys.argv[1]\n
seq_type = sys.argv[2]\n
\n
if not os.access(filename_fa, os.R_OK):\n
    sys.stderr.write('%s\\n' % usage_mesg)\n
    sys.exit(1)\n
\n
if seq_type not in ['chromosome', 'scaffold']:\n
    sys.stderr.write('%s\\n' % usage_mesg)\n
    sys.exit(1)\n
\n
f_fa = open(filename_fa, 'r')\n
if filename_fa.endswith('.gz'):\n
    f_fa = gzip.open(filename_fa, 'rt')\n
\n
for line in f_fa:\n
    if line.startswith('>'):\n
        tmp_h = line.strip().lstrip('>')\n
\n
        if seq_type == 'chromosome':\n
            if tmp_h.startswith('chrUn'):\n
                is_print = 0\n
            else:\n
                is_print = 1\n
        elif seq_type == 'scaffold':\n
            if tmp_h.startswith('chrUn'):\n
                is_print = 1\n
            else:\n
                is_print = 0\n
        else:\n
            sys.stderr.write('Unknown seq_type: %s\\n' % seq_type)\n
            sys.exit(1)\n
\n
        if is_print > 0:\n
            print(line.strip())\n
    elif is_print > 0:\n
        print(line.strip())\n
f_fa.close()\n
","sub_path":"utils/fasta-select_chromosome+scaffolds.py","file_name":"fasta-select_chromosome+scaffolds.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"599883506","text":"#!/usr/bin/env python3.5\n
import argparse\n
import grpc\n
import os\n
import sys\n
import json\n
from modules.Id_Manager import IdManager\n
\n
\n
import p4runtime_lib.bmv2\n
from p4runtime_lib.error_utils import printGrpcError\n
from p4runtime_lib.switch import ShutdownAllSwitchConnections\n
import p4runtime_lib.helper\n
\n
from scapy.all import Packet\n
from scapy.all import BitField\n
from scapy.layers.inet import IP\n
from scapy.layers.l2 import Ether\n
\n
sys.path.append(\n
    os.path.join(os.path.dirname(os.path.abspath(__file__)),\n
                 '../../utils'))\n
\n
class Packet_In(Packet):\n
    name = \"Packet_In\"\n
\n
    fields_desc = [\n
        BitField('port', 0, 9)\n
    ]\n
\n
def load_switches_conf():\n
    data = {}\n
    # open for reading; mode 'w' would truncate the config file before json.load\n
    with open('config/switches.json', 'r') as json_file:\n
        data = json.load(json_file)\n
\n
    return data\n
\n
def load_pr_rules():\n
    rules = {}\n
    with open('config/pr_rules.json', 'r') as json_file:\n
        rules = json.load(json_file)\n
\n
    return rules\n
\n
def load_sdnc_pkt_in_rules():\n
    rules = {}\n
    with open('config/port_knock_in_rules.json', 'r') as json_file:\n
        rules = json.load(json_file)\n
\n
    return rules\n
\n
def load_fwd_rules():\n
    rules = {}\n
    with open('config/fwd_rules.json', 'r') as json_file:\n
        rules = json.load(json_file)\n
\n
    return rules\n
\n
def load_pk_rules():\n
    rules = {}\n
    with open('config/portknocking_rules.json', 'r') as json_file:\n
        rules = json.load(json_file)\n
\n
    return rules\n
\n
\n
def connect_to_switches(switches_config):\n
    switches = []\n
    for switch in switches_config:\n
        switches.append(
            p4runtime_lib.bmv2.Bmv2SwitchConnection(\n
                name=switch[\"name\"],\n
                address=switch[\"address\"],\n
                device_id=switch[\"device_id\"],\n
                proto_dump_file=switch[\"proto_dump_file\"]))\n
\n
    return switches\n
\n
def send_master_arbitration_updates(switches):\n
    for switch in switches:\n
        switch.MasterArbitrationUpdate()\n
\n
def set_pipelines(switches, p4info_helper, bmv2_file_path):\n
\n
    for switch in switches:\n
        switch.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,\n
                                           bmv2_json_file_path=bmv2_file_path)\n
\n
\n
def install_direct_forwarding_rules(p4info_helper, switches):\n
    # ingress ports per switch; a set of lists is unhashable, so use a list\n
    # (assumed: ports[idx] lists the ingress ports of switches[idx])\n
    ports = [\n
        [1, 2],\n
        [1]\n
    ]\n
\n
    for idx, switch in enumerate(switches):\n
        for port in ports[idx]:\n
            table_entry = p4info_helper.buildTableEntry(\n
                table_name=\"IngressImpl.inc.port_tb\",\n
                match_fields={\n
                    \"standard_metadata.ingress_port\": port\n
                },\n
                action_name=\"IngressImpl.inc.direct_forward\",\n
                action_params={}\n
            )\n
\n
            switch.WriteTableEntry(table_entry)\n
\n
def install_rules_protected_services(p4info_helper, switches, switches_conf, rules):\n
    for idx, switch in enumerate(switches):\n
\n
        sw = switches_conf[idx][\"name\"]\n
        rules_l = rules[sw][\"rules\"]\n
        actions_l = rules[sw][\"actions\"]\n
\n
        for index, rule in enumerate(rules_l):\n
            table_entry = p4info_helper.buildTableEntry(\n
                table_name=\"IngressImpl.pr.protected_service_tb\",\n
                match_fields={\n
                    \"meta.pk_metadata.stage\": rule[\"stage\"],\n
                    \"hdr.ipv4.dstAddr\": rule[\"ipDstAddr\"],\n
                    \"hdr.tcp.dstPort\": rule[\"tcpDstPort\"]\n
                },\n
                action_name=actions_l[index][\"name\"],\n
                action_params={}\n
            )\n
\n
            switch.WriteTableEntry(table_entry)\n
\n
def install_port_knock_in_rules(p4info_helper, switches, switches_conf, rules):\n
    for idx, switch in enumerate(switches):\n
\n
        sw = switches_conf[idx][\"name\"]\n
        rules_l = rules[sw][\"rules\"]\n
        actions_l = rules[sw][\"actions\"]\n
\n
        for index, rule in enumerate(rules_l):\n
            table_entry = p4info_helper.buildTableEntry(\n
                table_name=\"IngressImpl.pn_in.portknocking_in_tb\",\n
                match_fields={\n
                    \"hdr.ipv4.dstAddr\": rule[\"ipDstAddr\"],\n
                    \"hdr.tcp.dstPort\": rule[\"tcpDstPort\"],\n
                    \"hdr.tcp.ctrl\": rule[\"tcpCtrl\"]\n
                },\n
                action_name=actions_l[index][\"name\"],\n
                action_params={}\n
            )\n
\n
            switch.WriteTableEntry(table_entry)\n
\n
def install_forwarding_rules(p4info_helper, switches, switches_conf, rules):\n
\n
    for idx, switch in enumerate(switches):\n
\n
        sw = switches_conf[idx][\"name\"]\n
        rules_l = rules[sw][\"rules\"]\n
        actions_l = rules[sw][\"actions\"]\n
\n
        for index, rule in enumerate(rules_l):\n
            table_entry = p4info_helper.buildTableEntry(\n
                table_name=\"IngressImpl.fwd.fwd_tb\",\n
                match_fields={\n
                    \"hdr.ipv4.dstAddr\": rule[\"ipDstAddr\"]\n
                },\n
                action_name=actions_l[index][\"name\"],\n
                action_params={\n
                    \"dstAddr\": actions_l[index][\"values\"][\"dstAddr\"],\n
                    \"port\": actions_l[index][\"values\"][\"port\"]\n
                }\n
            )\n
\n
            switch.WriteTableEntry(table_entry)\n
\n
def install_pip2id_rules(p4info_helper, switch, src_ip, new_id):\n
\n
    table_entry = p4info_helper.buildTableEntry(\n
        table_name=\"IngressImpl.pr.ip_2_id_tb\",\n
        match_fields={\n
            \"hdr.ipv4.srcAddr\": src_ip\n
        },\n
        action_name=\"IngressImpl.pr.id_found\",\n
        action_params={\n
            \"current_id\": new_id\n
        }\n
    )\n
\n
    switch.WriteTableEntry(table_entry)\n
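\n
# Aside (hypothetical refactoring, not part of the original file): the installers\n
# above all repeat the same buildTableEntry/WriteTableEntry pattern and could\n
# share one generic helper using only the p4runtime_lib calls already shown:\n
def install_entries(p4info_helper, switch, table_name, rules):\n
    # rules: iterable of (match_fields, action_name, action_params) tuples\n
    for match_fields, action_name, action_params in rules:\n
        table_entry = p4info_helper.buildTableEntry(\n
            table_name=table_name,\n
            match_fields=match_fields,\n
            action_name=action_name,\n
            action_params=action_params)\n
        switch.WriteTableEntry(table_entry)\n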
table_name=\"IngressImpl.pn_in.portknocking_in_tb\",\n match_fields={\n \"meta.pk_metadata.stage\": rule[\"stage\"],\n \"hdr.ipv4.dstAddr\": rule[\"ipDstAddr\"],\n \"hdr.tcp.dstPort\": rule[\"tcpDstPort\"]\n },\n action_name=actions_l[index][\"name\"],\n action_params={}\n )\n\n switch.WriteTableEntry(table_entry)\n\n\n\ndef main(p4info_file_path, bmv2_file_path):\n # Instantiate a P4Runtime helper from the p4info file\n\n ip2id_l = {}\n id_manager = IdManager(2**16 - 1)\n\n p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)\n\n switches_conf = load_switches_conf()\n pr_rules = load_pr_rules()\n sdnc_pi_rules = load_sdnc_pkt_in_rules()\n fwd_rules = load_fwd_rules()\n pk_rules = load_pk_rules()\n\n try:\n\n switches = connect_to_switches(switches_conf[\"switches\"])\n\n send_master_arbitration_updates(switches)\n\n set_pipelines(switches, p4info_helper, bmv2_file_path)\n\n install_direct_forwarding_rules(p4info_helper, switches)\n\n install_rules_protected_services(p4info_helper, switches, switches_conf, pr_rules[\"switches\"])\n\n install_port_knock_in_rules(p4info_helper, switches, switches_conf, sdnc_pi_rules[\"switches\"])\n\n install_forwarding_rules(p4info_helper, switches, switches_conf, fwd_rules[\"switches\"])\n\n switch_2 = switches[1]\n while True:\n packet_in = switch_2.PacketIn()\n if packet_in.WhichOneof('update') == 'packet':\n pkt = Ether(_pkt=packet_in.packet.payload)\n\n src_ip = pkt.getlayer(IP).src\n print(\"SRC IP: \"+str(src_ip))\n\n new_id = id_manager.get_id()\n print(\"New ID: \"+str(new_id))\n ip2id_l[str(src_ip)] = new_id\n\n install_pip2id_rules(p4info_helper, switches, src_ip, new_id)\n install_pk_rules(p4info_helper, switches, switches_conf, pk_rules[\"switches\"])\n\n\n except KeyboardInterrupt:\n print(\" Shutting down.\")\n except grpc.RpcError as e:\n printGrpcError(e)\n\n ShutdownAllSwitchConnections()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='P4Runtime Controller')\n parser.add_argument('--p4info', help='p4info proto in text format from p4c',\n type=str, action=\"store\", required=False,\n default='./build/advanced_tunnel.p4.p4info.txt')\n parser.add_argument('--bmv2-json', help='BMv2 JSON file from p4c',\n type=str, action=\"store\", required=False,\n default='./build/advanced_tunnel.json')\n args = parser.parse_args()\n\n if not os.path.exists(args.p4info):\n parser.print_help()\n print(\"\\np4info file not found: %s\\nHave you run 'make'?\" % args.p4info)\n parser.exit(1)\n if not os.path.exists(args.bmv2_json):\n parser.print_help()\n print(\"\\nBMv2 JSON file not found: %s\\nHave you run 'make'?\" % args.bmv2_json)\n parser.exit(1)\n\n main(args.p4info, args.bmv2_json)\n","sub_path":"implementation_3/app/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":9004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492459114","text":"import socket\nimport sys\nimport cv2\nimport pickle\nimport numpy as np\nimport struct ## new\nimport zlib\nfrom keras.models import load_model\nimport time\nimport serial\n\nser=serial.Serial('/dev/ttyUSB0',baudrate=9600,timeout=100)\n\ndef getValues():\n Sensor=ser.readline().decode('UTF-8')\n datas= Sensor.split(\",\")\n return datas[0],datas[1]\n\ndef setValue():\n ser.write(b'>M \\r\\n')\n\n\ndef img_preprocess(img):\n img = img[60:135,:,:]\n img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n img = cv2.GaussianBlur(img, (3, 3), 0)\n img = cv2.resize(img, (200, 66))\n img = img/255\n return 
img\n\nmodel=load_model(\"model.h5\")\n\nHOST=''\nPORT=1234\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nprint('Socket created')\n\ns.bind((HOST,PORT))\nprint('Socket bind complete')\ns.listen(10)\nprint('Socket now listening')\n\nconn,addr=s.accept()\n\ndata = b\"\"\npayload_size = struct.calcsize(\">L\")\nprint(\"payload_size: {}\".format(payload_size))\nwhile True:\n while len(data) < payload_size:\n print(\"Recv: {}\".format(len(data)))\n data += conn.recv(4096)\n\n print(\"Done Recv: {}\".format(len(data)))\n packed_msg_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack(\">L\", packed_msg_size)[0]\n print(\"msg_size: {}\".format(msg_size))\n while len(data) < msg_size:\n data += conn.recv(4096)\n frame_data = data[:msg_size]\n data = data[msg_size:]\n\n frame=pickle.loads(frame_data, fix_imports=True, encoding=\"bytes\")\n frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n cv2.imshow('ImageWindow',frame)\n\n image=img_preprocess(frame)\n image=np.array([image])\n\n steering_angle=float(model.predict(image))\n \n\n s1,s2=getValues()\n\n \n speed=int(s1.replace(\"\\x00\",\"\"))\n speed_limit=10\n throttle=1.0-speed/speed_limit\n\n #bThrottle=bytearray(struct.pack(\"f\",steering_angle))\n #print(\"x\"+str(bThrottle))\n ser.write(b'>M t \\r\\n')\n\n '''\n for b in bThrottle:\n print(\"b\"+str(b))\n ser.write(b)\n '''\n print(\"steering angle {}\".format(steering_angle))\n key=cv2.waitKey(1)\n if key == 27:\n cv2.destroyAllWindows()\n break\n ","sub_path":"SendImagetoModel/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162317646","text":"\nimport pandas as pd\nimport numpy as np\nfrom itertools import groupby\nfrom Bio import AlignIO\n\nclass ReadData:\n \"\"\"\n This is a class for reading sequence data in various file formats.\n\n Attributes:\n data (dict of str: List): Store ids and sequence data as dict.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The constructor for ReadData class.\n\n Parameters:\n data (dict of str: List): Store ids and sequence data as dict.\n \"\"\"\n\n self.data = dict()\n\n\n def aln2pandas(self, file, aln):\n \"\"\"\n aln2pandas(self, file)\n The function to read input alignment using Biopython.\n\n Parameters:\n file (file object): The file object to read.\n aln_type (str): The type of alignment to tell Biopython.\n\n Returns:\n df (pandas.DataFrame of str): Ids, seqs stored as Pandas DataFrame.\n seqlength (int): Length of sequences in alignment.\n \"\"\"\n alignment = AlignIO.read(file, aln)\n seqlength = alignment.get_alignment_length()\n for record in alignment:\n id = str(record.id)\n seq = str(record.seq)\n self.data[id] = seq\n\n df = pd.DataFrame.from_dict(self.data, orient=\"index\")\n\n return df, seqlength\n\n def loci2db(self, file, locus_count):\n \"\"\"\n loci2db(self, file, locus_count)\n The function to read .loci input file.\n\n Parameters:\n file (file object): The .loci file object to read.\n locus_count (int): Current locus number.\n\n Returns:\n locus (dict of str): Single locus in .loci file.\n locus_count (int): Current locus numberself.\n ind_count (int): Number of individuals in locus.\n seq_len (int): Length of sequences at locus.\n \"\"\"\n\n locus = dict()\n ind_count = 0\n\n for line in file:\n line = line.strip()\n lines = line.split()\n id = lines[0]\n seq = lines[1]\n\n if line.startswith(\">\"):\n locus[id] = seq\n ind_count += 1\n seq_len = len(seq)\n\n 
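            # Aside (usage sketch, not part of the original file; calling convention\n
            # inferred from the signature and docstring above): loci2db is a generator\n
            # yielding one locus per \"//\" delimiter, handled just below, e.g.\n
            #\n
            #     reader = ReadData()\n
            #     with open(\"example.loci\") as fh:\n
            #         for locus, n_loci, n_inds, seq_len in reader.loci2db(fh, 0):\n
            #             print(n_loci, n_inds, seq_len)\n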
elif line.startswith(\"//\"): # Locus delimiter.\n locus_count += 1\n yield locus, locus_count, ind_count, seq_len\n ind_count = 0 # Reset ind_count\n locus = dict() # Clear locus dict\n","sub_path":"read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297305344","text":"from django.db.models.query import QuerySet\nfrom django.http import request\nfrom django.shortcuts import get_list_or_404, render\nfrom django.shortcuts import redirect\nfrom django.shortcuts import get_object_or_404\nfrom .models import Guide\nfrom django.utils import timezone\nfrom django.contrib import auth\nfrom django.db.models import Q\n\ndef guide_list (request) :\n lists = Guide.objects.all()\n return render(request,'guide_list.html',{'guide_list':lists})\n\ndef guide_detail(request,id) :\n guide_detail = get_object_or_404(Guide,pk=id)\n return render (request,'guide_detail.html',{'guide_detail':guide_detail})\n\n\ndef guide_new (request) :\n \n if (request.method == 'POST') :\n newGuide = Guide.objects.create(title = request.POST['title'],\n content = request.POST['content'],\n writer = request.user.username,\n date = timezone.localtime(),\n location = request.POST['location'],\n price = request.POST['price'],\n )\n if (request.FILES.get('image') is not None) :\n newGuide.image = request.FILES['image']\n newGuide.save()\n \n return guide_list(request)\n \n if (request.method == 'GET') :\n return render(request,'guide_new.html')\n\ndef guide_update (request, id) :\n if (request.method == 'GET') :\n update_guide = get_object_or_404(Guide,pk=id)\n \n return render (request,'guide_update.html',{'guide_update':update_guide})\n\n elif (request.method == 'POST') :\n update_guide = Guide.objects.get(pk=id)\n update_guide.title= request.POST['title']\n update_guide.content= request.POST['content']\n update_guide.location= request.POST['location']\n update_guide.price= request.POST['price']\n update_guide.image= request.FILES['image']\n update_guide.save() \n \n return guide_list(request)\n\ndef guide_delete (request, id) :\n if (request.method == 'GET') :\n guide = get_object_or_404(Guide,pk=id) \n guide.delete()\n return guide_list(request)\n\ndef guide_search_by_location (request) :\n search_key = request.GET['location']\n # search_list = Guide.objects.all()\n \n search_list = Guide.objects.filter(location=search_key)\n \n \n return render(request,'guide_list.html',{'guide_list':search_list})","sub_path":"Guide/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556561115","text":"import argparse\nimport cv2\nfrom deepracer_viz.gradcam import load_model_session, gradcam, blend_gradcam_image\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nimport os\n\n\n# Function for processing image and applying GradCAM\n# Using GPU helps, make sure to have CUDA installed correctly:\n# You should need CUDA 10: check with `nvcc --version` : https://www.tensorflow.org/install/source#linux\n# https://developer.nvidia.com/cuda-10.0-download-archive?target_os=Linux&target_arch=x86_64&target_distro=Ubuntu&target_version=1804&target_type=debnetwork\n# `sudo apt-get install cuda-libraries-10.0`\ndef process_image(i, tf_session):\n # Prep image for GradCam\n grad_frame = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)\n # Get GradCAM\n cam = gradcam(tf_session, 
grad_frame, args.action)\n # Overlay GradCAM with original image\n return blend_gradcam_image(i, cam)\n\n\ndef main(args):\n global bytes\n CODEC = \"X264\"\n FPS = args.fps\n\n # if the ip argument exists then enter into live view mode with video from physical DeepRacer\n if args.output:\n fourcc = cv2.VideoWriter_fourcc(*CODEC)\n raw_path = os.path.join(args.output, \"raw\")\n print(\"Saving images to \" + raw_path)\n gradcam_path = os.path.join(args.output, \"gradcam\")\n print(\"Saving gradcam images to \" + gradcam_path)\n if not os.path.exists(raw_path):\n os.mkdir(raw_path)\n if not os.path.exists(gradcam_path):\n os.mkdir(gradcam_path)\n\n if args.ip:\n # Setup file to save the output to\n with requests.Session() as s:\n URL = \"https://\" + str(args.ip) + \"/\"\n post_login_url = URL + \"/login\"\n video_url = URL + \"/route?topic=/video_mjpeg&width=480&height=360\"\n\n # Get the CSRF Token\n response = s.get(URL, verify=False)\n soup = BeautifulSoup(response.text, 'lxml')\n csrf_token = soup.select_one('meta[name=\"csrf-token\"]')['content']\n headers = {'X-CSRFToken': csrf_token}\n\n # Login to the DeepRacer web interface with Post\n if not args.password:\n print(\"ERROR: User must add password for DeepRacer before stream can be accessed\")\n exit(1)\n payload = {'password': args.password}\n post = s.post(post_login_url, data=payload, headers=headers, verify=False)\n\n # Get the video stream\n video_stream = s.get(video_url, stream=True, verify=False)\n\n # Load the CNN\n sess = load_model_session(args.model)\n\n if video_stream.status_code == 200:\n print(\"Video Connected!\")\n last_image_time = time.time()\n # Bytes to build Jpeg\n bytes = bytes()\n for chunk in video_stream.iter_content(chunk_size=1024):\n bytes += chunk\n a = bytes.find(b'\\xff\\xd8') # Marker byte pair\n b = bytes.find(b'\\xff\\xd9') # Trailing byte pair\n # If both byte pairs on in the stream then build the jpeg\n if a != -1 and b != -1:\n jpg = bytes[a:b + 2]\n bytes = bytes[b + 2:]\n\n if time.time() - last_image_time > 1.0 / args.fps:\n i = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)\n out_frame = process_image(i, sess)\n # cv2.imshow('Raw Image', i)\n cv2.imshow('GradCAM', cv2.resize(out_frame, (1920, 1440)))\n last_image_time = time.time()\n # Save the images to file if args.output is specified\n if args.output:\n cv2.imwrite(os.path.join(raw_path, str(last_image_time) + '.jpg'), i)\n cv2.imwrite(os.path.join(gradcam_path, str(last_image_time) + '.jpg'), out_frame)\n if cv2.waitKey(1) == 27: # Press esc to stop processing images\n break\n else:\n print(\"Received unexpected status code {}\".format(video_stream.status_code))\n\n else:\n capture = cv2.VideoCapture(args.input_file)\n\n sess = load_model_session(args.model)\n while capture.isOpened():\n ret, frame = capture.read()\n if ret:\n\n # Apply GradCAM\n out_frame = process_image(frame, sess)\n\n cv2.imshow('frame', out_frame)\n if args.output:\n last_image_time = time.time()\n cv2.imwrite(os.path.join(raw_path, str(last_image_time) + '.jpg'), frame)\n cv2.imwrite(os.path.join(gradcam_path, str(last_image_time) + '.jpg'), out_frame)\n\n if cv2.waitKey(int(np.ceil(1000.0 / FPS))) & 0xFF == ord('q'):\n break\n else:\n break\n\n capture.release()\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # non-optional arguments\n parser.add_argument(\"model\", help=\"The .pb file containing the model.\")\n parser.add_argument(\"action\", type=int, help=\"The index of the action in the 
action space to visualize\")\n\n # args for static input and output video (optional)\n parser.add_argument(\"-f\", \"--input_file\", help=\"The name of the MP4 file to process.\")\n parser.add_argument(\"--fps\", help=\"FPS of the output video.\", type=int, default=10)\n parser.add_argument(\"-o\", \"--output\", help=\"MP4 output file to store the gradcam output.\")\n\n # args for live stream from physical DeepRacer (optional)\n parser.add_argument(\"-i\", \"--ip\", help=\"The IP of the DeepRacer if you are using live feed.\")\n parser.add_argument(\"-p\", \"--password\", help=\"The password for your DeepRacer if doing live feed.\")\n\n args = parser.parse_args()\n\n main(args)\n","sub_path":"tools/gradcam.py","file_name":"gradcam.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419285390","text":"import os\nimport json\nimport shutil\nimport subprocess\nfrom send2trash import send2trash\n\n\nclass OSUtil(object):\n def __init__(self):\n super(OSUtil, self).__init__()\n\n def renameFile(self, dir, old, new):\n os.rename(\n os.path.join(dir, old),\n os.path.join(dir, new)\n )\n\n def renameDirectory(self, path, new_name):\n try:\n base, name = os.path.split(path)\n os.rename(\n src=path,\n dst=os.path.join(base, new_name)\n )\n return True\n except:\n return False\n\n def removeToTrash(self, path):\n try:\n send2trash(path)\n return True\n except:\n return False\n\n def removeFile(self, path):\n if os.path.isfile(path):\n try:\n os.remove(path)\n return True\n except:\n return False\n\n def removeDir(self, path, ignore_errors=False):\n if os.path.isdir(path):\n try:\n shutil.rmtree(path, bool(ignore_errors))\n return True\n except:\n return False\n\n def makeDir(self, path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n return True\n except:\n return False\n else:\n return False\n\n def copyFile(self, src, dst):\n try:\n shutil.copyfile(src, dst)\n return True\n except:\n return False\n\n def saveToJSON(self, obj, path):\n if not os.path.exists(path):\n new_file = open(path, 'w')\n new_file.close()\n if os.path.exists(path):\n json.dump(\n obj=obj,\n fp=open(path, 'w'),\n indent=4\n )\n else:\n json.dump(\n obj=obj,\n fp=open(path, 'w'),\n indent=4\n )\n\n def loadFromJSON(self, path):\n if (os.path.exists(path)):\n result = json.load(fp=open(path))\n return result\n else:\n return None\n\n def goodFileName(self, name):\n bad = self.badFileNameSigns()\n for sign in name:\n if sign in bad:\n return False\n return True\n\n def badFileNameSigns(self):\n return ':*?<>|/\\\\'\n\n def hideDirectory(self, path):\n if os.path.isdir(path):\n cmd = \"\"\"attrib +s +h \"{dir}\" \"\"\".format(dir=path)\n os.system(cmd)\n\n def openOSExplorer(self, path):\n if os.path.isdir(path):\n path = path.replace('/', '\\\\')\n subprocess.Popen('explorer \"{dir}\"'.format(dir=path))\n\n\nOSUTIL = OSUtil()\n\n","sub_path":"Source/os_utilities.py","file_name":"os_utilities.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428939436","text":"import logging\nfrom flask import render_template, url_for, request, redirect, Blueprint\nfrom yahtzee.game.utils import (roll_dice, get_dice_imgs, get_categories, \n update_score, next_turn)\nfrom yahtzee.game.vars import Game\nfrom yahtzee.game.forms import CategoryForm, PlayerNamesForm\n\nlog = logging.getLogger(__name__)\n\ngame = Blueprint('game', 
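    # aside (standard Flask usage, not shown in this record): the Blueprint being\n
    # constructed here only takes effect once registered on the app, e.g.\n
    #     from flask import Flask\n
    #     from yahtzee.game.routes import game\n
    #     app = Flask(__name__)\n
    #     app.register_blueprint(game)\n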
__name__)\n\n\n@game.route(\"/game/new/\", methods=['GET', 'POST'])\ndef new_game(p=0):\n if 'cur_game' not in globals() or p != 0:\n global cur_game\n cur_game = Game(p) # p is the number of players\n\n form = PlayerNamesForm()\n if form.validate_on_submit():\n # Assign name values obtained from PlayerNamesForm\n cur_game.p_names[1] = form.player1.data\n cur_game.p_names[2] = form.player2.data\n cur_game.p_names[3] = form.player3.data\n cur_game.p_names[4] = form.player4.data\n log.debug(cur_game.p_names)\n\n return redirect(url_for('game.play'))\n\n return render_template('new.html', title='New Game', form=form, game=cur_game)\n\n\n@game.route(\"/game/play\", methods=['GET', 'POST'])\ndef play():\n if 'cur_game' in globals():\n categories = get_categories(cur_game.dice) # List of tuples (key, val, str)\n select_categories = [(x, z) for x, y, z in categories] # 2 item tuple\n log.debug(categories)\n\n form = CategoryForm()\n form.update_categories(cats=select_categories) # Update categories when page loads\n if form.validate_on_submit():\n pick = form.category.data # Should return category key value\n log.debug(pick)\n \n for k, v, s in categories:\n if k == pick:\n update_score(pick, v)\n if next_turn() == False:\n cur_game.get_winner()\n return redirect(url_for('game.results'))\n else:\n form.update_categories()\n \n d_imgs = get_dice_imgs(cur_game.dice, cur_game.held) # Dice img string list\n return render_template('play.html', title='Play', game=cur_game, d_imgs=d_imgs, form=form)\n else:\n log.warning(\"Could not locate game in global variable list; Redirecting to '/game/new'\")\n return redirect(url_for('main.home'))\n\n\n@game.route(\"/game/roll\")\ndef roll():\n if 'cur_game' in globals():\n cur_game.dice = roll_dice(cur_game.dice, cur_game.held)\n cur_game.roll += 1 # Increment roll num\n\n return redirect(url_for('game.play'))\n else:\n log.warning(\"Could not roll dice; Redirecting to '/game/new'\")\n return redirect(url_for('main.home'))\n\n\n@game.route(\"/game/hold/\")\ndef hold(d):\n if d not in cur_game.held and cur_game.dice[d] != 0:\n cur_game.held.append(d)\n elif d in cur_game.held:\n cur_game.held.remove(d)\n\n return redirect(url_for('game.play'))\n\n\n@game.route(\"/game/results\")\ndef results():\n if 'cur_game' in globals():\n p_scores = [(pos, p) for pos, p in enumerate(cur_game.p_scores) if pos != 0]\n return render_template('results.html', title='Results', p_scores=p_scores, winner=cur_game.winner)\n else:\n log.warning(\"Could not locate game in global variable list; Redirecting to '/game/new'\")\n return redirect(url_for('main.home'))\n\n\n# Debug route for testing\n@game.route(\"/game/test\", methods=['GET', 'POST'])\ndef test():\n cats = [('ones', 'Ones - 1'), ('twos', 'Twos - 1'), ('threes', 'Threes - 1')]\n\n category_form = CategoryForm()\n category_form.update_categories(cats)\n if category_form.validate_on_submit():\n log.debug(f\"{category_form.content.data} was submitted.\")\n return redirect(url_for('game.test'))\n\n return render_template('test.html', title='Test', form=category_form)\n","sub_path":"yahtzee/game/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126588147","text":"import re\nimport plyj.parser as plyj\nimport plyj.model as model\n\n\n# Java File Processor\nclass ProcessorJAVA:\n def __init__(self, path):\n self.stringPattern = re.compile('\\\".*?\\\"', re.DOTALL)\n self.singleCommentPattern = 
re.compile('//.*\\n')\n
        self.blockCommentPattern = re.compile('/\\*(.*?)\\*/', re.DOTALL)\n
        self.parser = plyj.Parser()\n
        self.tree = self.parser.parse_file(path)\n
\n
    # Return the positions of strings present in the document.\n
    # Doesn't consider an escaped \\\" inside a string like string = \"something \\\" .\"\n
    def extractStringPositions(self, doc):\n
        stringPos = [(m.start(), m.end() - 1)\n
                     for m in re.finditer(self.stringPattern, doc)]\n
\n
        return stringPos\n
\n
    # Character scan to find comments in the code\n
    def extractComments(self, doc):\n
        comments = []\n
        commentsPos = []\n
        singleComment = -1\n
        blockComment = -1\n
        inString = False\n
        # Parse each character\n
        for i in range(len(doc)):\n
            # Check if a string is running\n
            if doc[i] == '\"':\n
                inString = not(inString)\n
            # Check if a comment is starting\n
            if not(inString) and doc[i] == '/':\n
                try:\n
                    if doc[i+1] == '/':\n
                        singleComment = i\n
                    elif doc[i+1] == '*':\n
                        blockComment = i\n
                except IndexError:\n
                    pass\n
            # If the comment is a single-line comment\n
            if singleComment != -1:\n
                if doc[i] == '\\n':\n
                    comment = doc[singleComment:i] + '\\n'\n
                    comments.append(comment)\n
                    commentsPos.append((singleComment, i-1))\n
                    singleComment = -1\n
            # If the comment is a block comment\n
            if blockComment != -1:\n
                try:\n
                    if doc[i] == '*' and doc[i+1] == '/':\n
                        comment = doc[blockComment:i+1] + '/'\n
                        comments.append(comment)\n
                        commentsPos.append((blockComment, i+1))\n
                        blockComment = -1\n
                except IndexError:\n
                    pass\n
\n
        return comments, commentsPos\n
\n
    # Walk the parse tree to find variable declarations\n
    def extractVariables(self, doc, stringPos=[]):\n
        declarations = []\n
        vDeclarators = []\n
        typeDeclarations = self.tree.type_declarations\n
        for decl in typeDeclarations:\n
            bodyDeclarations = decl.body\n
            for bDecl in bodyDeclarations:\n
                if (type(bDecl) == model.FieldDeclaration):\n
                    vDeclarators += bDecl.variable_declarators\n
                elif (type(bDecl) == model.MethodDeclaration):\n
                    mDeclBody = bDecl.body\n
                    for vDecl in mDeclBody:\n
                        if(type(vDecl) == model.VariableDeclaration):\n
                            vDeclarators += vDecl.variable_declarators\n
\n
        for decl in vDeclarators:\n
            declarations.append(decl.variable.name)\n
\n
        return declarations, len(declarations)\n
\n
    # Extract functions\n
    def extractFunctions(self, file):\n
        functions = []\n
        typeDeclarations = self.tree.type_declarations\n
        for decl in typeDeclarations:\n
            bodyDeclarations = decl.body\n
            for bDecl in bodyDeclarations:\n
                if(type(bDecl) == model.MethodDeclaration):\n
                    # append the method's own name, not the enclosing class name\n
                    functions.append(bDecl.name)\n
        return functions, len(functions)\n
\n
    # Extract classes\n
    def extractClasses(self, file):\n
        classes = []\n
        typeDeclarations = self.tree.type_declarations\n
        for decl in typeDeclarations:\n
            if(type(decl) == model.ClassDeclaration):\n
                classes.append(decl.name)\n
        return classes, len(classes)\n
","sub_path":"package/Processor/ProcessorJAVA.py","file_name":"ProcessorJAVA.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"59447407","text":"import matplotlib.pyplot as plt\n
import numpy as np\n
\n
def gauss_kernel(size = 5, sigma = 3):\n
    center = int(size / 2)\n
    kernel = np.zeros((size, size))\n
    for i in range(size):\n
        for j in range(size):\n
            diff = np.sqrt((i - center) ** 2 + (j - center) ** 2)\n
            kernel[i, j] = np.exp(-(diff ** 2) / (2 * sigma ** 2))\n
    return kernel / np.sum(kernel)\n
\n
\n
def filter(img, window_size=3):\n
    img2 = np.zeros_like(img)\n
    kernel = gauss_kernel(window_size)\n
    p = window_size//2\n
    for k in range(img.shape[2]): # foreach color channel\n
        for i in range(p, img.shape[0]-p): # foreach row\n
            for j
in range(p, img.shape[1]-p): # foreach column\n window = img[i-p:i+p+1, j-p:j+p+1, k]\n img2[i,j,k] = (kernel*window).sum()\n return img2\n\ndef main():\n img = plt.imread(\"H:\\\\git\\\\2019_IT\\\\Snail.png\")[:, :, :3] #file\n img2 = filter(img)\n\n fig, axs = plt.subplots(1,2)\n axs[0].imshow(img)\n axs[1].imshow(img2)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62538559","text":"# Imports\nimport tensorflow as tf\nimport pandas as pd\n\n# These are the headers in our dataframe\nCSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']\n \nSPECIES = ['Setosa', 'Versicolor', 'Virginica']\n\n# The following are constants that will save time\ntrain_path = tf.keras.utils.get_file(\n \"iris_training.csv\", \"https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv\")\ntest_path = tf.keras.utils.get_file(\n \"iris_test.csv\", \"https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv\")\n\n# We will use Keras (a tensorflow model) to help read data into a pandas dataframe\ntrain = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\ntest = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n\n# Seperating the thing we eventually want our model to predict\ntrain_y = train.pop('Species')\ntest_y = test.pop('Species')\n\n# Input Function\ndef input_fn(features, labels, training=True, batch_size=256):\n # Converts inputs to a data.Dataset object\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n\n # Shuffle and repeat if you are in training mode\n if training:\n dataset = dataset.shuffle(1000).repeat()\n\n return dataset.batch(batch_size)\n\n# Because feature columns are all numerical, we can just loop through\nfeature_columns = []\nfor key in train.keys():\n feature_columns.append(tf.feature_column.numeric_column(key=key))\n\n# Creating the Estimator: going to create a Deep Neural Network\n# The DNN has 2 hidden layers with 30 nodes and 10 hidden nodes each\nclassifier = tf.estimator.DNNClassifier(\n # Init Feature Columns\n feature_columns=feature_columns,\n # These are the hidden units\n hidden_units=[30, 10],\n # The classifier has to choose between 3 classes\n n_classes=3\n)\n\n# Training the Model\nclassifier.train(\n input_fn = lambda:input_fn(train, train_y, training=True),\n steps = 500\n)\n\n# Testing the model against known values\neval_result = classifier.evaluate(input_fn = lambda:input_fn(test, test_y, training=False))\n\nprint('\\nTest accuracy: {accuracy:0.3f}'.format(**eval_result))","sub_path":"irisModel.py","file_name":"irisModel.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"139841273","text":"'''\nCreated on May 22, 2018\n\nAuthor: Achlioptas Panos (Github ID: optas)\n'''\n\nimport numpy as np\nimport time\nimport tensorflow as tf\n\nfrom tflearn import is_training\nfrom . gan import GAN\nfrom .. external.structural_losses.tf_nndistance import nn_distance\n# from .. 
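# (Added sketch) The structural-losses imports here are compiled TensorFlow ops.
# For reference, this is a minimal NumPy version of the symmetric nearest-
# neighbour (Chamfer) distance that nn_distance computes -- illustrative only,
# not the actual CUDA kernel, and only practical for small point clouds.
def chamfer_distance_np(p1, p2):
    """p1: (n, 3) array, p2: (m, 3) array -> scalar symmetric Chamfer distance."""
    d = np.sum((p1[:, None, :] - p2[None, :, :]) ** 2, axis=-1)  # (n, m) pairwise squared distances
    return d.min(axis=1).mean() + d.min(axis=0).mean()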
external.structural_losses.tf_approxmatch import approx_match, match_cost\n\n\nclass W_GAN_GP(GAN):\n '''Gradient Penalty.\n https://arxiv.org/abs/1704.00028\n '''\n\n def __init__(self, name, learning_rate, lam, n_output, noise_dim, discriminator, generator, configuration, beta=0.5, gen_kwargs={}, disc_kwargs={}, reconstr_param=None, disc_param=None, graph=None):\n assert noise_dim == 1948\n\n GAN.__init__(self, name, graph)\n\n self.noise_dim = noise_dim\n self.n_output = n_output\n self.discriminator = discriminator\n self.generator = generator\n\n c = configuration\n\n with tf.variable_scope(name):\n # self.noise = tf.placeholder(tf.float32, shape=[None, noise_dim]) # Noise vector.\n self.incomplete_input = tf.placeholder(tf.float32, shape=[None, noise_dim, 3])\n self.real_pc = tf.placeholder(tf.float32, shape=[None] + self.n_output) # Ground-truth.\n\n with tf.variable_scope('generator'):\n self.generator_out = self.generator(self.incomplete_input, configuration, **gen_kwargs)\n\n with tf.variable_scope('discriminator') as scope:\n self.real_prob, self.real_logit = self.discriminator(self.real_pc, scope=scope, **disc_kwargs)\n self.synthetic_prob, self.synthetic_logit = self.discriminator(self.generator_out, reuse=True, scope=scope, **disc_kwargs)\n\n # Compute WGAN losses\n # discriminator loss\n self.loss_d = tf.reduce_mean(self.synthetic_logit) - tf.reduce_mean(self.real_logit)\n\n # generator loss\n cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.generator_out, self.real_pc)\n l2_loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)\n\n # reg_losses = self.graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n # w_reg_alpha = 1.0\n # for rl in reg_losses:\n # self.loss += (w_reg_alpha * rl)\n self.g_reconstr_loss = reconstr_param*l2_loss\n self.g_disc_loss = -disc_param*tf.reduce_mean(self.synthetic_logit)\n\n # self.loss_g = -0.2*self.g_disc_loss + 0.8*self.g_reconstr_loss\n self.loss_g = self.g_disc_loss + self.g_reconstr_loss\n\n\n # Compute gradient penalty at interpolated points\n ndims = self.real_pc.get_shape().ndims\n batch_size = tf.shape(self.real_pc)[0]\n alpha = tf.random_uniform(shape=[batch_size] + [1] * (ndims - 1), minval=0., maxval=1.)\n differences = self.generator_out - self.real_pc\n interpolates = self.real_pc + (alpha * differences)\n\n with tf.variable_scope('discriminator') as scope:\n gradients = tf.gradients(self.discriminator(interpolates, reuse=True, scope=scope, **disc_kwargs)[1], [interpolates])[0]\n\n # Reduce over all but the first dimension\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=range(1, ndims)))\n gradient_penalty = tf.reduce_mean((slopes - 1.) 
** 2)\n self.loss_d += lam * gradient_penalty\n\n train_vars = tf.trainable_variables()\n d_params = [v for v in train_vars if v.name.startswith(name + '/discriminator/')]\n g_params = [v for v in train_vars if v.name.startswith(name + '/generator/')]\n\n self.opt_d = self.optimizer(learning_rate, beta, self.loss_d, d_params)\n self.opt_g = self.optimizer(learning_rate, beta, self.loss_g, g_params)\n\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)\n self.init = tf.global_variables_initializer()\n\n # Launch the session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n self.sess.run(self.init)\n\n def generator_noise_distribution(self, n_samples, ndims, mu, sigma):\n return np.random.normal(mu, sigma, (n_samples, ndims))\n\n def _single_epoch_train(self, train_data, batch_size, noise_params, discriminator_boost=5):\n '''\n see: http://blog.aylien.com/introduction-generative-adversarial-networks-code-tensorflow/\n http://wiseodd.github.io/techblog/2016/09/17/gan-tensorflow/\n '''\n n_examples = train_data.num_examples\n epoch_loss_d = 0.\n epoch_loss_g = 0.\n epoch_g_disc_loss = 0.\n epoch_g_reconstr_loss = 0.\n batch_size = batch_size\n n_batches = int(n_examples / batch_size)\n start_time = time.time()\n\n iterations_for_epoch = n_batches / discriminator_boost\n\n is_training(True, session=self.sess)\n try:\n # Loop over all batches\n for _ in xrange(iterations_for_epoch):\n for _ in range(discriminator_boost):\n batch_i, _, _ = train_data.next_batch(batch_size)\n # z is incomplete data, feed is the complete PC\n z, feed = batch_i[:, :1948, :], batch_i[:, 1948:, :]\n # z = self.generator_noise_distribution(batch_size, self.noise_dim, **noise_params)\n\n feed_dict = {self.real_pc: feed, self.incomplete_input: z}\n _, loss_d = self.sess.run([self.opt_d, self.loss_d], feed_dict=feed_dict)\n epoch_loss_d += loss_d\n\n # Update generator.\n # z = self.generator_noise_distribution(batch_size, self.noise_dim, **noise_params)\n batch_i, _, _ = train_data.next_batch(batch_size)\n # z is incomplete data, feed is the complete PC\n z, feed = batch_i[:, :1948, :], batch_i[:, 1948:, :]\n\n feed_dict = {self.incomplete_input: z, self.real_pc: feed}\n _, loss_g, g_disc_loss, g_reconstr_loss = self.sess.run([self.opt_g, self.loss_g, self.g_disc_loss, self.g_reconstr_loss], feed_dict=feed_dict)\n epoch_loss_g += loss_g\n epoch_g_disc_loss += g_disc_loss\n epoch_g_reconstr_loss += g_reconstr_loss\n\n is_training(False, session=self.sess)\n except Exception:\n raise\n finally:\n is_training(False, session=self.sess)\n epoch_loss_d /= (iterations_for_epoch * discriminator_boost)\n epoch_loss_g /= iterations_for_epoch\n epoch_g_disc_loss /= iterations_for_epoch\n epoch_g_reconstr_loss /= iterations_for_epoch\n\n duration = time.time() - start_time\n return (epoch_loss_d, epoch_loss_g, epoch_g_reconstr_loss, epoch_g_disc_loss), duration\n","sub_path":"src/w_gan_gp.py","file_name":"w_gan_gp.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563214995","text":"def insertion_sort(items):\n # Split the list into sorted and unsorted\n # For each element in unsorted...\n counter = 0\n for i in range(1, len(items)):\n # Insert that element into the correct place in sorted\n # Store the elements in a temp variable\n temp = items[i]\n # Shifting all larger sorted elements to the right by 1\n j = i\n while j > 0 and temp < items[j - 1]:\n 
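            # (Added note) Invariant: items[0:i] is already sorted here; the
            # while loop shifts each sorted element larger than temp one slot
            # to the right, opening the hole where temp will be inserted.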
print('**********************')\n            counter += 1\n            print(items)\n            items[j] = items[j - 1]\n            j -= 1\n            print(items)\n        # Insert the element after the first smaller element\n        items[j] = temp\n        print(items)\n    print(f'Counter = {counter}')\n    return items\n\n\nl = [7, 4, 9, 2, 6, 3, 0, 8, 5, 1]\n\ninsertion_sort(l)","sub_path":"src/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"315520660","text":"s = input(\"enter the string\")\ns1 = list(s.replace(\" \",\"\")) #converting the input into a list and removing spaces between them\nprint(s1)\n\nindividual=[] #every individual character passed in the string\nrepeated=[] #every repeated character passed in the string\nunique=[] #characters whose count is 1 in the string\n\nfor c in s1:\n    if c not in individual:\n        individual.append(c) #append each character in the string\n    else:\n        repeated.append(c) #append repeated characters into this list\n\nfor d in s1:\n    if d in individual and d not in repeated:\n        unique.append(d)\n\n#prints the first non-repeated character (guarding the all-repeated case)\nif unique:\n    print(\"The first non-repeated character in the string is: \", unique[0])\nelse:\n    print(\"Every character in the string repeats.\")\n\n","sub_path":"Lab1/Source/firstnonrepeat.py","file_name":"firstnonrepeat.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"451272426","text":"def listdatas(pathin):\n    import os\n    a = []\n    datas = os.listdir(pathin)\n    for i in datas:\n        if i.endswith('.tif'):\n            fn_i = pathin + '/' + i\n            a.append(fn_i)\n    return a\n\ndef mosaic(datas, pathout, pathout_sh):\n    from osgeo import gdal\n\n    nn = pathout_sh + '/' + 'mosaic.sh'\n    f = open(nn,'w')\n\n    f.write('gdal_merge.py -o {} -n -99.0 -a_nodata -99.0 '.format(pathout + '/' + 'mean_mosaic' + '.tif'))\n    for i in datas:\n        f.write('{} '.format(i))\n    f.close()\n    return\n\ndef main():\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-i', '--input', type=str, help='input', required=True)  # input directory\n    parser.add_argument('-o', '--output', type=str, help='output', required=True)  # output directory\n    parser.add_argument('-os', '--output_sh', type=str, help='output_sh', required=True)  # output directory for the .sh script\n    args = parser.parse_args()\n    datas = listdatas(args.input)\n    mosaic(datas, args.output, args.output_sh)\n    return\n\nif __name__ == \"__main__\":\n    main()\n\n# python mosaic.py -i /mnt/e/r_tcc/out -o /mnt/e/r_tcc/out_1 -os /mnt/e/r_tcc/code\n# python mosaic.py -i /public/home/mfeng/jwang/forest/northeast/out/slope/1 -o /public/home/mfeng/jwang/forest/northeast/out/slope/3 -os /public/home/mfeng/jwang/forest/northeast/code_tcc/inter_new","sub_path":"ne_forest/01/tcc/mosaic.py","file_name":"mosaic.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"352608774","text":"from tensorflow import keras\nfrom os.path import isfile\nfrom tqdm import tqdm\nimport numpy as np\nimport random\nfrom data import FeatureGen, ScoreGen, TrainingData\nimport image_utils as iu\nfrom model import baseline\nfrom util import score_reshape, compute_score\nfrom callback import tensor_board\nshape = (384, 384, 1)\nmetadata = iu.load_meta()\ntrain = metadata['train']\nprint(\"Training data : \", len(train))\ntrain_set = set(train)\nw2hs = metadata['w2hs']\nsteps = 0\nmodel, branch_model, head_model = baseline.build_model(shape, 64e-5, 0)\n\n\ndef set_lr(model, lr):\n    
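    """(Added docstring) Set the optimizer's learning rate in place via the
    Keras backend, so the manual LR schedule further down can anneal training."""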
keras.backend.set_value(model.optimizer.lr, float(lr))\n\n\ndef get_lr(model):\n return keras.backend.get_value(model.optimizer.lr)\n\n\ndef make_steps(step, ampl):\n \"\"\"\n Perform training epochs\n @param step Number of epochs to perform\n @param ampl the K, the randomized component of the score matrix.\n \"\"\"\n global w2ts, t2i, steps, features, score, histories\n\n # shuffle the training pictures\n random.shuffle(train)\n\n # Map whale id to the list of associated training picture hash value\n w2ts = metadata['w2ts']\n # Map training picture hash value to index in 'train' array\n t2i = metadata['t2i']\n # Compute the match score for each picture pair\n features, score = compute_score(\n train, shape, branch_model, head_model, verbose=1, **metadata)\n\n # Train the model for 'step' epochs\n history = model.fit_generator(\n TrainingData(\n shape,\n score + ampl * np.random.random_sample(size=score.shape),\n steps=step,\n batch_size=32,\n **metadata),\n initial_epoch=steps,\n epochs=steps + step,\n max_queue_size=6,\n workers=4,\n verbose=1,\n use_multiprocessing=True).history\n steps += step\n\n # Collect history data\n history['epochs'] = steps\n history['ms'] = np.mean(score)\n history['lr'] = get_lr(model)\n print(history['epochs'], history['lr'], history['ms'])\n histories.append(history)\n model.save('../pre_trained_model/fine-tuned.model')\n\n\nhistories = []\nsteps = 0\n\nif isfile('../pre_trained_model/mpiotte-standard.model'):\n tmp = keras.models.load_model('../pre_trained_model/mpiotte-standard.model')\n model.set_weights(tmp.get_weights())\n\n# epoch -> 10\nmake_steps(10, 1000)\nampl = 100.0\nfor _ in range(2):\n print('noise ampl. = ', ampl)\n make_steps(5, ampl)\n ampl = max(1.0, 100**-0.1 * ampl)\n# epoch -> 150\nfor _ in range(18):\n make_steps(5, 1.0)\n# epoch -> 200\nset_lr(model, 16e-5)\nfor _ in range(10):\n make_steps(5, 0.5)\n# epoch -> 240\nset_lr(model, 4e-5)\nfor _ in range(8):\n make_steps(5, 0.25)\n# epoch -> 250\nset_lr(model, 1e-5)\nfor _ in range(2):\n make_steps(5, 0.25)\n# epoch -> 300\nweights = model.get_weights()\nmodel, branch_model, head_model = baseline.build_model(\n shape, 64e-5, 0.0002)\nmodel.set_weights(weights)\nfor _ in range(10):\n make_steps(5, 1.0)\n# epoch -> 350\nset_lr(model, 16e-5)\nfor _ in range(10):\n make_steps(5, 0.5)\n# epoch -> 390\nset_lr(model, 4e-5)\nfor _ in range(8):\n make_steps(5, 0.25)\n# epoch -> 400\nset_lr(model, 1e-5)\nfor _ in range(2):\n make_steps(5, 0.25)\nmodel.save('../pre_trained_model/fine-tuned.model')","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343650126","text":"\"\"\"\nPremium Question\n\"\"\"\n__author__ = 'Daniel'\n\n\nclass TicTacToe(object):\n def __init__(self, n):\n \"\"\"\n Initialize your data structure here.\n :type n: int\n \"\"\"\n self.n = n\n self.rows = [0 for _ in xrange(n)]\n self.cols = [0 for _ in xrange(n)]\n self.diag0 = 0\n self.diag1 = 0\n\n def move(self, row, col, player):\n \"\"\"\n Since guarantee the move is valid, only store row, col, diagonal.\n 1: -1\n 2: +1\n Player {player} makes a move at ({row}, {col}).\n @param row The row of the board.\n @param col The column of the board.\n @param player The player, can be either 1 or 2.\n @return The current winning condition, can be either:\n 0: No one wins.\n 1: Player 1 wins.\n 2: Player 2 wins.\n :type row: int\n :type col: int\n :type player: int\n :rtype: int\n \"\"\"\n delta 
= -1 if player == 1 else 1\n        self.cols[col] += delta\n        self.rows[row] += delta\n        if col == row:\n            self.diag0 += delta\n        if col + row == self.n - 1:\n            self.diag1 += delta\n\n        is_win = lambda x: delta * x == self.n\n        if any(map(is_win, [self.rows[row], self.cols[col], self.diag0, self.diag1])):\n            return player\n\n        return 0\n\n# Your TicTacToe object will be instantiated and called as such:\n# obj = TicTacToe(n)\n# param_1 = obj.move(row,col,player)","sub_path":"348 Design Tic-Tac-Toe.py","file_name":"348 Design Tic-Tac-Toe.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"539243252","text":"from django.contrib.auth import authenticate\n# login authentication\n\nfrom backend import paramiko_ssh\n\nfrom web import models\n\nclass SshHandle(object):\n    \"\"\"Interactive shell for the bastion host\"\"\"\n    def __init__(self,argv_handler_item):\n        self.argv_handler_item=argv_handler_item\n        self.models=models\n    def auth(self):\n        \"\"\"Authentication routine: allow up to three login attempts\"\"\"\n        count = 0\n        while count < 3:\n            username = input(\"Bastion account: \").strip()\n            password = input(\"Password: \").strip()\n            user=authenticate(username=username,password=password)\n            # verify the user against Django's auth backend\n            if user:\n                self.user=user\n                return True\n            else:\n                count += 1\n\n    def interactive(self):\n        \"\"\"Start the interactive session\"\"\"\n\n        if self.auth():\n            print(\"Authentication passed...\")\n            while True:\n                # host groups that belong to this user\n                host_goup_list=self.user.host_groups.all()\n                for index,host_goup_obj in enumerate(host_goup_list):\n                    print(\"%s.\\t[%s](%s)\"%(index,host_goup_obj.name,host_goup_obj.host_to_remote_users.count()))\n\n                print('z.\\tUngrouped hosts[%s]'%(self.user.host_to_remote_users.count()))\n                choice = input(\"Select a host group >>>: \")\n                if choice.isdigit():\n                    choice=int(choice)\n                    selected_host_group=host_goup_list[choice]\n                elif choice =='z':\n                    selected_host_group = self.user\n                else:\n                    # guard: without this, selected_host_group could be unbound below\n                    print(\"Invalid choice\")\n                    continue\n\n                while True:\n                    # hosts in the selected group (or the user's ungrouped hosts)\n                    for index,host_to_user_obj in enumerate(selected_host_group.host_to_remote_users.all()):\n\n                        print(\"%s.\\t%s\" % (index, host_to_user_obj))\n                    choice = input(\"Select a host >>>: \").strip()\n\n                    if choice.isdigit():\n                        choice = int(choice)\n                        selected_host_to_user_obj = selected_host_group.host_to_remote_users.all()[choice]\n                        print('------->%s'% selected_host_to_user_obj)\n                        paramiko_ssh.ssh_connect(self,selected_host_to_user_obj)\n                        # establish the remote connection\n                    elif choice =='b':\n                        break\n\n\n\n\n","sub_path":"新建文件夹/BLJ/backend/ssh_interactive.py","file_name":"ssh_interactive.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"137437766","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import datasets\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn import model_selection\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom math import sqrt\nfrom data_process import data_io_do as dataio\nfrom keras import backend as K\nimport os\nimport h5py\nimport argparse\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout,RepeatVector\nfrom keras.layers import LSTM, TimeDistributed, Masking\nfrom keras.callbacks import Callback,ModelCheckpoint\nfrom keras.models import load_model\nfrom keras import regularizers\nfrom keras.optimizers import SGD\n\n\nimport tensorflow as tf\nimport tensorboard\n\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n# Prepare data\n\ndef generate_data(filepath, num_sample, timestamp, start, mode, scalardic):\n    
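    # (Added note) load_csvdata_Pearson is an external helper from data_process;
    # it is assumed to window the series into (samples, TIMESTEPS, N_FEATURES)
    # blocks, fitting the passed scalers in 'train' mode and re-using them in
    # 'test' mode so that no test-set statistics leak into the features.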
\"\"\"\n :param filepath: data set for the model\n :param start: start row for training set, for training, start=0\n :param num_sample: how many samples used for training set, in this case, 2928 samples from 1st Oct-30th Nov, two month\n :param timestamp: timestamp used for LSTM\n :return: training set, train_x and train_y\n \"\"\"\n dataset = pd.read_csv(filepath)\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n for c in [c for c in dataset.columns if dataset[c].dtype in numerics]:\n dataset[c] = dataset[c].abs()\n dataset = dataset.iloc[start:start + num_sample, :] # get first num_sample rows for training set, with all columns\n dataset['TIMESTAMP'] = pd.to_datetime(dataset['TIMESTAMP'], dayfirst=True)\n\n\n set_x, set_y = dataio.load_csvdata_Pearson(dataset, timestamp, mode, scalardic)\n return set_x, set_y\n\n\ndef getdate_index(filepath, start, num_predict):\n \"\"\"\n :param filepath: same dataset file\n :param start: start now no. for prediction\n :param num_predict: how many predictions\n :return: the x axis datatime index for prediciton drawing\n \"\"\"\n dataset = pd.read_csv(filepath)\n dataset = dataset.iloc[start:start + num_predict, :]\n dataset['TIMESTAMP'] = pd.to_datetime(dataset['TIMESTAMP'], dayfirst=True)\n\n return dataset['TIMESTAMP']\n\n\ndef rsquare(y_true,y_pred):\n SS_res = K.sum(K.square(y_true - y_pred))\n SS_tot = K.sum(K.square(y_true - K.mean(y_true)))\n return (1 - SS_res / (SS_tot + K.epsilon()))\n\n\n\n# Parameters\nmodel_params = {\n 'TIMESTEPS': 12,\n 'N_FEATURES':3,\n 'train_no': 2452,\n 'test_no': 473\n # 'RNN_LAYERS': [{'num_units': 400}],\n # # 'RNN_LAYERS': [{'num_units': 60, 'keep_prob': 0.75},{'num_units': 120, 'keep_prob': 0.75},{'num_units': 60, 'keep_prob': 0.75}],\n # 'DENSE_LAYERS': None,\n # 'TRAINING_STEPS': 15000,\n # 'PRINT_STEPS': 50,\n # 'BATCH_SIZE': 100\n}\n\n\n# Scale x data (training set) to 0 mean and unit standard deviation.\nscaler_do = preprocessing.StandardScaler()\nscaler_ec = preprocessing.StandardScaler()\nscaler_temp = preprocessing.StandardScaler()\nscaler_ph = preprocessing.StandardScaler()\nscaler_chlo = preprocessing.StandardScaler()\n\nscaler_dic = {\n 'scaler_one': scaler_do,\n 'scaler_two': scaler_ec,\n 'scaler_three': scaler_temp,\n 'scaler_four': scaler_ph,\n 'scaler_five': scaler_chlo\n}\n\n# datafile\nfilepath = r'C:\\Users\\ZHA244\\Coding\\QLD\\baffle_creek\\dry_season-90min.csv'\nx, y = generate_data(filepath, model_params['train_no'],model_params['TIMESTEPS'], 0, 'train', scaler_dic)\n\nscaler_dic['scaler_one'] = x['scalerone']\nscaler_dic['scaler_two'] = x['scalertwo']\nscaler_dic['scaler_three'] = x['scalerthree']\nscaler_dic['scaler_four'] = x['scalerfour']\nscaler_dic['scaler_five'] = x['scalerfive']\n\n# Training set, three train y for multiple tasks training\nx_train = x['train']\ny_train_do = y['trainyone']\n\n\n\n\n# y_train_ec = y['trainytwo']\n# y_train_temp = y['trainythree']\n\nx_t, y_t = generate_data(filepath, model_params['test_no']+12, model_params['TIMESTEPS'], model_params['train_no']-12, 'test', scaler_dic) # testing set for 240 prediction (5 days)\n# Testing set, three test y for multiple tasks testing\nx_test = x_t['train']\ny_test_do = y_t['trainyone']\n# y_test_ec = y_t['trainytwo']\n# y_test_temp = y_t['trainythree']\n\n# Scale y data to 0 mean and unit standard deviation\nscaler_do_y = preprocessing.StandardScaler()\n# scaler_ec_y = preprocessing.StandardScaler()\n# scaler_temp_y = preprocessing.StandardScaler()\n\ny_train_do = y_train_do.reshape(-1, 1)\n# 
y_train_ec = y_train_ec.reshape(-1, 1)\n# y_train_temp = y_train_temp.reshape(-1, 1)\n\n\ny_train_do = scaler_do_y.fit_transform(y_train_do)\n# y_train_ec = scaler_ec_y.fit_transform(y_train_ec)\n# y_train_temp = scaler_temp_y.fit_transform(y_train_temp)\n\ny_test_do = y_test_do.reshape(-1, 1)\n# y_test_ec = y_test_ec.reshape(-1,1)\n# y_test_temp = y_test_temp.reshape(-1,1)\n\ny_test_do = scaler_do_y.transform(y_test_do)\n# y_test_ec = scaler_ec_y.transform(y_test_ec)\n# y_test_temp = scaler_temp_y.transform(y_test_temp)\n\n\nx_train = x_train.reshape((x_train.shape[0], model_params['TIMESTEPS']* model_params['N_FEATURES']))\nx_test = x_test.reshape((x_test.shape[0],model_params['TIMESTEPS']* model_params['N_FEATURES']))\n\n\nprint(x_train.shape)\nprint(x_test.shape)\n\n\n##\n# Model\n##\n\nmodel = Sequential()\nmodel.add(Dense(3, input_dim=model_params['TIMESTEPS']* model_params['N_FEATURES'], activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(3, activation='relu'))\n# model.add(Dropout(0.2))\nmodel.add(Dense(3, activation='relu'))\nmodel.add(Dense(1, activation=None))\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='mse', optimizer='sgd',metrics=['mse','mae',rsquare])\n\n\n##\n# Training\n\nmodel_file = os.path.join('./bpnnpearson', '-{val_mean_squared_error}-{epoch:02d}' + '.hdf5')\ncheckpoint = ModelCheckpoint(filepath=model_file, monitor='val_mean_squared_error', save_best_only=False,\n save_weights_only=False, mode='max', period=1)\n\n# history = model.fit(x_train, y_train_do,\n# epochs=1000,\n# batch_size=10,shuffle=True,validation_split=0.1,callbacks=[checkpoint])\n\n# Predict.\n\nmodel = load_model('bpnnpearson/-0.07077323177401892-955.hdf5',custom_objects={'rsquare': rsquare})\nscore = model.evaluate(x_test, y_test_do, batch_size=1)\n\nprint(score)\n\npredictions = model.predict(x_test)\n\n\ny_predicted = np.array(list(scaler_do_y.inverse_transform(p) for p in predictions))\n\n# y_predicted = np.array(list(scaler_do_y.inverse_transform(p['predictions']) for p in predictions))\ny_predicted = y_predicted.reshape(np.array(y_test_do).shape)\n\n\n\n# Score with sklearn.\nscore_sklearn = mean_squared_error(y_predicted, scaler_do_y.inverse_transform(y_test_do))\nprint('RMSE (sklearn): {0:f}'.format(sqrt(score_sklearn)))\n\nprint(\"--------\")\nmae = mean_absolute_error(scaler_do_y.inverse_transform(y_test_do), y_predicted)\nprint(\"MAE (sklearn):{0:f}\".format(mae))\nprint(\"---------\")\nr2 = r2_score(scaler_do_y.inverse_transform(y_test_do), y_predicted)\nprint(\"R2 (sklearn):{0:f}\".format(r2))\n\n\n\n\n\n\n# Drawing\n\naxis_data = getdate_index(filepath,732,372)\n#\n# ax = plt.gca()\n# xfmt = mdates.DateFormatter('%Y-%m-%d %H:%M')\n# ax.xaxis.set_major_formatter(xfmt)\n#\n# true_line, = plt.plot_date(axis_data, scaler_do_y.inverse_transform(y_test_do)[0:336], 'b-', color='blue',\n# label='True Value')\n# predict_line, = plt.plot_date(axis_data, np.array(y_predicted)[0:336], 'b-', color='Red',\n# label='Prediction Value')\n#\n# plt.legend(handles=[true_line, predict_line])\n# plt.gcf().autofmt_xdate()\n# plt.show()\n\n# # These are the \"Tableau 20\" colors as RGB.\n# tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n# (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n# (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n# (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n# (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n# # Scale the RGB 
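# (Added note) The normalisation loop below is equivalent to the one-liner:
#   tableau20 = [(r / 255., g / 255., b / 255.) for (r, g, b) in tableau20]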
values to the [0, 1] range, which is the format matplotlib accepts.\n# for i in range(len(tableau20)):\n# r, g, b = tableau20[i]\n# tableau20[i] = (r / 255., g / 255., b / 255.)\n#\n# ax = plt.gca()\n#\n# # ax.spines[\"top\"].set_visible(False)\n# # ax.spines[\"bottom\"].set_visible(False)\n# # ax.spines[\"right\"].set_visible(False)\n# # ax.spines[\"left\"].set_visible(False)\n#\n# xfmt = mdates.DateFormatter('%Y-%m-%d')\n# ax.xaxis.set_major_formatter(xfmt)\n#\n#\n# true_line, = plt.plot_date(axis_data, scaler_do_y.inverse_transform(y_test_do)[0:372], '-', lw=1, color=tableau20[2],\n# label='True Value')\n# predict_line, = plt.plot_date(axis_data, np.array(y_predicted)[0:372], '--', lw=1, color=tableau20[18],\n# label='Prediction Value')\n#\n#\n# plt.legend(handles=[true_line, predict_line], fontsize=12)\n# plt.title('Water Quality Prediction', fontsize=16)\n# plt.xlabel('Date', fontsize=14)\n# plt.ylabel('DO (mg/l)', fontsize=14)\n# plt.gcf().autofmt_xdate()\n# plt.savefig(r'C:\\Users\\ZHA244\\Pictures\\paper-figure\\120min-7days-bpnn.png', dpi=200)\n# plt.show()\n\n","sub_path":"bpnn_pearson.py","file_name":"bpnn_pearson.py","file_ext":"py","file_size_in_byte":9170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"132243979","text":"#!/usr/bin/env python3\n\nimport re\nfrom enum import Enum\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n\nVERSION_FILE_PATH = './VERSION'\n\n\nclass Increment(Enum):\n major = '1'\n minor = '2'\n patch = '3'\n\n\nclass bc:\n WARNING = '\\033[93m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef wrap(text, color):\n return f\"{color}{text}{bc.ENDC}\"\n\n\ndef execute_command(command):\n \"\"\"\n Execute a shell command and stream the output until the process completes.\n\n Args:\n command (str): The command to execute.\n\n Returns:\n Exit code of the command.\n \"\"\"\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n encoding='utf-8',\n errors='replace'\n )\n while True:\n realtime_output = process.stdout.readline()\n if realtime_output == '' and process.poll() is not None:\n break\n if realtime_output:\n print(realtime_output.strip(), flush=True)\n exit_code = process.wait()\n return exit_code\n\n\ndef get_active_branch_name():\n\n head_dir = Path(\".\") / \".git\" / \"HEAD\"\n with head_dir.open(\"r\") as f:\n content = f.read().splitlines()\n\n for line in content:\n if line[0:4] == \"ref:\":\n return line.partition(\"refs/heads/\")[2]\n\n\ndef check_exit_code(exit_code):\n if exit_code != 0:\n print(f\"Error: exit code {exit_code}\")\n sys.exit(exit_code)\n\n\ndef yes_or_no(question):\n \"\"\"\n Asks user a yes/no question.\n\n Args:\n question (str): Question to ask\n\n Returns:\n bool: True if user answers yes, False otherwise\n \"\"\"\n reply = str(\n input(f\"{question} ({wrap('y/n', bc.BOLD)}): \")).lower().strip()\n no_reply_msg = \"Please enter\"\n if not reply:\n return yes_or_no(no_reply_msg)\n elif reply[0] == 'y':\n return True\n elif reply[0] == 'n':\n return False\n else:\n return yes_or_no(no_reply_msg)\n\n\ndef exit_without_error():\n print(\"Exiting\")\n sys.exit(0)\n\n\ndef check_valid_version(version):\n pattern = r\"^\\d+\\.\\d+\\.\\d+$\"\n return bool(re.match(pattern, version))\n\n\ndef validate_increment(increment):\n valid_increments = [x.value for x in Increment]\n if increment not in valid_increments:\n valid_increments_str = ', '.join(valid_increments)\n print(\n f\"Invalid 
value '{increment}'. must be one of: {valid_increments_str}\")\n sys.exit(1)\n\n\ndef get_current_version():\n with open(VERSION_FILE_PATH, 'r') as f:\n version = f.read().strip()\n if not check_valid_version(version):\n error_msg = f\"Invalid version: {version}. Must be of format X.Y.Z\"\n raise ValueError(error_msg)\n return version\n\n\ndef generate_new_version(current_version, increment):\n # generate new version\n new_version = None\n version_parts = current_version.split('.')\n if increment == Increment.patch.value:\n new_patch = int(version_parts[2]) + 1\n new_version = '.'.join(\n [version_parts[0], version_parts[1], str(new_patch)])\n elif increment == Increment.minor.value:\n new_minor = int(version_parts[1]) + 1\n new_version = '.'.join([version_parts[0], str(new_minor), '0'])\n elif increment == Increment.major.value:\n new_major = int(version_parts[0]) + 1\n new_version = '.'.join([str(new_major), '0', '0'])\n return new_version\n\n\ndef write_version_file(new_version):\n with open(VERSION_FILE_PATH, 'w') as f:\n f.write(new_version)\n\n\ndef main():\n # check branch\n active_branch = get_active_branch_name()\n if active_branch != \"master\":\n print(\n f\"{wrap('WARNING',bc.WARNING)}: You are on branch '{active_branch}'.\\nIt's generally recommended you run this script on master after first merging your feature branch.\")\n reply_switch = yes_or_no(\"Change to master and pull?\")\n if reply_switch is True:\n exit_code_exit_code = execute_command(\n \"git checkout master && git pull origin master\")\n check_exit_code(exit_code_exit_code)\n active_branch = get_active_branch_name()\n\n # get current version\n current_version = get_current_version()\n print(\n f\"Current tt-docker-base version on '{active_branch}': {current_version}\")\n\n # get desired bump\n q_increment = f\"Bump major ({wrap('1', bc.BOLD)}), minor ({wrap('2', bc.BOLD)}), or patch ({wrap('3', bc.BOLD)})?\"\n reply_increment = input(f\"{q_increment}: \").lower().strip()\n validate_increment(reply_increment)\n new_version = generate_new_version(current_version, reply_increment)\n\n # save and commit\n q_commit = f\"Bump version to {wrap(new_version,bc.UNDERLINE)} and commit?\"\n confirm_commit = yes_or_no(q_commit)\n if confirm_commit == False:\n exit_without_error()\n print(f\"{VERSION_FILE_PATH} file changed: {current_version} -> {wrap(new_version,bc.UNDERLINE)}\")\n write_version_file(new_version)\n cmd_gitadd = f\"git add {VERSION_FILE_PATH}\"\n exit_code_gitadd = execute_command(cmd_gitadd)\n check_exit_code(exit_code_gitadd)\n cmd_commit = f\"git commit -m 'bump version to {new_version}'\"\n exit_code_commit = execute_command(cmd_commit)\n check_exit_code(exit_code_commit)\n\n # tag and push\n q_tag = f\"Tag commit as {wrap(new_version,bc.UNDERLINE)} and push to remote?\"\n confirm_tag = yes_or_no(q_tag)\n if confirm_tag == False:\n exit_without_error()\n cmd_tag = f'git tag {new_version}'\n exit_code_tag = execute_command(cmd_tag)\n check_exit_code(exit_code_tag)\n cmd_push_active = f'git push origin --tags {active_branch}' # pushes only specific tag\n exit_code_push = execute_command(cmd_push_active)\n check_exit_code(exit_code_push)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/bump.py","file_name":"bump.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"509845012","text":"# References\n# https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/pytorch_basics/main.py\n# 
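# (Added usage sketch for generate_new_version in the bump.py script above;
# the expected values follow directly from its branch logic)
#   generate_new_version('1.4.7', Increment.patch.value)  # -> '1.4.8'
#   generate_new_version('1.4.7', Increment.minor.value)  # -> '1.5.0'
#   generate_new_version('1.4.7', Increment.major.value)  # -> '2.0.0'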
http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class\nimport torch\nimport docx\nimport re\nimport os\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\n\nclass Wordlist(object):\n    def __init__(self, filelist):  # list of .docx file names (the parameter was misnamed 'filename')\n\n        word=[]\n        for file in filelist:\n            file=docx.Document(os.path.join(getcwd,file))  # 'getcwd' is the module-level base directory set in __main__\n            #self.len = xy.shape[0]\n            #self.x_data = torch.from_numpy(xy[:, 0:-1])  # notes on numpy array ops: http://blog.csdn.net/liangzuojiayi/article/details/51534164\n            #self.y_data = torch.from_numpy(xy[:, [-1]])\n            # collect the content of each paragraph\n            for para in file.paragraphs[:10]:\n                filtrate_nonChinese = re.compile(u'[^\\u4E00-\\u9FA50-9]')  # matches non-Chinese, non-digit characters\n\n                filtered_English_str = [ i.lower().strip() for i in re.findall('[A-Za-z ]+', para.text) if i.lower().strip() !='']  # keep alphabetic substrings\n                # split multi-word matches into single words; rebuilding the list avoids\n                # the original bug of removing items while iterating over the same list\n                filtered_English_str = [w for s in filtered_English_str for w in s.split(' ') if w]\n\n                filtered_Chinese = filtrate_nonChinese.sub(r' ', para.text)  # replace non-Chinese characters with spaces\n                filtered_Chinese_str=[i for i in ' '.join(filtered_Chinese.replace(' ','')) if i!=' ' ]\n                filtered_Chinese_str.extend(filtered_English_str)\n                word.extend(filtered_Chinese_str)\n        word=list(set(word))\n        word.sort()\n        self.size = len(word)\n        self.word=word\n\n        self.voc = dict(enumerate(self.word))\n\n        self.reverse_voc = {v:k for k,v in self.voc.items()}\n\n    def getID(self, word):\n        try:\n            return self.reverse_voc[word]\n        except KeyError:\n            return -1\n\n    def getWord(self,wordid):\n\n        return self.voc[wordid]\n\n\nif __name__ == '__main__':\n\n    getcwd=os.path.join(os.getcwd(),'word')\n    filelist=os.listdir(getcwd)\n\n    wordlist=Wordlist(filelist)\n\n    print(wordlist.voc)\n\n    print(wordlist.getID('高'),wordlist.getWord(225))\n","sub_path":"DL-Cornell-decathlon/wordlist.py","file_name":"wordlist.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"283777399","text":"# Metaclass for a tiny ORM: reads Field attributes off a model class and builds\n# its default SQL (assumes logging, Field and create_args_string exist elsewhere).\nclass ModelMetaclass(type):\n    def __new__(cls,name,bases,attrs):\n        # skip the Model base class itself\n        if name=='Model':\n            return type.__new__(cls,name,bases,attrs)\n        # get the table name\n        tableName=attrs.get('__table__',None) or name\n        logging.info('found model: %s (table: %s)'%(name,tableName))\n        # collect all Fields and the primary key name\n        mappings=dict()\n        fields=[]\n        primaryKey=None\n        for k,v in attrs.items():\n            if isinstance(v,Field):\n                logging.info('found mapping: %s ==> %s' % (k,v))\n                mappings[k]=v\n                if v.primary_key:\n                    # found the primary key\n                    if primaryKey:\n                        raise RuntimeError('Duplicate primary key for field: %s' %k)\n                    primaryKey=k\n                else:\n                    fields.append(k)\n        if not primaryKey:\n            raise RuntimeError('Primary key not found')\n        for k in mappings.keys():\n            attrs.pop(k)\n        escaped_fields=list(map(lambda f:'`%s`'%f,fields))\n        attrs['__mappings__']=mappings  # save the attribute-to-column mapping (was the undefined name 'mapping')\n        attrs['__table__']=tableName\n        attrs['__primary_key__']=primaryKey  # attribute name of the primary key\n        attrs['__fields__']=fields  # attribute names other than the primary key\n        # build the default SELECT, INSERT, DELETE and UPDATE statements\n        attrs['__select__']='select `%s`,%s from `%s`' %(primaryKey,','.join(escaped_fields),tableName)\n        attrs['__insert__']='insert into `%s`(%s,`%s`) values(%s)' %(tableName,','.join(escaped_fields),primaryKey,create_args_string(len(escaped_fields)+1))\n        attrs['__update__']='update `%s` set %s where `%s`=?'%(tableName,','.join(map(lambda f:'`%s`=?' 
%(mappings.get(f).name or f),fields)),primaryKey)\n attrs['__delete__']='delete from ‘%s’ where ‘%s’=?'%(tableName,primaryKey)\n return type.__new__(cls,name,bases,attrs)\n","sub_path":"Python/python_study/lianxi/Python实战/Modelmetaclass001.py","file_name":"Modelmetaclass001.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"319204900","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/api_metadata/views/conf.py\n# Compiled at: 2017-11-27 05:10:38\nimport functools, itertools, arrow\nfrom flask import request\nfrom flask_classy import route\nfrom ocs.api import decorators, exports\nfrom ocs.api.validators import Validator\nfrom . import MetadataAPIBaseView, client_ip, compat\n\nclass ConfViewValidator(Validator):\n\n def validate_conf(self, view):\n return view(client_ip())\n\n\ndef _compat_conf(view):\n \"\"\" Decorates /conf to call the export function on the view response.\n \"\"\"\n\n @functools.wraps(view)\n def wrapped(*args, **kwargs):\n export_decorator, ret = view(*args, **kwargs)\n flask_response = decorators.jsonify(lambda : ret)()\n return export_decorator(lambda : flask_response)()\n\n return wrapped\n\n\nclass ConfView(MetadataAPIBaseView):\n route_base = '/'\n validation_class = ConfViewValidator()\n\n def _server_info(self, ip_addr):\n \"\"\" Queries the compute API to get server info that has the IP\n `ip_addr`.\n \"\"\"\n server = self._get_server_by_ip(ip_addr)\n return (\n {'id': server.get('id'), \n 'name': server.get('name'), \n 'commercial_type': server.get('commercial_type'), \n 'hostname': server.get('hostname'), \n 'tags': server.get('tags'), \n 'state_detail': server.get('state_detail'), \n 'public_ip': server.get('public_ip'), \n 'private_ip': server.get('private_ip'), \n 'volumes': server.get('volumes'), \n 'organization': server.get('organization'), \n 'location': server.get('location', {}), \n 'ipv6': server.get('ipv6'), \n 'extra_networks': server.get('extra_networks'), \n 'bootscript': server.get('bootscript')},\n server)\n\n def _orga_info(self, server_orga):\n \"\"\" Queries the account API to get info about the organization that\n owns a server.\n \"\"\"\n response = self.privileged_account_api.query().organizations(server_orga).get()\n organization = response.get('organization', {})\n orga_users = organization.get('users', {})\n users_keys = [ user.get('ssh_public_keys', []) or [] for user in orga_users ]\n all_users_keys = list(itertools.chain(*users_keys))\n return {'ssh_public_keys': all_users_keys, \n 'timezone': organization.get('timezone')}\n\n @_compat_conf\n @route('/conf')\n def conf(self, ip_addr):\n \"\"\" Returns client's metadata.\n\n By default, a \"shell format\" is returned to the client. 
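        (Added note: "shell format" here means each metadata key is emitted as
        an upper-cased KEY=value line that a client can eval or source; nested
        dicts are flattened with underscore-joined prefixes, as the example
        below shows.)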
For images\n prior to ~2014/11/01, dictionaries were not rendered properly.\n\n The following dict:\n\n >>> {'super_dict': {'x': 1, 'y': 2}}\n\n Used to be rendered as (bad, note the absence of quotes):\n\n SUPER_DICT=X Y\n SUPER_DICT_X=1\n SUPER_DICT_Y=2\n\n And now (good):\n\n SUPER_DICT='X Y'\n SUPER_DICT_X=1\n SUPER_DICT_Y=2\n\n Do prevent breaking client's scripts using this endpoint, we keep the\n bad behaviour for old images.\n \"\"\"\n ret, server = self._server_info(ip_addr)\n ret.update(self._orga_info(ret.get('organization')))\n image = server.get('image')\n if image:\n creation_date = arrow.get(image.get('creation_date'))\n limit = arrow.get('2014-11-01T00:00:00+00:00')\n if creation_date < limit:\n return (compat.select_export(default='sh'), ret)\n return (\n exports.select_export(default='sh'), ret)","sub_path":"pycfiles/api_metadata-1.2.7-py2.7/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447141183","text":"# -*- coding: UTF-8 -*-\n# Copyright 2014 Luc Saffre\n# License: BSD (see file COPYING for details)\n\nfrom __future__ import print_function\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.core.exceptions import ValidationError\n\nfrom lino import dd, rt\n\nfrom lino.utils.instantiator import InstanceGenerator\n\nfrom .models import PlaceTypes\n\n\nclass PlaceGenerator(InstanceGenerator):\n def __init__(self):\n super(PlaceGenerator, self).__init__()\n self.prev_obj = None\n EE = rt.modules.countries.Country.objects.get(isocode=\"EE\")\n\n for pt in PlaceTypes.objects():\n self.add_instantiator(\n pt.name, 'countries.Place', 'name zip_code',\n country=EE,\n type=pt)\n\n def on_new(self, obj):\n prev = self.prev_obj\n if prev and prev.type and obj.type:\n otype = self.assimilate(obj.type)\n ptype = self.assimilate(prev.type)\n if ptype < otype:\n obj.parent = prev\n else:\n p = prev.parent\n while p and not self.can_be_parent(\n self.assimilate(p.type), otype):\n p = p.parent\n if p is not None:\n obj.parent = p\n elif False:\n logger.warning(\n \"%s (%s) is no parent for %s (%s)\",\n prev, prev.type, obj, obj.type)\n\n try:\n obj.full_clean()\n obj.save()\n self.prev_obj = obj\n return obj\n except ValidationError as e:\n logger.warning(\n \"Failed to load %s (%s) : %s\",\n obj, obj.type, e)\n # return super(PlaceGenerator, self).on_new(obj)\n \n def can_be_parent(self, ptype, otype):\n \"\"\"return True if a place of type pt can be parent for a place of type\n ot.\n\n \"\"\"\n if ptype < otype:\n return True\n return False\n\n def assimilate(self, pt):\n \"\"\"In Estonia, municipalities and towns can be siblings within a same\ncounty.\"\"\"\n if pt == PlaceTypes.municipality:\n return PlaceTypes.town\n return pt\n \n\n","sub_path":"lino/modlib/countries/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"168605472","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\nThe Monte Carlo code that adds fake galaxiesto images from the Legacy Survey\n\"\"\"\n\nfrom __future__ import division, print_function\n\nif __name__ == '__main__':\n import matplotlib\n matplotlib.use('Agg')\nimport h5py\nimport os\nimport sys\nimport subprocess\nimport time as time_builtin\nimport shutil\nimport logging\nimport argparse\nimport pdb\nimport photutils\n\nimport 
numpy as np\nimport matplotlib.pyplot as plt\nfrom pkg_resources import resource_filename\nfrom pickle import dump\nfrom glob import glob\nimport csv\n\nfrom astropy.table import Table, Column, vstack\nfrom astropy.io import fits\n#from astropy import wcs as astropy_wcs\nfrom fitsio import FITSHDR\nimport fitsio\n\nfrom astropy import units\nfrom astropy.coordinates import SkyCoord\n\nfrom obiwan.db_tools import getSrcsInBrick\nfrom obiwan.common import get_outdir_runbrick, get_brickinfo_hack\nfrom obiwan.common import stack_tables\n\n# Sphinx build would crash\n#try:\nfrom legacypipe.runbrick import run_brick\nfrom legacypipe.decam import DecamImage\nfrom legacypipe.survey import LegacySurveyData, wcs_for_brick\nfrom legacypipe.runcosmos import DecamImagePlusNoise, CosmosSurvey\n\nfrom astrometry.util.fits import fits_table, merge_tables\nfrom astrometry.util.ttime import Time\nfrom astrometry.libkd.spherematch import match_radec\n\nfrom tractor.psfex import PsfEx, PsfExModel\nfrom tractor.basics import GaussianMixtureEllipsePSF, RaDecPos\nfrom tractor.sfd import SFDMap\n\nimport galsim\n#except ImportError:\n# pass\n\nDATASETS=['dr5','dr3','cosmos','dr6']\n\ndef write_dict(fn,d):\n '''d -- dictionary'''\n w = csv.writer(open(fn, \"w\"))\n for key, val in d.items():\n w.writerow([key, val])\n\ndef read_dict(fn):\n d = {}\n for key, val in csv.reader(open(fn)):\n d[key] = val\n return d\n\ndef imshow_stamp(stamp,fn='test.png',galsimobj=True):\n if galsimobj:\n img = stamp.array.copy()\n else:\n img= stamp.copy()\n img=img + abs(img.min())+1\n plt.imsave(fn,np.log10(img),origin='lower',cmap='gray')\n #plt.imshow(np.log10(img),origin='lower',cmap='gray')\n #plt.savefig(fn)\n #plt.close()\n #print('Wrote %s' % fn)\n\ndef plot_radial_profs(fn,profs):\n assert(profs.shape[1] == 3)\n r=np.arange(profs.shape[0])\n for i,lab in zip(range(3),['src','srcnoise','srcnoiseimg']):\n plt.plot(r,profs[:,i],label=lab)\n plt.legend(loc='lower right')\n plt.savefig(fn)\n plt.close()\n\n\ndef ptime(text,t0):\n '''Timer'''\n tnow=Time()\n print('TIMING:%s ' % text,tnow-t0)\n return tnow\n\n\ndef get_skip_ids(decals_sim_dir, brickname, objtype):\n fns= glob(os.path.join(decals_sim_dir, objtype,\n brickname[:3], brickname,\n '*','obiwan','skippedids-*.fits'))\n if len(fns) == 0:\n raise ValueError(\"no skippedids.fits files exist for this brick %s\" % brickname)\n T= stack_tables(fns, textfile=False)\n return T.ids.astype(str)\n\ndef get_fnsuffix(**kwargs):\n return '-{}-{}.fits'.format(kwargs['objtype'], kwargs['brickname'])\n #'rs%d' % kwargs['rowst'])\n\n# try:\nclass SimDecals(LegacySurveyData):\n \"\"\"Top level object that specifying which data to run through pipeline\n\n Same behavior as legacypipe.runs.Dr3DecalsSurvey which chooses which\n CCDs to include. But this also stores all the relevant obiwan\n objects\n\n Args:\n dataset: see definition in\n https://github.com/legacysurvey/obiwan/blob/master/py/obiwan/test/end_to_end/README.md\n survey_dir: as used by legacypipe.runbrick.run_brick()\n Defaults to $LEGACY_SURVEY_DIR environment variable. 
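        (Added illustration -- a typical construction, with hypothetical paths:
        SimDecals(dataset='dr5', survey_dir='/path/to/survey', metacat=metacat,
        simcat=simcat, output_dir='out', add_sim_noise=True, seed=1))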
Where to look for\n files including calibration files, tables of CCDs and bricks, image data\n metacat: fits_table\n configuration-like params for the simulated sources\n simcat: fits_table\n simulated source catalog for a given brick (not CCD).\n output_dir: legacypipe's outdir\n add_sim_noise: add Poisson noise from the simulated source to the image\n seed: for random number generators\n image_eq_model: referred to as 'testA'\n wherever add a simulated source, replace both image and invvar of the image\n with that of the simulated source only\n\n Attributes:\n DR: see above\n metacat: fits_table\n configuration-like params for the simulated sources\n simcat: fits_table\n simulated source catalog for a given brick (not CCD).\n output_dir: legacypipe's outdir\n add_sim_noise: add Poisson noise from the simulated source to the image\n image_eq_model: referred to as 'testA'\n wherever add a simulated source, replace both image and invvar of the image\n with that of the simulated source only\n \"\"\"\n\n def __init__(self, dataset=None, survey_dir=None, metacat=None, simcat=None,\n output_dir=None,add_sim_noise=False, seed=0,\n image_eq_model=False,**kwargs):\n self.dataset= dataset\n\n kw= dict(survey_dir=survey_dir,\n output_dir=output_dir)\n if self.dataset == 'cosmos':\n kw.update(subset=kwargs['subset'])\n super(SimDecals, self).__init__(**kw)\n\n self.metacat = metacat\n self.simcat = simcat\n # Additional options from command line\n self.add_sim_noise= add_sim_noise\n self.seed= seed\n self.image_eq_model= image_eq_model\n print('SimDecals: self.image_eq_model=',self.image_eq_model)\n\n def get_image_object(self, t):\n if self.dataset == 'cosmos':\n return SimImageCosmos(self, t)\n else:\n return SimImage(self, t)\n\n def filter_ccds_files(self, fns):\n \"\"\"see legacypipe/runs.py\"\"\"\n return fns\n\n def ccds_for_fitting(self, brick, ccds):\n if self.dataset in ['dr3','dr5']:\n return np.flatnonzero(ccds.camera == 'decam')\n elif self.dataset in ['cosmos']:\n return np.flatnonzero(ccds.camera == 'decam+noise')\n #elif self.dataset == 'DR4':\n # return np.flatnonzero(np.logical_or(ccds.camera == 'mosaic',\n # ccds.camera == '90prime'))\n\n def filter_ccd_kd_files(self, fns):\n \"\"\"see legacypipe/runs.py\"\"\"\n return []\n\ndef get_srcimg_invvar(stamp_ivar,img_ivar):\n \"\"\"stamp_ivar, img_ivar -- galsim Image objects\"\"\"\n # Use img_ivar when stamp_ivar == 0, both otherwise\n use_img_ivar= np.ones(img_ivar.array.shape).astype(bool)\n use_img_ivar[ stamp_ivar.array > 0 ] = False\n # First compute using both\n ivar= np.power(stamp_ivar.array.copy(), -1) + np.power(img_ivar.array.copy(), -1)\n ivar= np.power(ivar,-1)\n keep= np.ones(ivar.shape).astype(bool)\n keep[ (stamp_ivar.array > 0)*\\\n (img_ivar.array > 0) ] = False\n ivar[keep] = 0.\n # Now use img_ivar only where need to\n ivar[ use_img_ivar ] = img_ivar.array.copy()[ use_img_ivar ]\n # return\n obj_ivar = stamp_ivar.copy()\n obj_ivar.fill(0.)\n obj_ivar+= ivar\n return obj_ivar\n\ndef saturation_e(camera):\n # Saturation limit\n d=dict(decam=3e4) # e-\n return d[camera]\n\ndef ivar_to_var(ivar,nano2e=None,camera='decam'):\n assert(nano2e is not None)\n flag= ivar == 0.\n var= np.power(ivar, -1)\n # Set 0 ivar pixels to satuation limit\n # var * nano2e^2 = e-^2\n sat= saturation_e(camera) / nano2e**2\n var[flag]= sat\n return var\n# except NameError:\n# pass\n\n# try:\nclass SimDecalsCosmos(SimDecals,CosmosSurvey):\n \"\"\"Filters the CCDs to just those in the cosmos realizations\n\n Call just like SimDecals except with 
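    (Added note on get_srcimg_invvar above: where both the stamp and the image
    have coverage it combines inverse variances as 1/(1/ivar_stamp + 1/ivar_img),
    i.e. variances add; for example, ivars of 4 and 4 combine to 2.)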
additional Argument 'subset'\n\n Args:\n **kwargs: SimDecals args + 'subset'\n \"\"\"\n\n def __init__(self, **kwargs):\n super(SimDecalsCosmos, self).__init__(**kwargs)\n# except NameError:\n# pass\n\n\n\n# try:\nclass SimImage(DecamImage):\n \"\"\"Adds simulated sources to a single exposure\n\n Similar behavior as legacypipe.decam.DecamImage. Instead of\n loading images specifically from DECam, this loads images\n with simulated sources added in\n\n Args:\n survey: SimDecals() object\n t: as used by DecamImage\n a single row fits_table for a specific CCD\n\n Attributes:\n inherits: DecamImage\n t: as used by DecamImage\n a single row fits_table for a specific CCD\n \"\"\"\n\n def __init__(self, survey, t):\n super(SimImage, self).__init__(survey, t)\n self.t = t\n if self.survey.dataset in ['dr3']:\n assert('arawgain' in self.t.get_columns())\n self.t.rename('arawgain', 'gain')\n elif self.survey.dataset in ['dr5']:\n assert 'gain' in self.t.get_columns()\n # Find image on proj or proja if doesn't exist\n dirs=dict(proj='/global/project/projectdirs/cosmo/staging/decam',\n proja='/global/projecta/projectdirs/cosmo/staging/decam')\n if not os.path.exists(self.imgfn):\n print('doesnt exist: %s, finding new location for file' % self.imgfn)\n base=os.path.basename(self.imgfn)\n found= glob('%s/**/%s' % (dirs['proja'],base), recursive=True)\n if len(found) == 0:\n found= glob('%s/**/%s' % (dirs['proj'],base), recursive=True)\n if len(found) == 0:\n raise OSError('cannot find image on project or projecta: %s' % base)\n else:\n self.imgfn= found[0]\n print('found new location, overwrite self.imgfn with %s' % self.imgfn)\n self.wtfn= (self.imgfn.replace('_oki_','_oow_')\n .replace('_ooi_','_oow_'))\n self.dqfn= self.wtfn.replace('_oow_','_ood_')\n\n def get_tractor_image(self, **kwargs):\n tim = super(SimImage, self).get_tractor_image(**kwargs)\n if tim is None: # this can be None when the edge of a CCD overlaps\n return tim\n\n # Seed\n #if 'SEED' in self.survey.metacat.columns:\n # seed = self.survey.metacat['SEED']\n #else:\n # seed = None\n\n objtype = self.survey.metacat.get('objtype')[0]\n objstamp = BuildStamp(tim, seed=self.survey.seed,\n camera=self.t.camera,\n gain=self.t.gain,exptime=self.t.exptime)\n # ids make it onto a ccd (geometry cut)\n tim.ids_added=[]\n\n # Grab the data and inverse variance images [nanomaggies!]\n tim_image = galsim.Image(tim.getImage())\n tim_invvar = galsim.Image(tim.getInvvar())\n tim_dq = galsim.Image(tim.dq)\n # Also store galaxy sims and sims invvar\n sims_image = tim_image.copy()\n sims_image.fill(0.0)\n sims_ivar = sims_image.copy()\n\n # Store simulated galaxy images in tim object\n # Loop on each object.\n for ii, obj in enumerate(self.survey.simcat):\n # Print timing\n t0= Time()\n if objtype in ['lrg','elg']:\n strin= 'Drawing 1 %s: n=%.2f, rhalf=%.2f, e1=%.2f, e2=%.2f' % \\\n (objtype.upper(), obj.n,obj.rhalf,obj.e1,obj.e2)\n print(strin)\n\n if objtype == 'star':\n stamp = objstamp.star(obj)\n elif objtype == 'elg':\n stamp = objstamp.elg(obj)\n elif objtype == 'lrg':\n stamp = objstamp.lrg(obj)\n elif objtype == 'qso':\n stamp = objstamp.qso(obj)\n t0= ptime('Finished Drawing %s: id=%d band=%s dbflux=%f addedflux=%f' %\n (objtype.upper(), obj.id,objstamp.band,\n obj.get(objstamp.band+'flux'),stamp.array.sum()), t0)\n\n stamp_nonoise= stamp.copy()\n if self.survey.add_sim_noise:\n stamp += noise_for_galaxy(stamp,objstamp.nano2e)\n ivarstamp= ivar_for_galaxy(stamp,objstamp.nano2e)\n # Add source if EVEN 1 pix falls on the CCD\n overlap = 
stamp.bounds & tim_image.bounds\n if overlap.area() > 0:\n print('Stamp overlaps tim: id=%d band=%s' % (obj.id,objstamp.band))\n tim.ids_added.append(obj.id)\n stamp = stamp[overlap]\n ivarstamp = ivarstamp[overlap]\n stamp_nonoise= stamp_nonoise[overlap]\n\n # Zero out invvar where bad pixel mask is flagged (> 0)\n keep = np.ones(tim_dq[overlap].array.shape)\n keep[ tim_dq[overlap].array > 0 ] = 0.\n ivarstamp *= keep\n\n # Add stamp to image\n back= tim_image[overlap].copy()\n tim_image[overlap] += stamp #= back.copy() + stamp.copy()\n # Add variances\n back_ivar= tim_invvar[overlap].copy()\n tot_ivar= get_srcimg_invvar(ivarstamp, back_ivar)\n tim_invvar[overlap] = tot_ivar.copy()\n\n #Extra\n sims_image[overlap] += stamp.copy()\n sims_ivar[overlap] += ivarstamp.copy()\n\n if np.min(sims_ivar.array) < 0:\n log.warning('Negative invvar!')\n import pdb ; pdb.set_trace()\n tim.sims_image = sims_image.array\n tim.sims_inverr = np.sqrt(sims_ivar.array)\n # Can set image=model, ivar=1/model for testing\n if self.survey.image_eq_model:\n tim.data = sims_image.array.copy()\n tim.inverr = np.zeros(tim.data.shape)\n tim.inverr[sims_image.array > 0.] = np.sqrt(1./sims_image.array.copy()[sims_image.array > 0.])\n else:\n tim.data = tim_image.array\n tim.inverr = np.sqrt(tim_invvar.array)\n return tim\n# except NameError:\n# pass\n\n# try:\nclass SimImageCosmos(SimImage,DecamImagePlusNoise):\n \"\"\"Filters the CCDs to just those in the cosmos realizations\n\n Call just like SimDecals except with additional Argument 'subset'\n\n Args:\n **kwargs: SimDecals args + 'subset'\n \"\"\"\n\n def __init__(self, survey, t):\n super(SimImageCosmos, self).__init__(survey, t)\n# except NameError:\n# pass\n\ndef noise_for_galaxy(gal,nano2e):\n \"\"\"Returns numpy array of noise in Img count units for gal in image cnt units\"\"\"\n # Noise model + no negative image vals when compute noise\n one_std_per_pix= gal.array.copy() # nanomaggies\n one_std_per_pix[one_std_per_pix < 0]=0\n # rescale\n one_std_per_pix *= nano2e # e-\n one_std_per_pix= np.sqrt(one_std_per_pix)\n num_stds= np.random.randn(one_std_per_pix.shape[0],one_std_per_pix.shape[1])\n #one_std_per_pix.shape, num_stds.shape\n noise= one_std_per_pix * num_stds\n # rescale\n noise /= nano2e #nanomaggies\n return noise\n\ndef ivar_for_galaxy(gal,nano2e):\n \"\"\"Adds gaussian noise to perfect source\n\n Args:\n gal: galsim.Image() for source, UNITS: nanomags\n nano2e: factor to convert to e- (gal * nano2e has units e-)\n\n Returns:\n galsim.Image() of invvar for the source, UNITS: nanomags\n \"\"\"\n var= gal.copy() * nano2e #e^2\n var.applyNonlinearity(np.abs)\n var /= nano2e**2 #nanomag^2\n var.invertSelf()\n return var\n\n\nclass BuildStamp():\n \"\"\"Does the drawing of simulated sources on a single exposure\n\n Args:\n tim: Tractor Image Object for a specific CCD\n gain: gain of the CCD\n\n Attributes:\n band: g,r,z\n camera: 'decam', 'mosaic', '90prime'\n gsparams: galsim object that configures how accurate simulated source will be\n gsdeviate: galsim object that configures its random number generator\n wcs: WCS from tim\n psf: psf from tim\n galsim_wcs: wcs repackaged into galsim compatible object\n zpscale: conversion factor 'nanomaggies' to 'Image units used by Legacypipe', which\n are ADU/sec for DECam and e/sec for Bass,MzLS\n nano2e: conversion factor 'nanomaggies' to 'e-'\n \"\"\"\n\n def __init__(self,tim, seed=0,\n camera=None,gain=None,exptime=None):\n #self.camera=camera\n self.band = tim.band.strip()\n # GSParams should be used when galsim 
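        # (Added note) noise_for_galaxy above approximates Poisson shot noise with
        # a Gaussian whose per-pixel sigma is sqrt(counts in electrons), e.g.:
        #   counts_e = np.clip(stamp_nanomaggies, 0, None) * nano2e
        #   noise_nm = np.sqrt(counts_e) * np.random.randn(*counts_e.shape) / nano2e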
object is initialized\n # MAX size for sersic n < 6.2\n # https://github.com/GalSim-developers/GalSim/pull/450/commits/755bcfdca25afe42cccfd6a7f8660da5ecda2a65\n self.gsparams = galsim.GSParams(maximum_fft_size=65536)\n #print('FIX ME!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n self.gsdeviate = galsim.BaseDeviate(seed)\n self.wcs = tim.getWcs()\n self.psf = tim.getPsf()\n\n # zpscale equivalent to magzpt = self.t.ccdzpt+2.5*np.log10(self.t.exptime)\n self.zpscale = tim.zpscale # nanomaggies-->ADU (decam) or e-/sec (bass,mzls)\n assert(camera in ['decam','mosaic','90prime'])\n if camera == 'decam':\n self.nano2e = self.zpscale*gain # nanomaggies -> ADU -> e-\n else:\n # correct for mzls, possibly not for bass\n self.nano2e = self.zpscale # nanomaggies -> e-/s -> e-\n\n def setlocal(self,obj):\n \"\"\"Get the pixel positions, local wcs, local PSF.\"\"\"\n\n xx, yy = self.wcs.positionToPixel(RaDecPos(obj.get('ra'), obj.get('dec')))\n self.pos = galsim.PositionD(xx, yy)\n self.xpos = int(self.pos.x)\n self.ypos = int(self.pos.y)\n self.offset = galsim.PositionD(self.pos.x-self.xpos, self.pos.y-self.ypos)\n\n # Get the local PSF\n self.localpsf = self.psf.getPointSourcePatch(self.xpos, self.ypos)\n self.localpsf= galsim.Image(self.localpsf.getImage(),\n scale=self.wcs.pixscale_at(self.xpos,self.ypos))\n #if self.camera == '90prime':\n # print('band=',self.band,'px scale=',self.wcs.pixscale_at(self.xpos,self.ypos))\n\n def star(self,obj):\n \"\"\"Render a star (PSF).\"\"\"\n log = logging.getLogger('decals_sim')\n # Use input flux as the 7'' aperture flux\n self.setlocal(obj)\n stamp = self.localpsf.copy()\n stamp.shift(dx=self.offset.x,dy=self.offset.y)\n # Scale to desired flux\n stamp *= float(obj.get(self.band+'flux')) # [nanomaggies]\n # position in observed image\n stamp.setCenter(self.xpos, self.ypos)\n return stamp\n\n\n def convolve_galaxy(self,gal):\n \"\"\"Convolve the object with the PSF and then draw it.\"\"\"\n psf= self.localpsf.copy()\n # doesn't change tractor measurements\n #psf /= psf.array.sum()\n psf= galsim.InterpolatedImage(psf)\n #gsparams=self.gsparams)\n return galsim.Convolve([gal, psf]) #, gsparams=self.gsparams)\n\n def elg(self,obj):\n \"\"\"Create an ELG (disk-like) galaxy.\"\"\"\n # Create localpsf object\n self.setlocal(obj)\n #try:\n # TRIAL: galaxy profile\n gal = galsim.Sersic(float(obj.get('n')),\n half_light_radius=float(obj.get('rhalf')),\\\n flux=float(obj.get(self.band+'flux')),\n gsparams=self.gsparams)\n gal = gal.shear(e1=float(obj.get('e1')), e2=float(obj.get('e2')))\n # Convolve with normed-psf\n gal = self.convolve_galaxy(gal)\n gal = gal.drawImage(method='auto',\n offset=self.offset,\n scale= self.wcs.pixscale_at(self.xpos,self.ypos))\n #method='no_pixel'\n # Scale to desired flux\n print('DREW galaxy, flux input=%.4f, actual=%.4f' % \\\n (float(obj.get(self.band+'flux')),gal.array.sum()))\n # position in observed image\n gal.setCenter(self.xpos, self.ypos)\n return gal\n\n def lrg(self,obj):\n \"\"\"Create an LRG just like did for ELG\"\"\"\n return self.elg(obj)\n\n def qso(self,obj):\n \"\"\"Create a QSO just like a star\"\"\"\n return self.star(obj)\n\n\n\ndef flag_nearest_neighbors(Samp, radius_in_deg=5./3600):\n \"\"\"Returns Sample indices to keep (have > dist separations) and indices to skip\n\n Returns:\n tuple: keep,skip: indices of Samp to keep and skip\n \"\"\"\n flag_set=set()\n all_indices= range(len(Samp))\n for cnt in all_indices:\n if cnt in flag_set:\n continue\n else:\n I,J,d = match_radec(Samp.ra[cnt],Samp.dec[cnt],\n Samp.ra,Samp.dec, 
5./3600,\n notself=False,nearest=False)\n # Remove all Samp matches (J), minus the reference ra,dec\n flag_inds= set(J).difference(set( [cnt] ))\n if len(flag_inds) > 0:\n flag_set= flag_set.union(flag_inds)\n keep= list( set(all_indices).difference(flag_set) )\n return keep, list(flag_set)\n\ndef get_ellip(q):\n \"\"\"Given minor to major axis ratio (q) Returns ellipticity\"\"\"\n return (1-q**2)/(1+q**2)\n\ndef get_e1_e2(q,beta):\n \"\"\"Given minor to major axis ratio (q) and postion angle (beta), Returns e1,e2 tuple\"\"\"\n e= get_ellip(q)\n return e*np.cos(2*beta), e*np.sin(2*beta)\n\n#def build_simcat(nobj=None, brickname=None, brickwcs=None, meta=None, seed=None, noOverlap=True):\ndef build_simcat(Samp=None,brickwcs=None, meta=None):\n \"\"\"Creates the simulated source catalog for a given brick (not CCD).\n\n The WCS for the brick (not CCD) is used to convert ra,dec of source\n to x,y pixel location in brickspace\n\n Args:\n Samp: fits_table for the properties of sources in the brick\n usually a subset of all sources in the brick determined by\n rowstart (rs)\n brickwcs: WCS object for the brick\n meta: 'metacat' table\n fits_table with configuration-like params for the simulated sources\n\n Returns:\n tuple of\n cat:\n skipping_ids:\n \"\"\"\n log = logging.getLogger('decals_sim')\n\n #rand = np.random.RandomState(seed)\n\n # Assign central coordinates uniformly but remove simulated sources which\n # are too near to one another. Iterate until we have the requisite number\n # of objects.\n #bounds = brickwcs.radec_bounds()\n #ra = rand.uniform(bounds[0], bounds[1], nobj)\n #dec = rand.uniform(bounds[2], bounds[3], nobj)\n i_keep,i_skip= flag_nearest_neighbors(Samp, radius_in_deg=5./3600)\n skipping_ids= Samp.get('id')[i_skip]\n log.info('sources %d, keeping %d, flagged as nearby %d' % (len(Samp),len(i_keep),len(i_skip)))\n Samp.cut(i_keep)\n\n xxyy = brickwcs.radec2pixelxy(Samp.ra,Samp.dec)\n\n #cat = Table()\n #cat['ID'] = Column(Samp.get('id'),dtype='i4') #np.arange(nobj, dtype='i4'))\n #cat['RA'] = Column(Samp.ra, dtype='f8')\n #cat['DEC'] = Column(Samp.dec, dtype='f8')\n #cat['X'] = Column(xxyy[1][:], dtype='f4')\n #cat['Y'] = Column(xxyy[2][:], dtype='f4')\n cat = fits_table()\n for key in ['id','ra','dec']:\n cat.set(key, Samp.get(key))\n cat.set('x', xxyy[1][:])\n cat.set('y', xxyy[2][:])\n\n typ=meta.get('objtype')[0]\n # Mags\n filts = ['%s %s' % ('DES', f) for f in 'grz']\n for band in ['g','r','z']:\n nanomag= 1E9*10**(-0.4*Samp.get(band))\n # Add extinction (to stars too, b/c \"decam-chatter 6517\")\n mw_transmission= SFDMap().extinction(['DES %s' % band],\n Samp.ra, Samp.dec)\n mw_transmission= 10**(-mw_transmission[:,0].astype(np.float32)/2.5)\n cat.set('%sflux' % band, nanomag * mw_transmission)\n cat.set('mw_transmission_%s' % band, mw_transmission)\n\n # Galaxy Properties\n if typ in ['elg','lrg']:\n # Convert to e1,e2 if given ba,pa\n if ('ba' in Samp.get_columns()) & ('pa' in Samp.get_columns()):\n e1,e2= get_e1_e2(Samp.get('ba'),Samp.get('pa'))\n Samp.set('e1',e1)\n Samp.set('e2',e2)\n for key in ['n','rhalf','e1','e2']:\n cat.set(key, Samp.get(key))\n # Sersic n: GALSIM n = [0.3,6.2] for numerical stability,see\n # https://github.com/GalSim-developers/GalSim/issues/{325,450}\n return cat, skipping_ids\n\n\n\ndef get_parser():\n '''return parser object, tells it what options to look for\n options can come from a list of strings or command line'''\n parser = argparse.ArgumentParser(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter,\n description='DECaLS 
simulations.')\n parser.add_argument('--dataset', type=str, choices=['dr5','dr3', 'cosmos','dr6'], required=True, help='see definitions in obiwan/test/README.md')\n parser.add_argument('-o', '--objtype', type=str, choices=['star','elg', 'lrg', 'qso'], default='star', required=True)\n parser.add_argument('-b', '--brick', type=str, default='2428p117', required=True)\n parser.add_argument('--outdir', default='./', required=False)\n parser.add_argument('--logfn', default='./', required=False)\n parser.add_argument('-n', '--nobj', type=int, default=500, metavar='',\n help='number of objects to simulate (required input)')\n parser.add_argument('-rs', '--rowstart', type=int, default=0, metavar='',\n help='zero indexed, row of ra,dec,mags table, after it is cut to brick, to start on')\n parser.add_argument('--do_skipids', type=str, choices=['no','yes'],default='no', help='inject skipped ids for brick, otherwise run as usual')\n parser.add_argument('--do_more', type=str, choices=['no','yes'],default='no', help='yes if running more randoms b/c TS returns too few targets')\n parser.add_argument('--minid', type=int, default=None, help='set if do_more==yes, minimum id to consider, useful if adding more randoms mid-run')\n parser.add_argument('--randoms_db', default='obiwan_elg', help='desi db table name for randoms')\n parser.add_argument('--randoms_from_fits', default=None, help='set to read randoms from fits file instead of scidb2.nersc.gov db, set to absolute path of local fits file on computer')\n parser.add_argument('--dont_sort_sampleid', action=\"store_true\", default=False, help='False to sort sample by id')\n parser.add_argument('-t', '--threads', type=int, default=1, metavar='',\n help='number of threads to use when calling The Tractor')\n parser.add_argument('-z', '--zoom', nargs=4, default=(0, 3600, 0, 3600), type=int, metavar='',\n help='see runbrick.py; (default is 0 3600 0 3600)')\n parser.add_argument('-survey-dir', '--survey_dir', metavar='',\n help='Location of survey-ccds*.fits.gz')\n parser.add_argument('--add_sim_noise', action=\"store_true\", help=\"set to add noise to simulated sources\")\n parser.add_argument('-testA','--image_eq_model', action=\"store_true\", help=\"set to set image,inverr by model only (ignore real image,invvar)\")\n parser.add_argument('--all-blobs', action='store_true',\n help='Process all the blobs, not just those that contain simulated sources.')\n parser.add_argument('--stage', choices=['tims', 'image_coadds', 'srcs', 'fitblobs', 'coadds'],\n type=str, default=None, metavar='', help='Run through the stage then stop')\n parser.add_argument('--no_cleanup', action='store_true',default=False,\n help='useful for test_checkpoint function')\n parser.add_argument('--early_coadds', action='store_true',default=False,\n help='add this option to make the JPGs before detection/model fitting')\n parser.add_argument('--bricklist',action='store',default='bricks-eboss-ngc.txt',\\\n help='if using mpi4py, $LEGACY_SURVEY_DIR/bricklist')\n parser.add_argument('--nproc', type=int,action='store',default=1,\\\n help='if using mpi4py')\n parser.add_argument('--all_blobs', action='store_true',default=False,\n help='fit models to all blobs, not just those containing sim sources')\n parser.add_argument('--subset', type=int, default=0,\n help='COSMOS subset number [0 to 4, 10 to 12], only used if dataset = cosmos')\n parser.add_argument('--checkpoint', action='store_true',default=False,\n help='turn on checkpointing')\n parser.add_argument('--skip_ccd_cuts', 
action='store_true',default=False,\n help='no ccd cuts')\n parser.add_argument('--overwrite_if_exists', action='store_true',default=False,\n help='run the code even if expected output already exists')\n parser.add_argument('-v', '--verbose', action='store_true', help='toggle on verbose output')\n return parser\n\ndef create_metadata(kwargs=None):\n \"\"\"fits_table with configuration-like params for the simulated sources\n\n TODO: Should metacat table have a rowstart column?\n TODO: One metacat table per brick, instead of one per `rs*` directory?\n\n Args:\n kwargs: configuration-like params for the simulated sources\n {'brickname': which chunk of sky\n 'objtype': star,elg,lrg,qso\n 'nobj': number of simulated sources for this run\n }\n\n Returns:\n Nothing\n writes the 'metacat' fits_table to disk and stores it\n in the kwargs input arg\n \"\"\"\n assert(kwargs is not None)\n log = logging.getLogger('decals_sim')\n # Pack the input parameters into a meta-data table and write out.\n #metacols = [\n # ('BRICKNAME', 'S10'),\n # ('OBJTYPE', 'S10'),\n # ('NOBJ', 'i4'),\n # ('CHUNKSIZE', 'i2'),\n # ('NCHUNK', 'i2'),\n # ('ZOOM', 'i4', (4,)),\n # ('SEED', 'S20'),\n # ('RMAG_RANGE', 'f4', (2,))]\n #metacat = Table(np.zeros(1, dtype=metacols))\n metacat = fits_table()\n for key in ['brickname','objtype']: #,'nchunk']:\n metacat.set(key, np.array( [kwargs[key]] ))\n metacat.set('nobj', np.array( [kwargs['args'].nobj] ))\n metacat.set('zoom', np.array( [kwargs['args'].zoom] ))\n #metacat['RMAG_RANGE'] = kwargs['args'].rmag_range\n #if not kwargs['args'].seed:\n # log.info('Random seed = {}'.format(kwargs['args'].seed))\n # metacat['SEED'] = kwargs['args'].seed\n #metacat_dir = os.path.join(kwargs['decals_sim_dir'], kwargs['objtype'],kwargs['brickname'][:3],kwargs['brickname'])\n metacat_dir= get_outdir_runbrick(kwargs['decals_sim_dir'],\n kwargs['brickname'],kwargs['rowst'],\n do_skipids=kwargs['do_skipids'],do_more=kwargs['do_more'])\n if not os.path.exists(metacat_dir):\n os.makedirs(metacat_dir)\n metafile = os.path.join(metacat_dir, 'metacat'+get_fnsuffix(**kwargs))\n log.info('Writing {}'.format(metafile))\n if os.path.isfile(metafile):\n os.remove(metafile)\n metacat.writeto(metafile)\n # Store new stuff\n kwargs['metacat']=metacat\n kwargs['metacat_dir']=metacat_dir\n\n\ndef create_ith_simcat(d=None):\n \"\"\"Write 'simcat' and 'skipped_ids' tables for a given sample of sources\n\n Args:\n d: {'Samp': fits_table for the properties of sources in the brick\n 'brickwcs': WCS object for the brick\n 'metacat': fits_table with configuration params for the simulated sources\n }\n\n Returns:\n Nothing, saves the 'simcat' and 'skipped_ids' tables\n Adds 'simcat' table to dict 'd'\n \"\"\"\n assert(d is not None)\n log = logging.getLogger('decals_sim')\n #chunksuffix = '{:02d}'.format(ith_chunk)\n # Build and write out the simulated object catalog.\n #seed= d['seeds'][ith_chunk]\n #simcat = build_simcat(d['nobj'], d['brickname'], d['brickwcs'], d['metacat'], seed)\n simcat, skipped_ids = build_simcat(Samp=d['Samp'],brickwcs=d['brickwcs'],meta=d['metacat'])\n # Simcat\n simcat_dir = get_outdir_runbrick(d['decals_sim_dir'],\n d['brickname'],d['rowst'],\n do_skipids=d['do_skipids'],do_more=d['do_more'])\n if not os.path.exists(simcat_dir):\n os.makedirs(simcat_dir)\n #simcatfile = os.path.join(simcat_dir, 'simcat-{}-{}-row{}-{}.fits'.format(d['brickname'], d['objtype'],rowstart,rowend)) # chunksuffix))\n simcatfile = os.path.join(simcat_dir, 'simcat'+get_fnsuffix(**d))\n if os.path.isfile(simcatfile):\n 
os.remove(simcatfile)\n simcat.writeto(simcatfile)\n log.info('Wrote {}'.format(simcatfile))\n # Skipped Ids\n if len(skipped_ids) > 0:\n skip_table= fits_table()\n skip_table.set('ids',skipped_ids)\n name= os.path.join(simcat_dir,'skippedids'+get_fnsuffix(**d))\n if os.path.exists(name):\n os.remove(name)\n log.info('Removed %s' % name)\n skip_table.writeto(name)\n log.info('Wrote {}'.format(name))\n # add to dict\n d['simcat']= simcat\n d['simcat_dir']= simcat_dir\n\ndef get_checkpoint_fn(outdir,brick,rowstart):\n return os.path.join(outdir,'checkpoint',\n brick[:3],brick,\n 'checkpoint_rs%d.pickle' % rowstart)\n\ndef get_runbrick_setup(**kwargs):\n \"\"\"Convert runbrick.py cmd line options into `**kwargs` for run_brick()\n\n The command line options depend on the Data Release (e.g. the\n legacypipe code version. The cmd line options associated with\n each DR get modified and repackaged into a dict in\n legacypipe.runbrick so this converter is required to call run_brick\n appropriately\n\n Args:\n **kwargs: dict of the cmd line options to obiwan.kenobi.py\n\n Returns:\n dict to use when calling legacypipe.runbrick.run_brick like\n run_brick(brickname, survey, `**dict`)\n \"\"\"\n dataset= kwargs['dataset']\n assert(dataset in DATASETS)\n from legacypipe.runbrick import get_runbrick_kwargs\n from legacypipe.runbrick import get_parser as get_runbrick_parser\n zm= kwargs['zoom']\n cmd_line= ['--no-write', '--skip','--force-all',\n '--zoom','%d' % zm[0],'%d' % zm[1],'%d' % zm[2],'%d' % zm[3],\n '--no-wise', '--threads','%d' % kwargs['threads']]\n if kwargs['checkpoint']:\n checkpoint_fn= get_checkpoint_fn(kwargs['outdir'],\n kwargs['brick'], kwargs['rowstart'])\n cmd_line += ['--checkpoint',checkpoint_fn]\n if kwargs['stage']:\n cmd_line += ['--stage', kwargs['stage']]\n if kwargs['early_coadds']:\n cmd_line += ['--early-coadds', '--stage', 'image_coadds']\n if kwargs['skip_ccd_cuts']:\n cmd_line += ['--skip_ccd_cuts']\n #if kwargs['stage']:\n # cmd_line += ['--stage', '%s' % kwargs['stage']]\n if dataset == 'dr3':\n #cmd_line += ['--hybrid-psf']\n cmd_line += ['--run', 'dr3','--nsigma', '6','--simp']\n elif dataset == 'dr5':\n # defaults: rex (use --simp), nsigma 6 ,hybrid-psf (--no-hybrid-psf otherwise)\n # depth cut already done (use --depth-cut to do depth cut anyway)\n cmd_line += ['--run', 'dr5']\n\n rb_parser= get_runbrick_parser()\n rb_opt = rb_parser.parse_args(args=cmd_line)\n rb_optdict = vars(rb_opt)\n # remove keys as Dustin' does\n _= rb_optdict.pop('ps', None)\n _= rb_optdict.pop('verbose',None)\n _, rb_kwargs= get_runbrick_kwargs(**rb_optdict)\n return rb_kwargs\n\ndef do_one_chunk(d=None):\n \"\"\"Runs the legacypipe/Tractor pipeline on images with simulated sources\n\n Args:\n d: {'args': obiwan.kenobi.py cmd line argparse.Namespace object\n 'brickname': chunk of sky\n 'metacat': fits_table configuration params for the simulated sources\n 'simcat': fits_table simulated source catalog for a given brick (not CCD).\n\n Note:\n runb_brick() is 'main' for the legacypipe/Tractor pipeline\n\n Returns:\n Nothing, but this func end ups writing out all the obiwan results\n \"\"\"\n assert(d is not None)\n kw= dict(dataset=d['args'].dataset,\\\n metacat=d['metacat'], simcat=d['simcat'], \\\n output_dir=d['simcat_dir'], \\\n add_sim_noise=d['args'].add_sim_noise, seed=d['seed'],\\\n image_eq_model=d['args'].image_eq_model)\n if d['args'].dataset == 'cosmos':\n kw.update(subset=d['args'].subset)\n simdecals= SimDecalsCosmos(**kw)\n else:\n simdecals = SimDecals(**kw)\n # Use Tractor to 
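`get_checkpoint_fn` above is pure path arithmetic; a quick worked call makes the directory layout it produces concrete (the paths here are made up for illustration):

```python
import os

def get_checkpoint_fn(outdir, brick, rowstart):
    # same logic as above: checkpoint/<first 3 chars of brick>/<brick>/...
    return os.path.join(outdir, 'checkpoint', brick[:3], brick,
                        'checkpoint_rs%d.pickle' % rowstart)

print(get_checkpoint_fn('/tmp/obiwan_out', '1238p245', 500))
# /tmp/obiwan_out/checkpoint/123/1238p245/checkpoint_rs500.pickle
```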
just process the blobs containing the simulated sources.\n if d['args'].all_blobs:\n blobxy = None\n else:\n blobxy = zip(d['simcat'].get('x'), d['simcat'].get('y'))\n # Default runbrick call sequence\n obiwan_kwargs= vars(d['args'])\n runbrick_kwargs= get_runbrick_setup(**obiwan_kwargs)\n # Obiwan modifications\n runbrick_kwargs.update(blobxy=blobxy)\n #plotbase='obiwan')\n print('Calling run_brick with: ')\n print('brickname= %s' % d['brickname'])\n print('simdecals= ',simdecals)\n print('runbrick_kwards= ',runbrick_kwargs)\n # Run it: run_brick(brick, survey obj, **kwargs)\n np.random.seed(d['seed'])\n print(runbrick_kwargs)\n run_brick(d['brickname'], simdecals, **runbrick_kwargs)\n\ndef dobash(cmd):\n print('UNIX cmd: %s' % cmd)\n if os.system(cmd): raise ValueError\n\ndef do_ith_cleanup(d=None):\n \"\"\"Moves all obiwan+legacypipe outputs to a new directory stucture\n\n Uses rsync to move everthing, if all the rsync's succeed then all the\n original files and directories are removed\n\n Args:\n d: dict with keys brickname, simcat_dir\n \"\"\"\n assert(d is not None)\n log = logging.getLogger('decals_sim')\n log.info('Cleaning up...')\n brick= d['brickname']\n bri= brick[:3]\n outdir= d['simcat_dir']\n rsdir= os.path.basename(outdir)\n # outdir/obj\n base= os.path.dirname(\n os.path.dirname(\n os.path.dirname(outdir)))\n\n drs= ['obiwan','coadd']\n print(d)\n print(d['args'])\n if not d['args'].early_coadds:\n drs += ['metrics','tractor','tractor-i']\n for dr in drs:\n dobash('mkdir -p %s/%s/%s/%s/%s' % \\\n (base,dr,bri,brick,rsdir))\n\n # obiwan\n dobash('mv %s/*.fits %s/obiwan/%s/%s/%s/' % \\\n (outdir, base,bri,brick,rsdir))\n dobash('mv %s/coadd/%s/%s/sim_ids_added.fits %s/obiwan/%s/%s/%s/' % \\\n (outdir,bri,brick, base,bri,brick,rsdir))\n # coadd\n dobash('mv %s/coadd/%s/%s/* %s/coadd/%s/%s/%s/' % \\\n (outdir,bri,brick, base,bri,brick,rsdir))\n\n if not d['args'].early_coadds:\n # metrics,tractor,tractor-i\n for dr in ['metrics','tractor','tractor-i']:\n dobash('mv %s/%s/%s/* %s/%s/%s/%s/%s/' % \\\n (outdir,dr,bri, base,dr,bri,brick,rsdir))\n # Remove original outdir\n dobash('rm -r %s' % outdir)\n\n # Remove unneeded coadd files\n names= ['nexp','depth']\n if not d['args'].early_coadds:\n drs+= ['chi2']\n for name in names:\n dobash('rm %s/coadd/%s/%s/%s/*%s*' %\n (base,bri,brick,rsdir,name))\n if rsdir != 'rs0':\n # jpgs are nice to look at, but only keep in 1 dir\n for name in ['jpg']:\n dobash('rm %s/coadd/%s/%s/%s/*.%s' %\n (base,bri,brick,rsdir,name))\n\n\ndef get_sample(objtype,brick,randoms_db,\n minid=None,randoms_from_fits='',\n do_skipids='no',outdir=None,\n dont_sort_sampleid=False):\n \"\"\"Gets all simulated randoms for a brick from PSQl db, and applies all relevant cuts\n\n Args:\n objtype: elg,lrg\n brick:\n randoms_db: name of PSQL db for randoms, e.g. 
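`dobash` above shells out via `os.system` and raises a bare `ValueError` on any non-zero exit. One possible hardening, not what the source uses: a `subprocess`-based variant (Python 3.5+) that raises a `CalledProcessError` carrying the command and exit code:

```python
import subprocess

def dobash(cmd):
    """Run a shell command, echoing it first; raise on non-zero exit."""
    print('UNIX cmd: %s' % cmd)
    subprocess.run(cmd, shell=True, check=True)

dobash('mkdir -p /tmp/obiwan_demo/coadd/123')   # illustrative path
```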
obiwan_elg_ra175\n minid: None, unless do_more == yes then it is an integer for the randoms id to start from\n randoms_from_fits: None or filename of fits_table to use for randoms\n do_skipids: yes or no, rerunning on all skipped randoms?\n outdir: None if do_skipids='no'; otherwise path like $CSCRATCH/obiwan_out/elg_9deg2_ra175\n dont_sort_sampleid: False to sort sample by id\n\n\n Returns:\n tupe: sample fits_table, seed\n \"\"\"\n assert(do_skipids in ['yes','no'])\n if do_skipids == 'yes':\n assert(not outdir is None)\n if randoms_from_fits:\n Samp,seed= fits_table(randoms_from_fits),1\n else:\n if do_skipids == 'no':\n Samp,seed= getSrcsInBrick(brick,objtype, db_table=randoms_db)\n elif do_skipids == 'yes':\n skip_ids= get_skip_ids(outdir, brick, objtype)\n Samp,seed= getSrcsInBrick(brick,objtype, db_table=randoms_db,\n skipped_ids= skip_ids)\n # Already did these cuts in decals_sim_radeccolors\n #r0,r1,d0,d1= brickwcs.radec_bounds()\n #Samp.cut( (Samp.ra >= r0)*(Samp.ra <= r1)*\\\n # (Samp.dec >= d0)*(Samp.dec <= d1) )\n # Sort by Sersic n low -> high (if elg or lrg)\n # Apply cuts\n if minid:\n Samp.cut( Samp.id >= minid )\n if dont_sort_sampleid == False:\n # breaks clustering but robus to adding more ids\n Samp= Samp[np.argsort(Samp.id) ]\n return Samp,seed\n\n\n\ndef main(args=None):\n \"\"\"Main routine which parses the optional inputs.\"\"\"\n t0= Time()\n # Command line options\n if args is None:\n # Read from cmd line\n parser= get_parser()\n args = parser.parse_args(args=args)\n else:\n # args is already a argparse.Namespace obj\n pass\n # Print calling sequence\n print('Args:', args)\n if args.do_more == 'yes':\n assert(not args.minid is None)\n # Setup loggers\n if args.verbose:\n lvl = logging.DEBUG\n else:\n lvl = logging.INFO\n logging.basicConfig(level=lvl, stream=sys.stdout) #,format='%(message)s')\n log = logging.getLogger('decals_sim')\n # Sort through args\n #log.info('decals_sim.py args={}'.format(args))\n #max_nobj=500\n #max_nchunk=1000\n #if args.ith_chunk is not None: assert(args.ith_chunk <= max_nchunk-1)\n #assert(args.nchunk <= max_nchunk)\n #assert(args.nobj <= max_nobj)\n #if args.ith_chunk is not None:\n # assert(args.nchunk == 1) #if choose a chunk, only doing 1 chunk\n if args.nobj is None:\n parser.print_help()\n sys.exit(1)\n\n # Exit if expected output already exists\n rsdir= get_outdir_runbrick(args.outdir,\n args.brick,args.rowstart,\n do_skipids=args.do_skipids,\n do_more=args.do_more)\n rsdir= os.path.basename(rsdir)\n tractor_fn= os.path.join(args.outdir,\n 'tractor',args.brick[:3],args.brick,\n rsdir,\n 'tractor-%s.fits' % args.brick)\n if (os.path.exists(tractor_fn) &\n (not args.overwrite_if_exists)):\n print('Exiting, already finished %s' % tractor_fn)\n return 0 #sys.exit(0)\n\n brickname = args.brick\n objtype = args.objtype\n\n # Output dir\n decals_sim_dir = args.outdir\n\n #nchunk = args.nchunk\n #rand = np.random.RandomState(args.seed) # determines seed for all chunks\n #seeds = rand.random_integers(0,2**18, max_nchunk)\n\n log.info('Object type = {}'.format(objtype))\n #log.info('Number of objects = {}'.format(nobj))\n #log.info('Number of chunks = {}'.format(nchunk))\n\n # Optionally zoom into a portion of the brick\n survey = LegacySurveyData()\n brickinfo= get_brickinfo_hack(survey,brickname)\n #brickinfo = survey.get_brick_by_name(brickname)\n #print(brickname)\n brickwcs = wcs_for_brick(brickinfo)\n W, H, pixscale = brickwcs.get_width(), brickwcs.get_height(), brickwcs.pixel_scale()\n\n log.info('Brick = {}'.format(brickname))\n 
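The cuts at the end of `get_sample` reduce to a boolean mask plus an `argsort`; the same two operations on a bare numpy array (toy ids, not real randoms):

```python
import numpy as np

ids = np.array([7, 3, 9, 1, 5])
minid = 4
ids = ids[ids >= minid]       # Samp.cut(Samp.id >= minid)
ids = ids[np.argsort(ids)]    # Samp[np.argsort(Samp.id)]
print(ids)                    # [5 7 9]
```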
if args.zoom is not None: # See also runbrick.stage_tims()\n (x0, x1, y0, y1) = args.zoom\n W = x1 - x0\n H = y1 - y0\n brickwcs = brickwcs.get_subimage(x0, y0, W, H)\n log.info('Zoom (pixel boundaries) = {}'.format(args.zoom))\n targetrd = np.array([brickwcs.pixelxy2radec(x, y) for x, y in\n [(1,1), (W,1), (W,H), (1,H), (1,1)]])\n\n radec_center = brickwcs.radec_center()\n log.info('RA, Dec center = {}'.format(radec_center))\n log.info('Brick = {}'.format(brickname))\n t0= ptime('First part of Main()',t0)\n\n # SAMPLE table\n sample_kwargs= {\"objtype\":args.objtype,\n \"brick\":args.brick,\n \"outdir\":args.outdir,\n \"randoms_db\":args.randoms_db,\n \"minid\":args.minid,\n \"do_skipids\":args.do_skipids,\n \"randoms_from_fits\":args.randoms_from_fits,\n \"dont_sort_sampleid\":args.dont_sort_sampleid}\n Samp,seed= get_sample(**sample_kwargs)\n\n Samp= Samp[args.rowstart:args.rowstart + args.nobj]\n # Performance\n #if objtype in ['elg','lrg']:\n # Samp=Samp[np.argsort( Samp.get('%s_n' % objtype) )]\n print('Max sample size=%d, actual sample size=%d' % (args.nobj,len(Samp)))\n assert(len(Samp) <= args.nobj)\n t0= ptime('Got randoms sample',t0)\n\n # Store args in dict for easy func passing\n kwargs=dict(Samp=Samp,\\\n brickname=brickname, \\\n checkpoint=args.checkpoint, \\\n seed= seed,\n decals_sim_dir= decals_sim_dir,\\\n brickwcs= brickwcs, \\\n objtype=objtype,\\\n nobj=len(Samp),\\\n maxobjs=args.nobj,\\\n rowst=args.rowstart,\\\n do_skipids=args.do_skipids,\\\n do_more=args.do_more,\\\n minid=args.minid,\\\n args=args)\n\n # Stop if starting row exceeds length of radec,color table\n if len(Samp) == 0:\n fn= get_outdir_runbrick(kwargs['decals_sim_dir'],\n kwargs['brickname'],kwargs['rowst'],\n do_skipids=kwargs['do_skipids'],do_more=kwargs['do_more'])\n fn+= '_exceeded.txt'\n junk= os.system('touch %s' % fn)\n print('Wrote %s' % fn)\n raise ValueError('starting row=%d exceeds number of artificial sources, quit' % args.rowstart)\n\n # Create simulated catalogues and run Tractor\n create_metadata(kwargs=kwargs)\n t0= ptime('create_metadata',t0)\n # do chunks\n #for ith_chunk in chunk_list:\n #log.info('Working on chunk {:02d}/{:02d}'.format(ith_chunk,kwargs['nchunk']-1))\n # Random ra,dec and source properties\n create_ith_simcat(d=kwargs)\n t0= ptime('create_ith_simcat',t0)\n # Run tractor\n do_one_chunk(d=kwargs)\n t0= ptime('do_one_chunk',t0)\n # Clean up output\n if args.no_cleanup == False:\n do_ith_cleanup(d=kwargs)\n t0= ptime('do_ith_cleanup',t0)\n log.info('All done!')\n return 0\n\nif __name__ == '__main__':\n print('obiwan started at %s' % time_builtin.strftime(\"%Y-%m-%d %H:%M:%S\"))\n main()\n print('obiwan finshed at %s' % time_builtin.strftime(\"%Y-%m-%d %H:%M:%S\"))\n","sub_path":"py/obiwan/kenobi.py","file_name":"kenobi.py","file_ext":"py","file_size_in_byte":46533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563222235","text":"from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer\nfrom array import array\nimport ROOT\n\n\nclass FakeFactorAnalyzer(Analyzer):\n '''Stores the value of the fakefactor weight to apply to \n data in the application region.\n !!! 
background fractions should be updated for the analysis!\n For more information see:\n https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauJet2TauFakes\n '''\n\n def __init__(self, cfg_ana, cfg_comp, looperName):\n super(FakeFactorAnalyzer, self).__init__(cfg_ana, cfg_comp, looperName)\n if self.cfg_comp.isData:\n self.btagfile = ROOT.TFile(self.cfg_ana.filepath.format(self.cfg_ana.channel,'btag'))\n self.nobtagfile = ROOT.TFile(self.cfg_ana.filepath.format(self.cfg_ana.channel,'nobtag'))\n self.inclfile = ROOT.TFile(self.cfg_ana.filepath.format(self.cfg_ana.channel,'inclusive'))\n self.btagff = self.btagfile.Get('ff_comb')\n self.nobtagff = self.nobtagfile.Get('ff_comb')\n self.inclff = self.inclfile.Get('ff_comb')\n \n\n def fake_factor_semileptonic(self, tau, njets, mvis, mt, lepton_iso, sys='', category='inclusive'):\n '''Interface function to retrieve the fake factors from\n the rootfile for the semileptonic channels.\n \n @param sys : if '' -> nominal value, else can be 'up' or 'down'\n '''\n # NB: the background fractions (frac_qcd, frac_w, frac_tt) must be\n # set on the analyzer before this is called (see class docstring).\n inputs = [tau.pt(),\n tau.decayMode(),\n njets,\n mvis,\n mt,\n lepton_iso,\n self.frac_qcd,\n self.frac_w,\n self.frac_tt]\n if category == 'inclusive':\n ff = self.inclff\n elif category == 'btag':\n ff = self.btagff\n elif category == 'nobtag':\n ff = self.nobtagff\n else:\n raise ValueError('category must be in [\"btag\",\"nobtag\",\"inclusive\"]')\n if sys:\n return ff.value(len(inputs), array('d',inputs),sys)\n return ff.value(len(inputs), array('d',inputs))\n\n def fake_factor_fullyhadronic(self, tau1, tau2, njets, mvis, sys='', category='inclusive'):\n '''Interface function to retrieve the fake factors from\n the rootfile for the fully hadronic channel.\n \n @param sys : if '' -> nominal value, else can be 'up' or 'down'\n '''\n inputs = [tau1.pt(),\n tau2.pt(),\n tau1.decayMode(),\n njets,\n mvis,\n self.frac_qcd,\n self.frac_w,\n self.frac_tt,\n self.frac_dy]\n if category == 'inclusive':\n ff = self.inclff\n elif category == 'btag':\n ff = self.btagff\n elif category == 'nobtag':\n ff = self.nobtagff\n else:\n raise ValueError('category must be in [\"btag\",\"nobtag\",\"inclusive\"]')\n if sys:\n return ff.value(len(inputs), array('d',inputs),sys)\n return ff.value(len(inputs), array('d',inputs))\n\n def set_ff_fullyhadronic(self, tau, tau2, njets, mvis):\n for cat in [\"btag\",\"nobtag\",\"inclusive\"]:\n setattr(tau, \n 'weight_fakefactor_{}'.format(cat),\n self.fake_factor_fullyhadronic(tau,tau2,njets,mvis,category=cat))\n setattr(tau, \n 'weight_fakefactor_{}_up'.format(cat),\n self.fake_factor_fullyhadronic(tau,tau2,njets,mvis,'up',category=cat))\n setattr(tau, \n 'weight_fakefactor_{}_down'.format(cat),\n self.fake_factor_fullyhadronic(tau,tau2,njets,mvis,'down',category=cat))\n\n def set_ff_semileptonic(self, tau, njets, mvis, mt, iso):\n for cat in [\"btag\",\"nobtag\",\"inclusive\"]:\n setattr(tau, \n 'weight_fakefactor_{}'.format(cat),\n self.fake_factor_semileptonic(tau,njets,mvis,mt,iso,category=cat))\n setattr(tau, \n 'weight_fakefactor_{}_up'.format(cat),\n self.fake_factor_semileptonic(tau,njets,mvis,mt,iso,'up',category=cat))\n setattr(tau, \n 'weight_fakefactor_{}_down'.format(cat),\n self.fake_factor_semileptonic(tau,njets,mvis,mt,iso,'down',category=cat))\n\n def process(self, event):\n if not self.cfg_comp.isData:\n return True\n\n njets = len(event.jets30)\n mvis = event.dileptons_sorted[0].mass()\n\n if self.cfg_ana.channel == 'tt':\n tau1 = event.dileptons_sorted[0].leg1()\n tau2 = event.dileptons_sorted[0].leg2()\n self.set_ff_fullyhadronic(tau1, tau2, njets, mvis)\n self.set_ff_fullyhadronic(tau2, tau1, njets, mvis)\n\n elif self.cfg_ana.channel in ['mt','et']:\n mt = 
event.dileptons_sorted[0].mTLeg1(getattr(event,self.cfg_ana.met))\n tau = event.dileptons_sorted[0].leg2()\n lep = event.dileptons_sorted[0].leg1()\n self.set_ff_semileptonic(tau, njets, mvis, mt, lep.iso_htt())\n\n else:\n raise ValueError('Channel not or wrongly set')\n","sub_path":"H2TauTau/python/heppy/analyzers/FakeFactorAnalyzer.py","file_name":"FakeFactorAnalyzer.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181699203","text":"from copy import deepcopy\nfrom collections import deque\n\n\ndef get_array():\n \"\"\"\n\n :rtype: a list of integers\n \"\"\"\n a = []\n with open('IntegerArray.txt') as f:\n for line in f:\n a.append(int(line))\n return a\n\ndef split_array(a):\n \"\"\"\n split a list into two lists\n :param a: list of integers\n :return: two sub-list of integers\n \"\"\"\n n = len(a)\n if n == 1:\n return a\n index = n // 2\n b = a[:index]\n c = a[index:]\n return b, c\n\n\ndef merge_and_count(a, b):\n \"\"\"\n\n :param a: a list of integers\n :param b: a list of integers\n :return: c: the merged list\n count: the number of inversions\n \"\"\"\n n = len(a) + len(b)\n i_a = 0\n i_b = 0\n c = []\n count = 0\n\n for i in range(0, n):\n if i_a >= len(a):\n c.append(b[i_b])\n i_b += 1\n\n elif i_b >= len(b):\n c.append(a[i_a])\n i_a += 1\n\n elif a[i_a] < b[i_b]:\n c.append(a[i_a])\n i_a += 1\n\n else:\n c.append(b[i_b])\n i_b += 1\n count += len(a) - i_a\n return c, count\n\n\ndef countArrary(input_a):\n \"\"\"\n count the number of inversions\n :param input_a: input list of integers\n :return: count of inversions\n \"\"\"\n if len(input_a) == 1:\n return 0\n else:\n # split the input array\n split_a = [input_a]\n while len(split_a) != len(input_a):\n new_split_a = []\n for sub_a in split_a:\n if len(sub_a) > 1:\n b, c = split_array(sub_a)\n new_split_a.append(b)\n new_split_a.append(c)\n else:\n new_split_a.append(sub_a)\n split_a = deepcopy(new_split_a)\n\n # merge and count\n merge_a = deque(split_a)\n count = 0\n while len(merge_a[0]) < len(input_a):\n new_merge_a = []\n while merge_a:\n a = merge_a.popleft()\n if merge_a:\n b = merge_a.popleft()\n c, c_inv = merge_and_count(a, b)\n count += c_inv\n new_merge_a.append(c)\n else:\n new_merge_a.append(a)\n\n merge_a = deque(deepcopy(new_merge_a))\n\n # print(merge_a)\n return count\n\n\nif __name__ == \"__main__\":\n input_a = get_array()\n print(countArrary(input_a))\n","sub_path":"Part_1/Homework_1/compute_number_inversions.py","file_name":"compute_number_inversions.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231893002","text":"import requests\nimport time\nfrom bs4 import BeautifulSoup\n\n\nclass LgSpider(object):\n url = ''\n\n def __init__(self, url=''):\n self.url = url\n\n def run(self):\n urls = self.read_url(\"/home/jwang/PycharmProjects/learnPython/Spiders/lagou.text\")\n\n for url in urls:\n self.url = url\n job_info = self.parser_job(self.download(self.url))\n print(job_info)\n\n company_info = ''\n if job_info['company_link']:\n company_info = self.parser_company(self.download(job_info['company_link']))\n print(company_info)\n\n self.save(job_info, company_info)\n time.sleep(1)\n\n @staticmethod\n def save(job_info, company_info):\n fout = open('job.csv', 'a+')\n fout.write('招聘Title,岗位名称,薪资,城市,经验,学历,职位类型,职位诱惑,职位描述,工作地址,职位链接,'\n '公司链接,公司全称,公司简称,公司简介,公司行业,公司融资,公司规模,公司城市,公司介绍')\n fout.write(\"\\n\")\n 
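The hand-rolled CSV writes in this `save` method break whenever a field itself contains a comma (job descriptions usually do), and the header row is re-written on every append. The standard `csv` module quotes such fields automatically; a sketch (Python 3, illustrative path):

```python
import csv

row = {'title': 'Python dev', 'salary': '15k-25k',
       'desc': 'crawler, parser, storage'}      # commas inside a field

with open('/tmp/job.csv', 'a+', newline='') as fout:
    writer = csv.writer(fout)
    writer.writerow([row['title'], row['salary'], row['desc']])
    # csv.writer quotes 'crawler, parser, storage' so the column count survives
```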
fout.write(job_info['title'] + ',')\n fout.write(job_info['name'] + ',')\n fout.write(job_info['salary'] + ',')\n fout.write(job_info['city'] + ',')\n fout.write(job_info['exp'] + ',')\n fout.write(job_info['edu'] + ',')\n fout.write(job_info['type'] + ',')\n fout.write(job_info['job_advantage'] + ',')\n fout.write(job_info['job_desc'] + ',')\n fout.write(job_info['job_addr'] + ',')\n fout.write(job_info['url'] + ',')\n fout.write(job_info['company_link'] + ',')\n\n fout.write(company_info['full_name'] + ',')\n fout.write(company_info['short_name'] + ',')\n fout.write(company_info['word'] + ',')\n fout.write(company_info['type'] + ',')\n fout.write(company_info['finance_stage'] + ',')\n fout.write(company_info['size'] + ',')\n fout.write(company_info['location'] + ',')\n fout.write(company_info['desc'] + ',')\n\n fout.close()\n\n @staticmethod\n def download(url):\n if url is None:\n return None\n\n headers = {\n 'User-Agent': \"Mozilla / 5.0(X11;Linux x86_64) AppleWebKit\"\n \" / 537.36(KHTML, like Gecko) Chrome / 59.0.3071.104 Safari / 537.36\",\n 'Accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n 'Accept-Language': \"zh,zh-CN;q=0.8,en;q=0.6,en-US;q=0.4\",\n 'Cookie': \"WEBTJ-ID=06092018%2C212124-163e4b5684bad3-06bce6028c62e-102e130c-2073600-163e4b5684c41c; _ga=GA1.2.1590203236.1528550484; user_trace_token=20180609212124-01c2e442-6be8-11e8-9438-5254005c3644; LGUID=20180609212124-01c2e880-6be8-11e8-9438-5254005c3644; _gid=GA1.2.764309885.1528550484; JSESSIONID=ABAAABAABEEAAJA3ED4C1A2CD94390C40135CD065381764; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528550484,1528550490; LGSID=20180609230847-0225b521-6bf7-11e8-9928-525400f775ce; index_location_city=%E5%8C%97%E4%BA%AC; TG-TRACK-CODE=index_navigation; SEARCH_ID=dd3407b3e7254e62bf85c119691102fe; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528558827; LGRID=20180609234027-6e9d13a2-6bfb-11e8-9929-525400f775ce\"\n }\n\n response = requests.request('get', url, headers=headers)\n response.encoding = 'utf-8'\n if response.status_code != 200:\n return None\n\n return response.text\n\n def parser_job(self, html):\n if html is None:\n return None\n soup = BeautifulSoup(html, 'html.parser')\n data = dict()\n\n data['url'] = self.url\n\n title_node = soup.select('div[class=job-name] div[class=company]')[0]\n data['title'] = title_node.get_text()\n\n name_node = soup.select('div[class=job-name] span[class=name]')[0]\n data['name'] = name_node.get_text()\n\n job_request_node = soup.select(\"dd[class=job_request] p span\")\n data['salary'] = job_request_node[0].get_text().strip('/ ')\n data['city'] = job_request_node[1].get_text().strip('/ ')\n data['exp'] = job_request_node[2].get_text().strip('/ ')\n data['edu'] = job_request_node[3].get_text().strip('/ ')\n data['type'] = job_request_node[4].get_text().strip('/ ')\n\n job_advantage_node = soup.select(\"dd[class=job-advantage] p\")\n data['job_advantage'] = job_advantage_node[0].get_text()\n\n job_desc_node = soup.select(\"dd[class=job_bt] div\")\n data['job_desc'] = job_desc_node[0].get_text().replace('\\n', '')\n\n job_addr_node = soup.select(\"div[class=work_addr]\")\n data['job_addr'] = job_addr_node[0].get_text().replace(' ', '').replace('\\n', '').strip('查看地图')\n\n company_node = soup.select(\"dl[class=job_company] dt a\")[0]\n data['company_link'] = company_node['href']\n\n return data\n\n @staticmethod\n def parser_company(html):\n if html is None:\n return None\n soup = BeautifulSoup(html, 'html.parser')\n data = dict()\n\n name = 
soup.select(\"div[class=company_main] h1 a\")[0]\n data['short_name'] = name.get_text().replace('\\n', '').replace(' ', '')\n data['full_name'] = name.attrs['title']\n\n company_word = soup.select(\"div[class=company_word]\")[0]\n data['word'] = company_word.get_text().replace(' ', '').replace('\\n', '')\n\n company_info = soup.select(\"div[id=basic_container] div[class=item_content] span\")\n data['type'] = company_info[0].get_text()\n data['finance_stage'] = company_info[1].get_text()\n data['size'] = company_info[2].get_text()\n data['location'] = company_info[3].get_text()\n\n company_desc = soup.select(\"div[class=company_intro_text] span[class=company_content]\")\n data['desc'] = company_desc[0].get_text().replace('\\n', '').replace(' ', '')\n\n return data\n\n @staticmethod\n def read_url(file):\n urls = []\n if file is None:\n return None\n with open(file) as res:\n for url in res:\n urls.append(url.strip('\\n'))\n\n return urls\n\n\nif __name__ == \"__main__\":\n LgSpider().run()\n","sub_path":"Spiders/lagou_spider.py","file_name":"lagou_spider.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"376499630","text":"import threading \r\n\r\nbarrier = threading.Barrier(3) \r\n\r\nclass thread(threading.Thread): \r\n\tdef __init__(self, thread_ID): \r\n\t\tthreading.Thread.__init__(self) \r\n\t\tself.thread_ID = thread_ID \r\n\tdef run(self): \r\n\t\tprint(str(self.thread_ID) + \"\\n\") \r\n\t\tbarrier.wait() \r\n\t\t\r\nthread1 = thread(100) \r\nthread2 = thread(101) \r\n\r\nthread1.start() \r\nbarrier.wait() \r\n\r\nthread2.start()\r\n\r\n\r\nprint(\"Exit\\n\") \r\n","sub_path":"barrier.py","file_name":"barrier.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45860101","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport json\nimport math\nimport os\nimport requests\nimport struct\nimport time\nfrom six.moves import range\n\nimport girder\nfrom girder import config\nfrom tests import base\n\n\n# boiler plate to start and stop the server\n\nos.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')\nconfig.loadConfig() # Must reload config to pickup correct port\n\nJPEGHeader = '\\xff\\xd8\\xff'\nPNGHeader = '\\x89PNG'\n\n\ndef setUpModule():\n base.enabledPlugins.append('large_image')\n base.startServer(False)\n\n\ndef tearDownModule():\n base.stopServer()\n\n\nclass LargeImageTilesTest(base.TestCase):\n def setUp(self):\n base.TestCase.setUp(self)\n admin = {\n 'email': 'admin@email.com',\n 'login': 'adminlogin',\n 'firstName': 'Admin',\n 'lastName': 'Last',\n 'password': 'adminpassword',\n 'admin': True\n }\n self.admin = 
self.model('user').createUser(**admin)\n folders = self.model('folder').childFolders(\n self.admin, 'user', user=self.admin)\n for folder in folders:\n if folder['name'] == 'Public':\n self.publicFolder = folder\n # Authorize our user for Girder Worker\n resp = self.request(\n '/system/setting', method='PUT', user=self.admin, params={\n 'list': json.dumps([{\n 'key': 'worker.broker',\n 'value': 'mongodb://127.0.0.1/girder_worker'\n }, {\n 'key': 'worker.backend',\n 'value': 'mongodb://127.0.0.1/girder_worker'\n }])})\n self.assertStatusOk(resp)\n\n def _uploadFile(self, path):\n \"\"\"\n Upload the specified path to the admin user's public folder and return\n the resulting item.\n\n :param path: path to upload.\n :returns: file: the created file.\n \"\"\"\n name = os.path.basename(path)\n with open(path, 'rb') as file:\n data = file.read()\n resp = self.request(\n path='/file', method='POST', user=self.admin, params={\n 'parentType': 'folder',\n 'parentId': self.publicFolder['_id'],\n 'name': name,\n 'size': len(data)\n })\n self.assertStatusOk(resp)\n uploadId = resp.json['_id']\n\n fields = [('offset', 0), ('uploadId', uploadId)]\n files = [('chunk', name, data)]\n resp = self.multipartRequest(\n path='/file/chunk', fields=fields, files=files, user=self.admin)\n self.assertStatusOk(resp)\n self.assertIn('itemId', resp.json)\n return resp.json\n\n def _createTestTiles(self, itemId, params={}, info=None, error=None):\n \"\"\"\n Discard any existing tile set on an item, then create a test tile set\n with some optional parameters.\n\n :param itemId: the item on which the tiles are created.\n :param params: optional parameters to use for the tiles.\n :param info: if present, the tile information must match all values in\n this dictionary.\n :param error: if present, expect to get an error from the tile info\n query and ensure that this string is in the error\n message.\n :returns: the tile information dictionary.\n \"\"\"\n # We don't actually use the itemId to fetch test tiles\n try:\n resp = self.request(path='/item/test/tiles', user=self.admin,\n params=params)\n if error:\n self.assertStatus(resp, 400)\n self.assertIn(error, resp.json['message'])\n return None\n except AssertionError as exc:\n if error:\n self.assertIn(error, exc.args[0])\n return\n else:\n raise\n self.assertStatusOk(resp)\n infoDict = resp.json\n if info:\n for key in info:\n self.assertEqual(infoDict[key], info[key])\n return infoDict\n\n def _testTilesZXY(self, itemId, metadata, tileParams={},\n imgHeader=JPEGHeader):\n \"\"\"\n Test that the tile server is serving images.\n\n :param itemId: the item ID to get tiles from.\n :param metadata: tile information used to determine the expected\n valid queries. 
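`_testTilesZXY` below walks every level and probes the four corner tiles; the per-level tile counts come from scaling the image size by 2**(z - levels + 1). A worked run of that arithmetic for the 58368 x 12288 PTIF asserted later in this file:

```python
import math

sizeX, sizeY, tileW, tileH, levels = 58368, 12288, 256, 256, 9
for z in range(levels):
    scale = 2 ** (z - levels + 1)
    maxX = int(math.ceil(sizeX * scale / float(tileW))) - 1
    maxY = int(math.ceil(sizeY * scale / float(tileH))) - 1
    print(z, maxX, maxY)   # z=0 -> (0, 0); z=8 -> (227, 47)
```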
If 'sparse' is added to it, tiles\n are allowed to not exist above that level.\n :param tileParams: optional parameters to send to the tile query.\n :param imgHeader: if something other than a JPEG is expected, this is\n the first few bytes of the expected image.\n \"\"\"\n # We should get images for all valid levels, but only within the\n # expected range of tiles.\n for z in range(metadata.get('minLevel', 0), metadata['levels']):\n maxX = math.ceil(float(metadata['sizeX']) * 2 ** (\n z - metadata['levels'] + 1) / metadata['tileWidth']) - 1\n maxY = math.ceil(float(metadata['sizeY']) * 2 ** (\n z - metadata['levels'] + 1) / metadata['tileHeight']) - 1\n # Check the four corners on each level\n for (x, y) in ((0, 0), (maxX, 0), (0, maxY), (maxX, maxY)):\n resp = self.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (\n itemId, z, x, y), user=self.admin, params=tileParams,\n isJson=False)\n if (resp.output_status[:3] != '200' and\n metadata.get('sparse') and z > metadata['sparse']):\n self.assertStatus(resp, 404)\n continue\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(imgHeader)], imgHeader)\n # Check out of range each level\n for (x, y) in ((-1, 0), (maxX + 1, 0), (0, -1), (0, maxY + 1)):\n resp = self.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (\n itemId, z, x, y), user=self.admin, params=tileParams)\n if x < 0 or y < 0:\n self.assertStatus(resp, 400)\n self.assertTrue('must be positive integers' in\n resp.json['message'])\n else:\n self.assertStatus(resp, 404)\n self.assertTrue('does not exist' in resp.json['message'] or\n 'outside layer' in resp.json['message'])\n # Check negative z level\n resp = self.request(path='/item/%s/tiles/zxy/-1/0/0' % itemId,\n user=self.admin, params=tileParams)\n self.assertStatus(resp, 400)\n self.assertIn('must be positive integers', resp.json['message'])\n # Check non-integer z level\n resp = self.request(path='/item/%s/tiles/zxy/abc/0/0' % itemId,\n user=self.admin, params=tileParams)\n self.assertStatus(resp, 400)\n self.assertIn('must be integers', resp.json['message'])\n # If we set the minLevel, test one lower than it\n if 'minLevel' in metadata:\n resp = self.request(path='/item/%s/tiles/zxy/%d/0/0' % (\n itemId, metadata['minLevel'] - 1), user=self.admin,\n params=tileParams)\n self.assertStatus(resp, 404)\n self.assertIn('layer does not exist', resp.json['message'])\n # Check too large z level\n resp = self.request(path='/item/%s/tiles/zxy/%d/0/0' % (\n itemId, metadata['levels']), user=self.admin, params=tileParams)\n self.assertStatus(resp, 404)\n self.assertIn('layer does not exist', resp.json['message'])\n\n def _postTileViaHttp(self, itemId, fileId):\n \"\"\"\n When we know we need to process a job, we have to use an actual http\n request rather than the normal simulated request to cherrypy. 
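The image checks in these tests compare the first bytes of the payload against the `JPEGHeader`/`PNGHeader` constants defined near the top of this test module. The same magic-byte sniff as a tiny standalone function (bytes literals here assume Python 3):

```python
JPEG_HEADER = b'\xff\xd8\xff'
PNG_HEADER = b'\x89PNG'

def sniff(data):
    """Classify an image payload by its magic bytes."""
    if data.startswith(JPEG_HEADER):
        return 'JPEG'
    if data.startswith(PNG_HEADER):
        return 'PNG'
    return 'unknown'

print(sniff(b'\x89PNG\r\n\x1a\n'))   # PNG
print(sniff(b'\xff\xd8\xff\xe0'))    # JPEG
```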
This is\n required because cherrypy needs to know how it was reached so that\n girder_worker can reach it when done.\n\n :param itemId: the id of the item with the file to process.\n :param fileId: the id of the file that should be processed.\n :returns: metadata from the tile if the conversion was successful,\n False if it converted but didn't result in useable tiles, and\n None if it failed.\n \"\"\"\n headers = [('Accept', 'application/json')]\n self._buildHeaders(headers, None, self.admin, None, None, None)\n headers = {header[0]: header[1] for header in headers}\n req = requests.post('http://127.0.0.1:%d/api/v1/item/%s/tiles' % (\n int(os.environ['GIRDER_PORT']), itemId), headers=headers,\n data={'fileId': fileId})\n self.assertEqual(req.status_code, 200)\n # If we ask to create the item again right away, we should be told that\n # either there is already a job running or the item has already been\n # added\n req = requests.post('http://127.0.0.1:%d/api/v1/item/%s/tiles' % (\n int(os.environ['GIRDER_PORT']), itemId), headers=headers,\n data={'fileId': fileId})\n self.assertEqual(req.status_code, 400)\n self.assertTrue('Item already has' in req.json()['message'] or\n 'Item is scheduled' in req.json()['message'])\n\n starttime = time.time()\n resp = None\n while time.time() - starttime < 30:\n try:\n resp = self.request(path='/item/%s/tiles' % itemId,\n user=self.admin)\n self.assertStatusOk(resp)\n break\n except AssertionError as exc:\n if 'File must have at least 1 level' in exc.args[0]:\n return False\n self.assertIn('is still pending creation', exc.args[0])\n item = self.model('item').load(itemId, user=self.admin)\n job = self.model('job', 'jobs').load(item['largeImage']['jobId'],\n user=self.admin)\n if job['status'] == girder.plugins.jobs.constants.JobStatus.ERROR:\n return None\n time.sleep(0.1)\n self.assertStatusOk(resp)\n return resp.json\n\n def testTilesFromPTIF(self):\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_image.ptif'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n # We shouldn't have tile information yet\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('No large image file', resp.json['message'])\n resp = self.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,\n user=self.admin)\n self.assertStatus(resp, 404)\n self.assertIn('No large image file', resp.json['message'])\n # Asking to delete the tile information succeeds but does nothing\n resp = self.request(path='/item/%s/tiles' % itemId, method='DELETE',\n user=self.admin)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json['deleted'], False)\n # Ask to make this a tile-based item with an invalid file ID\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': itemId})\n self.assertStatus(resp, 400)\n self.assertIn('No such file', resp.json['message'])\n\n # Ask to make this a tile-based item properly\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': fileId})\n self.assertStatusOk(resp)\n # Now the tile request should tell us about the file. 
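`_postTileViaHttp` above polls the tiles endpoint for up to 30 seconds, sleeping 0.1 s between attempts. That pattern generalizes to a small helper; a sketch (not part of the source):

```python
import time

def wait_until(predicate, timeout=30.0, interval=0.1):
    """Poll `predicate` until it returns truthy or `timeout` elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise RuntimeError('condition not met within %.1fs' % timeout)

print(wait_until(lambda: 42))   # returns immediately with 42
```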
These are\n # specific to our test file\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatusOk(resp)\n tileMetadata = resp.json\n self.assertEqual(tileMetadata['tileWidth'], 256)\n self.assertEqual(tileMetadata['tileHeight'], 256)\n self.assertEqual(tileMetadata['sizeX'], 58368)\n self.assertEqual(tileMetadata['sizeY'], 12288)\n self.assertEqual(tileMetadata['levels'], 9)\n tileMetadata['sparse'] = 5\n self._testTilesZXY(itemId, tileMetadata)\n\n # Ask to make this a tile-based item again\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': fileId})\n self.assertStatus(resp, 400)\n self.assertIn('Item already has', resp.json['message'])\n\n # We should be able to delete the large image information\n resp = self.request(path='/item/%s/tiles' % itemId, method='DELETE',\n user=self.admin)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json['deleted'], True)\n\n # We should no longer have tile informaton\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('No large image file', resp.json['message'])\n\n # We should be able to re-add it (we are also testing that fileId is\n # optional if there is only one file).\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin)\n self.assertStatusOk(resp)\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatusOk(resp)\n\n def testTilesFromTest(self):\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_image.ptif'))\n items = [{'itemId': str(file['itemId']), 'fileId': str(file['_id'])}]\n # Create a second item\n resp = self.request(path='/item', method='POST', user=self.admin,\n params={'folderId': self.publicFolder['_id'],\n 'name': 'test'})\n self.assertStatusOk(resp)\n itemId = str(resp.json['_id'])\n items.append({'itemId': itemId})\n # Check that we can't create a tile set with another item's file\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin,\n params={'fileId': items[0]['fileId']})\n self.assertStatus(resp, 400)\n self.assertIn('The provided file must be in the provided item',\n resp.json['message'])\n # Now create a test tile with the default options\n params = {'encoding': 'JPEG'}\n meta = self._createTestTiles(itemId, params, {\n 'tileWidth': 256, 'tileHeight': 256,\n 'sizeX': 256 * 2 ** 9, 'sizeY': 256 * 2 ** 9, 'levels': 10\n })\n self._testTilesZXY('test', meta, params)\n # Test most of our parameters in a single special case\n params = {\n 'minLevel': 2,\n 'maxLevel': 5,\n 'tileWidth': 160,\n 'tileHeight': 120,\n 'sizeX': 5000,\n 'sizeY': 3000,\n 'encoding': 'JPEG'\n }\n meta = self._createTestTiles(itemId, params, {\n 'tileWidth': 160, 'tileHeight': 120,\n 'sizeX': 5000, 'sizeY': 3000, 'levels': 6\n })\n meta['minLevel'] = 2\n self._testTilesZXY('test', meta, params)\n # Test the fractal tiles with PNG\n params = {'fractal': 'true'}\n meta = self._createTestTiles(itemId, params, {\n 'tileWidth': 256, 'tileHeight': 256,\n 'sizeX': 256 * 2 ** 9, 'sizeY': 256 * 2 ** 9, 'levels': 10\n })\n self._testTilesZXY('test', meta, params, PNGHeader)\n # Test that the fractal isn't the same as the non-fractal\n resp = self.request(path='/item/test/tiles/zxy/0/0/0', user=self.admin,\n params=params, isJson=False)\n image = self.getBody(resp, text=False)\n resp = self.request(path='/item/test/tiles/zxy/0/0/0', user=self.admin,\n isJson=False)\n 
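The thumbnail tests further down read image dimensions with `struct.unpack('!LL', image[16:24])`; that works because a PNG stores width and height as big-endian 32-bit integers at bytes 16-24, right after the 8-byte signature and the IHDR chunk length/type. A self-contained check:

```python
import struct

# Minimal PNG prefix: 8-byte signature, IHDR length, 'IHDR', then
# width=200, height=150 as big-endian 32-bit ints at offsets 16-24.
png = (b'\x89PNG\r\n\x1a\n' + struct.pack('!L', 13) + b'IHDR'
       + struct.pack('!LL', 200, 150))
width, height = struct.unpack('!LL', png[16:24])
print(width, height)   # 200 150
```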
self.assertNotEqual(self.getBody(resp, text=False), image)\n # Test each property with an invalid value\n badParams = {\n 'minLevel': 'a',\n 'maxLevel': False,\n 'tileWidth': (),\n 'tileHeight': [],\n 'sizeX': {},\n 'sizeY': 1.3,\n 'encoding': 2,\n }\n for key in badParams:\n err = ('parameter is an incorrect' if key is not 'encoding' else\n 'Invalid encoding')\n self._createTestTiles(itemId, {key: badParams[key]}, error=err)\n\n def testTilesFromPNG(self):\n file = self._uploadFile(os.path.join(\n os.path.dirname(__file__), 'test_files', 'yb10kx5k.png'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n tileMetadata = self._postTileViaHttp(itemId, fileId)\n self.assertEqual(tileMetadata['tileWidth'], 256)\n self.assertEqual(tileMetadata['tileHeight'], 256)\n self.assertEqual(tileMetadata['sizeX'], 10000)\n self.assertEqual(tileMetadata['sizeY'], 5000)\n self.assertEqual(tileMetadata['levels'], 7)\n self._testTilesZXY(itemId, tileMetadata)\n # Ask to make this a tile-based item with an missing file ID (there are\n # now two files, so this will now fail).\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('Missing \"fileId\"', resp.json['message'])\n # We should be able to delete the tiles\n resp = self.request(path='/item/%s/tiles' % itemId, method='DELETE',\n user=self.admin)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json['deleted'], True)\n # We should no longer have tile informaton\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('No large image file', resp.json['message'])\n # This should work with a PNG with transparency, too.\n file = self._uploadFile(os.path.join(\n os.path.dirname(__file__), 'test_files', 'yb10kx5ktrans.png'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n tileMetadata = self._postTileViaHttp(itemId, fileId)\n self.assertEqual(tileMetadata['tileWidth'], 256)\n self.assertEqual(tileMetadata['tileHeight'], 256)\n self.assertEqual(tileMetadata['sizeX'], 10000)\n self.assertEqual(tileMetadata['sizeY'], 5000)\n self.assertEqual(tileMetadata['levels'], 7)\n self._testTilesZXY(itemId, tileMetadata)\n # We should be able to delete the tiles\n resp = self.request(path='/item/%s/tiles' % itemId, method='DELETE',\n user=self.admin)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json['deleted'], True)\n # We should no longer have tile informaton\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('No large image file', resp.json['message'])\n\n def testTilesFromBadFiles(self):\n # Uploading a monochrome file should result in no useful tiles.\n file = self._uploadFile(os.path.join(\n os.path.dirname(__file__), 'test_files', 'small.jpg'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n tileMetadata = self._postTileViaHttp(itemId, fileId)\n self.assertEqual(tileMetadata, False)\n # We should be able to delete the conversion\n resp = self.request(path='/item/%s/tiles' % itemId, method='DELETE',\n user=self.admin)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json['deleted'], True)\n # Uploading a non-image file should run a job, too.\n file = self._uploadFile(os.path.join(\n os.path.dirname(__file__), 'test_files', 'notanimage.txt'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n tileMetadata = self._postTileViaHttp(itemId, fileId)\n self.assertEqual(tileMetadata, None)\n resp = 
self.request(path='/item/%s/tiles' % itemId, method='DELETE',\n user=self.admin)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json['deleted'], True)\n\n def testTilesFromSVS(self):\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_svs_image.TCGA-DU-6399-'\n '01A-01-TS1.e8eb65de-d63e-42db-af6f-14fefbbdf7bd.svs'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n # Ask to make this a tile-based item\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': fileId})\n self.assertStatusOk(resp)\n # Now the tile request should tell us about the file. These are\n # specific to our test file\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatusOk(resp)\n tileMetadata = resp.json\n self.assertEqual(tileMetadata['tileWidth'], 240)\n self.assertEqual(tileMetadata['tileHeight'], 240)\n self.assertEqual(tileMetadata['sizeX'], 31872)\n self.assertEqual(tileMetadata['sizeY'], 13835)\n self.assertEqual(tileMetadata['levels'], 9)\n self._testTilesZXY(itemId, tileMetadata)\n\n # Ask to make this a tile-based item again\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': fileId})\n self.assertStatus(resp, 400)\n self.assertIn('Item already has', resp.json['message'])\n\n # Ask for PNGs\n params = {'encoding': 'PNG'}\n self._testTilesZXY(itemId, tileMetadata, params, PNGHeader)\n\n # Check that invalid encodings are rejected\n try:\n resp = self.request(path='/item/%s/tiles' % itemId,\n user=self.admin,\n params={'encoding': 'invalid'})\n self.assertTrue(False)\n except AssertionError as exc:\n self.assertIn('Invalid encoding', exc.args[0])\n\n # Check that JPEG options are honored.\n resp = self.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,\n user=self.admin, isJson=False)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n defaultLength = len(image)\n\n resp = self.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,\n user=self.admin, isJson=False,\n params={'jpegQuality': 10})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n self.assertTrue(len(image) < defaultLength)\n\n resp = self.request(path='/item/%s/tiles/zxy/0/0/0' % itemId,\n user=self.admin, isJson=False,\n params={'jpegSubsampling': 2})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n self.assertTrue(len(image) < defaultLength)\n\n def testDummyTileSource(self):\n # We can't actually load the dummy source via the endpoints if we have\n # all of the requirements installed, so just check that it exists and\n # will return appropriate values.\n from girder.plugins.large_image.tilesource.dummy import DummyTileSource\n dummy = DummyTileSource()\n self.assertEqual(dummy.getTile(0, 0, 0), '')\n tileMetadata = dummy.getMetadata()\n self.assertEqual(tileMetadata['tileWidth'], 0)\n self.assertEqual(tileMetadata['tileHeight'], 0)\n self.assertEqual(tileMetadata['sizeX'], 0)\n self.assertEqual(tileMetadata['sizeY'], 0)\n self.assertEqual(tileMetadata['levels'], 0)\n\n def testThumbnails(self):\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_image.ptif'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n # We shouldn't be able to get a thumbnail yet\n resp = 
self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('No large image file', resp.json['message'])\n # Ask to make this a tile-based item\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': fileId})\n self.assertStatusOk(resp)\n # Get metadata to use in our thumbnail tests\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatusOk(resp)\n tileMetadata = resp.json\n # Now we should be able to get a thumbnail\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n defaultLength = len(image)\n\n # Test that JPEG options are honored\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False,\n params={'jpegQuality': 10})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n self.assertTrue(len(image) < defaultLength)\n\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False,\n params={'jpegSubsampling': 2})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n self.assertTrue(len(image) < defaultLength)\n\n # Test width and height using PNGs\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False,\n params={'encoding': 'PNG'})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n (width, height) = struct.unpack('!LL', image[16:24])\n self.assertEqual(max(width, height), 256)\n # We know that we are using an example where the width is greater than\n # the height\n origWidth = int(tileMetadata['sizeX'] *\n 2 ** -(tileMetadata['levels'] - 1))\n origHeight = int(tileMetadata['sizeY'] *\n 2 ** -(tileMetadata['levels'] - 1))\n self.assertEqual(height, int(width * origHeight / origWidth))\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False,\n params={'encoding': 'PNG', 'width': 200})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n (width, height) = struct.unpack('!LL', image[16:24])\n self.assertEqual(width, 200)\n self.assertEqual(height, int(width * origHeight / origWidth))\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False,\n params={'encoding': 'PNG', 'height': 200})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n (width, height) = struct.unpack('!LL', image[16:24])\n self.assertEqual(height, 200)\n self.assertEqual(width, int(height * origWidth / origHeight))\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin, isJson=False,\n params={'encoding': 'PNG',\n 'width': 180, 'height': 180})\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n (width, height) = struct.unpack('!LL', image[16:24])\n self.assertEqual(width, 180)\n self.assertEqual(height, int(width * origHeight / origWidth))\n\n # Test bad parameters\n badParams = [\n ({'encoding': 'invalid'}, 400, 'Invalid encoding'),\n ({'width': 'invalid'}, 400, 
'incorrect type'),\n ({'width': 0}, 400, 'Invalid width or height'),\n ({'width': -5}, 400, 'Invalid width or height'),\n ({'height': 'invalid'}, 400, 'incorrect type'),\n ({'height': 0}, 400, 'Invalid width or height'),\n ({'height': -5}, 400, 'Invalid width or height'),\n ({'jpegQuality': 'invalid'}, 400, 'incorrect type'),\n ({'jpegSubsampling': 'invalid'}, 400, 'incorrect type'),\n ]\n for entry in badParams:\n resp = self.request(path='/item/%s/tiles/thumbnail' % itemId,\n user=self.admin,\n params=entry[0])\n self.assertStatus(resp, entry[1])\n self.assertIn(entry[2], resp.json['message'])\n\n def testRegions(self):\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_image.ptif'))\n itemId = str(file['itemId'])\n # We shouldn't be able to get a region yet\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin)\n self.assertStatus(resp, 400)\n self.assertIn('No large image file', resp.json['message'])\n # Ask to make this a tile-based item\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin)\n self.assertStatusOk(resp)\n # Get metadata to use in our tests\n resp = self.request(path='/item/%s/tiles' % itemId, user=self.admin)\n self.assertStatusOk(resp)\n tileMetadata = resp.json\n\n # Test bad parameters\n badParams = [\n ({'encoding': 'invalid', 'width': 10}, 400, 'Invalid encoding'),\n ({'width': 'invalid'}, 400, 'incorrect type'),\n ({'width': -5}, 400, 'Invalid width or height'),\n ({'height': 'invalid'}, 400, 'incorrect type'),\n ({'height': -5}, 400, 'Invalid width or height'),\n ({'jpegQuality': 'invalid', 'width': 10}, 400, 'incorrect type'),\n ({'jpegSubsampling': 'invalid', 'width': 10}, 400,\n 'incorrect type'),\n ({'left': 'invalid'}, 400, 'incorrect type'),\n ({'right': 'invalid'}, 400, 'incorrect type'),\n ({'top': 'invalid'}, 400, 'incorrect type'),\n ({'bottom': 'invalid'}, 400, 'incorrect type'),\n ({'regionWidth': 'invalid'}, 400, 'incorrect type'),\n ({'regionHeight': 'invalid'}, 400, 'incorrect type'),\n ({'units': 'invalid'}, 400, 'Invalid units'),\n ]\n for entry in badParams:\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin,\n params=entry[0])\n self.assertStatus(resp, entry[1])\n self.assertIn(entry[2], resp.json['message'])\n\n # Get a small region for testing. 
Our test file is sparse, so\n # initially get a region where there is full information.\n params = {'regionWidth': 1000, 'regionHeight': 1000,\n 'left': 48000, 'top': 3000}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = origImage = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n defaultLength = len(image)\n\n # Test that JPEG options are honored\n params['jpegQuality'] = 10\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n self.assertTrue(len(image) < defaultLength)\n del params['jpegQuality']\n\n params['jpegSubsampling'] = 2\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n self.assertTrue(len(image) < defaultLength)\n del params['jpegSubsampling']\n\n # Test using negative offsets\n params['left'] -= tileMetadata['sizeX']\n params['top'] -= tileMetadata['sizeY']\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image, origImage)\n # We should get the same image using right and bottom\n params = {\n 'left': params['left'], 'top': params['top'],\n 'right': params['left'] + 1000, 'bottom': params['top'] + 1000}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image, origImage)\n params = {\n 'regionWidth': 1000, 'regionHeight': 1000,\n 'right': params['right'], 'bottom': params['bottom']}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image, origImage)\n\n # Fractions should get us the same results\n params = {\n 'regionWidth': 1000.0 / tileMetadata['sizeX'],\n 'regionHeight': 1000.0 / tileMetadata['sizeY'],\n 'left': 48000.0 / tileMetadata['sizeX'],\n 'top': 3000.0 / tileMetadata['sizeY'],\n 'units': 'fraction'}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image, origImage)\n\n # 0-sized results are allowed\n params = {'regionWidth': 1000, 'regionHeight': 0,\n 'left': 48000, 'top': 3000, 'width': 1000, 'height': 1000}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(len(image), 0)\n\n # Test scaling (and a sparse region from our file)\n params = {'regionWidth': 2000, 'regionHeight': 1500,\n 'width': 500, 'height': 500, 'encoding': 'PNG'}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n (width, height) = struct.unpack('!LL', image[16:24])\n self.assertEqual(width, 500)\n 
self.assertEqual(height, 375)\n\n # test svs image\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_svs_image.TCGA-DU-6399-'\n '01A-01-TS1.e8eb65de-d63e-42db-af6f-14fefbbdf7bd.svs'))\n itemId = str(file['itemId'])\n # Ask to make this a tile-based item\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin)\n self.assertStatusOk(resp)\n params = {'regionWidth': 2000, 'regionHeight': 1500,\n 'width': 1000, 'height': 1000, 'encoding': 'PNG'}\n resp = self.request(path='/item/%s/tiles/region' % itemId,\n user=self.admin, isJson=False, params=params)\n self.assertStatusOk(resp)\n image = self.getBody(resp, text=False)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n (width, height) = struct.unpack('!LL', image[16:24])\n self.assertEqual(width, 1000)\n self.assertEqual(height, 750)\n\n def testSettings(self):\n from girder.plugins.large_image import constants\n from girder.models.model_base import ValidationException\n\n for key in (constants.PluginSettings.LARGE_IMAGE_SHOW_THUMBNAILS,\n constants.PluginSettings.LARGE_IMAGE_SHOW_VIEWER):\n self.model('setting').set(key, 'false')\n self.assertFalse(self.model('setting').get(key))\n self.model('setting').set(key, 'true')\n self.assertTrue(self.model('setting').get(key))\n try:\n self.model('setting').set(key, 'not valid')\n self.assertTrue(False)\n except ValidationException as exc:\n self.assertIn('Invalid setting', exc.args[0])\n self.model('setting').set(\n constants.PluginSettings.LARGE_IMAGE_DEFAULT_VIEWER, 'geojs')\n self.assertEqual(self.model('setting').get(\n constants.PluginSettings.LARGE_IMAGE_DEFAULT_VIEWER), 'geojs')\n # Test the system/setting/large_image end point\n resp = self.request(path='/system/setting/large_image', user=None)\n self.assertStatusOk(resp)\n settings = resp.json\n # The values were set earlier\n self.assertEqual(settings[\n constants.PluginSettings.LARGE_IMAGE_DEFAULT_VIEWER], 'geojs')\n self.assertEqual(settings[\n constants.PluginSettings.LARGE_IMAGE_SHOW_VIEWER], True)\n self.assertEqual(settings[\n constants.PluginSettings.LARGE_IMAGE_SHOW_THUMBNAILS], True)\n\n def testGetTileSource(self):\n from girder.plugins.large_image.tilesource import getTileSource\n\n # Upload a PTIF and make it a large_image\n file = self._uploadFile(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_image.ptif'))\n itemId = str(file['itemId'])\n fileId = str(file['_id'])\n resp = self.request(path='/item/%s/tiles' % itemId, method='POST',\n user=self.admin, params={'fileId': fileId})\n self.assertStatusOk(resp)\n # We should have access via getTileSource\n source = getTileSource('girder_item://' + itemId, user=self.admin)\n image, mime = source.getThumbnail(encoding='PNG', height=200)\n self.assertEqual(image[:len(PNGHeader)], PNGHeader)\n\n # We can also use a file with getTileSource. 
The user is ignored.\n source = getTileSource(os.path.join(\n os.environ['LARGE_IMAGE_DATA'], 'sample_svs_image.TCGA-DU-6399-'\n '01A-01-TS1.e8eb65de-d63e-42db-af6f-14fefbbdf7bd.svs'),\n user=self.admin, encoding='PNG')\n image, mime = source.getThumbnail(encoding='JPEG', width=200)\n self.assertEqual(image[:len(JPEGHeader)], JPEGHeader)\n","sub_path":"plugin_tests/tiles_test.py","file_name":"tiles_test.py","file_ext":"py","file_size_in_byte":40784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"235240452","text":"import torch\nimport torch.nn as nn\nfrom habitat_baselines.vln.models import visual_encoder\n\n\nclass Seq2SeqActor(nn.Module):\n def __init__(self, observation_space, encoder, decoder, dim_actions):\n super().__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.criterion = nn.CrossEntropyLoss()\n self.visual_encoder = visual_encoder.ResNet(observation_space)\n\n def forward(self, *x):\n raise NotImplementedError\n\n def act(\n self,\n observations,\n rnn_hidden_states,\n prev_actions,\n masks,\n deterministic=False,\n ):\n \n ctx, h_t, c_t = self.encoder(seq, seq_lengths)\n\n\n features, rnn_hidden_states = self.net(\n observations, rnn_hidden_states, prev_actions, masks\n )\n distribution = self.action_distribution(features)\n value = self.critic(features)\n\n if deterministic:\n action = distribution.mode()\n else:\n action = distribution.sample()\n\n action_log_probs = distribution.log_probs(action)\n\n return value, action, action_log_probs, rnn_hidden_states\n\n def get_value(self, observations, rnn_hidden_states, prev_actions, masks):\n features, _ = self.net(\n observations, rnn_hidden_states, prev_actions, masks\n )\n return self.critic(features)\n\n def evaluate_actions(\n self, observations, rnn_hidden_states, prev_actions, masks, action\n ):\n features, rnn_hidden_states = self.net(\n observations, rnn_hidden_states, prev_actions, masks\n )\n distribution = self.action_distribution(features)\n value = self.critic(features)\n\n action_log_probs = distribution.log_probs(action)\n distribution_entropy = distribution.entropy().mean()\n\n return value, action_log_probs, distribution_entropy, rnn_hidden_states\n","sub_path":"habitat_baselines/vln/common/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"425621351","text":"import pickle\n\nfrom bson.binary import Binary, USER_DEFINED_SUBTYPE\nfrom bson.codec_options import TypeDecoder, CodecOptions, TypeRegistry\nfrom pymongo import MongoClient\n\n\ndef fallback_pickle_encoder(value):\n return Binary(pickle.dumps(value), USER_DEFINED_SUBTYPE)\n\n\nclass PickledBinaryDecoder(TypeDecoder):\n bson_type = Binary\n\n def transform_bson(self, value):\n if value.subtype == USER_DEFINED_SUBTYPE:\n return pickle.loads(value)\n return value\n\n\nclass MongoInstance:\n def __init__(self, mongodb_uri):\n codec_options = CodecOptions(\n type_registry=TypeRegistry([PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder))\n\n self.client = MongoClient(mongodb_uri)\n self.db = self.client['trivicord']\n self.collection = self.db.get_collection('games', codec_options=codec_options)\n\n def get_game(self, game_id):\n result = self.collection.find_one({'game_id': game_id}, {'_id': 0, 'game_id': 0})\n if result:\n return result['game']\n else:\n return None\n\n def save_game(self, game_id, game):\n ids = [c['game_id'] for c in 
self.collection.find({}, {'game_id': 1})]\n if game_id not in ids:\n self.collection.insert_one({'game_id': game_id, 'game': game})\n else:\n self.collection.update_one({'game_id': game_id}, {'$set': {'game': game}})\n\n def delete_game(self, game_id):\n self.collection.delete_many({'game_id': game_id})\n\n def get_games(self):\n return [(g['game_id'], g['game']) for g in self.collection.find({}, {'_id': 0})]\n\n\nif __name__ == '__main__':\n from jeopardy import TriviaGame\n\n game = TriviaGame(1)\n\n mongo = MongoInstance('mongodb://localhost')\n\n mongo.save_game('1', game)\n\n print(mongo.get_games())\n\n for a in mongo.get_games():\n print(a)\n\n # mongo.delete_game_from_db('1')\n","sub_path":"mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"8418260","text":"prime = [3,5,7,13]\nwhile input('Enter q to quit or any key to continue: ') != 'q':\n# prime = [3,5,7,13]\n i = 0\n flag = False\n \n# number = int(input(\"Enter a prime number: \"))\n \n while flag == False:\n# if i == len(prime):\n# print(\"That is not correct\")\n# flag = True\n# elif number == prime[i]:\n# print(\"You found a number\")\n# flag = True\n# else:\n# i += 1\n try:\n number = int(input(\"Enter a prime number: \"))\n if number in prime:\n print(\"You found a number.\")\n flag = True\n else:\n print(\"That is not correct.\")\n flag = True\n except ValueError:\n print(\"A number please\")\n","sub_path":"tstp/ex7-4.py","file_name":"ex7-4.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"370745279","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport TencentYoutuyun\n\n# AppID: 10163570\n# SecretID: AKIDqUu4zefMr65pKaJNyJRKyPeeT6TaQ9Zj\n# SecretKey: 61kGXYLvFKDtKWTBesrgV6RXO0MTN5Yl\n# Application platform: Web\n# Application type: Entertainment\n# Product type: FaceIn face verification\n# Application description:\n# tsesst\n\n# pip install requests\n# please get these values from http://open.youtu.qq.com\nappid = '10163570'\nsecret_id = 'AKIDqUu4zefMr65pKaJNyJRKyPeeT6TaQ9Zj'\nsecret_key = '61kGXYLvFKDtKWTBesrgV6RXO0MTN5Yl'\nuserid = '123'\n\n# choose an end_point\n#end_point = TencentYoutuyun.conf.API_TENCENTYUN_END_POINT\n#end_point = TencentYoutuyun.conf.API_YOUTU_VIP_END_POINT\nend_point = TencentYoutuyun.conf.API_YOUTU_END_POINT\n\nyoutu = TencentYoutuyun.YouTu(appid, secret_id, secret_key, userid, end_point)\n\n# Compare two faces and return a similarity score\n#session_id = id\n#ret = youtu.FaceCompare(img1,img2)\n#print (ret)\n\n# Create a new person ID\n# ret = youtu.NewPerson(person_id=\"person1\",image_path=\"zyx.jpg\",group_ids=\"Students\", person_name= 'zyx', tag='', data_type = 0)\n# print(ret)\n\nret = youtu.DetectFace(image_path=\"https://tvax1.sinaimg.cn/crop.0.0.1242.1242.180/9fd8f287ly8fwkveisgeaj20yi0yidkh.jpg\",mode = 0,data_type =1)\nprint (ret)\n#\nfor j in ret[\"face\"]:\n if j[\"gender\"] > 50:\n print(\"Gender: male\")\n else:\n print(\"Gender: female\")\n\n if j[\"expression\"] >90:\n print(\"You are laughing\")\n elif 50 pre_i and j > pre_j:\n if src[i] != tgt[j]:\n if enable_replace:\n correction.append((\"replace\", i - 1, tgt[j]))\n else:\n correction.append((\"delete\", i - 1, \"\"))\n correction.append((\"add\", i - 1, tgt[j]))\n elif i > pre_i:\n correction.append((\"delete\", i - 1, \"\"))\n else:\n correction.append((\"add\", i, tgt[j]))\n pre_i, pre_j = i, j\n return 
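The save_game method above reads every stored game_id before deciding between insert_one and update_one; with pymongo the same insert-or-update can be done in one atomic call. A possible drop-in body (a sketch, not the author's code):

    def save_game(self, game_id, game):
        # upsert=True inserts the document when no game with this id exists yet
        self.collection.update_one(
            {'game_id': game_id},
            {'$set': {'game': game}},
            upsert=True,
        )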
correction\n","sub_path":"process_data/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"16986568","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nimport verbose as v\n__author__ = 'Yaacov'\n\nverbose_msg = False\n\n\nclass TargetedActionFrame(tk.Frame):\n def __init__(self, parent, combat, guy):\n tk.Frame.__init__(self, parent)\n v.p(verbose_msg, \"Init Targeted Action Frame\")\n self.parent = parent\n self.combat = combat\n self.combatant = guy\n self.targets_list = []\n self.target = tk.StringVar()\n self.choose_target_optmenu = ttk.OptionMenu(self, self.target, *['None'])\n\n def update_targets_list(self):\n self.targets_list = []\n self.targets_list = [x for x in self.combat.get_combatant_names() if x != self.combatant.name]\n self.targets_list.sort()\n self.choose_target_optmenu['menu'].delete(0, tk.END)\n\n if not self.targets_list:\n self.target.set(\"None\")\n self.choose_target_optmenu['menu'].add_command(label=\"None\", command=tk._setit(self.target, \"None\"))\n else:\n for item in self.targets_list:\n self.choose_target_optmenu['menu'].add_command(label=item, command=tk._setit(self.target, item))\n self.target.set(self.targets_list[0])\n","sub_path":"targetedactionframe.py","file_name":"targetedactionframe.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"201022961","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, DataQualityOperator)\n\nfrom helpers import SqlQueries \n\n\ndefault_args = {\n 'owner': 'shell845',\n 'depends_on_past': False,\n 'start_date': datetime(2018, 11, 1),\n 'end_date': datetime(2018, 11, 1),\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5),\n 'email_on_retry': False\n}\n\ndag = DAG('sparkify_dag',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval='0 * * * *', # cron format for hourly schedule\n catchup=False # catchup is a DAG argument, not a default_args key\n )\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\ncreate_tables_task = PostgresOperator(\n task_id=\"create_tables\",\n dag=dag,\n sql='create_tables.sql',\n postgres_conn_id=\"redshift\"\n)\n\nstage_events_to_redshift = StageToRedshiftOperator(\n task_id='Stage_events',\n dag=dag,\n redshift_conn_id='redshift',\n aws_credentials_id='aws_credentials',\n table='staging_events',\n s3_bucket='udacity-dend',\n s3_key='log_data',\n s3_region='us-west-2',\n file_format='JSON',\n provide_context=True\n)\n\nstage_songs_to_redshift = StageToRedshiftOperator(\n task_id='Stage_songs',\n dag=dag,\n redshift_conn_id='redshift',\n aws_credentials_id='aws_credentials',\n table='staging_songs',\n s3_bucket='udacity-dend',\n s3_key='song_data', # change to 'song_data/A/D/C' for testing\n s3_region='us-west-2',\n file_format='JSON',\n provide_context=True\n)\n\nload_songplays_table = LoadFactOperator(\n task_id='Load_songplays_fact_table',\n dag=dag,\n redshift_conn_id=\"redshift\",\n table=\"songplays\",\n sql_query=SqlQueries.songplay_table_insert,\n append_data=True,\n provide_context=True\n)\n\nload_user_dimension_table = LoadDimensionOperator(\n 
task_id='Load_user_dim_table',\n dag=dag,\n redshift_conn_id=\"redshift\",\n table=\"users\",\n sql_query=SqlQueries.user_table_insert,\n append_data=True,\n provide_context=True\n)\n\nload_song_dimension_table = LoadDimensionOperator(\n task_id='Load_song_dim_table',\n dag=dag,\n redshift_conn_id=\"redshift\",\n table=\"songs\",\n sql_query=SqlQueries.song_table_insert,\n append_data=True,\n provide_context=True\n)\n\nload_artist_dimension_table = LoadDimensionOperator(\n task_id='Load_artist_dim_table',\n dag=dag,\n redshift_conn_id=\"redshift\",\n table=\"artists\",\n sql_query=SqlQueries.artist_table_insert,\n append_data=True,\n provide_context=True\n)\n\nload_time_dimension_table = LoadDimensionOperator(\n task_id='Load_time_dim_table',\n dag=dag,\n redshift_conn_id=\"redshift\",\n table=\"time\",\n sql_query=SqlQueries.time_table_insert,\n append_data=True,\n provide_context=True\n)\n\n\nrun_quality_checks = DataQualityOperator(\n task_id='Run_data_quality_checks',\n dag=dag,\n redshift_conn_id=\"redshift\",\n tables=[\"staging_events\", \"staging_songs\", \"songplays\", \"songs\", \"users\", \"artists\", \"time\"],\n sql_query=SqlQueries.data_quality_query,\n provide_context=True\n)\n\n\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\n\n\n# Uncomment below if you need to drop tables\n# drop_tables_task = PostgresOperator(\n# task_id=\"drop_tables\",\n# dag=dag,\n# sql='drop_tables.sql',\n# postgres_conn_id=\"redshift\"\n# )\n\n\nstart_operator >> create_tables_task\ncreate_tables_task >> [stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table\nload_songplays_table >> [load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks\nrun_quality_checks >> end_operator\n\n# Uncomment below if you need to drop tables\n# end_operator >> drop_tables_task\n","sub_path":"data_pipeline_airflow/project/airflow/dags/sparkify_dag.py","file_name":"sparkify_dag.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"168389969","text":"import mongoengine\nfrom mongoengine import *\nfrom importlib import reload\nimport bin.benching.config as config_file\n\n\n# MongoEngine schemas:\n# ---------------------\n\nclass Instance(Document):\n filename = StringField(required=True)\n num_sat = IntField(default=0)\n num_unsat = IntField(default=0)\n num_unknown = IntField(default=0)\n verified_sat = BooleanField(default=False)\n # If a solver returns a model for this Instance which ALL solvers find to be \"sat\",\n # then we assume that this Instance is proven \"sat\"\n\n meta = {\n 'indexes': [\n {'fields': ['filename'], 'unique': True}\n ]\n }\n\n\nclass Result(Document):\n program = StringField(required=True)\n nickname = StringField(required=True)\n instance = ReferenceField(Instance, required=True)\n result = StringField(required=True)\n elapsed = FloatField(required=True)\n model = StringField()\n verified = StringField(choices=(\"YES\", \"NO\", \"N/A\"))\n # YES means that \"result\" has been verified (if \"sat\") or not yet disproved (if \"unsat\")\n # NO means that \"result\" has been disproved\n # N/A means that the model has not been verified or disproved yet\n\n\n# Formats and writes instances to the database:\n# ------------------------------------------------\ndef write_instances(instances):\n reload(config_file)\n mongoengine.connect(config_file.config[\"database_name\"], 
replicaset=\"monitoring_replSet\")\n\n for instance in instances:\n if not Instance.objects(filename=instance):\n\n Instance.objects(filename=instance). \\\n update_one(upsert=True,\n set__filename=instance)\n\n mongoengine.connection.disconnect()\n\n\n# Formats and writes results to the database:\n# ------------------------------------------------\ndef write_results(program, nickname, instance, result, elapsed, model):\n reload(config_file)\n mongoengine.connect(config_file.config[\"database_name\"], replicaset=\"monitoring_replSet\")\n\n this_instance = Instance.objects.get(filename=instance)\n\n this_result = Result(program=program)\n this_result.nickname = nickname\n this_result.instance = this_instance\n this_result.result = result\n this_result.elapsed = elapsed\n\n # For model verification\n if model:\n this_result.model = model\n this_result.verified = \"N/A\"\n\n this_result.save(force_insert=True)\n\n # Updates the current instance's counters with each result\n if result == 'sat':\n this_instance.modify(inc__num_sat=1)\n elif result == 'unsat':\n this_instance.modify(inc__num_unsat=1)\n else:\n this_instance.modify(inc__num_unknown=1)\n\n mongoengine.connection.disconnect()\n","sub_path":"categories/smt/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"209102879","text":"import logging\nimport pytz\nimport uuid\nfrom collections import OrderedDict\nfrom decimal import Decimal\nfrom mail_templated import EmailMessage\nfrom operator import itemgetter\nfrom random import randint\nfrom smtplib import SMTPException\nfrom time import sleep\n\nfrom django.conf import settings\nfrom django.core.mail.backends.smtp import EmailBackend\nfrom django.db.utils import IntegrityError\n\nfrom bezantrakta.order.models import Order, OrderTicket\n\nfrom project.cache import cache_factory\nfrom project.shortcuts import timezone_now\n\nfrom bezantrakta.eticket.shortcuts import render_eticket\n\n\nclass OrderBasket():\n \"\"\"A class for working with preliminary reservations and created orders.\n\n Performs operations on an order, reading information about it from the cache.\n\n Class attributes:\n ORDER_TYPE (tuple): Ticket order types (a combination of delivery method and payment method). Ordered by preference for display on step 2 of ticket ordering.\n ORDER_TYPE_MAPPING (dict): Delivery and payment methods for each order type.\n ORDER_DELIVERY_CAPTION (dict): Captions for the ticket delivery methods.\n ORDER_PAYMENT_CAPTION (dict): Captions for the payment methods.\n ORDER_OVERALL_CAPTION (dict): Captions for the different ways of calculating the order total.\n ORDER_STATUS_CAPTION (dict): Captions for the order statuses and their visual styling.\n CUSTOMER_ATTRIBUTES (tuple): Customer details that need to be stored in browser cookies.\n OVERALL_EXTRA_MULTIPLIER (int): The number that an order total with a service charge and offline payment is rounded up to.\n\n Attributes:\n logger (logging.Logger): Logger for recording information about the current operation.\n\n event_title (str): Event title.\n event_url (str): URL of the event on the site (back link in case of an error).\n\n city_title (str): City name.\n city_timezone (pytz.tzfile): Time zone of the site's city.\n\n domain_id (int): Site identifier.\n domain_title (str): Site title.\n domain_slug (str): Site alias (subdomain).\n\n ticket_service (dict): Information about the ticket selling service.\n payment_service (dict): Information about the online payment service.\n\n markup (dict): The various possible surcharges when placing an order.\n\n payment_url (str): URL of the payment form for online payment.\n\n order (dict): Parameters of the current preliminary reservation or created order.\n\n Contents of ``order``:\n\n * order_uuid (uuid.UUID): Order UUID.\n * order_id (int): Order identifier (after the order has been successfully created and written to the DB), otherwise ``None``.\n\n * domain_slug (str): Site alias (subdomain).\n * city_timezone (pytz.tzfile): Time zone of the site's city.\n\n * ticket_service_id (str): Ticket selling service identifier.\n\n * event_uuid (uuid.UUID): Event UUID.\n * event_id (int): Event identifier.\n\n * customer (dict): Customer details.\n\n Contents of ``customer``:\n\n * name (str): Customer's full name.\n * phone (str): Customer's phone number.\n * email (str): Customer's email address.\n * address (str): Delivery address (if delivery is needed), otherwise ``None``.\n\n * delivery (str): Order delivery method.\n\n Possible values of ``delivery``:\n\n * self: Picked up by the customer at the box office.\n * courier: Courier delivery.\n * email: Electronic ticket.\n\n * payment (str): Order payment method (cash | online).\n\n Possible values of ``payment``:\n\n * cash: Offline payment (cash or bank card on the spot).\n * online: Online payment.\n\n * extra (decimal.Decimal): Service charge percentage for each ticket in the order.\n * courier_price (decimal.Decimal): Courier delivery cost (if used).\n * commission (decimal.Decimal): Commission percentage of the online payment service (if used).\n\n * payment_id (str): Online payment identifier (if used), otherwise ``None``.\n\n * status (str): Order status.\n\n Possible values of ``status``:\n\n * reserved: Preliminary reservation.\n * ordered: Created order (being processed).\n * approved: Successfully confirmed order.\n * cancelled: Cancelled order.\n * refunded: Order with a full/partial refund to the customer.\n\n * tickets_count (int): Number of tickets in the order.\n * tickets (dict): A dict containing dicts with information about the tickets in the order. The keys are the ticket identifiers ``ticket_id`` in the ticket selling service.\n\n Contents of the dicts in ``tickets``:\n\n * ticket_uuid (uuid.UUID): Ticket UUID.\n * sector_id (int): Sector identifier.\n * sector_title (str): Sector title.\n * row_id (int): Row identifier.\n * seat_id (int): Seat identifier.\n * seat_title (str): Seat title.\n * bar_code (str): Ticket barcode.\n\n * total (decimal.Decimal): Sum of the prices of all tickets in the order.\n * overall (decimal.Decimal): Order total (taking possible surcharges/discounts into account).\n * overall_header (str): Header for the order total (taking possible surcharges/discounts into account).\n \"\"\"\n ORDER_TYPE = ('self_online', 'email_online', 'self_cash', 'courier_cash',)\n ORDER_TYPE_MAPPING = {\n 'self_online': {'delivery': 'self', 'payment': 'online', },\n 'email_online': {'delivery': 'email', 'payment': 'online', },\n 'self_cash': {'delivery': 'self', 'payment': 'cash', },\n 'courier_cash': {'delivery': 'courier', 'payment': 'cash', },\n }\n ORDER_DELIVERY_CAPTION = {\n None: '-',\n 'self': 'получение в кассе',\n 'courier': 'доставка курьером',\n 'email': 'электронный билет',\n }\n ORDER_PAYMENT_CAPTION = {\n None: '-',\n 'cash': 'оплата при получении',\n 'online': 'онлайн-оплата',\n }\n ORDER_OVERALL_CAPTION = {\n 'overall_total': 'Общая сумма заказа',\n 'overall_extra': 'Всего с учётом сервисного сбора',\n 'overall_courier': 'Всего с учётом доставки курьером',\n 'overall_courier_extra': 'Всего с учётом доставки курьером и сервисного сбора',\n 'overall_commission': 'Всего с учётом комиссии платёжной системы',\n 'overall_commission_extra': 'Всего с учётом комиссии платёжной системы и сервисного сбора',\n }\n ORDER_STATUS_CAPTION = {\n # Status of a preliminary seat reservation, before the order is created\n 'reserved': {'color': 'black', 'description': 'предварительный резерв'},\n # Statuses of a created order\n 'ordered': {'color': 'blue', 'description': 'создан'},\n 'cancelled': {'color': 'red', 'description': 'отменён'},\n 'approved': {'color': 'green', 'description': 'успешно завершён'},\n 'refunded': {'color': 'violet', 'description': 'возвращён'},\n }\n CUSTOMER_ATTRIBUTES = ('name', 'phone', 'email', 'address', 'order_type')\n OVERALL_EXTRA_MULTIPLIER = 50\n\n def __init__(self, **kwargs):\n self.logger = logging.getLogger(kwargs.get('logger', 'bezantrakta.reserve'))\n\n # Fetch an existing preliminary reservation or create a new empty one\n if 'order_uuid' in kwargs and kwargs['order_uuid']:\n self.get(kwargs['order_uuid'])\n else:\n self.order = {}\n\n self.order['event_uuid'] = kwargs.get('event_uuid', None)\n self.order['order_uuid'] = uuid.uuid4()\n\n # Get the customer details\n if 'customer' in kwargs and kwargs['customer']:\n self.order['customer'] = {}\n for attr in OrderBasket.CUSTOMER_ATTRIBUTES:\n self.order['customer'][attr] = kwargs['customer'].get(attr, None)\n else:\n self.order['customer'] = {attr: None for attr in OrderBasket.CUSTOMER_ATTRIBUTES}\n\n self.order['delivery'] = None\n self.order['payment'] = None\n\n self.order['status'] = 'reserved'\n\n self.order['tickets'] = {}\n self.order['tickets_count'] = 0\n self.order['total'] = self.decimal_price(0)\n\n self.order['overall'] = self.decimal_price(0)\n\n self.post_init()\n\n if not kwargs.get('order_uuid'):\n self.update()\n\n def __str__(self):\n return '{cls}({order_uuid})'.format(\n cls=self.__class__.__name__,\n order_uuid=self.order['order_uuid'],\n )\n\n def __repr__(self):\n return '{cls}({order_uuid})'.format(\n 
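For orientation, the ``order`` structure documented in the docstring above can be pictured as the following literal; every value is an illustrative placeholder, not data from the source:

    # Shape of OrderBasket.order as described in the class docstring (placeholder values)
    order = {
        'order_uuid': uuid.uuid4(),
        'order_id': None,                 # set after the order is written to the DB
        'event_uuid': uuid.uuid4(),
        'event_id': 1,
        'customer': {'name': '', 'phone': '', 'email': '', 'address': None},
        'delivery': 'self',               # 'self' | 'courier' | 'email'
        'payment': 'cash',                # 'cash' | 'online'
        'status': 'reserved',             # 'reserved' | 'ordered' | 'approved' | 'cancelled' | 'refunded'
        'tickets': {},                    # keyed by ticket_id
        'tickets_count': 0,
        'total': Decimal('0'),
        'overall': Decimal('0'),
    }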
cls=self.__class__.__name__,\n order_uuid=self.order['order_uuid'],\n )\n\n def post_init(self):\n if self.order and self.order['event_uuid']:\n # Информация о событии\n event = cache_factory('event', self.order['event_uuid'])\n\n self.order['event_id'] = event['ticket_service_event']\n\n self.event_title = event['event_title']\n self.event_url = event['url']\n\n # Информация о сайте\n domain = cache_factory('domain', event['domain_slug'])\n\n self.city_title = domain['city_title']\n self.city_timezone = domain['city_timezone']\n self.domain_id = domain['domain_id']\n self.domain_title = domain['domain_title']\n self.domain_slug = event['domain_slug']\n\n # Получение реквизитов покупателя ???\n if 'customer' not in self.order:\n self.order['customer'] = {}\n for attr in OrderBasket.CUSTOMER_ATTRIBUTES:\n self.order['customer'][attr] = (\n self.order[attr] if\n attr in self.order and self.order[attr] else\n None\n )\n\n if not self.order['customer']['address']:\n self.order['customer']['address'] = self.city_title\n\n self.order['delivery_caption'] = OrderBasket.ORDER_DELIVERY_CAPTION[self.order['delivery']]\n self.order['payment_caption'] = OrderBasket.ORDER_PAYMENT_CAPTION[self.order['payment']]\n\n self.order['status_color'] = OrderBasket.ORDER_STATUS_CAPTION[self.order['status']]['color']\n self.order['status_caption'] = OrderBasket.ORDER_STATUS_CAPTION[self.order['status']]['description']\n\n # Формирование упорядоченного списка билетов в заказе для вывода\n tickets_list = [t for tid, t in self.order['tickets'].items()]\n self.order['tickets_list'] = sorted(\n tickets_list, key=itemgetter('sector_title', 'row_id', 'seat_id', 'price')\n )\n\n self.ticket_service = {}\n self.ticket_service['id'] = event['ticket_service_id']\n # Информация о сервисе продажи билетов\n ticket_service = cache_factory('ticket_service', self.ticket_service['id'])\n\n if ticket_service and ticket_service['is_active']:\n self.ticket_service['title'] = ticket_service['title']\n self.ticket_service['max_seats_per_order'] = ticket_service['settings']['max_seats_per_order']\n self.ticket_service['heartbeat_timeout'] = ticket_service['settings']['heartbeat_timeout']\n self.ticket_service['seat_timeout'] = ticket_service['settings']['seat_timeout']\n\n self.ticket_service['order_email'] = {}\n self.ticket_service['order_email']['user'] = ticket_service['settings']['order_email']['user']\n self.ticket_service['order_email']['pswd'] = ticket_service['settings']['order_email']['pswd']\n\n self._ts = ticket_service['instance']\n\n self.payment_service = {}\n self.payment_service['id'] = event['payment_service_id']\n # Информация о сервисе онлайн-оплаты\n payment_service = cache_factory(\n 'payment_service', self.payment_service['id'],\n domain_slug=event['domain_slug']\n )\n\n if payment_service and payment_service['is_active']:\n self.payment_service['title'] = payment_service['title']\n self.payment_service['description'] = payment_service['settings']['description']\n self.payment_service['timeout'] = payment_service['settings']['timeout']\n self.payment_service['success_url'] = payment_service['settings']['init']['success_url']\n self.payment_service['error_url'] = payment_service['settings']['init']['error_url']\n\n self._ps = payment_service['instance']\n\n # Различные возможные наценки при оформлении заказа\n self.markup = {}\n # Процент сервисного сбора на каждый из билетов в заказе\n self.markup['extra'] = event['settings']['extra']\n # Стоимость доставки курьером, если она используется\n 
self.markup['courier_price'] = self.decimal_price(ticket_service['settings']['courier_price'])\n # Процент комиссии сервиса онлайн-оплаты, если он используется\n self.markup['commission'] = self.decimal_price(\n payment_service['settings']['commission'] if payment_service else 0\n )\n\n self.get_overall()\n\n def get(self, order_uuid):\n # Получить существующий заказ\n self.order = cache_factory('order', order_uuid)\n\n def update(self):\n # Обновить существующий заказ, используя изменённое ранее значение self.order\n self.get_overall()\n self.order['updated'] = self.now()\n self.order = cache_factory('order', self.order['order_uuid'], obj=self.order, reset=True)\n\n self.post_init()\n\n def delete(self):\n # Полностью удалить существующий заказ\n cache_factory('order', self.order['order_uuid'], delete=True)\n\n def log(self):\n \"\"\"Логирование информации о полученном предварительном резерве или созданном заказе.\"\"\"\n self.logger.info('Сайт: {title} ({id})'.format(title=self.domain_title, id=self.domain_id))\n self.logger.info('Сервис продажи билетов: {title} ({id})'.format(\n title=self.ticket_service['title'],\n id=self.ticket_service['id']\n ))\n\n self.logger.info('UUID события: {}'.format(self.order['event_uuid']))\n self.logger.info('Идентификатор события: {}'.format(self.order['event_id']))\n self.logger.info('Название события: {}'.format(self.event_title))\n\n if self.order['customer']:\n self.logger.info('\\nРеквизиты покупателя:')\n self.logger.info('ФИО: {}'.format(self.order['customer']['name']))\n self.logger.info('Email: {}'.format(self.order['customer']['email']))\n self.logger.info('Телефон: {}'.format(self.order['customer']['phone']))\n\n if self.order['status'] == 'reserved':\n self.logger.info('Предварительный резерв:')\n else:\n self.logger.info('Заказ:')\n\n self.logger.info('UUID: {}'.format(self.order['order_uuid']))\n self.logger.info('Билеты:')\n if self.order['tickets_count'] > 0:\n for ticket_id in self.order['tickets']:\n self.logger.info('* {}'.format(self.order['tickets'][ticket_id]))\n else:\n self.logger.info('Билеты: \\{\\}')\n self.logger.info('Число билетов: {}'.format(self.order['tickets_count']))\n self.logger.info('Сумма: {}'.format(self.order['total']))\n self.logger.info('Всего: {}'.format(self.order['overall']))\n\n self.logger.info('Получение билетов: {}'.format(self.order['delivery_caption']))\n if self.order['delivery'] == 'courier':\n self.logger.info('Адрес доставки: {}'.format(self.order['customer']['address']))\n self.logger.info('Оплата билетов: {}'.format(self.order['payment_caption']))\n\n def ticket_toggle(self, ticket_id, is_fixed, action):\n response = {}\n\n if action == 'add':\n self.logger.info('\\nДействие: добавить')\n elif action == 'remove':\n self.logger.info('\\nДействие: удалить')\n\n add_condition = action == 'add' and self.order['tickets_count'] < self.ticket_service['max_seats_per_order']\n remove_condition = action == 'remove' and self.order['tickets_count'] > 0\n\n if add_condition or remove_condition:\n # Параметры для отправки запроса к сервису продажи билетов\n params = {\n 'event_id': self.order['event_id'],\n 'order_uuid': self.order['order_uuid'],\n 'ticket_id': ticket_id,\n 'action': action\n }\n\n # Универсальный метод для работы с предварительным резервом мест\n reserve = self._ts.reserve(**params)\n self.logger.info('\\nreserve: {}'.format(reserve))\n\n if add_condition:\n seats_and_prices = cache_factory('seats_and_prices', self.order['event_uuid'])\n if not seats_and_prices:\n response['success'] = 
False\n response['message'] = 'Не получена информация о билетах в событии'\n return response\n ticket = seats_and_prices['seats'].get(ticket_id, None)\n self.logger.info('\\nticket: {}'.format(ticket))\n if not ticket:\n response['success'] = False\n response['message'] = 'Не получена информация о билете'\n return response\n\n if reserve['success']:\n self.order['tickets'][ticket_id] = {\n 'ticket_uuid': uuid.uuid4(),\n 'ticket_id': ticket_id,\n 'sector_id': ticket['sector_id'],\n 'sector_title': ticket['sector_title'],\n 'row_id': ticket['row_id'],\n 'seat_id': ticket['seat_id'],\n 'seat_title': ticket['seat_title'],\n 'price': self.decimal_price(ticket['price']),\n 'price_order': ticket['price_order'],\n 'is_fixed': bool(is_fixed),\n 'added': self.now(),\n }\n self.order['tickets_count'] += 1\n self.order['total'] += self.decimal_price(ticket['price'])\n elif remove_condition:\n # Даже если при удалении билета получен НЕуспешный ответ -\n # билет в любом случае удаляется из предварительного резерва\n try:\n ticket_price = self.order['tickets'][ticket_id]['price']\n except KeyError:\n pass\n else:\n del self.order['tickets'][ticket_id]\n self.order['tickets_count'] -= 1\n self.order['total'] -= self.decimal_price(ticket_price)\n\n self.update()\n\n if reserve['success']:\n response['success'] = True\n response['message'] = 'Билет успешно удалён из резерва'\n response['ticket_id'] = ticket_id\n response['action'] = action\n response['tickets'] = self.order['tickets']\n response['tickets_count'] = self.order['tickets_count']\n response['total'] = self.order['total']\n else:\n response['success'] = False\n response['message'] = reserve['message']\n else:\n response['success'] = False\n response['message'] = 'Невозможно провести резерв билета'\n\n return response\n\n def tickets_clear(self):\n \"\"\"Освобождение билетов и удаление предварительного резерва.\n\n Returns:\n dict: Description\n \"\"\"\n self.logger.info('\\nОтмена предыдущего предварительного резерва...')\n\n response = {}\n response['success'] = True\n response['tickets'] = {}\n\n if self.order['tickets_count'] > 0:\n tickets = self.order['tickets'].copy()\n\n for ticket_id, ticket in tickets.items():\n remove = self.ticket_toggle(ticket_id, True, 'remove')\n\n response['tickets'][ticket_id] = ticket\n\n if remove['success']:\n response['tickets'][ticket_id]['removed'] = True\n self.logger.info(' Билет успешно удалён из предварительного резерва')\n else:\n response['tickets'][ticket_id]['removed'] = False\n self.logger.info(' Билет НЕ удалось удалить из предварительного резерва')\n\n # Задержка в несколько секунд во избежание возможных ошибок\n sleep(randint(2, 5))\n\n self.delete()\n\n response['message'] = 'Старый предварительный резерв успешно удалён'\n\n return response\n\n def tickets_check(self, status):\n \"\"\"Проверка состояния билетов в предварительном резерве или созданном заказе.\"\"\"\n if status == 'reserved':\n self.logger.info('\\nПроверка состояния билетов в предварительном резерве...')\n elif status == 'ordered':\n self.logger.info('\\nПроверка состояния билетов в созданном заказе...')\n elif status == 'approved':\n self.logger.info('\\nПроверка состояния билетов в подтверждённом заказе...')\n\n for ticket_id in self.order['tickets']:\n params = {\n 'event_id': self.order['event_id'],\n 'ticket_id': ticket_id,\n }\n\n ticket_status = self._ts.ticket_status(**params)\n self.logger.info(' ticket_status: {}'.format(ticket_status))\n\n self.order['tickets'][ticket_id]['status'] = ticket_status['status']\n\n # Если 
статус билета совпадает с запрошенным или не требует проверки\n if ticket_status['success'] or ticket_status['status'] in (status, 'bypass',):\n self.logger.info(' 🎫: {}'.format(self.order['tickets'][ticket_id]))\n # Если статус билета НЕ совпадает с запрошенным\n else:\n del self.order['tickets'][ticket_id]\n self.order['tickets_count'] -= 1\n self.order['total'] -= self.decimal_price(self.order['tickets'][ticket_id]['price'])\n\n self.logger.error(' Ошибка с билетом {}'.format(ticket_id))\n\n self.update()\n\n def order_type_default(self, order_type):\n \"\"\"Предварительный выбор типа заказа из списка активных в конкретном событии,\n если заказов ранее не было или если выбранный ранее тип заказа неактивен в конкретном событии.\"\"\"\n default_order_type = None\n\n # Информация о событии\n event = cache_factory('event', self.order['event_uuid'])\n # Информация о сервисе продажи билетов\n ticket_service = cache_factory('ticket_service', self.ticket_service['id'])\n # Информация о сервисе онлайн-оплаты\n payment_service = cache_factory(\n 'payment_service', self.payment_service['id'],\n domain_slug=event['domain_slug']\n )\n\n # Все типы заказа билетов для выбора (настройки в сервисе продажи билетов и в событии)\n order_types = OrderedDict()\n for ot in self.ORDER_TYPE:\n order_types.update(\n {\n ot: {\n 'ticket_service': ticket_service['settings']['order'][ot],\n 'event': event['settings']['order'][ot],\n }\n }\n )\n\n # Активные типы заказа билетов в конкретном событии\n order_types_active = tuple(\n ot for ot in order_types.keys() if\n order_types[ot]['ticket_service'] is True and order_types[ot]['event'] is True and\n (payment_service or not ot.endswith('_online'))\n )\n # Типы заказа билетов с онлайн-оплатой НЕ включаются в список активных,\n # если к текущему сервису продажи билетов НЕ привязан никакой сервис онлайн-оплаты\n\n # Выбор первого доступного типа заказа по порядку,\n # если он НЕ был выбран ранее или если выбранный ранее тип заказа в текущем событии отключен\n if not order_type or order_type not in order_types_active:\n for ot in order_types.keys():\n if ot in order_types_active:\n default_order_type = ot\n break\n else:\n default_order_type = order_type\n\n return default_order_type\n\n def order_type_change(self, customer, order_type):\n # Обновление типа получения и типа оплаты билетов\n self.order['delivery'] = OrderBasket.ORDER_TYPE_MAPPING[order_type]['delivery']\n self.order['payment'] = OrderBasket.ORDER_TYPE_MAPPING[order_type]['payment']\n\n # Обновление реквизитов покупателя\n self.order['customer']['name'] = customer['name']\n self.order['customer']['phone'] = customer['phone']\n self.order['customer']['email'] = customer['email']\n self.order['customer']['address'] = customer['address']\n self.order['customer']['order_type'] = order_type\n\n self.update()\n\n def order_create(self):\n \"\"\"Создание нового заказа в сервисе продажи билетов.\n\n Returns:\n dict: Информация о созданном заказе.\n \"\"\"\n self.logger.info('\\nСоздание заказа...')\n\n # Добавление опциональных параметров для возможной доставки курьером\n if self.order['delivery'] == 'courier':\n self.order['customer']['is_courier'] = True\n # self.order['customer']['address'] = self.order['customer']['address']\n else:\n self.order['customer']['is_courier'] = False\n self.order['customer']['address'] = None\n\n order_create = self._ts.order_create(\n event_id=self.order['event_id'],\n order_uuid=self.order['order_uuid'],\n customer=self.order['customer'],\n tickets=self.order['tickets']\n )\n\n 
if order_create['success']:\n self.order['status'] = 'ordered'\n self.logger.info('Статус заказа: {}'.format(self.order['status_caption']))\n\n self.order['order_id'] = order_create['order_id']\n self.logger.info('Идентификатор заказа: {}'.format(self.order['order_id']))\n\n # Получение штрих-кодов для билетов в заказе\n self.tickets_barcode(order_create)\n\n self.update()\n\n self.logger.info('\\nbasket.order created: {}'.format(self.order))\n\n return order_create\n\n def tickets_barcode(self, order):\n \"\"\"Получение штрих-кодов для билетов в заказе из ответа метода order_create.\"\"\"\n for otid in order['tickets']:\n for tid in self.order['tickets']:\n if order['tickets'][otid]['ticket_uuid'] == self.order['tickets'][tid]['ticket_uuid']:\n self.logger.info('\\n{ot_uuid} == {tid_uuid}: {cond}'.format(\n ot_uuid=order['tickets'][otid]['ticket_uuid'],\n tid_uuid=self.order['tickets'][tid]['ticket_uuid'],\n cond=order['tickets'][otid]['ticket_uuid'] == self.order['tickets'][tid]['ticket_uuid'])\n )\n self.order['tickets'][tid]['bar_code'] = (\n order['tickets'][otid]['bar_code'] if\n 'bar_code' in order['tickets'][otid] and order['tickets'][otid]['bar_code'] else\n # Если по каким-то причинам штрих-код не получен - он генерируется автоматически\n ''.join([str(randint(0, 9)) for x in range(self._ts.bar_code_length)])\n )\n self.logger.info('t[bar_code]: {barcode}'.format(barcode=self.order['tickets'][tid]['bar_code']))\n else:\n continue\n\n self.logger.info('\\ntickets with bar_codes: {}'.format(self.order['tickets']))\n\n def order_create_db(self):\n response = {}\n\n # Сохранение созданного заказа в БД\n try:\n Order.objects.create(\n id=self.order['order_uuid'],\n ticket_service_id=self.ticket_service['id'],\n ticket_service_order=self.order['order_id'],\n event_id=self.order['event_uuid'],\n ticket_service_event=self.order['event_id'],\n datetime=timezone_now(),\n name=self.order['customer']['name'],\n email=self.order['customer']['email'],\n phone=self.order['customer']['phone'],\n address=self.order['customer']['address'],\n delivery=self.order['delivery'],\n payment=self.order['payment'],\n payment_id=None,\n status=self.order['status'],\n tickets_count=self.order['tickets_count'],\n total=self.order['total'],\n overall=self.order['overall'],\n domain_id=self.domain_id\n )\n except IntegrityError:\n response['success'] = False\n self.logger.critical('Такой заказ уже был добавлен в базу данных ранее!')\n else:\n response['success'] = True\n self.logger.info('\\nЗаказ {order_uuid} сохранён в БД\\n'.format(order_uuid=self.order['order_uuid']))\n\n for ticket_id in self.order['tickets']:\n try:\n OrderTicket.objects.create(\n id=self.order['tickets'][ticket_id]['ticket_uuid'],\n order_id=self.order['order_uuid'],\n ticket_service_id=self.ticket_service['id'],\n ticket_service_order=self.order['order_id'],\n is_fixed=self.order['tickets'][ticket_id]['is_fixed'],\n is_punched=False,\n bar_code=self.order['tickets'][ticket_id]['bar_code'],\n ticket_id=ticket_id,\n sector_id=self.order['tickets'][ticket_id]['sector_id'],\n sector_title=self.order['tickets'][ticket_id]['sector_title'],\n row_id=self.order['tickets'][ticket_id]['row_id'],\n seat_id=self.order['tickets'][ticket_id]['seat_id'],\n seat_title=self.order['tickets'][ticket_id]['seat_title'],\n price=self.order['tickets'][ticket_id]['price'],\n domain_id=self.domain_id\n )\n except IntegrityError:\n self.logger.critical('Невозможно добавить билет в БД!')\n else:\n self.logger.info('Билет {} сохранён в 
БД'.format(self.order['tickets'][ticket_id]['ticket_uuid']))\n\n return response\n\n def payment_create(self):\n payment_create = self._ps.payment_create(\n event_uuid=self.order['event_uuid'],\n event_id=self.order['event_id'],\n order_uuid=self.order['order_uuid'],\n order_id=self.order['order_id'],\n customer=self.order['customer'],\n overall=self.order['overall'],\n )\n\n self.logger.info('payment_create: {}'.format(payment_create))\n\n # Успешный запрос на оплату\n if payment_create['success']:\n self.logger.info('\\nСоздание новой онлайн-оплаты завершилось успешно')\n else:\n self.logger.info('\\nСоздание новой онлайн-оплаты завершилось НЕуспешно')\n\n return payment_create\n\n def payment_create_db(self, payment_create):\n self.order['payment_id'] = payment_create['payment_id']\n self.payment_url = payment_create['payment_url']\n\n Order.objects.filter(id=self.order['order_uuid']).update(\n datetime=timezone_now(),\n payment_id=self.order['payment_id']\n )\n\n self.logger.info('Идентификатор оплаты: {}'.format(self.order['payment_id']))\n\n self.update()\n\n def payment_status(self):\n payment_status = self._ps.payment_status(payment_id=self.order['payment_id'])\n\n self.logger.info('Идентификатор оплаты: {}'.format(self.order['payment_id']))\n self.logger.info('payment_status: {}'.format(payment_status))\n\n if payment_status['success']:\n self.logger.info('\\nОплата {payment_id} завершилась успешно'.format(\n payment_id=self.order['payment_id'])\n )\n else:\n self.logger.info('\\nОплата {payment_id} завершилась НЕуспешно'.format(\n payment_id=self.order['payment_id'])\n )\n\n return payment_status\n\n def order_approve(self):\n # Подтвердить можно только созданный ранее заказ\n if self.order['status'] == 'ordered':\n self.logger.info('Подтверждение оплаты заказа в сервисе продажи билетов...')\n\n order_approve = self._ts.order_approve(\n event_id=self.order['event_id'],\n order_uuid=self.order['order_uuid'],\n order_id=self.order['order_id'],\n payment_id=self.order['payment_id'],\n payment_datetime=self.now(),\n tickets=self.order['tickets'],\n )\n\n self.logger.info('order_approve: {}'.format(order_approve))\n\n if order_approve['success']:\n # Обновление статуса заказа в БД\n self.order_status_db('approved')\n\n self.logger.info('Заказ {order_id} в сервисе продажи билетов отмечен как оплаченный'.format(\n order_id=self.order['order_id']\n ))\n else:\n self.logger.info('Заказ {order_id} НЕ удалось отметить в сервисе продажи билетов как оплаченный'.format(\n order_id=self.order['order_id']\n ))\n\n response = order_approve\n else:\n response = {}\n response['success'] = False\n response['message'] = 'Подтвердить можно только созданный ранее заказ'\n\n return response\n\n def order_cancel(self):\n response = {}\n\n # Отменить можно только:\n # * либо созданный ранее заказ,\n # * либо подтвердлжённый заказ с оффлайн-оплатой.\n cancel_condition = (\n self.order['status'] == 'ordered' or\n self.order['status'] == 'approved' and self.order['payment'] == 'cash'\n )\n\n if cancel_condition:\n self.logger.info('\\nОтмена заказа в сервисе продажи билетов...')\n\n order_cancel = self._ts.order_cancel(\n event_id=self.order['event_id'],\n order_uuid=self.order['order_uuid'],\n order_id=self.order['order_id'],\n tickets=self.order['tickets'],\n )\n\n self.logger.info('order_cancel: {}'.format(order_cancel))\n\n if order_cancel['success']:\n response['success'] = True\n\n # Обновление статуса заказа в БД\n self.order_status_db('cancelled')\n\n cancel_message = 'Заказ {order_id} отменён в 
сервисе продажи билетов'.format(\n order_id=self.order['order_id']\n )\n\n response['message'] = cancel_message\n self.logger.info(cancel_message)\n else:\n response['success'] = False\n\n cancel_message = 'Заказ {order_id} НЕ удалось отменить в сервисе продажи билетов'.format(\n order_id=self.order['order_id']\n )\n\n response['message'] = cancel_message\n self.logger.info(cancel_message)\n else:\n response['success'] = False\n response['message'] = 'Отменить можно только созданный ранее заказ'\n\n return response\n\n def order_refund(self, amount, reason=None):\n response = {}\n\n amount = self._ps.decimal_price(amount)\n\n self.logger.info('\\nСумма возврата: {} р.'.format(amount))\n self.logger.info('Причина возврата: {}.'.format(reason))\n\n # Возврат возможен только для подтверждённых заказов\n if self.order['status'] == 'approved':\n # Проверка состояния билетов в заказе (на всякий случай)\n self.tickets_check('approved')\n\n # Возврат заказа в сервисе продажи билетов\n self.logger.info('\\nВозврат заказа в сервисе продажи билетов...')\n\n order_refund = self._ts.order_refund(\n order_id=self.order['order_id'],\n payment_id=self.order['payment_id'],\n amount=amount,\n reason=reason,\n )\n # order_refund = {'success': True, 'amount': amount}\n # order_refund = {'success': False, 'code': 2000, 'message': 'Order has been already deleted'}\n\n self.logger.info('order_refund: {}'.format(order_refund))\n\n if order_refund['success']:\n order_message = 'Заказ {order_id} успешно возвращён в сервисе продажи билетов'.format(\n order_id=self.order['order_id']\n )\n else:\n order_message = 'Заказ {order_id} в сервисе продажи билетов возвратить НЕ удалось'.format(\n order_id=self.order['order_id']\n )\n self.logger.info(order_message)\n\n # Возврат заказа в сервисе онлайн-оплаты\n self.logger.info('\\nВозврат заказа в сервисе онлайн-оплаты...')\n\n payment_refund = self._ps.payment_refund(\n event_uuid=self.order['event_uuid'],\n event_id=self.order['event_id'],\n order_uuid=self.order['order_uuid'],\n order_id=self.order['order_id'],\n customer=self.order['customer'],\n payment_id=self.order['payment_id'],\n amount=amount,\n )\n # payment_refund = {'success': True, 'amount': amount}\n # payment_refund = {'success': False, 'code': 5, 'message': 'Неверная сумма'}\n\n self.logger.info('payment_refund: {}'.format(payment_refund))\n\n if payment_refund['success']:\n payment_message = 'Заказ {order_id} успешно возвращён в сервисе онлайн-оплаты'.format(\n order_id=self.order['order_id']\n )\n else:\n payment_message = 'Заказ {order_id} в сервисе онлайн-оплаты возвратить НЕ удалось'.format(\n order_id=self.order['order_id']\n )\n self.logger.info(payment_message)\n\n if order_refund['success'] and payment_refund['success']:\n # Обновление статуса заказа в БД\n self.order_status_db('refunded')\n\n response['success'] = True\n response['message'] = 'Возврат по заказу № {order_id} проведён успешно'.format(\n order_id=self.order['order_id']\n )\n else:\n response['success'] = False\n\n if order_refund['success']:\n message = '{} {}'.format(\n payment_refund.get('code', ''), payment_refund.get('message')\n )\n response['message'] = 'Возврат по заказу № {order_id} проведён успешно только в сервисе продажи билетов. 
{message}'.format(\n order_id=self.order['order_id'], message=message.strip()\n )\n elif payment_refund['success']:\n message = '{} {}'.format(\n payment_refund.get('code', ''), payment_refund.get('message')\n )\n response['message'] = 'Возврат по заказу № {order_id} проведён успешно только в сервисе онлайн-оплаты. {message}'.format(\n order_id=self.order['order_id'], message=message.strip()\n )\n else:\n order_message = '{} {}'.format(\n order_refund.get('code', ''), order_refund.get('message')\n )\n payment_message = '{} {}'.format(\n payment_refund.get('code', ''), payment_refund.get('message')\n )\n message = '{} {}'.format(\n order_message.strip(), payment_message.strip()\n )\n response['message'] = 'Возврат НЕ удалось завершить успешно. {}'.format(\n message.strip()\n )\n else:\n response['success'] = False\n response['message'] = 'Возврат возможен только для подтверждённых заказов'\n\n return response\n\n def order_status_db(self, status):\n \"\"\"Сохранение статуса заказа в БД.\"\"\"\n self.order['status'] = status\n self.order['status_color'] = OrderBasket.ORDER_STATUS_CAPTION[self.order['status']]['color']\n self.order['status_caption'] = OrderBasket.ORDER_STATUS_CAPTION[self.order['status']]['description']\n\n Order.objects.filter(id=self.order['order_uuid']).update(status=self.order['status'])\n\n self.logger.info('\\nСтатус заказа: {}'.format(self.order['status_caption']))\n\n self.update()\n\n def email_prepare(self):\n # Отправка email администратору и покупателю\n email_from = {}\n email_from['user'] = self.ticket_service['order_email']['user']\n email_from['pswd'] = self.ticket_service['order_email']['pswd']\n email_from['connection'] = EmailBackend(\n host=settings.EMAIL_HOST,\n port=settings.EMAIL_PORT,\n username=email_from['user'],\n password=email_from['pswd'],\n use_tls=settings.EMAIL_USE_TLS,\n )\n\n # Информация о событии\n event = cache_factory('event', self.order['event_uuid'])\n # Информация о сайте\n domain = cache_factory('domain', event['domain_slug'])\n # Информация о сервисе продажи билетов\n ticket_service = cache_factory('ticket_service', self.ticket_service['id'])\n # Информация о сервисе онлайн-оплаты\n payment_service = cache_factory(\n 'payment_service', self.payment_service['id'],\n domain_slug=event['domain_slug']\n )\n\n email_context = {\n 'domain': domain,\n 'event': event,\n 'ticket_service': ticket_service,\n 'payment_service': payment_service,\n 'order': self.order,\n 'customer': self.order['customer']\n }\n\n response = {}\n response['from'] = email_from\n response['context'] = email_context\n\n return response\n\n def email_admin(self):\n email = self.email_prepare()\n\n admin_email = EmailMessage(\n 'order/email_admin.tpl',\n email['context'],\n email['from']['user'],\n (email['from']['user'],),\n connection=email['from']['connection']\n )\n\n try:\n sender = admin_email.send()\n except SMTPException as exc:\n sender = 0\n sender_exception = exc\n\n if bool(sender):\n message = 'Email-уведомление администратору отправлено'\n\n self.logger.info(message)\n\n return {\n 'success': True,\n 'message': message,\n }\n else:\n message = 'НЕ удалось отправить email-уведомление администратору.\\n{}'.format(sender_exception)\n\n self.logger.info(message)\n\n return {\n 'success': False,\n 'message': message,\n }\n\n def email_customer(self):\n email = self.email_prepare()\n\n customer_email = EmailMessage(\n 'order/email_customer.tpl',\n email['context'],\n email['from']['user'],\n (self.order['customer']['email'],),\n 
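In the "elif payment_refund['success']:" branch of order_refund above, the quoted error details are taken from payment_refund, yet in that branch it is the ticket-service refund (order_refund) that failed, so the message ends up quoting the side that succeeded. A sketch of the likely intended line:

# The failing side in this branch is order_refund, so quote its code/message:
message = '{} {}'.format(order_refund.get('code', ''), order_refund.get('message'))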
connection=email['from']['connection']\n )\n\n # Опциональная генерация электронных билетов и их вложение в письмо покупателю\n if self.order['delivery'] == 'email':\n self.logger.info('\\nСоздание электронных PDF-билетов...')\n for ticket_id in self.order['tickets']:\n # Формирование контекста для генерации PDF-билета (билет + событие + order_id)\n ticket_context = self.order['tickets'][ticket_id]\n ticket_context.update(email['context']['event'])\n ticket_context['order_id'] = self.order['order_id']\n\n self.logger.info('\\nИнформация о билете: {}'.format(ticket_context))\n\n pdf_ticket_file = render_eticket(ticket_context, self.logger)\n customer_email.attach_file(pdf_ticket_file, mimetype='application/pdf')\n\n try:\n sender = customer_email.send()\n except SMTPException as exc:\n sender = 0\n sender_exception = exc\n\n if bool(sender):\n message = 'Email-уведомление покупателю отправлено'\n\n self.logger.info(message)\n\n return {\n 'success': True,\n 'message': message,\n }\n else:\n message = 'НЕ удалось отправить email-уведомление покупателю.\\n{}'.format(sender_exception)\n\n self.logger.info(message)\n\n return {\n 'success': False,\n 'message': message,\n }\n\n def get_overall(self):\n \"\"\"Получение общей суммы заказа и её подписи в зависимости от возможных наценок/скидок.\"\"\"\n order_type = self.order['customer']['order_type'] if 'customer' in self.order else 'self_cash'\n extra = self.markup['extra'][order_type] if order_type in self.markup['extra'] else 0\n\n # Для любого типа заказа без дополнительных условий - с учётом сервисного сбора (если он задан)\n if extra > 0:\n self.order['overall'] = self.overall_with_extra(extra)\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_extra']\n # Иначе - сумма цен на билеты\n else:\n self.order['overall'] = self.order['total']\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_total']\n\n # При доставке курьером - с учётом стоимости доставки курьером (если она задана)\n if self.order['delivery'] == 'courier':\n if extra > 0:\n # С учётом доставки курьером и сервисного сбора\n if self.markup['courier_price'] > 0:\n self.order['overall'] = self.overall_plus_courier_price()\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_courier_extra']\n # Иначе - с учётом сервисного сбора\n # else:\n # self.order['overall'] = self.overall_with_extra(extra)\n # self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_extra']\n else:\n # С учётом доставки курьером\n if self.markup['courier_price'] > 0:\n self.order['overall'] = self.overall_plus_courier_price()\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_courier']\n # Иначе - сумма цен на билеты\n # else:\n # self.order['overall'] = self.order['total']\n # self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_total']\n\n # При онлайн-оплате - с учётом комиссии сервиса онлайн-оплаты (если она задана)\n if self.order['payment'] == 'online':\n if extra > 0:\n # С учётом комиссии платёжной системы и сервисного сбора\n if self.markup['commission'] > 0:\n self.order['overall'] = self.overall_with_commission()\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_commission_extra']\n # Иначе - также с учётом комиссии платёжной системы и сервисного сбора\n else:\n self.order['overall'] = self.overall_with_extra(extra)\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_commission_extra']\n else:\n # С учётом комиссии 
платёжной системы\n if self.markup['commission'] > 0:\n self.order['overall'] = self.overall_with_commission()\n self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_commission']\n # Иначе - сумма цен на билеты\n # else:\n # self.order['overall'] = self.order['total']\n # self.order['overall_header'] = OrderBasket.ORDER_OVERALL_CAPTION['overall_total']\n\n # Пересчёт общей суммы заказа для удобства оффлайн-оплаты (кратно значению в OVERALL_EXTRA_MULTIPLIER)\n if extra > 0 and self.order['payment'] == 'cash':\n overall = self.order['overall']\n multiplier = OrderBasket.OVERALL_EXTRA_MULTIPLIER\n\n self.order['overall'] = (overall - (overall % multiplier)) + multiplier\n\n def overall_with_extra(self, extra):\n \"\"\"Общая сумма заказа с учётом сервисного сбора.\n\n Если процент сервисного сбора больше ``0``,\n то к сумме заказа добавляется указанный процент от цены каждого из билетов в заказе.\n Если процент сервисного сбора равен ``0``, мы получаем ту же самую сумму.\n\n Args:\n extra (decimal.Decimal): Процент сервисного сбора для конкретного типа заказа.\n\n Returns:\n decimal.Decimal: Общая сумма заказа ``overall``.\n \"\"\"\n overall_with_extra = self.order['total']\n if extra > 0:\n for ticket_id in self.order['tickets']:\n ticket_price = self.order['tickets'][ticket_id]['price']\n overall_with_extra += (self.decimal_price(ticket_price) * extra) / self.decimal_price(100)\n return self.decimal_price(overall_with_extra)\n\n def overall_plus_courier_price(self):\n \"\"\"Общая сумма заказа с учётом стоимости доставки курьером.\n\n Если стоимость доставки курьером больше ``0``, то она добавляется к сумме заказа.\n Если стоимость доставки курьером равна ``0``, мы получаем ту же самую сумму.\n\n Returns:\n decimal.Decimal: Общая сумма заказа ``overall``.\n \"\"\"\n return self.decimal_price(self.order['overall'] + self.markup['courier_price'])\n\n def overall_with_commission(self):\n \"\"\"Общая сумма заказа при онлайн-оплате.\n\n Ес��и комиссия сервиса онлайн-оплаты не равна ``0``,\n то к сумме заказа добавляется указанный процент от самой суммы заказа.\n Если комиссия равна ``0``, мы получаем ту же самую сумму.\n\n Returns:\n decimal.Decimal: Общая сумма заказа ``overall``.\n \"\"\"\n return self.decimal_price(\n self.order['overall'] + ((self.order['overall'] * self.markup['commission']) / self.decimal_price(100))\n )\n\n def decimal_price(self, value):\n \"\"\"Преобразование входного значения в денежную сумму с 2 знаками после запятой (копейки) типа ``Decimal``.\n\n Args:\n value (str): Входное значение (в любом случае строка - для обхода проблем с округлением ``float``).\n\n Returns:\n decimal.Decimal: Денежная сумма.\n \"\"\"\n return Decimal(str(value)).quantize(Decimal('1.00'))\n\n def now(self):\n now = timezone_now()\n return now.astimezone(pytz.timezone(self.city_timezone))\n","sub_path":"bezantrakta/order/order_basket.py","file_name":"order_basket.py","file_ext":"py","file_size_in_byte":61066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79807508","text":"import pandas as pd\nimport numpy as np\n\nimport os\nfrom PIL import Image\n\nimport fastai\nfrom fastai.vision import *\nfrom fastai.callbacks import *\nfrom fastai.torch_core import *\nfrom fastai.callback import *\nfrom fastai.basic_train import *\nimport torchvision.models as tmodels\n\n\ndef cleanLabel(x):\n \n labelCount = 0\n \n if x.Pleural_Effusion == 1:\n labelCount += 1\n \n if x.Edema == 1:\n labelCount += 1\n \n if x.Cardiomegaly ==1:\n 
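decimal_price above converts through str() before quantizing, which sidesteps binary-float rounding noise, and the cash branch rounds the total up to the next multiple of OVERALL_EXTRA_MULTIPLIER. A standalone sketch (a multiplier of 10 is assumed purely for illustration; the real constant lives on OrderBasket):

from decimal import Decimal

def decimal_price(value):
    # str() first, so float representation noise never reaches the Decimal.
    return Decimal(str(value)).quantize(Decimal('1.00'))

def round_up_for_cash(overall, multiplier=10):
    # Mirrors (overall - overall % multiplier) + multiplier above; note it
    # always adds a full step, even when overall is already a multiple.
    return (overall - (overall % multiplier)) + multiplier

total = decimal_price('1234.567')        # Decimal('1234.57')
print(total, round_up_for_cash(total))   # 1234.57 1240.00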
labelCount += 1\n \n if x.Pneumonia == 1:\n labelCount += 1\n \n return labelCount\n \n \n\ndef getLabel2(x,disease):\n \n if x[disease] ==1:\n return disease\n else:\n return \"Rest\"\n\n\n\n# def getLabel(x):\n \n# if x.Pleural_Effusion ==1:\n# return \"Pleural_Effusion\"\n# else:\n# return \"Rest\"\n\n\n# # elif x.Edema == 1:\n# # return \"Edema\"\n# # elif x.Cardiomegaly==1:\n# # return \"Cardiomegaly\"\n# # elif x.Pneumonia == 1:\n# # return \"Pneumonia\"\n# # else:\n# # return \"0\"\n \n\nbaseFolder = \"/home/santhosr/Documents/Chexpert\"\n\ncols = ['Path',\n 'Sex',\n 'Age',\n 'View',\n 'AP/PA',\n 'No_Finding',\n 'Enlarged_Cardiomediastinum',\n 'Cardiomegaly',\n 'Lung_Opacity',\n 'Lung_Lesion',\n 'Edema',\n 'Consolidation',\n 'Pneumonia',\n 'Atelectasis',\n 'Pneumothorax',\n 'Pleural_Effusion',\n 'Pleural_Other',\n 'Fracture',\n 'Support_Devices']\n\n\ntrainFile = pd.read_csv(os.path.join(baseFolder,'train.csv'), names = cols, header=0)\nvalidFile = pd.read_csv(os.path.join(baseFolder,'valid.csv'), names = cols, header=0)\n\ntrainFile[\"Path\"] = trainFile.Path.apply(lambda x : x.replace('CheXpert-v1.0-small',\"\")[1:])\nvalidFile[\"Path\"] = validFile.Path.apply(lambda x : x.replace('CheXpert-v1.0-small',\"\")[1:])\n\n\nselectCols = ['Path',\"View\",'Sex',\"Pleural_Effusion\", \"Edema\",\"Cardiomegaly\",\"Pneumonia\"]\n\ntrainFile = trainFile[selectCols]\nvalidFile = validFile[selectCols]\n\n# -1 for Uncertain, 0 for negative, 1 for positive\n\ntrainFile['isClean'] = trainFile.apply(lambda x : cleanLabel(x), axis = 1)\nvalidFile['isClean'] = validFile.apply(lambda x : cleanLabel(x), axis = 1)\n\ntrainFile['train'] = False\nvalidFile['train'] = True\n\ntrainFile = trainFile[trainFile.isClean==1]\nvalidFile = validFile[validFile.isClean==1]\n\n\ndf = pd.concat([trainFile,validFile])\n\ndf['label'] = df.apply(lambda x : getLabel(x), axis = 1)\n\nlabelMap = {\"Pleural_Effusion\":0, \"Edema\":1,\"Cardiomegaly\":2,\"Pneumonia\":3,\"Rest\":4}\n\ndef getLabelDf(x):\n \n# print(x)\n \n x = x[36:] #To account for the extra \"././\" added before the Path variable\n# print(x)\n x = df.loc[df.Path == x] \n# print(x)\n \n return labelMap[x.label.values[0]]\n\n\nclass ModelTrackerCallback(TrackerCallback):\n \"A `TrackerCallback` that saves the model when monitored quantity is best.\"\n def __init__(self, learn:Learner, path:str='/home/santhosr/Documents/Courses/CIS700/Project/models',id:int=None,monitor:str='val_loss', mode:str='auto',modelName:str='resnet50'):\n super().__init__(learn, monitor=monitor, mode=mode)\n \n self.bestAcc = 0.0001\n self.folderPath = path\n self.id = id\n self.modelName = modelName\n super().__post_init__()\n\n def on_epoch_end(self, epoch, **kwargs:Any)->None:\n \"Compare the value monitored to its best score and maybe save the model.\"\n\n acc = float(self.learn.recorder.metrics[epoch-1][0])\n val_loss = self.learn.recorder.val_losses[epoch-1]\n\n if acc>self.bestAcc:\n self.bestAcc = acc\n if self.id==None:\n fileName = 'model_'+self.modelName+'_acc'+str(int(acc*1000))+\"_loss\"+str(int(val_loss*1000))\n else:\n fileName = 'model_'+self.modelName+'_id' + str(self.id) + '_acc' + str(int(acc*1000)) + \"_loss\" + str(int(val_loss*1000))\n fileName = os.path.join(self.folderPath, fileName)\n self.learn.save(fileName)\n\n\nprint(\"Data Creation Start\")\ndata = ImageItemList.from_df(df=df,path=baseFolder, cols='Path').split_from_df(col='train').label_from_func(getLabelDf).transform(get_transforms(),size=256).databunch(bs=50).normalize()\nprint(\"Data Creation 
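resnet_train.py above calls df.apply(lambda x: getLabel(x), axis=1), but getLabel only exists in a commented-out block, so the script raises NameError as written. A reconstruction consistent with that commented code and the labelMap keys (the author's intended version may differ, e.g. via getLabel2):

def getLabel(x):
    # One mutually exclusive label per row; rows were already filtered to
    # isClean == 1, so at most one of these columns equals 1.
    for disease in ["Pleural_Effusion", "Edema", "Cardiomegaly", "Pneumonia"]:
        if x[disease] == 1:
            return disease
    return "Rest"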
Complete\")\n\n\nlearn = create_cnn(data, tmodels.resnet50, metrics=accuracy,pretrained=True)\n\n# learn.load('/home/santhosr/Documents/Birad/ProcessedData/models/model_resnet50_acc668_loss600')\n\nbest_model_cb = partial(ModelTrackerCallback,id=6, modelName = \"resnet50_Edema\")\nlearn.callback_fns.append(best_model_cb)\n\nlearn.unfreeze()\nlearn.fit(30,1e-5)\n","sub_path":"resnet_train.py","file_name":"resnet_train.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"281037169","text":"# --------------------------------------------------------\n#\n# PYTHON PROGRAM DEFINITION\n#\n# The knowledge a computer has of Python can be specified in 3 levels:\n# (1) Prelude knowledge --> The computer has it by default.\n# (2) Borrowed knowledge --> The computer gets this knowledge from 3rd party libraries defined by others\n# (but imported by us in this program).\n# (3) Generated knowledge --> The computer gets this knowledge from the new functions defined by us in this program.\n#\n# When launching in a terminal the command:\n# user:~$ python3 this_file.py\n# our computer first processes this PYTHON PROGRAM DEFINITION section of the file.\n# On it, our computer enhances its Python knowledge from levels (2) and (3) with the imports and new functions\n# defined in the program. However, it still does not execute anything.\n#\n# --------------------------------------------------------\n\n# ------------------------------------------------\n# IMPORTS\n# ------------------------------------------------\nimport codecs\nimport functools\nimport os\n\n# ------------------------------------------------\n# FUNCTION read_graph_from_folder\n# ------------------------------------------------\ndef read_graph_from_folder(my_dataset_dir):\n # 1. We create the output variable\n res = ()\n\n # 1.1. We output the number of nodes\n num_nodes = 0\n\n # 1.2. We output the connections per node\n edges_per_node = {}\n\n # 2. We list the files from the directory my_dataset_dir\n list_of_files = os.listdir(my_dataset_dir)\n if ('.DS_Store' in list_of_files):\n list_of_files.remove('.DS_Store')\n list_of_files.sort()\n\n # 3. We traverse the files one by one\n for file in list_of_files:\n # 3.1. We open the file for reading\n my_input_stream = codecs.open(my_dataset_dir + file, \"r\", encoding='utf-8')\n\n # 3.2. We traverse the file\n for line in my_input_stream:\n # 3.2.1. We parse the line\n (source_node, target_node, _) = tuple(line.strip().split(\" \"))\n\n # 3.2.2. We search for new nodes appearing in the line\n for node_name in [source_node, target_node]:\n # 3.2.2.1. If the node has not appeared before\n if node_name not in edges_per_node:\n # I. We associate the new node with an empty list of edges\n edges_per_node[node_name] = []\n\n # II. We increase the number of different nodes found so far\n num_nodes += 1\n\n # 3.2.3. We populate the edges of source_node\n edges_per_node[source_node].append( target_node )\n\n # 3.4. We close the file\n my_input_stream.close()\n\n # 4. We make the info to be a tuple (num_neighbours, neighbours_list)\n for node_name in edges_per_node:\n neighbour_list = edges_per_node[node_name]\n edges_per_node[node_name] = (len(neighbour_list), neighbour_list)\n\n # 5. We assign res\n res = (num_nodes, edges_per_node)\n\n # 6. 
We return res\n return res\n\n# ------------------------------------------\n# FUNCTION compute_page_rank\n# ------------------------------------------\ndef compute_page_rank(edges_per_node, reset_probability, max_iterations):\n\n # ------------------------------------------------\n # START OF YOUR CODE:\n # ------------------------------------------------\n\n # Remember that the function must return a dictionary with:\n # Key => The node number\n # Value => The PageRank value computed for this node.\n\n # Type all your code here.\n pass\n\n\n\n\n\n\n # ------------------------------------------------\n # END OF YOUR CODE\n # ------------------------------------------------\n\n# ------------------------------------------\n# FUNCTION my_main\n# ------------------------------------------\ndef my_main(my_dataset_dir, reset_probability, max_iterations):\n # 1. We read the graph from the file\n (num_nodes, edges_per_node) = read_graph_from_folder(my_dataset_dir)\n\n # 2. We compute the shortest paths to each node\n page_rank_per_node = compute_page_rank(edges_per_node, reset_probability, max_iterations)\n\n # 3. We sort the nodes in decreasing order in their rank\n rank_per_node = [ (round(page_rank_per_node[node], 2), node) for node in page_rank_per_node ]\n rank_per_node.sort(reverse=True)\n\n # 4. We print them\n for item in rank_per_node:\n print(\"id=\" + str(item[1]) + \"; pagerank=\" + str(item[0]))\n\n# --------------------------------------------------------\n#\n# PYTHON PROGRAM EXECUTION\n#\n# Once our computer has finished processing the PYTHON PROGRAM DEFINITION section its knowledge is set.\n# Now its time to apply this knowledge.\n#\n# When launching in a terminal the command:\n# user:~$ python3 this_file.py\n# our computer finally processes this PYTHON PROGRAM EXECUTION section, which:\n# (i) Specifies the function F to be executed.\n# (ii) Define any input parameter such this function F has to be called with.\n#\n# --------------------------------------------------------\nif __name__ == '__main__':\n # 1. We get the input values\n reset_probability = 0.15\n max_iterations = 3\n\n # 2. Local or Databricks\n local_False_databricks_True = False\n\n # 3. We set the path to my_dataset and my_result\n my_local_path = \"../../../../3_Code_Examples/L15-25_Spark_Environment/\"\n my_databricks_path = \"/\"\n\n my_dataset_dir = \"FileStore/tables/6_Assignments/my_dataset_2/\"\n\n if local_False_databricks_True == False:\n my_dataset_dir = my_local_path + my_dataset_dir\n else:\n my_dataset_dir = my_databricks_path + my_dataset_dir\n\n # 3. We call to my_main\n my_main(my_dataset_dir, reset_probability, max_iterations)\n","sub_path":"Big Data and Analytics/A02/my_code/A02_Part4/A02_Part4.py","file_name":"A02_Part4.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41326833","text":"'''\n\n\n\n'''\nimport os, subprocess, time, sys\nfrom sched import scheduler\nfrom numpy import floor, mod\nfrom comMethods import readfile,writeQOS\n\nclass clustersjob:\n \"\"\" The largest numbers of clusters (up to 900*5) take 64G of memory and a single processor. 
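compute_page_rank in A02_Part4.py above is left as a stub. One standard iteration scheme consistent with the (num_neighbours, neighbours_list) tuples produced by read_graph_from_folder (a sketch under one common convention; the course's reference solution may treat dangling nodes or the reset mass differently):

def compute_page_rank(edges_per_node, reset_probability, max_iterations):
    num_nodes = len(edges_per_node)
    rank = {node: 1.0 / num_nodes for node in edges_per_node}
    for _ in range(max_iterations):
        # Every node keeps the reset share, then receives mass from in-links.
        new_rank = {node: reset_probability / num_nodes for node in edges_per_node}
        for node, (num_neighbours, neighbours) in edges_per_node.items():
            if num_neighbours == 0:
                continue  # dangling node: its outgoing mass is dropped here
            share = (1.0 - reset_probability) * rank[node] / num_neighbours
            for neighbour in neighbours:
                new_rank[neighbour] += share
        rank = new_rank
    return rank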
\n We need to run uncle 10 (build clusters) with a separate job on a large memory job\n that builds the clusters\"\"\"\n from comMethods import reportFinished,waitJobs\n\n def __init__(self,user,jobsInfo):\n \"\"\" \"\"\"\n self.user = user\n self.jobsInfo = jobsInfo\n self.jobIDs = []\n\n def clustBuild(self):\n '''Runs the process of building clusters'''\n self.makeRunClusters()\n self.waitJobs()\n ######### Don't remove this code: \n# dummy = os.listdir(os.getcwd()) #for some reason this needs to be here so that it can find clustersjob.out!!!!!\n# if not os.path.exists(os.getcwd()+'/clustersjob.out'):\n# sys.exit('Clusters job never started. Stopping program')\n# else:\n# lines = readfile(os.getcwd()+'/clustersjob.out')\n# if 'done' not in lines[-1]:\n# sys.exit('Clusters job failed. Stopping program') \n# \n# def waitclustersjob(self):\n# \"\"\"Waits until the cluster job is done \"\"\"\n# s = scheduler(time.time, time.sleep) \n# finished = False\n# start_time = time.time()\n# event_time = start_time\n# while not finished:\n# event_time += 5 #check every x seconds\n# s.enterabs(event_time, 1, self.reportFinished, (self.currJobIds))\n# s.run()\n# finished = self.reportFinished(self.currJobIds)\n# \n def makeRunClusters(self): \n \"\"\"Creates clusters jobfile and starts the run \"\"\" \n subprocess.call(['ln','-s','/fslhome/{}/graphener_links/uncle/uncle.x'.format(self.user['name'])]) \n jobFile = open('clustersjob','w')\n jobFile.write(\"#!/bin/bash\\n\\n\")\n walltime = 0.6\n hrs = int(floor(walltime)); mints = int(mod(walltime,1)*60)\n hrs,mints = writeQOS(self.jobsInfo,jobFile,walltime,hrs,mints)\n jobFile.write('#SBATCH --time={}:{}:00\\n'.format(hrs,mints))\n jobFile.write(\"#SBATCH --nodes=1\\n\")\n jobFile.write(\"#SBATCH --mem-per-cpu=64G\\n\")\n jobFile.write(\"#SBATCH --mail-user={}\\n\".format(self.user['email'])) \n jobFile.write(\"#SBATCH --mail-type=FAIL\\n\")\n jobFile.write(\"#SBATCH --mail-type=END\\n\")\n jobFile.write(\"#SBATCH --job-name=clusters\\n\" ) \n jobFile.write('module unload mpi\\n')\n jobFile.write('module load mpi/openmpi-1.6.5_intel-13.0.1\\n')\n jobFile.write('module unload mkl\\n')\n jobFile.write('module load mkl/11/2\\n')\n jobFile.write('module unload python\\n')\n jobFile.write('module load python/2/7\\n') \n jobFile.write(\"mpiexec uncle.x 10 > clustersjob.out\\n\") \n jobFile.close()\n if os.path.exists('clustersjob.out'): os.system('rm clustersjob.out')\n proc = subprocess.Popen(['sbatch','clustersjob'], stdout=subprocess.PIPE)\n jobid = proc.communicate()[0].split()[3]\n subprocess.call(['echo', '\\tSubmitted clusters job ' + jobid])\n self.jobIDs.append(jobid) \n","sub_path":"graphener/ClustersBuild.py","file_name":"ClustersBuild.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"378064451","text":"# Source: https://pymotw.com/2/socket/udp.html\n\nimport socket, sys, time\nimport random\n\n\nhost = sys.argv[1]\ntextport = sys.argv[2]\n#host = \"localhost\"\n#textport = \"8081\"\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nport = int(textport)\nserver_address = (host, port)\n\nfor i in range (10):\n #print (\"Enter data to transmit: ENTER to quit\")\n data = str(random.randint(1,100))\n print(\" sending \"+ data)\n\n s.sendto(data.encode('utf-8'), 
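self.waitJobs() in ClustersBuild.py above comes from comMethods and is not shown. A hypothetical stand-in (name and behaviour assumed) that polls squeue until the submitted job ids disappear; note the ids captured from sbatch via communicate() are bytes under Python 3 and should be decoded first:

import subprocess
import time

def wait_for_slurm_jobs(job_ids, poll_seconds=30):
    # Poll squeue (-h: no header, -o %i: job ids only) until none of the
    # submitted ids are still queued or running. Assumes ids are str.
    pending = set(job_ids)
    while pending:
        out = subprocess.check_output(['squeue', '-h', '-o', '%i'])
        pending &= set(out.decode().split())
        if pending:
            time.sleep(poll_seconds)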
server_address)\n\ns.shutdown(1)\n\n\n","sub_path":"randNumSender.py","file_name":"randNumSender.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313578150","text":"import os\nimport codecs\nimport ujson\nimport random\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import Counter\n\nnp.random.seed(12345)\nglove_sizes = {'6B': int(4e5), '42B': int(1.9e6), '840B': int(2.2e6), '2B': int(1.2e6)}\nPAD = \"\"\nUNK = \"\"\n\n\ndef iob_to_iobes(labels):\n \"\"\"IOB -> IOBES\"\"\"\n iob2(labels)\n new_tags = []\n for i, tag in enumerate(labels):\n if tag == 'O':\n new_tags.append(tag)\n elif tag.split('-')[0] == 'B':\n if i + 1 != len(labels) and labels[i + 1].split('-')[0] == 'I':\n new_tags.append(tag)\n else:\n new_tags.append(tag.replace('B-', 'S-'))\n elif tag.split('-')[0] == 'I':\n if i + 1 < len(labels) and labels[i + 1].split('-')[0] == 'I':\n new_tags.append(tag)\n else:\n new_tags.append(tag.replace('I-', 'E-'))\n else:\n raise Exception('Invalid IOB format!')\n return new_tags\n\n\ndef iob2(labels):\n \"\"\"Check that tags have a valid IOB format. Tags in IOB1 format are converted to IOB2.\"\"\"\n for i, tag in enumerate(labels):\n if tag == 'O':\n continue\n split = tag.split('-')\n if len(split) != 2 or split[0] not in ['I', 'B']:\n return False\n if split[0] == 'B':\n continue\n elif i == 0 or labels[i - 1] == 'O': # conversion IOB1 to IOB2\n labels[i] = 'B' + tag[1:]\n elif labels[i - 1][1:] == tag[1:]:\n continue\n else:\n labels[i] = 'B' + tag[1:]\n return True\n\n\ndef write_json(filename, dataset):\n with codecs.open(filename, mode=\"w\", encoding=\"utf-8\") as f:\n ujson.dump(dataset, f)\n\n\ndef word_convert(word, lowercase=True, char_lowercase=False):\n if char_lowercase:\n char = [c for c in word.lower()]\n else:\n char = [c for c in word]\n if lowercase:\n word = word.lower()\n return word, char\n\n\ndef raw_dataset_iter(filename, lowercase=True, char_lowercase=False):\n with codecs.open(filename, mode=\"r\", encoding=\"utf-8\") as f:\n words, chars, labels = [], [], []\n for line in f:\n line = line.lstrip().rstrip()\n if len(line) == 0 or line.startswith(\"-DOCSTART-\"):\n if len(words) != 0:\n yield words, chars, labels\n words, chars, labels = [], [], []\n else:\n word, *_, label = line.split(\" \")\n word, char = word_convert(word, lowercase=lowercase, char_lowercase=char_lowercase)\n words.append(word)\n chars.append(char)\n labels.append(label)\n if len(words) != 0:\n yield words, chars, labels\n\n\ndef load_dataset(filename, iobes, lowercase=True, char_lowercase=False):\n dataset = []\n for words, chars, labels in raw_dataset_iter(filename, lowercase, char_lowercase):\n if iobes:\n labels = iob_to_iobes(labels)\n dataset.append({\"words\": words, \"chars\": chars, \"labels\": labels})\n return dataset\n\n\ndef load_emb_vocab(data_path, dim):\n vocab = list()\n with codecs.open(data_path, mode=\"r\", encoding=\"utf-8\") as f:\n if \"glove\" in data_path:\n total = glove_sizes[data_path.split(\".\")[-3]]\n else:\n total = int(f.readline().lstrip().rstrip().split(\" \")[0])\n for line in tqdm(f, total=total, desc=\"Load embedding vocabulary\"):\n line = line.lstrip().rstrip().split(\" \")\n if len(line) == 2:\n continue\n if len(line) != dim + 1:\n continue\n word = line[0]\n vocab.append(word)\n return vocab\n\n\ndef filter_emb(word_dict, data_path, dim):\n vectors = np.zeros([len(word_dict), dim])\n with codecs.open(data_path, mode=\"r\", 
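randNumSender.py above pushes ten random integers over UDP; note that s.shutdown(1) on an unconnected UDP socket typically raises OSError, and s.close() is the usual cleanup. A matching receiver sketch (host and port assumed to mirror the commented defaults, localhost:8081):

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('localhost', 8081))        # must match the sender's target address
for _ in range(10):
    data, addr = s.recvfrom(1024)  # datagrams arrive one sendto() at a time
    print('received', data.decode('utf-8'), 'from', addr)
s.close()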
encoding=\"utf-8\") as f:\n if \"glove\" in data_path:\n total = glove_sizes[data_path.split(\".\")[-3]]\n else:\n total = int(f.readline().lstrip().rstrip().split(\" \")[0])\n for line in tqdm(f, total=total, desc=\"Load embedding vectors\"):\n line = line.lstrip().rstrip().split(\" \")\n if len(line) == 2:\n continue\n if len(line) != dim + 1:\n continue\n word = line[0]\n if word in word_dict:\n vector = [float(x) for x in line[1:]]\n word_idx = word_dict[word]\n vectors[word_idx] = np.asarray(vector)\n return vectors\n\n\ndef build_token_counters(datasets):\n word_counter = Counter()\n char_counter = Counter()\n label_counter = Counter()\n for dataset in datasets:\n for record in dataset:\n for word in record[\"words\"]:\n word_counter[word] += 1\n for char in record[\"chars\"]:\n for c in char:\n char_counter[c] += 1\n for label in record[\"labels\"]:\n label_counter[label] += 1\n return word_counter, char_counter, label_counter\n\n\ndef build_dataset(data, word_dict, char_dict, label_dict, mode=0, rate=0.5):\n dataset = []\n for record in data:\n chars_list = []\n words = []\n for word in record[\"words\"]:\n words.append(word_dict[word] if word in word_dict else word_dict[UNK])\n for char in record[\"chars\"]:\n chars = [char_dict[c] if c in char_dict else char_dict[UNK] for c in char]\n chars_list.append(chars)\n if mode == 0: # labeled\n labels = [label_dict[label] for label in record[\"labels\"]]\n dataset.append({\"words\": words, \"chars\": chars_list, \"labels\": labels})\n elif mode == 1: # partially labeled\n labels = [label_dict[label] for label in record[\"labels\"]]\n label_mask = np.asarray([0 if v < rate else 1 for v in np.random.rand(len(labels))])\n labels = np.asarray(labels) * label_mask\n dataset.append({\"words\": words, \"chars\": chars_list, \"labels\": labels.tolist(),\n \"label_mask\": label_mask.tolist()})\n elif mode == 2: # unlabeled\n labels = [0 for _ in record[\"labels\"]]\n dataset.append({\"words\": words, \"chars\": chars_list, \"labels\": labels})\n else:\n raise ValueError(\"Unknown label process mode!!! 
Support: [0: labeled | 1: partial | 2: unlabeled]\")\n return dataset\n\n\ndef split_dataset(dataset, n):\n if n is None or type(n) != int or n <= 1 or n >= len(dataset):\n return dataset\n step = len(dataset) // n\n data_list = []\n idx = 0\n for i in range(n):\n if i == n - 1:\n data_list.append(dataset[idx:])\n break\n data_list.append(dataset[idx: idx + step])\n idx = idx + step\n return data_list\n\n\ndef process_word_token(word_counter, config):\n if config.wordvec_path is not None:\n word_vocab = [word for word, _ in word_counter.most_common()]\n emb_vocab = load_emb_vocab(config.wordvec_path, config.word_dim)\n word_vocab = list(set(word_vocab) & set(emb_vocab))\n tmp_word_dict = dict([(word, idx) for idx, word in enumerate(word_vocab)])\n vectors = filter_emb(tmp_word_dict, config.wordvec_path, config.word_dim)\n np.savez_compressed(config.wordvec, embeddings=np.asarray(vectors))\n else:\n word_vocab = [word for word, count in word_counter.most_common() if count >= config.word_threshold]\n word_vocab = [PAD, UNK] + word_vocab\n word_dict = dict([(word, idx) for idx, word in enumerate(word_vocab)])\n return word_dict\n\n\ndef process_char_token(char_counter, config):\n char_vocab = [PAD, UNK] + [char for char, count in char_counter.most_common() if count >= config.char_threshold]\n char_dict = dict([(char, idx) for idx, char in enumerate(char_vocab)])\n return char_dict\n\n\ndef process_label_token(label_counter):\n label_vocab = [\"O\"] + [label for label, _ in label_counter.most_common() if label != \"O\"]\n label_dict = dict([(label, idx) for idx, label in enumerate(label_vocab)])\n return label_dict\n\n\ndef write_to_jsons(datasets, files, save_path):\n for dataset, file in zip(datasets, files):\n write_json(os.path.join(save_path, file), dataset)\n\n\ndef process_data(config):\n # load raw datasets\n train_data = load_dataset(config.train_file, config.iobes, config.word_lowercase, config.char_lowercase)\n dev_data = load_dataset(config.dev_file, config.iobes, config.word_lowercase, config.char_lowercase)\n test_data = load_dataset(config.test_file, config.iobes, config.word_lowercase, config.char_lowercase)\n datasets = [train_data, dev_data, test_data]\n # build token counters\n word_counter, char_counter, label_counter = build_token_counters(datasets)\n # create save path\n if not os.path.exists(config.save_path):\n os.makedirs(config.save_path)\n # build word vocab\n word_dict = process_word_token(word_counter, config)\n # build char vocab\n char_dict = process_char_token(char_counter, config)\n # build label vocab\n label_dict = process_label_token(label_counter)\n # create indices datasets and write to files\n if \"dCRF\" in config.model_name:\n random.shuffle(train_data)\n train_folds = split_dataset(train_data, config.folds) # 10 folds\n train_set = dict()\n for i in range(config.folds):\n train_fold_set = build_dataset(train_folds[i], word_dict, char_dict, label_dict)\n train_set[\"fold_{}\".format(i)] = train_fold_set\n train_set_p = dict()\n for i in range(config.folds):\n train_fold_set = build_dataset(train_folds[i], word_dict, char_dict, label_dict, 1, config.partial_rate)\n train_set_p[\"fold_{}\".format(i)] = train_fold_set\n train_set_u = dict()\n for i in range(config.folds):\n train_fold_set = build_dataset(train_folds[i], word_dict, char_dict, label_dict, 2)\n train_set_u[\"fold_{}\".format(i)] = train_fold_set\n dev_set = build_dataset(dev_data, word_dict, char_dict, label_dict)\n test_set = build_dataset(test_data, word_dict, char_dict, label_dict)\n vocab 
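process_word_token above persists the filtered vectors with np.savez_compressed(config.wordvec, embeddings=...); the matching load at training time is one line (key name taken from the save call; savez_compressed appends '.npz' to the path if it is missing, so the load path may need that extension):

import numpy as np

vectors = np.load(config.wordvec)['embeddings']  # shape (len(word_dict), word_dim)
row = vectors[word_dict['the']]                  # lookup for an in-vocab word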
= {\"word_dict\": word_dict, \"char_dict\": char_dict, \"label_dict\": label_dict}\n write_to_jsons([train_set, train_set_p, train_set_u, dev_set, test_set, vocab],\n [\"train.json\", \"train_p.json\", \"train_u.json\", \"dev.json\", \"test.json\", \"vocab.json\"],\n config.save_path)\n else:\n train_set = build_dataset(train_data, word_dict, char_dict, label_dict)\n dev_set = build_dataset(dev_data, word_dict, char_dict, label_dict)\n test_set = build_dataset(test_data, word_dict, char_dict, label_dict)\n vocab = {\"word_dict\": word_dict, \"char_dict\": char_dict, \"label_dict\": label_dict}\n write_to_jsons([train_set, dev_set, test_set, vocab], [\"train.json\", \"dev.json\", \"test.json\", \"vocab.json\"],\n config.save_path)\n","sub_path":"utils/prepro_lavd_data.py","file_name":"prepro_lavd_data.py","file_ext":"py","file_size_in_byte":10948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526799745","text":"import json\n\nimport click\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\ndef load_config(filename):\n \"\"\"\n Load file and parse JSON to dictionary\n \"\"\"\n with open(filename) as json_data:\n return json.load(json_data)\n\n\ndef get_urls(config):\n \"\"\"\n Return urls present in configuration dictionary\n \"\"\"\n urls = []\n base_url = config['meta']['base_url']\n for site in config['slates']:\n for sport in config['slates'][site]:\n for slate_size in config['slates'][site][sport]:\n date = config['slates'][site][sport][slate_size]\n queryParams = f'site={site}&date={date}'\n url = f'http://{base_url}/{sport}?{queryParams}'\n urls.append(url)\n return urls\n\n\ndef generate_build(driver, url, strategy):\n \"\"\"\n Navigate to LineupBuilder url, modify appropriate settings and click build\n \"\"\"\n print('Generating fixture for:\\n\\t', url)\n driver.get(url)\n\n wait = WebDriverWait(driver, 10)\n reset_class_name = 'reset'\n wait.until(EC.element_to_be_clickable((By.CLASS_NAME, reset_class_name)))\n # reset storage to work around slate player exclusion issues\n driver.find_element_by_class_name(reset_class_name).click()\n\n wait.until(EC.alert_is_present())\n driver.switch_to.alert.accept()\n\n driver.execute_script('window.scrollTo(0, 0)')\n # turn the games filter on and off to ensure all players are in the pool\n toggle_games_filter(driver)\n\n # switch build strategy\n switch_strategy(driver, strategy)\n\n # change some settings to meet DK lineup requirements\n bypass_lock_like_block(driver)\n\n # prevent request splitting by lowering output count\n lower_output_count(driver)\n\n # generate the build\n driver.find_element_by_id('build-button').click()\n\n # wait until exposures table appears (successful build)\n exposures_selector = '.builder-exposures'\n wait.until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, exposures_selector))\n )\n\n\ndef lower_output_count(driver):\n \"\"\"\n Change output count slider\n \"\"\"\n output_count_selector = ('.lb-settings > .lb-settings-group >'\n '.lb-setting > .slider > span')\n output_count_slider = driver.find_element_by_css_selector(\n output_count_selector\n )\n actions = webdriver.ActionChains(driver)\n actions.move_to_element(output_count_slider)\n actions.click()\n DEFAULT_LINEUPS = 10\n # 
threshold above which a request is split into multiple chunks\n CHUNK_THRESHOLD = 5\n # force slider to the left below threshold\n for i in range(DEFAULT_LINEUPS - CHUNK_THRESHOLD):\n actions.send_keys(Keys.LEFT)\n actions.perform()\n\n\ndef toggle_games_filter(driver):\n \"\"\"\n Toggle the all games filter to force all players into pool\n \"\"\"\n filter_expand_selector = '.lst.builder-tabs > .rt > a'\n # show games filter\n expand_link = driver.find_element_by_css_selector(filter_expand_selector)\n expand_link.send_keys(Keys.RETURN)\n\n games_filter_selector = '.lst.filters.multi.teams > .label.games'\n\n wait = WebDriverWait(driver, 10)\n wait.until(EC.element_to_be_clickable(\n (By.CSS_SELECTOR, games_filter_selector)))\n\n games_filter = driver.find_element_by_css_selector(games_filter_selector)\n # exclude and re-add all games to force all players in pool\n games_filter.click()\n\n # click a second time if invalid pool warning is displayed\n try:\n driver.find_element_by_css_selector('.blk.ntc.warning')\n games_filter.click()\n except NoSuchElementException:\n pass\n\n # hide games filter\n expand_link.send_keys(Keys.RETURN)\n\n\ndef switch_strategy(driver, strategy):\n \"\"\"\n Switch between OPTIMAL, BALANCED, and RANDOM strategies\n \"\"\"\n strategy_selector = ('.lb-strategy-selector > ul > li'\n '> div > .slider > span')\n strategy_slider = driver.find_element_by_css_selector(strategy_selector)\n strategy_slider.click()\n\n # the number of times to move the strategy slider to the right\n num_right = 0\n if strategy == 'BALANCED':\n num_right = 1\n elif strategy == 'RANDOM':\n num_right = 2\n\n for i in range(num_right):\n strategy_slider.send_keys(Keys.RIGHT)\n\n\ndef bypass_lock_like_block(driver):\n \"\"\"\n Give low salary players 99% maxLiked to bypass restriction\n \"\"\"\n min_max_selector = ('.lb-settings > .lb-settings-group > .lst > li'\n '> label > input[data-option-name=useMinMaxExposures]')\n driver.find_element_by_css_selector(min_max_selector).send_keys(Keys.SPACE)\n driver.execute_script('window.scrollTo(0, 0)')\n wait = WebDriverWait(driver, 10)\n salary_selector = 'div[data-for=tooltip-formatted_salary]'\n wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, salary_selector)))\n salary_col_header = driver.find_element_by_css_selector(salary_selector)\n\n # sort by lowest salary first\n salary_col_header.click()\n\n # modify maxLiked for lowest salary players in current pool view\n liked_selector = 'div[data-for=tooltip-maxLiked] + .editable-cell > input'\n first_max_liked_input = driver.find_element_by_css_selector(liked_selector)\n first_max_liked_input.send_keys('99')\n actions = webdriver.ActionChains(driver)\n actions.send_keys(Keys.TAB)\n actions.send_keys('99')\n actions.perform()\n\n # restore original sorting order\n salary_col_header.click()\n\n\n@click.command()\n@click.option('--config', default='./config.json',\n help='Path to configuration file')\n@click.option('--driver', default='./browser_drivers/chromedriver',\n help='Path to browser driver')\ndef generate_fixtures(config, driver):\n \"\"\"\n Generate LineupBuilder input fixtures through browser automation\n\n Enable LineupBuilder request-saving in the service before using this script\n \"\"\"\n # load config from file to dict\n configuration = load_config(config)\n # extract target urls from configuration\n urls = get_urls(configuration)\n\n # run headless chrome for convenience\n browser_options = webdriver.ChromeOptions()\n browser_options.add_argument('start-maximized')\n # 
browser_options.add_argument('headless')\n\n print('Starting browser...')\n # sign in to basic access authentication for local dev\n browser_driver = webdriver.Chrome(driver, chrome_options=browser_options)\n try:\n username = configuration['meta']['username']\n password = configuration['meta']['password']\n base_url = configuration['meta']['base_url']\n browser_driver.get(f'http://{username}:{password}@{base_url}')\n\n strategies = ['OPTIMAL', 'BALANCED', 'RANDOM']\n for strategy in strategies:\n for url in urls:\n generate_build(browser_driver, url, strategy)\n finally:\n # clean up by closing browser\n browser_driver.quit()\n\n\nif __name__ == '__main__':\n generate_fixtures()\n","sub_path":"generate_fixtures.py","file_name":"generate_fixtures.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238740542","text":"import torch\nimport torchcde\n\n\ndef test_shape():\n for method in ('rk4', 'dopri5'):\n for _ in range(10):\n num_points = torch.randint(low=5, high=100, size=(1,)).item()\n num_channels = torch.randint(low=1, high=3, size=(1,)).item()\n num_hidden_channels = torch.randint(low=1, high=5, size=(1,)).item()\n num_batch_dims = torch.randint(low=0, high=3, size=(1,)).item()\n batch_dims = []\n for _ in range(num_batch_dims):\n batch_dims.append(torch.randint(low=1, high=3, size=(1,)).item())\n\n t = torch.rand(num_points).sort().values\n values = torch.rand(*batch_dims, num_points, num_channels)\n\n coeffs = torchcde.natural_cubic_spline_coeffs(values, t)\n spline = torchcde.NaturalCubicSpline(coeffs, t)\n\n class _Func(torch.nn.Module):\n def __init__(self):\n super(_Func, self).__init__()\n self.variable = torch.nn.Parameter(torch.rand(*[1 for _ in range(num_batch_dims)], 1, num_channels))\n\n def forward(self, t, z):\n return z.sigmoid().unsqueeze(-1) + self.variable\n\n f = _Func()\n z0 = torch.rand(*batch_dims, num_hidden_channels)\n\n num_out_times = torch.randint(low=2, high=10, size=(1,)).item()\n out_times = torch.rand(num_out_times, dtype=torch.float64).sort().values * (t[-1] - t[0]) + t[0]\n\n options = {}\n if method == 'rk4':\n options['step_size'] = 1. 
/ num_points\n out = torchcde.cdeint(spline, f, z0, out_times, method=method, options=options, rtol=1e-4, atol=1e-6)\n assert out.shape == (*batch_dims, num_out_times, num_hidden_channels)\n","sub_path":"test/test_cdeint.py","file_name":"test_cdeint.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88596560","text":"class Vampire:\n coven = []\n\n def __init__(self, vampire_name, vampire_age, coffin, drank_blood):\n self.name = vampire_name\n self.age = vampire_age\n self.in_coffin = coffin\n self.drank_blood_today = drank_blood\n\n def drink_blood(self):\n self.drank_blood_today = True\n\n def go_home(self):\n self.in_coffin = True\n\n @classmethod\n def create(cls, vampire_name, vampire_age, coffin, drank_blood):\n new_vamp = Vampire(vampire_name, vampire_age, coffin, drank_blood)\n cls.coven.append(new_vamp)\n return new_vamp\n\n @classmethod\n def sunrise(cls):\n alive = []\n for vampire in cls.coven:\n if vampire.drank_blood_today and vampire.in_coffin:\n alive.append(vampire)\n cls.coven = alive\n \n @classmethod\n def sunset(cls):\n for vampire in cls.coven:\n vampire.drank_blood_today = False\n vampire.in_coffin = False\n\n\nstan = Vampire.create('Stan', 25, True, True)\nian = Vampire.create('Ian', 24, True, True)\naj = Vampire.create('Aj', 23, True, True)\nvictoria = Vampire.create('Victoria', 25, True, True)\nsimon = Vampire.create('Simon', 28, True, True)\nedi = Vampire.create('Edi', 49, True, True)\ncarol = Vampire.create('Carol', 28, True, True)\n\nfor vampire in Vampire.coven:\n print(vampire.name) # Should print out names of all the vampires\n\n# Testing the cases for the vampires\nVampire.sunset()\nstan.drink_blood()\nstan.go_home()\nian.drink_blood()\naj.drink_blood()\naj.go_home()\nvictoria.go_home()\nVampire.sunrise() # Should remove all vampires except \"Stan\" and \"Aj\"\nprint('')\nfor vampire in Vampire.coven:\n print(vampire.name) # Should print \"Stan\", \"Aj\"","sub_path":"vampires.py","file_name":"vampires.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76882922","text":"from cso_classifier import CSOClassifier\nimport csv\nimport json\nfrom pathlib import Path\n\ndef getRepoNameFromGitHubUrl(url: str) -> str:\n\treturn '_'.join(url.split('/')[-2:])\n\nif __name__ == '__main__':\n\tcc = CSOClassifier(modules = \"both\", enhancement = \"all\", explanation = True, delete_outliers=True)\n\n\treader = csv.DictReader(open(\"data/train_test_data/readme_new_preprocessed_test.csv\"), delimiter=';')\n\tdata = {}\n\tind = 0\n\tfor row in reader:\n\t\tdata[ind] = {\n\t\t\t\"title\": \"\",\n\t\t\t\"keywords\": \"\",\n\t\t\t\"abstract\": row['Text'],\n\t\t\t\"Label\": row['Label'],\n\t\t\t\"Repo\": row['Repo'],\n\t\t}\n\t\tind += 1\n\n\tpath = Path(\"data/comparison_data/csoc_output_all.csv\")\n\tnew = not path.exists()\n\twriter = csv.DictWriter(open(path, 'a+'), delimiter=';', fieldnames=['Label', 'Repo', 'CSOS'])\n\tif new:\n\t\twriter.writeheader()\n\toutpath = Path('data/csoc_all')\n\toutpath.mkdir(parents=True, exist_ok=True)\n\tresults = cc.batch_run(data, workers=8)\n\tprint(results)\n\tfor key, val in results.items():\n\t\twriter.writerow({'Label': data[key]['Label'], 'Repo': data[key]['Repo'], 'CSOS': ','.join(val['enhanced'])})\n\t\tjson.dump(val, open(outpath / (getRepoNameFromGitHubUrl(data[key]['Repo']) + '.json'), 'w'), 
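Outside the randomized loop in test_cdeint.py above, the same torchcde calls in a fixed-shape, end-to-end sketch (shapes chosen arbitrarily; the API names mirror the test, which targets an older torchcde release — newer versions renamed some of these):

import torch
import torchcde

t = torch.linspace(0., 1., 10)
x = torch.rand(2, 10, 3)                             # (batch, length, channels)
coeffs = torchcde.natural_cubic_spline_coeffs(x, t)
X = torchcde.NaturalCubicSpline(coeffs, t)

class F(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4 * 3)

    def forward(self, t, z):
        # Vector field: (batch, hidden) -> (batch, hidden, input channels)
        return self.linear(z).view(-1, 4, 3)

z0 = torch.rand(2, 4)
z = torchcde.cdeint(X, F(), z0, t[[0, -1]],
                    method='rk4', options={'step_size': 0.1})
print(z.shape)                                       # torch.Size([2, 2, 4])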
indent=4)\n\t\t\t\n","sub_path":"src/Comparison/collect_cso.py","file_name":"collect_cso.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"138553054","text":"import requests,xmltodict\nfrom app_equip.handle_equip import *\nfrom app_equip.handle_pdf import *\nfrom app_equip.model import *\n\n# @author: zhufd\n# @license: (C) Copyright 明州体检\n# @contact: 245838515@qq.com\n# @software: HMS(健康管理系统)\n# @file: handle_parse.py\n# @date: 2018-12-23\n# @desc:业务处理包,解析过程、日志、数据库操作,提供给外层服务\n\n# 总入口:解析文件\ndef equip_file_parse(filename,session,log,process_queue,\n login_id,login_name,login_area, host_name, host_ip,\n equip_type,url,xmbh,czlj_info,equip_info,\n file_types:list,path_parse,path_error,file_handle=True):\n '''\n :param filename: 文件名,带路径\n :param session: 数据库会话\n :param log: 日志对象\n :param process_queue:进程队列\n :param login_id: 登录ID\n :param login_name: 登录姓名\n :param host_name: 登录主机\n :param host_ip: 登录IP\n :param equip_type: 设备类型\n :param url: 上传URL\n :param xmbh: 项目编号\n :param czlj_info: 数据操作对象,TJ_CZJLB\n :param equip_info: 数据操作对象,TJ_TJJLMXB\n :param file_types: 文件类型,列表类型\n :param path_parse: 解析成功->文件存放路径\n :param path_error: 解析失败->文件存放路径\n :param file_handle: 解析成功->是否删除原文件\n :return:\n '''\n # 前缀名 后缀名\n # file_prefix = os.path.splitext(filename)[0]\n file_suffix = os.path.splitext(filename)[1]\n if file_suffix not in file_types:\n return\n # 解析 电测听 数值\n if file_suffix == '.gnd':\n tjbh,jcrq,result,error = get_so_ac_result(filename)\n if error:\n log.info(\"文件:%s 解析电测听结果失败:%s\" %(filename,error))\n return\n # 校正听力值并更新数据库\n insert_cyct(session, login_id, login_name,login_area,tjbh,jcrq,result, host_name, host_ip)\n elif file_suffix == '.pdf':\n # PDF 文件解析\n values = pdfhandle(filename, equip_type, path_parse, path_error, url,\n log, file_handle)\n if not values:\n return\n tjbh,jcrq, file_new, file_up = values\n equip_info['file_path'] = file_up\n equip_info['tjbh'] = tjbh\n # 项目结果定制\n xmjg = \"结果详见%s附件\" % EquipName.get(equip_type)\n # 数据库处理\n dbhandle(\n session=session,\n log=log,\n login_id=login_id,\n tjbh=tjbh,\n xmbh=xmbh,\n xmjg=xmjg,\n jcrq=jcrq,\n filename=file_new,\n czjl_obj=czlj_info,\n equip_obj=equip_info\n )\n # 返回 消息给 UI\n if process_queue:\n process_queue.put(tjbh)\n log.info(\"向主进程UI传递消息:%s\" % tjbh)\n # 转换为图片 再上传\n pichandle(file_new, xmbh, log, url)\n\n\ndef pichandle(file_name,xmbh,log,url):\n t1 = time.time()\n if xmbh=='0806':\n file_pic = pdf2pic(file_name,rotate=90)\n else:\n file_pic = pdf2pic(file_name)\n t2 = time.time()\n log.info(\"Pdf(%s)->Pic(%s)转换成功!耗时:%s秒\" % (file_name,file_pic, str(round(t2-t1, 2))))\n response = api_equip_upload(url, file_pic)\n if response:\n log.info(\"文件(%s):上传成功!\" % file_pic)\n else:\n log.info(\"文件(%s):上传失败!\" % file_pic)\n\n# PDF文件解析、上传\ndef pdfhandle(file_name,equip_type,path_parse,path_error,up_url,log,is_file_remove=True):\n '''\n :param file_name: 待处理的文件\n :param equip_type: 设备类型\n :param path_parse: 解析目录\n :param path_error: 错误目录\n :param up_url: 上传URL\n :param log: 日志对象\n :param is_file_remove: 是否删除文件\n :return:空/(体检编号,新文件名,上传后的文件名) 注:所有文件均带路径\n '''\n t1= time.time()\n lstr = pdf2txt(file_name)\n t2 = time.time()\n if not lstr:\n log.info(\"文件:%s 解析为文本,耗时:%s 秒,解析失败。\" % (file_name, round(t2 - t1, 2)))\n return\n log.info(\"文件:%s 解析为文本,耗时:%s 秒。\" %(file_name,round(t2-t1,2)))\n tjbh,jcrq = txt2tjbh(lstr,equip_type)\n t3 = time.time()\n if not tjbh:\n error_file = os.path.join(path_error,os.path.basename(file_name))\n 
shutil.copy2(file_name, error_file)\n os.remove(file_name)\n log.info(\"文件:%s 提取体检编号,耗时:%s 秒,提取失败,转移至error目录。\" % (file_name, round(t3 - t2, 2)))\n return\n log.info(\"文件:%s 提取体检编号,耗时:%s 秒。\" % (file_name, round(t3 - t2, 2)))\n new_file = os.path.join(path_parse,\"%s_%s.pdf\" %(tjbh,equip_type))\n # 移动文件目录 从create->parse\n shutil.copy2(file_name, new_file)\n # 上传文件\n response = api_equip_upload(up_url, new_file)\n t3 = time.time()\n if not response:\n log.info(\"文件:%s 上传失败,耗时:%s 秒。\" % (new_file, round(t3 - t2, 2)))\n return\n file_up = response['data']\n log.info(\"文件:%s 上传成功,耗时:%s 秒。\" % (new_file, round(t3 - t2, 2)))\n # 删除原文件\n if is_file_remove:\n os.remove(file_name)\n\n return tjbh,jcrq,new_file,file_up\n\n# 操作日志\ndef db_czjl(session,log,tjbh,czjl_obj):\n czjl_obj['tjbh'] = tjbh\n czjl_obj['czsj'] = cur_datetime()\n # 更新记录:TJ_CZJLB\n try:\n session.bulk_insert_mappings(MT_TJ_CZJLB, [czjl_obj])\n session.commit()\n except Exception as e:\n session.rollback()\n log.info(\"体检顾客:%s,插入表TJ_CZJLB失败!错误信息:%s\" %(tjbh,e))\n\n# 设备接口\ndef db_equip(session,log,tjbh,xmbh,equip_obj):\n result = session.query(MT_TJ_EQUIP).filter(MT_TJ_EQUIP.tjbh == tjbh,\n MT_TJ_EQUIP.xmbh == xmbh).scalar()\n if result:\n # 存在则更新,PDF更新\n session.query(MT_TJ_EQUIP).filter(MT_TJ_EQUIP.tjbh == tjbh,\n MT_TJ_EQUIP.xmbh == xmbh\n ).update(\n {\n MT_TJ_EQUIP.modify_time: cur_datetime(),\n MT_TJ_EQUIP.file_path: equip_obj['file_path'], # 上传后的路径\n # MT_TJ_EQUIP.operator: equip_obj['operator'], # 操作工号\n # MT_TJ_EQUIP.operate_time: equip_obj['operate_time'], # 操作时间\n # MT_TJ_EQUIP.hostname: equip_obj['hostname'],\n # MT_TJ_EQUIP.hostip: equip_obj['hostip'],\n # MT_TJ_EQUIP.operator2: equip_obj['operator2'], # 操作姓名\n MT_TJ_EQUIP.operate_area: equip_obj['operate_area'], # 操作区域\n }\n )\n session.commit()\n else:\n try:\n equip_obj['create_time'] = cur_datetime()\n session.bulk_insert_mappings(MT_TJ_EQUIP, [equip_obj])\n session.commit()\n except Exception as e:\n session.rollback()\n log.info(\"体检顾客:%s,插入表TJ_EQUIP失败!错误信息:%s\" % (tjbh, e))\n\n# 项目明细\ndef db_jlmx(session,log,tjbh,xmbh,xmjg,jcrq,login_id):\n # 人体成分、电测听直接项目结束\n # 心电图、骨密度 则ZXPB=3\n result = session.query(MT_TJ_TJJLMXB).filter(MT_TJ_TJJLMXB.tjbh == tjbh,MT_TJ_TJJLMXB.xmbh == xmbh).scalar()\n if result:\n if result.jsbz == '1':\n zxpb = '1'\n jsbz = '1'\n elif xmbh in ['0310','5402']:\n zxpb = '1'\n jsbz = '1'\n else:\n zxpb = '3'\n jsbz = '0'\n try:\n if xmbh == '0310':\n # 不更新\n session.query(MT_TJ_TJJLMXB).filter(MT_TJ_TJJLMXB.tjbh == tjbh,MT_TJ_TJJLMXB.zhbh == xmbh).update({\n MT_TJ_TJJLMXB.zxpb: zxpb,\n MT_TJ_TJJLMXB.jsbz: jsbz,\n MT_TJ_TJJLMXB.qzjs: None,\n })\n elif xmbh == '5402':\n session.query(MT_TJ_TJJLMXB).filter(MT_TJ_TJJLMXB.tjbh == tjbh,MT_TJ_TJJLMXB.zhbh == xmbh).update({\n MT_TJ_TJJLMXB.zxpb: zxpb,\n MT_TJ_TJJLMXB.jsbz: jsbz,\n MT_TJ_TJJLMXB.qzjs: None,\n MT_TJ_TJJLMXB.jcys: login_id,\n MT_TJ_TJJLMXB.jcrq: cur_datetime(),\n MT_TJ_TJJLMXB.jg:xmjg\n })\n else:\n session.query(MT_TJ_TJJLMXB).filter(MT_TJ_TJJLMXB.tjbh == tjbh,MT_TJ_TJJLMXB.zhbh == xmbh).update({\n MT_TJ_TJJLMXB.zxpb: zxpb,\n MT_TJ_TJJLMXB.jsbz: jsbz,\n MT_TJ_TJJLMXB.qzjs: None,\n MT_TJ_TJJLMXB.jcys: login_id,\n MT_TJ_TJJLMXB.jcrq: jcrq,\n MT_TJ_TJJLMXB.jg:xmjg\n })\n session.commit()\n except Exception as e:\n session.rollback()\n log.info(\"体检顾客:%s,更新表TJ_TJJLMXB失败!错误信息:%s\" % (tjbh, e))\n else:\n log.info(\"体检顾客:%s,不存在项目:%s,请确认!\" % (tjbh, xmbh))\n\n\n# 心电图 特殊处理,插入数据库\ndef db_xdt(session,log,tjbh,filename):\n dcp_obj = {}\n dcp_obj['cusn'] = tjbh\n dcp_obj['department'] = '0018'\n 
dcp_obj['filename'] = '%s.PDF' % tjbh\n dcp_obj['filecontent'] = open(filename, 'rb').read()\n dcp_obj['uploadtime'] = cur_datetime()\n dcp_obj['flag'] = '0'\n try:\n session.query(MT_DCP_files).filter(MT_DCP_files.cusn == tjbh).delete()\n session.bulk_insert_mappings(MT_DCP_files, [dcp_obj])\n session.commit()\n except Exception as e:\n session.rollback()\n log.info(\"体检顾客:%s,插入表DCP_files失败!错误信息:%s\" % (tjbh, e))\n\n# 整合数据库处理\ndef dbhandle(session,log,login_id,tjbh,xmbh,xmjg,jcrq,filename,czjl_obj:dict,equip_obj:dict):\n db_czjl(session,log,tjbh,czjl_obj)\n db_equip(session,log,tjbh,xmbh,equip_obj)\n db_jlmx(session,log,tjbh,xmbh,xmjg,jcrq,login_id)\n if xmbh=='0806':\n db_xdt(session,log,tjbh,filename)\n\n# post 请求\ndef api_equip_upload(url, filename):\n file_obj = {\"file\": (filename, open(filename, \"rb\"))}\n try:\n response = requests.post(url, files=file_obj)\n if response.status_code == 200:\n return response.json()\n except Exception as e:\n print('URL:%s 请求失败!错误信息:%s' % (url, e))\n\n#### 电测听取值\ndef minus_list(list1:list,list2:list):\n return list(map(lambda x,y:x - y,list1,list2))\n\ndef merge_list(list1:list):\n return '|'.join([str(i) for i in list1])\n\n#单耳听阈加权值(dB)\ndef f_avg(result:list):\n return int(((result[0]+result[1]+result[2])/3)*0.9+result[4]*0.1)\n\n#双耳高频平均听阈(dB)\ndef f_avg2(result1:list,result2:list):\n return int((result1[3]+result1[4]+result1[5]+result2[3]+result2[4]+result2[5])/6)\n\n#单耳平均听阈(dB)\ndef f_avg3(result:list):\n return int((result[3] + result[4] + result[5]) / 3)\n\n#结果是否合格判断\ndef is_hg(result1:list,result2:list):\n if all([f_avg(result1)<25,f_avg(result2)<25,f_avg2(result1,result2)<40]):\n return '合格'\n else:\n return '不合格'\n\n# 获取气导的数值,骨导不参与计算\ndef get_so_ac_result(file_name):\n with open(file_name,encoding=\"utf-8\") as f:\n content = xmltodict.parse(f.read())\n # 获取本次检查记录\n session = content.get('Session',None)\n if not session:\n return None,None,None,\"检查记录为空\"\n # 获取体检编号\n tjbh = parse_nestdict(session,'Actors.Client.@Id')\n if not tjbh:\n return None,None,None,\"体检编号获取失败\"\n # 获取检查日期\n jcrq_tmp = session.get('@Date',None)\n if not jcrq_tmp:\n jcrq = cur_datetime()\n else:\n jcrq = jcrq_tmp[0:19].replace('T', ' ')\n # 获取结果信息\n results_tmp = session.get('Action',None)\n if not results_tmp:\n return tjbh,jcrq,results_tmp,\"结果信息获取失败\"\n tmp = {}\n result_key = 'Public.TAudioSession.ToneTHRAudiogram.TToneTHRAudiogram' # 结果信息 路径\n so_key_key = 'MeasCond.@SignalOutput1' # 气导、骨导 标记路径 以SignalOutput1 输出为主\n so_result_key = 'Curve.TTonePoint' # 气导、骨导 结果路径,左、右耳\n so_value_key = '@Intensity1' # 结果值 路径\n\n if isinstance(results_tmp,list):\n # 多组结果:可能是骨导+气导,也可能是多组气导\n for result_tmp in results_tmp:\n results = parse_nestdict(result_tmp,result_key)\n for result in results:\n so_key = parse_nestdict(result, so_key_key)\n # 获取列表\n so_value = [int(int(i.get(so_value_key, 0)) / 10) for i in parse_nestdict(result, so_result_key)]\n # 获取字典\n # so_value = dict((int(i.get('@Freq1', 0)),int(int(i.get('@Intensity1', 0)) / 10)) for i in parse_nestdict(result, so_result_key))\n if len(so_value) == 6:\n tmp[so_key] = so_value\n else:\n return tjbh,jcrq,None,\"%s的数值不足6组:%s\" %(so_key,str(so_value))\n\n else:\n # 单组结果,则为气导\n ac_results = parse_nestdict(results_tmp,result_key)\n for ac_result in ac_results:\n so_key = parse_nestdict(ac_result, so_key_key)\n # 获取列表\n so_value = [int(int(i.get(so_value_key, 0)) / 10) for i in parse_nestdict(ac_result, so_result_key)]\n # 获取字典\n # so_value = dict((int(i.get('@Freq1', 0)), int(int(i.get('@Intensity1', 0)) / 10)) for i in 
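parse_nestdict (defined near the end of handle_parse.py) walks a dotted key path through nested dicts and returns the default on any miss, which is what lets get_so_ac_result above probe the parsed XML safely. A tiny illustration (values invented):

doc = {'Actors': {'Client': {'@Id': '178240009'}}}
parse_nestdict(doc, 'Actors.Client.@Id')         # -> '178240009'
parse_nestdict(doc, 'Actors.Client.Missing', 0)  # -> 0 (the default)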
\n# get the air-conduction values; bone conduction is excluded from the calculation\ndef get_so_ac_result(file_name):\n    with open(file_name,encoding=\"utf-8\") as f:\n        content = xmltodict.parse(f.read())\n    # get this examination's session record\n    session = content.get('Session',None)\n    if not session:\n        return None,None,None,\"examination record is empty\"\n    # get the exam number\n    tjbh = parse_nestdict(session,'Actors.Client.@Id')\n    if not tjbh:\n        return None,None,None,\"failed to get the exam number\"\n    # get the examination date\n    jcrq_tmp = session.get('@Date',None)\n    if not jcrq_tmp:\n        jcrq = cur_datetime()\n    else:\n        jcrq = jcrq_tmp[0:19].replace('T', ' ')\n    # get the result information\n    results_tmp = session.get('Action',None)\n    if not results_tmp:\n        return tjbh,jcrq,results_tmp,\"failed to get the result information\"\n    tmp = {}\n    result_key = 'Public.TAudioSession.ToneTHRAudiogram.TToneTHRAudiogram' # path to the result block\n    so_key_key = 'MeasCond.@SignalOutput1' # air/bone-conduction marker path, keyed on the SignalOutput1 output\n    so_result_key = 'Curve.TTonePoint' # air/bone-conduction curve path (left and right ears)\n    so_value_key = '@Intensity1' # path to the measured value\n\n    if isinstance(results_tmp,list):\n        # several result groups: could be bone + air conduction, or several air-conduction runs\n        for result_tmp in results_tmp:\n            results = parse_nestdict(result_tmp,result_key)\n            for result in results:\n                so_key = parse_nestdict(result, so_key_key)\n                # as a list\n                so_value = [int(int(i.get(so_value_key, 0)) / 10) for i in parse_nestdict(result, so_result_key)]\n                # as a dict\n                # so_value = dict((int(i.get('@Freq1', 0)),int(int(i.get('@Intensity1', 0)) / 10)) for i in parse_nestdict(result, so_result_key))\n                if len(so_value) == 6:\n                    tmp[so_key] = so_value\n                else:\n                    return tjbh,jcrq,None,\"%s has fewer than 6 values: %s\" %(so_key,str(so_value))\n\n    else:\n        # a single result group means air conduction\n        ac_results = parse_nestdict(results_tmp,result_key)\n        for ac_result in ac_results:\n            so_key = parse_nestdict(ac_result, so_key_key)\n            # as a list\n            so_value = [int(int(i.get(so_value_key, 0)) / 10) for i in parse_nestdict(ac_result, so_result_key)]\n            # as a dict\n            # so_value = dict((int(i.get('@Freq1', 0)), int(int(i.get('@Intensity1', 0)) / 10)) for i in parse_nestdict(ac_result, so_result_key))\n            if len(so_value) == 6:\n                tmp[so_key] = so_value\n            else:\n                return tjbh, jcrq, None, \"%s has fewer than 6 values: %s\" % (so_key, str(so_value))\n\n\n    return tjbh,jcrq,tmp,''\n\n# take the raw pure-tone audiometry record, apply the age/sex correction, then write it into TJ_EQUIP\ndef insert_cyct(session,login_id,login_name,login_area,tjbh,jcrq,result,host_name,host_ip,xmbh='0310'):\n    # look up the customer's information in the database\n    result_user = session.execute(get_tjxx_sql(tjbh)).fetchone()\n    if not result_user:\n        print(\"No customer information found for exam number %s\" %tjbh)\n        return\n    user_name = str2(result_user[1])  # customer name\n    user_sex = str2(result_user[2])  # customer sex\n    user_age = result_user[-1]  # customer age\n\n    if user_age>=22:\n        #result = session.execute(standard_audition_sql(user_age, user_sex)).fetchone()\n        # if result:\n        #     standard_result = dict(zip([500,1000,2000,3000,4000,6000],[int(i[1]) for i in result]))\n        results = session.execute(standard_audition_sql(user_age,user_sex)).fetchall()\n        if results:\n            result_tmp = sorted(results[0].items(), key=lambda item: item[0])\n            standard_result = [int(i[1]) for i in result_tmp]\n        else:\n            standard_result = [0, 0, 0, 0, 0, 0]\n            # standard_result = {500: 0, 1000: 0, 2000: 0, 3000: 0, 4000: 0, 6000: 0}\n    else:\n        standard_result = [0, 0, 0, 0, 0, 0]\n        # standard_result = {500: 0, 1000: 0, 2000: 0, 3000: 0, 4000: 0, 6000: 0}\n    #\n    result[\"so_ACR_new\"] = minus_list(result[\"so_ACR\"], standard_result)\n    result[\"so_ACL_new\"] = minus_list(result[\"so_ACL\"], standard_result)\n    result[\"so_ACR_init\"] = standard_result\n    result[\"so_ACL_init\"] = standard_result\n    xmzd = is_hg(result[\"so_ACL_new\"], result[\"so_ACR_new\"])\n    # assemble the corrected results; the ms4 labels (kept in Chinese, stored in the DB) are:\n    # right-ear speech-frequency average, right-ear high-frequency average,\n    # left-ear speech-frequency average, left-ear high-frequency average,\n    # binaural high-frequency average threshold\n    ms1 = merge_list(result[\"so_ACR\"]) + '|' + merge_list(result[\"so_ACL\"])\n    ms2 = merge_list(result[\"so_ACR_new\"]) + '|' + merge_list(result[\"so_ACL_new\"])\n    ms3 = merge_list(result[\"so_ACR_init\"]) + '|' + merge_list(result[\"so_ACL_init\"])\n    ms4 = \"右耳平均语频=%s\\n\" \\\n          \"右耳平均高频=%s\\n\" \\\n          \"左耳平均语频=%s\\n\" \\\n          \"左耳平均高频=%s\\n\" \\\n          \"双耳高频平均听阈=%s\" % (f_avg(result[\"so_ACR_new\"]), f_avg3(result[\"so_ACR_new\"]),\n                           f_avg(result[\"so_ACL_new\"]), f_avg3(result[\"so_ACL_new\"]),\n                           f_avg2(result[\"so_ACR_new\"], result[\"so_ACL_new\"]))\n\n    xmjg = '||||' + ms1 + '|||||' + ms2 + '|||||' + ms3 + '|$' + ms4\n    try:\n        # table TJ_EQUIP\n        result = session.query(MT_TJ_EQUIP).filter(MT_TJ_EQUIP.tjbh==tjbh,MT_TJ_EQUIP.xmbh==xmbh).scalar()\n        if result:\n            # do not overwrite the first operator\n            session.query(MT_TJ_EQUIP).filter(MT_TJ_EQUIP.tjbh==tjbh,MT_TJ_EQUIP.xmbh==xmbh).update({\n                MT_TJ_EQUIP.operate_time:jcrq,\n                MT_TJ_EQUIP.equip_jg1:xmjg,\n                MT_TJ_EQUIP.equip_jg2:xmzd\n            })\n            zxpb = '1'\n            jsbz = '1'\n        else:\n            session.execute(insert_dct_sql(tjbh,cur_datetime(),user_name,jcrq,host_name,host_ip,xmjg,xmzd,login_id,login_name,login_area))\n            zxpb = '3'\n            jsbz = '0'\n        # table TJ_TJJLMXB\n        session.query(MT_TJ_TJJLMXB).filter(MT_TJ_TJJLMXB.tjbh==tjbh,MT_TJ_TJJLMXB.zhbh==xmbh).update({\n            MT_TJ_TJJLMXB.zxpb: zxpb,\n            MT_TJ_TJJLMXB.jsbz: jsbz,\n            MT_TJ_TJJLMXB.qzjs: None,\n            MT_TJ_TJJLMXB.jcrq: jcrq,\n            MT_TJ_TJJLMXB.jcys: login_id,\n            MT_TJ_TJJLMXB.jg: xmjg,\n            MT_TJ_TJJLMXB.zd: xmzd,\n            MT_TJ_TJJLMXB.shrq: jcrq,\n            MT_TJ_TJJLMXB.shys: '120502002',\n        })\n        # table TJ_CZJLB is not touched here; it is handled when the PDF file is uploaded\n        session.commit()\n        print(\"%s customer %s: pure-tone audiometry result (%s) parsed successfully!\" % (cur_datetime(),user_name,xmjg))\n    except Exception as e:\n        session.rollback()\n        print(\"Database error: %s\" %e)\n\n# make sure the path exists; create it if missing\ndef mkdir(path,log):\n    try:\n        if not os.path.isdir(path):\n            os.makedirs(path)\n        log.info(\"Path %s is now being monitored.\" %path)\n        return True\n    except Exception as e:\n        log.info('Path %s is invalid, please fix it! Error: %s' %(path,e))\n        return False\n\n# equipment display names (values kept in Chinese as the system stores/displays them:\n# 01 audiometry, 02 body composition (deployed), 03 body composition, 04 bone density,\n# 05 ultrasound bone density, 06 arteriosclerosis, 07 fecal occult blood, 08 ECG,\n# 11 lung function, 12 chest PA)\nEquipName={\n    '01':'电测听',\n    '02':'人体成分(投放)',\n    '03':'人体成分',\n    '04':'骨密度',\n    '05':'超声骨密度',\n    '06':'动脉硬化',\n    '07':'大便隐血',\n    '08':'心电图',\n    '11':'肺功能',\n    '12':'胸部正位'\n}\n\n# equipment item codes\nEquipNo={\n    '01':'0310',\n    '02':'5402',\n    '03':'5402',\n    '04':'501576',\n    '05':'1000074',\n    '06':'5401',\n    '07':'2113',\n    '08':'0806',\n    '11':'0045',\n    '12':'501716'\n}\n\n# equipment action points\nEquipAction={\n    '01':'0023',\n    '02':'0022',\n    '03':'0022',\n    '04':'0020',\n    '05':'1000074',\n    '06':'0024',\n    '07':'2113',\n    '08':'0021',\n    '11':'0045',\n    '12':'501716'\n}\n\n# equipment action names ('…检查' = '… examination'; 大肠癌检查 = colorectal-cancer screening, DR检查 = DR examination)\nEquipActionName={\n    '01':'电测听检查',\n    '02':'人体成分检查',\n    '03':'人体成分检查',\n    '04':'骨密度检查',\n    '05':'超声骨密度检查',\n    '06':'动脉硬化检查',\n    '07':'大肠癌检查',\n    '08':'心电图检查',\n    '11':'肺功能检查',\n    '12':'DR检查'\n}\n\n# walk a nested dict along a dotted key path\ndef parse_nestdict(nestdict:dict,keys:str,default=None):\n    keys_list = keys.split('.')\n    tmp = nestdict\n    for key in keys_list:\n        if isinstance(tmp,dict):\n            val = tmp.get(key, None)\n        else:\n            val = None\n        if val!= None:\n            tmp = val\n        else:\n            return default\n\n    return tmp\n
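\n# --- Editor's sketch (added; not part of the original source): parse_nestdict\n# walks one dotted key path into a nested dict and falls back to `default`\n# for any missing key, e.g.:\n#     parse_nestdict({'Actors': {'Client': {'@Id': '178240009'}}}, 'Actors.Client.@Id')\n#     -> '178240009'\n#     parse_nestdict({}, 'Actors.Client.@Id') -> None\n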
\nif __name__==\"__main__\":\n    from pprint import pprint\n    pprint(get_so_ac_result(r\"E:\\DR\\create\\01\\My Suite\\178240009 龙元明, (男) 2019-01-02T14.58.02.1866546+08.00.gnd\"))\n\n    # for root, dirs, files in os.walk(r\"E:\\DR\\create\\01\\My Suite\"):\n    #     if files and not dirs:  # must be a direct subdirectory of the given directory\n    #         for file in files:\n    #             try:\n    #                 print(get_so_ac_result(os.path.join(root, file)))\n    #             except Exception as e:\n    #                 print(os.path.join(root, file))","sub_path":"app_equip/handle_parse.py","file_name":"handle_parse.py","file_ext":"py","file_size_in_byte":20945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"446893539","text":"morse_dict = {\n    \"A\": \"* ***\",\n    \"B\": \"*** * * *\",\n    \"C\": \"*** * *** *\",\n    \"D\": \"*** * *\",\n    \"E\": \"*\",\n    \"F\": \"* * *** *\",\n    \"G\": \"*** *** *\",\n    \"H\": \"* * * *\",\n    \"I\": \"* *\",\n    \"J\": \"* *** *** ***\",\n    \"K\": \"*** * ***\",\n    \"L\": \"* *** * *\",\n    \"M\": \"*** ***\",\n    \"N\": \"*** *\",\n    \"O\": \"*** *** ***\",\n    \"P\": \"* *** *** *\",\n    \"Q\": \"*** *** * ***\",\n    \"R\": \"* *** *\",\n    \"S\": \"* * *\",\n    \"T\": \"***\",\n    \"U\": \"* * ***\",\n    \"V\": \"* * * ***\",\n    \"W\": \"* *** ***\",\n    \"X\": \"*** * * ***\",\n    \"Y\": \"*** * *** ***\",\n    \"Z\": \"*** *** * *\",\n    \" \": \" \",\n}\n\n\n# def translate_to_and_from_morse(phrase):\n#     translated = []\n#     morse_word_break = \" \" * 7\n#     morse_letter_break = \" \" * 3\n\n#     if phrase[0].isalpha():\n#         phrase_list = list(phrase.upper())\n#         previous_character = \"\"\n\n#         for character in phrase_list:\n#             if previous_character.isalpha() and character.isalpha():\n#                 translated.append(morse_letter_break)\n\n#             translated.append(morse_dict[character])\n#             previous_character = character\n#     else:\n#         word_list = phrase.split(morse_word_break)\n#         previous_word = \"\"\n#         for word in word_list:\n#             letters = word.split(morse_letter_break)\n#             if previous_word != \"\":\n#                 translated.append(\" \")\n#             previous_word = word\n#             for letter in letters:\n#                 for k, v in morse_dict.items():\n#                     if letter == v:\n#                         translated.append(k)\n\n#     return \"\".join(translated)\n\n\nmorse_word_break = \" \" * 7\nmorse_letter_break = \" \" * 3\n\n\ndef translate_into_english(morse_phrase):\n    translated = []\n    previous_word = \"\"\n    word_list = morse_phrase.split(morse_word_break)\n\n    for word in word_list:\n        letters = word.split(morse_letter_break)\n        if previous_word != \"\":\n            translated.append(\" \")\n        previous_word = word\n        for letter in letters:\n            for k, v in
morse_dict.items():\n if letter == v:\n translated.append(k)\n return \"\".join(translated)\n\n\ndef translate_into_morse(english_phrase):\n translated = []\n previous_character = \"\"\n phrase_list = list(english_phrase.upper())\n\n for character in phrase_list:\n if previous_character.isalpha() and character.isalpha():\n translated.append(morse_letter_break)\n translated.append(morse_dict[character])\n previous_character = character\n return \"\".join(translated)\n\n\ndef translate_to_and_from_morse(phrase):\n if phrase[0].isalpha():\n return translate_into_morse(phrase)\n else:\n return translate_into_english(phrase)\n\n\nhidden_message = \"*** * *** * * *** *** * * * * *** * *** * * *** * *** * *** *** *** *** *** *** * *** * *\"\neng_message = \"CATS ARE COOL\"\nprint(translate_to_and_from_morse(eng_message))\n","sub_path":"coding_problems/morse_refactor.py","file_name":"morse_refactor.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"480849882","text":"'''\n Write a function to find the longest common prefix string amongst an array of strings.\n\n If there is no common prefix, return an empty string \"\".\n\n Examples:\n\n Input: [\"flower\",\"flow\",\"flight\"]\n Output: \"fl\"\n\n\n\n Input: [\"dog\",\"racecar\",\"car\"]\n Output: \"\"\n Explanation: There is no common prefix among the input strings.\n'''\n\ndef longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n prefixes=[]\n num = len(strs)\n for x in zip(*strs): #iterate through n-shortest string, making a tuple of each character\n if len(set(x)) == 1: #test to see if they're all the same \n prefixes.append(x[0]) #append if they are\n else:\n break #break out if not\n return \"\".join(prefixes) #return result.","sub_path":"MonthOne/Week 1/Arrays/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272461934","text":"import string\nfrom math import floor\n\nclass Encoder:\n # use this to encode URL to Base62 string\n def encode(self, inpt, base=62):\n # base validation\n if base <= 0 or base > 62:\n return 0\n\n # we want combinations of [a-z][A-Z][0-9]\n the_base = string.digits + string.lowercase + string.uppercase\n\n remainder = inpt % base\n result = the_base[remainder]\n\n quotient = floor(inpt / base)\n # loop and keep dividing\n while quotient:\n remainder = quotient % base\n quotient = floor(quotient / base)\n # add new encoded to the result\n result = the_base[int(remainder)] + result\n return result","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69884909","text":"# -*- coding: utf-8 -*-\nfrom Slevanda_python_training.Model.contact import Contact\n\n\ndef test_create_new_contact(app):\n app.contact.create_new_contact(Contact(firstname=\"First name\", middle=\"Middle name\", lastname=\"Last name\", nick=\"Nickname\",\n title=\"Title\", company=\"Company\", address=\"Address\", homephone=\"Home telephone\",\n mobilephone=\"Mobile telephone\", workphone=\"Work telephone\",\n fax_telephone=\"Fax telephone\", email_1=\"Email 1\", email_2=\"Email 2\",\n email_3=\"Email 3\", home_page=\"Homepage\", bday=\"1\", bmonth=\"January\", byear=\"1980\",\n aday=\"18\", amonth=\"July\", 
ayear=\"1998\", address_2=\"Address\",\n                                                          secondaryphone=\"Home address\", notes=\"Notes\"))\n    app.session.logout()\n\n\n","sub_path":"test/New_Update_Contact(AddressBook).py","file_name":"New_Update_Contact(AddressBook).py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"462226682","text":"#!/usr/bin/env python\n\n\"\"\"Test wrapper to /bin/draw_bar_chart_of_clusters.py.\nThe defaults are the actual test data. So it only needs to be called\nas METAPY.py\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\n\nINFILE = os.path.join(\"tests\", \"test_data\", \"bar_charts\",\n                      \"swarm.outRENAMED_abundance\")\n\nOTU_DATABASE_SWARM = os.path.join(\"tests\", \"test_data\", \"bar_charts\",\n                                  \"ITS_db_NOT_confirmed_for_swarm.fasta\")\n\n\ndef test_METAPY_exec():\n    \"\"\"Run METAPY.py on test data and compare output to\n    precomputed target. The default options are the actual\n    test data. So we only need to call the program\n    \"\"\"\n    prog = os.path.join(\"bin\", \"draw_bar_chart_of_clusters.py\")\n    temp_s = [\"python3\",\n              prog,\n              \"-i\",\n              INFILE,\n              \" --db\",\n              OTU_DATABASE_SWARM]\n    cmd_s = ' '.join(temp_s)\n    pipe = subprocess.run(cmd_s, shell=True,\n                          stdout=subprocess.PIPE,\n                          stderr=subprocess.PIPE,\n                          check=True)\n    if not os.path.isfile(INFILE + \"_barchart.png\"):\n        sys.exit(\"outfile not generated: %s\" % INFILE + \"_barchart.png\")  # was sys_exit (undefined name)\n","sub_path":"metapy/tests/test_script_bar_charts.py","file_name":"test_script_bar_charts.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"389620840","text":"# -*- encoding: utf-8 -*-\nfrom .models import Sale, DetailSale, ImageSale\nfrom .serializers import SaleSerializer, DetailSaleSerializer, ImageSaleSerializer\nfrom rest_framework import viewsets\nfrom rest_framework import filters\n\n\nclass SaleViewSet(viewsets.ModelViewSet):\n    serializer_class = SaleSerializer\n    queryset = Sale.objects.all()\n    filter_backends = ( filters.DjangoFilterBackend, )\n    filter_fields = ('id', 'user', )\n\n    def perform_create(self, serializer):\n        serializer.save(user=self.request.user)\n\n    def get_queryset(self):\n        \"\"\"\n        This view should return a list of all the directions\n        for the currently authenticated user.\n        \"\"\"\n        user = self.request.user\n        return Sale.objects.filter(user=user)\n\n\nclass DetailSaleViewSet(viewsets.ModelViewSet):\n    serializer_class = DetailSaleSerializer\n    queryset = DetailSale.objects.all()\n    filter_backends = ( filters.DjangoFilterBackend, )\n    filter_fields = ('sale', )\n\n\nclass ImageSaleViewSet(viewsets.ModelViewSet):\n    serializer_class = ImageSaleSerializer\n    queryset = ImageSale.objects.all()\n    filter_backends = ( filters.DjangoFilterBackend, )\n    filter_fields = ('sale', )","sub_path":"carrito/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"529338226","text":"from .client import HTTPClient\nimport requests\n\n\nclass OpenNMTPredictHTTPClient(HTTPClient):\n\n    def request(self, address):\n        raise NotImplementedError()\n\n    def request_many(self, addresses):\n        raise NotImplementedError()\n\n\nclass NMTPredictHTTPClient(OpenNMTPredictHTTPClient):\n\n    def __init__(self,\n                 model_name,\n                 host,\n                 port,\n                 version,\n                 timeout=30):\n        super().__init__(model_name, host, port, version, timeout)\n        base_url = \"http://\" + self.host + \":\" + str(self.port) + \"/v1/models/\" + self.model_name\n        if version:\n            self.url = base_url + \"/versions/\" + str(self.model_version) + \":predict\"\n        else:\n            self.url = base_url + \":predict\"\n
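\n    # --- Editor's note (added; not part of the original source): this is the\n    # TensorFlow Serving REST layout, /v1/models/<name>[/versions/<v>]:predict.\n    # Assuming host 'localhost', TF Serving's default REST port 8501 and a model\n    # named 'nmt', self.url would come out as:\n    #     http://localhost:8501/v1/models/nmt:predict\n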
\n    def request(self, address):\n        try:\n            resp = requests.post(self.url, json={'instances': [address, ]}, timeout=self.timeout)\n        except requests.exceptions.Timeout as e:  # was the builtin TimeoutError, which requests does not raise\n            print(e)\n            return address, \"\"\n        except requests.exceptions.ConnectionError as e:  # was the builtin ConnectionError, which requests' errors do not subclass\n            print(e)\n            return address, \"\"\n        if resp.status_code != 200:\n            return address, \"\"\n        resp.encoding = \"utf8\"\n        result = resp.json()\n        return address, result['predictions'][0]\n\n    def request_many(self, addresses):\n        try:\n            resp = requests.post(self.url, json={'instances': addresses}, timeout=self.timeout)\n        except requests.exceptions.Timeout as e:\n            print(e)\n            return self._return_empty(addresses)\n        except requests.exceptions.ConnectionError as e:\n            print(e)\n            return self._return_empty(addresses)\n        if resp.status_code != 200:\n            return self._return_empty(addresses)\n        resp.encoding = \"utf8\"\n        result = resp.json()\n        results = []\n        for i, o in zip(addresses, result[\"predictions\"]):\n            results.append((i, o))\n        return results\n\n    @staticmethod\n    def _return_empty(addresses):\n        results = []\n        for a in addresses:\n            results.append((a, \"\"))\n        return results\n","sub_path":"client/onmt_http_client.py","file_name":"onmt_http_client.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"623260894","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 10 10:31:06 2017\n\n@author: codeplay2017\n\"\"\"\n#from __future__ import absolute_import\n#from __future__ import division\n#from __future__ import print_function\n#from __future__ import unicode_literals\n#\n#from builtins import str\n## etc., as needed\n#\n#from future import standard_library\n#standard_library.install_aliases()\n\nimport tensorflow as tf\nimport numpy as np\nimport os, pickle\nfrom make_data_pai import ImgDataSet\nfrom tensorflow.python import pywrap_tensorflow\nimport matplotlib.pyplot as plt\n\nimport utils.networks as nt\n\n#####-----------structure parameters-----------------\nINPUT_SIZE = 4096\nSTRIDE1 = 1 # stride for the first layer\nKEEP_PROB = [1,1]\n\n####-----------------------------------------------------------------------\ncwd = os.getcwd()\nsource_dir = os.path.join(os.path.join(cwd,'..'),'')\ndata_dir = os.path.join(source_dir, 'resources/data4raw_5speeds_4096_step2/') # ubuntu\n#data_dir = os.path.join(source_dir, 'resources\\\\data4raw_5speeds_4096_step2\\\\') # windows\n\n#model_path = os.path.join(out_dir, 'observation/171220/raw_5speed/2017-12-15_17:20:18/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/raw_1speed/50Hz/2017-12-15_16:11:15/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/angle_1speed/50Hz/2017-12-18_10:25:16/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/raw_2speed/2017-12-16_11:04:30/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/raw_2speed/10,30,50/2017-12-18_16:42:39/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/fft_5speed/2017-12-19_11:45:35/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/fft_1speed/10,30,50/2017-12-20_15:22:58/model.ckpt')\n#model_path = os.path.join(out_dir, 'observation/171220/afft_5speeds/2017-12-20_20:59:11/model.ckpt')\n#model_path = os.path.join(source_dir, 'models\\\\deep_networks\\\\alex\\\\exp2\\\\2018-02-20_170507\\\\model.ckpt') # 
windows\nmodel_path = os.path.join(source_dir, 'models/deep_networks/VGG19/exp5/2018-03-23_130140/model.ckpt')\n####-----------------------------------------------------------------------\n#%%\nreader = pywrap_tensorflow.NewCheckpointReader(model_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\n# Print tensor name and values\nkeylist = []\nfor key in var_to_shape_map:\n keylist.append(key)\nlist.sort(keylist)\n#%%\ndef load_test_data(test_speed):\n testset = ImgDataSet()\n num_testfile = 3*len(test_speed)\n for ii in range(num_testfile):\n# resource_path = FLAGS.buckets\n# data_path = os.path.join(resource_path.replace('step_2400','step20_test'),'input_data_t_'+str(ii+1)+'.pkl')\n temp = test_speed[ii//3]\n index = ii%3\n file_index = int(temp/10+index*5)\n print(file_index,end=',')\n data_path = os.path.join(data_dir,'input_data_t_'+str(file_index)+'.pkl')\n with tf.gfile.GFile(data_path, 'rb') as f:\n data = pickle.load(f, encoding='iso-8859-1')\n testset.join_data(data)\n testset.make(shuffle=True,clean=True)\n return testset\n\ndef load_train_data(test_speed):\n print(\"loading data...\")\n trainset = ImgDataSet()\n num_trainfile = 15*len(test_speed)\n for ii in range(num_trainfile):\n# data_path = os.path.join(FLAGS.buckets,'input_data_cwt_0-50_'+str(ii+1)+'.pkl')\n temp = test_speed[ii//15]\n index = ii%15\n file_index = int(index//5*25 + (temp/2-index%5))\n print(file_index,end=',')\n data_path = os.path.join(data_dir,'input_data_'+str(file_index)+'.pkl')\n with tf.gfile.GFile(data_path, 'rb') as f:\n data = pickle.load(f, encoding='iso-8859-1')\n trainset.join_data(data)\n trainset.make(shuffle=True,clean=True)\n print('num of train sample is '+str(trainset.num_examples()))\n return trainset\n\ndef main(): # _ means the last param\n # Create the model\n x = tf.placeholder(tf.float32, [None, INPUT_SIZE])\n y_ = tf.placeholder(tf.float32, [None, 3])\n is_training = tf.placeholder(tf.bool)\n keep_prob0 = tf.placeholder(tf.float32)\n keep_prob1 = tf.placeholder(tf.float32)\n keep_prob2 = tf.placeholder(tf.float32)\n \n y_conv = nt.VGG19(x, is_training,keep_prob0, keep_prob1, keep_prob2)\n \n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n\n ### load data\n test_speed = [40]\n testset = load_test_data(test_speed)\n# testset = load_train_data(test_speed)\n \n num_of_example = testset.num_examples()\n print('\\nnumber of test examples is ', num_of_example)\n \n with tf.Session() as sess:\n saver = tf.train.Saver()\n saver.restore(sess, model_path)\n\n ### test accuracy of model\n correct = 0\n count = 0\n test_step = 1\n for ii in range(num_of_example//test_step):\n count += 1\n test_batch = testset.next_batch(test_step)\n test_feed = {x: test_batch[0], y_: test_batch[1],\n keep_prob0:1.0, keep_prob1: 1.0, keep_prob2: 1.0, is_training: False}\n# test_accuracy, test_out = sess.run([accuracy, tf.argmax(y_conv,1)],\n# feed_dict=test_feed)\n test_accuracy, test_out = sess.run([accuracy, y_conv],\n feed_dict=test_feed)\n# if np.abs(test_accuracy - 1.0) > 0.01:\n# print(str(ii+1),test_batch[1],test_out)\n correct += test_accuracy\n test_batch = testset.next_batch(testset.num_examples())\n test_acc,test_out = sess.run([accuracy,y_conv], feed_dict={x:test_batch[0], y_:test_batch[1],\n keep_prob0:1,keep_prob1:1,keep_prob2:1,\n is_training: False})\n \n\n conv1 = reader.get_tensor('conv2/W_conv')\n# plot_conv(conv1)\n 
print('accuracy is ', correct/count, test_acc)\n\ndef plot_conv(conv_tensor):\n num_filter_in = conv_tensor.shape[-2]\n num_filter_out = conv_tensor.shape[-1]\n print(num_filter_in, num_filter_out)\n# num_feature = conv_tensor.shape[0]\n fig = plt.figure()\n for ii in range(num_filter_in):\n for jj in range(num_filter_out):\n plt.subplot(num_filter_in,num_filter_out,ii*num_filter_out+jj+1)\n plt.plot(np.abs(np.fft.fft((conv_tensor[:,0,ii,jj]))))\n \n \n \n \n#########------custom functions----------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"python/src/test_cnn_local.py","file_name":"test_cnn_local.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"100518487","text":"from yacs.config import CfgNode as CN\n\n_C = CN()\n\n_C.MODEL = CN()\n_C.MODEL.DEVICE = \"cuda\"\n_C.MODEL.THRESHOLD = 0.5\n_C.MODEL.NUM_CLASSES = 21\n_C.MODEL.NEG_POS_RATIO = 3\n_C.MODEL.CENTER_VARIANCE = 0.1\n_C.MODEL.SIZE_VARIANCE = 0.2\n\n_C.MODEL.BACKBONE = CN()\n_C.MODEL.BACKBONE.NAME = 'vgg'\n_C.MODEL.BACKBONE.OUT_CHANNELS = (512, 1024, 512, 256, 256, 256)\n_C.MODEL.BACKBONE.PRETRAINED = True\n\n# -----------------------------------------------------------------------------\n# PRIORS\n# -----------------------------------------------------------------------------\n_C.MODEL.PRIORS = CN()\n_C.MODEL.PRIORS.FEATURE_MAPS = [38, 19, 10, 5, 3, 1]\n_C.MODEL.PRIORS.STRIDES = [8, 16, 32, 64, 100, 300]\n_C.MODEL.PRIORS.MIN_SIZES = [30, 60, 111, 162, 213, 264]\n_C.MODEL.PRIORS.MAX_SIZES = [60, 111, 162, 213, 264, 315]\n_C.MODEL.PRIORS.ASPECT_RATIOS = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]\n_C.MODEL.PRIORS.BOXES_PER_LOCATION = [4, 6, 6, 6, 4, 4]\n_C.MODEL.PRIORS.CLIP = True\n\n_C.MODEL.BOX_HEAD = CN()\n_C.MODEL.BOX_HEAD.NAME = 'SSDBoxHead'\n_C.MODEL.BOX_HEAD.PREDICTOR = 'SSDBoxPredictor'\n\n_C.INPUT = CN()\n_C.INPUT.IMAGE_SIZE = 300\n_C.INPUT.PIXEL_MEAN = [123, 117, 104]\n\n_C.DATASETS = CN()\n_C.DATASETS.TRAIN = ()\n_C.DATASETS.TARGET = ()\n_C.DATASETS.TEST = ()\n\n_C.DATA_LOADER = CN()\n_C.DATA_LOADER.NUM_WORKERS = 0\n_C.DATA_LOADER.PIN_MEMORY = True\n\n_C.SOLVER = CN()\n_C.SOLVER.MAX_ITER = 120000\n_C.SOLVER.LR_STEPS = [80000, 100000]\n_C.SOLVER.GAMMA = 0.1\n_C.SOLVER.BATCH_SIZE = 32\n_C.SOLVER.BACKBONELR = 1e-3\n_C.SOLVER.BOXHEADLR = 1e-3\n_C.SOLVER.DOMAINDISCRIMINATORLR = 1e-3\n_C.SOLVER.MOMENTUM = 0.9\n_C.SOLVER.WEIGHT_DECAY = 5e-4\n_C.SOLVER.WARMUP_FACTOR = 1.0 / 3\n_C.SOLVER.WARMUP_ITERS = 500\n_C.SOLVER.LAMBDA = 0.5\n\n_C.TEST = CN()\n_C.TEST.NMS_THRESHOLD = 0.45\n_C.TEST.CONFIDENCE_THRESHOLD = 0.01\n_C.TEST.MAX_PER_CLASS = -1\n_C.TEST.MAX_PER_IMAGE = 100\n_C.TEST.BATCH_SIZE = 10\n\n_C.OUTPUT_DIR = 'outputs'\n","sub_path":"ssd/ssd/config/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377925380","text":"#!/usr/bin/python\n\nlow=0\nhigh=100\n\nprint('Please think of a number between 0 and 100!')\n\nwhile True: \n guess=(low+high)/2\n print('Is your secret number ' + str(guess) + ' ?')\n usr=raw_input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. \")\n if usr=='h':\n high=guess\t\n elif usr=='l':\n low=guess\n elif usr=='c':\n print('Game over. 
Your secret number was: ' + str(guess))\n        break\n    else:\n        print('Sorry, I did not understand your input.')\n","sub_path":"MyPythonPrograms/bisectionsearch.py","file_name":"bisectionsearch.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"582831227","text":"# print(\"Student Information:- \");\n# Name = \"Aman Rajan Singh\";\n# Div = \"CSE-17B\";\n# Enrollmentno = 200303105095;\n# Contact_No = 9512888760;\n# print(Name);\n# print(Div);\n# print(Enrollmentno);\n# print(Contact_No);\n# print(\"\\n\");\n\ndays = 140;\nmonths = int(days/30);\nremaindays = days%30;\nprint(months,remaindays);\n\n\n\n\n","sub_path":"pythonProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"55820631","text":"# User function template for Python\r\n\r\nclass Solution:\r\n    def binarysearch(self, arr, n, k):\r\n        l = 0\r\n        r = n - 1\r\n        while (l <= r):\r\n            mid = (l + r) // 2\r\n            if arr[mid] == k:\r\n                return mid\r\n            if arr[mid] > k:\r\n                r = mid - 1\r\n            if arr[mid] < k:\r\n                l = mid + 1\r\n\r\n        return -1\r\n\r\n\r\n# {\r\n# Driver Code Starts\r\n# Initial template for Python\r\n\r\nif __name__ == '__main__':\r\n    t = int(input())\r\n    for i in range(t):\r\n        n = int(input())\r\n        arr = list(map(int, input().strip().split(' ')))\r\n        k = int(input())\r\n        ob = Solution()\r\n        print(ob.binarysearch(arr, n, k))\r\n\r\n# } Driver Code Ends","sub_path":"week 2/searching and sorting/Binary Search.py","file_name":"Binary Search.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"6037515","text":"import numpy as np\nimport pickle\nimport sys\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.nn import functional as F\nimport random\nimport pandas as pd\n\nseed=1\ntorch.manual_seed(seed)\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.set_default_dtype(torch.float64)\n\n\ndef load_file(path):\n    with open(path, 'rb') as f:\n        file = pickle.load(f)\n    if type(file) is not np.ndarray:\n        file = np.array(file)\n    return file\n\n\ndef get_acc(labels,outputs):\n    _,predicted = torch.max(outputs.data, 1)\n    data_number = labels.shape[0]*1.0  # was y.shape[0], a global; and the division below used the undefined name data_num\n    correct_num = (predicted==labels).sum().item()\n    accuracy = correct_num/data_number\n    return accuracy\n\n\ndf = pd.read_csv('./full.csv')\nX = df[['dep','octanol','octanoic','pentanol','temperature','humidity']]\ny = df[['average_speed','average_number_of_droplets_last_second','max_average_single_droplet_speed','average_number_of_droplets']]\nX = X.to_numpy()\ny = y.to_numpy()\nX = (X-X.min())/(X.max()-X.min())\ny= (y-y.min())/(y.max()-y.min())\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=12)\n\nX_train = torch.from_numpy(X_train).type(torch.DoubleTensor)\ny_train = torch.from_numpy(y_train).type(torch.DoubleTensor)\nX_test = torch.from_numpy(X_test).type(torch.DoubleTensor)\ny_test = torch.from_numpy(y_test).type(torch.DoubleTensor)\ntorch.set_printoptions(precision=10)\n\nstart_time=time.time()  # needs the time import added above\nmynet = torch.nn.Sequential(\n            torch.nn.Linear(6,5),\n            torch.nn.ReLU(),\n            torch.nn.Linear(5,4)\n        )\n\noptimiser = torch.optim.Adam(mynet.parameters(),lr=0.1)\n# loss_func = torch.nn.MSELoss(reduction='mean')\nloss_func = torch.nn.MSELoss(reduction='none')\n\nlosses=[]\n
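# --- Editor's note (added; not part of the original source): with\n# reduction='none', MSELoss returns one loss per element (shape [N, 4] here)\n# instead of a scalar, so the training loop below has to reduce it itself via\n# torch.mean() before calling backward(). For instance:\n#     torch.nn.MSELoss(reduction='none')(torch.ones(2, 2), torch.zeros(2, 2))\n#     # -> tensor([[1., 1.], [1., 1.]]); with reduction='mean' -> tensor(1.)\n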
# The earlier mean-reduction version of this loop, kept for reference:\n# for t in range(300):\n#     out = mynet(X_train)\n#     loss = loss_func(out, y_train)\n#     optimiser.zero_grad()\n#     loss.backward()\n#     optimiser.step()\n#     if t%1 == 0:\n#         print(loss.item())\n#     losses.append(loss)\nfor t in range(300):\n    out = mynet(X_train)\n    loss = loss_func(out, y_train)\n    loss_mean = torch.mean(loss)\n    optimiser.zero_grad()\n    loss_mean.backward()\n    optimiser.step()\n    if t%1 == 0:\n        print(loss.detach())\n    losses.append(loss)\ntrain_loss=loss.detach().numpy()\ntrain_loss=np.sum(train_loss,axis=0)\n\nend_time=time.time()\nprint('training loss is', train_loss/X_train.shape[0])\ny_pred=mynet(X_test)\nprint('ypred',y_pred)\nprint('ytest',y_test)\nmse=sum((y_pred-y_test)**2)/y_test.shape[0]\nperc_error= sum((y_pred-y_test)/y_test)/y_test.shape[0]\nprint(\"MSE\",mse)\nprint('perc_error',perc_error)\n\nprint('total_time',end_time- start_time)","sub_path":"NN_compare_2.py","file_name":"NN_compare_2.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"350197148","text":"import os\nimport base64\n\nfrom passlib.hash import pbkdf2_sha256\nfrom flask import Flask, render_template, request, redirect, url_for, session\nfrom model import Donor, Donation \n\napp = Flask(__name__)\n#app.secret_key = b'Fb\\xf6U\\xce\\xcf\\x9eq\\xfc\\xcc\\x84\\xba\\x91B\\xf2\\xb0\\x17\\x07\\xdc\\x99)NV('\napp.secret_key = os.environ.get('SECRET_KEY').encode()\n\n@app.route('/')\ndef home():\n    return redirect(url_for('all'))\n\n@app.route('/donations/')\ndef all():\n    donations = Donation.select()\n    return render_template('donations.jinja2', donations=donations)\n\n
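# --- Editor's note (added; not part of the original source): the POST branch\n# below re-queries every Donor just to check for an existing name. If `model`\n# is a peewee module (Donor.select()/.get() suggest so), peewee's built-in\n# upsert helper could replace both branches:\n#     donor, created = Donor.get_or_create(name=request.form['name'])\n#     Donation(donor=donor, value=request.form['amount']).save()\n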
@app.route('/create', methods=['GET', 'POST'])\ndef create():\n\n    if request.method == \"POST\":\n        if request.form['name'] not in [donor.name for donor in Donor.select()]:\n            try:\n                new_donor = Donor(name=request.form['name'])\n                new_donor.save()\n                Donation(donor=new_donor, value=request.form['amount']).save()\n                return redirect(url_for('all'))\n            except Exception as ex:\n                print(\"Whoops! Something went wrong!\")\n                print(ex)\n                return render_template('create.jinja2')\n        else:\n            try:\n                donor_obj = Donor.get(Donor.name==(request.form['name']))\n                donation = int(request.form['amount'])\n                new_Donation = Donation(donor=donor_obj, value=donation)\n                new_Donation.save()\n                return redirect(url_for('all'))\n            except Exception as ex:\n                print(\"Whoops! Something went wrong!\")\n                print(ex)\n                return render_template('create.jinja2')\n\n    else:\n        return render_template('create.jinja2')\n\n\nif __name__ == \"__main__\":\n    port = int(os.environ.get(\"PORT\", 6738))\n    app.run(host='0.0.0.0', port=port)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"370183644","text":"###Code created by Luis Gerardo Sandoval Rodriguez\r\n###Before starting, check that the required modules are installed; otherwise the program exits\r\ntry:\r\n\timport ntplib\r\nexcept ImportError:\r\n\tprint(\"Module 'ntplib' is not installed\")\r\n\texit()\r\ntry:\r\n\timport pytz\r\nexcept ImportError:\r\n\tprint(\"Module 'pytz' is not installed\")\r\n\texit()\r\ntry:\r\n\timport pymysql\r\nexcept ImportError:\r\n\tprint(\"Module 'pymysql' is not installed\")\r\n\texit()\r\nimport sys,os\r\nimport time\r\nimport datetime\r\nimport hashlib \r\nfrom random import uniform\r\n\r\n###Function that queries an NTP server and then applies the result as the clock of the machine running the script\r\ndef cambiarFechaHora():\r\n\tutc=pytz.utc\r\n\r\n\tprint(\"current date and time before asking the NTP server----> \"+str(datetime.datetime.now()))\r\n\r\n\t####Format strings for the UTC conversion\r\n\tfmt = '%Y-%m-%d %H:%M:%S'\r\n\tfmtt = '%Y-%m-%d %H:%M:%S.%f'\r\n\tmexico = pytz.timezone('America/Mexico_City')\r\n\r\n\t####Request the time from the NTP server\r\n\tclient=ntplib.NTPClient()\r\n\tprint(\"requesting the time from server -------------> ntp.cais.rnp.br...\")\r\n\ttiempo_salida_peticion=datetime.datetime.now()\r\n\tresponse=client.request('ntp.cais.rnp.br')#('europe.pool.ntp.org')\r\n\ttiempo_llegada_peticion=datetime.datetime.now()\r\n\r\n\r\n\ttiempo_respuesta_peticion=(tiempo_llegada_peticion-tiempo_salida_peticion)/2\r\n\tprint(f\"one-way request delay -----------------------> {tiempo_respuesta_peticion}\")\r\n\thora_servidor=time.localtime(response.tx_time)\r\n\t#hora_servidor=time.gmtime(response.tx_time)\r\n\tprint(f\"NTP server time -----------------------------> {hora_servidor}\")\r\n\r\n\r\n\t####Reformat for the UTC conversion\r\n\tdate_time=str(hora_servidor[0])+\"-\"+str(hora_servidor[1])+\"-\"+str(hora_servidor[2])+\" \"+str(hora_servidor[3])+\":\"+str(hora_servidor[4])+\":\"+str(hora_servidor[5])\r\n\r\n\r\n\t####Convert the time to UTC\r\n\tdt = datetime.datetime.strptime(date_time, fmt)\r\n\tam_dt = mexico.localize(dt)\r\n\tprint(am_dt)\r\n\t#hora_utc=am_dt.astimezone(utc).strftime(fmt)+\",00\"\r\n\thora_utc=am_dt.strftime(fmt)+\".00\"\r\n\r\n\tprint(f\"time before adding the delay: {hora_utc}\")\r\n\r\n\t#hora_servidor=time.strftime(fmt,hora_servidor)+\".00\"\r\n\r\n\ttiempo_en_ejecucion=datetime.datetime.now()-tiempo_llegada_peticion\r\n\tprint(f\"execution delay -----------------------------> {tiempo_en_ejecucion}\")\r\n\thora_ntp=(datetime.datetime.strptime(hora_utc, fmtt)+tiempo_respuesta_peticion+tiempo_en_ejecucion).strftime(fmtt)\r\n\r\n\t####Apply the time to the system (year,month,dayOfWeek,day,hour,minute,second,millisecond)\r\n\tif sys.platform=='linux':\r\n\t\tos.system(f\"sudo date --set \\\"{hora_ntp}\\\"\")\r\n\t\tprint(\"\")\r\n
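\r\n# --- Editor's sketch (added; not part of the original source): the correction\r\n# above is the classic half-round-trip estimate. With made-up numbers: if the\r\n# request left at 12:00:00.000 and the reply arrived at 12:00:00.300, the\r\n# one-way delay is (0.300 - 0.000) / 2 = 0.150 s, so 0.150 s plus the measured\r\n# processing time is added to the server timestamp before calling date --set.\r\n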
\r\n\r\n\r\n###Function that returns the temperature read from a sensor, plus the date and time it was taken\r\n##Note: the data here is generated randomly (temperature between 16 and 37) because there is no\r\n##way to read it from a real sensor\r\ndef leerTemperatura():\r\n\tarray = {}\r\n\tprint(\"Reading temperature......\")\r\n\tarray['fecha'] = datetime.datetime.now().strftime(\"%y/%m/%d\")\r\n\tarray['hora'] = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n\tarray['valor'] = uniform(16, 37) \r\n\treturn array\r\n\r\n\r\n\r\n\r\n###Function that returns an electronic signature generated by applying MD5 to the sensor name\r\ndef generarFirma(sensor): \r\n\tresult = hashlib.md5(sensor.encode()) \r\n\tprint(f\"sensor signature generated ------------------> {result.hexdigest()}\") \r\n\treturn result.hexdigest()\r\n\r\n\r\n###Function that connects to a database to store the received records\r\ndef guardarDatos(id, firma, latitud, longitud, fecha, hora, variable, valor):\r\n\tDB_HOST = '127.0.0.1' \r\n\tDB_USER = 'root' \r\n\tDB_PASS = '' \r\n\tDB_NAME = 'IoT_datos' \r\n\r\n\ttry:\r\n\r\n\t\t# open the database connection (keyword arguments; newer PyMySQL releases reject positional ones)\r\n\t\tconn = pymysql.connect(host=DB_HOST, user=DB_USER, password=DB_PASS, database=DB_NAME)\r\n\r\n\t\t# prepare a cursor object using the cursor() method\r\n\t\tcursor = conn.cursor()\r\n\r\n\t\t# NOTE: string interpolation like this is open to SQL injection; a\r\n\t\t# parameterized cursor.execute() would be safer\r\n\t\tquery = f\"Insert into clima values('{id}', '{firma}',{latitud}, {longitud}, '{fecha}','{hora}', '{variable}', '{valor}')\"\r\n\r\n\t\t# run the SQL query using the execute() method\r\n\t\ttry:\r\n\t\t    cursor.execute(query)\r\n\t\t    conn.commit()\r\n\t\t    print(\"Data saved to the database successfully\") \r\n\t\texcept:\r\n\t\t    # roll back the transaction\r\n\t\t    conn.rollback()\r\n\t\t    print(\"An error occurred while trying to save the data to the database\")\r\n\r\n\r\n\t\t# disconnect from the server\r\n\t\tcursor.close()\r\n\t\tconn.close()\r\n\texcept (pymysql.err.OperationalError, pymysql.err.InternalError) as e:\r\n\t\tos.system('clear')\r\n\t\tprint(f\"An error occurred while trying to connect to the database --> {e}\")\r\n\t\texit()\r\n\r\n\r\n\r\n\r\n\r\n\r\n###Note: the values are fixed because only one sensor (temperature) is being simulated,\r\n##along with the latitude and longitude where it is supposedly located\r\n\r\nsensor = 'temperatura_01'\r\nlatitud = 19.721803\r\nlongitud = -101.185790\r\ncambiarFechaHora()\r\n\r\n###Loop that refreshes the temperature sensor values every 60 seconds\r\nwhile(True):\r\n\tdatosSensor = leerTemperatura()\r\n\tfirma = generarFirma(sensor)\r\n\tguardarDatos(sensor, firma, latitud, longitud, datosSensor['fecha'], datosSensor['hora'], '2', datosSensor['valor'])\r\n\tprint(\"---------------------------------------------------------------------------------------------------------------\")\r\n\tprint(\"\\n\\nThe information will refresh in 60 seconds\")\r\n\ttime.sleep(60)\r\n\tprint(\"\\n\\n---------------------------------------------------------------------------------------------------------------\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"IoT_almacenamientoLocalSensorTemperatura.py","file_name":"IoT_almacenamientoLocalSensorTemperatura.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"193986461","text":"from modules import __common__\nfrom datetime import datetime\nfrom client import client\nimport discord\nimport time\nimport key\nimport log\nimport os\n\n\nclient.basic_help(title=\"exit\", desc=\"Cleans up and shuts down the bot.\")\n\ndetailed_help = {\n\t\"Usage\": f\"{client.default_prefix}exit [pid]\",\n\t\"Arguments\": \"`pid` - 
(Optional) Process ID identifying which bot to exit out of\",\n\t\"Description\": \"This command completely exits out of the bot, and performs any registered cleanup procedures. A user ID check is performed against a builtin list of users allowed to run this command.\",\n\t# NO Aliases field, this will be added automatically!\n}\nclient.long_help(cmd=\"exit\", mapping=detailed_help)\n\n\n@client.command(trigger=\"kill\", aliases=[\"exit\"])\nasync def command(command: str, message: discord.Message):\n\tif not __common__.check_permission(message.author):\n\t\tawait message.add_reaction(\"❌\")\n\t\tif message.author.id == key.shutdown_easter_egg_user:\n\t\t\tawait message.channel.send(\"*hehehe*\\n\\nCan't fool me! >:3\")\n\t\treturn\n\telse:\n\t\tparts = command.split(\" \")\n\t\ttry:\n\t\t\ttarget_pid = int(parts[2])\n\t\texcept IndexError:\n\t\t\ttarget_pid = os.getpid()\n\t\texcept ValueError:\n\t\t\tawait message.channel.send(\"Invalid integer of PID to kill\")\n\t\t\treturn\n\t\tif target_pid == os.getpid():\n\t\t\tawait message.channel.send(\"Shutting down bot...\")\n\t\t\tawait message.channel.send(f\"Uptime: {time.perf_counter() - client.first_execution:.3f} seconds ({(time.perf_counter() - client.first_execution)/86400:.3f} days)\")\n\t\t\tlog.info(f\"Bot shutdown initiated at {datetime.utcnow().__str__()} by {message.author.name}#{message.author.discriminator}\")\n\t\t\tawait client.on_shutdown()\n\treturn\n\n\n@client.command(trigger=\"_kill\", aliases=[\"_exit\"])\nasync def emergency_suicide(command: str, message: discord.Message):\n\tif not __common__.check_permission(message.author):\n\t\tawait message.add_reaction(\"❌\")\n\t\treturn\n\telse:\n\t\tos._exit(2)","sub_path":"modules/exit.py","file_name":"exit.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"132431780","text":"import math\n\ndef calcula_distancia_do_projetil (v,y0,teta):\n \n p1=(v**2/(2*9.8))\n p2=(1+math.sqrt(1+(2*9.8*y0)/((v**2)*(math.sin(teta)**2))))\n p3=(math.sin(2*teta))\n d=p1*p2*p3\n return (d)\n \n \n\n","sub_path":"backup/user_026/ch14_2020_03_02_22_57_08_309371.py","file_name":"ch14_2020_03_02_22_57_08_309371.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"649582757","text":"\"\"\"Quality-time server.\"\"\"\n\nimport os\n\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() == \"true\"\n\nif not DEBUG:\n from gevent import monkey # pylint: disable=import-error\n monkey.patch_all()\n\n# pylint: disable=wrong-import-order,wrong-import-position\n\nimport bottle\nimport logging\n\nfrom initialization import init_bottle, init_database\n\n\ndef serve() -> None: # pragma: nocover\n \"\"\"Connect to the database and start the application server.\"\"\"\n logging.getLogger().setLevel(logging.INFO)\n database = init_database()\n init_bottle(database)\n server_port = os.environ.get(\"SERVER_PORT\", \"5001\")\n bottle.run( # nosec\n server=\"wsgiref\" if DEBUG else \"gevent\", host=\"0.0.0.0\", port=server_port, reloader=not DEBUG,\n log=None if DEBUG else logging.getLogger())\n\n\nif __name__ == \"__main__\":\n serve()\n","sub_path":"components/server/src/quality_time_server.py","file_name":"quality_time_server.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176266879","text":"\"\"\"\r\nThien Pham\r\nAssignment#4 
\r\nCOSC 1306\r\n\"\"\"\r\nimport math\r\ndef getNumber():\r\n number = float(input(\"Enter a number to find the square root:\"))\r\n while number < 0:\r\n print(\"The number\",number,\"is an invalid negaive number\")\r\n print(\"Please enter a positive number\")\r\n number = float(input(\"Please enter a number:\"))\r\n return number\r\ndef newton(num,steps):\r\n guess = 2\r\n count = 0\r\n while count < steps:\r\n guess = (guess + num/guess)/2\r\n count += 1\r\n return guess\r\ndef work(value):\r\n \r\n print(\"Steps = Value\")\r\n i = 1\r\n while i <= 16:\r\n print(i,\"=\",newton(value,i))\r\n i *=2\r\n print(\"The square root is of\",value,\"is\",math.sqrt(value))\r\n\r\n \r\nvalue = 2\r\n\r\nwhile value !=0:\r\n print(\"=\"*80)\r\n print(\"Enter 0 to exit or\")\r\n value = getNumber()\r\n if value!= 0:\r\n work(value)\r\n\r\nprint(\"Thank you good bye\")","sub_path":"COSC_1306/Assignments/Assignment#4.py","file_name":"Assignment#4.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"652724078","text":"import discord\nfrom discord.ext.commands import Bot\nimport bin.settings as settings\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions, MissingPermissions, CommandNotFound\nfrom bin.handler import Handle\nimport bin.secret as token\n\ninitial_extensions = ['cogs.management']\n\nBOT_PREFIX = (\"TLE_\")\nclient = Bot(command_prefix=BOT_PREFIX, case_insensitive=True)\nclient.case_insensitive = True\n\nif __name__ == '__main__':\n for extension in initial_extensions:\n client.load_extension(extension)\n\n@client.event\nasync def on_ready():\n await client.change_presence(activity=discord.Game(name=\"Bringing communities together\", type=1))\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n\n@client.event\nasync def on_message(message):\n LIST_CHANNELS = Handle.get_list_channels()\n if message.channel.id in LIST_CHANNELS and message.author.id != client.user.id:\n await Handle.process_message(message, client)\n await client.process_commands(message)\n\n\n@client.event\nasync def on_message_delete(message):\n LIST_CHANNELS = Handle.get_list_channels()\n if message.channel.id in LIST_CHANNELS and message.author.id == client.user.id:\n await Handle.process_deletion(message, client)\n\nclient.run(token.TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"201019152","text":"from utils import tf_layer\nimport tensorflow as tf\nfrom tensorflow.contrib import slim\n\ndef get_cfg_attr(CFG, name, default_value):\n r = CFG.get(name, default_value)\n print('{}:{}'.format(name, r))\n return r\n\ndef get_norm_func(network_cfg):\n norm_func = network_cfg.get('norm_func',\"None\")\n if norm_func == 'LayerNorm':\n print(\"use layer normalization\")\n return tf_layer.layer_norm\n elif norm_func == 'BatchNorm':\n print(\"use batch normalization\")\n return slim.batch_norm\n elif norm_func == 'GroupNorm':\n print(\"use group normalization\")\n return tf_layer.group_norm\n elif norm_func == 'InstanceNorm':\n print(\"use instance normalization\")\n return tf_layer.instance_norm\n elif norm_func == 'SwitchNorm':\n print(\"use switch normalization\")\n return tf_layer.switch_norm\n else:\n print(\"not use normalization\")\n return tf_layer.none_layer\n\ndef get_conv_func(network_cfg):\n conv_func = 
network_cfg.get('conv_type',\"None\")\n    if conv_func == 'PWS':\n        print(\"use PWS conv\")\n        return tf_layer.pws_conv\n    elif conv_func == 'WN':\n        print(\"use WN conv\")\n        return tf_layer.wn_conv\n    elif conv_func == 'WS':\n        print(\"use WS conv\")\n        return tf_layer.ws_conv\n    else:\n        print(\"use norm conv\")\n        return slim.conv2d","sub_path":"detection_for_voc/python/utils/cfg_utils.py","file_name":"cfg_utils.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"597791618","text":"#!/usr/bin/python3\n\"\"\" This handles the storage for SQL \"\"\"\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session, sessionmaker, scoped_session\nfrom sqlalchemy import Column, String, ForeignKey, DateTime\nfrom os import environ\nfrom os import getenv\nfrom models.state import State\nfrom models.city import City\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.amenity import Amenity\nfrom models.review import Review\n\nclasses = {'State': State, 'BaseModel': BaseModel, 'City': City}\n\n\nclass DBStorage:\n    \"\"\" Database Storage\n\n    cl_attr:\n        __engine\n        __session\n    \"\"\"\n    __engine = None\n    __session = None\n\n    def __init__(self):\n        \"\"\" initiates engine according\n        to environ variables\n        \"\"\"\n        uname = environ['HBNB_MYSQL_USER']\n        pword = environ['HBNB_MYSQL_PWD']\n        dbname = environ['HBNB_MYSQL_DB']\n        host = environ['HBNB_MYSQL_HOST']\n\n        self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n            uname, pword, host, dbname), pool_pre_ping=True)\n\n        if getenv('HBNB_ENV') == 'test':\n            # drop all tables (Base was previously referenced here without being imported)\n            from models.base_model import Base\n            Base.metadata.drop_all(bind=self.__engine)\n\n    def all(self, cls=None):\n        \"\"\" query on SQL database\n        calling all objects\n        \"\"\"\n        # Select * from cls passed into all\n        # if cls=None query all objects\n        # return dictionary like FileStorage\n        obj = {}\n        if cls is not None:\n            for u in self.__session.query(classes[cls]).all():\n                # key on the instance's class name and id value\n                # (was classes[cls].id, i.e. the Column object, not the value)\n                obj[u.__class__.__name__ + \".\" + u.id] = u\n            return obj\n        else:\n            for a in classes:\n                find = self.__session.query(classes[a]).all()\n                for u in find:\n                    obj[u.__class__.__name__ + \".\" + u.id] = u\n            return obj\n\n    def new(self, obj):\n        \"\"\" add object to current session\n        \"\"\"\n        self.__session.add(obj)\n\n    def save(self):\n        \"\"\" SQL save and commit session\n        \"\"\"\n        self.__session.commit()\n\n    def delete(self, obj=None):\n        \"\"\" Deletes an object from\n        the current session\n        \"\"\"\n        # only delete if not None (was self.__session().drop(obj), which is not a Session API)\n        if obj:\n            self.__session.delete(obj)\n\n    def reload(self):\n        \"\"\" create all tables in the db\n        \"\"\"\n        from models.base_model import Base\n        Base.metadata.create_all(self.__engine)\n        sesh = sessionmaker(bind=self.__engine, expire_on_commit=False)\n        self.__session = scoped_session(sesh)\n\n    def close(self):\n        \"\"\"calls close method\"\"\"\n        self.__session.close()\n
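\n# --- Editor's sketch (added; not part of the original source): a typical\n# round trip through this engine, assuming the HBNB_MYSQL_* environment\n# variables are set:\n#     storage = DBStorage()\n#     storage.reload()\n#     storage.new(State(name='California'))\n#     storage.save()\n#     storage.all('State')  # -> {'State.<id>': <State instance>, ...}\n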
","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"516961116","text":"# \"Doors\" a game by Anrew Skelly & Fionn Ó Muirí\r\n# Started on the 15th of March 2018\r\n\r\n\r\n#import required modules\r\nimport pygame, time, random\r\n\r\n#set some basic variables\r\npygame.init()\r\n\r\npygame.font.init()\r\n\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\ngameDisplay = pygame.display.set_mode((display_width,display_height))\r\npygame.display.set_caption('Doors')\r\nclock = pygame.time.Clock()\r\n\r\n#colours\r\nwhite = ((255,255,255))\r\nblack = ((0,0,0))\r\ngrey = ((150,150,150))\r\n\r\n#Images\r\ndoorOpenImg = pygame.image.load('img\\doorOpen.png')\r\ndoorClosedImg = pygame.image.load('img\\doorClosed.png')\r\npausedImg = pygame.image.load('img\\paused.png')\r\nmenuImg = pygame.image.load('img\\menu.png')\r\nlogoImg = pygame.image.load('img\\logo.png')\r\n\r\nmenuImgScale = pygame.transform.scale(menuImg,(int((display_height-100)/1.5),display_height-100))\r\nbgimgWidth, bgimgHeight = menuImgScale.get_rect().size\r\n\r\n#class\r\nclass achievement:\r\n    name = \"\"\r\n    score = 0\r\n    achieved = False\r\n    img = doorOpenImg\r\n    x= 0\r\n    y= 0\r\n    width = 50\r\n    height = 75\r\n\r\n#music\r\npygame.mixer.music.load(\"music/Düsseldorf Waltz.mp3\")\r\npygame.mixer.music.play(-1)\r\n\r\n# other required variables\r\nscore = 0\r\nhighScoreDoc = open(\"highScore.txt\", \"r\")\r\nhighScore = int(highScoreDoc.read())\r\nhighScoreDoc.close()  # was highScoreDoc.close (missing call parentheses, so the file never closed)\r\n\r\nsound = True\r\n\r\ndestination = \"\"\r\n\r\noptionButtons = []\r\nmenuButtons = []\r\ngameOverButtons = []\r\nstatsButtons = []\r\n\r\nachievementsDoc = open(\"achievements.txt\", \"r\")\r\n#achievementsStr = achievementsDoc.read().split(\"\\n\")\r\nachievementsStr = [\"False\",\"False\",\"False\",\"False\"]  # file read is stubbed out; defaults used\r\nachievementsDoc.close()  # was achievementsDoc.close\r\n\r\nstatNames = [\"Total Doors: \",\"Avg Score: \"]\r\nstatsDoc = open(\"stats.txt\", \"r\")\r\nstats = statsDoc.read().split(\"\\n\")\r\nstatsDoc.close()  # was statsDoc.close\r\nstats[0] = int(stats[0])\r\nstats[1] = float(stats[1])\r\n\r\nachNames = [\"Baby Steps\", \"Ha! Pitiful\", \"Alright. You've proved your point\", \"Show off\"]\r\nachScores = [1, 5, 10, 20]\r\n\r\nachList = []\r\nachGet = \"\"\r\nreached = False\r\n\r\ncounter = 0\r\n\r\n#Create achievements\r\nfor i in range(0,len(achNames)):\r\n    newAch = achievement()\r\n    newAch.name = achNames[i]\r\n    newAch.score = achScores[i]\r\n    newAch.img = pygame.image.load('img/achievements/%s.png' % (newAch.name))\r\n    \r\n    if achievementsStr[i] == \"True\":\r\n        newAch.achieved = True\r\n    else:\r\n        newAch.achieved = False\r\n\r\n    achList.append(newAch)\r\n\r\n#template for each door\r\nclass door:\r\n    size = 0 \r\n    x = 0\r\n    y = 0\r\n    speed = 0\r\n    doorOpen = False\r\n\r\n#template for buttons\r\nclass button:\r\n    imgA = pygame.image.load('img\\doorOpen.png')\r\n    imgB = pygame.image.load('img\\doorOpen.png')\r\n    width = 0\r\n    height = 0\r\n    x = 0\r\n    y = 0\r\n\r\nfor i in range(0,4):\r\n    newButton = button()\r\n    newButton.imgA = pygame.transform.scale(pygame.image.load(\"img/buttons/menu/btn%sa.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.imgB = pygame.transform.scale(pygame.image.load(\"img/buttons/menu/btn%sb.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.width, newButton.height = newButton.imgA.get_rect().size\r\n    newButton.x = int((display_width/2)-(newButton.width/5))\r\n    newButton.y = int((display_height/3.25)+(i*100))\r\n\r\n    menuButtons.append(newButton)\r\n
\r\nfor i in range(0,3):\r\n    newButton = button()\r\n    newButton.imgA = pygame.transform.scale(pygame.image.load(\"img/buttons/options/btn%sa.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.imgB = pygame.transform.scale(pygame.image.load(\"img/buttons/options/btn%sb.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.width, newButton.height = newButton.imgA.get_rect().size\r\n    newButton.x = int((display_width/2)-(newButton.width/5))\r\n    newButton.y = int((display_height/3.25)+(i*100))\r\n\r\n    optionButtons.append(newButton)\r\n\r\nfor i in range(0,2):\r\n    newButton = button()\r\n    newButton.imgA = pygame.transform.scale(pygame.image.load(\"img/buttons/gameOver/btn%sa.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.imgB = pygame.transform.scale(pygame.image.load(\"img/buttons/gameOver/btn%sb.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.width, newButton.height = newButton.imgA.get_rect().size\r\n    newButton.x = int((display_width/2)-(newButton.width/5))\r\n    newButton.y = int((display_height/1.6)+(i*100))\r\n\r\n    gameOverButtons.append(newButton)\r\n\r\nfor i in range(2,3):\r\n    newButton = button()\r\n    newButton.imgA = pygame.transform.scale(pygame.image.load(\"img/buttons/options/btn%sa.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.imgB = pygame.transform.scale(pygame.image.load(\"img/buttons/options/btn%sb.png\" % (i+1)),(int(bgimgWidth*0.55),int(((bgimgWidth*0.5)/5)*3)))\r\n    newButton.width, newButton.height = newButton.imgA.get_rect().size\r\n    newButton.x = int((display_width/2)-(newButton.width/5))\r\n    newButton.y = int(display_height/1.6)\r\n\r\n    statsButtons.append(newButton)\r\n\r\ndef achievements():\r\n    global score\r\n    global achGet\r\n    \r\n    for i in range(0,len(achList)):\r\n        if score == achList[i].score:\r\n            if not achList[i].achieved:\r\n                achList[i].achieved = True\r\n\r\n                achGet = achList[i]\r\n\r\n                achievementsDoc = open(\"achievements.txt\", \"w\")\r\n                achievementsDoc.write(str(achList[0].achieved)+\"\\n\"+str(achList[1].achieved)+\"\\n\"+str(achList[2].achieved)+\"\\n\"+str(achList[3].achieved))\r\n                achievementsDoc.close()  # was achievementsDoc.close\r\n\r\ndef achAnimation():\r\n    global achGet\r\n    global reached\r\n    global counter\r\n    \r\n    if achGet != \"\":\r\n        if achGet.x == 0:\r\n            reached = False\r\n            achGet.x = display_width\r\n            achGet.y = display_height-achGet.height-10\r\n        else:\r\n            if reached and counter >20:\r\n                achGet.x+=1\r\n                if achGet.x>display_width:\r\n                    achGet = \"\"\r\n                    counter = 0\r\n            \r\n            elif achGet.x > display_width-achGet.width-10 and reached == False:\r\n                achGet.x-=1\r\n\r\n            else:\r\n                reached = True\r\n                counter+=1\r\n    \r\n    if achGet != \"\": \r\n        gameDisplay.blit(achGet.img,(achGet.x,achGet.y))\r\n        pygame.display.update()\r\n    \r\ndef menuFade(mode,menu):\r\n    #scales background image\r\n    menuImgScale = pygame.transform.scale(menuImg,(int((display_height-100)/1.5),display_height-100))\r\n    bgimgWidth, bgimgHeight = menuImgScale.get_rect().size\r\n\r\n    y = int(display_height/3.4)\r\n    text = []\r\n\r\n    buttons = []\r\n    if menu == \"menu\":\r\n        buttons = menuButtons\r\n    elif menu == \"options\":\r\n        buttons = optionButtons\r\n    elif menu == \"gameOver\":\r\n        buttons = gameOverButtons\r\n    elif menu == \"stats\":\r\n        buttons = statsButtons\r\n\r\n    myfont = pygame.font.Font('img/fonts/VINERITC.ttf', 40)\r\n    textsurface = myfont.render('Highscore: %s' % (highScore), False, white)\r\n    \r\n    myfontA = pygame.font.Font('img/fonts/VINERITC.ttf', int(display_width/20))\r\n    textsurfaceA = myfontA.render('''Game Over''', False, white)\r\n    textASize, x = textsurfaceA.get_rect().size\r\n    textsurfaceB = myfontA.render('''Score: %s''' % (score), False, white)\r\n    textBSize, x = textsurfaceB.get_rect().size\r\n\r\n    myfont = pygame.font.Font('img/fonts/VINERITC.ttf', int(display_width/35))\r\n    for i in range(0,len(stats)):\r\n        statsText = 
myfont.render('%s%s' % (statNames[i],int(stats[i])), False, white)\r\n text.append(statsText)\r\n \r\n s = pygame.Surface((display_width,display_height)) \r\n\r\n if mode == \"in\":\r\n for i in range(255,0,-3):\r\n gameDisplay.fill(black)\r\n y = int(display_height/3.4)\r\n #draws background image\r\n gameDisplay.blit(menuImgScale,(int((display_width/2)-(bgimgWidth/2)),int((display_height/2)-(bgimgHeight/2))))\r\n \r\n for j in range(0,len(buttons)):\r\n #draws button\r\n gameDisplay.blit(buttons[j].imgA,(buttons[j].x, buttons[j].y,))\r\n\r\n if menu == \"options\" or menu == \"menu\":\r\n gameDisplay.blit(textsurface,(0,0))\r\n\r\n elif menu == \"gameOver\":\r\n gameDisplay.blit(textsurfaceA,((display_width/2)-(textASize/2)+50,200))\r\n gameDisplay.blit(textsurfaceB,((display_width/2)-(textBSize/2)+50,260))\r\n\r\n elif menu == \"stats\":\r\n for j in range(0,len(text)):\r\n gameDisplay.blit(text[j],(display_height/1.65,y))\r\n y+=int(display_height/14)\r\n\r\n s.set_alpha(i) \r\n s.fill((0,0,0)) \r\n gameDisplay.blit(s, (0,0))\r\n \r\n pygame.display.update()\r\n \r\n elif mode == \"out\":\r\n for i in range(0,255,3):\r\n gameDisplay.fill(black)\r\n y = int(display_height/3.4)\r\n #draws background image\r\n gameDisplay.blit(menuImgScale,(int((display_width/2)-(bgimgWidth/2)),int((display_height/2)-(bgimgHeight/2))))\r\n \r\n for j in range(0,len(buttons)):\r\n #draws button\r\n gameDisplay.blit(buttons[j].imgA,(buttons[j].x, buttons[j].y,))\r\n \r\n if menu == \"options\" or menu == \"menu\":\r\n gameDisplay.blit(textsurface,(0,0))\r\n\r\n elif menu == \"gameOver\":\r\n gameDisplay.blit(textsurfaceA,((display_width/2)-(textASize/2)+50,200))\r\n gameDisplay.blit(textsurfaceB,((display_width/2)-(textBSize/2)+50,260))\r\n\r\n elif menu == \"stats\":\r\n for j in range(0,len(text)):\r\n gameDisplay.blit(text[j],(display_height/1.65,y))\r\n y+=int(display_height/14)\r\n \r\n s.set_alpha(i) \r\n s.fill((0,0,0)) \r\n gameDisplay.blit(s, (0,0))\r\n \r\n pygame.display.update()\r\n\r\n# game function\r\ndef game():\r\n global score\r\n global highScore\r\n global destination\r\n\r\n paused = False\r\n\r\n doors = []\r\n\r\n #prints score in top ledt\r\n myfont = pygame.font.Font('img/fonts/VINERITC.ttf', 30)\r\n textsurface = myfont.render(\"Score: %s Highscore: %s\" % (0, highScore), False, white)\r\n \r\n mouseDownPast = False\r\n mouseDownNow = False\r\n\r\n score = 0\r\n \r\n # create first door\r\n newDoor = door()\r\n\r\n newDoor.size = random.randrange(40, 70)\r\n newDoor.x = random.randrange(0, display_width-newDoor.size)\r\n newDoor.y = random.randrange(display_height, display_height*2)\r\n newDoor.speed = (newDoor.size-30)/5\r\n \r\n doors.append(newDoor)\r\n\r\n # the game loop\r\n while True:\r\n\r\n #gets mouse position\r\n mouseX, mouseY = pygame.mouse.get_pos()\r\n \r\n # check for events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n mouseDownNow = True\r\n\r\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n mouseDownNow = False\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n #pauses/unpauses the game when space is pressed\r\n if paused:\r\n pygame.time.delay(500)\r\n paused = False\r\n else:\r\n paused = True\r\n gameDisplay.fill(black)\r\n gameDisplay.blit(pausedImg,(display_width/2-100, display_height/2-150))\r\n pygame.display.update()\r\n \r\n if not paused:\r\n # refresh the screen\r\n 
gameDisplay.fill(black)\r\n            \r\n            # move and draw doors\r\n            for i in range(0,len(doors)):\r\n\r\n                \r\n                # scales door images to required size\r\n                OpenImg = pygame.transform.scale(doorOpenImg, (doors[i].size, int(doors[i].size*1.5)))\r\n                ClosedImg = pygame.transform.scale(doorClosedImg, (doors[i].size, int(doors[i].size*1.5)))\r\n\r\n                #chance of door opening before halfway; cast to int because speed can be fractional\r\n                openchance = random.randrange(int(doors[i].speed*5),70)\r\n                if doors[i].y < display_height and openchance >= 69:\r\n                    if not doors[i].doorOpen:\r\n                        doors[i].doorOpen = True\r\n\r\n                # if door hasn't opened by certain point, open it\r\n                elif doors[i].y < display_height/3:\r\n                    if not doors[i].doorOpen:\r\n                        doors[i].doorOpen = True\r\n\r\n                # move door\r\n                doors[i].y -= doors[i].speed\r\n\r\n                #checks to see if the door was clicked\r\n                if mouseX >= doors[i].x and mouseX <= doors[i].x + doors[i].size:\r\n                    if mouseY >= doors[i].y and mouseY <= doors[i].y + doors[i].size*1.5:\r\n                        if mouseDownNow == True and mouseDownPast == False:\r\n                            if doors[i].doorOpen:\r\n                                #increases score\r\n                                score+=1\r\n\r\n                                stats[0]+=1\r\n\r\n                                statsDoc = open(\"stats.txt\", \"w\")\r\n                                statsDoc.write(str(stats[0])+\"\\n\"+str(stats[1]))\r\n                                statsDoc.close()\r\n\r\n                                achievements()\r\n\r\n                                #checks for highscore\r\n                                if score > highScore:\r\n                                    highScore = score\r\n                                    highScoreDoc = open(\"highScore.txt\", \"w\")\r\n                                    highScoreDoc.write(str(highScore))\r\n                                    highScoreDoc.close()\r\n\r\n                                textsurface = myfont.render(\"Score: %s Highscore: %s\" % (score, highScore), False, white)\r\n\r\n                                # resets the door\r\n                                doors[i].size = random.randrange(40, 70)\r\n                                doors[i].y = display_height\r\n                                doors[i].x = random.randrange(0, display_width-doors[i].size)\r\n                                doors[i].speed = (doors[i].size-35)/5\r\n\r\n                                doors[i].doorOpen = False\r\n\r\n                                #adds a new door every time the score is a multiple of 5 \r\n                                if score>0 and score<=25 and score%5 == 0:\r\n                                    newDoor = door()\r\n\r\n                                    newDoor.size = random.randrange(40, 70)\r\n                                    newDoor.x = random.randrange(0, display_width-newDoor.size)\r\n                                    newDoor.y = random.randrange(display_height, display_height*2)\r\n                                    newDoor.speed = (newDoor.size-30)/5\r\n                                    \r\n                                    doors.append(newDoor)\r\n                \r\n\r\n                #draws the door\r\n                if doors[i].doorOpen:\r\n                    pygame.draw.rect(gameDisplay, grey, pygame.Rect(doors[i].x,doors[i].y,doors[i].size, doors[i].size*1.5))\r\n                    gameDisplay.blit(OpenImg, (doors[i].x,doors[i].y))\r\n                else:\r\n                    pygame.draw.rect(gameDisplay, black, pygame.Rect(doors[i].x,doors[i].y,doors[i].size, doors[i].size*1.5))\r\n                    gameDisplay.blit(ClosedImg, (doors[i].x,doors[i].y))\r\n\r\n                #if the door reaches the top, end game\r\n                if doors[i].y < 0-(doors[i].size*1.5):\r\n                    if stats[1]>0:\r\n                        stats[1] = (stats[1]+score)/2\r\n                    else:\r\n                        stats[1] = score\r\n\r\n                    statsDoc = open(\"stats.txt\", \"w\")\r\n                    statsDoc.write(str(stats[0])+\"\\n\"+str(stats[1]))\r\n                    statsDoc.close()\r\n                    \r\n                    \r\n                    pygame.display.update()\r\n                    destination = \"gameOver\"\r\n                    menuFade(\"in\",\"gameOver\")\r\n                    return\r\n            \r\n        mouseDownPast = mouseDownNow\r\n        \r\n        gameDisplay.blit(textsurface,(0,0))\r\n\r\n        achAnimation()\r\n        \r\n        # update screen and set FPS\r\n        pygame.display.update()\r\n        clock.tick(60)\r\n\r\n# game over function\r\ndef gameOver():\r\n    global destination\r\n    global achList\r\n\r\n    buttons = gameOverButtons\r\n\r\n    mouseDownNow = False\r\n    \r\n    # writes gameover + score\r\n    myfont = pygame.font.Font('img/fonts/VINERITC.ttf', int(display_width/20))\r\n    textsurfaceA = myfont.render('''Game Over''', False, white)\r\n    textASize, x = textsurfaceA.get_rect().size\r\n    textsurfaceB = 
myfont.render('''Score: %s''' % (score), False, white)\r\n textBSize, x = textsurfaceB.get_rect().size\r\n\r\n #waits for space press for menu or quit\r\n while True:\r\n gameDisplay.fill(black)\r\n\r\n mouseX, mouseY = pygame.mouse.get_pos()\r\n \r\n #draws background image\r\n gameDisplay.blit(menuImgScale,(int((display_width/2)-(bgimgWidth/2)),int((display_height/2)-(bgimgHeight/2))))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n mouseDownNow = True\r\n\r\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n mouseDownNow = False\r\n\r\n #goes through buttons\r\n for i in range(0,len(buttons)):\r\n #checks if button was pressed\r\n if mouseX > buttons[i].x and mouseX < buttons[i].x+buttons[i].width:\r\n if mouseY > buttons[i].y and mouseY < buttons[i].y+buttons[i].height:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgB,(buttons[i].x, buttons[i].y,))\r\n if mouseDownNow:\r\n if i == 0:\r\n menuFade(\"out\", \"gameOver\")\r\n destination = \"game\"\r\n return\r\n if i == 1:\r\n menuFade(\"out\",\"gameOver\")\r\n menuFade(\"in\",\"menu\")\r\n destination = \"menu\"\r\n return\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n \r\n gameDisplay.blit(textsurfaceA,((display_width/2)-(textASize/2)+50,200))\r\n gameDisplay.blit(textsurfaceB,((display_width/2)-(textBSize/2)+50,260))\r\n pygame.display.update()\r\n\r\n# menu function\r\ndef menu():\r\n global menuButtons\r\n global destination\r\n\r\n buttons = menuButtons\r\n\r\n mouseDownNow = False\r\n \r\n while True:\r\n gameDisplay.fill(black)\r\n\r\n mouseX, mouseY = pygame.mouse.get_pos()\r\n\r\n # checks for clicks\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n mouseDownNow = True\r\n\r\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n mouseDownNow = False\r\n\r\n #draws background image\r\n gameDisplay.blit(menuImgScale,(int((display_width/2)-(bgimgWidth/2)),int((display_height/2)-(bgimgHeight/2))))\r\n\r\n #goes through buttons\r\n for i in range(0,len(buttons)):\r\n #checks if button was pressed\r\n if mouseX > buttons[i].x and mouseX < buttons[i].x+buttons[i].width:\r\n if mouseY > buttons[i].y and mouseY < buttons[i].y+buttons[i].height:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgB,(buttons[i].x, buttons[i].y,))\r\n if mouseDownNow:\r\n if i == 0:\r\n menuFade(\"out\", \"menu\")\r\n destination = \"game\"\r\n return\r\n if i == 1:\r\n menuFade(\"out\",\"menu\")\r\n menuFade(\"in\",\"options\")\r\n destination = \"options\"\r\n return\r\n if i == 3:\r\n menuFade(\"out\",\"menu\")\r\n pygame.quit()\r\n quit()\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n\r\n # draws highscore in the top left\r\n myfont = pygame.font.Font('img/fonts/VINERITC.ttf', 40)\r\n textsurface = myfont.render('Highscore: %s' % (highScore), False, white)\r\n gameDisplay.blit(textsurface,(0,0))\r\n \r\n pygame.display.update()\r\n \r\ndef startUp():\r\n global destination\r\n #scales background image\r\n menuImgScale = 
pygame.transform.scale(menuImg,(int((display_height-100)/1.5),display_height-100))\r\n bgimgWidth, bgimgHeight = menuImgScale.get_rect().size\r\n\r\n logoImgWidth, logoImgHeight = logoImg.get_rect().size\r\n\r\n myfont = pygame.font.Font('img/fonts/VINERITC.ttf', 40)\r\n textsurface = myfont.render('Highscore: %s' % (highScore), False, white)\r\n \r\n for i in range(255,0,-3):\r\n gameDisplay.blit(logoImg,((display_width/2)-(logoImgWidth/2),(display_height/2)-(logoImgHeight/2)))\r\n s = pygame.Surface((display_width,display_height)) \r\n s.set_alpha(i) \r\n s.fill((0,0,0)) \r\n gameDisplay.blit(s, (0,0)) \r\n pygame.display.update()\r\n \r\n for i in range(0,255,3):\r\n gameDisplay.blit(logoImg,((display_width/2)-(logoImgWidth/2),(display_height/2)-(logoImgHeight/2)))\r\n s = pygame.Surface((display_width,display_height)) \r\n s.set_alpha(i) \r\n s.fill((0,0,0)) \r\n gameDisplay.blit(s, (0,0)) \r\n pygame.display.update()\r\n\r\n menuFade(\"in\",\"menu\")\r\n\r\ndef options():\r\n global destination\r\n global optionButtons\r\n global sound\r\n\r\n buttons = optionButtons\r\n\r\n mouseDownNow = False\r\n\r\n while True:\r\n\r\n gameDisplay.fill(black)\r\n\r\n mouseX, mouseY = pygame.mouse.get_pos()\r\n\r\n # checks for clicks\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n mouseDownNow = True\r\n\r\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n mouseDownNow = False\r\n\r\n #draws background image\r\n gameDisplay.blit(menuImgScale,(int((display_width/2)-(bgimgWidth/2)),int((display_height/2)-(bgimgHeight/2))))\r\n\r\n #goes through buttons\r\n for i in range(0,len(buttons)):\r\n #checks if button was pressed\r\n if mouseX > buttons[i].x and mouseX < buttons[i].x+buttons[i].width:\r\n if mouseY > buttons[i].y and mouseY < buttons[i].y+buttons[i].height:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgB,(buttons[i].x, buttons[i].y,))\r\n if mouseDownNow:\r\n if i == 0:\r\n if sound:\r\n sound = False\r\n pygame.mixer.music.stop()\r\n pygame.time.delay(100)\r\n else:\r\n sound = True\r\n pygame.mixer.music.load(\"music/Düsseldorf Waltz.mp3\")\r\n pygame.mixer.music.play(-1)\r\n pygame.time.delay(100)\r\n\r\n if i == 1:\r\n menuFade(\"out\",\"options\")\r\n menuFade(\"in\",\"stats\")\r\n destination = \"stats\"\r\n return\r\n \r\n if i == 2:\r\n menuFade(\"out\",\"options\")\r\n menuFade(\"in\",\"menu\")\r\n destination = \"menu\"\r\n return\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n\r\n # draws highscore in the top left\r\n myfont = pygame.font.Font('img/fonts/VINERITC.ttf', 40)\r\n textsurface = myfont.render('Highscore: %s' % (highScore), False, white)\r\n gameDisplay.blit(textsurface,(0,0))\r\n \r\n pygame.display.update()\r\n\r\ndef statsPage():\r\n global destination\r\n global stats\r\n mouseDownNow = False\r\n text = []\r\n y = int(display_height/3.4)\r\n\r\n buttons = statsButtons\r\n\r\n mouseDownNow = False\r\n \r\n myfont = pygame.font.Font('img/fonts/VINERITC.ttf', int(display_width/35))\r\n for i in range(0,len(stats)):\r\n textsurface = myfont.render('%s%s' % (statNames[i],int(stats[i])), False, white)\r\n text.append(textsurface)\r\n\r\n #waits for space press for menu or quit\r\n while True:\r\n y = int(display_height/3.4)\r\n gameDisplay.fill(black)\r\n\r\n mouseX, mouseY = 
pygame.mouse.get_pos()\r\n \r\n #draws background image\r\n gameDisplay.blit(menuImgScale,(int((display_width/2)-(bgimgWidth/2)),int((display_height/2)-(bgimgHeight/2))))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n mouseDownNow = True\r\n\r\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\r\n mouseDownNow = False\r\n\r\n #goes through buttons\r\n for i in range(0,len(buttons)):\r\n #checks if button was pressed\r\n if mouseX > buttons[i].x and mouseX < buttons[i].x+buttons[i].width:\r\n if mouseY > buttons[i].y and mouseY < buttons[i].y+buttons[i].height:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgB,(buttons[i].x, buttons[i].y,))\r\n if mouseDownNow:\r\n if i == 0:\r\n menuFade(\"out\",\"stats\")\r\n menuFade(\"in\",\"options\")\r\n destination = \"options\"\r\n return\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n else:\r\n #draws button\r\n gameDisplay.blit(buttons[i].imgA,(buttons[i].x, buttons[i].y,))\r\n\r\n \r\n for i in range(0,len(text)):\r\n gameDisplay.blit(text[i],(display_height/1.65,y))\r\n y+=int(display_height/14)\r\n \r\n pygame.display.update()\r\n \r\n# launch game\r\nif __name__ == \"__main__\":\r\n startUp()\r\n menu()\r\n while True:\r\n if destination == \"game\":\r\n game()\r\n if destination == \"menu\":\r\n menu()\r\n if destination == \"options\":\r\n options()\r\n if destination == \"gameOver\":\r\n gameOver()\r\n if destination == \"stats\":\r\n statsPage()\r\n\r\npygame.quit()\r\nquit()\r\n","sub_path":"Doors alpha 0.7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597400463","text":"import rosbag, math, os, numpy, re, laspy\nimport sensor_msgs.point_cloud2 as pc2\n\ndef exportLas(lasName=r'points.laz'):\n\n regex = re.compile(r'.+\\.frames$')\n frameFiles = filter(regex.search, os.listdir())\n frameFiles = [re.sub(r'\\.frames$', r'', i) for i in frameFiles]\n\n x = numpy.array([])\n y = numpy.array([])\n z = numpy.array([])\n gps = numpy.array([])\n tStp = 0\n for i in sorted(frameFiles):\n print(r'loading ' + i)\n pts = numpy.loadtxt(i + r'.3d')\n mat = numpy.loadtxt(i + r'.frames')[-1,:]\n mat = numpy.array([ mat[0:4], mat[4:8], mat[8:12], mat[12:16] ])\n\n oneCol = numpy.array([[1]] * len(pts))\n pts = numpy.append(pts, oneCol, axis=1)\n pts = numpy.dot(pts, mat)\n\n x = numpy.append(x, pts[:,0])\n y = numpy.append(y, pts[:,1])\n z = numpy.append(z, pts[:,2])\n gps = numpy.append(gps, [tStp] * len(pts))\n\n tStp += 1\n\n # numpy.savetxt('temp.txt', numpy.column_stack((x,y,z)), fmt='%f')\n\n lasHead = laspy.header.Header(point_format=1)\n xmin = numpy.floor(numpy.min(x))\n ymin = numpy.floor(numpy.min(y))\n zmin = numpy.floor(numpy.min(z))\n\n scale = 1000000\n\n lasOut = laspy.file.File(lasName, mode=\"w\", header = lasHead)\n lasOut.header.offset = [xmin,ymin,zmin]\n lasOut.header.scale = [1/scale] * 3\n\n lasOut.set_x(x*scale)\n lasOut.set_y(y*scale)\n lasOut.set_z(z*scale)\n lasOut.set_gps_time(gps)\n\n lasOut.close()\n\ndef getTime(msg):\n\n secs = msg.header.stamp.secs\n nsecs = msg.header.stamp.nsecs\n time = (secs, nsecs)\n return time\n\ndef getAngles(msg = None):\n\n if msg == None:\n return [0,0,0]\n\n pitch = msg.angle.x * 180 / math.pi\n yaw = msg.angle.y * 180 / math.pi\n roll = msg.angle.z * 180 / math.pi\n\n 
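# A minimal sketch of the homogeneous-coordinate step inside exportLas()
# above: append a ones column so an (N, 3) block of scan points can be
# multiplied by the 4x4 pose read from the .frames file. apply_pose is a
# hypothetical name; the row-vector convention matches numpy.dot(pts, mat):
#
#     import numpy as np
#
#     def apply_pose(points, pose):
#         ones = np.ones((points.shape[0], 1))
#         homo = np.hstack([points, ones])    # (N, 4) homogeneous points
#         return np.dot(homo, pose)[:, :3]    # drop the homogeneous column
#
#     assert np.allclose(apply_pose(np.zeros((2, 3)), np.eye(4)), 0.0)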
imu = [pitch,yaw,roll]\n return imu\n\ndef getPos():\n return [0,0,0]\n\ndef getCloud(msg):\n\n cloud = []\n for p in pc2.read_points(msg, skip_nans=True): \n cloud.append(p)\n \n return cloud\n\n\ngoDir = r'/home/tiago/Desktop/bag/'\nslamDir = r'/home/tiago/SLAM/slam6d-code/bin/'\nbagFiles = os.listdir(goDir)\n\nfor goBag in bagFiles:\n # goBag = r'20180626_ser1_x_45.bag'\n \n if re.match(r'.+\\.bag$', goBag) == None:\n continue\n\n os.chdir(goDir)\n bagPref = re.sub(r'\\.bag', '', goBag)\n bag = rosbag.Bag(goBag)\n\n os.mkdir(bagPref)\n os.chdir(bagPref)\n\n ang = True\n tempAng = numpy.array([getPos(), getAngles()])\n cld = False\n skip = 0\n skipMax = 1\n counter = 0\n for topic, msg, t in bag.read_messages():\n \n # if(not cld and not ang and topic == r'/ekf_euler'):\n # tempAng = getAngles(msg)\n # tempAng = numpy.array([getPos(), tempAng])\n # ang = True\n\n if(ang and not cld and topic == r'/velodyne_points'):\n tempCloud = getCloud(msg)\n tempCloud = numpy.array(tempCloud)\n cld = True\n\n if ang and cld:\n if skip == 0:\n print('converting cloud ' + str(counter))\n ctr = '00' + str(counter) if counter < 10 else ('0' + str(counter) if counter < 100 else str(counter))\n numpy.savetxt('scan' + ctr + '.pose', tempAng, fmt=\"%f\")\n numpy.savetxt('scan' + ctr + '.3d', tempCloud[:,0:3], fmt=\"%f\")\n counter += 1\n\n skip = 0 if skip >= skipMax else skip+1\n # ang = False\n cld = False\n \n if(counter > 999):\n break\n\n bag.close()\n\n cwd = os.getcwd()\n os.chdir(slamDir)\n\n cmd = r'./slam6D -i 600 -I 15 --metascan --epsICP=0.00000001 --epsSLAM=0.1 -d 1.25 -D 1 -G 1 -r 0.2 -a 2 ' + cwd\n print(cmd)\n os.system(cmd)\n\n cmd = r'./exportPoints ' + cwd #+ r' && ./txt2las -i points.pts -o points.laz -v'\n print(cmd)\n os.system(cmd)\n\n if(cwd[-1] != r'/'): \n cwd += r'/'\n\n os.rename(r'points.pts', cwd + r'points.pts')\n os.rename(r'positions.txt', cwd + r'positions.txt')\n os.rename(r'poses.txt', cwd + r'poses.txt')\n # os.rename(r'points.laz', cwd + r'points.laz')\n os.rename(r'loopclose.pts', cwd + r'loopclose.pts')\n\n os.chdir(cwd)\n exportLas()\n","sub_path":"python/slam_tests/bag_extractor.py","file_name":"bag_extractor.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"402924734","text":"import os, unittest\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom igf_data.igfdb.igfTables import Base, Project, Project_attribute\nfrom igf_data.igfdb.baseadaptor import BaseAdaptor\nfrom igf_data.igfdb.projectadaptor import ProjectAdaptor\nfrom igf_data.utils.projectutils import mark_project_barcode_check_off\nfrom igf_data.utils.dbutils import read_dbconf_json\n\nclass Projectutils_test1(unittest.TestCase):\n def setUp(self):\n self.dbconfig = 'data/dbconfig.json'\n dbparam=read_dbconf_json(self.dbconfig)\n data=[{'project_igf_id': 'IGFP001_test1_24-1-18',},\n {'project_igf_id': 'IGFP002_test1_24-1-18',\n 'barcode_check':'ON'},\n {'project_igf_id': 'IGFP003_test1_24-1-18',\n 'barcode_check':'OFF'}\n ]\n self.data=pd.DataFrame(data)\n base = BaseAdaptor(**dbparam)\n self.engine = base.engine\n self.dbname=dbparam['dbname']\n Base.metadata.create_all(self.engine)\n self.session_class=base.get_session_class()\n\n def tearDown(self):\n Base.metadata.drop_all(self.engine)\n os.remove(self.dbname)\n\n def test_mark_project_barcode_check_off(self):\n pr=ProjectAdaptor(**{'session_class':self.session_class})\n pr.start_session()\n pr.store_project_and_attribute_data(self.data)\n 
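# A side note on the scan numbering in the rosbag loop above: the nested
# conditional that builds `ctr` is equivalent to str.zfill(3) for every
# counter the loop allows (it breaks past 999), e.g.:
#
#     for n in (7, 42, 999):
#         ctr = '00' + str(n) if n < 10 else ('0' + str(n) if n < 100 else str(n))
#         assert ctr == str(n).zfill(3)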
pr.close_session()\n\n mark_project_barcode_check_off(project_igf_id='IGFP001_test1_24-1-18',\n session_class=self.session_class) # no attribute record\n pr.start_session()\n attribute_check=pr.check_project_attributes(project_igf_id='IGFP001_test1_24-1-18',\n attribute_name='barcode_check')\n self.assertTrue(attribute_check)\n pr_attributes=pr.get_project_attributes(project_igf_id='IGFP001_test1_24-1-18',\n attribute_name='barcode_check')\n for pr_attribute in pr_attributes.to_dict(orient='records'):\n self.assertEqual(pr_attribute['attribute_value'],'OFF')\n \n pr_attributes=pr.get_project_attributes(project_igf_id='IGFP002_test1_24-1-18',\n attribute_name='barcode_check')\n for pr_attribute in pr_attributes.to_dict(orient='records'):\n self.assertEqual(pr_attribute['attribute_value'],'ON')\n pr.close_session()\n\n mark_project_barcode_check_off(project_igf_id='IGFP002_test1_24-1-18',\n session_class=self.session_class) # barcode check ON\n pr.start_session()\n pr_attributes=pr.get_project_attributes(project_igf_id='IGFP002_test1_24-1-18',\n attribute_name='barcode_check')\n for pr_attribute in pr_attributes.to_dict(orient='records'):\n self.assertEqual(pr_attribute['attribute_value'],'OFF')\n\n pr_attributes=pr.get_project_attributes(project_igf_id='IGFP003_test1_24-1-18',\n attribute_name='barcode_check')\n for pr_attribute in pr_attributes.to_dict(orient='records'):\n self.assertEqual(pr_attribute['attribute_value'],'OFF')\n pr.close_session()\n\n mark_project_barcode_check_off(project_igf_id='IGFP003_test1_24-1-18',\n session_class=self.session_class) # barcode check OFF\n pr.start_session()\n pr_attributes=pr.get_project_attributes(project_igf_id='IGFP003_test1_24-1-18',\n attribute_name='barcode_check')\n for pr_attribute in pr_attributes.to_dict(orient='records'):\n self.assertEqual(pr_attribute['attribute_value'],'OFF')\n pr.close_session()\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/utils/projectutils_test.py","file_name":"projectutils_test.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197034469","text":"#!/usr/bin/env python\n\n# This is a simple customized pdb that has no prompt and outputs the\n# name and line number of the file being debugged after each command.\n# It is appropriate for a IDE-like debugger package in a text editor,\n# such as Atom.\n# dominique.orban@gmail.com, 2016.\n\nimport os\nimport pdb\nimport sys\nimport traceback\n\nclass Restart(Exception):\n \"\"\"Causes a debugger to be restarted for the debugged python program.\"\"\"\n pass\n\n\nclass AtomPDB(pdb.Pdb):\n\n def __init__(self, **kwargs):\n kwargs.pop(\"stdout\", None)\n pdb.Pdb.__init__(self, stdout=sys.__stdout__, **kwargs)\n self.prompt = \"\"\n\n def do_locate(self, arg):\n # An interface can grep the file and line number to follow along.\n frame, lineno = self.stack[self.curindex]\n filename = self.canonic(frame.f_code.co_filename)\n print >> self.stdout, \"file::\", filename, \"\\nline::\", lineno\n\n def preloop(self):\n self.do_locate(1)\n\n def precmd(self, line):\n return line\n\n def postcmd(self, stop, line):\n return stop\n\n\ndef main():\n if not sys.argv[1:] or sys.argv[1] in (\"--help\", \"-h\"):\n print >> sys.__stdout__, \"atom_pdb.py script [args...]\"\n sys.exit(2)\n\n script = sys.argv[1]\n if not os.path.exists(script):\n sys.exit(1)\n del sys.argv[0]\n sys.path[0] = os.path.dirname(script)\n apdb = AtomPDB()\n while True:\n try:\n 
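# The surrounding while/try block is a restartable-debugger loop: Restart
# re-enters the loop, SystemExit and ordinary exceptions are reported (the
# latter with post-mortem), then the script is run again. A stripped-down
# model of that control flow (Python 3 spelling; run_target and wants_quit
# are hypothetical stand-ins for the pdb internals used below):
#
#     def run_with_restart(run_target, script, wants_quit):
#         while True:
#             try:
#                 run_target(script)
#                 if wants_quit():
#                     break           # user asked the debugger to quit
#             except Restart:
#                 continue            # re-run the script from the top
#             except SystemExit as exc:
#                 print("exit status:", exc.code)
#             except Exception:
#                 traceback.print_exc()   # post-mortem would follow here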
apdb._runscript(script)\n if apdb._user_requested_quit:\n break\n print >> sys.__stdout__, \"The program finished and will be restarted\"\n except Restart:\n print >> sys.__stdout__, \"Restarting\", script, \"with arguments:\"\n print >> sys.__stdout__, \" \".join(sys.argv[1:])\n except SystemExit:\n print >> sys.__stdout__, \"The program exited via sys.exit(). Exit status: \", sys.exc_info()[1]\n except Exception as inst:\n traceback.print_exc()\n print >> sys.__stdout__, \"Uncaught exception \", type(inst), \" ... entering post-mortem debugging\"\n print >> sys.__stdout__, \"Continue or Step will restart the program\"\n apdb.interaction(None, sys.exc_info()[2])\n print >> sys.__stdout__, \"Post-mortem debugging finished. \", script, \" will be restarted.\"\n\n\nif __name__ == \"__main__\":\n\n import atom_pdb\n atom_pdb.main()\n","sub_path":"resources/atom_pdb.py","file_name":"atom_pdb.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"124805238","text":"import unittest\n\nfrom nexusmaker import Record, NexusMaker, NexusMakerAscertained\nfrom nexusmaker import NexusMakerAscertainedWords\nfrom nexusmaker import CognateParser\n\nRECORDS = \"\"\"\nAiwoo-501\t132312\tfive\tvili\t1\nAiwoo-501\t133751\tleg\tnyike\t86\nAiwoo-501\t133752\tleg\tnuku\t86\nAiwoo-501\t208804\thand\tnyime\t1,66\nAiwoo-501\t208805\thand\tnyimä\t1,66\nBanoni-4\t1075\tleg\trapinna\t\t\nBanoni-4\t250221\tfive\tghinima\t1\nBanoni-4\t4\thand\tnuma-\t1,64\nDehu-196\t129281\tfive\ttripi\t1\nDehu-196\t196\thand\twanakoim\t\nEton-1088\t265408\tfive\te-lim\t1\nEton-1088\t278627\tleg\ttua-ŋ\t95\nHiw-639\t164951\thand\tmja-\t1,78\nHiw-639\t164952\tleg\tᶢʟoŋo-\t17\nHiw-639\t165135\tfive\ttəβɔjimə\t1\nIaai-471\t125656\thand\tbeñi-\t14\nIaai-471\t125657\thand\tHAND\t\nIaai-471\t125659\tleg\tca\t\nIaai-471\t125853\tfive\tbaa|xaca\t\nIaai-471\t125865\tfive\tthabyŋ\t\nLamogai-67\t83796\tfive\telmé\t1\nLamogai-67\t83881\thand\tmulǵu\t45\nLamogai-67\t83882\thand\tmelsé\t45\nLamogai-67\t83883\thand\tmilpí\t45\nLamogai-67\t83884\thand\tmelép\t45\nLamogai-67\t83885\thand\tmilpú\t45\nLamogai-67\t83886\thand\tmeylá\t45\nLamogai-67\t83887\thand\tmelsék\t45\nLamogai-67\t83942\tleg\tkaip\t1\nLamogai-67\t83943\tleg\tkaŋgú\t1\n\"\"\"\n\nRECORDS = [r.split(\"\\t\") for r in RECORDS.split(\"\\n\") if len(r)]\nCOMPLEX_TESTDATA = [\n Record(Language=r[0], Word=r[2], Item=r[3], Cognacy=r[4])\n for r in RECORDS\n]\n\nEXPECTED_COGNATES = {\n ('five', '1'): {\n 'Aiwoo-501', 'Banoni-4', 'Dehu-196', 'Eton-1088', 'Hiw-639',\n 'Lamogai-67'\n },\n ('leg', '86'): {'Aiwoo-501'},\n ('hand', '1'): {'Aiwoo-501', 'Banoni-4', 'Hiw-639'},\n ('hand', '64'): {'Banoni-4'},\n ('hand', '66'): {'Aiwoo-501'},\n ('leg', '95'): {'Eton-1088'},\n ('hand', '78'): {'Hiw-639'},\n ('leg', '17'): {'Hiw-639'},\n ('hand', '14'): {'Iaai-471'},\n ('hand', '45'): {'Lamogai-67'},\n ('leg', '1'): {'Lamogai-67'},\n}\n\nEXPECTED_UNIQUES = [\n ('leg', 'Banoni-4'),\n ('hand', 'Dehu-196'),\n ('leg', 'Iaai-471'),\n ('five', 'Iaai-471'),\n]\n\n\n\n\nclass TestNexusMaker(unittest.TestCase):\n model = NexusMaker\n # number of cognate sets expected\n expected_ncog = len(EXPECTED_COGNATES) + len(EXPECTED_UNIQUES)\n # number of characters expected in the nexus file\n expected_nchar = len(EXPECTED_COGNATES) + len(EXPECTED_UNIQUES)\n \n def setUp(self):\n self.maker = self.model(data=COMPLEX_TESTDATA)\n self.nex = self.maker.make()\n\n def test_languages(self):\n self.assertEqual(self.maker.languages, {\n 
'Aiwoo-501', 'Banoni-4', 'Dehu-196', 'Eton-1088', 'Hiw-639',\n 'Iaai-471', 'Lamogai-67'\n })\n\n def test_words(self):\n self.assertEqual(self.maker.words, {'hand', 'leg', 'five'})\n \n def test_ncognates(self):\n self.assertEqual(len(self.maker.cognates), self.expected_ncog)\n \n def test_cognate_sets(self): # pragma: no cover\n errors = []\n for ecog in EXPECTED_COGNATES:\n if ecog not in self.maker.cognates:\n errors.append(\"Missing %s\" % (ecog, ))\n elif self.maker.cognates.get(ecog, set()) != EXPECTED_COGNATES[ecog]:\n errors.append(\"Cognate set %s incorrect %r != %r\" % (\n ecog,\n EXPECTED_COGNATES[ecog],\n self.maker.cognates.get(ecog, set())\n ))\n\n if errors:\n raise AssertionError(\"Errors: %s\" % \"\\n\".join(errors))\n\n def test_uniques(self): # pragma: no cover\n errors = []\n obtained = [c for c in self.maker.cognates if 'u' in c[1]]\n expected = {e: 0 for e in EXPECTED_UNIQUES}\n # check what has been identified as unique\n for cog in obtained:\n if len(self.maker.cognates[cog]) != 1:\n errors.append(\"Unique cognate %s should only have one member\" % (cog, ))\n # make key to look up EXPECTED_UNIQUES as (word, language)\n key = (cog[0], list(self.maker.cognates[cog])[0])\n # error on anything that is not expected\n if key not in expected:\n errors.append(\"%s unexpectedly seen as unique\" % (key, ))\n else:\n expected[key] += 1\n\n # the counts for each expected cognate should be max 1.\n for e in expected:\n if expected[e] != 1:\n errors.append(\"Expected 1 cognate for %s, but got %d\" % (e, expected[e]))\n\n if errors:\n raise AssertionError(\"Errors: %s\" % \"\\n\".join(errors))\n\n def test_dehu_is_all_missing_for_leg(self):\n for cog in [cog for cog in self.nex.data if cog.startswith('leg_')]:\n assert self.nex.data[cog]['Dehu-196'] == '?'\n\n def test_eton_is_all_missing_for_hand(self):\n for cog in [cog for cog in self.nex.data if cog.startswith('hand_')]:\n assert self.nex.data[cog]['Eton-1088'] == '?'\n\n def test_only_one_unique_for_Iaai471(self):\n iaai = 0\n for cog in [cog for cog in self.nex.data if cog.startswith('five_u_')]:\n present = [t for t in self.nex.data[cog] if self.nex.data[cog][t] == '1']\n if present == ['Iaai-471']:\n iaai += 1\n\n if iaai != 1: # pragma: no cover\n raise AssertionError(\"Should only have one unique site for Iaai-471-five\")\n\n def test_nexus_symbols(self):\n assert sorted(self.nex.symbols) == sorted(['0', '1'])\n\n def test_nexus_taxa(self):\n self.assertEqual(self.maker.languages, self.nex.taxa)\n\n def test_nexus_characters_expected_cognates(self):\n for e in EXPECTED_COGNATES:\n assert \"_\".join(e) in self.nex.characters\n\n def test_nexus_characters_expected_uniques(self):\n uniques = [\n c for c in self.nex.characters if\n CognateParser().is_unique_cognateset(c, labelled=True)\n ]\n assert len(uniques) == len(EXPECTED_UNIQUES)\n\n def test_nexus_nchar(self):\n assert len(self.nex.characters) == self.expected_nchar\n \n def test_entries_with_a_cognate_word_arenot_added_as_unique(self):\n hand = [c for c in self.nex.characters if c.startswith('hand_')]\n hand = [c for c in hand if CognateParser().is_unique_cognateset(c, labelled=True)]\n assert len(hand) == 1, 'Only expecting one unique character for hand'\n assert self.nex.data['hand_u_2']['Iaai-471'] in ('0', '?'), \\\n 'Iaai-471 should not be unique for `hand`'\n \n\nclass TestNexusMakerAscertained(TestNexusMaker):\n model = NexusMakerAscertained\n expected_nchar = len(EXPECTED_COGNATES) + len(EXPECTED_UNIQUES) + 1\n\n\nclass 
TestNexusMakerAscertainedWords(TestNexusMaker):\n model = NexusMakerAscertainedWords\n expected_nchar = len(EXPECTED_COGNATES) + len(EXPECTED_UNIQUES) + 3\n\n\n","sub_path":"nexusmaker/tests/test_Complex.py","file_name":"test_Complex.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"43228207","text":"\nimport sys\nsys.path.append('e:\\\\art\\\\code\\\\deepArt-generation\\\\source')\n\n\nimport config, utils\nfrom scipy.misc import imsave,imresize\nimport numpy as np\nfrom skimage import io\nfrom skimage.transform import resize\nimport os\n\nimport matplotlib.pyplot as plt\n\nout_path = 'e:\\\\art\\\\wikiportraits'\npath = config.datafile('portrait-sel')\n# using directory with hand-selected images\n\nprint(\"Reading images...\")\n\nall_images_fn = [x for x in os.listdir(path) if x.endswith(\".jpg\") | x.endswith(\".png\") | x.endswith(\".jpeg\")]\n\nall_images = []\nratios = []\n\nfor fn in all_images_fn:\n n = int(fn[:fn.find('-')])\n if n>8000: continue\n try:\n im = io.imread(os.path.join(path, fn))\n if im.shape[0] >= 256 and im.shape[1] >=256:\n all_images.append(im)\n ratios.append(im.shape[0]/im.shape[1])\n except:\n print(\"Error reading {}\".format(fn))\n\nplt.hist(ratios,bins=30)\n\nsel_images = [x for x in all_images if 1.23 <= x.shape[0]/x.shape[1] <= 1.35]\nprint(len(sel_images))\n\nfor i,im in enumerate(sel_images):\n # im = resize(image=im, output_shape=(128,128), mode=\"reflect\")\n im = imresize(im, (128,128))\n imsave(os.path.join(out_path, str(i) + '.png'), im)\n\nsel_images = list(filter(lambda x: x.shape==(32,32,3),map(lambda x: imresize(x,(32,32)),sel_images)))\n\nnp.savez_compressed(os.path.join(out_path,'wikiportraits.npz'),imgs=sel_images)\n","sub_path":"source/process_portraits.py","file_name":"process_portraits.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502832926","text":"event = {u'Records': [{u'eventSource': u'aws:ses',\n u'eventVersion': u'1.0',\n u'ses': {u'mail': {u'commonHeaders': {u'date': u'Sat, 10 Oct 2015 22:07:11 +0000',\n u'from': [u'Felipe Gasparini '],\n u'messageId': u'',\n u'returnPath': u'xxx@gmail.com',\n u'subject': u'Fwd: assunto',\n u'to': [\n u'\"teste2@felipegasparini.com\" ']},\n u'destination': [u'teste2@felipegasparini.com'],\n u'headers': [{u'name': u'Return-Path',\n u'value': u''},\n {u'name': u'Received',\n u'value': u'from mail-ig0-f175.google.com (mail-ig0-f175.google.com [209.85.213.175]) by inbound-smtp.us-east-1.amazonaws.com with SMTP id a3tjl7dpbfqnhhl7oiaej4m5q0olr0c1qvt6glg1 for teste2@felipegasparini.com; Sat, 10 Oct 2015 22:07:22 +0000 (UTC)'},\n {u'name': u'Received',\n u'value': u'by igbni9 with SMTP id ni9so465796igb.1 for ; Sat, 10 Oct 2015 15:07:21 -0700 (PDT)'},\n {u'name': u'DKIM-Signature',\n u'value': u'v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20120113; h=mime-version:references:in-reply-to:from:date:message-id:subject:to :content-type; bh=PBfQGDhObxGAS+njunTub/mprGGP2YkQlJYRRbHr4Xs=; b=CIGLbGAmjqOfekYYZxg7upC+jhEfo2nbefnb0z6r7LES4VKgvuUdGh5sKoDAres8L6 euaaTtxrkruDCQAMB8mEneTg7HQaGm1Rl/KiDApVj6p+WFKK24UzxQu13fA7/Fa3rGfU KkN4N1AftUrVLHMOdQh2aNGF3LVjTl+HybkqGqXg159+a4qyBTMgvFCJarhDu47kaMZ4 jR6oDGR4VhdiN7rFceo1Ii7JI70GOo1D87H4zP0RwIMFwTZe2yH4+7qg4dOBVD32qqpd sv0KT+35PBjiXyD1xNnDS1B4eRGiLHWgoGp/V6VP0ezKEf56o5ofr5DFphS5hFyAvgBg CPMA=='},\n {u'name': u'X-Received',\n u'value': u'by 10.50.67.179 with SMTP 
id o19mr5245610igt.63.1444514841337; Sat, 10 Oct 2015 15:07:21 -0700 (PDT)'},\n {u'name': u'MIME-Version',\n u'value': u'1.0'},\n {u'name': u'References',\n u'value': u''},\n {u'name': u'In-Reply-To',\n u'value': u''},\n {u'name': u'From',\n u'value': u'Felipe Gasparini '},\n {u'name': u'Date',\n u'value': u'Sat, 10 Oct 2015 22:07:11 +0000'},\n {u'name': u'Message-ID',\n u'value': u''},\n {u'name': u'Subject',\n u'value': u'Fwd: assunto'},\n {u'name': u'To',\n u'value': u'\"teste2@felipegasparini.com\" '},\n {u'name': u'Content-Type',\n u'value': u'multipart/mixed; boundary=047d7bdca610fe0c350521c751c2'}],\n u'headersTruncated': False,\n u'messageId': u'a3tjl7dpbfqnhhl7oiaej4m5q0olr0c1qvt6glg1',\n u'source': u'xxx@gmail.com',\n u'timestamp': u'2015-10-10T22:07:22.216Z'},\n u'receipt': {u'action': {\n u'functionArn': u'arn:aws:lambda:us-east-1:979209632398:function:lambdaduty_email_handler',\n u'invocationType': u'Event',\n u'type': u'Lambda'},\n u'dkimVerdict': {u'status': u'PASS'},\n u'processingTimeMillis': 888,\n u'recipients': [u'teste2@felipegasparini.com'],\n u'spamVerdict': {u'status': u'PASS'},\n u'spfVerdict': {u'status': u'PASS'},\n u'timestamp': u'2015-10-10T22:07:22.216Z',\n u'virusVerdict': {u'status': u'PASS'}}\n\n }}]}\n\nescalation_policy = {\n 'name': 'default',\n 'policies': [\n {\n 'type': 'email',\n 'data': {\n 'to': 'xxx@gmail.com'\n }\n },\n {\n 'type': 'sms',\n 'data': {\n 'to': '55199999999'\n }\n },\n {\n 'type': 'phone_call',\n 'data': {\n 'to': '55199999999'\n }\n }\n ]\n}\n\nget_alert_handler_event = {\n 'params': {\n 'path': {\n 'id': '12312312321312312'\n }\n }\n}\n","sub_path":"sample_data.py","file_name":"sample_data.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512548461","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 qizai \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nThis is an implementation of the paper online NMF.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport time\nimport ipdb\nimport os\nimport argparse\nimport pycuda.autoinit\nimport pycuda.gpuarray as gpuarray\nimport skcuda.linalg as linalg\n# import cvxpy as cvx\nfrom functools import reduce\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LassoLars\nfrom sklearn.preprocessing import normalize\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import accuracy_score, adjusted_mutual_info_score\nfrom cluster_assignment_method import get_clustering_assignment_1, get_clustering_assignment_2\nfrom common_functions import get_g_hat_value, evaluation_clustering\nfrom common_functions import my_nmf_clustering, nmf_clustering\nfrom common_functions import geo_projection_to_cvx_cmb\n# from cvxpy_update_functions import update_D_hat_cvxpy, update_W_hat_cvxpy\nfrom pycuda_update_W_comparison import update_W_hat_skcuda, opt_cal_W_hat_numpy\nfrom pycuda_update_W_comparison import opt_cal_W_hat_solve, update_W_hat_numpy\nfrom online_NMF import online_dict_learning\nfrom convex_NMF import CNMF\nfrom visualization_NMF import plot_diff_method_scRNA\n\nimport sys\nsys.path.append('batch_setting')\nfrom batch_cvx_online_NMF import cvx_online_dict_learning_batch\n\ndef my_normalize(X):\n '''\n scale X to be in a unit ball\n X: n x m, n samples in row\n '''\n n_dim, m_dim = X.shape\n max_norm = max([np.linalg.norm(X[k, :]) for k in range(n_dim)])\n X_new = X / max_norm\n return X_new\n\n\ndef cvx_online_dict_learning(X, y_true, n_hat, k_cluster, T, lmda, eps, \n flag=True, version = 'Rr'):\n '''\n X: R^(n * m)\n y_true: str^n\n W_0: R^(n_hat * k)\n x_i : R^m\n alpha: R^k\n cvx_online problem \n min||x_i - X.T * W * alpha|| + lambda * ||alpha||\n\n in the online setting, there is no X in (n * m), \n instead, we need to store a candidate set and solve the subproblem:\n min ||x_i - X_hat * W_hat * alpha|| + lambda * ||alpha||\n\n X_hat : R^(m * n_hat)\n W_hat : R^(n_hat * k)\n\n version: Rr, restricted, heuristic approach\n Ru, uniform, random assignment\n '''\n n_dim, m_dim = X.shape\n\n A_t = np.zeros((k_cluster, k_cluster))\n B_t = np.zeros((m_dim, k_cluster))\n x_sum = 0\n alpha_sum = 0\n\n # step 1: sample n_hat * k_cluster points as initial X_hat.\n X_0 = np.zeros((m_dim, n_hat))\n for idx in range(n_hat):\n sample_idx = np.random.randint(0, n_dim)\n x_sample = X[sample_idx, :]\n X_0[:, idx] = x_sample\n\n\n # step 1: initialization, get X_hat (including clusters info)\n # and W_hat from X_0, using same init as in CNMF.\n # here representative_size_count is the n_1_hat, n_2_hat, ..., n_k_hat.\n t1 = time.time()\n X_hat, W_hat, representative_size_count = initialize_X_W_hat(X_0, k_cluster)\n X_0, W_0 = X_hat.copy(), W_hat.copy()\n t2 = time.time()\n # print('init cost {:.4f}'.format(t2 - t1))\n \n # step 2: after initialization of X_hat, update alpha, W_hat and X_hat alternatively.\n t_start = time.time()\n print(lmda, _NF, eps)\n for t in range(T):\n # t_start_online = time.time()\n if t % 50 == 0 and flag:\n D_t = np.matmul(X_hat, W_hat)\n tmp_assignment = get_clustering_assignment_1(X, D_t, k_cluster)\n tmp_acc, tmp_AMI = evaluation_clustering(tmp_assignment, y_true)\n print('1)iteration {}, distance acc = {:.4f}, AMI = 
{:.4f}'.format(t, tmp_acc, tmp_AMI))\n\n            tmp_assignment = get_clustering_assignment_2(X, D_t, k_cluster, lmda)\n            tmp_acc, tmp_AMI = evaluation_clustering(tmp_assignment, y_true)\n            print('2)iteration {}, kmeans of weights acc = {:.4f}, AMI = {:.4f}'.format(t, tmp_acc, tmp_AMI))\n            t_end = time.time()\n            print('time elapse = {:.4f}s'.format(t_end - t_start))\n            t_start = t_end\n\n            print('-' * 7)\n\n\n        sample_idx = np.random.randint(0, n_dim)\n        x_sample = X[sample_idx, :]\n\n        # update alpha\n        t1 = time.time()\n        lars_lasso = LassoLars(alpha = lmda, max_iter = 500)\n        D_t = np.matmul(X_hat, W_hat)\n        lars_lasso.fit(D_t, x_sample)\n        alpha_t = lars_lasso.coef_\n        t2 = time.time()\n        # print('lasso cost {:.4f}s'.format(t2 - t1))\n        \n        # using different clustering assignment\n        t1 = time.time()\n        if version == 'Rr':\n            cluster_of_x_i = np.argmax(alpha_t)\n        # elif version == 'Ru':\n        else:\n            cluster_of_x_i = int(np.random.uniform(0, k_cluster))\n        t2 = time.time()\n        # print('argmax alpha cost {:.4f}s'.format(t2 - t1))\n\n        t1 = time.time()\n        A_t += np.matmul(alpha_t.reshape(k_cluster, 1), alpha_t.reshape(1, k_cluster))\n        B_t += np.matmul(x_sample.reshape(m_dim, 1), alpha_t.reshape(1, k_cluster))\n        x_sum += (np.linalg.norm(x_sample) ** 2)\n        alpha_sum += lmda * np.linalg.norm(alpha_t, 1)\n        t2 = time.time()\n        # print('update At, Bt cost {:.4f}s'.format(t2 - t1))\n\n\n        # update X_hat\n        t1 = time.time()\n        W_hat, X_hat = update_W_X_hat(W_hat, X_hat, representative_size_count, x_sample, cluster_of_x_i, \n                A_t, B_t, x_sum, alpha_sum, t, eps)\n        t2 = time.time()\n        # print('update X_hat, W_hat cost {:.4f}s'.format(t2 - t1))\n\n    print('Dictionary update done! Time elapse {:.04f}s'.format(time.time() - t_start))\n\n    return W_hat, X_hat, representative_size_count, X_0, W_0\n\ndef initialize_X_W_hat(X_0, k_cluster):\n    '''\n    takes the initial collection of X and the number of clusters as input,\n    runs k-Means on it, and returns the sorted (by cluster) X_hat, W_hat,\n    and the number of points in each cluster, i.e. 
n_hat_i\n '''\n # this function takes the initialziation step of CNMF and gives a X_hat, W_hat\n # cluster X_hat, get X_hat, W_0 as output of some method, and assignment of X_0\n # kmeans works with row vector, however, X_0 is a column vec matrix.\n kmeans = KMeans(n_clusters = k_cluster, max_iter = 1000)\n kmeans.fit(X_0.T)\n X_hat_assignments = kmeans.labels_\n\n\n # now we need to classify the X_hat to X_1, X_2, X_3\n # by using a dictionary candidate_clusters\n candidate_clusters = {x:np.array([]) for x in set(X_hat_assignments)}\n for idx, label in enumerate(X_hat_assignments):\n if candidate_clusters[label].size == 0:\n candidate_clusters[label] = X_0[:, idx]\n else:\n candidate_clusters[label] = np.vstack((candidate_clusters[label], X_0[:, idx]))\n\n X_hat = np.array([])\n check_list = []\n sorted_assignment = []\n for label in candidate_clusters:\n candidate_clusters[label] = candidate_clusters[label].T\n shape_of_cluster = candidate_clusters[label].shape\n print('label {} has shape of: {}'.format(label, shape_of_cluster))\n check_list.append(shape_of_cluster[1])\n if X_hat.size == 0:\n X_hat = candidate_clusters[label]\n sorted_assignment = [label] * shape_of_cluster[1]\n else:\n X_hat = np.hstack((X_hat, candidate_clusters[label]))\n sorted_assignment += [label] * shape_of_cluster[1]\n\n sorted_assignment = np.array(sorted_assignment)\n\n # based on the CNMF paper, we start the initialization with fresh k-Means\n # H: R^{n * k} matrix, indicate the cluster assignments\n # centroids can be calculated as F = X*W*D^{-1}, Where D: R^{k * k} is the count diagonal matrix\n # then we can say W = H*D^{-1}\n m_dim, n_dim = X_hat.shape\n cluster_count = [len(np.where(X_hat_assignments == i)[0]) for i in range(k_cluster)]\n assert cluster_count == check_list\n \n D = np.zeros((k_cluster, k_cluster), int)\n for idx in range(k_cluster):\n D[idx][idx] = cluster_count[idx] + 1e-3\n\n H = np.zeros((n_dim, k_cluster), int)\n for idx in range(k_cluster):\n non_zero_idx = np.where(sorted_assignment == idx)[0]\n H[non_zero_idx, idx] = 1\n \n W_hat = np.matmul((H + np.ones(H.shape, int) * 0.2), np.linalg.inv(D))\n\n return X_hat, W_hat, cluster_count\n\n\ndef update_W_X_hat(W_hat, X_hat, repre_size_count, x_sample, cluster_of_x_i, \n A_t, B_t, x_sum, alpha_sum, t, eps):\n # add W_hat block diagonal constraint,\n # using projection.\n # linalg.init()\n\n # W_hat_gpu = gpuarray.to_gpu(W_hat.astype(np.float64))\n # tmp_x = np.ascontiguousarray(X_hat)\n # X_hat_gpu = gpuarray.to_gpu(tmp_x.astype(np.float64))\n # A_t_gpu = gpuarray.to_gpu(A_t.astype(np.float64))\n # B_t_gpu = gpuarray.to_gpu(B_t.astype(np.float64))\n\n\n cluster_seperation_idx = np.cumsum(repre_size_count)\n end_idx = cluster_seperation_idx[cluster_of_x_i]\n start_idx = end_idx - repre_size_count[cluster_of_x_i]\n A_t_inv = np.linalg.pinv(A_t)\n\n # W_opt_old_X = opt_cal_W_hat_numpy(W_hat, X_hat, A_t, B_t, x_sum, alpha_sum, eps, t)\n W_opt_old_X = opt_cal_W_hat_solve(W_hat, X_hat, A_t_inv, B_t, x_sum, alpha_sum, eps, t)\n g_hat_old_X = get_g_hat_value(t, W_opt_old_X, X_hat, A_t, B_t, x_sum, alpha_sum)\n\n # W_opt_old_X = update_W_hat_skcuda(W_hat_gpu, X_hat_gpu, A_t_gpu, B_t_gpu, \n # x_sum, alpha_sum, eps, t)\n # g_hat_old_X = get_g_hat_value(t, W_opt_old_X.get(), X_hat, A_t, B_t, x_sum, alpha_sum)\n\n list_of_W_opt_new_X = [W_opt_old_X]\n list_of_g_hat_new_X = [g_hat_old_X]\n list_of_new_X = [X_hat]\n\n # print('starting loop in update_W_X, total {}'.format(end_idx - start_idx))\n for idx in range(start_idx, end_idx):\n # print('iter # 
{}'.format(idx))\n t1 = time.time()\n X_hat_new = X_hat.copy()\n X_hat_new[:, idx] = x_sample\n list_of_new_X.append(X_hat_new)\n # tmp_x = np.ascontiguousarray(X_hat_new)\n # X_hat_new_gpu = gpuarray.to_gpu(tmp_x.astype(np.float64))\n t2 = time.time()\n # print('\\t update X_hat cost {:.4f}s'.format(t2 - t1))\n\n t1 = time.time()\n # W_opt_new_X = opt_cal_W_hat_numpy(W_hat, X_hat_new, A_t, B_t, x_sum, alpha_sum, eps, t)\n # W_opt_new_X = update_W_hat_numpy(W_hat, X_hat_new, A_t, B_t, x_sum, alpha_sum, eps, t)\n W_opt_new_X = opt_cal_W_hat_solve(W_hat, X_hat_new, A_t_inv, B_t, x_sum, alpha_sum, eps, t)\n g_hat_new_X = get_g_hat_value(t, W_opt_new_X, X_hat_new, A_t, B_t, x_sum, alpha_sum)\n\n # W_opt_new_X = update_W_hat_skcuda(W_hat_gpu, X_hat_new_gpu, A_t_gpu, B_t_gpu, \n # x_sum, alpha_sum, eps, t)\n # g_hat_new_X = get_g_hat_value(t, W_opt_new_X.get(), X_hat_new, A_t, B_t, x_sum, alpha_sum)\n t2 = time.time()\n # print('\\t update W_hat_new cost {:.4f}'.format(t2 - t1))\n\n t1 = time.time()\n list_of_W_opt_new_X.append(W_opt_new_X)\n list_of_g_hat_new_X.append(g_hat_new_X)\n t2 = time.time()\n # print('appending W_opt list cost {:.4f}s'.format(t2 - t1))\n\n min_g_idx = np.argmin(list_of_g_hat_new_X)\n\n X_hat_new = list_of_new_X[min_g_idx]\n W_hat_new = list_of_W_opt_new_X[min_g_idx]\n # if list_of_g_hat_new_X[min_g_idx] <= g_hat_old_X:\n # X_hat_new = X_hat.copy()\n # X_hat_new[:, start_idx + min_g_idx] = x_sample\n # # W_hat_new = list_of_W_opt_new_X[min_g_idx].get()\n # W_hat_new = list_of_W_opt_new_X[min_g_idx].copy()\n # else:\n # X_hat_new = X_hat.copy()\n # # W_hat_new = W_opt_old_X.get()\n # W_hat_new = W_opt_old_X.copy()\n\n\n return W_hat_new, X_hat_new\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--numIter', type=int, default=1200)\n parser.add_argument('--lmda', type=float, default=1e-1)\n parser.add_argument('--eps', type=float, default=1e-5)\n parser.add_argument('--normal_factor', '--NF', type=float, default=200)\n parser.add_argument('--file_name', type=str, default='tmp_pic')\n parser.add_argument('--dtype', type=str, default='scRNA',\n choices=['scRNA', 'synthetic', \n 'synthetic_1', 'synthetic_2'],\n help='synthetic1: well sep, 2: close cluster')\n parser.add_argument('--k_cluster', type=int, default=10)\n parser.add_argument('--csize', type=int, default=500,\n help='size of each cluster, integer, default 500')\n parser.add_argument('--candidate_size', type=int, default=15)\n parser.add_argument('--pca', type=int, default = 100)\n parser.add_argument('--numAver', type=int, default=1)\n args = parser.parse_args()\n\n # set number of iteration, lambda in lasso, epsilon in dictionary update and normalization factor\n print(args)\n numIter = args.numIter\n lmda = args.lmda\n eps = args.eps\n _NF = args.normal_factor\n file_name = args.file_name\n k_cluster = args.k_cluster\n cluster_size = args.csize\n candidate_set_size = args.candidate_size\n P_component = args.pca\n aver_num = args.numAver\n\n data_type = 'scRNA'\n\n # np.random.seed(42)\n data_root = '/home/jianhao2/'\n data_root_shared = '/data/shared/jianhao/'\n # df_file = os.path.join(data_root, 'pandas_dataframe')\n # feat_file = os.path.join(data_root, 'df_feature_column')\n if data_type == 'synthetic':\n k_cluster = 10\n df_name = 'df_synthetic_well_sep'\n fc_name = 'feature_column_synthetic_well_sep'\n df_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', df_name) \n feat_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', 
fc_name)\n elif data_type == 'synthetic_1':\n k_cluster = 10\n df_name = 'df_synthetic_disjoint_{}'.format(cluster_size)\n fc_name = 'feature_column_synthetic_disjoint_{}'.format(cluster_size)\n df_file = os.path.join(data_root_shared, 'synthetic_data', df_name) \n feat_file = os.path.join(data_root_shared, 'synthetic_data', fc_name)\n elif data_type == 'synthetic_2':\n k_cluster = 10\n df_name = 'df_synthetic_overlap_{}'.format(cluster_size)\n fc_name = 'feature_column_synthetic_overlap_{}'.format(cluster_size)\n df_file = os.path.join(data_root_shared, 'synthetic_data', df_name) \n feat_file = os.path.join(data_root_shared, 'synthetic_data', fc_name)\n elif data_type == 'scRNA':\n k_cluster = 10\n # df_name = 'pandas_dataframe_10_clusters_-1'\n # fc_name = 'df_feature_column_10_clusters_-1'\n df_name = 'pandas_dataframe_10_clusters_500'\n fc_name = 'df_feature_column_10_clusters_500'\n df_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', df_name)\n feat_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', fc_name)\n\n # np.random.seed(42)\n df = pd.read_pickle(df_file)\n with open(feat_file, 'rb') as f:\n feat_cols = pickle.load(f)\n X_raw = df[feat_cols].values\n X_raw = X_raw - np.min(X_raw) + 0.1\n Y = df['label'].values\n\n\n\n # # ----------------------------------------------------\n # X_for_nmf = normalize(X_raw) * _NF\n # D_nmf, label_nmf = nmf_clustering(X_for_nmf, k_cluster, numIter = 1000)\n # acc_nmf, AMI_nmf = evaluation_clustering(label_nmf, Y)\n\n # print(' ------ final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_nmf, AMI_nmf))\n\n\n # ----------------------------------------------------\n # use PCA to reduce X_raw to [num_of_cells * number of PCA componets]\n\n if P_component != -1:\n pca = PCA(n_components = P_component)\n # X_pca_all = pca.fit_transform(np.vstack((X_raw, D_nmf)))\n # X_pca = X_pca_all[:-k_cluster, :]\n\n X_pca = pca.fit_transform(X_raw)\n else:\n X_pca = X_raw\n pca_cols = ['Principle component {}'.format(i) for i in range(X_pca.shape[1])]\n\n\n\n # ----------------------------------------------------\n X = normalize(X_pca) * _NF\n # X = X_pca\n\n n_dim, m_dim = X.shape\n # ----------------------------------------------------\n # 1) online cvxMF, our algorithm. 
Rr version.\n n_hat = k_cluster * candidate_set_size\n t_ocmf = 0\n acc = 0\n acc_array = []\n for round_num in range(aver_num):\n t1 = time.time()\n # W_hat_tmp, X_hat_tmp, repre_size_count_tmp, X_0_tmp, W_0_tmp = cvx_online_dict_learning(X, Y, n_hat, k_cluster, \n # numIter, lmda, eps,\n # flag = False, version = 'Rr')\n W_hat_tmp, X_hat_tmp, repre_size_count_tmp, X_0_tmp, W_0_tmp = cvx_online_dict_learning_batch(X, Y, n_hat, k_cluster, \n numIter, lmda, eps, _NF,\n flag = False)\n t2 = time.time()\n t_ocmf += (t2 - t1)\n D_final_tmp = np.matmul(X_hat_tmp, W_hat_tmp)\n\n # clustered_label = get_clustering_assignment_1(X, D_final)\n clustered_label_ocmf = get_clustering_assignment_2(X, D_final_tmp, k_cluster, lmda)\n acc_tmp, AMI_tmp = evaluation_clustering(clustered_label_ocmf, Y)\n acc_array.append(acc_tmp)\n if acc_tmp >= acc:\n W_hat = W_hat_tmp\n X_hat = X_hat_tmp\n X_0 = X_0_tmp\n W_0 = W_0_tmp\n D_final = D_final_tmp\n acc = acc_tmp\n AMI = AMI_tmp\n repre_size_count = repre_size_count_tmp\n if acc >= 0.9:\n break\n acc_aver = np.mean(acc_array)\n t_ocmf_Rr = t_ocmf / (round_num + 1)\n print(' ------ ocmf final accuracy = {:.4f}, AMI = {:.4f}'.format(acc, AMI))\n\n df_centroids = pd.DataFrame(D_final.T, columns = pca_cols)\n df_centroids['label'] = ['ocmf: type {}'.format(x) for x in range(1, k_cluster + 1)]\n\n df_centroids.to_pickle('results_logging/ocmf_centroid_df')\n\n df_x_hat = pd.DataFrame(X_hat.T, columns = pca_cols)\n X_hat_set = ['group {}'.format(i) for i in range(k_cluster)]\n X_hat_label = []\n for idx in range(k_cluster):\n X_hat_label += [X_hat_set[idx]] * repre_size_count[idx]\n # ipdb.set_trace()\n df_x_hat['label'] = X_hat_label\n df_x_hat.to_pickle('results_logging/x_hat_df')\n \n # 2) online cvxMF, our algorithm. Ru version.\n n_hat = k_cluster * candidate_set_size\n t_ocmf_Ru = 0\n acc_Ru = 0\n acc_array = []\n for round_num in range(aver_num):\n t1 = time.time()\n W_hat_tmp, X_hat_tmp, repre_size_count_tmp, X_0_tmp, W_0_tmp = cvx_online_dict_learning(X, Y, n_hat, k_cluster, \n numIter, lmda, eps,\n flag = False, version = 'Ru')\n t2 = time.time()\n t_ocmf_Ru += (t2 - t1)\n D_final_tmp = np.matmul(X_hat_tmp, W_hat_tmp)\n\n # clustered_label = get_clustering_assignment_1(X, D_final)\n clustered_label_ocmf_Ru = get_clustering_assignment_2(X, D_final_tmp, k_cluster, lmda)\n acc_tmp, AMI_tmp = evaluation_clustering(clustered_label_ocmf_Ru, Y)\n acc_array.append(acc_tmp)\n if acc_tmp >= acc_Ru:\n W_hat_Ru = W_hat_tmp\n X_hat_Ru = X_hat_tmp\n X_0_Ru = X_0_tmp\n W_0_Ru = W_0_tmp\n D_final_Ru = D_final_tmp\n acc_Ru = acc_tmp\n AMI_Ru = AMI_tmp\n repre_size_count_Ru = repre_size_count_tmp\n if acc >= 0.9:\n break\n acc_aver_Ru = np.mean(acc_array)\n t_ocmf_Ru = t_ocmf_Ru / (round_num + 1)\n print(' ------ ocmf final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_Ru, AMI_Ru))\n\n df_centroids_Ru = pd.DataFrame(D_final_Ru.T, columns = pca_cols)\n df_centroids_Ru['label'] = ['ocmf: type {}'.format(x) for x in range(1, k_cluster + 1)]\n\n df_centroids_Ru.to_pickle('results_logging/ocmf_centroid_df')\n\n df_x_hat_Ru = pd.DataFrame(X_hat_Ru.T, columns = pca_cols)\n X_hat_set = ['group {}'.format(i) for i in range(k_cluster)]\n X_hat_label = []\n for idx in range(k_cluster):\n X_hat_label += [X_hat_set[idx]] * repre_size_count[idx]\n # ipdb.set_trace()\n df_x_hat_Ru['label'] = X_hat_label\n df_x_hat_Ru.to_pickle('results_logging/x_hat_df_Ru')\n \n # ----------------------------------------------------\n # 3) compare with online NMF in their paper\n # D_0 = np.random.randn(m_dim, 
k_cluster)\n # D_0 = np.absolute(D_0)\n # ipdb.set_trace()\n D_0 = (X_0 @ W_0).reshape(m_dim, k_cluster)\n # D_0 = normalize(D_0, axis = 0) * _NF\n\n acc_omf = 0\n AMI_omf = 0\n acc_omf_array = []\n t_omf = 0\n\n for round_num in range(aver_num):\n t1 = time.time()\n D_omf_final_tmp = online_dict_learning(X, lmda = lmda, D_0 = D_0, T = numIter, k_cluster = k_cluster, eps = eps, _NF = _NF)\n t2 = time.time()\n t_omf += (t2 - t1)\n\n clustered_label_omf = get_clustering_assignment_2(X, D_omf_final_tmp, \n k_cluster, lmda)\n acc_omf_tmp, AMI_omf_tmp = evaluation_clustering(clustered_label_omf, Y)\n acc_omf_array.append(acc_omf_tmp)\n if acc_omf_tmp >= acc_omf:\n D_omf_final = D_omf_final_tmp\n acc_omf, AMI_omf = acc_omf_tmp, AMI_omf_tmp\n if acc_omf >= 0.9:\n break\n acc_aver_omf = np.mean(acc_omf_array)\n t_omf = t_omf/(round_num + 1)\n\n print(' ------ onlineMF final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_omf, \n AMI_omf))\n\n df_centroids_omf = pd.DataFrame(D_omf_final.T, columns = pca_cols)\n df_centroids_omf['label'] = ['online MF cell type {}'.format(x) for x in range(1, k_cluster + 1)]\n df_centroids_omf.to_pickle('results_logging/omf_centroid_df')\n \n # ----------------------------------------------------\n df_final = pd.DataFrame(X, columns = pca_cols)\n df_final['label'] = Y\n df_final = df_final.append(df_x_hat)\n df_final = df_final.append(df_x_hat_Ru)\n\n df_final = df_final.append(df_centroids)\n df_final = df_final.append(df_centroids_Ru)\n df_final = df_final.append(df_centroids_omf)\n print('shape of df_final: ', df_final.shape)\n\n accuracy_dict = {\n 'omf': [acc_omf, AMI_omf],\n 'ocnmf_Ru': [acc, AMI],\n 'ocnmf_Rr': [acc_Ru, AMI_Ru],\n }\n\n size_of_cluster = n_dim//k_cluster\n tmp_type = Y[0]\n tmp_count = 0\n cluster_size_count = []\n for cur_type in Y:\n if cur_type == tmp_type:\n tmp_count += 1\n else:\n cluster_size_count.append(tmp_count)\n tmp_count = 1\n tmp_type = cur_type\n cluster_size_count.append(tmp_count)\n\n with open('scRNA_results/repre_size_count_Rr', 'wb') as f:\n pickle.dump(repre_size_count, f)\n with open('scRNA_results/repre_size_count_Ru', 'wb') as f:\n pickle.dump(repre_size_count_Ru, f)\n with open('scRNA_results/acc_dict', 'wb') as f:\n pickle.dump(accuracy_dict, f)\n with open('scRNA_results/cluster_size_count', 'wb') as f:\n pickle.dump(cluster_size_count, f)\n\n fig = plot_diff_method_scRNA(df_final, pca_cols, n_dim, k_cluster, \n accuracy_dict, repre_size_count, repre_size_count_Ru, \n size_of_cluster = None,\n cluster_size_count = cluster_size_count)\n\n tmp = 'May_23_test_fig_scRNA.png'\n save_File_name = tmp\n p2f = os.path.join(save_File_name)\n fig.savefig(p2f, dpi = 150,\n bbox_inces = 'tight')\n \n print('===' * 7)\n print('ocmf Rr takes {:.4f}s'.format(t_ocmf_Rr))\n print('ocmf Ru takes {:.4f}s'.format(t_ocmf_Ru))\n print('omf takes {:.4f}s'.format(t_omf))\n","sub_path":"scRNA_cvx_online.py","file_name":"scRNA_cvx_online.py","file_ext":"py","file_size_in_byte":23319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585951727","text":"import pathlib\nimport collections\n\n\ndata = pathlib.Path(\"inputs/9.txt\").read_text().splitlines()[0].split(\" \")\nnumPlayers = int(data[0])\nnumMarbles = int(data[6])\n\nscores = collections.defaultdict(int)\ncircle = collections.deque([0])\nfor marble in range(1, numMarbles + 1):\n if marble % 23 == 0:\n circle.rotate(7)\n scores[marble % numPlayers] += marble + circle.popleft()\n else:\n circle.rotate(-2)\n 
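# The deque above models the marble circle with the current marble at index
# 0: rotate(-2) shifts the circle left so the slot two positions clockwise
# becomes the head, and appendleft() drops the new marble there, an O(1)
# replacement for list.insert(). Worked trace for marble 3 (the state after
# marble 2 is deque([2, 1, 0])):
#
#     rotate(-2)    -> deque([0, 2, 1])
#     appendleft(3) -> deque([3, 0, 2, 1])
#
# In the marble % 23 == 0 branch, rotate(7) walks seven marbles
# counter-clockwise so popleft() removes the scoring marble.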
circle.appendleft(marble)\n\nprint(max(scores.values()))\n","sub_path":"Day 9_1.py","file_name":"Day 9_1.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212918005","text":"from datetime import datetime, timedelta\nimport re\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom argparse import ArgumentParser\n\n\ndef get_parser_of_command_line():\n parser = ArgumentParser(description='Settings search movies')\n\n parser.add_argument('-d', '--day', nargs='?',\n help='The number of days in cinemas',\n default=21, type=int)\n parser.add_argument('-c', '--count', nargs='?',\n help='The number of cinemas which show the film',\n default=30, type=int)\n parser.add_argument('-r', '--rating', nargs='?',\n help='Minimal movie rating',\n default=3.00, type=float)\n return parser.parse_args()\n\n\ndef get_date_for_search(delta_days):\n current_date = datetime.now().date()\n initial_date = (datetime.now() - timedelta(days=delta_days)).date()\n return current_date, initial_date\n\n\ndef fetch_afisha_page():\n url_afisha = 'https://www.afisha.ru/msk/schedule_cinema/'\n raw_html = requests.get(url_afisha).content\n return raw_html\n\n\ndef parse_afisha_list(afisha_html, good_count_cinemas):\n content = bs(afisha_html, 'html.parser')\n content_movies = content('div',\n {'class': 'object s-votes-hover-area collapsed'})\n info_movies_afisha = []\n for movie in content_movies:\n title = movie('h3', {'class': 'usetags'})[0].get_text()\n count_cinemas = len(movie('td',{'class': 'b-td-item'},'a'))\n if int(count_cinemas) >= good_count_cinemas:\n title_and_count_cinemas = {'title': title,\n 'count_cinemas': count_cinemas}\n info_movies_afisha.append(title_and_count_cinemas)\n return info_movies_afisha\n\n\ndef fetch_movie_info(last_month, current_month):\n url_premiers_of_month = 'https://www.kinopoisk.ru/premiere/ru/2017/month/{}/'\n url_after_scroll = 'page/1/ajax/true/'\n if current_month != last_month:\n last_month_content = requests.get(\n url_premiers_of_month.format(last_month)).content\n last_month_content_scroll = requests.get(\n url_premiers_of_month.format(last_month) +\n url_after_scroll).content\n current_month_content = requests.get(\n url_premiers_of_month.format(current_month)).content\n current_month_content_scroll = requests.get(\n url_premiers_of_month.format(current_month) +\n url_after_scroll).content\n full_content = last_month_content + current_month_content + \\\n current_month_content_scroll + last_month_content_scroll\n else:\n current_month_content = requests.get(\n url_premiers_of_month.format(current_month)).content\n current_month_content_scroll = requests.get(\n url_premiers_of_month.format(current_month) +\n url_after_scroll).content\n full_content = current_month_content + current_month_content_scroll\n return full_content\n\n\ndef get_films_in_kinopoisk(kinopoisk_content,\n initial_date,\n current_date,\n good_rate):\n content = bs(kinopoisk_content, 'html.parser')\n content_movies = content.find_all('div', {'class': 'premier_item'})\n info_movies_kinopoisk = []\n for movie in content_movies:\n title = movie.find('span', {'class': 'name'}).text\n start_date = movie.find('meta').get('content')\n try:\n rating = movie.find('u').text.split()[0]\n except AttributeError:\n continue\n if re.search('\\W.',rating) is None:\n continue\n start_date = datetime.strptime(start_date,\"%Y-%m-%d\").date()\n if current_date >= start_date >= initial_date and float(rating) >= good_rate:\n 
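# Note: `current_date >= start_date >= initial_date` above is a chained
# comparison, equivalent to (current_date >= start_date) and
# (start_date >= initial_date), so a single expression keeps only premieres
# that fall inside the [initial_date, current_date] window.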
title_and_rate = {'title': title, 'rate': rating}\n info_movies_kinopoisk.append(title_and_rate)\n return info_movies_kinopoisk\n\n\ndef get_pop_movies(list_afisha, list_kinopoisk):\n common_info_list = []\n for movie_afisha in list_afisha:\n for movie_kinopoisk in list_kinopoisk:\n if movie_afisha['title'] == movie_kinopoisk['title']:\n movie_afisha.update(movie_kinopoisk)\n common_info_list.append(movie_afisha)\n return common_info_list\n\n\ndef output_movies_to_console(common_info_list):\n for film in common_info_list[:10]:\n movie = 'Film: {title}'.format(**film)\n rate = 'Kinopoisk rating: {rate}'.format(**film)\n count_cinemas = 'Show in {count_cinemas} cinemas in Moscow'.format(**film)\n print(movie)\n print(rate)\n print(count_cinemas + '\\n')\n\n\nif __name__ == '__main__':\n user_settings = get_parser_of_command_line()\n delta_days = user_settings.day\n good_rate = user_settings.rating\n good_count_cinemas = user_settings.count\n current_date, initial_date = get_date_for_search(delta_days)\n current_month = datetime.today().month\n last_month = initial_date.month\n kinopoisk_content = fetch_movie_info(last_month,current_month)\n list_kinopoisk = get_films_in_kinopoisk(kinopoisk_content,\n initial_date,\n current_date,\n good_rate)\n afisha_content = fetch_afisha_page()\n list_afisha = parse_afisha_list(afisha_content, good_count_cinemas)\n movies_list = get_pop_movies(list_afisha, list_kinopoisk)\n output_movies_to_console(movies_list)","sub_path":"cinemas.py","file_name":"cinemas.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389605784","text":"\nfrom blockchain import Blockchain\nfrom utility.verification import Verification\nfrom wallet import Wallet\n\nclass Node():\n def __init__(self):\n self.wallet = Wallet()\n self.chain = None\n \n\n def run_mode(self):\n waiting = True\n while waiting:\n print('Please enter an input ')\n print('1: Add new Transaction Value')\n print('2: Mine the block')\n print('3: Output the blockchain block')\n print('4: to verify transactions.')\n print('5: to create a wallet.')\n print('6: to load wallet.')\n print('quit: to litterally Quit ')\n user_choice = self.get_user_choice()\n if user_choice == '1':\n txt_out = self.get_transaction_input()\n reciever,coin = (txt_out)\n if self.chain.add_transaction(reciever,coin,self.wallet.public_key):\n print('Transaction completed')\n else:\n print('Transaction Failed!!')\n print(self.chain.get_otx())\n elif user_choice == '2':\n if not self.chain.mine_block():\n print(\"Sorry you can't mine a block right now,Please create a wallet.\")\n elif user_choice == '3':\n self.display_blockchains()\n elif user_choice == '4':\n if Verification.verify_openTransaction(self.chain.get_otx(),self.chain.get_balance):\n print('All Transactions are valid...')\n else:\n print('The Transactions are invalid')\n elif user_choice == '5':\n self.wallet.create_key()\n self.chain = Blockchain(self.wallet.public_key)\n elif user_choice == '6':\n pass\n elif user_choice == 'quit':\n break\n else:\n print('Enter a Valid choice!')\n if not Verification.verify_chain(self.chain.blockchain):\n self.display_blockchains()\n print('Invalid Blockchain')\n break\n print('Balance of {} is {:.6f}'.format(self.wallet.public_key,self.chain.get_balance(self.wallet.public_key)))\n \n\n #print('Transactions completed')\n\n else:\n print('Event finished')\n \n print('Done!')\n\n\n def display_blockchains(self):\n # This displays Elements in the 
blockchain\n count = 0\n\n for bl in self.chain.blockchain:\n print('block '+str(count))\n print(bl)\n count += 1\n \n\n def get_user_choice(self):\n user_input = input('Enter your choice: ')\n return user_input\n\n\n # gets inputs of the user to add to the blockchain\n def get_transaction_input(self):\n trans_recipient = input('Enter a recipient name: ')\n trans_amount = float(input('Enter valid transaction: '))\n return trans_recipient,trans_amount\n\n \nif __name__ == \"__main__\":\n node = Node()\n node.run_mode()","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"251347027","text":"from pyramid.response import Response\r\nfrom pyramid.view import view_config\r\nfrom pyramid.httpexceptions import HTTPFound\r\nfrom pyramid.renderers import get_renderer\r\nfrom pyramid.url import resource_url\r\nfrom pyramid.security import remember\r\nfrom pyramid.security import forget\r\nfrom pyramid.security import has_permission\r\nfrom datetime import datetime \r\nfrom sqlalchemy.exc import DBAPIError\r\nimport colander\r\nfrom osipkd.tools import BULANS\r\nfrom ..models import (\r\n DBSession,\r\n UserResourcePermission,\r\n Resource,\r\n User,\r\n )\r\n\r\nfrom datetime import (datetime, date)\r\n\r\nfrom pyjasper.client import JasperGenerator\r\n\r\nclass BaseViews(object):\r\n def __init__(self, context, request):\r\n self.context = context\r\n self.request = request\r\n self.session = request.session\r\n self.cust_nm = \"PEMERINTAH KABUPATEN/KOTA DEMO\"\r\n\r\n cday = datetime.today() \r\n if not 'tahun' in self.session:\r\n self.session['tahun'] = cday.strftime('%Y')\r\n self.session['tahun'] = cday.strftime('%Y')\r\n \r\n if not 'bulan' in self.session:\r\n self.session['bulan'] = str(cday.month).zfill(2)\r\n\r\n if not 'tanggal' in self.session:\r\n self.session['tanggal'] = datetime.date(cday)\r\n\r\n # if cday.month ==12:\r\n # self.session['bulan'] = '01'\r\n # self.session['tahun'] = str(cday.year + 1)\r\n # else:\r\n \r\n\r\n if not 'unit_id' in self.session:\r\n self.session['unit_id'] = 0 #No tahun datetime.strftime(datetime.now(),'%Y')\r\n if not 'all_unit' in self.session:\r\n self.session['all_unit'] = 0 # no status\r\n if not 'unit_kd' in self.session:\r\n self.session['unit_kd'] = \"\"\r\n if not 'unit_nm' in self.session:\r\n self.session['unit_nm'] = \"\"\r\n if not 'cust_nm' in self.session:\r\n self.session['cust_nm'] = self.cust_nm\r\n \r\n if not 'rekening_kd' in self.session:\r\n self.session['rekening_kd'] = \"\"\r\n if not 'rekening_nm' in self.session:\r\n self.session['rekening_nm'] = \"\"\r\n \r\n # Inisiasi tahun anggaran\r\n ########################################################################\r\n #remark in production \r\n #self.session['bulan'] = '01'\r\n #self.session['unit_id'] = 9\r\n #self.session['unit_kd'] = '4027.114'\r\n #self.session['unit_nm'] = 'DINAS DEMO'\r\n ########################################################################\r\n self.tahun = self.session['tahun']\r\n self.bulan = self.session['bulan']\r\n self.unit_id = self.session['unit_id']\r\n self.all_unit = self.session['all_unit']\r\n \r\n self.d = {}\r\n self.d['success'] = False \r\n self.d['msg']='Hak akses dibatasi'\r\n\r\n self.unit_kd = self.session['unit_kd']\r\n self.unit_nm = self.session['unit_nm']\r\n #default datas \r\n \"\"\"self.datas={}\r\n self.datas['tahun'] = self.tahun\r\n self.datas['bulan'] = self.bulan\r\n 
self.datas['unit_id'] = self.unit_id\r\n self.datas['all_unit'] = self.all_unit\r\n self.datas['unit_kd'] = self.session['unit_kd']\r\n self.datas['unit_nm'] = self.session['unit_nm']\r\n permission = UserResourcePermission()\r\n permission.perm_name = \"read\"\r\n permission.user_name = \"aagusti\"\r\n #resource = DBSession.query(User).filter_by(id=1)\r\n resource = Resource()\r\n resource.resource_name = 'GAJI'\r\n resource.resource_type = '1'\r\n \r\n DBSession.add(resource)\r\n request.user.resources.append(resource)\r\n \"\"\"\r\n \r\n \r\n\r\n def _DTstrftime(self, chain):\r\n ret = chain and datetime.strftime(chain, '%d-%m-%Y')\r\n if ret:\r\n return ret\r\n else:\r\n return chain\r\n \r\n def _number_format(self, chain):\r\n import locale\r\n locale.setlocale(locale.LC_ALL, 'id_ID.utf8')\r\n ret = locale.format(\"%d\", chain, grouping=True)\r\n if ret:\r\n return ret\r\n else:\r\n return chain\r\n \r\n@view_config(route_name='change-act', renderer='json', permission='view')\r\ndef change_act(request):\r\n ses = request.session\r\n req = request\r\n params = req.params\r\n url_dict = req.matchdict\r\n \r\n if url_dict['act']=='tahun':\r\n ses['tahun'] = 'tahun' in params and params['tahun'] or '2014'\r\n if 'bulan' in params:\r\n ses['bulan'] = params['bulan'] or '12'\r\n return {'success':True, 'msg':'Sukses Ubah Tahun'}\r\n \r\n elif url_dict['act']=='tanggal':\r\n ses['tanggal'] = 'tanggal' in params and datetime.strptime(params['tanggal'],'%Y-%m-%d') or datetime.date(datetime.now())\r\n ses['tanggal2'] = 'tanggal2' in params and datetime.strptime(params['tanggal2'],'%Y-%m-%d') or ses['tanggal']\r\n return {'success':True, 'msg':'Sukses Ubah Tanggal'}\r\n \r\n elif url_dict['act']=='bulan':\r\n ses['bulan'] = 'bulan' in params and params['bulan'] or '12'\r\n return {'success':True, 'msg':'Sukses Ubah Bulan menjadi %s' % BULANS[ses['bulan']]}\r\n ","sub_path":"osipkd/views/base_view.py","file_name":"base_view.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518097625","text":"\nimport datetime\nimport logging\nimport xml.etree.cElementTree as ET\n\n\n# /////////////////////////////////////////////////////////////////\n#\n# Parseador encargado de traducir los diagramas en xml\n#\n#\n#\nclass Parser:\n def __init__(self,\n diagram,\n filename):\n self.diagram = diagram\n self.filename = filename\n\n def setAuthorDate(self,\n autor):\n self.autor = autor\n self.date = str(datetime.datetime.today()).split()[0]\n\n def parse_relation(self,\n relations,\n diagram,\n di):\n # modificamos los targets y sources eliminando los extends\n for rel in relations:\n re = ET.SubElement(diagram, \"relationship\", name=rel.name)\n st1 = \"\"\n st2 = \"\"\n\n for so in rel.sources:\n st1 += so + \", \"\n ET.SubElement(re, \"source\").text = st1[0:len(st1) - 2]\n for so in rel.targets:\n st2 += so + \", \"\n ET.SubElement(re, \"target\").text = st2[0:len(st2) - 2]\n\n def toXML(self):\n root = ET.Element(\"filter\")\n logging.info(\"Creating new root\")\n\n name = ET.SubElement(root, \"name\").text = \"DPDF Model\"\n author = ET.SubElement(root, \"author\").text = self.autor\n date = ET.SubElement(root, \"date\").text = self.date\n logging.info(\"Meta:name: \" +\n name +\n \" autor: \" +\n author +\n \" date: \" +\n date)\n diagrams = ET.SubElement(root, \"diagrams\")\n\n for di in self.diagram:\n\n logging.info(\"new diagram: \" +\n di.name)\n diagram = ET.SubElement(diagrams,\n \"diagram\",\n 
name=di.name)\n            for et in di.entities:\n                if et.abstract is not True:\n                    ET.SubElement(diagram, \"entity\").text = et.name\n            ET.SubElement(diagram, \"entity\").text = \"TextNote\"\n            ET.SubElement(diagram, \"entity\").text = \"UMLComment\"\n            self.parse_relation(di.complexRelations, diagram, di)\n\n        tree = ET.ElementTree(root)\n        logging.info(\"writing on: \" +\n                     self.filename)\n        tree.write(self.filename)\n","sub_path":"app/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"39645396","text":"# ROS imports\nimport rclpy\nfrom rclpy.node import Node\nfrom std_msgs.msg import Int8MultiArray, Float64MultiArray\nimport numpy as np\n\n# temperature threshold\ntemp_thres = 30\nradius_thres = 7\nresolution = 64\n\n\nclass CommandNode(Node):\n\n    def __init__(self):\n        super().__init__('com_node')\n        self.publisher_ = self.create_publisher(Int8MultiArray, 'com_node', 10)\n        self.heat_subscription = self.create_subscription(Float64MultiArray, 'heat_array', self.callback, 10)\n        self.heat_subscription # prevent unused variable warning\n\n    def callback(self, heat_array):\n        coordinates = get_bright_loc(heat_array.data)\n        self.get_logger().info(f'Coordinates: X:{coordinates[0]}, Y:{coordinates[1]}')\n        comm_array = Int8MultiArray()\n        comm_array.data = command(coordinates)\n        self.publisher_.publish(comm_array)\n        self.get_logger().info(\n            f'Direction: X:{comm_array.data[0]}, Y:{comm_array.data[1]}, Active:{comm_array.data[2]}')\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n    command_pub = CommandNode()\n    rclpy.spin(command_pub)\n\n    # Destroy the node explicitly\n    # (optional - otherwise it will be done automatically\n    # when the garbage collector destroys the node object)\n    command_pub.destroy_node()\n    rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Target Detection/command_pub_test.py","file_name":"command_pub_test.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"523920032","text":"from fastapi import FastAPI, File, UploadFile, Header, HTTPException, Depends\nfrom fastapi.security.api_key import APIKeyHeader\nimport uvicorn\nfrom pydantic import BaseModel\nimport hashlib\nfrom db import Database\n\napp = FastAPI()\ndatabase = Database('carbery_db.db')\nx_auth_user = APIKeyHeader(\n    name=\"X-Auth-User\",\n    auto_error=False\n)\n\n\nclass FileHash(BaseModel):\n    userId: str\n    filename: str\n    sha256: str\n    md5: str\n\n\nasync def check_auth(\n    x_auth_user: str = Depends(x_auth_user)\n):\n    if not x_auth_user:\n        raise HTTPException(\n            status_code=401,\n            detail='Не авторизован.'\n        )\n\n    return x_auth_user\n\n\n@app.get(\n    '/file_hashes/{hash}',\n    response_model=FileHash,\n    summary='Получение имени файла по хэшу.',\n    status_code=200,\n    response_description='OK'\n)\nasync def get_filename(hash: str):\n    values = database.get_value(\n        '''SELECT * FROM carbery_table\n        WHERE sha256=? OR md5=?''',\n        (hash, hash)\n    )\n    if not values:\n        raise HTTPException(\n            status_code=404,\n            detail='Файлы с данным хэшем не найдены.'\n        )\n    return {\n        'userId': values.get('userId'),\n        'filename': values.get('filename'),\n        'sha256': values.get('sha256'),\n        'md5': values.get('md5')\n    }\n\n\n@app.delete(\n    '/file_hashes/{hash}',\n    response_model=FileHash,\n    summary='Удаление хэшей файлов.',\n    status_code=200,\n    response_description='OK'\n)\nasync def delete_hash(\n    hash: str,\n    x_auth_user: str = Depends(check_auth)\n):\n    values = database.get_value(\n        '''SELECT * FROM carbery_table\n        WHERE userId=? and (sha256=? or md5=?)''',\n        (x_auth_user, hash, hash)\n    )\n    if not values:\n        raise HTTPException(\n            status_code=404,\n            detail='Файлы с данным хэшем не найдены.'\n        )\n    database.del_value((x_auth_user, hash, hash))\n\n\n@app.post(\n    '/file_hashes',\n    response_model=FileHash,\n    summary='Добавление хэша файла.',\n    status_code=201,\n    response_description='OK'\n)\nasync def add_hash(\n    file: UploadFile = File(...),\n    x_auth_user: str = Depends(check_auth)\n):\n    userId = x_auth_user\n    filename = file.filename\n    file = file.file.read()\n    md5 = hashlib.md5(file).hexdigest()\n    sha256 = hashlib.sha256(file).hexdigest()\n    database.add_value((x_auth_user, filename, sha256, md5))\n    return {\n        'userId': x_auth_user,\n        'filename': filename,\n        'sha256': sha256,\n        'md5': md5\n    }\n\nif __name__ == \"__main__\":\n    uvicorn.run(\n        'main:app',\n        host='localhost',\n        port=3000,\n        access_log=False\n    )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"624814041","text":"import sys\nfrom PyQt5.QtCore import pyqtSignal, QSize, Qt\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import (QWidget, QSlider, QApplication, \n    QHBoxLayout, QVBoxLayout,QLabel)\n\nclass MyWidget(QWidget):\n\n    clicked = pyqtSignal()\n    \n    def __init__(self, parent = None):\n        \n        QWidget.__init__(self, parent)\n        self.color = QColor(0, 0, 0)\n        \n        self.lb1=QLabel(self)\n        self.lb1.move(10,10)\n        self.lb1.setText('label 1')\n        self.lb1.adjustSize() \n        \n\n        self.sl1=QSlider(Qt.Horizontal,self)\n        self.sl1.move(50,50)\n        self.sl1.setRange(0,10)\n        self.sl1.setSingleStep(1)\n        self.sl1.valueChanged[int].connect(self.changeValue)\n\n    def paintEvent(self, event):\n        \n        painter = QPainter()\n        painter.begin(self)\n        painter.fillRect(event.rect(), QBrush(self.color))\n        painter.end()\n        \n        \n    def mousePressEvent(self, event):\n        \n        self.setFocus(Qt.OtherFocusReason)\n        event.accept()\n        \n    def mouseReleaseEvent(self, event):\n        \n        if event.button() == Qt.LeftButton:\n            \n            self.color = QColor(self.color.green(), self.color.blue(),\n                127 - self.color.red())\n            print(self.color)\n            self.update()\n            self.clicked.emit()\n            event.accept()\n        \n    def sizeHint(self):\n        \n        return QSize(200, 200)\n    \n    def changeValue(self,value):\n        print(value)\n        #self.lbl3.setText(str(value))\n        #self.lbl3.adjustSize()\n\nif __name__ == \"__main__\":\n\n    app = QApplication(sys.argv)\n    window = QWidget()\n    \n    mywidget = MyWidget()\n    label = QLabel('hi hi')\n    \n    #mywidget.clicked.connect(label.clear)\n    mywidget.clicked.connect(label.clear)\n    \n    layout = QVBoxLayout()\n    layout.addWidget(mywidget)\n    layout.addWidget(label)\n    window.setLayout(layout)\n    \n    window.show()\n    sys.exit(app.exec_())\n","sub_path":"pyqt/pyqt4.py","file_name":"pyqt4.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"117371039","text":"#-*- coding: utf-8 -*-\nimport csv\nimport random\nprint(\"Lecture du Fichier CSV\")\n\n#cr = csv.reader(open(\"liste.csv\",\"rb\"))\nifile = open('liste.csv', \"r\")\nc = csv.writer(open(\"matière.csv\", \"wb\"), delimiter=';')\nread = csv.reader(ifile, delimiter=';')\nfor row in read:\n\tc.writerow([row[0],row[1],random.randint(0,20)])","sub_path":"dev/lec_csv.py","file_name":"lec_csv.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209767788","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport codecs\nimport collections\nimport os\nfrom six.moves import cPickle\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef identity(data):\n return data\n\nclass text_data:\n def __init__ (self,\n input_file,\n line_processor=identity, # any preprocessing for the line read\n line_splitter=identity, # should the line be split into words\n # or characters or something else?\n max_voc=40000):\n\n vocab_file = input_file + \".vocab.pkl\"\n\n self.line_processor = line_processor\n self.line_splitter = line_splitter\n\n if not os.path.exists(vocab_file):\n print(\"creating new vocab information\")\n worddict = collections.Counter()\n with codecs.open(input_file, 'r', 'utf-8') as infile:\n for line in infile:\n line = line_splitter(line_processor(line))\n# print (line)\n for w in line:\n worddict[w] += 1\n\n # yeehaw... magic numbers ;)\n worddict['<>'] = 999999999\n sorted_words = (sorted(worddict.items(), key=lambda x: (-x[1], x[0])))[:max_voc]\n self.id_to_word, _ = list(zip(*sorted_words))\n\n self.word_to_id = dict(zip(self.id_to_word, range(len(self.id_to_word))))\n\n with open(vocab_file, 'wb') as f:\n cPickle.dump(self.id_to_word, f)\n cPickle.dump(self.word_to_id, f)\n\n else:\n print(\"loading vocab information\")\n with open(vocab_file, 'rb') as f:\n self.id_to_word = cPickle.load(f)\n self.word_to_id = cPickle.load(f)\n print (\"ids: %d\" % len(self.id_to_word))\n print (\"words: %d\" % len(self.word_to_id))\n self.unk_id = self.word_to_id['<>']\n print (\"<> id is: %d\" % self.unk_id)\n\n\n def vocab_size(self):\n return len(self.word_to_id)\n\n\n def id(self, word):\n\n return self.word_to_id.get(word, self.unk_id)\n\n def word(self, idt):\n if len(self.id_to_word) > idt:\n return self.id_to_word[idt]\n else:\n return '<>'\n\n\n def idize_file(self, input_file):\n file_data = []\n with codecs.open(input_file, 'r', 'utf-8') as infile:\n for line in infile:\n file_data.append([self.id(w) for w in\n self.line_splitter(self.line_processor(line))])\n\n return file_data\n\n def iterator(self, data, batch_size, num_steps): # careful! 
this one does flattening!\n        # end of line information is lost\n        # it's heavily based on\n        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py\n        flattened_list = [y for x in data for y in x]\n        npdata = np.array(flattened_list, dtype=np.int32)\n\n        data_length = len(npdata)\n        batch_length = data_length // batch_size\n\n        dat = np.zeros([batch_size, batch_length], dtype=np.int32)\n        for i in range(batch_size):\n            dat[i] = npdata[batch_length * i:batch_length*(i+1)]\n\n        epoch_size = (batch_length - 1) // num_steps\n\n        if epoch_size == 0:\n            raise ValueError(\"epoch_size == 0, decrease batch_size or num_steps\")\n\n        for i in range(epoch_size):\n            x = dat[:, i*num_steps:(i+1)*num_steps]\n            y = dat[:, i*num_steps+1:(i+1)*num_steps+1]\n            yield (x, y)\n","sub_path":"libs/read_text.py","file_name":"read_text.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"285160106","text":"from PIL import Image\nimport pytesseract\nfrom pytesseract import image_to_string\nimport os\nimport cv2\nimport re\nfrom autocorrect import spell\nimport string\nfrom models import *\n\ndb_pre_ocr = db('pre_ocr')\ndb_post_ocr = db('post_ocr')\n\n\ndef extractText(image_path):\n    try:\n        image = cv2.imread(image_path)\n        if image is None:\n            return \"00000\"\n        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n        gray = cv2.threshold(\n            gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n        gray = cv2.medianBlur(gray, 3)\n\n        # write the grayscale image to disk as a temporary file so we can\n        # apply OCR to it\n        filename = \"scraper/{}.png\".format(os.getpid())\n        cv2.imwrite(filename, gray)\n\n        # load the image as a PIL/Pillow image, apply OCR, and then delete\n        # the temporary file\n        text = pytesseract.image_to_string(Image.open(filename))\n        os.remove(filename)\n        text = text.strip().split()\n        chars_to_remove = ['.', '!', ':']\n        extracted = list()\n        for t in text:\n            for c in chars_to_remove:\n                t = t.replace(c, '')\n            extracted.append(spell(t))\n        return(' '.join(extracted))\n\n    except BaseException as e:\n        print(e)\n\n\ntry:\n    process_files = db_pre_ocr.find({})\n\n    for image in process_files:\n        fromImage = extractText(image['location'])\n        fromImageClean = re.sub(r'\\W+', ' ', fromImage) # Only alphanumeric\n        if fromImage != \"00000\":\n\n            add_image = {\n                'location': image['location'],\n                'attributes': image['attributes'],\n                'ocr_out': fromImageClean\n            }\n            db_post_ocr.insert_one(add_image)\nexcept BaseException as e:\n    print(e)\n","sub_path":"scraper/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"388685840","text":"print('Enter the number of numbers that need to be bubble sorted')\nx = int(input())\n\nnumbers = []\nwrite_numbers = []\ni = 0\nwhile i < x:\n    print('Enter number')\n    n = input()\n    write_numbers.append(n)\n    numbers.append(int(n))\n    i += 1\n\ndef listToString(l, s):\n    for q in range(x):\n        temp = str(l[q])\n        s += temp\n        s += ', '\n    return s\n\nwrite_numbers_string = ' '\nappend_unsorted = open('records.txt', 'a+')\nappend_unsorted.write('\nNew Entry. Unsorted numbers: \n')\nappend_unsorted.write(listToString(write_numbers, write_numbers_string))\nappend_unsorted.close()\n\nprint('List you entered')\nprint(numbers)\n\ndef bubblesort(l):\n    swapped = None\n    for a in range(x):\n        swapped = False\n        for b in range(x-a-1):\n            if l[b] > l[b+1]:\n                l[b], l[b+1] = l[b+1], l[b]\n                swapped = True\n        if swapped == False:\n            break\n\nbubblesort(numbers)\n\nprint('Bubble Sorted List')\nprint(numbers)\n\nwrite_numbers_string = ' '\nwrite_numbers = numbers\nappend_sorted = open('records.txt', 'a+')\nappend_sorted.write('\nSorted numbers: \n')\nappend_sorted.write(listToString(write_numbers, write_numbers_string))\nappend_sorted.close()\n","sub_path":"My Own Python Programs/Bubble Sort/bubblesort.py","file_name":"bubblesort.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"275437774","text":"from faker import Factory\nfrom random import randint, random\nfrom datetime import date, timedelta\n\nfrom stat_tracker.models import User, Activity, Timestamp\n\nACTIVITIES = [\"Run a Mile\",\n              \"Read Something\",\n              \"Work Out\",\n              \"Make Dinner\",\n              \"Water Plants\",\n              \"Remember Birthdays\",\n              \"Call Mom\",\n              \"Set Alarm\",\n              \"Practice Instruments\",\n              \"Study\",\n              \"Laundry\"]\n\ndef seed(db):\n    \"\"\"\n    Seeds the database but commented out to avoid running accidentally\n    :param db:\n    :return:\n    \"\"\"\n    users = db.session.query(User).all()\n    # for user in users:\n    #     num_of_activities = randint(0,len(ACTIVITIES)-1)\n    #     count = 0\n    #     while count < num_of_activities:\n    #         activity = Activity(name=ACTIVITIES[randint(0, len(ACTIVITIES)-1)],\n    #                             creator=user.id,\n    #                             activity_type=\"Once A Day\")\n    #\n    #         if not Activity.query.filter_by(name=activity.name, creator=activity.creator).first():\n    #             db.session.add(activity)\n    #             count += 1\n\n    # today = date.today()\n    # last_sixty_days = [today - timedelta(days=i) for i in range(60)]\n    # last_sixty_days = [day.strftime(\"%Y-%m-%d\") for day in last_sixty_days]\n    # activities = db.session.query(Activity).all()\n    #\n    # for activity in activities:\n    #     random_percent = random()\n    #\n    #     for day in last_sixty_days:\n    #         if random() < random_percent:\n    #             timestamp = Timestamp(timestamp=day,\n    #                                   activity_id=activity.id,\n    #                                   actor_id=activity.creator)\n    #             if not Timestamp.query.filter_by(timestamp=day, activity_id=activity.id).first():\n    #                 db.session.add(timestamp)\n    #\n    # db.session.commit()\n\n\ndef trim_seeds(db):\n    \"\"\"\n    Removes 30% of randomly selected timestamps.\n    :param db:\n    :return:\n    \"\"\"\n    timestamps = Timestamp.query.all()\n    for timestamp in timestamps:\n        if random() < 0.3:\n            db.session.delete(timestamp)\n    db.session.commit()","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"342584331","text":"\"\"\" Analýza nehod v kraji Vysočina podle typu vozovky\n\"\"\"\n\nimport pandas as pd\nimport geopandas\nimport matplotlib.pyplot as plt\nimport contextily as ctx\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\n# typy komunikací které analyzuji\n# p36 == 0 -> dálnice\n# 1 -> 1. třídy, 2. -> 2.třídy, 3.-> 3.třídy\n# 6 - místní komunikace, 7 - polní, lesní cesty\ncommunication_types = [0, 1, 2, 3, 6, 7]\ncommunication_names = {0: 'Dálnice',\n                       1: 'Silnice 1. třídy',\n                       2: 'Silnice 2. třídy',\n                       3: 'Silnice 3. 
třídy',\n 6: 'Místní komunikace',\n 7: 'Účelové komunikace\\n'\n '(polní, lesní cesty atd.)'}\n# simplifikace druh nehod pod větší skupiny\naccident_causes = {\n 100: \"nezaviněná řidičem\",\n 200: \"nepřiměřené rychlost jízdy\",\n 300: \"nesprávné předjíždění\",\n 400: \"nedání přednosti v jízdě\",\n 500: \"nesprávný způsob jízdy\",\n 600: \"technická závada vozidla\"\n}\n# délka silnic 1.-3. třídy na území Vysočiny a dálnice D1 (jiná zde neprochází)\n# statistiky platné ke dni 31.12.2006\n# zdroj: https://www.kr-vysocina.cz/assets/File.ashx?id_org=450008&id_dokumenty=4001876#:~:text=2.1.,-Z%C3%A1kladn%C3%AD%20%C3%BAdaje%20o&text=Na%20%C3%BAzem%C3%AD%20kraje%20Vyso%C4%8Dina%20se,v%20d%C3%A9lce%204%20579%20km.\ncommunication_lenght = {\n 0: 93,\n 1: 422,\n 2: 1630,\n 3: 2949\n}\n\ndef get_data(data_location: str = \"accidents.pkl.gz\"):\n \"\"\"\n Funkce extrahuje data z daného souboru, vyfiltruje je a vrátí výsledný dataframe.\n\n :param data_location: Umístění souboru se zdrojovými daty.\n :return: Extrahovaný dataframe vyfiltrovaný pouze pro Vysočinu a typy silnice 0-3 a 6-7.\n \"\"\"\n df = pd.read_pickle(data_location)\n # ponechani pouze dat z kraje Vysocina\n df = df[df[\"region\"] == \"VYS\"]\n # vyrazeni p36 4 a 5, tedy polozek uzel a komunikace sledovane ve meste a 8 (ostatni ucelove komunikace\n # jako parkoviste, protoze zkoumam pouze typ komunikace)\n df = df.query('p36 != \"4\" and p36 != \"5\" and p36 != \"8\"')\n return df\n\n\ndef create_info_graph(df: pd.DataFrame):\n \"\"\"\n Funkce z daného dataframe vytvoří graf a uloží ho do \"fig.png\" souboru.\n Tento graf zobrazuje nehody na mapě s barevným kódem podle druhu vozovky.\n\n :param df: Dataframe s daty o nehodách.\n \"\"\"\n df = df[df.d.notnull()]\n df = df[df.d.notnull()]\n gdf = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df['d'], df['e']), crs=\"EPSG:5514\")\n\n comunication_types_reverse = communication_types.copy()\n comunication_types_reverse.reverse()\n comunication_colors = {0: 'indigo',\n 1: 'blue',\n 2: 'dodgerblue',\n 3: 'darkturquoise',\n 6: 'black',\n 7: 'lime'}\n legend_elements = []\n for i in communication_types:\n legend_elements.append(Line2D([0], [0], color=comunication_colors[i], lw=4,\n label=communication_names[i]))\n\n dfs = {}\n for i in communication_types:\n data = gdf.query('p36 == \"' + str(i) + '\"')\n dfs[i] = data\n\n # vytvoreni grafu z in_df a out_df\n fig, ax = plt.subplots(1, 1, figsize=(20, 15))\n for i in comunication_types_reverse:\n dfs[i].plot(ax=ax, color=comunication_colors[i], markersize=15)\n\n ctx.add_basemap(ax, crs=gdf.crs.to_string(), source=ctx.providers.Stamen.TonerLite)\n ax.axis('off')\n ax.set_title('Nehody kraje Vysočina podle typu silnice', fontdict={\"fontsize\": 25})\n ax.legend(handles=legend_elements, fontsize='xx-large')\n\n plt.savefig('fig.png')\n plt.show()\n\n\ndef find_most(d: dict):\n \"\"\"\n Najde v daném dictionary největší hodnotu a vrátí její key.\n :param d: Dictionary k prohledání.\n :return: Key nejvyšší hodnoty.\n \"\"\"\n max_val = 0\n max_val2 = 0\n max_type = None\n max_type2 = None\n for key, val in d.items():\n if val > max_val:\n max_val2 = max_val\n max_type2 = max_type\n max_val = val\n max_type = key\n elif val > max_val2:\n max_val2 = val\n max_type2 = key\n return max_type, max_type2\n\n\ndef create_statistics(df: pd.DataFrame):\n \"\"\"\n Vytvori tabulku s informacemi o typu nehod na ruznych silnicich.\n :param df: DataFrame ke zpracovani do statistik.\n \"\"\"\n conditions = [\n (df['p12'] == 100),\n (df['p12'] > 
200) & (df['p12'] <= 209),\n (df['p12'] > 300) & (df['p12'] <= 311),\n (df['p12'] > 400) & (df['p12'] <= 414),\n (df['p12'] > 500) & (df['p12'] <= 516),\n (df['p12'] > 600) & (df['p12'] <= 615)\n ]\n # vytvori dalsi sloupecek, kde je p36 zjednodusena do cisla z accident_causes podle typu nehody\n df['cause_code'] = np.select(conditions, accident_causes.keys())\n\n # vytvori dictionary s polozkami code_group : accident count\n sum_d = df['cause_code'].value_counts().to_dict()\n # vytvori tabulku, vysledna bude formatu\n # typ silnice | vsechny | pomer | rychlost pocet | rychlost pomer | nejcastejsi pricina | nej. pocet | nej. pomer\n # a bude mit radky pro kazdy z communication_types + sum (soucet pro vsechny)\n # kazdy radek je key table dictionary s typem silnice a jako value je dalsi dictionary s polozkami radku\n table = {}\n table['sum'] = {}\n sum_total = len(df['cause_code'])\n table['sum']['total'] = sum_total\n table['sum']['total_share'] = sum_total / sum_total\n table['sum']['speed_cnt'] = sum_d[200]\n table['sum']['speed_share'] = sum_d[200] / sum_total\n sum_most, sum_most2 = find_most(sum_d)\n table['sum']['most_type'] = accident_causes[sum_most]\n table['sum']['most_type2'] = accident_causes[sum_most2]\n # for cyklus provede vytvoreni \"radku\" (dictionary v table) pro kazdou silnici\n for road in communication_types:\n road_df = df.query('p36 == \"' + str(road) + '\"')\n road_d = road_df['cause_code'].value_counts().to_dict()\n road_total = len(road_df)\n table[road] = {}\n table[road]['total'] = road_total\n table[road]['total_share'] = road_total / sum_total\n table[road]['speed_cnt'] = road_d[200]\n table[road]['speed_share'] = road_d[200] / road_total\n road_most, road_most2 = find_most(road_d)\n table[road]['most_type'] = accident_causes[road_most]\n table[road]['most_type2'] = accident_causes[road_most2]\n\n # vytisknuti tabulky v github markdown formátu\n print('# Tabulka typu silnic a jejich nejčastějších nehod')\n print('| Typ silnice | Počet nehod | Počet nehod v % | Nejčastější příčina | '\n ' 2. nejčastejší příčina |')\n print('| --- | --- | --- | --- | --- | --- |')\n for road in communication_types:\n print('|', communication_names[road],\n '|', table[road][\"total\"],\n '|', round((table[road][\"total_share\"]*100), 2), '%',\n '|', table[road][\"most_type\"],\n '|', table[road][\"most_type2\"],\n '|')\n print('|', 'Součet',\n '|', table['sum'][\"total\"],\n '|', round((table['sum'][\"total_share\"] * 100), 2), '%',\n '|', table['sum'][\"most_type\"],\n '|', table['sum']['most_type2'],\n '|', end='\\n\\n')\n\n # nasleduji cast se zabyva pouze vypoctem o dalnici a silnicich 1.-3. tridy\n # protoze k nim mame informace o delce\n total_lenght = sum(communication_lenght.values())\n total_accidents = 0\n for i in range(0, 4):\n total_accidents += table[i]['total']\n\n print('# Analýza nehod v souvislosti s délkou silnic\\nPouze dálnice a silnice 1.-3. třídy.')\n print('Celkový počet nehod:', total_accidents)\n print('Celková délka silnic:', total_lenght)\n print('Dálnice:\\n\\t', str(communication_lenght[0]), ' km (',\n round(communication_lenght[0]*100/total_lenght),\n '%): ', table[0]['total'], ' nehod (',\n round(table[0]['total']*100/total_accidents), '%)\\n\\t',\n round(table[0]['total']/communication_lenght[0], 2), ' nehod/km', sep='')\n print('Silnice I. 
třídy:\\n\\t', str(communication_lenght[1]), ' km (',\n round(communication_lenght[1]*100/total_lenght),\n '%): ', table[1]['total'], ' nehod (',\n round(table[1]['total']*100/total_accidents), '%)\\n\\t',\n round(table[1]['total']/communication_lenght[1], 2), ' nehod/km', sep='')\n print('Silnice II. třídy:\\n\\t', str(communication_lenght[2]), ' km (',\n round(communication_lenght[2]*100/total_lenght),\n '%): ', table[2]['total'], ' nehod (',\n round(table[2]['total']*100/total_accidents), '%)\\n\\t',\n round(table[2]['total']/communication_lenght[2], 2), ' nehod/km', sep='')\n print('Silnice III. třídy:\\n\\t', str(communication_lenght[3]), ' km (',\n round(communication_lenght[3]*100/total_lenght),\n '%): ', table[3]['total'], ' nehod (',\n round(table[3]['total']*100/total_accidents), '%\\n\\t',\n round(table[3]['total']/communication_lenght[3], 2), ' nehod/km', sep='')\n\n\nif __name__ == \"__main__\":\n df = get_data()\n create_info_graph(df)\n create_statistics(df)\n","sub_path":"izv3/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":9346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141065591","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 16:10:58 2018\n\n@author: fengh\n\"\"\"\n\nimport argparse\nimport motion\nimport almath\nimport time\n\nfrom naoqi import ALProxy\n\narmName = \"LArm\"\nlFootOffset = almath.Pose2D(0.0, 0.09, 0.0)\nrFootOffset = almath.Pose2D(0.0, -0.09, 0.0)\nstepSpeed = 1.0\nstepLength = 0.05\n\ndef initRobotPosition(motionProxy, postureProxy):\n ''' Inits NAO's position and stiffnesses to make the guiding possible.'''\n \n global armName\n \n motionProxy.wakeUp()\n postureProxy.goToPosture(\"StandInit\", 0.3)\n motionProxy.moveInit()\n time.sleep(1.0)\n # Make left arm loose.\n motionProxy.setAngles(\"LWristYaw\", 0.0, 1.0)\n motionProxy.setAngles(\"Head\", [0.44, -0.44], 0.5)\n motionProxy.setStiffnesses(armName, 0.0)\n motionProxy.setStiffnesses(\"LWristYaw\", 0.2)\n\n # Disable arm moves while walking on left arm.\n motionProxy.setMoveArmsEnabled(False, True)\n time.sleep(1.0)\n \ndef interpretJointsPose(motionProxy, memoryProxy):\n ''' Translates the current left arm pose into a target position for NAO's\n foot. 
'''\n\n # Retrieve current arm position.\n armPose = motionProxy.getAngles(armName, True)\n\n targetX = 0.0\n targetY = 0.0\n targetTheta = 0.0\n gaitConfig = motionProxy.getMoveConfig(\"Default\")\n\n # Filter Shoulder Pitch.\n if (armPose[0] > - 0.9 and armPose[0] < -0.20):\n targetX = stepLength\n elif (armPose[0] > -2.5 and armPose[0] < -1.5):\n targetX = - stepLength - 0.02\n\n # Filter Wrist Yaw.\n if armPose[4] > 0.2:\n targetTheta = gaitConfig[2][1]\n elif armPose[4] < -0.2:\n targetTheta = - gaitConfig[2][1]\n\n # Return corresponding pose.\n return almath.Pose2D(targetX, targetY, targetTheta)\n \ndef main(robotIP, PORT=9559):\n\n global armName\n \n motionProxy = ALProxy(\"ALMotion\", robotIP, PORT)\n postureProxy = ALProxy(\"ALRobotPosture\", robotIP, PORT)\n\n # Wake up robot\n # motionProxy.wakeUp()\n\n # Send robot to Stand Init\n #postureProxy.goToPosture(\"StandInit\", 0.5)\n postureProxy.goToPosture(\"Crouch\", 1)\n\n # effector = \"LArm\"\n frame = motion.FRAME_TORSO\n axisMask = almath.AXIS_MASK_VEL # just control position\n useSensorValues = False\n\n path = []\n currentTf = motionProxy.getTransform(armName, frame, useSensorValues)\n targetTf = almath.Transform(currentTf)\n targetTf.r1_c4 -= 0 # x\n targetTf.r2_c4 += 0.1 # y\n targetTf.r3_c4 += 0 # z\n \n path.append(list(targetTf.toVector()))\n path.append(currentTf)\n\n # Go to the target and back again\n times = [2.0, 4.0] # seconds\n\n motionProxy.transformInterpolations(armName, frame, path, axisMask, times)\n\n # Go to rest position\n # motionProxy.rest()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", type=str, default=\"127.0.0.1\",\n help=\"Robot ip address\")\n parser.add_argument(\"--port\", type=int, default=9559,\n help=\"Robot port number\")\n\n args = parser.parse_args()\n main(args.ip, args.port)\n\n\n","sub_path":"Motion_Part/NAO_arm_test.py","file_name":"NAO_arm_test.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607747034","text":"\"\"\"\nmodel training\n\"\"\"\n\nimport argparse\nfrom models.modules import QConv2d, WQPROFIT\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nimport os\nimport time\nimport sys\nimport models\nimport logging\nimport torch.utils.model_zoo as model_zoo\nfrom torchsummary import summary\nfrom dataset import get_loader \n\nfrom utils import *\nfrom collections import OrderedDict\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10/ImageNet Training')\nparser.add_argument('--model', type=str, help='model type')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')\nparser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train.')\nparser.add_argument('--batch_size', default=128, type=int, metavar='N', help='mini-batch size (default: 64)')\n\nparser.add_argument('--schedule', type=int, nargs='+', default=[60, 120],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1],\n help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')\nparser.add_argument('--lr_decay', type=str, default='step', help='mode for learning rate decay')\nparser.add_argument('--print_freq', 
default=1, type=int,\n metavar='N', help='print frequency (default: 200)')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('--log_file', type=str, default=None,\n help='path to log file')\n\n# dataset\nparser.add_argument('--dataset', type=str, default='cifar10', help='dataset: CIFAR10 / ImageNet_1k')\nparser.add_argument('--data_path', type=str, default='./data/', help='data directory')\n\n# model saving\nparser.add_argument('--save_path', type=str, default='./save/', help='Folder to save checkpoints and log.')\nparser.add_argument('--evaluate', action='store_true', help='evaluate the model')\n\n# Acceleration\nparser.add_argument('--ngpu', type=int, default=3, help='0 = CPU.')\nparser.add_argument('--workers', type=int, default=16,help='number of data loading workers (default: 2)')\n\n# Fine-tuning\nparser.add_argument('--fine_tune', dest='fine_tune', action='store_true',\n help='fine tuning from the pre-trained model, force the start epoch be zero')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='full pre-trained model')\nparser.add_argument('--resume', default='', type=str, help='path of the pretrained model')\n\n# quantization\nparser.add_argument('--wbit', type=int, default=4, help='weight precision')\nparser.add_argument('--abit', type=int, default=4, help='activation precision')\nparser.add_argument('--alpha_init', type=int, default=10., help='initial activation clipping')\nparser.add_argument('--channel_wise', type=int, default=0, help='channel_wise quantization flag')\n\n# activation clipping(PACT)\nparser.add_argument('--clp', dest='clp', action='store_true', help='using clipped relu in each stage')\nparser.add_argument('--a_lambda', type=float, default=0.01, help='The parameter of alpha L2 regularization')\n\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nargs.use_cuda = args.ngpu > 0 and torch.cuda.is_available() # check GPU\n\ndef main():\n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path)\n \n logger = logging.getLogger('training')\n if args.log_file is not None:\n fileHandler = logging.FileHandler(args.save_path+args.log_file)\n fileHandler.setLevel(0)\n logger.addHandler(fileHandler)\n streamHandler = logging.StreamHandler()\n streamHandler.setLevel(0)\n logger.addHandler(streamHandler)\n logger.root.setLevel(0)\n logger.info(args)\n\n # Prepare the data\n trainloader, testloader, num_classes = get_loader(args)\n\n # Prepare the model\n logger.info('==> Building model..\\n')\n model_cfg = getattr(models, args.model)\n model_cfg.kwargs.update({\"num_classes\": num_classes})\n net = model_cfg.base(*model_cfg.args, **model_cfg.kwargs) \n logger.info(net)\n\n checkpoint = model_zoo.load_url(models.model_urls[str(args.model)])\n new_state_dict = OrderedDict()\n logger.info(\"=> loading checkpoint...\")\n\n for k, v in checkpoint.items():\n name = k\n if 'fc' in k:\n if num_classes == 1000:\n new_state_dict[name] = v\n else:\n logger.info(\"=> load backbone only!\")\n else:\n new_state_dict[name] = v\n \n state_tmp = net.state_dict()\n state_tmp.update(new_state_dict)\n\n net.load_state_dict(state_tmp)\n logger.info(\"=> loaded checkpoint!\")\n \n if args.use_cuda:\n net = net.cuda()\n \n if args.ngpu > 1:\n net = torch.nn.DataParallel(net)\n logger.info(\"Data Parallel!\")\n\n # Loss function\n criterion = nn.CrossEntropyLoss().cuda()\n \n model_params = []\n for name, 
params in net.named_parameters():\n if 'act_alpha' in name:\n print(name)\n model_params += [{'params': [params], 'lr': 1e-1, 'weight_decay': 1e-4}]\n else:\n model_params += [{'params': [params]}]\n\n optimizer = optim.SGD(model_params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n \n # Evaluate\n if args.evaluate:\n logger.info(\"Evaluate!\")\n test_acc, val_loss = test(testloader, net, criterion, 0)\n logger.info(f'Test accuracy: {test_acc}')\n exit()\n\n # Training\n start_time = time.time()\n epoch_time = AverageMeter()\n best_acc = 0.\n start_epoch = 0\n columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'tr_time', 'te_loss', 'te_acc', 'best_acc']\n\n for epoch in range(start_epoch, start_epoch+args.epochs):\n need_hour, need_mins, need_secs = convert_secs2time(\n epoch_time.avg * (args.epochs - epoch))\n \n need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(\n need_hour, need_mins, need_secs)\n \n current_lr, current_momentum = adjust_learning_rate_schedule(\n optimizer, epoch, args.gammas, args.schedule, args.lr, args.momentum)\n\n # Training phase\n train_results = train(trainloader, net, criterion, optimizer, epoch, args)\n\n # Test phase\n test_acc, val_loss = test(testloader, net, criterion, epoch) \n is_best = test_acc > best_acc\n\n if is_best:\n best_acc = test_acc\n\n state = {\n 'state_dict': net.state_dict(),\n 'acc': best_acc,\n 'epoch': epoch,\n 'optimizer': optimizer.state_dict(),\n }\n \n filename='checkpoint.pth.tar'\n save_checkpoint(state, is_best, args.save_path, filename=filename)\n\n e_time = time.time() - start_time\n epoch_time.update(e_time)\n start_time = time.time()\n \n values = [epoch + 1, optimizer.param_groups[0]['lr'], train_results['loss'], train_results['acc'], \n e_time, val_loss, test_acc, best_acc]\n\n print_table(values, columns, epoch, logger)\n print(need_time)\n\nif __name__ == '__main__':\n main()\n","sub_path":"train_fp.py","file_name":"train_fp.py","file_ext":"py","file_size_in_byte":7434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"154079124","text":"from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\n\nfrom statistics import median\n\nauthor = 'Shardul Vaidya, CESS India'\n\ndoc = \"\"\"\nfolks reading a prime paragraph and answering a few questions\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'prime'\n players_per_group = None\n num_rounds = 1\n\n\nclass Subsession(BaseSubsession):\n pass\n\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n q1 = models.LongStringField(label='Please describe in one sentence what the text was about:')\n q2 = models.IntegerField(label='To what extent do you agree or disagree with the renaming of Urdu/Muslim city names?',\n choices=[[1,'Fully Agree'],\n [2,'Somewhat Agree'],\n [3,'Neither Agree Nor Disagree'],\n [4,'Somewhat Disagree'],\n [5, 'Fully Disagree']],\n widget=widgets.RadioSelectHorizontal\n )\n q3 = models.IntegerField(\n label='I believe people look up to me and respect me.',\n choices=[[1, 'Fully Agree'],\n [2, 'Somewhat Agree'],\n [3, 'Neither Agree Nor Disagree'],\n [4, 'Somewhat Disagree'],\n [5, 'Fully Disagree']],\n widget=widgets.RadioSelect\n )\n q4 = models.IntegerField(\n label='I feel I have much to be proud of.',\n choices=[[1, 'Fully Agree'],\n [2, 'Somewhat Agree'],\n [3, 'Neither Agree Nor Disagree'],\n [4, 'Somewhat Disagree'],\n [5, 'Fully Disagree']],\n widget=widgets.RadioSelect\n 
)\n\n\n\n","sub_path":"prime/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6920252","text":"def set_template(args):\n if args.template is None:\n return\n\n elif args.template == 'vae':\n args.dataset_code = 'ml-1m'\n args.min_rating = 4\n args.min_uc = 5\n args.min_sc = 0\n args.split = 'user_ratio'\n args.n_held_out = 1000\n args.negative_sampling = False\n\n args.dataloader_code = 'vae'\n batch_size = 500\n args.train_batch_size = batch_size\n args.val_batch_size = batch_size\n args.test_batch_size = batch_size\n\n args.partition_ratio = 0.2\n args.partition_random_seed = 1024\n\n args.model_code = 'vae'\n args.vae_p_dims = [200, 600]\n args.vae_q_dims = None\n args.vae_dropout = 0.5\n args.vae_total_anneal_steps = 200000\n args.vae_anneal_cap = 0.2\n\n args.trainer_code = 'vae'\n args.device = 'cuda'\n args.num_gpu = 1\n args.device_idx = '0'\n args.optimizer = 'Adam'\n args.lr = 1e-3\n args.weight_decay = 0\n args.decay_step = 1\n args.gamma = 1.0\n args.num_epochs = 200\n args.metric_ks = [20, 50, 100]\n args.best_metric = 'NDCG@100'\n\n elif args.template.startswith('bert'):\n args.dataset_code = 'ml-1m'\n args.min_rating = 0\n args.min_uc = 5\n args.min_sc = 0\n args.split = 'leave_one_out'\n\n args.dataloader_code = 'bert'\n batch = 128\n args.train_batch_size = batch\n args.val_batch_size = batch\n args.test_batch_size = batch\n\n args.negative_sampling = True\n args.train_negative_sampler_code = 'random'\n args.train_negative_sample_size = 0\n args.train_negative_sampling_seed = 0\n args.test_negative_sampler_code = 'random'\n args.test_negative_sample_size = 100\n args.test_negative_sampling_seed = 98765\n\n args.trainer_code = 'bert'\n args.device = 'cuda'\n args.num_gpu = 1\n args.device_idx = '0'\n args.optimizer = 'Adam'\n args.lr = 0.001\n args.decay_step = 25\n args.gamma = 1.0\n args.num_epochs = 100\n args.metric_ks = [1, 5, 10, 20, 50, 100]\n args.best_metric = 'NDCG@10'\n\n args.model_code = 'bert'\n args.model_init_seed = 0\n num_users, num_items = get_user_item_nums(args)\n\n args.bert_dropout = 0.1\n args.bert_hidden_units = 256\n args.bert_mask_prob = 0.15\n args.bert_max_len = 100\n args.bert_num_blocks = 2\n args.bert_num_heads = 4\n args.bert_num_items = num_items\n\n\ndef get_user_item_nums(args):\n if args.dataset_code == 'ml-1m':\n if args.min_rating == 4 and args.min_uc == 5 and args.min_sc == 0:\n return 6034, 3533\n elif args.min_rating == 0 and args.min_uc == 5 and args.min_sc == 0:\n return 6040, 3706\n raise ValueError()\n","sub_path":"templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"356965311","text":"from django.test import TestCase\r\nfrom django.contrib.auth.models import User\r\nfrom demo.models import SignInRecord\r\nimport json\r\n\r\n\r\n# Create your tests here.\r\n\r\n\r\nclass MyTestCase(TestCase):\r\n fixtures = ['demodb.json']\r\n\r\n def setUp(self):\r\n loginSuccess = self.client.login(username=\"07033\", password=\"hdhhb2002\")\r\n self.assertEqual(loginSuccess, True)\r\n\r\n def test_teacher_login(self):\r\n teacherLoginResponse = self.client.post(\"/demo/teacher_login/\", {'username': '07033', 'password': 'hdhhb2002'})\r\n formatJsonResponse = json.loads(teacherLoginResponse.content.decode())\r\n self.assertEqual(formatJsonResponse['status'], 1)\r\n\r\n def 
test_test_create_test(self):\r\n testCreateTestResponse = self.client.get(\"/demo/test_create_test/\", {'courseId': 1})\r\n self.assertEqual(testCreateTestResponse.status_code, 200)\r\n\r\n def test_absence_record(self):\r\n # oldLateRecordNum = len(LateRecord.objects.all())\r\n # requestParams = '{\"courseId\": 1, \"studentIdForAbsence\": [1,2,], \"studentIdForLeave\": [3,4,]}'\r\n # formatJsonRequest = json.loads(requestParams)\r\n # absenceRecordResponse = self.client.post(\"/demo/absence_record/\", requestParams, content_type=\"application/json\")\r\n # self.assertEqual(absenceRecordResponse.status_code, 200)\r\n # newLateRecordNum = len(LateRecord.objects.all())\r\n # self.assertEqual(newLateRecordNum - oldLateRecordNum, len(formatJsonRequest['studentIdForAbsence']+len(formatJsonRequest['studentIdForLeave'])))\r\n\r\n oldLateRecordNum = len(SignInRecord.objects.all())\r\n dict_string = {'courseId': 1, 'studentIdForAbsence': [1, 2, ], 'studentIdForLeave': [3, 4, ]}\r\n numbers = len(dict_string['studentIdForAbsence']) + len(dict_string['studentIdForLeave'])\r\n J_string = json.dumps(dict_string)\r\n absenceRecordResponse = self.client.post(\"/demo/absence_record/\", data=J_string,\r\n content_type=\"application/json\")\r\n self.assertEqual(absenceRecordResponse.status_code, 200)\r\n newLateRecordNum = len(SignInRecord.objects.all())\r\n self.assertEqual(newLateRecordNum - oldLateRecordNum, numbers)\r\n\r\n def test_record_again(self):\r\n\r\n old_signIn_record_num = len(SignInRecord.objects.all())\r\n old_late_record_num = len(SignInRecord.objects.filter(state=2))\r\n old_leave_num = len(SignInRecord.objects.filter(state=1))\r\n\r\n dic_string = {'courseId': 1, 'weekOrdinal': 1, 'studentIdForMiss': [1, 2], 'studentIdForLeave': [],\r\n 'studentIdForLate': [3]}\r\n number_miss = len(dic_string['studentIdForMiss'])\r\n number_late = len(dic_string['studentIdForLate'])\r\n number_leave = len(dic_string['studentIdForLeave'])\r\n J_string = json.dumps(dic_string)\r\n record_again_response = self.client.post('/demo/record_again/', data=J_string, content_type='application/json')\r\n self.assertEqual(record_again_response.status_code, 200)\r\n\r\n new_signInRecord_num = len(SignInRecord.objects.all())\r\n new_late_num = len(SignInRecord.objects.filter(state=2))\r\n new_leave_num = len(SignInRecord.objects.filter(state=1))\r\n self.assertEqual(old_signIn_record_num - new_signInRecord_num, number_miss)\r\n self.assertEqual(new_late_num - old_late_record_num, number_late)\r\n self.assertEqual(new_leave_num - old_leave_num, number_leave)\r\n","sub_path":"demo/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200059863","text":"#!/usr/bin/env python3\n\n# Copyright 2020 The Kraken Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom kraken.server.bg.clry import app\nfrom . import srvcheck\nfrom . 
import consts\n\n\ndef main():\n planner_url = os.environ.get('KRAKEN_PLANNER_URL', consts.DEFAULT_PLANNER_URL)\n srvcheck.check_url('planner', planner_url, 7997)\n\n argv = [\n 'worker',\n '--loglevel=INFO',\n '--max-tasks-per-child=1',\n ]\n app.worker_main(argv=argv)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"server/kraken/server/kkcelery.py","file_name":"kkcelery.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37691280","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\n\"\"\"Eulerproblem 7, find the 10 001st prime number \"\"\"\n\ndef prime(n):\n\ti=2\n\ttestno = 2\n\ttester = 2\n\twhile i (N, z_dim, 1, 1) -> (N, z_dim, H, W)\n # x_with_z : (N, 3 + z_dim, H, W)\n z = z.unsqueeze(dim=2).unsqueeze(dim=3)\n z = z.expand(z.size(0), z.size(1), x.size(2), x.size(3))\n x_with_z = torch.cat([x, z], dim=1)\n\n down_1 = self.downsample_1(x_with_z)\n down_2 = self.downsample_2(down_1)\n down_3 = self.downsample_3(down_2)\n down_4 = self.downsample_4(down_3)\n down_5 = self.downsample_5(down_4)\n # down_6 = self.downsample_6(down_5)\n down_7 = self.downsample_7(down_5)\n\n up_1 = self.upsample_1(down_7)\n # up_2 = self.upsample_2(torch.cat([up_1, down_6], dim=1))\n up_3 = self.upsample_3(torch.cat([up_1, down_5], dim=1))\n up_4 = self.upsample_4(torch.cat([up_3, down_4], dim=1))\n up_5 = self.upsample_5(torch.cat([up_4, down_3], dim=1))\n up_6 = self.upsample_6(torch.cat([up_5, down_2], dim=1))\n out = self.upsample_7(torch.cat([up_6, down_1], dim=1))\n\n return out\n\n\nclass Discriminator(nn.Module):\n \"\"\"\n PatchGAN discriminator. See https://arxiv.org/pdf/1611.07004 6.1.2 Discriminator architectures.\n It uses two discriminator which have different output sizes(different local probabilities).\n d_1 : (N, 3, 128, 128) -> (N, 1, 14, 14)\n d_2 : (N, 3, 128, 128) -> (N, 1, 30, 30)\n\n In training, the generator needs to fool both of d_1 and d_2 and it makes the generator more robust.\n \"\"\"\n\n def __init__(self):\n super(Discriminator, self).__init__()\n # Discriminator with last patch (14x14)\n # (N, 3, 128, 128) -> (N, 1, 14, 14)\n self.d_1 = nn.Sequential(nn.AvgPool2d(kernel_size=3, stride=2, padding=0, count_include_pad=False),\n ConvBlock(3, 32, k=4, s=2, p=1, norm=False, non_linear='leaky_relu'),\n ConvBlock(32, 64, k=4, s=2, p=1, norm=True, non_linear='leaky-relu'),\n ConvBlock(64, 128, k=4, s=1, p=1, norm=True, non_linear='leaky-relu'),\n ConvBlock(128, 1, k=4, s=1, p=1, norm=False, non_linear=None))\n\n # Discriminator with last patch (30x30)\n # (N, 3, 128, 128) -> (N, 1, 30, 30)\n self.d_2 = nn.Sequential(ConvBlock(3, 64, k=4, s=2, p=1, norm=False, non_linear='leaky_relu'),\n ConvBlock(64, 128, k=4, s=2, p=1, norm=True, non_linear='leaky-relu'),\n ConvBlock(128, 256, k=4, s=1, p=1, norm=True, non_linear='leaky-relu'),\n ConvBlock(256, 1, k=4, s=1, p=1, norm=False, non_linear=None))\n\n def forward(self, x):\n out_1 = self.d_1(x)\n out_2 = self.d_2(x)\n return out_1, out_2\n\n\nclass ResBlock(nn.Module):\n \"\"\"\n This residual block is different with the one we usually know which consists of\n [conv - norm - act - conv - norm] and identity mapping(x -> x) for shortcut.\n\n Also spatial size is decreased by half because of AvgPool2d.\n \"\"\"\n\n def __init__(self, in_dim, out_dim):\n super(ResBlock, self).__init__()\n self.conv = nn.Sequential(nn.InstanceNorm2d(in_dim, affine=True),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(in_dim, in_dim, 
kernel_size=3, stride=1, padding=1),\n nn.InstanceNorm2d(in_dim, affine=True),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),\n nn.AvgPool2d(kernel_size=2, stride=2, padding=0))\n\n self.short_cut = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2, padding=0),\n nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n out = self.conv(x) + self.short_cut(x)\n return out\n\n\nclass Encoder(nn.Module):\n \"\"\"\n Output is mu and log(var) for re-parameterization trick used in Variation Auto Encoder.\n Encoding is done in this order.\n 1. Use this encoder and get mu and log_var\n 2. std = exp(log(var / 2))\n 3. random_z = N(0, 1)\n 4. encoded_z = random_z * std + mu (Re-parameterization trick)\n \"\"\"\n\n def __init__(self, z_dim=8):\n super(Encoder, self).__init__()\n\n self.conv = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)\n self.res_blocks = nn.Sequential(ResBlock(64, 128),\n ResBlock(128, 192),\n ResBlock(192, 256))\n self.pool_block = nn.Sequential(nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.AvgPool2d(kernel_size=8, stride=8, padding=0))\n\n # Return mu and log var for re-parameterization trick\n self.fc_mu = nn.Linear(256, z_dim)\n self.fc_log_var = nn.Linear(256, z_dim)\n\n def forward(self, x):\n # (N, 3, 128, 128) -> (N, 64, 64, 64)\n out = self.conv(x)\n # (N, 64, 64, 64) -> (N, 128, 32, 32) -> (N, 192, 16, 16) -> (N, 256, 8, 8)\n out = self.res_blocks(out)\n # (N, 256, 8, 8) -> (N, 256, 1, 1)\n out = self.pool_block(out)\n # (N, 256, 1, 1) -> (N, 256)\n out = out.view(x.size(0), -1)\n\n # (N, 256) -> (N, z_dim) x 2\n mu = self.fc_mu(out)\n log_var = self.fc_log_var(out)\n\n return mu, log_var\n","sub_path":"BicycleGAN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"412465790","text":"# coding=utf-8\n# @author: bryan\n\nimport pandas as pd\n\"\"\"\n用户最多连续看了多少个商品/店铺没有购买,在6号连续看了多少个商品/店铺没有购买,6号一共没有购买的商品数,店铺数\n商品,店铺,类别,城市,品牌点击购买趋势,前7天统计\n商品,店铺,类别,城市,品牌 被一次性购买的比例 ,一次性购买次数/购买次数\n商品,店铺,类别,城市,品牌 第一次出现到第一次购买的时间间隔\n\n\"\"\"\n\n\"\"\"连续未购买7个特征,线下提升万0.5\"\"\"\ndef user_continue_nobuy(org):\n # 每条记录先按user_id,timestamp排序\n data = org[org['day'] < 7].sort_values(by=['user_id','context_timestamp'], ascending=True)\n train=org[org.day==7][['instance_id','user_id']]\n\n def no_buy_count(grouped_x):\n max_cnt = 0\n max_no_buy=0\n for is_trade in grouped_x:\n if is_trade==0:\n max_no_buy += 1\n max_cnt = max(max_cnt, max_no_buy)\n else:\n max_no_buy=0\n return max_cnt\n\n user_nobuy = data.groupby('user_id', as_index=False)['is_trade']\\\n .agg({'user_continue_nobuy_click_cnt':lambda x:no_buy_count(x)})\n\n print('user_continue_nobuy_click_cnt finish')\n data=data[data.day==6].sort_values(by=['user_id','context_timestamp'])\n day6_user_nobuy=data.groupby('user_id', as_index=False)['is_trade']\\\n .agg({'day6_user_continue_nobuy_click_cnt': lambda x: no_buy_count(x)})\n\n print('day6_user_continue_nobuy_click_cnt finish')\n train=pd.merge(train, user_nobuy,on='user_id',how='left')\n train = pd.merge(train, day6_user_nobuy, on='user_id', how='left')\n data = org[org['day'] ==6]\n user_buy_items=data[data.is_trade==1].groupby('user_id', as_index=False)['item_id'].agg({'day6_user_buy_items':lambda x:len(set(x))})\n user_nobuy_items=data.groupby('user_id', as_index=False)['item_id'].agg({'day6_user_nobuy_items': lambda x: len(set(x))})\n user_buy_shops = 
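The `Encoder` docstring above lists the re-parameterization recipe in prose; as code it is the standard VAE sampling step (a short sketch, assuming torch):

import torch

def reparameterize(mu, log_var):
    std = torch.exp(0.5 * log_var)   # step 2: std = exp(log(var) / 2)
    eps = torch.randn_like(std)      # step 3: random_z ~ N(0, 1)
    return eps * std + mu            # step 4: encoded_z (re-parameterization trick)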
data[data.is_trade == 1].groupby('user_id', as_index=False)['item_id'].agg({'day6_user_buy_shops': lambda x: len(set(x))})\n user_nobuy_shops = data.groupby('user_id', as_index=False)['item_id'].agg({'day6_user_nobuy_shops': lambda x: len(set(x))})\n\n print('day6_user_nobuy finish')\n train=pd.merge(train,user_buy_items,on='user_id',how='left')\n train = pd.merge(train, user_nobuy_items, on='user_id', how='left')\n train = pd.merge(train, user_buy_shops, on='user_id', how='left')\n train = pd.merge(train, user_nobuy_shops, on='user_id', how='left')\n train['day6_user_items_d_shops']=train['day6_user_nobuy_items']/train['day6_user_nobuy_shops']\n train=train.drop('user_id',axis=1)\n train.to_csv('../data/nobuy_feature.csv',index=False)\n print('nobuy_feature finish')\n # return train\n\ndef trend(data,item):\n tmp = data.groupby([item, 'day'], as_index=False)['is_trade'].agg({'buy': 'sum', 'cnt': 'count'})\n features = []\n for key, df in tmp.groupby(item, as_index=False):\n feature = {}\n feature[item] = key\n # 遍历groupd中的元素\n for index, row in df.iterrows():\n feature[item+'buy' + str(row['day'])] = row['buy']\n feature[item+'cnt' + str(row['day'])] = row['cnt']\n features.append(feature)\n features =pd.DataFrame(features)\n return features\n # def f(x):\n # return 1 if x>0 else 0\n # for i in range(6):\n # features[item + 'buy_trend_'+str(i+1)] = features[item + 'buy'+str(i+1)] - features[item + 'buy'+str(i)]\n # features[item + 'buy_trend_' + str(i + 1)]=features[item + 'buy_trend_'+str(i+1)].apply(f)\n # features[item+'buy_trend'] = features[item + 'buy_trend_1'] + features[item + 'buy_trend_2']+ features[item + 'buy_trend_3']+ features[item + 'buy_trend_4']+ features[item + 'buy_trend_5']+ features[item + 'buy_trend_6']\n # for i in range(6):\n # features[item + 'cnt_trend_'+str(i+1)] = features[item + 'cnt'+str(i+1)] - features[item + 'cnt'+str(i)]\n # features[item + 'cnt_trend_' + str(i + 1)] = features[item + 'cnt_trend_' + str(i + 1)].apply(f)\n # features[item+'cnt_trend'] = features[item + 'cnt_trend_1'] + features[item + 'cnt_trend_2'] + features[item + 'cnt_trend_3'] + features[item + 'cnt_trend_4'] + features[item + 'cnt_trend_5'] + features[item + 'cnt_trend_6']\n # return features.drop([item + 'buy_trend_'+str(i+1) for i in range(6)]+[item + 'cnt_trend_'+str(i+1) for i in range(6)],axis=1)\n\n\"\"\"\n商品,店铺,类别,城市,品牌点击购买趋势,前7天统计,比上一天高为1,否则为0,再统计1的次数,7个特征*5\n\"\"\"\ndef trend_expode(data, col='shop_id'): # 该函数有点像hive里的expode,将行转成列\n import json\n # col可能为店铺,因此tmp为以店铺,day为key统计成交次数以及行为次数\n tmp = data.groupby([col, 'day'], as_index=False)['is_trade'].agg({'buy': 'sum', 'cnt': 'count'})\n # tmp列名店铺, day, buy, cnt\n samples = []\n # 以 店铺为key,进行二次聚合\n for key, df in tmp.groupby(col, as_index=False):\n sample = {}\n sample[col] = key # 店铺id, 每个sample是一条样本\n for index, row in df.iterrows():\n sample[col + '_buy_' + str(int(row['day']))] = row['buy'] # 以shop为key,将不同天数的成交以行行为次数当成不同的列名\n sample[col + '_cnt_' + str(int(row['day']))] = row['cnt']\n\n samples.append(sample)\n\n print(\"samples:\", samples)\n \"\"\"\n fetaures是一个列表,每个列表是一行样本, 其中列表里的元素是一个dict,dict的不同key代表不同的列名\n 这样的方式一般可用于样本特征的稀疏存储,\n 因此最后的列名为:['shop_id', 'shop_id_buy_0', 'shop_id_buy_2', 'shop_id_buy_3',...]\n [\n {\n \"shop_id\": 0,\n \"shop_id_buy_2\": 0,\n \"shop_id_cnt_2\": 1,\n \"shop_id_buy_5\": 0,\n \"shop_id_cnt_5\": 2\n },\n {\n \"shop_id\": 1,\n \"shop_id_buy_2\": 0,\n \"shop_id_cnt_2\": 1,\n \"shop_id_buy_7\": 0,\n \"shop_id_cnt_7\": 1\n }\n ]\n \"\"\"\n samples = pd.DataFrame(samples)\n return samples, 
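A likely copy-paste slip in the day-6 counts above: `user_buy_shops` and `user_nobuy_shops` aggregate `item_id`, so the "shop" features never actually see `shop_id`, and the `day6_user_items_d_shops` ratio degenerates to 1 for every user with data. A sketch of the presumably intended lines, keeping the source's own agg idiom:

user_buy_shops = data[data.is_trade == 1].groupby('user_id', as_index=False)['shop_id'] \
    .agg({'day6_user_buy_shops': lambda x: len(set(x))})
user_nobuy_shops = data.groupby('user_id', as_index=False)['shop_id'] \
    .agg({'day6_user_nobuy_shops': lambda x: len(set(x))})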
tmp\n\ndef trend_feature(org):\n data=org[org.day<7]\n cols = ['item_id', 'item_brand_id', 'shop_id', 'item_category_list', 'item_city_id',\n 'predict_category_property', 'context_page_id', 'query1', 'query']\n train=org[org.day==7][['instance_id'] + cols]\n\n # 将不同的统计维度的feature join起来\n for col in cols:\n print(col + ' finish')\n train=pd.merge(train, trend_expode(data, col)[0], on=col, how='left')\n\n train=train.drop(cols, axis=1) # 将原始的列名去掉\n for col in cols:\n for day in range(6):\n train['_'.join([col, str(day + 1), 'd', str(day), 'cnt'])]= train[col + 'cnt' + str(day + 1)] / train[col + 'cnt' + str(day)]\n train['_'.join([col, str(day + 1), 'd', str(day), 'buy'])]= train[col + 'buy' + str(day + 1)] / train[col + 'buy' + str(day)]\n\n train=train[[i for i in train.columns if 'cnt6' not in i]]\n train.to_csv('../data/trend_feature.csv',index=False)\n print('trend_feature finish')\n # return train\n\n# 商品,店铺,类别,城市,品牌,页面 被一次性购买的比例,次数 ,一次性购买次数/购买次数 线下测试只有item,shop的shot_rate有用\n# 用户,商品,店铺,类别,城市,品牌,页面 7号一次性购买次数,交叉提取\n# 如何定义一次性购买: cvr=1, 认为转化率为1就是一次性购买\ndef oneshot(data, col):\n tmp = data.groupby([col], as_index=False)['is_trade'].agg({col + '_buy': 'sum'})\n # shop_id, user_id\n shot = data.groupby([col, 'user_id'], as_index=False)['is_trade'].agg({'is_shot': 'mean'})\n # is_shot=1: 即每次行为都成交就是一次性购买\n shot = shot[shot[\"is_shot\"] == 1].groupby([col], as_index=False)['is_shot']\\\n .agg({col + 'shot_num': 'count'})\n tmp = pd.merge(tmp, shot, on=[col], how='left')\n tmp[col + '_shot_rate'] = tmp[col + 'shot_num'] / tmp[col + '_buy']\n return tmp[[col, col + '_shot_rate']]\n\n# calc data,join data\ndef today_shot(c_data, j_data):\n cols=['item_id', 'shop_id', 'query', 'query1']\n j_data=j_data[['instance_id'] + cols]\n for col in cols:\n j_data = pd.merge(j_data, oneshot(c_data, col), on=col, how='left')\n j_data=j_data.drop(cols, axis=1)\n j_data.columns=['instance_id','today_item_shot_rate','today_shop_shot_rate','today_query_shot_rate','today_query1_shot_rate']\n return j_data\n\ndef today_shot_feature(org):\n from sklearn.model_selection import train_test_split\n data=org[org['day']==7]\n train=data[data['is_trade']>-1]\n predict=data[data['is_trade']<0]\n predict=today_shot(train,predict)\n train1,train2=train_test_split(train, test_size=0.5, random_state=1024)\n train12=today_shot(train1, train2)\n train21=today_shot(train2, train1)\n data=pd.concat([train21, train12, predict], axis=0).reset_index(drop=True)\n return data\n\ndef day6_shot_feature(org):\n data=org[org.day==6]\n items = ['item_id', 'shop_id', 'query', 'query1']\n train = org[org.day == 7][['instance_id']+items]\n for item in items:\n train = pd.merge(train, oneshot(data, item), on=item, how='left')\n train=train.drop(items,axis=1)\n train.columns=['instance_id','day6_item_shot_rate','day6_shop_shot_rate','day6_query_shot_rate','day6_query1_shot_rate']\n return train\n\ndef oneshot_feature(org):\n data=org[org.day<7]\n items = ['item_id', 'shop_id', 'query', 'query1']\n train = org[org.day == 7][['instance_id']+items]\n for item in items:\n train=pd.merge(train,oneshot(data, item),on=item,how='left')\n print(item+' finish')\n\n train = train.drop(items, axis=1)\n print(train.columns)\n today=today_shot_feature(org)\n print(today.columns)\n\n day6=day6_shot_feature(org)\n print(day6.columns)\n\n train=pd.merge(train, today, on='instance_id', how='left')\n train = pd.merge(train, day6, on='instance_id', how='left')\n\n train.to_csv('../data/oneshot_feature.csv', index=False)\n print('oneshot_feature finish')\n\n# 
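`trend_expode` above hand-rolls a long-to-wide reshape with nested loops over dicts; pandas expresses the same transform directly with `pivot_table`. A sketch producing equivalent per-day buy/cnt columns (column names chosen to match the loop's `shop_id_buy_3`-style output; assumes pandas >= 0.25 for named aggregation):

import pandas as pd

def trend_pivot(data, col='shop_id'):
    tmp = data.groupby([col, 'day'], as_index=False)['is_trade'].agg(buy='sum', cnt='count')
    wide = tmp.pivot_table(index=col, columns='day', values=['buy', 'cnt'])
    # flatten the (stat, day) MultiIndex into shop_id_buy_3-style names
    wide.columns = ['%s_%s_%d' % (col, stat, int(day)) for stat, day in wide.columns]
    return wide.reset_index()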
商品,店铺,类别,城市,品牌,query 第一次出现到第一次购买的时间间隔\n# 前所有天,第七天\ndef first_ocr(data,item):\n import numpy as np\n import datetime\n def sec_diff(a, b):\n if (a is np.nan) | (b is np.nan):\n return np.nan\n return (datetime.datetime.strptime(str(b), \"%Y-%m-%d %H:%M:%S\") - datetime.datetime.strptime(str(a),\"%Y-%m-%d %H:%M:%S\")).seconds\n\n ocr=data.groupby(item,as_index=False)['context_timestamp'].agg({'min_ocr_time':'min'})\n buy=data[data[\"is_trade\"]==1].groupby(item, as_index=False)['context_timestamp']\\\n .agg({'min_buy_time':'min'})\n data=pd.merge(ocr, buy, on=item, how='left')\n data[item+'_ocr_buy_diff_day6'] = data.apply(lambda x:sec_diff(x['min_ocr_time'],x['min_buy_time']),axis=1)\n return data[[item,item+'_ocr_buy_diff_day6']]\n\n# calc data,join data\ndef today_ocr(c_data, j_data):\n items=['item_id','shop_id','predict_category_property']\n item_shot=first_ocr(c_data, items[0])\n shop_shot=first_ocr(c_data, items[1])\n query_shot=first_ocr(c_data, items[2])\n j_data=pd.merge(j_data,item_shot,on=items[0],how='left')\n j_data = pd.merge(j_data, shop_shot, on=items[1], how='left')\n j_data = pd.merge(j_data, query_shot, on=items[2], how='left')\n j_data= j_data[['instance_id','item_id_ocr_buy_diff','shop_id_ocr_buy_diff','predict_category_property_ocr_buy_diff']]\n j_data.columns=['instance_id','today_item_id_ocr_buy_diff','today_shop_id_ocr_buy_diff','today_predict_category_property_ocr_buy_diff']\n return j_data\n\ndef today_ocr_feature(org):\n from sklearn.model_selection import train_test_split\n data=org[org['day']==7]\n train=data[data['is_trade']!=-1]\n predict=data[data['is_trade']==-1]\n predict=today_ocr(train,predict)\n train1,train2=train_test_split(train,test_size=0.5,random_state=1024)\n train22=today_ocr(train1, train2)\n train11=today_ocr(train2, train1)\n data=pd.concat([train11,train22,predict]).reset_index(drop=True)\n return data\n\ndef first_ocr_feature(org):\n items=['item_id','query','query1']\n data=org[org.day<7]\n train=org[org.day==7][['instance_id']+items]\n # for item in items:\n # tmp=first_ocr(data, item)\n # tmp.columns=[item,item+'_ocr_buy_diff_all_day']\n # train=pd.merge(train,tmp,on=item,how='left')\n # print(item)\n data=data[data.day==6]\n for item in items:\n tmp=first_ocr(data, item)\n train=pd.merge(train,tmp,on=item,how='left')\n print(item)\n #today=today_ocr_feature(orgin_data)\n #train=pd.merge(train,today,on='instance_id',how='left')\n train=train.drop(items, axis=1)\n train.to_csv('../data/ocr_feature.csv',index=False)\n print('ocr_feature finish')\n\n\n\"\"\"\nitem和shop 属性的变化,前7天的均值,第7天和前七天均值的差值,第7天和第六天的差值\nitem_price_level,item_sales_level,item_collected_level,item_pv_level\nshop_review_num_level,shop_review_positive_rate,shop_star_level,shop_score_service,shop_score_delivery,shop_score_description\n线下可以提升1个万分位\n\"\"\"\ndef item_shop_var_feature(org):\n import numpy as np\n col=['item_id','shop_id']\n item_cates=['item_price_level','item_sales_level','item_collected_level','item_pv_level']\n shop_cates=['shop_review_num_level','shop_review_positive_rate','shop_star_level','shop_score_service','shop_score_delivery','shop_score_description']\n data=org[org.day<7]\n train=org[org.day==7][['instance_id']+col+item_cates+shop_cates]\n\n for cate in item_cates:\n train=pd.merge(train,data.groupby('item_id',as_index=False)[cate].agg({'item_id_'+cate+'_var':np.std,'item_id_'+cate+'_avg':'mean'}),on='item_id',how='left')\n train['_'.join(['diff',cate,'today_d_7days'])]=train[cate]-train['item_id_'+cate+'_avg']\n\n for cate in shop_cates:\n 
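One caveat on `sec_diff` inside `first_ocr` above: `timedelta.seconds` keeps only the time-of-day remainder and silently drops whole days, so a gap of 2 days and 10 seconds comes back as 10. `total_seconds()` is almost certainly what is meant; a sketch:

import datetime
import numpy as np

def sec_diff(a, b):
    if (a is np.nan) | (b is np.nan):
        return np.nan
    fmt = "%Y-%m-%d %H:%M:%S"
    delta = datetime.datetime.strptime(str(b), fmt) - datetime.datetime.strptime(str(a), fmt)
    return delta.total_seconds()  # not .seconds, which discards the days component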
train=pd.merge(train,data.groupby('shop_id',as_index=False)[cate].agg({'shop_id_'+cate+'_var':np.std,'shop_id_'+cate+'_avg':'mean'}),on='shop_id',how='left')\n train['_'.join(['diff', cate, 'today_d_7days'])] = train[cate] - train['shop_id_' + cate + '_avg']\n\n data = org[org.day == 6]\n\n for cate in item_cates:\n avg=data.groupby('item_id',as_index=False)[cate].agg({'item_id_day6'+cate+'_avg':'mean'})\n tmp=pd.merge(train,avg,on='item_id',how='left')\n train['_'.join(['diff',cate,'today_d_6day'])]=tmp[cate]-tmp['item_id_day6'+cate+'_avg']\n\n for cate in shop_cates:\n avg=data.groupby('shop_id',as_index=False)[cate].agg({'shop_id_day6'+cate+'_avg':'mean'})\n tmp=pd.merge(train,avg,on='shop_id',how='left')\n train['_'.join(['diff',cate,'today_d_6day'])]=tmp[cate]-tmp['shop_id_day6'+cate+'_avg']\n\n train.drop(col + item_cates + shop_cates, axis=1)\\\n .to_csv('../data/item_shop_var_feature.csv',index=False)\n\nif __name__ == '__main__':\n org=pd.read_csv('../data/origion_concat.csv')\n user_continue_nobuy(org)\n trend_feature(org)\n oneshot_feature(org)\n first_ocr_feature(org)\n item_shop_var_feature(org)","sub_path":"logit_feature.py","file_name":"logit_feature.py","file_ext":"py","file_size_in_byte":15261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177099850","text":"import os\r\nimport numpy as np\r\nimport cv2\r\nimport sys\r\nimport argparse\r\nfrom pathlib import Path\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\n\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\n\r\nfrom conn_graph import ConnectionGraph\r\n\r\n# set print options for numpy\r\nnp.set_printoptions(precision=6, suppress=True, threshold=sys.maxsize)\r\n\r\ndef calibrate_camera(folder, pattern_size):\r\n # modified from opencv camera calibration example\r\n img_names = list(Path(folder).glob('*.jpg')) + list(Path(folder).glob('*.jpeg')) + list(Path(folder).glob('*.png'))\r\n assert len(img_names) > 0\r\n\r\n if args.debug:\r\n Path(args.calib_output_folder).mkdir(exist_ok=True, parents=True)\r\n square_size = 1.0\r\n pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)\r\n pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2) # indices\r\n pattern_points *= square_size\r\n\r\n obj_points = []\r\n img_points = []\r\n\r\n h, w = cv2.imread(str(img_names[0]), cv2.IMREAD_GRAYSCALE).shape[:2] # TODO: use imquery call to retrieve results\r\n \r\n def processImage(img_filename):\r\n print('processing %s... ' % img_filename)\r\n img = cv2.imread(str(img_filename), 0)\r\n if img is None:\r\n print(\"Failed to load\", img_filename)\r\n return None\r\n\r\n assert w == img.shape[1] and h == img.shape[0], (\"size: %d x %d ... \" % (img.shape[1], img.shape[0]))\r\n found, corners = cv2.findChessboardCorners(img, pattern_size)\r\n if found:\r\n term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)\r\n cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)\r\n\r\n if args.debug:\r\n vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\r\n cv2.drawChessboardCorners(vis, pattern_size, corners, found)\r\n outfile = Path(args.calib_output_folder) / (img_filename.stem + '_chess.png')\r\n cv2.imwrite(str(outfile), vis)\r\n\r\n if not found:\r\n print('chessboard not found')\r\n return None\r\n\r\n print(' %s... 
OK' % img_filename)\r\n return (corners.reshape(-1, 2), pattern_points)\r\n\r\n threads_num = args.threads\r\n if threads_num <= 1:\r\n chessboards = [processImage(fn) for fn in img_names]\r\n else:\r\n print(\"Run with %d threads...\" % threads_num)\r\n pool = ThreadPool(threads_num)\r\n chessboards = pool.map(processImage, img_names)\r\n\r\n chessboards = [x for x in chessboards if x is not None]\r\n for (corners, pattern_points) in chessboards:\r\n img_points.append(corners)\r\n obj_points.append(pattern_points)\r\n\r\n # calculate camera distortion\r\n rms, mtx, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)\r\n\r\n # get optimal camera matrix\r\n camera_matrix, roi = cv2.getOptimalNewCameraMatrix(mtx,distCoeffs,(w,h),1,(w,h))\r\n # Get error\r\n mean_error = 0\r\n for i in range(len(obj_points)):\r\n projpts, jacobian = cv2.projectPoints(obj_points[i], rvecs[i], tvecs[i], mtx, distCoeffs)\r\n error = cv2.norm(img_points[i].reshape(-1, 1, 2), projpts, cv2.NORM_L2) / len(projpts)\r\n mean_error += error\r\n print(\"\\nRMS:\", rms)\r\n print(\"Calibration Error: %f\" % (mean_error/len(obj_points)))\r\n print(\"distortion coefficients: \", distCoeffs.ravel())\r\n\r\n return camera_matrix, distCoeffs, mtx, roi\r\n\r\n# undistort images\r\ndef undistort_images(input_folder, output_folder, new_mtx, distCoeffs, mtx, roi):\r\n img_folder = list(Path(input_folder).glob('*.jpg')) + list(Path(input_folder).glob('*.jpeg')) + list(Path(input_folder).glob('*.png'))\r\n for img_filename in img_folder:\r\n img = cv2.imread(str(img_filename))\r\n if distCoeffs is not None and mtx is not None and roi is not None:\r\n dst = cv2.undistort(img, mtx, distCoeffs, None, new_mtx)\r\n x,y,w,h = roi\r\n dst = dst[y:y+h, x:x+w]\r\n # Update images\r\n cv2.imwrite(str(Path(output_folder) / img_filename.name), dst)\r\n\r\n\r\ndef get_kps_from_imgs(folder):\r\n imgs = []\r\n kps = []\r\n descs = []\r\n for img_filename in list(Path(folder).glob('*.png')) + list(Path(folder).glob('*.jpg')) + list(Path(folder).glob('*.jpeg')):\r\n img = cv2.imread(str(img_filename))\r\n img_gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\r\n # ATTENTION:\r\n # Since SIFT and SURF is not officially supported in OpenCV and it's patented\r\n # I would like to use an efficient alternative called ORB which is officially supported by OpenCV\r\n # first: we should install opencv-contrib-python\r\n # > pip install opencv-contrib-python\r\n\r\n # Initiate ORB detector\r\n orb = cv2.ORB_create(args.num_of_kps, scoreType=cv2.ORB_FAST_SCORE)\r\n # find the keypoints with ORB\r\n kp = orb.detect(img_gray, None)\r\n # compute the descriptors with ORB\r\n kp, desc = orb.compute(img, kp)\r\n\r\n imgs.append(img)\r\n kps.append(kp)\r\n # must change to float32 for FLANN function\r\n descs.append(desc.astype(np.float32))\r\n\r\n\r\n if args.debug:\r\n Path(args.sift_output_folder).mkdir(exist_ok=True, parents=True)\r\n # draw keypoints on image\r\n img_kpt = cv2.drawKeypoints(img_gray, kp, None, color=(0,255,0), flags=0)\r\n cv2.imwrite(str(Path(args.sift_output_folder) / (img_filename.stem + 'kpt.png')), img_kpt)\r\n\r\n return imgs, kps, descs\r\n\r\n\r\ndef get_norm_matrix(features):\r\n assert features.shape[1] == 3 # point should have dimension 3xN\r\n assert np.sum(np.isnan(features)) + np.sum(np.isinf(features)) == 0 # points can not be on infinity\r\n assert np.sum(features[:,2]) > 1e-10 # this method does not support points at infinity\r\n\r\n centroid = np.mean(features, axis=0)\r\n dist = 
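A side note on the ORB + FLANN pairing in `get_kps_from_imgs`/`find_correspondence` above: ORB descriptors are binary, and the float32 cast only makes them acceptable to FLANN's KD-tree index, which is designed for real-valued descriptors like SIFT's. OpenCV's documentation recommends the LSH index for binary features, which also removes the need for the cast; a sketch with the documented parameters:

import cv2

FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,        # values suggested in the OpenCV docs
                    key_size=12,
                    multi_probe_level=1)
flann = cv2.FlannBasedMatcher(index_params, dict(checks=50))
# matches = flann.knnMatch(desc1, desc2, k=2)   # desc1/desc2 can stay uint8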
np.sqrt(np.sum((features - centroid) ** 2, axis=1))\r\n mean_dist = np.mean(dist)\r\n\r\n norm_matrix = np.array([[np.sqrt(2)/mean_dist, 0, -np.sqrt(2)/mean_dist*centroid[0]], \r\n [0, np.sqrt(2)/mean_dist, -np.sqrt(2)/mean_dist*centroid[1]], \r\n [0,0,1]])\r\n\r\n return norm_matrix\r\n\r\ndef findFundamentalMatrix(features1, features2, method=0):\r\n if method == 0:\r\n # use findFundamentalMat to filter some points without correspondence\r\n # F: fundamental matrix\r\n # input should be 2d float points\r\n\r\n\r\n F, funda_mask = cv2.findFundamentalMat(features1, features2, cv2.FM_RANSAC)\r\n # F, funda_mask = cv2.findFundamentalMat(features1, features2, cv2.FM_LMEDS)\r\n \r\n # 8 points algorithm match the result got from self manipulated algorithms\r\n # F, funda_mask = cv2.findFundamentalMat(features1, features2, cv2.FM_8POINT)\r\n funda_mask = funda_mask.ravel()\r\n \r\n else:\r\n\r\n num_of_kps = features1.shape[0]\r\n homo_f1 = np.hstack([features1, np.ones((num_of_kps, 1))])\r\n homo_f2 = np.hstack([features2, np.ones((num_of_kps, 1))])\r\n normmat1 = get_norm_matrix(homo_f1)\r\n normmat2 = get_norm_matrix(homo_f2)\r\n f1_norm = homo_f1.dot(normmat1.T)\r\n f2_norm = homo_f2.dot(normmat2.T)\r\n u1n = f1_norm[:,0].reshape(-1, 1)\r\n v1n = f1_norm[:,1].reshape(-1, 1)\r\n u2n = f2_norm[:,0].reshape(-1, 1)\r\n v2n = f2_norm[:,1].reshape(-1, 1)\r\n\r\n if method == 1:\r\n A = np.hstack([u2n * u1n, u2n * v1n, u2n, v2n * u1n, v2n * v1n, v2n, u1n, v1n, np.ones((num_of_kps, 1))])\r\n u_A, s_A, vh_A = np.linalg.svd(A)\r\n F_norm = vh_A.T[:,8].reshape(3, 3)\r\n\r\n elif method == 2:\r\n WW = np.hstack([f2_norm[:,0].reshape(-1, 1) * f1_norm, \\\r\n f2_norm[:,1].reshape(-1, 1) * f1_norm, f1_norm[:,0:3]])\r\n u_W, s_W, vh_W = np.linalg.svd(WW)\r\n F_norm = vh_W.T[:,8].reshape(3, 3)\r\n\r\n u_F, s_F, vh_F = np.linalg.svd(F_norm)\r\n F_norm_prime = u_F.dot(np.diag([s_F[0], s_F[1], 0])).dot(vh_F)\r\n F = normmat2.T.dot(F_norm_prime).dot(normmat1)\r\n\r\n # svd method won't return reasonable mask, so just all 1 as masks\r\n funda_mask = np.ones((num_of_kps))\r\n\r\n return F, funda_mask\r\n\r\n\r\ndef find_correspondence(info1, info2):\r\n \"\"\"\r\n from keypoints and their descriptions for each images\r\n find pixelwise correspondence from two extracted features lists\r\n calculate each pairs of correspondence, filter invalid points\r\n \"\"\"\r\n\r\n idx1, img1, kp1, desc1 = info1\r\n idx2, img2, kp2, desc2 = info2\r\n\r\n print('num of keypoints: %d (img %d), %d (img %d)' % (len(kp1), idx1, len(kp2), idx2))\r\n\r\n # use FLANN to do feature matching\r\n FLANN_INDEX_KDTREE = 0\r\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\r\n search_params = dict(checks = 50)\r\n\r\n flann = cv2.FlannBasedMatcher(index_params, search_params)\r\n\r\n matches = flann.knnMatch(desc1, desc2, k=2)\r\n\r\n # store all the good matches as per Lowe's ratio test (0.7).\r\n good = [m for m, n in matches if m.distance < args.lowe_test_ratio * n.distance]\r\n\r\n # round to integer, but remain as type float32\r\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good])\r\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good])\r\n\r\n # minimum match count\r\n min_match_count = int(args.min_match_percent * min(len(kp1), len(kp2)))\r\n\r\n if len(good) > min_match_count:\r\n\r\n # use findHomography to filter some points without correspondence\r\n # params: RANSAC reprojection threshold {5.0}\r\n # input should be 3d\r\n M, homo_mask = cv2.findHomography(src_pts.reshape(-1,1,2), \\\r\n 
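`get_norm_matrix` above implements Hartley's preconditioning for the 8-point algorithm: translate the points so their centroid sits at the origin, then scale so the mean distance from it is sqrt(2). A quick numeric sanity check of exactly that property (assumes numpy and the function defined above):

import numpy as np

pts = np.hstack([np.random.rand(20, 2) * 100.0, np.ones((20, 1))])
T = get_norm_matrix(pts)                 # function defined above
norm_pts = pts.dot(T.T)
print(np.mean(norm_pts[:, :2], axis=0))                        # ~ [0, 0]
print(np.mean(np.sqrt(np.sum(norm_pts[:, :2] ** 2, axis=1))))  # ~ sqrt(2) = 1.414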
dst_pts.reshape(-1,1,2), cv2.RANSAC, 5.0)\r\n # change from n * 1 to 1-d array\r\n homo_matches_mask = homo_mask.ravel()\r\n\r\n\r\n # self defined findFundamentalMatrix method using SVD\r\n F, funda_matches_mask = findFundamentalMatrix(src_pts, dst_pts, method=0)\r\n\r\n # calculate inliers(H) / inliers(F)\r\n inliers_rate = np.sum(homo_matches_mask) / np.sum(funda_matches_mask)\r\n\r\n # get common results for H and F\r\n matchesMask = np.logical_and(homo_matches_mask, funda_matches_mask)\r\n\r\n if args.debug:\r\n h, w, c = img1.shape\r\n # four corner points in the first image\r\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\r\n # use it to calculate the correspondence in the second image\r\n dst = cv2.perspectiveTransform(pts,M)\r\n # mark the four courners in the second image\r\n img2_pers = cv2.polylines(img2.copy(), [np.int32(dst)],True,255,3, cv2.LINE_AA)\r\n\r\n # draw matches image\r\n Path(args.match_output_folder).mkdir(exist_ok=True, parents=True)\r\n\r\n draw_params = dict(# matchColor = (0,255,0), # draw matches in green color\r\n singlePointColor = None, # dont't draw keypoints withoud matches\r\n matchesMask = matchesMask.tolist(), # draw only inliers\r\n flags = 2) # dont't draw keypoints withoud matches\r\n\r\n img_match = cv2.drawMatches(img1,kp1,img2_pers,kp2, good, None, **draw_params)\r\n cv2.imwrite(str(Path(args.match_output_folder) / \"_\".join((\"match\", str(idx1), str(idx2), \".jpg\"))), img_match)\r\n\r\n src_pts = src_pts[np.nonzero(matchesMask)]\r\n dst_pts = dst_pts[np.nonzero(matchesMask)]\r\n\r\n\r\n if src_pts.shape[0] < min_match_count:\r\n print(\"(%d/%d) matches are found, but not enough inliers (%d). \" \r\n % (len(good), min_match_count, np.sum(matchesMask)))\r\n return None\r\n\r\n # sort these points for better look\r\n lex_idx = np.lexsort((src_pts[:,1], src_pts[:,0]))\r\n src_pts = src_pts[lex_idx]\r\n dst_pts = dst_pts[lex_idx]\r\n\r\n\r\n # correct matches to minimize fundamental matrix ransac error\r\n src_pts_new, dst_pts_new = cv2.correctMatches(F, src_pts.reshape(1, src_pts.shape[0], src_pts.shape[1]), \\\r\n dst_pts.reshape(1, dst_pts.shape[0], dst_pts.shape[1]))\r\n\r\n\r\n print(\"number of matches are found - %d/%d, inliers(H) / inliers(F): %.6f\" % (src_pts_new[0].shape[0], min_match_count, inliers_rate))\r\n\r\n # return int array for connected graph search\r\n # because we need to do features tracking, an int result will make matching easier\r\n # return ((idx1, img1, np.round(src_pts_new[0]).astype(np.float32)), (idx2, img2, np.round(dst_pts_new[0]).astype(np.float32)), F, M, inliers_rate)\r\n \r\n # return float array\r\n return ((idx1, img1, src_pts_new[0]), (idx2, img2, dst_pts_new[0]), F, M, inliers_rate)\r\n\r\n # if we don't want to correct_matches, just return this\r\n # return ((idx1, img1, src_pts), (idx2, img2, dst_pts), F, M, inliers_rate)\r\n\r\n else:\r\n print(\"Not enough matches are found, lowe ratio test not passed - %d/%d\" % (len(good), min_match_count))\r\n\r\n return None\r\n\r\n\r\ndef build_connectivity_graph(correspondences):\r\n \"\"\"\r\n use the found correspondence of points to build connectivity graph\r\n which is weighted graph, with weights as their connections\r\n \"\"\"\r\n graph = ConnectionGraph(correspondences)\r\n return graph\r\n\r\n\r\ndef get_relative_pose(features1, features2, F, kint1, kint2, distCoeffs=None):\r\n '''\r\n triangulation using linear method, svd\r\n use features from both images and fundamental matrix F\r\n '''\r\n\r\n # 1. 
change to homogeneous coordinates\r\n\r\n assert features1.shape == features2.shape\r\n\r\n num_of_kps = features1.shape[0]\r\n\r\n # # not use normalized point to recover R and T\r\n # pts_l_norm = np.expand_dims(features1, axis=1).astype(np.float32)\r\n # pts_r_norm = np.expand_dims(features2, axis=1).astype(np.float32)\r\n\r\n # use opencv to calculate rotation and translation\r\n pts_l_norm = cv2.undistortPoints(np.expand_dims(features1, axis=1), cameraMatrix=kint1, distCoeffs=None)\r\n pts_r_norm = cv2.undistortPoints(np.expand_dims(features2, axis=1), cameraMatrix=kint2, distCoeffs=None)\r\n\r\n # print(F)\r\n # F = np.array([[-5.91176170742754e-07,-1.15686479351066e-07,0.00365007704411600], [1.26728959502786e-06,-4.02454207350275e-07,-0.00807758873453800], [0.000693859536366768,0.00835431229618000, -1.08724818522269]])\r\n # print(F)\r\n\r\n\r\n # # directly use findEssentialMat method cannot give you a good result, so neglect it\r\n # E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=kint1[0,0], pp=(kint1[0,2], kint1[1,2]), method=cv2.RANSAC, prob=0.999, threshold=3.0)\r\n\r\n # get essential matrix by using FundamentalMat calculated before\r\n E = kint2.T.dot(F).dot(kint1)\r\n\r\n # prove that b.dot(F).dot(a) = 0\r\n errF = 0\r\n errE = 0\r\n for i in range(len(features1)):\r\n x1 = np.array([[features1[i][0], features1[i][1], 1]]).T\r\n x2 = np.array([[features2[i][0], features2[i][1], 1]])\r\n\r\n errF += x2.dot(F).dot(x1)[0][0]\r\n errE += x2.dot(np.linalg.inv(kint2.T)).dot(E).dot(np.linalg.inv(kint1)).dot(x1)[0][0]\r\n\r\n print('sum of fundamental matrix error: %.10f, essential matrix error: %.10f' % (errF, errE))\r\n\r\n # 1. self defined essential matrix decomposition using SVD\r\n # make up matrices W and Z\r\n # W = np.array([[0,-1,0], [1,0,0],[0,0,1]])\r\n # Z = np.array([[0,1,0], [-1,0,0],[0,0,0]])\r\n\r\n # # svd essential matrix, get rotation and translation for right camera\r\n # U, S, Vh = np.linalg.svd(E)\r\n # T_x = U.dot(Z).dot(U.T)\r\n # T = np.array([[T_x[2,1], T_x[0,2], T_x[1,0]]]).T\r\n # R = U.dot(W).dot(Vh)\r\n\r\n\r\n # 2. use recoverPose to recover R and T\r\n points, R, T, mask = cv2.recoverPose(E, pts_l_norm, pts_r_norm, cameraMatrix=kint1)\r\n\r\n return R, T\r\n\r\n\r\ndef triangulate(features1, features2, Pr1, Pr2):\r\n\r\n # # 1. self defined triangulation linear method\r\n # X3Ds = []\r\n # for i in range(features1.shape[0]):\r\n # AA = np.vstack([features1[i][0] * Pr1[2] - Pr1[0], \\\r\n # features1[i][1] * Pr1[2] - Pr1[1], \\\r\n # features2[i][0] * Pr2[2] - Pr2[0], \\\r\n # features2[i][1] * Pr2[2] - Pr2[1]])\r\n # UU, SS, VVh = np.linalg.svd(AA)\r\n # homo_X3D = VVh.T[:,3] # the last column of VV\r\n # X3Ds.append(homo_X3D[:-1] / homo_X3D[3])\r\n # X3Ds = np.float32(X3Ds)\r\n\r\n\r\n # 2. 
cv2.triangulatePoints, both get the same results\r\n # Attention: should input (float) features instead of integer\r\n X4Ds = cv2.triangulatePoints(Pr1, Pr2, features1.T, features2.T).T\r\n X3Ds = X4Ds[:,:3] / (X4Ds[:,3].reshape(X4Ds.shape[0], -1))\r\n\r\n # return 3D points, Rotation and Translation for the second camera\r\n return X3Ds\r\n\r\ndef solvePnP(points, img_idx, camera_matrix, distCoeffs):\r\n if len(points) <= 8:\r\n # return Identity, awaiting bundle adjustment\r\n R = np.eye(3)\r\n T = np.zeros((3,1))\r\n else:\r\n points_3d = np.vstack([point['3d'] for point_idx, point in points])\r\n features = np.vstack([point['track'][img_idx] for point_idx, point in points])\r\n\r\n retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d, features, camera_matrix, distCoeffs)\r\n\r\n R, jacobian = cv2.Rodrigues(rvec)\r\n T = -tvec\r\n\r\n return R, T\r\n\r\ndef display_3d(points, i):\r\n\r\n X3Ds = np.vstack([point['3d'] for point_idx, point in points.items() if point['3d'] is not None])\r\n\r\n # when displaying, eliminate outliers\r\n\r\n\r\n\r\n # display the image\r\n if args.debug:\r\n # create the first figure\r\n fig = plt.figure(num=i)\r\n ax = plt.axes(projection='3d')\r\n # ax.scatter3D(X3Ds[:,0], X3Ds[:,1], X3Ds[:,2], c=X3Ds[:,2], cmap='Blues')\r\n ax.scatter3D(X3Ds[:,0], X3Ds[:,1], X3Ds[:,2], cmap='Blues')\r\n ax.set_xlabel('x axis')\r\n ax.set_ylabel('y axis')\r\n ax.set_zlabel('z axis')\r\n plt.show()\r\n\r\n\r\ndef main(args):\r\n \"\"\"\r\n Step 1: Camera calibration\r\n\r\n input: calibration images / self defined camera intrinsic parameters\r\n return: camera_matrix, distortion coefficients\r\n \"\"\"\r\n\r\n if args.calib_type == 'real':\r\n # TODO: real-time calibration with camera on this computer\r\n camera_matrix, distCoeffs, mtx, roi = calibrate_camera(args.calib_folder, tuple(args.pattern_size))\r\n\r\n elif args.calib_type == 'manual':\r\n # manual calibration\r\n ox, oy = 640 / 2, 480 / 2\r\n # ox, oy = 384 / 2, 512 / 2\r\n pwm = 1.22 * 1e-6\r\n f = (28 * 1e-3) / pwm\r\n camera_matrix = np.array([[f, 0, ox], [0, f, oy], [0,0,1]])\r\n\r\n # unknown distort coefficients\r\n distCoeffs = None\r\n mtx = None\r\n roi = None\r\n\r\n elif args.calib_type == 'default':\r\n # use default images calibration\r\n camera_matrix, distCoeffs, mtx, roi = calibrate_camera(args.calib_folder, tuple(args.pattern_size))\r\n\r\n else:\r\n raise NotImplementedError(\"Not implemented yet.\")\r\n\r\n print(\"camera calibration type: %s\" % (args.calib_type))\r\n print('intrinsic matrix:\\n', camera_matrix)\r\n \r\n # undistort images\r\n undistort_folder = args.input_folder.replace('images', 'images_undistort')\r\n Path(undistort_folder).mkdir(exist_ok=True, parents=True)\r\n\r\n undistort_images(args.input_folder, undistort_folder, camera_matrix, distCoeffs, mtx, roi)\r\n\r\n \"\"\"\r\n Step 2: get keypoints from images\r\n \r\n input: images taken from objects. 
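After `triangulate()` above, a cheap way to judge the recovered structure (and, indirectly, the F/E estimate the comments are unsure about) is the mean reprojection error: project the 3D points back through each projection matrix and compare with the measured pixels. A sketch, assuming numpy:

import numpy as np

def reproj_error(X3Ds, features, Pr):
    X4Ds = np.hstack([X3Ds, np.ones((X3Ds.shape[0], 1))])  # back to homogeneous
    proj = X4Ds.dot(Pr.T)
    proj = proj[:, :2] / proj[:, 2:3]                      # perspective divide
    return np.mean(np.linalg.norm(proj - features, axis=1))

# e.g. reproj_error(X3Ds, features1, Pr1) and reproj_error(X3Ds, features2, Pr2)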
Notice: We assume that they are using the same camera, \r\n and the image size should be the same (if not, I can't not guarantee that the result will be fine, but it may works as well)\r\n return: images, keypoints and their descriptions\r\n \"\"\"\r\n\r\n imgs, kps, descs = get_kps_from_imgs(undistort_folder)\r\n\r\n # assume there are more than 2 images for reconstruction\r\n if len(kps) < 2:\r\n raise Error('Error number of images should be larger than 2.')\r\n return\r\n\r\n\r\n \"\"\"\r\n Step 3: find correspondence for keypoints for each image pairs\r\n \r\n input: images taken from objects with keypoints, descriptions from step 2\r\n return: correspondences\r\n \"\"\"\r\n\r\n # for each pair of image, find correspondence\r\n correspondences = []\r\n for i in range(len(imgs) - 1):\r\n for j in range(i + 1, len(imgs)):\r\n correspondence = find_correspondence((i, imgs[i], kps[i], descs[i]), (j, imgs[j], kps[j], descs[j]))\r\n if correspondence is not None:\r\n correspondences.append(correspondence)\r\n\r\n\r\n \"\"\"\r\n Step 4: build a connected graph based on correspondences\r\n \r\n input: correspondences from step 3\r\n return: graph with tracks of points\r\n \"\"\"\r\n\r\n # build connected graph with tracks based on pairwise connections\r\n graph = build_connectivity_graph(correspondences)\r\n print('graph build complete')\r\n\r\n\r\n \"\"\"\r\n Step 5: initialize 3d model using stereo reconstruction\r\n \r\n input: camera matrix from step 1, graph from step 4\r\n return: initialized 3d model\r\n \"\"\"\r\n\r\n # Select the first edge to build the first model (stereo reconstruction)\r\n\r\n node1, node2, edge_weights = graph.get_next_edge()\r\n num_of_edges = 1\r\n print('first edge from node %d, %d with weight %f: ' % (node1.idx, node2.idx, edge_weights))\r\n dic = node2.neighbors[node1.idx]\r\n features1 = node1.kps[dic['left']]\r\n features2 = node2.kps[dic['right']]\r\n F = dic['F'] # fundamental matrix\r\n # get relative pose between feature1, feature2\r\n R2, T2 = get_relative_pose(features1, features2, F, camera_matrix, camera_matrix, distCoeffs=distCoeffs) # use the same camera\r\n \r\n R1 = np.eye(3)\r\n T1 = np.zeros((3,1))\r\n\r\n # projection matrix for initial pair, assume the left camera is at the world center\r\n Pr1 = camera_matrix.dot(np.hstack([R1, T1]))\r\n\r\n # this may not get a better solution like Pr2 = np.hstack([R2, T2])\r\n Pr2 = camera_matrix.dot(np.hstack([R2, T2]))\r\n\r\n # if not, use this\r\n # currently I cannot figure out why this gets worse solution than the previous line\r\n # Pr2 = np.hstack([R2, T2])\r\n\r\n # triangulation to get the first 3d model\r\n X3Ds = triangulate(features1, features2, Pr1, Pr2)\r\n\r\n\r\n # update the graph for the pose between node1 and node2\r\n graph.update_tracks(node1.idx, R1, T1, node2.idx, R2, T2, X3Ds)\r\n\r\n\r\n \"\"\"\r\n Step 6: bundle adjustment for the first 3d model\r\n \r\n it's very possible that first 3d model is a very bad result!!! 
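One small trap in the Step 2 guard above: `Error` is not a Python built-in, so `raise Error(...)` would itself raise NameError instead of the intended message, and the `return` after it is unreachable. A minimal fix:

if len(kps) < 2:
    raise ValueError('number of images should be larger than 2')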
and I cannot figure out the exact reason yet\r\n though the reprojection error is relatively small and fundamental matrix result are fine\r\n\r\n input: graph and camera matrix\r\n return: bundle adjusted first 3d model\r\n \"\"\"\r\n \r\n # graph.bundle_adjust(camera_matrix, num_iter=1000)\r\n\r\n # visualize the first 3d results\r\n display_3d(graph.track.points, num_of_edges)\r\n\r\n\r\n\r\n\r\n # build init 3d model using the edge with largest weight\r\n while (graph.has_edge()):\r\n \"\"\"\r\n Step 7: get another edge based on how many point the new edge observes\r\n \"\"\"\r\n\r\n node1, node2, edge_weights = graph.get_next_edge()\r\n num_of_edges += 1\r\n print('select edge from node %d, %d with weight %f: ' % (node1.idx, node2.idx, edge_weights))\r\n\r\n # features for image left and right\r\n dic = node2.neighbors[node1.idx]\r\n features1 = node1.kps[dic['left']]\r\n features2 = node2.kps[dic['right']]\r\n\r\n # get valid 3d points that contains node1 and node2\r\n valid_points = graph.track.get_points_with_correspondence(node1.idx, node2.idx)\r\n\r\n # get absolute poses for left and right cameras\r\n if node1.absolute_pose['R'] is None and node1.absolute_pose['T'] is None:\r\n print(\"node %d's pose unestimated, use solvepnp to solve\")\r\n R_left, T_left = solvePnP(valid_points, node1.idx, camera_matrix, distCoeffs=distCoeffs)\r\n\r\n else:\r\n R_left, T_left = node1.absolute_pose['R'], node1.absolute_pose['T']\r\n\r\n if node2.absolute_pose['R'] is None and node2.absolute_pose['T'] is None:\r\n print(\"node %d's pose unestimated, use solvepnp to solve\")\r\n R_right, T_right = solvePnP(valid_points, node2.idx, camera_matrix, distCoeffs=distCoeffs)\r\n\r\n else:\r\n R_right, T_right = node2.absolute_pose['R'], node2.absolute_pose['T']\r\n\r\n print(R_left.shape, T_left.shape, R_right.shape, T_right.shape)\r\n\r\n # projection matrix for initial pair, assume the left camera is at the world center\r\n Pr1 = camera_matrix.dot(np.hstack([R_left, T_left]))\r\n Pr2 = camera_matrix.dot(np.hstack([R_right, T_right]))\r\n\r\n # triangulation to get the first 3d model\r\n X3Ds = triangulate(features1, features2, Pr1, Pr2)\r\n\r\n # update the graph for the pose between node1 and node2\r\n graph.update_tracks(node1.idx, R_left, T_left, node2.idx, R_right, T_right, X3Ds)\r\n\r\n \"\"\"\r\n Step 8: bundle adjustment for the updated 3d model\r\n\r\n input: graph and camera matrix\r\n return: bundle adjusted first 3d model\r\n \"\"\"\r\n \r\n # graph.bundle_adjust(camera_matrix, num_iter=1000)\r\n\r\n # visualize the first 3d results\r\n display_3d(graph.track.points, num_of_edges)\r\n\r\n \r\n\r\n # # # check matches are correct\r\n # # if args.debug:\r\n # # img1 = node1.img\r\n # # img2 = node2.img\r\n # # h, w, c = img1.shape\r\n # # img_cat = np.hstack([img1, img2])\r\n\r\n # # for i in range(len(features1)):\r\n # # img_cat[int(features1[i][1])][int(features1[i][0])] = np.array([0,255,0])\r\n # # img_cat[int(features2[i][1])][int(features2[i][0] + w)] = np.array([0,255,0])\r\n # # cv2.line(img_cat, (int(features1[i][0]), int(features1[i][1])), (int(features2[i][0] + w), int(features2[i][1])), color=(0,255,0))\r\n # # cv2.imwrite('test.png', img_cat)\r\n\r\n\r\n\r\n # F = dic['F'] # fundamental matrix\r\n\r\n # # estimate essential matrix\r\n # # assume the left camera is the reference camera, with R1 = I, t1 = 0\r\n # # triangulation using linear method\r\n\r\n # # 1. 
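A hedged observation on `solvePnP()` defined earlier in this file: `cv2.solvePnPRansac` returns rvec/tvec already in the convention x ~ K[R|t]X, so negating tvec (`T = -tvec`) mixes conventions with the `camera_matrix.dot(np.hstack([R, T]))` projection matrices built here — one plausible source of the "very bad result" the comments mention. A sketch of the consistent form:

retval, rvec, tvec, inliers = cv2.solvePnPRansac(points_3d, features, camera_matrix, distCoeffs)
R, _ = cv2.Rodrigues(rvec)
T = tvec  # keep solvePnP's convention: x ~ K [R | t] X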
get first model (3D points) using stereo reconstruction\r\n # X3Ds, R, T = triangulate(features1, features2, F, camera_matrix, camera_matrix, distCoeffs=distCoeffs) # use the same camera\r\n\r\n # # update the graph for the pose between node1 and node2\r\n # graph.update_tracks(node1.idx, node2.idx, R, T, X3Ds)\r\n\r\n\r\n # # it's very possible that X3Ds is a very bad result!!! and I cannot figure out the exact reason yet\r\n # # the reprojection error is relatively small and fundamental matrix result are fine\r\n\r\n # # 2. do bundle adjustment for the existing model, if use incremental bundle adjustment for the model\r\n # graph.bundle_adjust()\r\n \r\n\r\n # if args.debug:\r\n # fig = plt.figure()\r\n # ax = plt.axes(projection='3d')\r\n # # ax.scatter3D(X3Ds[:,0], X3Ds[:,1], X3Ds[:,2], c=X3Ds[:,2], cmap='Blues')\r\n # ax.scatter3D(X3Ds[:,0], X3Ds[:,1], X3Ds[:,2], cmap='Blues')\r\n # ax.set_xlabel('x axis')\r\n # ax.set_ylabel('y axis')\r\n # ax.set_zlabel('z axis')\r\n # plt.show()\r\n\r\n # return\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Process imput arguments for 3D reconstruction.')\r\n parser.add_argument('-c', '--calib_type', type=str, \\\r\n choices=['real', 'manual', 'default'], default='default', \\\r\n help='Types of camera calibration. \\\r\n \"real\": use local camera to calibrate in real time; \\\r\n \"manual\": use manual camera calibration settings; \\\r\n \"default\": use default images for calibration;')\r\n\r\n # configurations\r\n parser.add_argument('-d', '--debug', action='store_true', help='Run in debug mode')\r\n parser.add_argument('-t', '--threads', type=int, default=4, help='number of threads')\r\n parser.add_argument('-p', '--pattern_size', type=int, nargs='+', default=(10,7), help='pattern size for camera calibration')\r\n \r\n\r\n # folders\r\n parser.add_argument('-cf', '--calib_folder', type=str, default='calib_images/iphone7p', \\\r\n help='Image folder name for camera calibration')\r\n parser.add_argument('-if', '--input_folder', type=str, default='images/book1', \\\r\n help='Image folder for 3d reconstruction')\r\n parser.add_argument('-co', '--calib_output_folder', type=str, default='debug/calib_outputs', \\\r\n help='Output folder for calibration')\r\n parser.add_argument('-so', '--sift_output_folder', type=str, default='debug/sift_outputs/book1', \\\r\n help='Output folder for sift keypoints extraction')\r\n parser.add_argument('-mo', '--match_output_folder', type=str, default='debug/match_outputs/book1', \\\r\n help='Output folder for feature matching')\r\n parser.add_argument('-o', '--output_folder', type=str, default='outputs/book1', \\\r\n help='Output folder for 3d reconstruction')\r\n\r\n\r\n # hyper parameters\r\n parser.add_argument('-nkp', '--num_of_kps', type=int, default=10000, help='number of key points')\r\n parser.add_argument('-ltr', '--lowe_test_ratio', type=float, default=0.8, help='lowe ratio test rate')\r\n parser.add_argument('-mmc', '--min_match_percent', type=float, default=0.01, help='minimum match percentage')\r\n args = parser.parse_args()\r\n main(args)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# some test which is borrowed from other notes\r\n\r\ndef test():\r\n homo_x1 = np.array([[2.86666666666667,-22.2187500000000,24.8000000000000,-23.2285714285714,27.1388888888889,-33.2894736842105,47.7931034482759,31.7333333333333],\r\n 
[-33.4333333333333,14.4375000000000,-4.93333333333333,35.4285714285714,3.19444444444444,-4.52631578947369,-51.2413793103448,36.8666666666667],\r\n [1,1,1,1,1,1,1,1]])\r\n homo_x2 = np.array([[-229.522875448942,-258.292895283908,-206.597731739827,-262.230921762122,-209.943236963811,-275.437557347701,-181.668315941037,-199.405970254657],\r\n [-34.2802236631725,14.9107171694714,-5.02902645276237,36.6259219139803,3.25917390157400,-4.69475951687246,-51.9047013403661,37.5131564447507],\r\n [1,1,1,1,1,1,1,1]])\r\n\r\n# homo_x1 = np.array([[-102.766291777084,255.500969078484,-44.9335294173212,-223.944930920947,-98.8327970841866,-44.2336788063890,-60.8647393705177,8.51496788806941,0.745050635701492,-44.4137684538591],\r\n# [54.0935032492134,171.835606944456,-32.0135418758319,41.3734222619891,-279.742900618785,-135.811558132020,-72.2051817851882,1.82504383577172,0.343791528708997,95.0035092386631],\r\n# [1,1,1,1,1,1,1,1,1,1]])\r\n# homo_x2 = np.array([[-352.297383508563,17.5588770566538,-289.262722752471,-489.673069977241,-342.330002971343,-287.517068912743,-306.265697905156,-232.343678170461,-214.290449718330,-288.256419038601],\r\n# [57.1826262772602,165.199531371376,-33.3194551784004,45.2071887678686,-294.996539885698,-141.290294105620,-75.4683665784438,1.87260592942277,0.351144636763272,98.8541988292808],\r\n# [1,1,1,1,1,1,1,1,1,1]])\r\n\r\n F, mask = findFundamentalMatrix(homo_x1[:2,2:].T, homo_x2[:2,2:].T, method=0)\r\n\r\n kint1 = np.array([[1000, 0,0], [0,1000, 0], [0,0,1]])\r\n kint2 = np.array([[1000, 0,0], [0,1000, 0], [0,0,1]])\r\n E = kint2.T.dot(F).dot(kint1)\r\n\r\n\r\n\r\n pts_l_norm = cv2.undistortPoints(np.expand_dims(homo_x1[:2].T, axis=1).astype(np.float32), cameraMatrix=kint1, distCoeffs=None)\r\n pts_r_norm = cv2.undistortPoints(np.expand_dims(homo_x2[:2].T, axis=1).astype(np.float32), cameraMatrix=kint2, distCoeffs=None)\r\n\r\n\r\n # pts_l_norm = np.expand_dims(homo_x1[:2].T, axis=1).astype(np.float32)\r\n # pts_r_norm = np.expand_dims(homo_x2[:2].T, axis=1).astype(np.float32)\r\n\r\n\r\n # E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=kint1[0,0], pp=(kint1[0,2], kint1[1,2]), method=cv2.RANSAC, prob=0.999, threshold=3.0)\r\n # # print(E)\r\n\r\n points, R, T, mask = cv2.recoverPose(E, pts_l_norm, pts_r_norm, focal=kint1[0,0], pp=(kint1[0,2], kint1[1,2]))\r\n\r\n Pr1 = kint1.dot(np.hstack([np.eye(3), np.zeros((3,1))]))\r\n Pr2 = kint2.dot(np.hstack([R, T]))\r\n\r\n X4Ds = cv2.triangulatePoints(Pr1, Pr2, homo_x1[:2], homo_x2[:2]).T\r\n X3Ds = X4Ds[:,:3] / X4Ds[:,3].reshape(X4Ds.shape[0], -1)\r\n print(X3Ds)\r\n\r\n# test()","sub_path":"reconstruct.py","file_name":"reconstruct.py","file_ext":"py","file_size_in_byte":32307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186836175","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport smtplib\n\nURL=\"https://www.amazon.in/Vivo-Storage-Additional-Exchange-Offers/dp/B07KXC1YGG/ref=br_msw_pdt-4/260-3954790-6831115?_encoding=UTF8&smid=A14CZOWI0VEHLG&pf_rd_m=A1VBAL9TL5WCBF&pf_rd_s=&pf_rd_r=JWT9RNZX2ZRSN6WBWWQE&pf_rd_t=36701&pf_rd_p=2b9bb3c1-71bb-48bb-8476-31c6e37895b1&pf_rd_i=desktop\"\n\nheaders={\n \"User-Agent\":'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n}\n\ndef checkprice():\n page = requests.get(URL,headers=headers)\n\n soup = BeautifulSoup(page.content,'html.parser')\n price=[]\n ld = soup.find(id=\"priceblock_dealprice\").get_text()\n for i in ld[2:]:\n if 
i==\",\":\n pass\n elif i=='.':\n price.append('.') \n else:\n price.append(i)\n\n print(float(\"\".join(price)))\n\n if float(\"\".join(price))<25000:\n send_email()\ndef send_email():\n server=smtplib.SMTP('smtp.gmail.com',587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login('linkesh0017@gmail.com')\n\n subject=\"Test msg\"\n Message=\"Yass!!\"\n\n T=f\"Subject:{subject} Body :{Message}\"\n\n server.sendmail(\n 'linkesh0017@gmail.com',\n 'linkesh00017@gmail.com',\n T\n )\n\n print(\"\\nYour Msg has been sent Successfully\")\n\n\ncheckprice()","sub_path":"DE/Scrapping/webscrap.py","file_name":"webscrap.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186200929","text":"def solution(progresses, speeds):\n # ceil 함수 사용을 위한 math library import\n import math\n answer = []\n\n # 최고 우선 순위 작업(progresses[0])의 완료 시간을 계산하여 time에 할당\n time = math.ceil((100 - progresses[0]) / speeds[0])\n cnt = 0\n\n # progresses 안에 요소가 있는 동안\n while len(progresses) > 0:\n # progresses 안에 남아있는 작업 중 첫번째 작업이 완료되면\n if progresses[0] + (time * speeds[0]) >= 100:\n # 완료된 작업과 작업 속도를 dequeue\n progresses.pop(0)\n speeds.pop(0)\n # 동시 배포 작업의 수를 구하기 위해 cnt += 1\n cnt += 1\n # progresses 안에 남아있는 작업 중 첫번째 작업이 완료되지 않았다면\n else:\n # 배포 필요한 작업이 있을 경우(cnt가 1보다 같거나 클 경우)\n if cnt >= 1:\n # 배포 필요한 작업의 수(cnt)를 answer에 append한 뒤 \n answer.append(cnt)\n # cnt 초기화\n cnt = 0\n \n # 작업 시간 += 1\n time += 1\n # 마지막 배포 작업의 경우 while문 안에서 answer에 append 되지 않았기 때문에\n # while문 밖에서 최종 배포 작업 append 처리\n answer.append(cnt)\n return answer\n","sub_path":"code/shinn92kr/stack&queue/프로그래머스_기능개발.py","file_name":"프로그래머스_기능개발.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227879434","text":"from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.button import Button\n\nclass FloatApp(App):\n\tdef build(self):\n\t\tlayout = FloatLayout()\n\t\tbtn1 = Button(text=\"1st\",size_hint=[0.2,0.2],right=400,y=50)\n\t\tbtn2 = Button(text=\"2nd\",size_hint=[0.2,0.2],pos_hint={\"center_x\":0.5,\"top\":0.9})\n\t\tlayout.add_widget(btn1)\n\t\tlayout.add_widget(btn2)\n\t\treturn layout\nFloatApp().run()","sub_path":"Kivy/Kivy Lesson/First/FloatApp1-2.py","file_name":"FloatApp1-2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"466712279","text":"import random\nmespv=40\nn='force'\ndegmag='intelligence'\nmana='mana'\npvmob='pv'\ndegmob='degat'\nmesdeg=random.randrange(0+n, 5+n)\nmesdegmag=random.randrange(0+degmag, 5+degmag)\nwhile True:\n if pvmob<=0:\n print (\"You won!\")\n elif mespv<=0:\n print (\"You Died!\")\n else:\n tg=input(\"1:attaque, 2:magie, 3:defense, 4:fuite\")\n if tg=='1':\n pvmob=pvmob-mesdeg\n mespv=mespv-degmob\n elif tg=='2':\n mana=mana-1\n pvmob=pvmob-mesdegmag\n mespv=mespv-degmob\n elif tg=='3':\n mespv=mespv-degmob/2\n elif tg=='4':\n quitter\n\n\n","sub_path":"Scripts Vic/combatsys.py","file_name":"combatsys.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351771742","text":"from OhServer.request import Request\nfrom OhServer import HTTPStatus\nimport unittest\n\ntestRequestString = \"GET /restapi/v1.0 HTTP/1.1\\r\\nAccept: application/json\\r\\nAuthorization: Bearer 
UExBMDFUMDRQV1MwMnzpdvtYYNWMSJ7CL8h0zM6q6a9ntw\\r\\n\\r\\nHello World This is Others\\r\\nWow Amazing!!\"\nmessageToken = \"GET\"\ntarget = \"/restapi/v1.0\"\nhttpVersion = \"HTTP/1.1\"\nothers = \"Hello World This is Others\\r\\nWow Amazing!!\"\noptions = {'Accept': 'application/json', 'Authorization': 'Bearer UExBMDFUMDRQV1MwMnzpdvtYYNWMSJ7CL8h0zM6q6a9ntw'}\n\nclass RequestTest(unittest.TestCase) :\n def testInterpret(self):\n r = Request()\n self.assertEqual(r.parse_request(testRequestString), HTTPStatus.OK)\n self.assertEqual(r.messageToken, messageToken)\n self.assertEqual(r.target, target)\n self.assertEqual(r.httpVersion, httpVersion)\n self.assertEqual(r.others, others)\n self.assertEqual(r.options, options)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/requestTest.py","file_name":"requestTest.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511194107","text":"import time\n\n\ndef bench(loops=10, times=10, *args):\n total_times = [0] * len(args)\n for i in range(times):\n for count, func in enumerate(args):\n time_start = time.time()\n for k in range(loops):\n func()\n time_end = time.time()\n total_times[count] += time_end - time_start\n for i in range(len(args)):\n total_times[i] /= times\n for count, func in enumerate(args):\n print(\"function: %s loops: %s avg_time: %f time per loop: %f\" % (\n func.__name__,\n loops,\n total_times[count],\n total_times[count] / loops,\n ))\n\n\ndef test1():\n time.sleep(0.01)\n\n\ndef test3():\n time.sleep(0.03)\n\n\ndef test2():\n time.sleep(0.02)\n\n\ndef main():\n bench(test1, test2, test3)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"microbench.py","file_name":"microbench.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"7470582","text":"from utils import data_generator\nfrom utils.constituent_building import *\nfrom utils.conjugate import *\nfrom utils.randomize import choice\nfrom utils.vocab_sets import *\n\nclass AnaphorGenerator(data_generator.BenchmarkGenerator):\n def __init__(self):\n super().__init__(field=\"syntax_semantics\",\n linguistics=\"binding\",\n uid=\"principle_A_reconstruction\",\n simple_lm_method=True,\n one_prefix_method=False,\n two_prefix_method=False,\n lexically_identical=True)\n\n def sample(self):\n # It's himself that John likes.\n # IT'S Refl Rel N1 V1\n # It's himself that likes John.\n # IT'S Refl Rel V1 N1\n\n V1 = choice(all_refl_preds)\n N1 = choice(get_matches_of(V1, \"arg_1\", all_nouns))\n N1 = N_to_DP_mutate(N1)\n Rel = choice(get_matched_by(N1, \"arg_1\", all_relativizers))\n Refl = choice(get_matched_by(N1, \"arg_1\", all_reflexives))\n V1 = conjugate(V1, N1)\n\n data = {\n \"sentence_good\": \"It's %s %s %s %s.\" % (Refl[0], Rel[0], N1[0], V1[0]),\n \"sentence_bad\": \"It's %s %s %s %s.\" % (Refl[0], Rel[0], V1[0], N1[0])\n }\n return data, data[\"sentence_good\"]\n\ngenerator = AnaphorGenerator()\ngenerator.generate_paradigm(rel_output_path=\"outputs/benchmark/%s.jsonl\" % generator.uid)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Spanish_Benchmark/English_Categories/binding/principle_A_reconstruction.py","file_name":"principle_A_reconstruction.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11532619","text":"import boto3\nfrom botocore.exceptions import 
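In the microbench record above, `bench(loops=10, times=10, *args)` is called as `bench(test1, test2, test3)`, which binds `test1` to `loops` and `test2` to `times`, so `range(times)` raises TypeError before anything is measured. Making the counts keyword-only fixes the call site without changing it; a sketch:

import time

def bench(*funcs, loops=10, times=10):
    for func in funcs:
        total = 0.0
        for _ in range(times):
            start = time.time()
            for _ in range(loops):
                func()
            total += time.time() - start
        total /= times
        print("function: %s loops: %s avg_time: %f time per loop: %f"
              % (func.__name__, loops, total, total / loops))

bench(test1, test2, test3)  # now times all three: 10 loops x 10 repetitions each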
ClientError\nimport click\n\n\nimage_metadata = {\n 'amazon_minimal': {'name': '*amzn-ami-minimal-*-VERSION.x86_64*', 'owner': 'amazon'},\n 'ubuntu': {'name': '*ubuntu-*-RELEASE-*-VERSION', 'owner': '099720109477'}\n}\n\ninstance_to_arch = {\n \"t1.micro\": \"paravirtual\",\n \"t2.nano\": \"hvm\",\n \"t2.micro\": \"hvm\",\n \"t2.small\": \"hvm\",\n \"t2.medium\": \"hvm\",\n \"t2.large\": \"hvm\",\n \"m1.small\": \"paravirtual\",\n \"m1.medium\": \"paravirtual\",\n \"m1.large\": \"paravirtual\",\n \"m1.xlarge\": \"paravirtual\",\n \"m2.xlarge\": \"paravirtual\",\n \"m2.2xlarge\": \"paravirtual\",\n \"m2.4xlarge\": \"paravirtual\",\n \"m3.medium\": \"hvm\",\n \"m3.large\": \"hvm\",\n \"m3.xlarge\": \"hvm\",\n \"m3.2xlarge\": \"hvm\",\n \"m4.large\": \"hvm\",\n \"m4.xlarge\": \"hvm\",\n \"m4.2xlarge\": \"hvm\",\n \"m4.4xlarge\": \"hvm\",\n \"m4.10xlarge\": \"hvm\",\n \"c1.medium\": \"paravirtual\",\n \"c1.xlarge\": \"paravirtual\",\n \"c3.large\": \"hvm\",\n \"c3.xlarge\": \"hvm\",\n \"c3.2xlarge\": \"hvm\",\n \"c3.4xlarge\": \"hvm\",\n \"c3.8xlarge\": \"hvm\",\n \"c4.large\": \"hvm\",\n \"c4.xlarge\": \"hvm\",\n \"c4.2xlarge\": \"hvm\",\n \"c4.4xlarge\": \"hvm\",\n \"c4.8xlarge\": \"hvm\",\n \"r3.large\": \"hvm\",\n \"r3.xlarge\": \"hvm\",\n \"r3.2xlarge\": \"hvm\",\n \"r3.4xlarge\": \"hvm\",\n \"r3.8xlarge\": \"hvm\",\n \"i2.xlarge\": \"hvm\",\n \"i2.2xlarge\": \"hvm\",\n \"i2.4xlarge\": \"hvm\",\n \"i2.8xlarge\": \"hvm\",\n \"d2.xlarge\": \"hvm\",\n \"d2.2xlarge\": \"hvm\",\n \"d2.4xlarge\": \"hvm\",\n \"d2.8xlarge\": \"hvm\",\n \"hi1.4xlarge\": \"hvm\",\n \"hs1.8xlarge\": \"hvm\",\n \"cr1.8xlarge\": \"hvm\",\n \"cc2.8xlarge\": \"hvm\"\n}\n\n\ndef sort_image_list(thelist):\n return sorted(thelist, key=lambda k: k['CreationDate'])\n\n\ndef find_virt_type(instance_size):\n try:\n return instance_to_arch.get(instance_size)\n except KeyError:\n return \"\"\n\n\ndef find_image(region, release, version, os, virt_type):\n version_to_find = image_metadata[os]['name'].replace('VERSION', version).replace('RELEASE', release)\n ec2_client = boto3.client('ec2', region_name=region)\n try:\n images = sort_image_list(ec2_client.describe_images(\n Owners=[image_metadata[os]['owner']],\n Filters=[\n {'Name': 'state', 'Values': ['available']},\n {'Name': 'architecture', 'Values': ['x86_64']},\n {'Name': 'virtualization-type', 'Values': [virt_type]},\n {'Name': 'root-device-type', 'Values': ['ebs']},\n {'Name': 'name', 'Values': [version_to_find]}\n ]\n )['Images'])\n return images[-1]['ImageId']\n except ClientError as e:\n print(\"Error: {}\".format(e.message))\n except IndexError:\n return \"\"\n\n\n@click.command()\n@click.argument('instance_size')\n@click.option('--os', default='amazon_minimal',\n type=click.Choice(['amazon_minimal', 'ubuntu']),\n help='OS to search for')\n@click.option('--release', default='16.04', help='release of OS to search for')\n@click.option('--version', default='*', help='AMI version to search for')\n@click.option('--region', default='us-west-2', help='region to query')\ndef main(region, release, version, os, instance_size):\n virt_type = find_virt_type(instance_size)\n print(find_image(region, release, version, os, virt_type))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"packer/bin/amifinder/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"503651648","text":"import iemdb\nimport numpy as np\nimport numpy.ma\n\nCOOP = 
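A small cleanup for `find_virt_type()` in the amifinder record above: `dict.get()` returns None on a miss rather than raising KeyError, so the try/except is dead code and the function silently returns None instead of the intended empty string. The default argument covers both cases:

def find_virt_type(instance_size):
    # dict.get never raises KeyError; supply the fallback directly
    return instance_to_arch.get(instance_size, "")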
iemdb.connect('coop', bypass=True)\nccursor = COOP.cursor()\nccursor2 = COOP.cursor()\n\nccursor.execute(\"\"\"\n SELECT year, min(high), min(low) from alldata_ia where station = 'IA1319'\n and year < 2012 and month = 5 GROUP by year\n\"\"\")\nhigh_hits = numpy.ma.zeros( (31,), 'f')\nlow_hits = numpy.ma.zeros( (31,), 'f')\nfor row in ccursor:\n ccursor2.execute(\"\"\"\n SELECT day from alldata_ia where year = %s and month = 5 and \n high = %s and station = 'IA1319'\n \"\"\", (row[0], row[1]))\n for row2 in ccursor2:\n high_hits[row2[0].day-1] += 1.0 / ccursor2.rowcount\n\n ccursor2.execute(\"\"\"\n SELECT day from alldata_ia where year = %s and month = 5 and \n low = %s and station = 'IA1319'\n \"\"\", (row[0], row[2]))\n for row2 in ccursor2:\n low_hits[row2[0].day-1] += 1.0 / ccursor2.rowcount\n\nhigh_hits.mask = numpy.where(high_hits == 0, True, False)\nlow_hits.mask = numpy.where(low_hits == 0, True, False)\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(2,1, sharex=True)\n\n#res = ax.contourf( data, extend='max')\nax[0].set_title(\"Cedar Rapids (1893-2011) Frequency of Day in May\\nHaving Coldest High Temperature of May\")\nax[0].set_ylabel(\"Years (ties split)\")\n\nax[0].grid(True)\nax[0].bar(numpy.arange(1,32) - 0.4, high_hits)\nax[0].set_xlim(0.5, 31.5)\n\nax[1].set_title(\"Having Coldest Low Temperature of May\")\nax[1].set_ylabel(\"Years (ties split)\")\nax[1].grid(True)\nax[1].set_xlabel(\"Day of May\")\nax[1].bar(numpy.arange(1,32) - 0.4, low_hits)\n\n\nfig.savefig('test.ps')\nimport iemplot\niemplot.makefeature('test') \n","sub_path":"scripts/feature/month_coldest_day.py","file_name":"month_coldest_day.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69236604","text":"def fourSumCount(A, B, C, D):\n \"\"\"\n :type A: List[int]\n :type B: List[int]\n :type C: List[int]\n :type D: List[int]\n :rtype: int\n \"\"\"\n\n target = 0\n hashtable = {}\n\n for a in A: # 1\n for b in B: # 1\n if a + b not in hashtable:\n hashtable[a + b] = 1\n else:\n hashtable[a + b] += 1\n\n print(hashtable)\n\n count = 0\n for c in C:\n for d in D:\n if target - (c + d) in hashtable:\n count += hashtable[target - (c + d)]\n return count\n\n\nA = [1, 1]\nB = [1, -1]\nC = [-1, 1]\nD = [-1, -1]\nprint(fourSumCount(A, B, C, D))\n","sub_path":"Archive/Others/Leetcode/[IMP]4Sum_ll_2.py","file_name":"[IMP]4Sum_ll_2.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594933930","text":"from .models import AddGas\nfrom django.shortcuts import render,redirect \nfrom django.contrib.auth.decorators import login_required\nfrom consumer.forms import FeedbackComplaintForm\nfrom consumer.models import FeedbackComplaint, Order\nfrom userauth.models import User, Consumer, Distributor\nfrom userauth.forms import UserEditForm, DistributorDetailsForm\nfrom .forms import GasAddForm, EditConsumerForm\nfrom .filters import CheckOrderFilter\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n@login_required(login_url='/user-auth/login/')\ndef distributor_dashboard(request):\n # try:\n if request.user.distributor:\n stock_gas = AddGas.objects.filter(user_id=request.user.id)\n return render(request, 'distributor/dashboard.html', {'items': stock_gas})\n else:\n return redirect('c_dashboard')\n\n\n\n@login_required(login_url='/user-auth/login/')\ndef check_orders(request):\n filterForm = 
CheckOrderFilter()\n if request.method == 'POST':\n orders = Order.objects.filter(store_name=request.user.distributor)\n filterForm = CheckOrderFilter(request.POST, queryset=orders)\n items = filterForm.qs\n context = {'filter_orders': items, 'filter_form': filterForm}\n return render(request, 'distributor/check_orders.html', context)\n else:\n # pagination for pending orders\n other_orders = Order.objects.filter(store_name=request.user.distributor).filter(status=\"pending\")\n no_of_orders = len(other_orders)\n page = request.GET.get('page', 1)\n paginator = Paginator(other_orders, 1)\n try:\n other_orders = paginator.page(page)\n except PageNotAnInteger:\n other_orders = paginator.page(1)\n\n except EmptyPage:\n other_orders = paginator.page(paginator.num_pages)\n\n # pagination for delivered orders\n delivered_orders = Order.objects.filter(store_name=request.user.distributor).filter(status=\"delivered\")\n page = request.GET.get('page', 1)\n paginator = Paginator(delivered_orders, 1)\n try:\n delivered_orders = paginator.page(page)\n except PageNotAnInteger:\n delivered_orders = paginator.page(1)\n\n except EmptyPage:\n delivered_orders = paginator.page(paginator.num_pages)\n\n context = {'orders': other_orders, 'delivered_orders': delivered_orders, 'filter_form': filterForm, 'num_orders': no_of_orders}\n return render(request, 'distributor/check_orders.html', context)\n\n\n@login_required(login_url='/user-auth/login/')\ndef manage_consumers(request):\n \n consumers = Consumer.objects.all().order_by('id')\n page = request.GET.get('page', 1)\n paginator = Paginator(consumers, 1)\n try:\n consumers = paginator.page(page)\n except PageNotAnInteger:\n consumers = paginator.page(1)\n\n except EmptyPage:\n consumers = paginator.page(paginator.num_pages)\n return render(request, 'distributor/manage_consumers.html', {'consumers': consumers})\n\n\n@login_required(login_url='/user-auth/login/')\ndef add_gas(request):\n form = GasAddForm()\n if request.method == 'POST':\n form = GasAddForm(request.POST)\n if form.is_valid():\n user_id = request.user.id\n gas_name = form.cleaned_data['gas_name']\n gas_number= form.cleaned_data['gas_number']\n print(gas_name, gas_number)\n\n ga = AddGas(user_id = user_id, gas_name = gas_name, gas_number= gas_number)\n ga.save()\n return redirect('dashboard')\n else:\n print(form.errors)\n return render(request, 'distributor/stock.html', {'form': form})\n\n\n@login_required(login_url='/user-auth/login/')\ndef edit_gas(request, id):\n gas = AddGas.objects.get(id = id)\n form = GasAddForm(instance = gas)\n if request.method == \"POST\":\n form = GasAddForm(request.POST, instance=gas)\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n else:\n print(form.errors)\n \n return render(request, 'distributor/edit_gas.html', {'form': form})\n\n\ndef delete_gas(request, id):\n gas = AddGas.objects.get(id=id)\n gas.delete()\n return redirect('dashboard')\n\n\n@login_required(login_url='/user-auth/login/')\ndef edit_consumers(request, id):\n consumers = Order.objects.get(id=id)\n form = EditConsumerForm(instance = consumers)\n if request.method == \"POST\":\n form = EditConsumerForm(request.POST, instance=consumers)\n if form.is_valid():\n form.save()\n return redirect('check_orders')\n \n return render(request, 'distributor/edit_consumers.html', {'form': form })\n\n@login_required(login_url='/user-auth/login/')\ndef your_profile(request):\n try:\n if request.user.distributor:\n c_id = request.user.id\n details_query = Distributor.objects.get(user_id = c_id)\n 
print(details_query)\n return render(request, 'distributor/profile.html', {\"details\": details_query})\n except:\n return redirect('your_profile')\n\n\n@login_required(login_url='/user-auth/login/')\ndef edit_profile(request):\n user = User.objects.get(id=request.user.id)\n # print(user)\n distributor = Distributor.objects.get(user=request.user)\n # print(consumer)\n u_form = UserEditForm(instance = user)\n d_form = DistributorDetailsForm(instance = distributor)\n if request.method == \"POST\":\n u_form = UserEditForm(request.POST, instance=user)\n d_form = DistributorDetailsForm(request.POST, instance=distributor)\n if u_form.is_valid() and d_form.is_valid():\n u_form.save()\n d_form.save()\n return redirect('your_profile')\n else:\n print(u_form.errors)\n print(d_form.errors) \n\n context = {'u_form': u_form, 'd_form':d_form}\n return render(request, 'distributor/edit_profile.html', context)\n\n\n@login_required(login_url='/user-auth/login')\ndef FeedbackComplaintView(request):\n try:\n if request.user.distributor:\n if request.method == 'POST':\n form = FeedbackComplaintForm(request.POST)\n if form.is_valid():\n fc = form.save()\n fc.user = request.user\n fc.save()\n return redirect('dashboard')\n\n context = {'form': FeedbackComplaintForm}\n return render(request, 'distributor/feedback_complaint.html', context)\n except:\n return redirect('feedback_complaint')\n","sub_path":"gas_booking/distributor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539983344","text":"\n\nclass Person():\n \n def __init__(self, name, surname, lvl = 1):\n self.name = name\n self.surname = surname\n self.lvl =lvl\n \n def getPerson(self):\n print (self.name.ljust(2), self.surname.ljust(12), str(self.lvl).ljust(3))\n \n def __del__(self):\n print('До свидания, мистер ', self.surname, self.name)\n \ndisp1 = Person('А', 'Румянцев', 10)\ndisp2 = Person('Ю', 'Бессонова')\ndisp3 = Person('Г', 'Кунгурякова', 0.1)\n\ndisp1.getPerson()\ndisp3.getPerson()\ndisp2.getPerson()\n\ndel disp3\n\ninput()\n","sub_path":"PersonOOP.py","file_name":"PersonOOP.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73850150","text":"# coding: utf-8\n\n\"\"\"\nCopyright 2016 SmartBear Software\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Ref: https://github.com/swagger-api/swagger-codegen\n\"\"\"\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ResourceConditionNode(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self):\n \"\"\"\n ResourceConditionNode - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n 
self.swagger_types = {\n 'variable_name': 'str',\n 'conjunction': 'str',\n 'operator': 'str',\n 'operands': 'list[ResourceConditionValue]',\n 'terms': 'list[ResourceConditionNode]'\n }\n\n self.attribute_map = {\n 'variable_name': 'variableName',\n 'conjunction': 'conjunction',\n 'operator': 'operator',\n 'operands': 'operands',\n 'terms': 'terms'\n }\n\n self._variable_name = None\n self._conjunction = None\n self._operator = None\n self._operands = None\n self._terms = None\n\n @property\n def variable_name(self):\n \"\"\"\n Gets the variable_name of this ResourceConditionNode.\n\n\n :return: The variable_name of this ResourceConditionNode.\n :rtype: str\n \"\"\"\n return self._variable_name\n\n @variable_name.setter\n def variable_name(self, variable_name):\n \"\"\"\n Sets the variable_name of this ResourceConditionNode.\n\n\n :param variable_name: The variable_name of this ResourceConditionNode.\n :type: str\n \"\"\"\n \n self._variable_name = variable_name\n\n @property\n def conjunction(self):\n \"\"\"\n Gets the conjunction of this ResourceConditionNode.\n\n\n :return: The conjunction of this ResourceConditionNode.\n :rtype: str\n \"\"\"\n return self._conjunction\n\n @conjunction.setter\n def conjunction(self, conjunction):\n \"\"\"\n Sets the conjunction of this ResourceConditionNode.\n\n\n :param conjunction: The conjunction of this ResourceConditionNode.\n :type: str\n \"\"\"\n allowed_values = [\"AND\", \"OR\"]\n if conjunction not in allowed_values:\n raise ValueError(\n \"Invalid value for `conjunction`, must be one of {0}\"\n .format(allowed_values)\n )\n\n self._conjunction = conjunction\n\n @property\n def operator(self):\n \"\"\"\n Gets the operator of this ResourceConditionNode.\n\n\n :return: The operator of this ResourceConditionNode.\n :rtype: str\n \"\"\"\n return self._operator\n\n @operator.setter\n def operator(self, operator):\n \"\"\"\n Sets the operator of this ResourceConditionNode.\n\n\n :param operator: The operator of this ResourceConditionNode.\n :type: str\n \"\"\"\n allowed_values = [\"EQ\", \"IN\", \"GE\", \"GT\", \"LE\", \"LT\"]\n if operator not in allowed_values:\n raise ValueError(\n \"Invalid value for `operator`, must be one of {0}\"\n .format(allowed_values)\n )\n\n self._operator = operator\n\n @property\n def operands(self):\n \"\"\"\n Gets the operands of this ResourceConditionNode.\n\n\n :return: The operands of this ResourceConditionNode.\n :rtype: list[ResourceConditionValue]\n \"\"\"\n return self._operands\n\n @operands.setter\n def operands(self, operands):\n \"\"\"\n Sets the operands of this ResourceConditionNode.\n\n\n :param operands: The operands of this ResourceConditionNode.\n :type: list[ResourceConditionValue]\n \"\"\"\n \n self._operands = operands\n\n @property\n def terms(self):\n \"\"\"\n Gets the terms of this ResourceConditionNode.\n\n\n :return: The terms of this ResourceConditionNode.\n :rtype: list[ResourceConditionNode]\n \"\"\"\n return self._terms\n\n @terms.setter\n def terms(self, terms):\n \"\"\"\n Sets the terms of this ResourceConditionNode.\n\n\n :param terms: The terms of this ResourceConditionNode.\n :type: list[ResourceConditionNode]\n \"\"\"\n \n self._terms = terms\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n 
result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n\n","sub_path":"build/PureCloudPlatformApiSdk/models/resource_condition_node.py","file_name":"resource_condition_node.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"359201510","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#小说接口http\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom novel_api import Novel\nimport sys\nimport json\nreload(sys)\nsys.setdefaultencoding('utf-8')\napp = Flask(__name__)\nCORS(app, supports_credentials=True)\napp.config['JSON_AS_ASCII'] = False\n\n@app.route('/', methods=['GET'])\ndef test():\n return jsonify({'status':'True','message':'Hello world'})\n\n@app.route('/search_novel', methods=['POST', 'GET'])\ndef search_novel():\n data = request.get_data()\n data = data.replace(\"'\", '\"')\n data = json.loads(data)\n novel_name = data.get('novel',None)\n if novel_name:\n obj = Novel()\n result = obj.get_novel(name=novel_name)\n return jsonify({'status':'True','message':'小说搜索成功','data':result})\n else:\n return jsonify({ 'status': 'False', 'message': '小说名称不能为空' })\n\n@app.route('/get_novel_chapter', methods=['POST', 'GET'])\ndef get_novel_chapter():\n data = request.get_data()\n data = data.replace(\"'\", '\"')\n data = json.loads(data)\n novel_url = data.get('novel_url',None)\n sort_id = data.get('sort',None)\n if novel_url:\n obj = Novel()\n result = obj.get_chapter(url=novel_url,sort_id=sort_id)\n return jsonify({'status':'True','message':'小说章节获取成功','data':result})\n else:\n return jsonify({ 'status': 'False', 'message': '小说名称不能为空' }) \n\n\n@app.route('/get_novel_chapter_content', methods=['POST', 'GET'])\ndef get_novel_chapter_content():\n data = request.get_data()\n data = data.replace(\"'\", '\"')\n data = json.loads(data)\n chapter_url = data.get('chapter_url',None)\n if chapter_url:\n obj = Novel()\n result = obj.get_content(url=chapter_url)\n return jsonify({'status':'True','message':'小说内容获取成功','data':result})\n else:\n return jsonify({ 'status': 'False', 'message': '小说章节不能为空' })\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1',port=8090,debug=True)\n","sub_path":"api/novel_http.py","file_name":"novel_http.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351162939","text":"import bge\nimport ship\n\nlights = 0\nenergy = 0\n\ndef init(cont):\n global lights\n cont.script = 'hud.run'\n lights = 0\n cont.owner.resolution = 5\n \nUVSPEED = -0.001\n \ndef run(cont):\n global lights\n if cont.sensors['LIGHTON'].positive:\n lights += 1\n \n if lights != bge.logic.globalDict['total_lights']:\n cont.owner['Text'] = str(lights) + '/' + str(bge.logic.globalDict['total_lights'])\n elif cont.owner['Text'] != 'Return Home':\n 
cont.owner['Text'] = 'Return Home'\n bge.logic.sendMessage(\"COMPLETE\")\n\n global energy\n ship_energy = ship.energy\n if ship_energy == None:\n ship_energy = 0\n energy = (ship_energy*0.05 + energy*0.95)\n #print(energy)\n\n energy_meter = cont.owner.scene.objects['EnergyMeter']\n energy_meter.color = [0.8*energy/100, 0.6*energy/100, 0.3*energy/100, energy]\n mesh = energy_meter.meshes[0]\n array = mesh.getVertexArrayLength(0)\n\n for v in range(0,array):\n vert = mesh.getVertex(0,v) \n vert.v2 += UVSPEED\n","sub_path":"BGMC18 CaveFly V4/BGMC18 CaveFly/scripts/hud.py","file_name":"hud.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164734796","text":"# -*- coding: utf-8 -*-\n\nfrom setting_core.constants import App as Constants, Res\nfrom setting_core.widget import Setting_group, Setting_button, Setting_hiddable_list_widget, Setting_setting_bar, \\\n Clock_widget, Network_adder, Network_info, App_info\n\n__author__ = \"Julien Dubois\"\n__version__ = \"0.1.0\"\n\nimport os\n\nfrom lemapi.api import get_gui, get_listener_manager, get_settings\nfrom lemapi.application import Application\nfrom lemapi.constants import App\nfrom lemapi.event_manager import Event\nfrom lemapi.network import Wifi\nfrom lemapi.util import get_linux_system_info\nfrom lemapi.view import View\nfrom lemapi.widget import Image_widget, Text, Clickable_text, Separator_widget, Scrollable_group\n\nfrom pygame.locals import K_DOWN, K_UP, K_RETURN\n\n\nclass Menu_view(View):\n def init_widgets(self):\n w, h = get_gui().get_size()\n\n bg_path = os.path.join(Res.IMAGE_PATH, \"background.jpg\")\n self.add_widget(\"background_image\", Image_widget, (w*0.5, h*0.5), bg_path, \\\n size=(w, h), anchor=(0, 0))\n self.add_widget(\"setting_group\", Setting_group, (w, h*0.5), anchor=(0, 0))\n self.add_widget(\"quit_button\", Setting_button, (w, h*0.5), anchor=(0, 0), \\\n size=(w*0.25, h*0.15), text=\"Quitter\", textAnchor=(-0.8, 0), textKwargs={\"anchor\": (-1, 0)})\n self.add_widget(\"clock_widget\", Clock_widget, (w * 0.1, h * 0.2), anchor=(-1, 0))\n self.add_widget(\"title_text\", Text, (w*0.1, h*0.3), \"Paramètres\", fontSize=30, bold=True, textColor=(100, 100, 100, 255))\n\n def update(self):\n super().update()\n\n def init_events(self):\n lm = get_listener_manager()\n w = self.widgets[\"setting_group\"]\n event = Event(w.previous_setting)\n lm.km.add_key_down_event(event, K_DOWN)\n lm.cm.add_joy_down_event(event)\n event = Event(w.next_setting)\n lm.km.add_key_down_event(event, K_UP)\n lm.cm.add_joy_up_event(event)\n event = Event(w.click_setting)\n lm.km.add_key_down_event(event, K_RETURN)\n lm.cm.add_button_pressed_event(event, \"button_a\")\n\n def add_setting(self, widget_name):\n if widget_name in self.widgets:\n self.widgets[\"setting_group\"].add_setting_widget(self.widgets[widget_name])\n\n def reset_angle(self):\n self.widgets[\"setting_group\"].reset_angle()\n\n\nclass Setting_view(View):\n def __init__(self, title):\n self.title = title\n super().__init__()\n\n def init_widgets(self):\n w, h = get_gui().get_size()\n pw, ph = Constants.VIEW_PADDING\n tc = (150, 150, 150, 255)\n\n bg_path = os.path.join(Res.IMAGE_PATH, \"background.jpg\")\n self.add_widget(\"background_image\", Image_widget, (w // 2, h // 2), \\\n bg_path, size=(w, h), anchor=(0, 0))\n self.add_widget(\"back_arrow_clickable_text\", Clickable_text, (pw, ph), \"Retour\", \\\n textColor=tc)\n self.add_widget(\"title_text\", Text, (w*0.15 + pw, ph), 
self.title, textColor=(255, 255, 255, 255))\n        self.add_widget(\"title_separator\", Separator_widget, (pw, h*0.07 + ph), (w - pw, h*0.07 + ph), \\\n            lineColor=(150, 150, 150, 255))\n\n\nclass Network_setting_view(Setting_view):\n    def __init__(self):\n        self.last_widget_height = 0\n        super().__init__(\"Réseau\")\n\n    def init_widgets(self):\n        super().init_widgets()\n        w, h = get_gui().get_size()\n        pw, ph = Constants.VIEW_PADDING\n        tc0 = (255, 255, 255, 255)\n        tc1 = (150, 150, 150, 255)\n\n        self.add_widget(\"add_network_clickable_text\", Clickable_text, (w - pw, ph), \"Ajouter un réseau\", textColor=tc1, anchor=(1, -1))\n        self.add_widget(\"connected_title_text\", Text, (pw + w*0.05, ph + h*0.15), \"Connection actuelle\", anchor=(-1, 0), textColor=tc0)\n        self.add_widget(\"known_title_text\", Text, (pw + w*0.05, ph + h*0.35), \"Réseaux connus\", anchor=(-1, 0), textColor=tc0)\n        self.add_widget(\"network_scrollable_group\", Scrollable_group, (w*0.5, ph + h*0.45), size=(w - pw*2, h - ph*2 - h*0.45), anchor=(0, -1))\n\n        current_ssid = Wifi.get_current_ssid()\n        if current_ssid:\n            button_text = current_ssid\n            psk = Wifi.get_network_psk()\n            if psk:\n                button_text += \" (sécurisé)\"\n            else:\n                button_text += \" (non sécurisé)\"\n            self.add_widget(\"connected_button\", Setting_button, (w*0.5, h*0.25), anchor=(0, -1), text=button_text, \\\n                size=(w - 2*pw - w*0.1, h*0.1), textAnchor=(-0.95, 0), textKwargs={\"anchor\":(-1, 0), \"fontSize\":18})\n        else:\n            self.add_widget(\"no_connected_network_text\", Text, (pw + w*0.1, ph + h*0.25), \"Déconnecté du réseau !\", \\\n                anchor=(-1, 0), textColor=tc1, fontSize=18)\n\n        \n        ssids = Wifi.get_known_ssid()\n\n        if ssids:\n            for ssid in ssids:\n                # add_network() requires the SSID to build the button label\n                self.add_network(ssid)\n        else:\n            self.widgets[\"known_title_text\"].text = \"Aucun réseau connu\"\n\n    def add_network(self, ssid):\n        self.widgets[\"known_title_text\"].text = \"Réseaux connus\"\n        w, h = get_gui().get_size()\n        sg = self.widgets[\"network_scrollable_group\"]\n        sw, sh = sg.kwargs[\"size\"]\n        psk = Wifi.get_network_psk(ssid)\n        text = ssid\n\n        if psk:\n            text += \" (sécurisé)\"\n        else:\n            text += \" (non sécurisé)\"\n\n        sg.addSubWidget(\"{ssid}_network_button\".format(ssid=ssid), Setting_button, (0, self.last_widget_height), \\\n            text=text, size=(sw, h*0.09), textAnchor=(-0.95, 0), textKwargs={\"anchor\":(-1, 0), \"fontSize\":18})\n\n        self.last_widget_height += h*0.1\n\n    def open_add_network_dialog(self, add_fct):\n        w, h = get_gui().get_size()\n        self.add_widget(\"network_adder\", Network_adder, (w*0.5, h*0.5), add_fct, self.close_add_network_dialog, anchor=(0, 0))\n\n    def close_add_network_dialog(self):\n        self.remove_widget(\"network_adder\")\n\n    def open_info_network_dialog(self, remove_fct, ssid):\n        w, h = get_gui().get_size()\n        psk = Wifi.get_network_psk(ssid)\n        self.add_widget(\"network_info\", Network_info, (w*0.5, h*0.5), remove_fct, self.close_info_network_dialog, ssid, anchor=(0, 0), \\\n            psk=psk)\n\n    def close_info_network_dialog(self):\n        self.remove_widget(\"network_info\")\n\n\nclass Display_setting_view(Setting_view):\n    def __init__(self):\n        super().__init__(\"Affichage\")\n\n    def init_widgets(self):\n        super().init_widgets()\n        w, h = get_gui().get_size()\n        pw, ph = Constants.VIEW_PADDING\n        tc0 = (255, 255, 255, 255)\n        tc1 = (150, 150, 150, 255)\n\n        self.add_widget(\"theme_text\", Text, (pw + w*0.05, ph + h*0.2), \"Thème\", anchor=(-1, 0), textColor=tc0)\n        self.add_widget(\"theme_list\", Setting_hiddable_list_widget, (w*0.95 - pw, ph + h*0.2), anchor=(1, 0), \\\n            text=get_settings().get(\"theme_color\", \"white\"), 
size=(w*0.2, h*0.07))\n\n for color in App.THEME_COLORS:\n self.widgets[\"theme_list\"].addItem(color)\n\n\nclass Audio_setting_view(Setting_view):\n def __init__(self):\n super().__init__(\"Sons\")\n\n def init_widgets(self):\n super().init_widgets()\n w, h = get_gui().get_size()\n pw, ph = Constants.VIEW_PADDING\n tc0 = (255, 255, 255, 255)\n tc1 = (150, 150, 150, 255)\n\n self.add_widget(\"volume_text\", Text, (pw + w*0.05, ph + h*0.2), \"Volume\", anchor=(-1, 0), textColor=tc0)\n self.add_widget(\"volume_setting_bar\", Setting_setting_bar, (w*0.95 - pw, ph + h*0.2), anchor=(1, 0), size=(w*0.4, h*0.06), \\\n cursorWidth=int(w * 0.015), lineThickness=int(h * 0.02), value=get_settings().get(\"sound_volume\", 1))\n\n\nclass App_setting_view(Setting_view):\n def __init__(self):\n self.last_widget_height = 0\n super().__init__(\"Applications\")\n\n def init_widgets(self):\n super().init_widgets()\n w, h = get_gui().get_size()\n pw, ph = Constants.VIEW_PADDING\n tc0 = (255, 255, 255, 255)\n\n self.add_widget(\"installed_title_text\", Text, (pw + w*0.05, ph + h*0.15), \"Applications installées\", anchor=(-1, -1), \\\n textColor=tc0)\n self.add_widget(\"info_scrollable_group\", Scrollable_group, (w*0.5, h*0.3), size=(w - pw*2, h - ph*2 - h*0.3), anchor=(0, -1))\n\n app_paths = Application.get_local_apps()\n\n for app_path in app_paths:\n app = Application(app_path)\n self.add_app(app)\n\n def add_app(self, app):\n w, h = get_gui().get_size()\n sg = self.widgets[\"info_scrollable_group\"]\n sw, sh = sg.kwargs[\"size\"]\n text = \"{name} v{version}\".format(name=app.get_name(), version=app.get_version())\n\n if os.path.split(app.path)[-1] in App.DEFAULT_APPS:\n text += \" (installé par défaut)\"\n\n sg.addSubWidget(\"{app}_app_button\".format(app=app.get_name()), Setting_button, (0, self.last_widget_height), \\\n text=text, size=(sw, h*0.09), textAnchor=(-0.95, 0), textKwargs={\"anchor\":(-1, 0), \"fontSize\":18})\n\n self.last_widget_height += h*0.1\n\n def open_app_info_dialog(self, remove_fct, app):\n w, h = get_gui().get_size()\n removable = True\n if app.get_real_name() in App.DEFAULT_APPS:\n removable = False\n\n self.add_widget(\"app_info\", App_info, (w*0.5, h*0.5), remove_fct, self.close_app_info_dialog, app, anchor=(0, 0), \\\n removableApp=removable)\n\n def close_app_info_dialog(self):\n self.remove_widget(\"app_info\")\n\n\n\nclass About_setting_view(Setting_view):\n def __init__(self):\n w, h = get_gui().get_size()\n self.last_widget_height = h*0.05\n super().__init__(\"A propos...\")\n\n def init_widgets(self):\n super().init_widgets()\n w, h = get_gui().get_size()\n pw, ph = Constants.VIEW_PADDING\n self.add_widget(\"info_scrollable_group\", Scrollable_group, (w*0.5, h*0.2), size=(w - pw*2, h - ph*2 - h*0.2), anchor=(0, -1))\n\n self.add_info(\"Système\", \"LemAPI\")\n self.add_info(\"Version du système\", App.VERSION)\n self.add_info(\"Version de Raspbian\", get_linux_system_info(\"VERSION\"))\n self.add_info(\"Organisation\", \"Lycée Madame de Staël\")\n self.add_info(\"Développeur\", __author__)\n\n def add_info(self, title, info):\n w, h = get_gui().get_size()\n tc0 = (255, 255, 255, 255)\n tc1 = (150, 150, 150, 255)\n sg = self.widgets[\"info_scrollable_group\"]\n sw, sh = sg.kwargs[\"size\"]\n\n sg.addSubWidget(\"{title}_title_text\".format(title=title), Text, (w*0.05, self.last_widget_height), title, \\\n anchor=(-1, 0), textColor=tc0)\n sg.addSubWidget(\"{title}_text\".format(title=title), Text, (sw - w*0.05, self.last_widget_height), info, \\\n anchor=(1, 0), 
textColor=tc1)\n\n self.last_widget_height += h*0.1","sub_path":"setting_core/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292924222","text":"arr = []\ntry:\n while True:\n arr.append([int(n) for n in input().split()])\nexcept EOFError:\n pass\n\n\ndef maxlrectangle(matrix):\n if not matrix or not matrix[0]:\n return 0\n n = len(matrix[0])\n height = [0] * (n + 1)\n ans = 0\n for row in matrix:\n for i in range(n):\n height[i] = height[i] + 1 if row[i] == 1 else 0\n stack = [-1]\n for i in range(n + 1):\n while height[i] < height[stack[-1]]:\n h = height[stack.pop()]\n w = i - 1 - stack[-1]\n ans = max(ans, h * w)\n stack.append(i)\n return ans\n\n\nprint(maxlrectangle(arr))\n","sub_path":"work1/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"104784135","text":"import sys\nfrom ftplib import FTP\nouttaLib = \"../\"\ndef db_edit(file,line,index,new):\n with open(file,\"r\") as f:\n lines = f.readlines()\n items = lines[line].split(\";\")\n out=\"\"\n x=-1\n for i in items:\n x+=1\n if(x==index):\n out+=new\n if (items[x] == items[-1]):\n out += \"\\n\"\n else:\n out+=items[x]\n if not(items[x]==items[-1]):\n out+=\";\"\n lines[line]=out\n with open(file, \"w\") as f:\n f.writelines(lines)\n\ndef exitProgram():\n input(\"Press enter to exit \")\n sys.exit(0)\nhost = \"f14-preview.royalwebhosting.net\"\npasw = \"tooezforrtz\"\nuser = \"2289107\"\ndef connectToServer():\n ftp = FTP(host)\n ftp.login(user,pasw)\n return ftp","sub_path":"lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427255841","text":"# https://leetcode.com/problems/maximum-length-of-repeated-subarray/submissions/\n\nclass Solution:\n def findLength(self, A: [int], B: [int]) -> int:\n rows = len(A) + 1\n cols = len(B) + 1\n \n dp = [ [0]*cols for _ in range(rows)]\n \n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if A[i-1] == B[j-1]:\n dp[i][j] = dp[i-1][j-1] + 1\n \n # print([row for row in dp])\n \n return max([max(row) for row in dp])","sub_path":"python/Dynamic Programming/maximum-length-of-repeated-subarray.py","file_name":"maximum-length-of-repeated-subarray.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"178119376","text":"# -*- coding: utf-8 -*-\r\n\r\ndef all_exist(ref_list, chk_list):\r\n len_ref = len(ref_list)\r\n len_chk = len(chk_list)\r\n for j in range(len_ref):\r\n # 含まれていればok\r\n if ref_list[j] in chk_list:\r\n pass\r\n # 含まれていなかったらallじゃない\r\n else:\r\n return False\r\n else:\r\n # 探してる要素iが含まれていないため終了\r\n return True\r\n\r\nimport rhinoscriptsyntax as rs\r\nimport Rhino\r\nimport glob\r\nimport os\r\n# 切断面モードの選択\r\n# 0:切断面なし\r\n# 1:x-z方向切断・y正方向残し\r\n# 2:x-z方向切断・y負方向残し\r\n# 3:y-z方向切断・x正方向残し\r\n# 4:y-z方向切断・x負方向残し\r\nrid_mode = 0\r\n\r\n# dataファイルの取得\r\nfile_list = glob.glob('./data/*.inp')\r\n\r\nsrf_list = \\\r\n[[0,1,2,3], [4,5,6,7], \\\r\n[0,1,5,4], [3,2,6,7], \\\r\n[1,2,6,5], [0,3,7,4]]\r\n\r\nfor file_name in file_list:\r\n\r\n f = open(file_name)\r\n f.readline()\r\n stepNum = int(f.readline())\r\n for step_i in range(1,stepNum + 1):\r\n text = 'dammy'\r\n while text.split()[0] != 'step' + 
str(step_i):\r\n text = f.readline()\r\n\r\n text = f.readline()\r\n point_num = int(text.split()[0])\r\n element_num = int(text.split()[1])\r\n\r\n # 点の読み取り\r\n j = 1\r\n points_list = []\r\n centerPt = [0.0,0.0,0.0]\r\n while j <= point_num:\r\n text = f.readline()\r\n k = 0\r\n pos = []\r\n for val in text.split()[1:4]:\r\n pos.append(float(val))\r\n centerPt[k] = centerPt[k] + float(val)\r\n k += 1\r\n\r\n points_list.append(pos)\r\n j += 1\r\n\r\n for j in range(3):\r\n centerPt[j] = centerPt[j] / point_num\r\n\r\n # 要素データの読み取り\r\n j = 1\r\n elements_list = []\r\n for j in range(element_num):\r\n text = f.readline()\r\n ele_list = []\r\n for num in text.split()[3:11]:\r\n ele_list.append(int(num))\r\n # 実数誤差用数値算出\r\n if j == 0:\r\n oneBlock_size = [0.0, 0.0, 0.0]\r\n for k in range(3):\r\n oneBlock_size[k] = (points_list[ele_list[7] - 1][k] - points_list[ele_list[0] - 1][k]) / 4\r\n delta_size = min(oneBlock_size)\r\n # 節点チェック\r\n # mode 0, 1, 2, 3, 4\r\n # 0:切断面なし\r\n # 1:x-z方向切断・y正方向残し\r\n # 2:x-z方向切断・y負方向残し\r\n # 3:y-z方向切断・x正方向残し\r\n # 4:y-z方向切断・x負方向残し\r\n # 160614 @yamazaki 原点から見るほうが多いのでとりあえずmode1,3を制作\r\n # 1:要素の節点1番が、中央点のy座標からdelta_size引いたもの、よりも小さかったらappendしないよう処理\r\n # 実数誤差処理は要素サイズの半分とかにする。\r\n if rid_mode == 1:\r\n if points_list[ele_list[0] - 1][1] < centerPt[1] - delta_size:\r\n continue\r\n\r\n elements_list.append(ele_list)\r\n j += 1\r\n\r\n # 各要素の各面を構成する節点(4つ)を共有している要素があれば、その面は無効\r\n quadNode_list = []\r\n j = 0\r\n for j_elementNode in elements_list:\r\n for k in range(6):\r\n srch_pts = srf_list[k]\r\n # ゼロ基準の配列に治すために-1している。\r\n quadNode = [j_elementNode[srch_pts[0]], j_elementNode[srch_pts[1]], \\\r\n j_elementNode[srch_pts[2]], j_elementNode[srch_pts[3]]]\r\n # j要素k面が、他の面と接しているかをチェック\r\n for l in range(element_num):\r\n if j == l:\r\n continue\r\n if all_exist(quadNode, elements_list[l]):\r\n # 接している要素を見つけたのでループから出て次の面をチェック\r\n break\r\n else:\r\n # 接していないようなので、面に追加\r\n quadNode[0] = quadNode[0] - 1\r\n quadNode[1] = quadNode[1] - 1\r\n quadNode[2] = quadNode[2] - 1\r\n quadNode[3] = quadNode[3] - 1\r\n quadNode_list.append(quadNode)\r\n j += 1\r\n # print quadNode_list\r\n Meshes = rs.AddMesh(points_list, quadNode_list)\r\n\r\n if stepNum == 1:\r\n saveName = file_name.replace('data', 'figure')\r\n saveName = saveName.replace('.inp', '.png')\r\n outpath = os.getcwd() + \"\\\\\" + saveName\r\n else:\r\n saveName = file_name.replace('data', 'figure')\r\n saveName = saveName.replace('.inp', 'case'+str(step_i)+'.png')\r\n outpath = os.getcwd() + \"\\\\\" + saveName\r\n\r\n Rhino.RhinoApp.RunScript(\"_-Render\", False)\r\n #time.sleep(10) # number of seconds to wait\r\n Rhino.RhinoApp.RunScript(\"_-SaveRenderWindowAs \\n\\\"\" + outpath + \"\\\"\\n\", False)\r\n Rhino.RhinoApp.RunScript(\"_-CloseRenderWindow\", False)\r\n\r\n #rs.DeleteObjects(Boxes)\r\n exit()\r\n f.close()\r\n","sub_path":"何やってたか忘れかけてる/rendar_inp/rendar_inp.py","file_name":"rendar_inp.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"490282276","text":"\n# Importing Libraries\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nclass models:\n def __init__(self):\n self.model = None\n self.LR_model = LogisticRegression()\n self.DT_model = DecisionTreeClassifier()\n self.GB_model = GradientBoostingClassifier(random_state=0)\n self.RF_model = 
RandomForestClassifier(random_state=0)\n \n self.X_train = None\n self.Y_train = None\n self.X_test = None\n self.Y_test = None\n \n self.accuracy_LR_model = 0\n self.accuracy_DT_model = 0\n self.accuracy_GB_model = 0\n self.accuracy_RF_model = 0\n \n self.predLR = None\n self.predDT = None\n self.predGB = None\n self.predRF = None\n \n def feedData(self, X_train, Y_train, X_test, Y_test):\n self.X_train = X_train\n self.Y_train = Y_train\n self.X_test = X_test\n self.Y_test = Y_test\n \n def fit(self):\n self.LR_model.fit(self.X_train, self.Y_train)\n self.DT_model.fit(self.X_train, self.Y_train)\n self.GB_model.fit(self.X_train, self.Y_train)\n self.RF_model.fit(self.X_train, self.Y_train)\n \n def predict(self):\n self.predLR = self.LR_model.predict(self.X_test)\n self.predDT = self.DT_model.predict(self.X_test)\n self.predGB = self.GB_model.predict(self.X_test)\n self.predRF = self.RF_model.predict(self.X_test)\n \n def selectModel(self):\n self.accuracy_LR_model = self.LR_model.score(self.X_test, self.Y_test)\n print(\"accuracy_LR_model: \", self.accuracy_LR_model)\n self.accuracy_DT_model = self.DT_model.score(self.X_test, self.Y_test)\n print(\"accuracy_DT_model: \", self.accuracy_DT_model)\n self.accuracy_GB_model = self.GB_model.score(self.X_test, self.Y_test)\n print(\"accuracy_GB_model: \", self.accuracy_GB_model)\n self.accuracy_RF_model = self.RF_model.score(self.X_test, self.Y_test)\n print(\"accuracy_RF_model: \", self.accuracy_RF_model)\n \n if self.accuracy_LR_model > self.accuracy_DT_model and self.accuracy_LR_model > self.accuracy_GB_model and self.accuracy_LR_model > self.accuracy_RF_model:\n self.model = self.LR_model\n elif self.accuracy_DT_model > self.accuracy_LR_model and self.accuracy_DT_model > self.accuracy_GB_model and self.accuracy_DT_model > self.accuracy_RF_model:\n self.model = self.DT_model\n elif self.accuracy_GB_model > self.accuracy_LR_model and self.accuracy_GB_model > self.accuracy_DT_model and self.accuracy_GB_model > self.accuracy_RF_model:\n self.model = self.GB_model\n else:\n self.model = self.RF_model\n \n def giveModel(self):\n return self.model\n \n \n \n ","sub_path":"Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"503507051","text":"totalArr = [];\ntotalAnswer = 0;\n\nwith open(\"Input.txt\") as f:\n for line in f.readlines():\n totalArr.append(line.replace('\\n', \"\").split(\"x\"))\n\n\nfor arr in totalArr:\n\n largest = 0\n largestIndex = 0\n\n for i,val in enumerate(arr):\n arr[i] = int(val)\n\n ribbon = 0;\n bow = 1;\n\n for i, val in enumerate(arr):\n if val > largest:\n largest = val\n largestIndex = i\n\n\n for i, val in enumerate(arr):\n\n if i != largestIndex:\n ribbon += val *2\n\n bow *= val\n totalAnswer += ribbon+bow\n\n\n\nprint(totalAnswer)\n","sub_path":"2016/Day 2: I Was Told There Would Be No Math/mainp2.py","file_name":"mainp2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485069489","text":"from .defaults import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']\n\nINSTALLED_APPS += ['drupal_migrator', 'debug_toolbar']\n\nALLOWED_HOSTS = ['localhost', '127.0.0.1', 'dev.comses.asu.edu', 
'cms']\n","sub_path":"django/core/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224757594","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 6 16:25:14 2019\n\n@author: adsims\n\"\"\"\n\nfrom PyQt5.uic import loadUiType\n\nimport sys\nfrom PyQt5 import QtWidgets, QtCore, uic\nimport numpy as np \nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\nimport threading\nimport dagr_backend\n\n__author__ = \"Andrew Sims, Michael Hudson\"\n__copyright__ = \"None\"\n__license__ = \"None\"\n__version__ = \"0.2\"\n__maintainer__ = \"Andrew Sims, Michael Hudson\"\n__email__ = \"andrew.sims.d@gmail.com\"\n__status__ = \"Prototype\"\n\t\nUi_MainWindow, QMainWindow = loadUiType('DAGR.ui')\n\nclass Gui(QMainWindow, Ui_MainWindow):\n def __init__(self, ):\n super(Gui, self).__init__()\n self.setupUi(self)\n self.dataset_fig_dict = {}\n self.algorithim_fig_dict = {}\n self.learning_curve_fig_dict = {}\n \n self.pb_begin_analysis = self.findChild(QtWidgets.QPushButton, 'pb_begin_analysis')\n self.pb_begin_analysis.clicked.connect(self.begin_analysis)\n \n self.cb_feature_box_and_whisker = self.findChild(QtWidgets.QCheckBox, 'cb_feature_box_and_whisker')\n self.cb_correlation_matrix = self.findChild(QtWidgets.QCheckBox, 'cb_correlation_matrix')\n self.cb_feature_histogram = self.findChild(QtWidgets.QCheckBox, 'cb_feature_histogram')\n self.cb_raw_features = self.findChild(QtWidgets.QCheckBox, 'cb_raw_features')\n self.cb_scatter_matrix = self.findChild(QtWidgets.QCheckBox, 'cb_scatter_matrix')\n \n # self.html_viewer = self.findChild(QtWebEngineWidgets.QWebEngineView, 'webEngineView')\n \n self.dataset_prog = self.findChild(QtWidgets.QProgressBar, 'dataset_prog')\n self.algorithim_prog = self.findChild(QtWidgets.QProgressBar, 'algorithim_prog')\n \n self.cb_adaboost = self.findChild(QtWidgets.QCheckBox, 'cb_adaboost')\n self.cb_dtc = self.findChild(QtWidgets.QCheckBox, 'cb_dtc')\n self.cb_gaussian_process = self.findChild(QtWidgets.QCheckBox, 'cb_gaussian_process')\n self.cb_linear_svm = self.findChild(QtWidgets.QCheckBox, 'cb_linear_svm')\n self.cb_naive_bayes = self.findChild(QtWidgets.QCheckBox, 'cb_naive_bayes')\n self.cb_nearest_neighbors = self.findChild(QtWidgets.QCheckBox, 'cb_nearest_neighbors')\n self.cb_neural_network = self.findChild(QtWidgets.QCheckBox, 'cb_neural_network')\n self.cb_qda = self.findChild(QtWidgets.QCheckBox, 'cb_qda')\n self.cb_random_forest = self.findChild(QtWidgets.QCheckBox, 'cb_random_forest')\n self.cb_rbf_svm = self.findChild(QtWidgets.QCheckBox, 'cb_rbf_svm')\n \n self.dataset_mpl_figs.itemClicked.connect(self.dataset_changefig)\n self.algorithim_mpl_figs.itemClicked.connect(self.algorithim_changefig)\n self.learning_curve_mpl_figs.itemClicked.connect(self.learning_curve_changefig)\n \n datset_base_fig = Figure()\n self.dataset_addmpl(datset_base_fig)\n algorithim_base_fig = Figure()\n self.algorithim_addmpl(algorithim_base_fig)\n learning_curve_base_fig = Figure()\n self.learning_curve_addmpl(learning_curve_base_fig)\n \n def begin_analysis(self,):\n # with open(r'C:/Users/andre/Documents/Data Analysis/bokeh_test_file.html', 'r') as fh:\n # file = fh.read()\n # self.html_viewer.setHtml(file)\n \n df, feature_names = dagr_backend.gen_dataset()\n dataset_figs = {}\n if 
self.cb_feature_box_and_whisker.isChecked():\n dataset_figs['Feature Box Plot'] = dagr_backend.plot_feature_box(df, feature_names)\n if self.cb_correlation_matrix.isChecked():\n dataset_figs['Correlation Matrix'] = dagr_backend.plot_corr_mat(df, feature_names)\n if self.cb_feature_histogram.isChecked():\n dataset_figs['Histograms'] = dagr_backend.plot_histograms(df, feature_names)\n if self.cb_raw_features.isChecked():\n dataset_figs['Raw Features'] = dagr_backend.plot_raw_features(df, feature_names)\n if self.cb_scatter_matrix.isChecked():\n dataset_figs['Scatter Matrix'] = dagr_backend.plot_scatter_matrix(df, feature_names)\n \n algorithims_to_use = []\n if self.cb_adaboost.isChecked():\n algorithims_to_use.append('adaboost')\n if self.cb_dtc.isChecked():\n algorithims_to_use.append('dtc') \n if self.cb_gaussian_process.isChecked():\n algorithims_to_use.append('gaussian_process')\n if self.cb_linear_svm.isChecked():\n algorithims_to_use.append('linear_svm')\n if self.cb_naive_bayes.isChecked():\n algorithims_to_use.append('naive_bayes')\n if self.cb_nearest_neighbors.isChecked():\n algorithims_to_use.append('nearest_neighbors')\n if self.cb_neural_network.isChecked():\n algorithims_to_use.append('neural_network')\n if self.cb_qda.isChecked():\n algorithims_to_use.append('qda')\n if self.cb_random_forest.isChecked():\n algorithims_to_use.append('random_forest')\n if self.cb_rbf_svm.isChecked():\n algorithims_to_use.append('rbf_svm')\n \n models = dagr_backend.build_models(df, feature_names, algorithims_to_use) \n \n algorithim_fig_list = []\n learning_curve_fig_list = []\n algorithim_fig_list.append(('Algorithim Accuracy', dagr_backend.plot_algorithim_accuracy(df, feature_names, models)))\n for model in models:\n model_name = model.steps[-1][0]\n # x = threading.Thread(target=self.append_figure(), args=(algorithim_fig_list, model_name, dagr_backend.plot_algorithim_class_space(), (df, feature_names, model)))\n # x.start()\n algorithim_fig_list.append((model_name, dagr_backend.plot_algorithim_class_space(df, feature_names, model)))\n learning_curve_fig_list.append(\n (\n model_name,\n dagr_backend.plot_learning_curve_(df, feature_names, model)\n )\n )\n \n for i, (name, figure) in enumerate(dataset_figs.items()):\n self.dataset_prog.setValue(((i+1)/len(dataset_figs))*100)\n gui.dataset_addfig(name, figure)\n for i, (name, figure) in enumerate(algorithim_fig_list):\n self.algorithim_prog.setValue(((i+1)/len(algorithim_fig_list))*100)\n gui.algorithim_addfig(name, figure)\n for name, figure in learning_curve_fig_list:\n gui.learning_curve_addfig(name, figure)\n \n def append_figure(lst, model_name, func, func_args):\n lst.append((model_name, func(func_args)))\n def dataset_changefig(self, item):\n text = item.text()\n self.dataset_rmmpl()\n self.dataset_addmpl(self.dataset_fig_dict[text])\n \n def dataset_addfig(self, name, fig):\n self.dataset_fig_dict[name] = fig\n self.dataset_mpl_figs.addItem(name)\n \n def dataset_addmpl(self, fig):\n self.dataset_canvas = FigureCanvas(fig)\n self.dataset_mplvl.addWidget(self.dataset_canvas)\n self.dataset_canvas.draw()\n self.toolbar = NavigationToolbar(self.dataset_canvas, \n self.dataset_mpl_window, coordinates=True)\n self.dataset_mplvl.addWidget(self.toolbar)\n \n def dataset_rmmpl(self,):\n self.dataset_mplvl.removeWidget(self.dataset_canvas)\n self.dataset_canvas.close()\n self.dataset_mplvl.removeWidget(self.toolbar)\n self.toolbar.close() \n \n def algorithim_changefig(self, item):\n text = item.text()\n self.algorithim_rmmpl()\n 
self.algorithim_addmpl(self.algorithim_fig_dict[text])\n \n def algorithim_addfig(self, name, fig):\n self.algorithim_fig_dict[name] = fig\n self.algorithim_mpl_figs.addItem(name)\n \n def algorithim_addmpl(self, fig):\n self.algorithim_canvas = FigureCanvas(fig)\n self.algorithim_mplvl.addWidget(self.algorithim_canvas)\n self.algorithim_canvas.draw()\n self.toolbar = NavigationToolbar(self.algorithim_canvas, \n self.algorithim_mpl_window, coordinates=True)\n self.algorithim_mplvl.addWidget(self.toolbar)\n \n def algorithim_rmmpl(self,):\n self.algorithim_mplvl.removeWidget(self.algorithim_canvas)\n self.algorithim_canvas.close()\n self.algorithim_mplvl.removeWidget(self.toolbar)\n self.toolbar.close()\n \n def learning_curve_changefig(self, item):\n text = item.text()\n self.learning_curve_rmmpl()\n self.learning_curve_addmpl(self.learning_curve_fig_dict[text])\n \n def learning_curve_addfig(self, name, fig):\n self.learning_curve_fig_dict[name] = fig\n self.learning_curve_mpl_figs.addItem(name)\n \n def learning_curve_addmpl(self, fig):\n self.learning_curve_canvas = FigureCanvas(fig)\n self.learning_curve_mplvl.addWidget(self.learning_curve_canvas)\n self.learning_curve_canvas.draw()\n self.toolbar = NavigationToolbar(self.learning_curve_canvas, \n self.learning_curve_mpl_window, coordinates=True)\n self.learning_curve_mplvl.addWidget(self.toolbar)\n \n def learning_curve_rmmpl(self,):\n self.learning_curve_mplvl.removeWidget(self.learning_curve_canvas)\n self.learning_curve_canvas.close()\n self.learning_curve_mplvl.removeWidget(self.toolbar)\n self.toolbar.close()\n \n def closeEvent(self, ce):\n #prevents blocking after each run\n QtWidgets.QApplication.quit()\n \nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n app.processEvents()\n gui = Gui()\n gui.show()\n sys.exit(app.exec_())","sub_path":"dagr_rev02.py","file_name":"dagr_rev02.py","file_ext":"py","file_size_in_byte":9810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"516212729","text":"# Corentin Pinato\r\n# This is program has to find the decryption key of an ElGamal encryption method by using a brute force strategy.\r\n\r\n#publicKey = [8347852664524685394671539,1725419847553,6544719840647906985061541]\r\n#message = [2030735454253481748649532,1340127105753313239218903]\r\n\r\npublicKey = [29, 2, 3]\r\nmessage = [23, 27]\r\n\r\n#publicKey = [24852977,2744,8414508]\r\n#message = [15268076,743675]\r\n\r\ndef decrypt(publicKey, message, privateKey):\r\n p = publicKey[0]\r\n g = publicKey[1]\r\n gxmodp = publicKey[2]\r\n\r\n gy = message[0]\r\n mgxy = message[1]\r\n\r\n c1 = (gy**(p-1-privateKey))%p\r\n c2 = (mgxy*c1)%p\r\n\r\n return c2\r\n\r\ndef encrypt(message,publicKey,randomNum):\r\n p = publicKey[0]\r\n g = publicKey[1]\r\n gxmodp = publicKey[2]\r\n\r\n m1 = (g**randomNum)%p\r\n m2 = (message*(gxmodp**randomNum))%p\r\n\r\n result = [m1,m2]\r\n\r\n return result\r\n\r\ndef modPow(number,power,mod):\r\n\r\n if(power == 0):\r\n return 1\r\n elif(power%2==0):\r\n halfpower=modPow(number,(power/2),mod)\r\n return modMult(halfpower,halfpower,mod)\r\n else:\r\n halfpower=modPow(number,((power)/2)+1,mod)\r\n firstbit = modMult(halfpower,halfpower,mod)\r\n return modMult(firstbit,number,mod)\r\n\r\n\r\ndef modMult(first,second,mod):\r\n if(second==0):\r\n return 0\r\n elif(second%2==0):\r\n half=modMult(first,(second/2)+1,mod)\r\n return(half+half)%mod\r\n else:\r\n half=modMult(first,((second)/2)+1,mod)\r\n return(half+half+first)%mod\r\n\r\n\r\nMessage 
= 1\r\n\r\nencrypted = encrypt(Message,publicKey,2)\r\nprint(encrypted)\r\n\r\nkey = 1\r\n\r\ndecrypted = decrypt(publicKey,encrypted,key)\r\nprint(\"Tryed key = \"+str(key))\r\n\r\nwhile(decrypted != Message):\r\n key += 1\r\n decrypted = decrypt(publicKey,encrypted,key)\r\n print(\"Tryed key = \"+str(key))\r\n\r\nprint(\"The key is: \"+str(key))\r\n\r\nprint(\"So the secret Message is: \"+str(decrypt(publicKey,message,key)))","sub_path":"FindKey.py","file_name":"FindKey.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"491310936","text":"class Solution:\n def findMaxAverage(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: float\n \"\"\"\n\n maxHere = sum(nums[:k])\n maxSum = maxHere\n maxHereStart = 0\n for i in range(k, len(nums)):\n maxHere += (nums[i] - nums[maxHereStart])\n maxHereStart += 1\n # Note do not use max() here since it is slower\n maxSum = maxHere if maxHere > maxSum else maxSum\n\n return maxSum / k\n","sub_path":"643 Maximum Average Subarray I.py","file_name":"643 Maximum Average Subarray I.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561449994","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport imagekit.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Entry',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ('slug', models.SlugField()),\n ('text', models.TextField()),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('last_modified_at', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', imagekit.models.fields.ProcessedImageField(upload_to=b'')),\n ('caption', models.TextField()),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('last_modified_at', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='entry',\n name='image',\n field=models.ForeignKey(to='blog.Image'),\n preserve_default=True,\n ),\n ]\n","sub_path":"blog/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351522870","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 2 22:46:07 2018\r\n\r\n@author: MCA\r\n\"\"\"\r\n\r\nimport pygame\r\nimport sys\r\nfrom random import randrange, choice\r\n\r\nBLACK = 0, 0, 0 #barva je reprezentovana jako RGB tuple\r\nWHITE = 255, 255, 255\r\n\r\nMAX_STARS = 250\r\nSTAR_SPEED = 2\r\n\r\n#stars coords generation\r\ndef init_stars(screen):\r\n global stars\r\n stars = []\r\n for i in range(MAX_STARS):\r\n # A star is represented as a list with this format: [X,Y]\r\n star = [randrange(0, screen.get_width() - 1), \r\n randrange(0,screen.get_height() - 1),\r\n choice([1,2,3])]\r\n stars.append(star) #a star has a coordinate\r\n\r\ndef move_and_draw_stars(screen):\r\n \"\"\" Move and draw the stars in the given screen 
\"\"\"\r\n global stars\r\n for star in stars:\r\n #ycoord += zcoord\r\n star[1] += star[2]\r\n # If the star hit the bottom border then we reposition\r\n # it in the top of the screen with a random X coordinate.\r\n if star[1] >= screen.get_height():\r\n star[1] = 0\r\n star[0] = randrange(0,639)\r\n star[2] = choice([1, 2, 3])\r\n \r\n \r\n # Adjust the star color acording to the speed.\r\n # The slower the star, the darker should be its color.\r\n if star[2] == 1:\r\n color = (100,100,100)\r\n elif star[2] == 2:\r\n color = (190,190,190)\r\n elif star[2] == 3:\r\n color = (255,255,255)\r\n \r\n # Draw the star as a rectangle.\r\n # The star size is proportional to its speed.\r\n screen.fill(color,(star[0],star[1],star[2],star[2]))\r\n\r\n \r\ndef main():\r\n pygame.init()\r\n screen = pygame.display.set_mode((640, 480))\r\n pygame.display.set_caption(\"this is a test\")\r\n clock = pygame.time.Clock() #objekt clock ovlada FPS aplikace\r\n init_stars(screen)\r\n box_x = 300\r\n box_dir = 3\r\n\r\n while True:\r\n clock.tick(50) #uzavreni FPS na 50 FPS\r\n \r\n for event in pygame.event.get():\r\n #smycka ceka zda uzivatel ukoncil aplikaci krizkem tzv.QUIT EVENT\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n #clear the screen\r\n screen.fill(BLACK)\r\n\r\n box_x += box_dir\r\n if box_x >= 620:\r\n box_x = 620\r\n box_dir = -3\r\n elif box_x <= 0:\r\n box_x = 0\r\n box_dir = 3\r\n \r\n #update screen\r\n pygame.draw.rect(screen, WHITE, (box_x, 200, 20, 20))\r\n move_and_draw_stars(screen)\r\n pygame.display.flip()\r\n \r\nif __name__ == \"__main__\":\r\n main()","sub_path":"stars.py","file_name":"stars.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409531870","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\n\nfrom sevenapps.manage.apidata.views import \\\n ItunesLanguageDataView, ItunesCountryDataView, ItunesGenreDataView\n\n\nurlpatterns = patterns(\n '',\n url(\n r'^itunes/languages.json$',\n ItunesLanguageDataView.as_view(),\n name='itunes-languages'\n ),\n url(\n r'^itunes/countries.json$',\n ItunesCountryDataView.as_view(),\n name='itunes-countries'\n ),\n url(\n r'^itunes/genres.json$',\n ItunesGenreDataView.as_view(),\n name='itunes-genres'\n ),\n)\n","sub_path":"apps/sevenapps/manage/apidata/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"567849831","text":"from django.shortcuts import render\nfrom rest_framework.generics import *\nfrom .models import *\nfrom rest_framework.response import Response\nfrom .serializers import *\n# Create your views here.\n\n\nclass Image_upload(ListCreateAPIView):\n serializer_class = ImageUploadSerializer\n queryset = Image.objects.all()\n\nclass ImageView(RetrieveAPIView):\n lookup_field = 'id'\n serializer_class = ImageUploadSerializer\n queryset = Image.objects.all()\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n if (PageView.objects.count() <= 0):\n x = PageView.objects.create()\n x.save()\n else:\n x = PageView.objects.all()[0]\n x.hits = x.hits + 1\n x.save()\n print(x.hits)\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n\ndef home(request):\n if (PageView.objects.count() <= 0):\n x = PageView.objects.create()\n x.save()\n else:\n x = PageView.objects.all()[0]\n x.hits = x.hits + 1\n x.save()\n 
print(x.hits)\n context = {'page':x.hits}\n return render(request,'home.html',context=context)","sub_path":"image/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41480434","text":"\"\"\"\nGiven a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\n\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its bottom-up level order traversal as:\n[\n [15,7],\n [9,20],\n [3]\n]\n\"\"\"\nfrom utils.tree import TreeNode, list_create_tree\nfrom collections import deque\n\n\"\"\"\nclass Solution:\n def levelOrderBottom(self, root: TreeNode):\n # bfs + queue 56 ms\n res = []\n q = deque([(root, 0)])\n while q:\n node, level = q.popleft()\n if node:\n if len(res) == level:\n res.append([])\n res[level].append(node.val)\n q.append((node.left, level + 1))\n q.append((node.right, level + 1))\n return res[::-1]\n\"\"\"\n\n\"\"\"\nclass Solution:\n def levelOrderBottom(self, root: TreeNode):\n # dfs + stack 44ms\n stack = [(root, 0)]\n res = []\n while stack:\n node, level = stack.pop()\n if node:\n if len(res) == level:\n res.append([])\n res[level].append(node.val)\n # right must be ahead of left\n stack.append((node.right, level + 1))\n stack.append((node.left, level + 1))\n return res[::-1]\n\"\"\"\n\nclass Solution:\n def levelOrderBottom(self, root: TreeNode):\n # recursive dfs 44ms\n def helper(node, level):\n if node:\n if len(res) == level:\n res.append([])\n res[level].append(node.val)\n helper(node.left, level+1)\n helper(node.right, level+1)\n\n res = []\n helper(root, 0)\n return res[::-1]\n\n\nif __name__ == '__main__':\n s = Solution()\n root = TreeNode(None)\n llist = [3,9,20,None,None,15,7]\n root = list_create_tree(root, llist, 0)\n print(s.levelOrderBottom(root))","sub_path":"python/problems/0107.BinaryTreeLevelOrderTraversalII.py","file_name":"0107.BinaryTreeLevelOrderTraversalII.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499060126","text":"#Modules used in the code\r\nimport sys\r\nimport requests\r\nimport json\r\nimport string\r\nimport time\r\nimport argparse\r\nimport re\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"routeName\", help=\" Route name in which user wants to travel\",type=str)\r\nparser.add_argument(\"stopName\", help=\" Stop name user has to board the bus\",type=str)\r\nparser.add_argument(\"directionValue\", help=\" Direction of the route ( North, South, East, West)\",type=str)\r\nargs = parser.parse_args()\r\n\r\ndef input_check ():\r\n\r\n ''' This is the start of the execution\r\n function used to format the user input into required format and \r\n validates the direction parameter '''\r\n\r\n global routeName \r\n global stopName \r\n global direction\r\n\r\n #formating the inputs \r\n routeName = args.routeName.lower().strip()\r\n stopName = args.stopName.lower().strip()\r\n directionValue = args.directionValue.lower().strip()\r\n\r\n #predefined direction values for the inputs\r\n directdict = dict (south=1, north=4, east=2, west=3)\r\n \r\n if directionValue in directdict:\r\n direction = directdict[directionValue]\r\n else:\r\n print(\"Direction should be any one among SOUTH, NORTH, EAST, WEST\")\r\n sys.exit()\r\n \r\n get_route_details()\r\n\r\ndef get_route_details():\r\n 
''' This function validates the routeName and\r\n fetches the respective routeNo '''\r\n\r\n global routeNo\r\n condCheck = \"\"\r\n urli = \"http://svc.metrotransit.org/NexTrip/Routes\"\r\n headers = {'content-type':'Application/json', 'accept':'Application/json'}\r\n\r\n routes = requests.get(url=urli, headers=headers).json()\r\n\r\n for r in routes: \r\n if routeName == r.get(\"Description\").lower() :\r\n routeNo = r.get(\"Route\")\r\n break \r\n else:\r\n #print(\"The Route requested is not available\")\r\n print(\"Route:\"+routeName+\" \"+\"Does not exist\")\r\n print(\"\\n1.)To get the list of supported routes Enter:R\\n2)To quit enter:Q\")\r\n condCheck=input(\"Enter your inputs\").lower()\r\n if condCheck == \"\" or condCheck == \"Q\" or condCheck == \"q\":\r\n sys.exit()\r\n elif condCheck == \"r\" or condCheck == \"routes\":\r\n for i in routes:\r\n print(i.get(\"Description\"))\r\n print(\"See you soon!!!\")\r\n sys.exit()\r\n direction_validation()\r\n\r\ndef direction_validation(): \r\n ''' This function, with respect to routeNo,\r\n validates the directions available '''\r\n dirText = \"\"\r\n urli = \"http://svc.metrotransit.org/NexTrip/Directions/\"\r\n headers = {'content-type':'Application/json', 'accept':'Application/json'}\r\n allowedDict = requests.get(url=urli+str(routeNo), headers=headers).json()\r\n\r\n for dire in allowedDict:\r\n if int(dire.get(\"Value\")) == int(direction):\r\n dirText = dire.get(\"Text\")\r\n break\r\n if not dirText:\r\n print(\"No routes in the requested direction\")\r\n print(\"Below are the allowed directions for the requested route\")\r\n for i in allowedDict:\r\n print(i.get(\"Text\")+\"\\n\")\r\n sys.exit()\r\n get_stops()\r\ndef get_stops():\r\n ''' This function validates the stopName and\r\n fetches the respective stopCode '''\r\n global stopCode\r\n urli = f\"http://svc.metrotransit.org/NexTrip/Stops/{str(routeNo)}/{direction}\"\r\n headers = {'content-type':'Application/json', 'accept':'Application/json'}\r\n\r\n stops = requests.get(url=urli, headers=headers).json()\r\n\r\n for stop in stops: \r\n if stopName == stop.get(\"Text\").lower().strip():\r\n stopCode = stop.get(\"Value\")\r\n #stopText = stop.get(\"Text\")\r\n break \r\n else:\r\n print(\"Matching Stops not found\")\r\n print(\"Stops:\"+stopName+\" \"+\"Does not exist\")\r\n print(\"To get the list of available stops in this route Enter:Stops\\nTo quit enter:Q\")\r\n condCheck=input().lower()\r\n if condCheck == \"\" or condCheck == \"Q\" or condCheck == \"q\":\r\n sys.exit()\r\n elif \"stops\" in condCheck:\r\n for i in stops:\r\n print(i.get(\"Text\"))\r\n print(\"Thank you!!! Please Try Again\")\r\n sys.exit()\r\n else:\r\n print(\"Please try again with valid Inputs\")\r\n if not stopCode:\r\n print(\"Matching Stops not found\")\r\n print(\"Stops:\"+stopName+\" \"+\"Does not exist\")\r\n print(\"To get the list of available stops in this route Enter:Stops\\nTo quit enter:Q\")\r\n condCheck=input().lower()\r\n if condCheck == \"\" or condCheck == \"Q\" or condCheck == \"q\":\r\n sys.exit()\r\n elif \"stops\" in condCheck:\r\n print(stops)\r\n for i in stops:\r\n print(i.get(\"Text\"))\r\n print(\"See you soon!!!\")\r\n sys.exit()\r\n else:\r\n print(\"Please try again with valid Inputs.See you soon!!!\")\r\n sys.exit()\r\n get_time()\r\ndef get_time():\r\n ''' Based on the routeNo, direction & stopName,\r\n fetch the next bus time available in minutes '''\r\n urli = f\"http://svc.metrotransit.org/NexTrip/{str(routeNo)}/{direction}/{stopCode}\"\r\n headers = {'content-type':'Application/json', 
'accept':'Application/json'}\r\n\r\n depart_response = requests.get(url=urli,headers=headers).json()\r\n\r\n #print(depart_response)\r\n try:\r\n time_stamp_temp = (depart_response[0].get('DepartureTime'))\r\n time_stamp_temp= (((int(re.match(r\"^(\\/Date\\()(\\d{10})\",time_stamp_temp).group(2)) - time.time())/60 ))\r\n print(f\"{round(time_stamp_temp)} Minutes\")\r\n except IndexError:\r\n print(f\"\\nLast bus for the day already left from the {stopName}\\n\")\r\n \r\n#Start of the program\r\n\r\ninput_check() # Prepares the input for the program execution\r\n\r\n\r\n\r\n\r\n","sub_path":"UsingApi.py","file_name":"UsingApi.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"213696869","text":"from models import City\nfrom models import CasesLocation\nfrom sqlalchemy.sql import or_\n\n\nclass ReportService:\n\n def search_on_location_by_term(self, query):\n cases = City.query.filter(\n or_(City.city.like(query), City.state.like(query))\n ).all()\n\n result = []\n\n for case in cases:\n current_case = {\n 'city': case.city,\n 'state': case.state,\n 'cases': {\n 'activeCases': case.active_cases,\n 'suspectedCases': case.suspects,\n 'recoveredCases': case.recovered,\n 'deaths': case.deaths\n }\n }\n result.append(current_case)\n\n return result\n\n def get_all_city_cases(self):\n all_cases = City.query.all()\n return compile_cases(all_cases)\n\n def search_city_cases_by_state(self, state_code):\n city_situation = City.query.filter_by(\n state=state_code).all()\n return compile_cases(city_situation)\n\n def get_cases_near_location(self, latitude, longitude):\n # FIX: Why have those variables not been used?\n all_cases = CasesLocation.query.all()\n return compile_cases_near_location(all_cases)\n\n\ndef compile_cases(data):\n active_cases = sum(\n [city.active_cases\n for city in data]) or 0\n suspected_cases = sum(\n [city.suspects for city in data]) or 0\n recovered_cases = sum(\n [city.recovered for city in data]) or 0\n deaths = sum([city.deaths for city in data]) or 0\n\n return {\n 'activeCases': active_cases,\n 'suspectedCases': suspected_cases,\n 'recoveredCases': recovered_cases,\n 'deaths': deaths\n }\n\n\ndef compile_cases_near_location(data):\n for case in data:\n return {\n 'status': case.status,\n 'location': {\n 'latitude': case.latitude,\n 'longitude': case.longitude\n }\n }\n","sub_path":"src/apis/data/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"421829337","text":"\"\"\"\nThere are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.\n\nSome courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]\n\nGiven the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?\n\nExample 1:\n\nInput: numCourses = 2, prerequisites = [[1,0]]\nOutput: true\nExplanation: There are a total of 2 courses to take. \n To take course 1 you should have finished course 0. So it is possible.\n\nExample 2:\n\nInput: numCourses = 2, prerequisites = [[1,0],[0,1]]\nOutput: false\nExplanation: There are a total of 2 courses to take. \n To take course 1 you should have finished course 0, and to take course 0 you should\n also have finished course 1. 
So it is impossible.\n\nConstraints:\n\n The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.\n You may assume that there are no duplicate edges in the input prerequisites.\n 1 <= numCourses <= 10^5\n\n\"\"\"\n\nfrom collections import defaultdict \n \nclass Solution:\n def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n \"\"\"\n from : https://codereview.stackexchange.com/questions/86021/check-if-a-directed-graph-contains-a-cycle\n \"\"\"\n def cyclic(g):\n path = set()\n\n visited = set()\n path = [object()]\n path_set = set(path)\n stack = [iter(graph)]\n while stack:\n for v in stack[-1]:\n if v in path_set:\n return True\n elif v not in visited:\n visited.add(v)\n path.append(v)\n path_set.add(v)\n stack.append(iter(graph.get(v, ())))\n break\n else:\n path_set.remove(path.pop())\n stack.pop()\n return False\n \n graph = defaultdict(list)\n \n for a,b in prerequisites:\n graph[a].append(b)\n \n return not cyclic(graph)\n \n ","sub_path":"leetcode/May-31-day/week5/course_schedule.py","file_name":"course_schedule.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231510969","text":"# -*- coding: utf-8 -*-\n\n## \\package wizbin.changelog\n\n# MIT licensing\n# See: docs/LICENSE.txt\n\n\nimport commands, os, wx\n\nfrom dbr.language import GT\nfrom dbr.log import Logger\nfrom f_export.ftarget import FileOTarget\nfrom globals.bitmaps import ICON_WARNING\nfrom globals.changes import FormatChangelog\nfrom globals.errorcodes import dbrerrno\nfrom globals.execute import GetExecutable\nfrom globals.fileio import ReadFile\nfrom globals.ident import btnid\nfrom globals.ident import chkid\nfrom globals.ident import inputid\nfrom globals.ident import pgid\nfrom globals.ident import selid\nfrom globals.paths import ConcatPaths\nfrom globals.strings import TextIsEmpty\nfrom globals.system import GetOSDistNames\nfrom globals.tooltips import SetPageToolTips\nfrom input.pathctrl import PathCtrlESS\nfrom input.select import Choice\nfrom input.select import ComboBox\nfrom input.text import TextArea\nfrom input.text import TextAreaPanel\nfrom input.text import TextAreaPanelESS\nfrom input.toggle import CheckBox\nfrom input.toggle import CheckBoxESS\nfrom ui.button import CreateButton\nfrom ui.dialog import DetailedMessageDialog\nfrom ui.layout import BoxSizer\nfrom ui.style import layout as lyt\nfrom wiz.helper import ErrorTuple\nfrom wiz.helper import GetFieldValue\nfrom wiz.helper import GetMainWindow\nfrom wiz.wizard import WizardPage\n\n\n## Changelog page\nclass Page(WizardPage):\n ## Constructor\n #\n # \\param parent\n # Parent wx.Window instance\n def __init__(self, parent):\n WizardPage.__init__(self, parent, pgid.CHANGELOG)\n \n txt_package = wx.StaticText(self, label=GT(u'Package'), name=u'package')\n self.ti_package = TextArea(self, inputid.PACKAGE, name=txt_package.Name)\n \n txt_version = wx.StaticText(self, label=GT(u'Version'), name=u'version')\n self.ti_version = TextArea(self, inputid.VERSION, name=txt_version.Name)\n \n dist_names = GetOSDistNames()\n \n txt_dist = wx.StaticText(self, label=GT(u'Distribution'), name=u'dist')\n \n if dist_names:\n self.ti_dist = ComboBox(self, inputid.DIST, choices=dist_names, name=txt_dist.Name)\n \n # Use regular text input if could not retrieve distribution names list\n else:\n self.ti_dist = TextArea(self, inputid.DIST, name=txt_dist.Name)\n \n 
opts_urgency = (\n u'low',\n u'medium',\n u'high',\n u'emergency',\n )\n \n txt_urgency = wx.StaticText(self, label=GT(u'Urgency'), name=u'urgency')\n self.sel_urgency = Choice(self, selid.URGENCY, choices=opts_urgency, name=txt_urgency.Name)\n \n txt_maintainer = wx.StaticText(self, label=GT(u'Maintainer'), name=u'maintainer')\n self.ti_maintainer = TextArea(self, inputid.MAINTAINER, name=txt_maintainer.Name)\n \n txt_email = wx.StaticText(self, label=GT(u'Email'), name=u'email')\n self.ti_email = TextArea(self, inputid.EMAIL, name=txt_email.Name)\n \n btn_import = CreateButton(self, btnid.IMPORT, GT(u'Import'), u'import', name=u'btn import')\n txt_import = wx.StaticText(self, label=GT(u'Import information from Control page'))\n \n # Changes input\n self.ti_changes = TextAreaPanel(self, size=(20,150), name=u'changes')\n \n # *** Target installation directory\n \n # FIXME: Should this be set by config or project file???\n self.pnl_target = FileOTarget(self, u'/usr/share/doc/', name=u'target default',\n defaultType=CheckBoxESS, customType=PathCtrlESS, pathIds=(chkid.TARGET, inputid.TARGET,))\n \n self.btn_add = CreateButton(self, btnid.ADD, GT(u'Add'), u'add', name=u'btn add')\n txt_add = wx.StaticText(self, label=GT(u'Insert new changelog entry'))\n \n self.chk_indentation = CheckBox(self, label=GT(u'Preserve indentation'), name=u'indent')\n \n self.dsp_changes = TextAreaPanelESS(self, inputid.CHANGES, monospace=True, name=u'log')\n self.dsp_changes.EnableDropTarget()\n \n SetPageToolTips(self)\n \n # *** Event Handling *** #\n \n btn_import.Bind(wx.EVT_BUTTON, self.OnImportFromControl)\n self.btn_add.Bind(wx.EVT_BUTTON, self.AddInfo)\n \n # *** Layout *** #\n \n LEFT_BOTTOM = lyt.ALGN_LB\n LEFT_CENTER = wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL\n RIGHT_CENTER = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL\n \n lyt_info = wx.FlexGridSizer(2, 6)\n \n lyt_info.AddGrowableCol(1)\n lyt_info.AddGrowableCol(3)\n lyt_info.AddGrowableCol(5)\n lyt_info.AddMany((\n (txt_package, 0, RIGHT_CENTER|wx.RIGHT, 5),\n (self.ti_package, 1, wx.EXPAND|wx.BOTTOM|wx.RIGHT, 5),\n (txt_version, 0, RIGHT_CENTER|wx.RIGHT, 5),\n (self.ti_version, 1, wx.EXPAND|wx.BOTTOM|wx.RIGHT, 5),\n (txt_dist, 0, RIGHT_CENTER|wx.RIGHT, 5),\n (self.ti_dist, 1, wx.EXPAND|wx.BOTTOM, 5),\n (txt_urgency, 0, RIGHT_CENTER|wx.RIGHT, 5),\n (self.sel_urgency, 1, wx.RIGHT, 5),\n (txt_maintainer, 0, RIGHT_CENTER|wx.RIGHT, 5),\n (self.ti_maintainer, 1, wx.EXPAND|wx.RIGHT, 5),\n (txt_email, 0, RIGHT_CENTER|wx.RIGHT, 5),\n (self.ti_email, 1, wx.EXPAND)\n ))\n \n lyt_details = wx.GridBagSizer()\n lyt_details.SetCols(3)\n lyt_details.AddGrowableRow(2)\n lyt_details.AddGrowableCol(1)\n \n lyt_details.Add(btn_import, (0, 0))\n lyt_details.Add(txt_import, (0, 1), flag=LEFT_CENTER)\n lyt_details.Add(wx.StaticText(self, label=GT(u'Changes')), (1, 0), flag=LEFT_BOTTOM)\n lyt_details.Add(wx.StaticText(self, label=GT(u'Target')), (1, 2), flag=LEFT_BOTTOM)\n lyt_details.Add(self.ti_changes, (2, 0), (1, 2), wx.EXPAND|wx.RIGHT, 5)\n lyt_details.Add(self.pnl_target, (2, 2))\n lyt_details.Add(self.btn_add, (3, 0), (2, 1))\n lyt_details.Add(txt_add, (3, 1), flag=LEFT_BOTTOM|wx.TOP, border=5)\n lyt_details.Add(self.chk_indentation, (4, 1), flag=LEFT_BOTTOM)\n \n lyt_main = BoxSizer(wx.VERTICAL)\n lyt_main.AddSpacer(10)\n lyt_main.Add(lyt_info, 0, wx.EXPAND|lyt.PAD_LR, 5)\n lyt_main.AddSpacer(10)\n lyt_main.Add(lyt_details, 1, wx.EXPAND|lyt.PAD_LR, 5)\n lyt_main.Add(wx.StaticText(self, label=u'Changelog Output'),\n 0, LEFT_BOTTOM|lyt.PAD_LT, 5)\n 
lyt_main.Add(self.dsp_changes, 1, wx.EXPAND|lyt.PAD_LR|wx.BOTTOM, 5)\n \n self.SetAutoLayout(True)\n self.SetSizer(lyt_main)\n self.Layout()\n \n \n ## Formats input text from 'changes' field for new entry in changelog\n def AddInfo(self, event=None):\n new_changes = self.ti_changes.GetValue()\n \n if TextIsEmpty(new_changes):\n DetailedMessageDialog(GetMainWindow(), GT(u'Warning'), ICON_WARNING,\n GT(u'\"Changes\" section is empty')).ShowModal()\n \n self.ti_changes.SetInsertionPointEnd()\n self.ti_changes.SetFocus()\n \n return\n \n package = self.ti_package.GetValue()\n version = self.ti_version.GetValue()\n dist = self.ti_dist.GetValue()\n urgency = self.sel_urgency.GetStringSelection()\n maintainer = self.ti_maintainer.GetValue()\n email = self.ti_email.GetValue()\n \n new_changes = FormatChangelog(new_changes, package, version, dist, urgency,\n maintainer, email, self.chk_indentation.GetValue())\n \n # Clean up leading & trailing whitespace in old changes\n old_changes = self.dsp_changes.GetValue().strip(u' \\t\\n\\r')\n \n # Only append newlines if log isn't already empty\n if not TextIsEmpty(old_changes):\n new_changes = u'{}\\n\\n\\n{}'.format(new_changes, old_changes)\n \n # Add empty line to end of log\n if not new_changes.endswith(u'\\n'):\n new_changes = u'{}\\n'.format(new_changes)\n \n self.dsp_changes.SetValue(new_changes)\n \n # Clear \"Changes\" text\n self.ti_changes.Clear()\n self.ti_changes.SetFocus()\n \n \n ## Exports page's data to file\n #\n # \\param out_dir\n # Target directory where file will be written\n # \\out_name\n # Filename of output file\n # \\compress\n # If True, compresses file with gzip\n def Export(self, out_dir, out_name=wx.EmptyString, compress=False):\n ret_value = WizardPage.Export(self, out_dir, out_name=out_name)\n \n absolute_filename = u'{}/{}'.format(out_dir, out_name).replace(u'//', u'/')\n \n CMD_gzip = GetExecutable(u'gzip')\n \n if compress and CMD_gzip:\n commands.getstatusoutput(u'{} -n9 \"{}\"'.format(CMD_gzip, absolute_filename))\n \n return ret_value\n \n \n ## Export instructions specifically for build phase\n #\n # \\param stage\n # Formatted staged directory where file heirarchy is temporarily kept\n # \\return\n # Tuple containing a return code & string value of page data\n def ExportBuild(self, stage):\n target = self.pnl_target.GetPath()\n \n if target == self.pnl_target.GetDefaultPath():\n target.replace(u'', GetFieldValue(pgid.CONTROL, inputid.PACKAGE))\n \n stage = ConcatPaths((stage, target))\n \n if not os.path.isdir(stage):\n os.makedirs(stage)\n \n # FIXME: Allow user to set filename\n self.Export(stage, u'changelog', True)\n \n export_summary = GT(u'Changelog export failed')\n changelog = ConcatPaths((stage, u'changelog.gz'))\n \n if os.path.isfile(changelog):\n export_summary = GT(u'Changelog export to: {}').format(changelog)\n \n return(0, export_summary)\n \n \n ## Retrieves changelog text\n #\n # The output is a text file that uses sections defined by braces ([, ])\n #\n # \\param getModule\n # If True, returns a tuple of the module name\n # & page data, otherwise return only page data string\n # \\return\n # tuple(str, str): Filename & formatted string of changelog target & body\n def Get(self, getModule=False):\n target = self.pnl_target.GetPath()\n \n if target == self.pnl_target.GetDefaultPath():\n target = u'DEFAULT'\n \n body = self.dsp_changes.GetValue()\n \n if TextIsEmpty(body):\n page = None\n \n else:\n page = u'[TARGET={}]\\n\\n[BODY]\\n{}'.format(target, body)\n \n if getModule:\n page = (__name__, 
page,)\n \n return page\n \n \n ## Retrieves plain text of the changelog field\n #\n # \\return\n # Formatted changelog text\n def GetChangelog(self):\n return self.dsp_changes.GetValue()\n \n \n ## Reads & parses page data from a formatted text file\n #\n # \\param filename\n # File path to open\n def ImportFromFile(self, filename):\n if not os.path.isfile(filename):\n return dbrerrno.ENOENT\n \n clog_data = ReadFile(filename, split=True)\n \n sections = {}\n \n def parse_section(key, lines):\n value = u'\\n'.join(lines).split(u'\\n[')[0]\n \n if u'=' in key:\n key = key.split(u'=')\n value = (key[-1], value)\n key = key[0]\n \n sections[key] = value\n \n # NOTE: This would need to be changed were more sections added to project file\n for L in clog_data:\n line_index = clog_data.index(L)\n \n if not TextIsEmpty(L) and u'[' in L and u']' in L:\n L = L.split(u'[')[-1].split(u']')[0]\n parse_section(L, clog_data[line_index+1:])\n \n for S in sections:\n Logger.Debug(__name__, GT(u'Changelog section: \"{}\", Value:\\n{}').format(S, sections[S]))\n \n if isinstance(sections[S], (tuple, list)):\n value_index = 0\n for I in sections[S]:\n Logger.Debug(__name__, GT(u'Value {}: {}').format(value_index, I))\n value_index += 1\n \n if S == u'TARGET':\n Logger.Debug(__name__, u'SECTION TARGET FOUND')\n \n if sections[S][0] == u'DEFAULT':\n Logger.Debug(__name__, u'Using default target')\n \n if not self.pnl_target.UsingDefault():\n self.pnl_target.Reset()\n \n else:\n Logger.Debug(__name__, GT(u'Using custom target: {}').format(sections[S][0]))\n \n self.pnl_target.SetPath(sections[S][0])\n \n continue\n \n if S == u'BODY':\n Logger.Debug(__name__, u'SECTION BODY FOUND')\n \n self.dsp_changes.SetValue(sections[S])\n \n continue\n \n return 0\n \n \n ## Checks the page's fields for exporting\n #\n # \\return\n # False if page cannot be exported\n def IsOkay(self):\n return not TextIsEmpty(self.dsp_changes.GetValue())\n \n \n ## Imports select field values from the 'Control' page\n def OnImportFromControl(self, event=None):\n fields = (\n (self.ti_package, inputid.PACKAGE),\n (self.ti_version, inputid.VERSION),\n (self.ti_maintainer, inputid.MAINTAINER),\n (self.ti_email, inputid.EMAIL),\n )\n \n for F, FID in fields:\n field_value = GetFieldValue(pgid.CONTROL, FID)\n \n if isinstance(field_value, ErrorTuple):\n err_msg1 = GT(u'Got error when attempting to retrieve field value')\n err_msg2 = u'\\tError code: {}\\n\\tError message: {}'.format(field_value.GetCode(), field_value.GetString())\n Logger.Error(__name__, u'{}:\\n{}'.format(err_msg1, err_msg2))\n \n continue\n \n if not TextIsEmpty(field_value):\n F.SetValue(field_value)\n \n \n ## Sets values of page's fields with given input\n #\n # \\param data\n # Text to parse for values\n def Set(self, data):\n changelog = data.split(u'\\n')\n target = changelog[0].split(u'<>')[1].split(u'<>')[0]\n \n if target == u'DEFAULT':\n if not self.pnl_target.UsingDefault():\n self.pnl_target.Reset()\n \n else:\n self.pnl_target.SetPath(target)\n \n self.dsp_changes.SetValue(u'\\n'.join(changelog[1:]))\n","sub_path":"wizbin/changelog.py","file_name":"changelog.py","file_ext":"py","file_size_in_byte":15184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457136900","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the twoStrings function below.\ndef twoStrings(s1, s2):\n chars_1 = list(s1)\n chars_2 = list(s2)\n lngth = len(set(chars_1)) + len(set(chars_2))\n 
st = set(chars_1 + chars_2)\n if (len(st) == lngth):\n return(\"NO\")\n else: \n return(\"YES\")\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n s1 = input()\n\n s2 = input()\n\n result = twoStrings(s1, s2)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","sub_path":"python_random/two-strings.py","file_name":"two-strings.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"301890008","text":"import sys\n\nxyz = []\n#try:\nwith open(sys.argv[1], \"r\") as f:\n for line in f:\n point = []\n if line.strip():\n for num in line.split(\" \"):\n print(\"-->\" + num)\n point.append(float(num))\n xyz.append(point)\n\n# except:\n# print(\"Unable to load input point cloud\")\n# sys.exit()\n\nxyz.sort(key = lambda x: x[2])\n\nmin_x = min(i[0] for i in xyz)\nmin_y = min(i[1] for i in xyz)\nmin_z = min(i[2] for i in xyz)\nif min_x < 0:\n for i in xyz:\n i[0] -= min_x\nif min_y < 0:\n for i in xyz:\n i[1] -= min_y\nif min_z < 0:\n for i in xyz:\n i[2] -= min_z\n\nf = open(sys.argv[1] + \"-s\", \"w\")\nfor point in xyz:\n for num in point:\n f.write(str(num) + \" \")\n f.write(\"\\n\")\n","sub_path":"old/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"203548727","text":"import pickle\n\n\n# blue_mode: 0 = not use bluetooth mode, 1 = use bluetooth mode\n\n# refresh_history: the time to take a sample of the face and save it in the history\n# depends on the speed of your CPU\n\n# refresh_blue: a reset time which is used when a person is recognized using bluetooth mode and\n# we don't want to scan anything, 330 is equal to 5 seconds\n\n# average_num, a smaller number represents a more accurate match\n# votes_num, a higher number represents a more accurate match\n# it is stricter if it is more accurate\n\n# unknown_num, how much time the system has to wait to recognize a person as unknown\n\naction = int(input(\"input action: \"))\n\nsettings = {'blue_mode': 0,\n 'blue_com': \"COM11\",\n 'blue_bauds': 9600,\n 'refresh_blue': 330,\n 'refresh_history': 25,\n 'average_num': 0.45,\n 'votes_num': 5,\n 'unknown_num': 10}\n\nif action == 1:\n f = open(\"settings.pickle\", \"wb\")\n f.write(pickle.dumps(settings))\n f.close()\nelse:\n load_settings = pickle.loads(open(\"settings.pickle\", \"rb\").read())\n print(load_settings)\n","sub_path":"data/SetSettings.py","file_name":"SetSettings.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"89068280","text":"\"\"\"An Accessor consists of a list of MultiHopAccessors. It provides a method `get_session`\nwhich will iterate through the MultiHopAccessors until a session can be obtained to\na target account.\"\"\"\nfrom dataclasses import dataclass\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Type\n\nimport boto3\n\nfrom altimeter.aws.auth.cache import AWSCredentialsCache\nfrom altimeter.aws.auth.exceptions import AccountAuthException\nfrom altimeter.aws.auth.multi_hop_accessor import MultiHopAccessor\nfrom altimeter.core.log import Logger\nfrom altimeter.core.log_events import LogEvent\n\n\n@dataclass(frozen=True)\nclass Accessor:\n \"\"\"An Accessor consists of a list of MultiHopAccessors. 
It provides a method `get_session`\n which will iterate through the MultiHopAccessors until a session can be obtained to\n a target account. If an Accessor has no MultiHopAccessors it simply uses the local\n session to attempt to access the account. If the session does not match the requested\n target account id, ValueError is thrown.\n\n Args:\n multi_hop_accessors: List of MultiHopAccessors\n credentials_cache: AWSCredentialsCache\n \"\"\"\n\n multi_hop_accessors: List[MultiHopAccessor]\n credentials_cache: Optional[AWSCredentialsCache] = None\n\n def get_session(self, account_id: str, region_name: Optional[str] = None) -> boto3.Session:\n \"\"\"Get a boto3 session for a given account.\n\n Args:\n account_id: target account id\n region_name: session region name\n\n Returns:\n boto3.Session object\n \"\"\"\n logger = Logger()\n errors = []\n if self.multi_hop_accessors:\n for mha in self.multi_hop_accessors: # pylint: disable=not-an-iterable\n with logger.bind(auth_accessor=str(mha)):\n try:\n session = mha.get_session(\n account_id=account_id,\n region_name=region_name,\n credentials_cache=self.credentials_cache,\n )\n return session\n except Exception as ex:\n errors.append(ex)\n logger.debug(event=LogEvent.AuthToAccountFailure, exception=str(ex))\n raise AccountAuthException(f\"Unable to access {account_id} using {str(self)}: {errors}\")\n # local run mode\n session = boto3.Session(region_name=region_name)\n sts_client = session.client(\"sts\")\n sts_account_id = sts_client.get_caller_identity()[\"Account\"]\n if sts_account_id != account_id:\n raise ValueError(f\"BUG: sts_account_id {sts_account_id} != {account_id}\")\n return session\n\n def __str__(self) -> str:\n return \",\".join(\n [str(mha) for mha in self.multi_hop_accessors] # pylint: disable=not-an-iterable\n )\n\n @classmethod\n def from_dict(\n cls: Type[\"Accessor\"], data: Dict[str, Any], cache_creds: bool = True\n ) -> \"Accessor\":\n mhas = data.get(\"multi_hop_accessors\", [])\n credentials_cache = None\n credentials_cache_dict = data.get(\"credentials_cache\")\n if credentials_cache_dict is None:\n if cache_creds:\n credentials_cache = AWSCredentialsCache()\n else:\n credentials_cache = AWSCredentialsCache.from_dict(credentials_cache_dict)\n return cls(\n multi_hop_accessors=[MultiHopAccessor.from_dict(mha) for mha in mhas],\n credentials_cache=credentials_cache,\n )\n\n @classmethod\n def from_file(cls: Type[\"Accessor\"], filepath: Path, cache_creds: bool = True) -> \"Accessor\":\n \"\"\"Create an Accessor from json content in a file\n\n Args:\n filepath: Path to json accessor definition\n\n Returns:\n Accessor\n \"\"\"\n with filepath.open(\"r\") as fp:\n config_dict = json.load(fp)\n if cache_creds:\n credentials_cache = AWSCredentialsCache()\n config_dict[\"credentials_cache\"] = credentials_cache.to_dict()\n return cls.from_dict(config_dict)\n\n def to_dict(self) -> Dict[str, Any]:\n credentials_cache_dict = (\n None if self.credentials_cache is None else self.credentials_cache.to_dict()\n )\n return {\n \"multi_hop_accessors\": [mha.to_dict() for mha in self.multi_hop_accessors],\n \"credentials_cache\": credentials_cache_dict,\n }\n","sub_path":"altimeter/aws/auth/accessor.py","file_name":"accessor.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457592737","text":"c = ('\\033[m', # cor 0 - sem cores;\n '\\033[0;30;41m', # cor 1 - vermelho;\n '\\033[0;30;42m', # cor 2 - verde;\n '\\033[0;30;43m', # cor 3 - 
amarelo;\n '\\033[0;30;44m', # cor 4 - azul;\n '\\033[0;30;45m', # cor 5 - roxo;\n '\\033[7;30' # cor 6 - branco.\n )\n\n\ndef ajuda(com):\n título(f'Acessando o manual do comando \\'{com}\\'', 4)\n help(com)\n\n\ndef título(msg, cor=0):\n tam = len(msg) + 4\n print(f'{c[cor]}', end='')\n print('~' * tam)\n print(f\" {msg}\")\n print('~' * tam)\n print(f\"{c[0]}\", end='')\n\n\n# principal:\ncomando = ''\nwhile True:\n título(\"Sistema de ajuda Pyhelp.\", 1)\n comando = str(input(\"Função ou biblioteca > \"))\n if comando.upper() == 'FIM':\n break\n else:\n ajuda(comando)\n título(\"Até logo!\", 5)\n","sub_path":"Estruturas/Funções/manual interativo.py","file_name":"manual interativo.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465177648","text":"\"\"\"Debug logging support.\"\"\"\n\nimport sublime_plugin\nimport time\n\n\nlogs = {}\n\n\nclass WindowLog:\n\n \"\"\"Collection of log messages tied to a window.\"\"\"\n\n view = None\n\n def __init__(self):\n self.messages = []\n\n def clear(self):\n self.messages.clear()\n if self.view:\n self.view.run_command(\"select_all\")\n self.view.run_command(\"right_delete\")\n\n def add_message(self, msg, args):\n if self.messages:\n previous_time = self.messages[-1].time\n else:\n previous_time = None\n lm = LogMessage(msg, args, previous_time)\n self.messages.append(lm)\n self._display_message(lm)\n\n def _display_message(self, msg):\n if self.view:\n text = msg.render()\n self.view.run_command('append', {'characters': text,\n 'scroll_to_end': True})\n\n def open_view(self, window):\n view = window.new_file()\n view.set_scratch(True)\n view.settings().set('rust_log_view', window.id())\n view.settings().set('word_wrap', True)\n view.set_name('Rust Enhanced Debug Log')\n self.view = view\n for m in self.messages:\n self._display_message(m)\n\n\nclass LogMessage:\n def __init__(self, msg, args, previous_time):\n self.msg = msg\n self.args = args\n self.previous_time = previous_time\n self.time = time.time()\n\n def render(self):\n if self.previous_time is None:\n last_time = '+0.000'\n else:\n last_time = '+%.3f' % (self.time - self.previous_time,)\n if self.args:\n rendered = self.msg % self.args\n else:\n rendered = self.msg\n return '%s %s\\n' % (last_time, rendered.rstrip())\n\n\ndef critical(window, msg, *args):\n \"\"\"Add a log message and display it to the console.\"\"\"\n log(window, msg, *args)\n if args:\n print(msg % args)\n else:\n print(msg)\n\n\ndef log(window, msg, *args):\n \"\"\"Add a log message.\"\"\"\n global logs\n wlog = logs.setdefault(window.id(), WindowLog())\n wlog.add_message(msg, args)\n\n\ndef clear_log(window):\n \"\"\"Clear log messages.\"\"\"\n try:\n logs[window.id()].clear()\n except KeyError:\n pass\n\n\nclass RustOpenLog(sublime_plugin.WindowCommand):\n\n \"\"\"Opens a view to display log messages generated by the Rust Enhanced\n plugin.\"\"\"\n\n def run(self):\n wlog = logs.setdefault(self.window.id(), WindowLog())\n if wlog.view:\n self.window.focus_view(wlog.view)\n else:\n wlog.open_view(self.window)\n\n\nclass RustLogEvent(sublime_plugin.ViewEventListener):\n\n @classmethod\n def is_applicable(cls, settings):\n return settings.has('rust_log_view')\n\n def on_pre_close(self):\n try:\n wlog = logs[self.view.settings().get('rust_log_view')]\n except KeyError:\n return\n else:\n wlog.view = 
None\n","sub_path":"rust/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560221858","text":"import socket\nimport struct\nimport json\nimport select\nfrom enum import Enum\n\nclass Action(Enum):\n\tLOGIN = 1\n\tLOGOUT = 2\n\tMOVE = 3\n\tUPGRADE = 4\n\tTURN = 5\n\tPLAYER = 6\n\tMAP = 10\n\nclass Result(Enum):\n\tOKEY = 0\n\tBAD_COMMAND = 1\n\tRESOURCE_NOT_FOUND = 2\n\tACCESS_DENIED = 3\n\tNOT_READY = 4\n\tTIMEOUT = 5\n\tINTERNAL_SERVER_ERROR = 500\n\nclass Socket():\n\turl='wgforge-srv.wargaming.net'\n\tport=443\n\t\n\tdef __init__(self):\n\t\tprint(\"sock init\")\n\t\tself.sock=socket.socket()\n\t\tself.sock.connect((self.url, self.port))\n\t\t\n\tdef __del__(self):\n\t\tprint(\"destroyed\")\n\t\tself.logout()\n\t\tself.sock.close()\n\t\n\tdef inter(self, data):\n\t\tchoice=data[0]\n\t\tif choice==Action.LOGIN.value:\n\t\t\treturn self.login(data[1])\n\t\telif choice==Action.LOGOUT.value:\n\t\t\treturn self.logout()\n\t\telif choice==Action.MOVE.value:\n\t\t\treturn self.move(data[1], data[2], data[3])\n\t\telif choice==Action.UPGRADE.value:\n\t\t\treturn self.upgrade(data[1], data[2])\n\t\telif choice==Action.TURN.value:\n\t\t\treturn self.nextTurn()\n\t\telif choice==Action.PLAYER.value:\n\t\t\treturn self.player()\n\t\telif choice==Action.MAP.value:\n\t\t\treturn self.getmap(data[1])\n\t\telse:\n\t\t\tprint(\"Unknown command\")\n\t\n\tdef action(self, tmp, string):\n\t\tbytes = tmp.to_bytes(4, byteorder='little') + len(string).to_bytes(4, byteorder='little') + string.encode()\n\t\tself.sock.send(bytes)\n\t\tresponse = self.sock.recv(4)\n\t\tif response:\n\t\t\tresponse = struct.unpack('L', response)[0]\n\t\telse:\n\t\t\treturn response\n\t\tprint(response)\n\t\tif response != Result.OKEY.value:\n\t\t\twhile True:\n\t\t\t\tready, a, b = select.select([self.sock], [], [], 10)\n\t\t\t\tif len(ready)==0: break\n\t\t\t\tself.sock.recv(1)\n\t\t\tself.errorReport(response)\n\t\t\treturn response\n\t\tdatalen = self.sock.recv(4)\n\t\tdatalen = struct.unpack('L', datalen)[0]\n\t\tif datalen:\n\t\t\tprint(\"\\nlen(data)\\n\")\n\t\t\tprint(datalen)\n\t\t\tself.sock.settimeout(10)\n\t\t\tdata = self.sock.recv(datalen)\n\t\t\twhile datalen > len(data):\n\t\t\t\tdata += self.sock.recv(datalen)\n\t\t\tdata = data.decode('utf8').replace(\"\\n\", '').replace(\" \", '')\n\t\t\tprint(data)\n\t\t\tdata = json.loads(data)\n\t\t\treturn response, data\n\t\treturn response\n\t\n\tdef login(self, name):\n\t\tstring = json.dumps({\"name\": name})\n\t\treturn self.action(Action.LOGIN.value, string)\n\n\tdef player(self):\n\t\treturn self.action(Action.PLAYER.value, '')\n\t\n\tdef logout(self):\n\t\tdata= self.action(Action.LOGOUT.value, '')\n\t\treturn data\n\t\n\tdef getmap(self, layer):\n\t\tprint(layer)\n\t\tstring = json.dumps({\"layer\": layer})\n\t\treturn self.action(Action.MAP.value, string)\n\t\n\tdef nextTurn(self):\n\t\treturn self.action(Action.TURN.value, '')\n\t\n\tdef move(self, line_idx, speed, train_idx):\n\t\tstring = json.dumps({\"line_idx\": line_idx, \"speed\": speed, \"train_idx\": train_idx})\n\t\treturn self.action(Action.MOVE.value, string)\n\t\n\tdef upgrade(self, posts, trains):\n\t\tposts=[posts]\n\t\ttrains=[trains]\n\t\tstring = json.dumps({\"posts\": posts, \"trains\": trains})\n\t\treturn self.action(Action.UPGRADE.value, string)\n\t\t\n\tdef errorReport(self, code):\n\t\tif code == Result.BAD_COMMAND.value:\n\t\t\tprint(\"Bad command\")\n\t\telif code == 
Result.RESOURCE_NOT_FOUND.value:\r\n\t\t\tprint(\"Resource not found\")\r\n\t\telif code == Result.ACCESS_DENIED.value:\r\n\t\t\tprint(\"Access denied\")\r\n\t\telif code == Result.NOT_READY.value:\r\n\t\t\tprint(\"Not ready\")\r\n\t\telif code == Result.TIMEOUT.value:\r\n\t\t\tprint(\"Timeout\")\r\n\t\telse:\r\n\t\t\tprint(\"Internal server error\")","sub_path":"src/serverinteraction.py","file_name":"serverinteraction.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"486456700","text":"#!/usr/bin/env python\n\nimport time\nimport math\nimport sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../core\"))\n\nfrom canvas import *\nfrom map import *\nfrom particle_controller import *\nfrom robot import *\nfrom ultrasonic_sensor import *\nfrom wayfinder import *\n\ndef add_walls_to_map(mymap):\n mymap.add_wall((0, 0, 0, 168)); # a\n mymap.add_wall((0, 168, 84, 168)); # b\n mymap.add_wall((84, 126, 84, 210)); # c\n mymap.add_wall((84, 210, 168, 210)); # d\n mymap.add_wall((168, 210, 168, 84)); # e\n mymap.add_wall((168, 84, 210, 84)); # f\n mymap.add_wall((210, 84, 210, 0)); # g\n mymap.add_wall((210, 0, 0, 0)); # h\n\n\nif __name__ == \"__main__\":\n\n navigatePoints = [\n # (84, 30), # Initial position\n (180, 30),\n (180, 54),\n (126, 54),\n (126, 168),\n (127, 168), # We use this to look around, 90 degrees. Will be removed in the future\n (126, 126),\n (30, 54),\n (84, 54),\n (84, 30)\n ]\n\n canvas = Canvas()\n bricky = Robot()\n wayfinder = Wayfinder(bricky, 84, 30, 0)\n\n\n with UltrasonicSensor(bricky.get_interface()) as ultrasonicSensor:\n mymap = Map(navigatePoints)\n add_walls_to_map(mymap)\n mymap.draw(canvas);\n\n wayfinder.particle_controller.draw_particles(canvas)\n\n for (x, y) in navigatePoints:\n\n gotToWaypoint = False\n while not gotToWaypoint:\n\n wayfinder.rotate_to_navigate_to_waypoint(x, y)\n wayfinder.particle_controller.draw_particles(canvas)\n # time.sleep(1)\n\n sonarReading = ultrasonicSensor.get_median_reading()\n wayfinder.particle_controller.update_particle_weights(sonarReading, mymap)\n wayfinder.particle_controller.draw_particles(canvas)\n # time.sleep(1)\n\n wayfinder.rotate_to_navigate_to_waypoint(x, y)\n gotToWaypoint = wayfinder.move_to_navigate_to_waypoint(x, y)\n wayfinder.particle_controller.draw_particles(canvas)\n # time.sleep(1)\n\n sonarReading = ultrasonicSensor.get_median_reading()\n wayfinder.particle_controller.update_particle_weights(sonarReading, mymap)\n wayfinder.particle_controller.draw_particles(canvas)\n # time.sleep(1)\n\n time.sleep(0.1)\n","sub_path":"submissionScripts/task4/navigationTest.py","file_name":"navigationTest.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"590029431","text":"import os\nimport random\nimport requests\n\nfrom lxml import html\n\nCONFIG_FILE = os.path.join(os.path.dirname(__file__), '..','config_service_client.py')\nconfig = CONFIG_FILE.ConfigService().get(\"newsPipelineConfig\").get(\"scrapers\")\n\nGET_CNN_NEWS_XPATH = config.get(\"GET_CNN_NEWS_XPATH\")\n\nUSER_AGENTS_FILE = os.path.join(os.path.dirname(__file__), 'user_agents.txt')\nUSER_AGENTS = []\n\nwith open(USER_AGENTS_FILE, 'rb') as uaf:\n for ua in uaf.readlines():\n if ua:\n USER_AGENTS.append(ua.strip()[1:-1])\n\ndef getHeaders():\n ua = random.choice(USER_AGENTS)\n headers={\n \"Connection\": \"close\",\n \"User-Agent\": ua\n }\n return headers\n\ndef 
extract_news(news_url):\n session_requests = requests.session()\n response = session_requests.get(news_url, headers=getHeaders())\n \n news={}\n\n try:\n tree = html.fromstring(response.content)\n news = tree.xpath(GET_CNN_NEWS_XPATH)\n news = ''.join(news)\n except Exception:\n return{}\n\n return news","sub_path":"week2/news_pipeline/scrapers/cnn_news_scraper.py","file_name":"cnn_news_scraper.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"65493524","text":"def permute(A1,A2):\r\n if len(A1)!=len(A2):\r\n return False\r\n A1.sort()\r\n A2.sort()\r\n if A1==A2:\r\n return True\r\n else:\r\n return False\r\n\r\nA1=[int(x) for x in input().strip().split()]\r\nA2=[int(x) for x in input().strip().split()]\r\nprint(permute(A1,A2))\r\n","sub_path":"Array and List Based Codes/Permutation/permute.py","file_name":"permute.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282999208","text":"import cv2\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\n# 이미지 읽기\nimg = cv2.imread('img1.jpg')\nplt.imshow(img)\nplt.title('RGB')\nplt.show()\n\n# 이미지를 grayscale로 변환\nimg_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nplt.imshow(img_gray,cmap='gray')\nplt.title('gray_scale')\nplt.show()\nprint(img_gray.shape)\n\n# 2중 for문을 활용한 histogram 계산\nhist = [0]*256\nh,w = img_gray.shape\nfor x in range(w):\n for y in range(h):\n hist[img_gray[y][x]]+=1\nplt.title('for')\nplt.bar(range(256),hist)\nplt.show()\n\n# cv2를 활용한 histogram 계산\nhist_cv2 = cv2.calcHist([img_gray],[0],None,[256],[0,256])\nhist_cv2 = hist_cv2.reshape(-1)\nplt.bar(range(256),hist_cv2)\nplt.title('cv2')\nplt.show()","sub_path":"Study_main/SW-AI/Pattern_Recognization/2주차/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423520722","text":"import tkinter as tk\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport random\r\nimport time\r\n\r\ntitle = \"FrameWork\"\r\n\r\nclass CreateToolTip(object):\r\n\r\n def __init__(self, widget, text='widget info'):\r\n self.widget = widget\r\n self.text = text\r\n self.widget.bind(\"\", self.enter)\r\n self.widget.bind(\"\", self.close)\r\n\r\n def enter(self, event=None):\r\n x = y = 0\r\n x, y, cx, cy = self.widget.bbox(\"insert\")\r\n x += self.widget.winfo_rootx() + 45\r\n y += self.widget.winfo_rooty() + 25\r\n # creates a toplevel window\r\n self.tooltip = tk.Toplevel(self.widget)\r\n # Leaves only the label and removes the app window\r\n self.tooltip.wm_overrideredirect(True)\r\n self.tooltip.wm_geometry(\"+%d+%d\" % (x, y))\r\n label = tk.Label(self.tooltip, text=self.text, relief=\"flat\", justify='right', borderwidth=1, font=(\"SEGOE UI\", \"8\", \"normal\"))\r\n label.pack(ipady=1)\r\n\r\n def close(self, event=None):\r\n if self.tooltip != True:\r\n self.tooltip.destroy()\r\n\r\nclass main_frame(tk.Tk):\r\n\r\n def __init__(self, *args, **kwargs):\r\n\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n container = tk.Frame(self)\r\n container.pack(side=\"top\", fill=\"both\", expand = True)\r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n\r\n self.frames = {}\r\n\r\n for F in (IntroPage1,IntroPage2,IntroPage3):\r\n\r\n frame = F(container, self)\r\n self.frames[F] = frame\r\n frame.grid(row=0, column=0, 
sticky=\"nsew\")\r\n\r\n self.show_frame(IntroPage1)\r\n\r\n def show_frame(self, navigation): \r\n\r\n frame = self.frames[navigation]\r\n frame.tkraise()\r\n\r\nclass IntroPage1(tk.Frame):\r\n\r\n def __init__(self, parent, controller):\r\n\r\n tk.Frame.__init__(self, parent)\r\n canvas_main = tk.Canvas(self)\r\n self.canvas_main = canvas_main\r\n canvas_layer1 = tk.Canvas(self)\r\n self.canvas_layer1 = canvas_layer1\r\n exit_ = ttk.Button(self, text=\"Exit\",command=quit)\r\n exit_.grid(row=0,column=0)\r\n Next = ttk.Button(self, text=\"Next\",command=lambda:controller.show_frame(IntroPage2))\r\n Next.grid(row=0, column=1)\r\n Page1_Label = ttk.Label(self, text=\"Page1\")\r\n Page1_Label.grid(row=1, column=0)\r\n CreateToolTip(exit_, f\"Exit {title}\")\r\n CreateToolTip(Next, \"Go to Page 2\")\r\n CreateToolTip(Page1_Label,\"Info\")\r\n\r\nclass IntroPage2(tk.Frame):\r\n\r\n def __init__(self, parent, controller):\r\n\r\n tk.Frame.__init__(self, parent)\r\n back = ttk.Button(self, text=\"Back\",command=lambda:controller.show_frame(IntroPage1))\r\n back.grid(row=0,column=0)\r\n Next = ttk.Button(self, text=\"Next\", command=lambda:controller.show_frame(IntroPage3))\r\n Next.grid(row=0,column=1)\r\n Page2_Label = tk.Label(self, text=\"Page2\")\r\n Page2_Label.grid(row=1,column=0)\r\n CreateToolTip(back, \"Go back to Page 1\")\r\n CreateToolTip(Next, \"Go to Page 3\")\r\n CreateToolTip(Page2_Label,\"Info\")\r\n\r\nclass IntroPage3(tk.Frame):\r\n\r\n def __init__(self, parent, controller):\r\n\r\n tk.Frame.__init__(self, parent)\r\n back = ttk.Button(self, text=\"Back\", command=lambda:controller.show_frame(IntroPage2))\r\n back.pack(side='left',fill=X)\r\n Home = ttk.Button(self, text=\"Home\", command=lambda:controller.show_frame(IntroPage1))\r\n Home.pack(side='left',fill=X) \r\n Page3_Label = tk.Label(self, text=\"Page3\")\r\n Page3_Label.pack(side='bottom',fill=X,anchor=tk.W)\r\n CreateToolTip(Home, \"Go Home\")\r\n CreateToolTip(back, \"Go back to Page 2\")\r\n CreateToolTip(Page3_Label,\"Info\")\r\n\r\napp = main_frame()\r\napp.geometry(\"640x640\")\r\napp.title(str(title))\r\napp.wm_state('zoomed')\r\napp.configure(bg=\"white\")\r\napp.mainloop()\r\n","sub_path":"Tkinter Reference/Tkinter_Navigation_Frame_Work.py","file_name":"Tkinter_Navigation_Frame_Work.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379082309","text":"import pygame\r\nfrom pygame import display\r\nfrom pygame import surface\r\nfrom pygame import draw \r\nfrom pygame.locals import *\r\nfrom buttons import button\r\nimport os\r\nimport sys\r\nimport random\r\nfrom pygame import mixer\r\n\r\ndef run_numbers():\r\n\r\n pygame.init()\r\n clock = pygame.time.Clock()\r\n\r\n # Loading the song\r\n mixer.music.load(\"clock_sound.mp3\")\r\n \r\n # Setting the volume\r\n mixer.music.set_volume(2)\r\n\r\n WINDOW_SIZE = (1920,1080)\r\n screen = pygame.display.set_mode(WINDOW_SIZE)\r\n\r\n background = pygame.Rect(0,0,1920,1080)\r\n refresh_button = button(3,3,200,50,color=(200,100,0),text='Refresh')\r\n scramble_button = button(1920-3-200,3,200,50,color=(200,100,0),text='Scramble')\r\n menu_button = button(3,1080-50-3,200,50,color=(200,100,0),text='Menu')\r\n\r\n numbers =[]\r\n hide_num_buttons = False\r\n can_scramble = True\r\n show_target_number = False\r\n draw_num = False\r\n pick_target_number = False\r\n ticker = -1\r\n start_countdown = False\r\n countdown_ticker = -1\r\n large_numbers = 10 #just some random number 
that's not 0-4\r\n ##Creating the 'how many large numbers' buttons##\r\n\r\n how_many_large_buttons = []\r\n for i in range(5):\r\n how_many_large_buttons.append(\r\n button(245+20+(1920-450)*i/5,885,170,170,color=(150,150,255),text=f'{i}')\r\n )\r\n \r\n #Function to pick numbers#\r\n def pick_numbers(num_large_numbers):\r\n large_numbers = [25,50,75,100]\r\n small_numbers = [1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10]\r\n temp_numbers = []\r\n\r\n for i in range(num_large_numbers):\r\n i = random.randint(0,len(large_numbers)-1)\r\n temp_numbers.append(large_numbers[i])\r\n large_numbers.pop(i)\r\n num_small_numbers = 6- num_large_numbers\r\n for j in range(num_small_numbers):\r\n i = random.randint(0,len(small_numbers)-1)\r\n temp_numbers.append(small_numbers[i])\r\n small_numbers.pop(i)\r\n\r\n return temp_numbers\r\n\r\n while True:\r\n\r\n mouse_pos = pygame.mouse.get_pos()\r\n\r\n for event in pygame.event.get():\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == MOUSEBUTTONDOWN:\r\n if refresh_button.isOver(mouse_pos):\r\n run_numbers()\r\n return\r\n if menu_button.isOver(mouse_pos):\r\n return\r\n for i in range(5):\r\n button_ = how_many_large_buttons[i]\r\n if button_.isOver(mouse_pos) and not hide_num_buttons:\r\n large_numbers = i\r\n numbers = pick_numbers(large_numbers)\r\n hide_num_buttons = True\r\n if can_scramble and scramble_button.isOver(mouse_pos):\r\n pick_target_number = True\r\n can_scramble = False\r\n\r\n ##Calculating####\r\n #Scrambling#\r\n if pick_target_number:\r\n ticker +=1\r\n x = 97\r\n if ticker %3 == 0 and ticker <=x:\r\n show_target_number = True\r\n target_number = random.randint(101,999)\r\n if ticker == x+1:\r\n target_number = random.randint(101,999)\r\n if ticker == x+120:\r\n start_countdown = True\r\n\r\n #Handling the countdown#\r\n if start_countdown:\r\n countdown_ticker+=1\r\n\r\n '''Handling the countdown'''\r\n if countdown_ticker%60 == 0 and countdown_ticker < 60*30-1:\r\n if countdown_ticker == 0:\r\n mixer.music.play()\r\n draw_num = True\r\n num_button = button(1600,350,100,100,text=f'{30 - countdown_ticker//60}')\r\n if countdown_ticker >= 60*30:\r\n draw_num = False\r\n else:\r\n countdown_ticker=-1\r\n\r\n #####Drawing######\r\n\r\n #Background#\r\n pygame.draw.rect(screen,color=(0,100,255),rect=background)\r\n #Draw the number thingee#\r\n pygame.draw.rect(screen,color=(0,0,0),rect=Rect(1920/2 -600/2,100,600,200))\r\n if show_target_number:\r\n font = pygame.font.SysFont('comicsans', size=200)\r\n text = font.render(str(target_number), 1, (255,255,255))\r\n screen.blit(text, (1920/2 -600/2 + (600/2 - text.get_width()/2), 100 + (200/2 - text.get_height()/2)))\r\n #Number holders\r\n\r\n for i in range(6):\r\n pygame.draw.rect(screen,color=(150,150,255),rect=Rect(245+(1920-450)*i/6,645,170,170)) # The outline\r\n pygame.draw.rect(screen,color=(255,255,255),rect=Rect(250+(1920-450)*i/6,650,160,160))\r\n\r\n #Now draw the selected numbers on\r\n if len(numbers) >= 1:\r\n for i in range(len(numbers)):\r\n number = button(250+(1920-450)*i/6,650,160,160,text=str(numbers[i]))\r\n number.draw_text(screen,color=(0,0,0),size=100)\r\n\r\n ### Drawing the 'how many large numbers' buttons ###\r\n if not hide_num_buttons:\r\n for button_ in how_many_large_buttons:\r\n button_.draw_wtext(screen,font_size=100)\r\n\r\n ##Drawing countdown##\r\n\r\n if draw_num:\r\n num_button.draw_text(screen,size=300)\r\n\r\n ##Refresh 
button##\r\n refresh_button.draw_wtext(screen)\r\n #Scramble button#\r\n scramble_button.draw_wtext(screen)\r\n #Menu button#\r\n menu_button.draw_wtext(screen)\r\n\r\n clock.tick(60)\r\n pygame.display.update()\r\n","sub_path":"Countdown/countdown_numbers.py","file_name":"countdown_numbers.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440165836","text":"import scipy.sparse as sp\nimport scipy.sparse.linalg as la\nimport numpy as np\nimport math as m\nimport time\nimport sys\nimport os\n\nresultDir = os.environ.get('RESULTS')\nif resultDir == None :\n print (\"WARNING! $RESULTS not set! Attempt to write results will fail!\\n\")\n\nbotConc = float(sys.argv[1])\ntopConc = float(sys.argv[2])\nl = float(sys.argv[3])\nL = np.uint32(int(sys.argv[4]))\nnumVecs = int(sys.argv[5])\nboundMult = float(sys.argv[6])\ntolerance = float(sys.argv[7])\nfileInfo = sys.argv[8]\n\n\nresultsPlace = resultDir+\"/\"+fileInfo+\"/\"\n\nif not os.path.exists(resultsPlace):\n os.makedirs(resultsPlace)\n\nwith open(resultsPlace+'settings', 'w') as f:\n f.write('BotConcentration = ' + str(botConc) +'\\n')\n f.write('TopConcentration = ' + str(topConc) +'\\n')\n f.write('Lambda = ' + str(l) +'\\n')\n f.write('SysSize = ' + str(L) +'\\n')\n f.write('NumVecs = ' + str(numVecs)+'\\n')\n f.write('BoundMult = ' + str(boundMult)+'\\n')\n f.write('Tolerance = ' +str(tolerance)+'\\n')\n\nN = np.uint32(2**(L+4))\n\nrateMatrix = sp.lil_matrix((N, N), dtype = np.float64)\ndensityMatrix = sp.lil_matrix((L+4, N), dtype = np.float64)\ncurrentMatrix = sp.lil_matrix((L+3, N), dtype = np.float64)\n\ntopIncRate = (1.0+l)*boundMult*m.sqrt(l*topConc/(1.0-topConc))\ntopOutRate = (1.0+l)*boundMult*m.sqrt(l*(1.0-topConc)/topConc)\nbotIncRate = (1.0+l)*boundMult*m.sqrt(l*botConc/(1.0-botConc))\nbotOutRate = (1.0+l)*boundMult*m.sqrt(l*(1.0-botConc)/botConc)\n\n\nt0 = time.clock()\nfor i in range(0, N):\n state = format(i, '032b')\n totLeakage = 0.0\n for position in range(32 - 4 - L, 32-2 - L):\n if state[position] == '1':\n newState = state[:position]+'0'+state[(position+1):]\n# print state\n# print newState\n j = np.uint32(int(newState, 2))\n# print format(j, '032b')+\"\\n\"\n rateMatrix[j, i] += botOutRate\n totLeakage += botOutRate\n densityMatrix[position-(32-4-L), i] = 1\n else:\n newState = state[:position]+'1'+state[(position+1):]\n# print state\n# print newState\n j = np.uint32(int(newState, 2))\n# print format(j, '032b')+\"\\n\"\n rateMatrix[j, i] += botIncRate\n totLeakage += botIncRate\n for position in range(32-3 - L , 32-1):\n if state[position] == '1':\n densityMatrix[position-(32-4-L), i] = 1\n if state[position-1] == '0':\n newState = state[:(position-1)]+'1'+'0'+state[(position+1):]\n# print state\n# print newState\n j = np.uint32(int(newState, 2))\n# print format(j, '032b')+\"\\n\"\n if state[position+1] == '0':\n rateMatrix[j, i] += 1.0\n currentMatrix[position - (32-3-L), i] -= 1.0\n totLeakage += 1.0\n else:\n rateMatrix[j, i] += l\n currentMatrix[position - (32-3-L), i] -= l\n totLeakage += l\n if state[position+1] == '0':\n newState = state[:(position)]+'0'+'1'+state[(position+2):]\n# print state\n# print newState\n j = np.uint32(int(newState, 2))\n# print format(j, '032b')+\"\\n\"\n if state[position-1] == '0':\n rateMatrix[j, i] += 1.0\n currentMatrix[position + 1 - (32-3-L), i] += 1.0\n totLeakage += 1.0\n else:\n rateMatrix[j, i] += l\n currentMatrix[position + 1 - (32-3-L), i] += l\n totLeakage += l\n for 
position in range(32 - 2, 32):\n if state[position] == '1':\n densityMatrix[position-(32-4-L), i] = 1\n newState = state[:position]+'0'+state[(position+1):]\n# print state\n# print newState\n j = np.uint32(int(newState, 2))\n# print format(j, '032b')+\"\\n\"\n rateMatrix[j, i] += topOutRate\n totLeakage += topOutRate\n else:\n newState = state[:position]+'1'+state[(position+1):]\n# print state\n# print newState\n j = np.uint32(int(newState, 2))\n# print format(j, '032b')+\"\\n\"\n rateMatrix[j, i] += topIncRate\n totLeakage += topIncRate\n rateMatrix[i, i] -= totLeakage\n\n\n\n#print(\"RateMatrix created.\")\n\ncscRateMatrix = rateMatrix.tocsc()\ncscDensityMatrix = densityMatrix.tocsc()\ncscCurrentMatrix = currentMatrix.tocsc()\n\n#print(\"RateMatrix reformatted.\")\n\nvalsLR, vecsLR = la.eigs(cscRateMatrix, k=numVecs, tol=tolerance, which='SR', maxiter=100*N)\n#valsSR, vecsSR = la.eigs(cscRateMatrix, k=numVecs, tol=tolerance, which='SR', maxiter=100*N)\n#valsLI, vecsLI = la.eigs(cscRateMatrix, k=numVecs, tol=tolerance, which='LI', maxiter=100*N)\n#valsSI, vecsSI = la.eigs(cscRateMatrix, k=numVecs, tol=tolerance, which='SI', maxiter=100*N)\nerrs = []\nfor index in range(0, numVecs):\n vecsLR[:, index] = np.sign(vecsLR[N/2, index])*vecsLR[:, index]/(np.linalg.norm(vecsLR[:, index], 1))\n errs.append(2.0*np.linalg.norm(cscRateMatrix.dot(vecsLR[:, index])-valsLR[index]*vecsLR[:, index], 1)/(np.linalg.norm(cscRateMatrix.dot(vecsLR[:, index]), 1)+np.abs(valsLR[index])*np.linalg.norm(vecsLR[:, index], 1)))\n# vecsSR[:, index] = np.sign(vecsSR[N/2, index])*vecsSR[:, index]/(np.linalg.norm(vecsSR[:, index], 1))\n# errs.append(2.0*np.linalg.norm(cscRateMatrix.dot(vecsSR[:, index])-valsSR[index]*vecsSR[:, index], 1)/(np.linalg.norm(cscRateMatrix.dot(vecsSR[:, index]), 1)+np.abs(valsSR[index])*np.linalg.norm(vecsSR[:, index], 1)))\n# vecsLI[:, index] = np.sign(vecsLI[N/2, index])*vecsLI[:, index]/(np.linalg.norm(vecsLI[:, index], 1))\n# errs.append(2.0*np.linalg.norm(cscRateMatrix.dot(vecsLI[:, index])-valsLI[index]*vecsLI[:, index], 1)/(np.linalg.norm(cscRateMatrix.dot(vecsLI[:, index]), 1)+np.abs(valsLI[index])*np.linalg.norm(vecsLI[:, index], 1)))\n# vecsSI[:, index] = np.sign(vecsSI[N/2, index])*vecsSI[:, index]/(np.linalg.norm(vecsSI[:, index], 1))\n# errs.append(2.0*np.linalg.norm(cscRateMatrix.dot(vecsSI[:, index])-valsSI[index]*vecsSI[:, index], 1)/(np.linalg.norm(cscRateMatrix.dot(vecsSI[:, index]), 1)+np.abs(valsSI[index])*np.linalg.norm(vecsSI[:, index], 1)))\n\nt1 = time.clock()\n\nprint(str(t1-t0)+\"s for computation of this session's eigenpairs.\\n\")\n\n\n\n\n#print(\"So final result for the eigenvalues is \"+str(vals)+\"\\n\")\n#print(\"|Ax-lambda x| = \")\n#for index in range(0, numVecs):\n# print str(np.linalg.norm(cscRateMatrix.dot(vecs[:, index])-vals[index]*vecs[:, index], 1))+\" with |x| = \"+str(np.linalg.norm(vecs[:, index], 1))\n\n#print(\"\\nThe mean occupation should be:\\n\")\navDens = cscDensityMatrix.dot(vecsLR)\n#print avDens\n#print(\"\\nThe mean current should be:\\n\")\navCurr = cscCurrentMatrix.dot(vecsLR)\n#print avCurr\n\n\nwith open(resultsPlace+'eigenvalues.dat', 'w') as f:\n for eig in valsLR:\n f.write(str(np.real(eig))+'\\n')\n\n#with open(resultsPlace+'fullEigenvalues.dat', 'w') as f:\n# for eig in vals:\n# f.write(str(eig)+'\\n')\n\nwith open(resultsPlace+'fullEigenvalues.dat', 'w') as f:\n for eig in valsLR:\n f.write(str(eig)+'\\n')\n# for eig in valsSR:\n# f.write(str(eig)+'\\n')\n# for eig in valsLI:\n# f.write(str(eig)+'\\n')\n# for eig in valsSI:\n# 
f.write(str(eig)+'\\n')\n\n\n\n\n#for index in range(0, 1):\n# with open(resultsPlace+'densVec'+str(index)+'.dat', 'w') as f:\n# for position in range(0, L+4):\n# f.write(str(np.real(avDens[position, index]))+'\\n')\n\n#for index in range(0, numVecs):\n# with open(resultsPlace+'fullDensVec'+str(index)+'.dat', 'w') as f:\n# for position in range(0, L+4):\n# f.write(str(avDens[position, index])+' ')\n\n#for index in range(0, 1):\n# with open(resultsPlace+'currVec'+str(index)+'.dat', 'w') as f:\n# for position in range(0, L+3):\n# f.write(str(np.real(avCurr[position, index]))+'\\n')\n\n#for index in range(0, numVecs):\n# with open(resultsPlace+'fullCurrVec'+str(index)+'.dat', 'w') as f:\n# for position in range(0, L+3):\n# f.write(str(avCurr[position, index])+' ')\n\nwith open(resultsPlace+'eigenErrs.dat', 'w') as f:\n for err in errs:\n f.write(str(err)+'\\n')\n\n\n\n#with open(resultsPlace+'multiProds.dat', 'w') as f:\n# for i in range(0, numVecs):\n# for j in range(0, numVecs):\n# f.write(str(abs(np.vdot(vecsLR[:, j], vecsLR[:, i]))/(np.linalg.norm(vecsLR[:, j], 2)*np.linalg.norm(vecsLR[:, i], 2)))+' ')\n# f.write('\\n')\n \n\n#solvedSoln = la.lsmr(cscRateMatrix, b)\n#print solvedSoln\n#print(\"\\nThe mean occupation should be:\\n\")\n#newAvDens = cscDensityMatrix.dot(solvedSoln)\n#print newAvDens\n#print(\"\\nThe mean current should be:\\n\")\n#newAvCurr = cscCurrentMatrix.dot(solvedSoln)\n#print newAvCurr\n","sub_path":"codes/exact/matStuff/singleCalcSparseSysRep.py","file_name":"singleCalcSparseSysRep.py","file_ext":"py","file_size_in_byte":8862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506914483","text":"from numpy import linalg as la\n\n\ndef calculate_davies_bouldin(data):\n \"\"\"\n example: https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index\n :param data:\n :return:\n \"\"\"\n if not data.is_clusterized():\n return -1\n\n if data.clusters_amount() == 1:\n return 1\n\n db_for_clusters = []\n for label in data.get_labels_list():\n db_for_clusters.append(calc_davies_bouldin_for_cluster(data, label))\n\n return sum(db_for_clusters)/data.clusters_amount()\n\n\ndef calc_davies_bouldin_for_cluster(data, cluster_label):\n # calculating r_ij_values\n r_ij_values = []\n\n if data.clusters_amount() == 1:\n return 1\n\n for label in data.get_labels_list():\n if label == cluster_label:\n continue\n r_ij_values.append(r_ij(data, cluster_label, label))\n\n return max(r_ij_values)\n\n\ndef r_ij(data, ci, cj):\n \"\"\"\n A measure of similarity of clusters ci and cj\n defined as sum of clusters sparsity divided by\n clusters dissimilarity\n \"\"\"\n return (s(data, ci) + s(data, cj))/d(data.cluster(ci), data.cluster(cj))\n\n\ndef s(data, cluster_label):\n \"\"\"\n s identifies sparsity of cluster\n the more average distance between cluster elements is,\n the more is cluster sparsity\n \"\"\"\n\n cluster = data.cluster(cluster_label)\n center = data.get_cluster_center(cluster_label).as_matrix()\n\n # calculate all distances in cluster\n distances = []\n for index, row in cluster.iterrows():\n element = row.as_matrix()\n distances.append(la.norm(center - element))\n\n return sum(distances) / data.amount_of_elements_in_cluster(cluster_label)\n\n\ndef d(ci, cj):\n \"\"\"\n function identifies \"dissimilarity\" of two clusters\n as minimum distance between elements of those clusters\n \"\"\"\n distances = []\n\n for index, row in ci.iterrows():\n for another_index, another_row in cj.iterrows():\n dist = la.norm(row.as_matrix() - 
another_row.as_matrix())\n if dist != 0:\n distances.append(dist)\n\n return min(distances)\n","sub_path":"Processor/Validity/davies_bolduin.py","file_name":"davies_bolduin.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443109937","text":"import numpy as np\nfrom multiagent.core import World, Agent, Landmark, Goal\nfrom multiagent.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self):\n world = World()\n\n # add agents\n world.agents = [Agent() for i in range(2)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n agent.size = 0.1\n\n # add boxes\n n_box = 1 # One box and pushing to left\n self.boxes = [Landmark() for _ in range(n_box)]\n for i, box in enumerate(self.boxes):\n box.name = 'box %d' % i\n box.collide = True\n box.movable = True\n box.size = 0.25\n box.initial_mass = 7.\n box.index = i\n world.landmarks.append(box)\n\n # add targets\n self.targets = [Landmark() for _ in range(2)]\n for i, target in enumerate(self.targets):\n target.name = 'target %d' % i\n target.collide = False\n target.movable = False\n target.size = 0.05\n target.index = i\n world.landmarks.append(target)\n\n # add goals (used only for vis)\n world.goals = [Goal() for i in range(len(world.agents))]\n for i, goal in enumerate(world.goals):\n goal.name = 'goal %d' % i\n goal.collide = False\n goal.movable = False\n\n # make initial conditions\n self.reset_world(world)\n \n return world\n\n def reset_world(self, world):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n if i == 0:\n agent.color = np.array([1.0, 0.0, 0.0])\n elif i == 1:\n agent.color = np.array([0.0, 1.0, 0.0])\n else:\n raise NotImplementedError()\n\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n if \"box\" in landmark.name and landmark.index == 0:\n landmark.state.p_pos = np.array([-0.25, 0.0])\n elif \"target\" in landmark.name and landmark.index == 0:\n landmark.state.p_pos = np.array([-0.85, 0.0])\n elif \"target\" in landmark.name and landmark.index == 1:\n landmark.state.p_pos = np.array([+0.85, 0.0])\n else:\n raise ValueError()\n\n # random properties for goals (vis purpose)\n for i, goal in enumerate(world.goals):\n goal.color = world.agents[i].color\n goal.state.p_pos = np.zeros(world.dim_p) - 2 # Initialize outside of the box\n goal.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n for i, landmark in enumerate(world.landmarks):\n if \"box\" in landmark.name and landmark.index == 0:\n box0 = landmark\n elif \"target\" in landmark.name and landmark.index == 0:\n target0 = landmark\n elif \"target\" in landmark.name and landmark.index == 1:\n target1 = landmark\n else:\n raise ValueError()\n\n # Move box0 to target0 (One Box)\n dist = np.sum(np.square(box0.state.p_pos - target0.state.p_pos))\n\n return -dist\n\n def observation(self, agent, world):\n # get positions of all entities\n entity_pos = []\n for entity in world.landmarks:\n entity_pos.append(entity.state.p_pos)\n assert len(entity_pos) == len(self.boxes) + len(self.targets)\n\n # Add other agent position\n other_pos = []\n for other in 
world.agents:\n if other is agent: \n continue\n other_pos.append(other.state.p_pos)\n\n return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos)\n","sub_path":"multiagent-particle-envs/multiagent/scenarios/complex_push.py","file_name":"complex_push.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528274961","text":"import os\nimport shutil\nimport glob\nimport re\nfrom urllib.parse import urlparse\n\ngh_tree_base_url = 'https://github.com/EnterpriseDB/edb-k8s-doc/tree/master/'\n\ndef rewrite_any_relative_links(line, gh_relative_path):\n match = re.search(r'\\[.+\\]\\((.+)\\)', line)\n if match and match[1]:\n domain = urlparse(match[1]).netloc\n if not domain and not match[1].startswith('#') and not 'github.com' in match[1]:\n split_path = gh_relative_path.split('/')\n dot_dot_count = len(re.findall(r'\\.\\./', match[1]))\n gh_relative_folder_path = '/'.join(split_path[0:len(split_path) - 1 - dot_dot_count])\n new_link = gh_tree_base_url+gh_relative_folder_path+'/'+match[1].replace('../', '')\n return re.sub(r'\\]\\(.+\\)', ']({})'.format(new_link), line)\n return line\n\ndef process_md(file_path):\n if file_path.endswith('README.md'):\n new_file_path = file_path.replace('README.md', 'index.mdx')\n else:\n new_file_path = file_path.replace('.md', '.mdx')\n\n with open(new_file_path, 'w') as new_file:\n with open(file_path, 'r') as md_file:\n copying = False\n previous_line_was_blank = False\n gh_relative_path = file_path.replace('external_sources/k8s_docs/kubernetes/', '')\n\n for line in md_file:\n if copying:\n line_blank = line.strip() == ''\n if line_blank and not previous_line_was_blank:\n previous_line_was_blank = True\n new_file.write(line)\n elif not line_blank:\n previous_line_was_blank = False\n new_file.write(\n rewrite_any_relative_links(\n line.replace('
', '
'),\n gh_relative_path\n )\n )\n if line.startswith('#') and not copying:\n copying = True\n new_file.write(\"---\\ntitle: '{0}'\\noriginalFilePath: '{1}'\\n---\\n\\n\".format(\n re.sub(r'#+ ', '', line).strip(),\n gh_relative_path\n ))\n os.remove(file_path)\n\ndef source_k8s_docs():\n print('Pulling k8s_docs...')\n os.system('git clone -b master https://github.com/EnterpriseDB/edb-k8s-doc.git external_sources/k8s_docs/kubernetes')\n\n print('Processing k8s_docs...')\n files = glob.glob('external_sources/k8s_docs/kubernetes/**/*.md', recursive=True)\n for file_path in files:\n process_md(file_path)\n\nif __name__ == '__main__':\n source_k8s_docs()\n","sub_path":"scripts/source/source_k8s_docs.py","file_name":"source_k8s_docs.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114800642","text":"# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\nimport pytest\nimport numpy as np\nfrom biotite.structure.info import residue\nfrom biotite.structure import Atom\nfrom biotite.structure import array\nfrom biotite.structure import BondList\nfrom biotite.structure import charges\n\n\n# First testing partial charge of carbon in the molecules given in table\n# 3 of the publication\n# Since some of the molecules are not available in the Chemical \n# Components Dictionary, the respective AtomArrays are constructed via\n# Biotite and the coordinates are arbitrarily set to the origin since\n# the decisive information is the BondList\n\n# Creating atoms to build molecules with\ncarbon = Atom([0, 0, 0], element=\"C\")\n\nhydrogen = Atom([0, 0, 0], element =\"H\")\n\noxygen = Atom([0, 0, 0], element =\"O\")\n\nnitrogen = Atom([0, 0, 0], element =\"N\")\n\nfluorine = Atom([0, 0, 0], element =\"F\")\n\n# Building molecules\nmethane = array([carbon, hydrogen, hydrogen, hydrogen, hydrogen])\nmethane.bonds = BondList(\n methane.array_length(),\n np.array([[0,1], [0,2], [0,3], [0,4]])\n)\n\n\nethane = array(\n [carbon, carbon, hydrogen, hydrogen, hydrogen, hydrogen, hydrogen,\n hydrogen]\n)\nethane.bonds = BondList(\n ethane.array_length(),\n np.array([[0,1], [0,2], [0,3], [0,4], [1,5], [1,6], [1,7]])\n)\n\n\nethylene = array(\n [carbon, carbon, hydrogen, hydrogen, hydrogen, hydrogen]\n)\nethylene.bonds = BondList(\n ethylene.array_length(),\n np.array([[0,1], [0,2], [0,3], [1,4], [1,5]])\n)\n\n\nacetylene = array(\n [carbon, carbon, hydrogen, hydrogen]\n)\nacetylene.bonds = BondList(\n acetylene.array_length(),\n np.array([[0,1], [0,2], [1,3]])\n)\n\n\nfluoromethane = array(\n [carbon, fluorine, hydrogen, hydrogen, hydrogen]\n)\nfluoromethane.bonds = BondList(\n fluoromethane.array_length(),\n np.array([[0,1], [0,2], [0,3], [0,4]])\n)\n\n\ndifluoromethane = array(\n [carbon, fluorine, fluorine, hydrogen, hydrogen]\n)\ndifluoromethane.bonds = BondList(\n difluoromethane.array_length(),\n np.array([[0,1], [0,2], [0,3], [0,4]])\n)\n\n\ntrifluoromethane = array(\n [carbon, fluorine, fluorine, fluorine, hydrogen]\n)\ntrifluoromethane.bonds = BondList(\n trifluoromethane.array_length(),\n np.array([[0,1], [0,2], [0,3], [0,4]])\n)\n\n\ntetrafluoromethane = array(\n [carbon, fluorine, fluorine, fluorine, fluorine]\n)\ntetrafluoromethane.bonds = BondList(\n tetrafluoromethane.array_length(),\n np.array([[0,1], [0,2], [0,3], [0,4]])\n)\n\n\nfluoroethane = array(\n [carbon, carbon, fluorine, hydrogen, hydrogen, 
hydrogen, \n     hydrogen, hydrogen]\n)\nfluoroethane.bonds = BondList(\n    fluoroethane.array_length(),\n    np.array([[0,1], [0,2], [0,3], [0,4], [1,5], [1,6], [1,7]])\n)\n\n\ntrifluoroethane = array(\n    [carbon, carbon, fluorine, fluorine, fluorine, hydrogen,\n     hydrogen, hydrogen]\n)\ntrifluoroethane.bonds = BondList(\n    trifluoroethane.array_length(),\n    np.array([[0,1], [0,2], [0,3], [0,4], [1,5], [1,6], [1,7]])\n)\n\n\nmethanole = array(\n    [carbon, oxygen, hydrogen, hydrogen, hydrogen, hydrogen]\n)\nmethanole.bonds = BondList(\n    methanole.array_length(),\n    np.array([[0,1], [0,2], [0,3], [0,4], [1,5]])\n)\n\n\ndimethyl_ether = array(\n    [carbon, carbon, oxygen, hydrogen, hydrogen, hydrogen, hydrogen,\n     hydrogen, hydrogen]\n)\ndimethyl_ether.bonds = BondList(\n    dimethyl_ether.array_length(),\n    np.array([[0,2], [1,2], [0,3], [0,4], [0,5], [1,6], [1,7], [1,8]])\n)\n\n\nformaldehyde = array(\n    [carbon, oxygen, hydrogen, hydrogen]\n)\nformaldehyde.bonds = BondList(\n    formaldehyde.array_length(),\n    np.array([[0,1], [0,2], [0,3]])\n)\n\n\nacetaldehyde = array(\n    [carbon, carbon, oxygen, hydrogen, hydrogen, hydrogen, hydrogen]\n)\nacetaldehyde.bonds = BondList(\n    acetaldehyde.array_length(),\n    np.array([[0,1], [1,2], [0,3], [0,4], [0,5], [1,6]])\n)\n\n\nacetone = array(\n    [carbon, carbon, carbon, oxygen, hydrogen, hydrogen, hydrogen,\n     hydrogen, hydrogen, hydrogen]\n)\nacetone.bonds = BondList(\n    acetone.array_length(),\n    np.array([[0,1], [1,2], [1,3], [0,4], [0,5], [0,6], [2,7], [2,8],\n              [2,9]])\n)\n\n\nhydrogen_cyanide = array(\n    [carbon, nitrogen, hydrogen]\n)\nhydrogen_cyanide.bonds = BondList(\n    hydrogen_cyanide.array_length(),\n    np.array([[0,1], [0,2]])\n)\n\n\nacetonitrile = array(\n    [carbon, carbon, nitrogen, hydrogen, hydrogen, hydrogen]\n)\nacetonitrile.bonds = BondList(\n    acetonitrile.array_length(),\n    np.array([[0,1], [1,2], [0,3], [0,4], [0,5]])\n)\n\n# For this purpose, parametrization via pytest is performed\n@pytest.mark.parametrize(\"molecule, expected_results\", [\n    (methane, (-0.078,)),\n    (ethane, (-0.068, -0.068)),\n    (ethylene, (-0.106, -0.106)),\n    (acetylene, (-0.122, -0.122)),\n    (fluoromethane, (0.079,)),\n    (difluoromethane, (0.23,)),\n    (trifluoromethane, (0.38,)),\n    (tetrafluoromethane, (0.561,)),\n    (fluoroethane, (0.087, -0.037)),\n    (trifluoroethane, (0.387, 0.039)),\n    (methanole, (0.033,)),\n    (dimethyl_ether, (0.036, 0.036)),\n    (formaldehyde, (0.115,)),\n    (acetaldehyde, (-0.009, 0.123)),\n    (acetone, (-0.006, 0.131, -0.006)),\n    (hydrogen_cyanide, (0.051,)),\n    (acetonitrile, (0.023, 0.06))\n])\ndef test_partial_charges(molecule, expected_results):\n    \"\"\"\n    Test whether the partial charges of the carbon atoms comprised in\n    the molecules given in table 3 of the publication computed in this\n    implementation correspond to the values given in the publication\n    within a certain tolerance range.\n    \"\"\"\n    # use a distinct local name so the imported 'charges' module is not shadowed\n    computed_charges = charges.partial_charges(molecule)\n    assert computed_charges[molecule.element == \"C\"].tolist() == \\\n        pytest.approx(expected_results, abs=1e-2)\n\n\n@pytest.mark.parametrize(\"molecule\", [\n    (methane),\n    (ethane),\n    (ethylene),\n    (acetylene),\n    (fluoromethane),\n    (difluoromethane),\n    (trifluoromethane),\n    (tetrafluoromethane),\n    (fluoroethane),\n    (trifluoroethane),\n    (methanole),\n    (dimethyl_ether),\n    (formaldehyde),\n    (acetaldehyde),\n    (acetone),\n    (hydrogen_cyanide),\n    (acetonitrile)\n])\ndef test_total_charge_zero(molecule):\n    \"\"\"\n    In the case of the 17 molecules given in table 3, it is verified\n    whether the sum of all partial charges equals the sum\n    of all formal charges 
(in our case zero since we are exclusively\n dealing with uncharged molecules).\n \"\"\"\n total_charge = np.sum(charges.partial_charges(molecule))\n assert total_charge == pytest.approx(0, abs=1e-15)","sub_path":"tests/structure/test_charges.py","file_name":"test_charges.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"461498919","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 19 18:23:00 2017\n\n@author: ajaver\n\"\"\"\nimport os\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nimport random\nimport multiprocessing as mp\n\nimport time\nimport matplotlib.pylab as plt\n\ncol2ignore = ['Unnamed: 0', 'exp_name', 'id', 'base_name', 'date', \n 'original_video', 'directory', 'strain',\n 'strain_description', 'allele', 'gene', 'chromosome',\n 'tracker', 'sex', 'developmental_stage', 'ventral_side', 'food',\n 'habituation', 'experimenter', 'arena', 'exit_flag', 'experiment_id',\n 'n_valid_frames', 'n_missing_frames', 'n_segmented_skeletons',\n 'n_filtered_skeletons', 'n_valid_skeletons', 'n_timestamps',\n 'first_skel_frame', 'last_skel_frame', 'fps', 'total_time',\n 'microns_per_pixel', 'mask_file_sizeMB', 'skel_file', 'frac_valid',\n 'worm_index', 'n_frames', 'n_valid_skel', 'first_frame']\n\n\ndef _h_cross_validate(args):\n feats_r, col_feats, id_index_str, n_samples, cross_validation_fold, n_estimators = args\n ss = feats_r.groupby('strain').apply(lambda x :x.iloc[random.sample(range(0,len(x)), n_samples)])\n xx = ss[col_feats].values\n \n #center, this shouldn't make that much different in random forest\n #xx = (xx-np.mean(xx, axis=0))/(np.std(xx, axis=0))\n #xx = xx[:, ~np.any(np.isnan(xx), axis=0)]\n \n yy = ss[id_index_str].values\n \n clf = RandomForestClassifier(n_estimators=n_estimators)\n scores = cross_val_score(clf, xx, yy, cv = cross_validation_fold)\n #print(scores)\n return scores\n\n\n\nif __name__ == '__main__':\n cross_validation_fold = 5\n base_strains = ['JU393', 'ED3054', 'JU394', \n 'N2', 'JU440', 'ED3021', 'ED3017', \n 'JU438', 'JU298', 'JU345', 'RC301', \n 'AQ2947', 'ED3049',\n 'LSJ1', 'JU258', 'MY16', \n 'CB4852', 'CB4856', 'CB4853',\n ]\n save_dir = '/Users/ajaver/OneDrive - Imperial College London/classify_strains/manual_features/SWDB/'\n feat_files = {'OW_old' : 'ow_features_old_SWDB.csv'}\n\n #%%\n feat_data = {}\n for db_name, bn in feat_files.items():\n fname = os.path.join(save_dir, 'F_' + bn)\n feats = pd.read_csv(fname)\n \n ss = np.sort(feats['strain'].unique())\n s_dict = {s:ii for ii,s in enumerate(ss)}\n feats['strain_id'] = feats['strain'].map(s_dict)\n \n base_strains_id = {x:i for i,x in enumerate(base_strains)}\n feats['strain_base_id'] = feats['strain'].map(base_strains_id)\n feat_data[db_name] = feats\n \n col2ignore_r = col2ignore + ['strain_base_id', 'strain_id', 'set_type']\n #%% \n #SET TO TRUE TO FIT A MODEL USING ALL THE FEATURES\n if False:\n n_trials = 200\n \n n_batch= mp.cpu_count()\n p = mp.Pool(n_batch)\n \n id_index_str = 'strain_base_id'\n \n cross_val_results = {}\n \n start = time.time()\n for db_name, feats in feat_data.items():\n print(db_name)\n \n \n col_feats = [x for x in feats.columns if x not in col2ignore_r]\n good = ~feats['strain_base_id'].isnull() & (feats['set_type'] == 'train')\n feats_r = feats[good]\n \n n_samples = feats_r['strain'].value_counts().min()\n args = feats_r, col_feats, 
id_index_str, n_samples, cross_validation_fold, 1000\n \n #func = partial(_h_cross_validate, )\n scores = list(p.map(_h_cross_validate, n_trials*[args]))\n scores = np.concatenate(scores)\n #scores = _h_cross_validate(feats_r, col_feats, id_index_str, n_samples, cross_validation_fold)\n \n print(\"%s Accuracy: %0.2f (+/- %0.2f)\" % (db_name, scores.mean(), scores.std() * 2)) \n cross_val_results[db_name] = scores\n \n \n print(time.time() - start)\n \n \n #%%\n #SET TO TRUE TO FIT TO SELECT FEATURES IN BASE OF THE CLASSIFICATION ACCURACY\n if False:\n #I am using less trees and trials too speed up things\n n_trials = 20 \n n_trees = 150\n \n n_batch= mp.cpu_count()\n p = mp.Pool(n_batch)\n \n id_index_str = 'strain_base_id'\n \n cross_val_results = {}\n \n \n for db_name, feats in feat_data.items():\n print(db_name)\n \n col_feats = [x for x in feats.columns if x not in col2ignore_r]\n good = ~feats['strain_base_id'].isnull() & (feats['set_type'] == 'train')\n feats_r = feats[good]\n \n n_samples = feats_r['strain'].value_counts().min()\n \n \n selected_feats = []\n \n for n_feat in range(30):\n \n \n all_scores = []\n for iif, ff in enumerate(col_feats):\n if ff in selected_feats:\n continue\n \n start = time.time()\n c_feats = selected_feats + [ff]\n \n args = feats_r, c_feats, id_index_str, n_samples, cross_validation_fold, n_trees\n #func = partial(_h_cross_validate, )\n scores = list(p.map(_h_cross_validate, n_trials*[args]))\n scores = np.concatenate(scores)\n #scores = _h_cross_validate(feats_r, col_feats, id_index_str, n_samples, cross_validation_fold)\n \n print(len(c_feats), iif+1, len(col_feats), c_feats)\n print(\"%s Accuracy: %0.2f (+/- %0.2f)\" % (db_name, scores.mean(), scores.std() * 2)) \n print(scores.min(), scores.max(), scores.mean() - 2*scores.std())\n all_scores.append((c_feats, scores))\n \n print(time.time() - start)\n \n selected_feats = max(all_scores, key=lambda x : x[1].mean() - 2*x[1].std())[0]\n with open('/Users/ajaver/OneDrive - Imperial College London/classify_strains/manual_features/SWDB/best_feats_OW.txt', 'a+') as fid:\n fid.write(', '.join(selected_feats) + '\\n')\n #%%\n #calculate feature accuracy changes depending on the number of features\n selected_feats = ['midbody_crawling_amplitude_abs', 'foraging_amplitude_abs', 'hips_bend_mean_pos', 'midbody_width_forward', 'neck_bend_sd_forward_abs', 'midbody_speed_pos', 'foraging_speed_pos', 'eigen_projection_4_forward_neg', 'bend_count_backward', 'tail_crawling_frequency_pos', 'length_forward', 'tail_bend_mean_forward_neg', 'head_bend_sd_forward_pos', 'midbody_bend_sd', 'head_bend_sd_neg', 'tail_tip_motion_direction_backward_pos', 'eigen_projection_3_paused_abs', 'head_tip_speed_paused_pos', 'eigen_projection_1_forward_neg', 'eigen_projection_5_forward_pos', 'head_tip_speed_forward', 'tail_speed_forward_neg', 'tail_bend_mean_forward', 'upsilon_turns_time_pos', 'path_range_paused', 'midbody_crawling_frequency', 'head_motion_direction_forward', 'width_length_ratio_backward', 'head_motion_direction_pos', 'path_curvature_paused']\n \n feats = feat_data['OW_old']\n id_index_str = 'strain_base_id'\n \n n_batch= mp.cpu_count()\n p = mp.Pool(n_batch)\n \n col_feats = [x for x in feats.columns if x not in col2ignore_r]\n good = ~feats['strain_base_id'].isnull()\n feats_r = feats[good]\n \n n_samples = feats_r['strain'].value_counts().min()\n \n all_scores = []\n for n_feats in range(len(selected_feats)):\n start = time.time()\n feat_cols = selected_feats[:n_feats+1]\n \n print(n_feats, feat_cols)\n n_trials = 200\n 
args = feats_r, feat_cols, id_index_str, n_samples, cross_validation_fold, 1000\n #func = partial(_h_cross_validate, )\n scores = list(p.map(_h_cross_validate, n_trials*[args]))\n scores = np.concatenate(scores)\n #scores = _h_cross_validate(feats_r, col_feats, id_index_str, n_samples, cross_validation_fold)\n \n print(\"%s Accuracy: %0.2f (+/- %0.2f)\" % (db_name, scores.mean(), scores.std() * 2)) \n all_scores.append(scores)\n print(time.time() - start) \n \n \n yy = [x.mean() for x in all_scores]\n err = [x.std() for x in all_scores]\n \n plt.figure()\n plt.errorbar(np.arange(1, len(yy)+1), yy, yerr=err)\n plt.ylabel('Accuracy')\n plt.xlabel('Number of features')\n plt.savefig('classification_accuracy.png')\n \n","sub_path":"manual_features/compare_classifier_reduced.py","file_name":"compare_classifier_reduced.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"3422","text":"import time\nimport tkinter as tk\n\nclass BouncingBalls(tk.Frame):\n\n def __init__(self, parent):\n super().__init__(parent)\n self.pack(fill=\"both\", expand=True)\n self.canvas = tk.Canvas(self, bg=\"white\")\n self.canvas.pack(fill=\"both\", expand=True)\n self.animate_yellow_ball()\n self.animate_red_ball()\n\n def animate_yellow_ball(self):\n x = 0\n ball = self.canvas.create_oval(x, 100, x+50, 150, fill=\"yellow\")\n while True:\n while x < 450:\n self.canvas.coords(ball, x, 100, 50+x, 150)\n x += 5\n self.update()\n time.sleep(0.02)\n\n while x > 0:\n self.canvas.coords(ball, x, 100, 50+x, 150)\n x -= 5\n self.update()\n time.sleep(0.02)\n\n def animate_red_ball(self):\n y = 0\n ball = self.canvas.create_oval(300, y, 350, y+50, fill=\"red\")\n while True:\n while y < 350:\n self.canvas.coords(ball, 300, y, 350, y+50)\n y += 3\n self.update()\n time.sleep(0.02)\n\n while y > 0:\n self.canvas.coords(ball, 300, y, 350, y+50)\n y -= 3\n self.update()\n time.sleep(0.02)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.title(\"Bouncing Balls\")\n root.geometry(\"500x400\")\n root.resizable(False, False)\n app = BouncingBalls(root)\n root.mainloop()\n\n","sub_path":"Midterm/midterm-part2/bouncing-balls-stub.py","file_name":"bouncing-balls-stub.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"630904303","text":"#!/usr/bin/python\nimport sys\nimport re\nimport os\n\n# These should point to the respective commands\nSVNLOOK = '/usr/bin/svnlook'\nMAX_FILENAME_LENGTH = 144\nALLOWED_CHARACTERS = \"A-Za-z0-9_ \\.\" #note that . 
would match any character so we have \\.\n\ndef reg(expression,value):\n return bool(re.search(expression,value))\n\n# Gets a command's output\ndef commandOutput(command):\n import subprocess\n process = subprocess.Popen(command.split(), stdout = subprocess.PIPE)\n return process.communicate()[0] # 0 is stdout\n\n# Returns an array of the changed files' names\ndef getChangedFiles(svnPath, transaction):\n # Run svnlook to find the files that were changed\n output = commandOutput('{} changed {} --transaction {}'.format(SVNLOOK, svnPath, transaction))\n\n # The output from svnlook looks like the following:\n # U folder/file1.cpp\n # A folder/file2.cpp\n # where U means updated and A means added\n def changed(fileName):\n return len(line) > 0 and line[0] in ('A', 'U')\n changedFiles = [line[4:] for line in output.split('\\n') if changed(line)]\n\n # svnlook inserts an empty line, so output.split() will have an extra\n # line with nothing in it - ignore the last lines if they're empty\n while len(changedFiles)>0 and 0 == len(changedFiles[-1]):\n changedFiles = changedFiles[:-1]\n\n return changedFiles\n\ndef validateFileName(fileName):\n # We only check the basename of the path. You can't create /1/2\n # without first creating /1, so this should work.\n basename = os.path.basename (fileName)\n diff = len(basename) - MAX_FILENAME_LENGTH\n if diff > 0:\n print >> sys.stderr, \"Filename \\'{}\\' too long by {} characters.\".format(fileName,diff)\n return 1 # Error found\n if not reg(\"^[\"+ALLOWED_CHARACTERS+\"]+\\Z\",fileName):\n print >> sys.stderr, \"Filename \\'{}\\' contains illegal characters.\".format(fileName)\n return 1 # Error found\n if reg(\"^[ ].+\\Z|^.+[ ]\\Z\",fileName):\n print >> sys.stderr, \"Filename \\'{}\\' starts or ends with a space.\".format(fileName)\n return 1 # Error found\n if reg(\"(PRN|CON|AUX|CLOCK|NUL|COM\\d|LPT\\d)\",fileName):\n print >> sys.stderr, \"Filename \\'{}\\' is a reserved filename.\".format(fileName)\n return 1 # Error found\n return 0 # No errors\n\n\nsvnPath = sys.argv[1]\ntransaction = sys.argv[2]\nfiles = getChangedFiles(svnPath, transaction)\n#files = [\"eaea\",\"ee #e\", \" e\"]\n\nerrorCount = 0\n\nfor fileN in files:\n errorCount = errorCount + validateFileName(fileN)\n\nsys.exit(errorCount)\n","sub_path":"bin/pre-commit.py","file_name":"pre-commit.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"86871563","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Utilities for text input preprocessing.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport re\r\nimport random\r\n\r\nfrom six import iterkeys, iteritems, u, string_types\r\n\r\nfrom six.moves import range\r\nfrom six.moves import zip\r\n\r\nimport torch\r\nfrom smart_open import *\r\n\r\ndef sequence_mask(lengths, max_len=None):\r\n \"\"\"\r\n Creates a boolean mask from sequence lengths.\r\n \"\"\"\r\n batch_size = lengths.numel()\r\n max_len = max_len or lengths.max()\r\n return (torch.arange(0, max_len)\r\n .type_as(lengths)\r\n .repeat(batch_size, 1)\r\n .lt(lengths.unsqueeze(1)))\r\n\r\ndef encode(input_lines, word_dict):\r\n \"\"\"\r\n encode list of strings into word-level representation\r\n \"\"\"\r\n lines = list(map(lambda t: list(map(lambda m: word_dict[m], t)), input_lines))\r\n return lines\r\n\r\n\r\ndef encode2Tensor(input_lines, word_dict, unk):\r\n \"\"\"\r\n encode list of 
strings into word-level representation (tensor) with unk\r\n \"\"\"\r\n lines = list(map(lambda t: torch.LongTensor(list(map(lambda m: word_dict.get(m, unk), t))), input_lines))\r\n return lines\r\n\r\ndef one_hot_(y, num_classes=None):\r\n\r\n \"\"\"Converts a class vector (integers) to binary class matrix.\r\n E.g. for use with categorical_crossentropy.\r\n # Arguments\r\n\r\n y: class vector to be converted into a matrix\r\n (integers from 0 to num_classes).\r\n num_classes: total number of classes.\r\n # Returns\r\n A binary matrix representation of the input.\r\n \"\"\"\r\n\r\n y = np.array(y, dtype='int')\r\n\r\n input_shape = y.shape\r\n\r\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\r\n\r\n input_shape = tuple(input_shape[:-1])\r\n\r\n y = y.ravel()\r\n\r\n if not num_classes:\r\n\r\n num_classes = np.max(y) + 1\r\n\r\n n = y.shape[0]\r\n\r\n categorical = np.zeros((n, num_classes), dtype=np.float32)\r\n\r\n categorical[np.arange(n), y] = 1\r\n\r\n output_shape = input_shape + (num_classes,)\r\n\r\n categorical = np.reshape(categorical, output_shape)\r\n\r\n return categorical\r\n\r\ndef any2utf8(text, errors='strict', encoding='utf8'):\r\n\r\n if isinstance(text, unicode):\r\n return text.encode('utf8')\r\n # do bytestring -> unicode -> utf8 full circle, to ensure valid utf8\r\n return unicode(text, encoding, errors=errors).encode('utf8')\r\n\r\n\r\nto_utf8 = any2utf8\r\n\r\n\r\ndef any2unicode(text, encoding='utf8', errors='strict'):\r\n\r\n if isinstance(text, unicode):\r\n return text\r\n return unicode(text, encoding, errors=errors)\r\n\r\n\r\nto_unicode = any2unicode\r\n\r\ndef lower_to_unicode(text, encoding='utf8', errors='strict'):\r\n \"\"\"Lowercase `text` and convert to unicode.\"\"\"\r\n return to_unicode(text.lower(), encoding, errors)\r\n\r\n\r\ndef clean_str(string):\r\n \"\"\"\r\n Tokenization/string cleaning for all datasets except for SST.\r\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\r\n \"\"\"\r\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n string = re.sub(r\"\\'s\", \" \\'s\", string)\r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n string = re.sub(r\"\\'re\", \" \\'re\", string)\r\n string = re.sub(r\"\\'d\", \" \\'d\", string)\r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n string = re.sub(r\",\", \" , \", string)\r\n string = re.sub(r\"!\", \" ! \", string)\r\n string = re.sub(r\"\\(\", \" \\( \", string)\r\n string = re.sub(r\"\\)\", \" \\) \", string)\r\n string = re.sub(r\"\\?\", \" \\? 
\", string)\r\n string = re.sub(r\"\\s{2,}\", \" \", string)\r\n return string.strip()\r\n\r\ndef pad_sequences(sequences, pad, max_length,is_pad_end=True):\r\n \"\"\"\r\n Args:\r\n sequences: a generator of list or tuple.\r\n pad: the value to pad with.\r\n Returns:\r\n a list of list where each sublist has same length.\r\n \"\"\"\r\n sequence_padded = []\r\n\r\n for seq in sequences:\r\n seq = list(seq)\r\n if is_pad_end:\r\n seq_ = seq[:max_length] + [pad] * max(max_length - len(seq), 0)\r\n else:\r\n seq_ = [pad] * max(max_length - len(seq), 0) + seq[:max_length]\r\n sequence_padded += [seq_]\r\n \r\n\r\n return sequence_padded\r\n\r\ndef load_glove_vocab(filename):\r\n \"\"\"Loads GloVe's vocab from a file.\r\n Args:\r\n filename (str): path to the glove vectors.\r\n Returns:\r\n set: a set of all words in GloVe.\r\n \"\"\"\r\n print('Building vocab...')\r\n with open(filename,encoding='utf-8') as f:\r\n vocab = {line.strip().split()[0] for line in f} #set\r\n print('- done. {} tokens'.format(len(vocab)))\r\n return dict(enumerate(vocab)) #return set\r\n\r\ndef load_word_embeddings(vocab, glove_filename, dim):\r\n \"\"\"Loads GloVe vectors in numpy array.\r\n Args:\r\n vocab (): dictionary vocab[word] = index.\r\n glove_filename (str): a path to a glove file.\r\n dim (int): dimension of embeddings.\r\n Returns:\r\n numpy array: an array of word embeddings.\r\n \"\"\"\r\n embeddings = np.zeros([len(vocab)+1, dim],dtype=np.float32) #0为PAD\r\n with open(glove_filename,encoding='utf-8') as f:\r\n for line in f:\r\n line = line.strip().split(' ')\r\n word = line[0]\r\n embedding = [float(x) for x in line[1:]]\r\n if word in vocab:\r\n word_idx = vocab[word]\r\n embeddings[word_idx] = np.asarray(embedding,dtype=np.float32)\r\n\r\n return embeddings\r\n\r\n\r\ndef word2idx(sents, word2idx):\r\n return [[word2idx[w] for w in s if w in word2idx] for s in sents] \r\n\r\ndef revdict(d):\r\n return {v: k for (k, v) in iteritems(dict(d))}\r\n\r\ndef get_entities(seq):\r\n \"\"\"Gets entities from sequence.\r\n Args:\r\n seq (list): sequence of labels.\r\n Returns:\r\n list: list of (chunk_type, chunk_start, chunk_end).\r\n Example:\r\n >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\r\n >>> print(get_entities(seq))\r\n [('PER', 0, 2), ('LOC', 3, 4)]\r\n \"\"\"\r\n i = 0\r\n chunks = []\r\n seq = seq + ['O'] # add sentinel\r\n types = [tag.split('-')[-1] for tag in seq]\r\n while i < len(seq):\r\n if seq[i].startswith('B'):\r\n for j in range(i+1, len(seq)):\r\n if seq[j].startswith('I') and types[j] == types[i]:\r\n continue\r\n break\r\n chunks.append((types[i], i, j))\r\n i = j\r\n else:\r\n i += 1\r\n return chunks\r\n\r\ndef utf_8_encoder(doc):\r\n\r\n for line in doc:\r\n\r\n yield line.encode('utf-8')\r\n\r\n\r\ndef skipgrams(sequence, vocabulary_size,\r\n window_size=4, negative_samples=1., shuffle=True,\r\n categorical=False, sampling_table=None, seed=None):\r\n \"\"\"Generates skipgram word pairs.\r\n\r\n Takes a sequence (list of indexes of words),\r\n returns couples of [word_index, other_word index] and labels (1s or 0s),\r\n where label = 1 if 'other_word' belongs to the context of 'word',\r\n and label=0 if 'other_word' is randomly sampled\r\n\r\n # Arguments\r\n sequence: a word sequence (sentence), encoded as a list\r\n of word indices (integers). If using a `sampling_table`,\r\n word indices are expected to match the rank\r\n of the words in a reference dataset (e.g. 
10 would encode\r\n the 10-th most frequently occurring token).\r\n Note that index 0 is expected to be a non-word and will be skipped.\r\n vocabulary_size: int. maximum possible word index + 1\r\n window_size: int. actually half-window.\r\n The window of a word wi will be [i-window_size, i+window_size+1]\r\n negative_samples: float >= 0. 0 for no negative (=random) samples.\r\n 1 for same number as positive samples. etc.\r\n shuffle: whether to shuffle the word couples before returning them.\r\n categorical: bool. if False, labels will be\r\n integers (eg. [0, 1, 1 .. ]),\r\n if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]\r\n sampling_table: 1D array of size `vocabulary_size` where the entry i\r\n encodes the probability to sample a word of rank i.\r\n seed: random seed.\r\n\r\n # Returns\r\n couples, labels: where `couples` are int pairs and\r\n `labels` are either 0 or 1.\r\n\r\n # Note\r\n By convention, index 0 in the vocabulary is\r\n a non-word and will be skipped.\r\n \"\"\"\r\n couples = []\r\n labels = []\r\n for i, wi in enumerate(sequence):\r\n if not wi:\r\n continue\r\n if sampling_table is not None:\r\n if sampling_table[wi] < random.random():\r\n continue\r\n\r\n window_start = max(0, i - window_size)\r\n window_end = min(len(sequence), i + window_size + 1)\r\n for j in range(window_start, window_end):\r\n if j != i:\r\n wj = sequence[j]\r\n if not wj:\r\n continue\r\n couples.append([wi, wj])\r\n if categorical:\r\n labels.append([0, 1])\r\n else:\r\n labels.append(1)\r\n\r\n if negative_samples > 0:\r\n num_negative_samples = int(len(labels) * negative_samples)\r\n words = [c[0] for c in couples]\r\n random.shuffle(words)\r\n\r\n couples += [[words[i % len(words)],\r\n random.randint(1, vocabulary_size - 1)] for i in range(num_negative_samples)]\r\n if categorical:\r\n labels += [[1, 0]] * num_negative_samples\r\n else:\r\n labels += [0] * num_negative_samples\r\n\r\n if shuffle:\r\n if seed is None:\r\n seed = random.randint(0, 10e6)\r\n random.seed(seed)\r\n random.shuffle(couples)\r\n random.seed(seed)\r\n random.shuffle(labels)\r\n\r\n return couples, labels\r\n\r\ndef remove_long_seq(maxlen, seq, label):\r\n \"\"\"Removes sequences that exceed the maximum length.\r\n\r\n # Arguments\r\n maxlen: int, maximum length\r\n seq: list of lists where each sublist is a sequence\r\n label: list where each element is an integer\r\n\r\n # Returns\r\n new_seq, new_label: shortened lists for `seq` and `label`.\r\n \"\"\"\r\n new_seq, new_label = [], []\r\n for x, y in zip(seq, label):\r\n if len(x) < maxlen:\r\n new_seq.append(x)\r\n new_label.append(y)\r\n return new_seq, new_label\r\n\r\nclass tDictionary(object):\r\n def __init__(self):\r\n self.word2idx = {}\r\n self.idx2word = {}\r\n self.wordfreq={}\r\n self.len = 0\r\n\r\n def add_word(self, word):\r\n if word not in self.word2idx:\r\n self.word2idx[word] = self.len\r\n self.wordfreq[word]=1\r\n self.len +=1\r\n else:\r\n self.wordfreq[word] = self.wordfreq[word]+1\r\n\r\n def __len__(self):\r\n return len(self.word2idx)\r\n\r\n def revdict(self):\r\n self.idx2word = {v:k for (k, v) in iteritems(self.word2idx)}\r\n\r\n def docs2idx(self,sents):\r\n return [[self.word2idx[w] for w in s if w in self.word2idx] for s in sents] \r\n\r\n def build_vocab(self,sents):\r\n for s in sents:\r\n for w in s:\r\n self.add_word(w)\r\n 
self.revdict()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"utils/Text.py","file_name":"Text.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459855412","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 9 19:41:10 2019\r\n\r\n@author: Charl\r\n\"\"\"\r\n#%% Import data\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom time import time\r\n\r\n#%% Read dataset\r\n\r\ntrain = pd.read_csv('D:/Documents/UFS/5th Year/Honours Project/Data Sources/train_V2.csv')\r\n\r\ntrain.winPlacePerc.fillna(1,inplace=True)\r\ntrain.loc[train['winPlacePerc'].isnull()]\r\n\r\n# Create distance feature\r\ntrain[\"distance\"] = train[\"rideDistance\"]+train[\"walkDistance\"]+train[\"swimDistance\"]\r\ntrain.drop(['rideDistance','walkDistance','swimDistance'],inplace=True,axis=1)\r\n\r\n# Create headshot_rate feature\r\ntrain['headshot_rate'] = train['headshotKills'] / train['kills']\r\ntrain['headshot_rate'] = train['headshot_rate'].fillna(0)\r\n\r\n# Create playersJoined feature - used for normalisation\r\ntrain['playersJoined'] = train.groupby('matchId')['matchId'].transform('count')\r\n\r\n#%% Data cleaning - removing outliers\r\n\r\n# Row with NaN 'winPlacePerc' value - pointed out by averagemn (https://www.kaggle.com/donkeys)\r\ntrain.drop(2744604, inplace=True)\r\n\r\n# Players who got kills without moving\r\ntrain['killsWithoutMoving'] = ((train['kills'] > 0) & (train['distance'] == 0))\r\ntrain.drop(train[train['killsWithoutMoving'] == True].index, inplace=True)\r\n\r\n# Players who got more than 10 roadkills\r\ntrain.drop(train[train['roadKills'] > 10].index, inplace=True)\r\n\r\n# Players who got more than 30 kills\r\ntrain[train['kills'] > 30].head(10)\r\n\r\n# Players who made a minimum of 9 kills and have a headshot_rate of 100%\r\ntrain[(train['headshot_rate'] == 1) & (train['kills'] > 8)].head(10)\r\n\r\n# Players who made kills with a distance of more than 1 km\r\ntrain.drop(train[train['longestKill'] >= 1000].index, inplace=True)\r\n\r\n# Players who acquired more than 80 weapons\r\ntrain.drop(train[train['weaponsAcquired'] >= 80].index, inplace=True)\r\n\r\n# Players how use more than 40 heals\r\ntrain['heals'] = train['boosts']+train['heals']\r\ntrain.drop(train[train['heals'] >= 40].index, inplace=True)\r\n\r\n# Create normalised features\r\ntrain['killsNorm'] = train['kills']*((100-train['playersJoined'])/100 + 1)\r\ntrain['damageDealtNorm'] = train['damageDealt']*((100-train['playersJoined'])/100 + 1)\r\ntrain['maxPlaceNorm'] = train['maxPlace']*((100-train['playersJoined'])/100 + 1)\r\ntrain['matchDurationNorm'] = train['matchDuration']*((100-train['playersJoined'])/100 + 1)\r\ntrain['assistsNorm'] = train['assists']*((100-train['playersJoined'])/100 + 1)\r\ntrain['roadKillsNorm'] = train['roadKills']*((100-train['playersJoined'])/100 + 1)\r\ntrain['vehicleDestroysNorm'] = train['matchDuration']*((100-train['playersJoined'])/100 + 1)\r\ntrain['killPointsNorm'] = train['vehicleDestroys']*((100-train['playersJoined'])/100 + 1)\r\ntrain['headshotKillsNorm'] = train['headshotKills']*((100-train['playersJoined'])/100 + 1)\r\ntrain['revivesNorm'] = train['revives']*((100-train['playersJoined'])/100 + 1)\r\n\r\n#%%\r\n\r\n# Features that will be used for training\r\npredictors = [\r\n \"numGroups\",\r\n \"distance\",\r\n \"boosts\",\r\n \"killStreaks\",\r\n \"DBNOs\",\r\n \"killPlace\",\r\n \"killStreaks\",\r\n \"longestKill\",\r\n \"heals\",\r\n \"weaponsAcquired\",\r\n 
\"headshot_rate\",\r\n \"assistsNorm\",\r\n \"headshotKillsNorm\",\r\n \"damageDealtNorm\",\r\n \"killPointsNorm\",\r\n \"revivesNorm\",\r\n \"roadKillsNorm\",\r\n \"vehicleDestroysNorm\",\r\n \"killsNorm\",\r\n \"maxPlaceNorm\",\r\n \"matchDurationNorm\",\r\n ]\r\n\r\nX = train[predictors]\r\nX.head()\r\n\r\ny = train['winPlacePerc']\r\ny.head()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\r\n\r\n#%% Hyperparameter tuning\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ndef log(x):\r\n # can be used to write to a log file\r\n print(x)\r\n\r\n# Utility function to report best scores (from scikit-learn.org)\r\ndef report(results, n_top=3):\r\n for i in range(1, n_top + 1):\r\n candidates = np.flatnonzero(results['rank_test_score'] == i)\r\n for candidate in candidates:\r\n log(\"Model with rank: {0}\".format(i))\r\n log(\"Mean validation score: {0:.5f} (std: {1:.5f})\".format(\r\n results['mean_test_score'][candidate],\r\n results['std_test_score'][candidate]))\r\n log(\"Parameters: {0}\".format(results['params'][candidate]))\r\n log(\"\")\r\n\r\n# Function to determine the best fit (from scikit-learn.org)\r\ndef best_fit(clf, X_train, y_train):\r\n \r\n param_grid = {\r\n 'max_features':['sqrt','log2',None],\r\n 'max_depth': np.arange(1, 15),\r\n 'min_samples_split': range(2,16,2),\r\n 'min_samples_leaf': range(2,20,2),\r\n 'max_leaf_nodes': [5,10,None],\r\n }\r\n\r\n # Run grid search\r\n grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5, n_jobs=8)\r\n\r\n import time as ttt\r\n now = time()\r\n log(ttt.ctime())\r\n \r\n grid_search.fit(X_train, y_train)\r\n \r\n report(grid_search.cv_results_, n_top=10)\r\n \r\n log(100*\"-\")\r\n log(ttt.ctime())\r\n log(\"Search (5-fold cross validation) took %.5f seconds for %d candidate parameter settings.\"\r\n % (time() - now, len(grid_search.cv_results_['params'])))\r\n log('')\r\n log(\"The best parameters are %s with a score of %0.5f\"\r\n % (grid_search.best_params_, grid_search.best_score_))\r\n \r\n return grid_search\r\n\r\n#%% Model\r\n\r\nfrom sklearn.tree import DecisionTreeRegressor\r\n\r\ndtree = DecisionTreeRegressor(random_state=101, max_features=None, max_depth=14, min_samples_split=2,\r\n min_samples_leaf=18, max_leaf_nodes=None)\r\n\r\n# Hyperparameter tuning method call (~3 hours run time)\r\nbest_tree = best_fit(dtree, X_train, y_train)\r\n\r\n# Output:\r\n# The best parameters are {'max_leaf_nodes': None, 'min_samples_leaf': 18,\r\n# 'min_samples_split': 2, 'max_features': None,\r\n# 'max_depth': 14} with a score of 0.89639\r\n\r\ndtree.fit(X_train, y_train)\r\n\r\npredictions = dtree.predict(X_test)\r\n\r\n#%% Feature importances\r\n\r\n# List the feature importance value\r\nimportances = dtree.feature_importances_\r\nprint(importances)\r\n\r\n# Sort feature importances in descending order\r\nindices = np.argsort(importances)[::-1]\r\nindices\r\n\r\n# Rearrange feature names so they match the sorted feature importances\r\nfeature_names = [X_train.columns[i] for i in indices]\r\nfeature_names\r\n\r\nX_columns = X_train.columns\r\nX_columns\r\n\r\n# Print the feature ranking\r\nprint(\"Feature ranking (index):\")\r\n\r\nfor f in range(X_train.shape[1]):\r\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\r\n \r\n# Print the feature ranking\r\nprint(\"Feature ranking (column name):\")\r\n\r\nfor f in range(indices.shape[0]):\r\n print(\"%2d) %-*s %0.9f\" % (f + 1, 10,\r\n X_columns[indices[f]],\r\n importances[indices[f]]))\r\n\r\n# Output:\r\n# Feature ranking (column name):\r\n# 1) distance 0.695173563\r\n# 2) killPlace 0.233122504\r\n# 3) numGroups 0.017167211\r\n# 4) boosts 0.010391879\r\n# 5) assistsNorm 0.009764422\r\n# 6) killsNorm 0.007610490\r\n# 7) killStreaks 0.006952296\r\n# 8) headshotKillsNorm 0.004577564\r\n# 9) maxPlaceNorm 0.004277439\r\n# 10) DBNOs 0.001806396\r\n# 11) vehicleDestroysNorm 0.001694837\r\n# 12) killStreaks 0.001486443\r\n# 13) matchDurationNorm 0.001398479\r\n# 14) roadKillsNorm 0.001211661\r\n# 15) revivesNorm 0.001046521\r\n# 16) heals 0.000673904\r\n# 17) killPointsNorm 0.000578243\r\n# 18) longestKill 0.000505060\r\n# 19) weaponsAcquired 0.000403372\r\n# 20) damageDealtNorm 0.000147940\r\n# 21) headshot_rate 0.000009776\r\n\r\n#%% Evaluation\r\n\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.metrics import r2_score\r\n\r\nMAE = mean_absolute_error(y_test, predictions)\r\nMSE = mean_squared_error(y_test, predictions)\r\nR2 = r2_score(y_test, predictions)\r\n\r\nprint(\"Metrics:\")\r\nprint(\"-------------------------------\")\r\nprint(\"Mean Absolute Error: {}\".format(MAE))\r\nprint(\"Mean Squared Error: {}\".format(MSE))\r\nprint(\"R2 Score: {}\".format(R2))\r\n\r\n# Cross-validation\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import cross_val_predict\r\n\r\ncross_val_prediction = cross_val_predict(dtree, X_train, y_train, cv=5)\r\n\r\nprint(\"\\n---------------------------------\")\r\nprint(\"5-FOLD CROSS-VALIDATION\")\r\nprint(\"---------------------------------\")\r\nprint(\"Cross-validation score (R2): {}\".format(cross_val_score(dtree, X_train, y_train, cv=5)))\r\n\r\n#%% Data cleaning (Test set)\r\n\r\ntest = pd.read_csv('D:/Documents/UFS/5th Year/Honours Project/Data Sources/test_V2.csv')\r\n\r\n# Create distance feature\r\ntest[\"distance\"] = test[\"rideDistance\"]+test[\"walkDistance\"]+test[\"swimDistance\"]\r\ntest.drop(['rideDistance','walkDistance','swimDistance'],inplace=True,axis=1)\r\n\r\n# Create headshot_rate feature\r\ntest['headshot_rate'] = test['headshotKills'] / test['kills']\r\ntest['headshot_rate'] = test['headshot_rate'].fillna(0)\r\n\r\n# Create playersJoined feature - used for normalisation\r\ntest['playersJoined'] = test.groupby('matchId')['matchId'].transform('count')\r\n\r\n# Players who got kills without moving\r\ntest['killsWithoutMoving'] = ((test['kills'] > 0) & (test['distance'] == 0))\r\ntest.drop(test[test['killsWithoutMoving'] == True].index, inplace=True)\r\n\r\n# Players who got more than 10 roadkills\r\ntest.drop(test[test['roadKills'] > 10].index, inplace=True)\r\n\r\n# Players who made a minimum of 9 kills and have a headshot_rate of 100%\r\ntest[(test['headshot_rate'] == 1) & (test['kills'] > 8)].head(10)\r\n\r\n# Players who made kills with a distance of more than 1 km\r\ntest.drop(test[test['longestKill'] >= 1000].index, inplace=True)\r\n\r\n# Players who acquired more than 80 weapons\r\ntest.drop(test[test['weaponsAcquired'] >= 80].index, inplace=True)\r\n\r\n# Players how use more than 40 heals\r\ntest['heals'] = test['boosts']+test['heals']\r\ntest.drop(test[test['heals'] >= 40].index, inplace=True)\r\n\r\n# Create normalised 
features\r\ntest['killsNorm'] = test['kills']*((100-test['playersJoined'])/100 + 1)\r\ntest['damageDealtNorm'] = test['damageDealt']*((100-test['playersJoined'])/100 + 1)\r\ntest['maxPlaceNorm'] = test['maxPlace']*((100-test['playersJoined'])/100 + 1)\r\ntest['matchDurationNorm'] = test['matchDuration']*((100-test['playersJoined'])/100 + 1)\r\ntest['assistsNorm'] = test['assists']*((100-test['playersJoined'])/100 + 1)\r\ntest['roadKillsNorm'] = test['roadKills']*((100-test['playersJoined'])/100 + 1)\r\ntest['vehicleDestroysNorm'] = test['matchDuration']*((100-test['playersJoined'])/100 + 1)\r\ntest['killPointsNorm'] = test['vehicleDestroys']*((100-test['playersJoined'])/100 + 1)\r\ntest['headshotKillsNorm'] = test['headshotKills']*((100-test['playersJoined'])/100 + 1)\r\ntest['revivesNorm'] = test['revives']*((100-test['playersJoined'])/100 + 1)\r\n\r\n#%% Prediction\r\n\r\nx_test = test[predictors]\r\ny_predict = dtree.predict(x_test)\r\n\r\ny_predict[y_predict > 1] = 1\r\ny_predict[y_predict < 0] = 0.0\r\n\r\ntest['winPlacePercPredictions'] = y_predict\r\n\r\n#%% Submission\r\n\r\naux = test.groupby(['matchId','groupId'])['winPlacePercPredictions'].agg('mean').groupby('matchId').rank(pct=True).reset_index()\r\naux.columns = ['matchId','groupId','winPlacePerc']\r\ntest = test.merge(aux, how='left', on=['matchId','groupId'])\r\nsubmission = test[['Id','winPlacePerc']]\r\n\r\nprint(\"Submission head\\n {}\".format(submission.head()))\r\nsubmission.to_csv(\"submission.csv\", index=False)\r\n\r\n#%%\r\n","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":11636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596745068","text":"#!/usr/bin/env python\n_interpreters_ = []\n__author__ = \"Russ Robbins\"\n__license__ = \"MIT\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Russ Robbins\"\n__email__ = \"russ.robbins@outlook.com\"\n__encoding__ = \"UTF-8\"\n__acknowledgements = []\n\n\n# make module importable by embedding code in functions\ndef function():\n ...\n return x\n\n\n\"\"\" make module executable from command line using\n> syntax\"\"\"\nif __name__ == '__main__':\n call\n to\n functions\n go\n here\n","sub_path":"4_resources/0_.py","file_name":"0_.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506365071","text":"import os\nfrom qibo import config\nfrom qibo.config import raise_error, log\n\n# versions requirements\nTF_MIN_VERSION = '2.2.0'\n\n\nclass Backend:\n\n def __init__(self):\n\n self.available_backends = {}\n self.hardware_backends = {}\n active_backend = \"numpy\"\n\n # load profile from default file\n from pathlib import Path\n profile_path = Path(os.environ.get(\n 'QIBO_PROFILE', Path(__file__).parent / \"profiles.yml\"))\n try:\n with open(profile_path) as f:\n import yaml\n profile = yaml.safe_load(f)\n except FileNotFoundError: # pragma: no cover\n raise_error(FileNotFoundError,\n f\"Profile file {profile_path} not found.\")\n\n # check if numpy is installed\n if self.check_availability(\"numpy\"):\n from qibo.backends.numpy import NumpyBackend\n self.available_backends[\"numpy\"] = NumpyBackend\n else: # pragma: no cover\n raise_error(ModuleNotFoundError, \"Numpy is not installed. 
\"\n \"Please install it using \"\n \"`pip install numpy`.\")\n\n for backend in profile.get('backends'):\n name = backend.get('name')\n if self.check_availability(name):\n if name == 'tensorflow' or name == 'qibotf':\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = str(\n config.TF_LOG_LEVEL)\n import tensorflow as tf # pylint: disable=E0401\n if tf.__version__ < TF_MIN_VERSION: # pragma: no cover\n raise_error(\n RuntimeError, f\"TensorFlow version not supported, minimum is {TF_MIN_VERSION}.\")\n import importlib\n custom_backend = getattr(importlib.import_module(\n backend.get('from')), backend.get('class'))\n self.available_backends[name] = custom_backend\n if backend.get('is_hardware', False): # pragma: no cover\n self.hardware_backends[name] = custom_backend\n if profile.get('default') == name:\n active_backend = name\n\n self.constructed_backends = {}\n self._active_backend = None\n self.qnp = self.construct_backend(\"numpy\")\n # Create the default active backend\n if \"QIBO_BACKEND\" in os.environ: # pragma: no cover\n self.active_backend = os.environ.get(\"QIBO_BACKEND\")\n else:\n self.active_backend = active_backend\n\n # raise performance warning if qibojit and qibotf are not available\n self.show_config()\n if active_backend == \"numpy\": # pragma: no cover\n log.warning(\"numpy backend uses `np.einsum` and supports CPU only. \"\n \"Consider installing the qibojit or qibotf backends for \"\n \"increased performance and to enable GPU acceleration.\")\n elif active_backend == \"tensorflow\": # pragma: no cover\n # case not tested because CI has tf installed\n log.warning(\"qibotf library was not found. `tf.einsum` will be \"\n \"used to apply gates. In order to install Qibo's \"\n \"high performance custom operators for TensorFlow \"\n \"please use `pip install qibotf`. Alternatively, \"\n \"consider installing the qibojit backend.\")\n\n @property\n def active_backend(self):\n return self._active_backend\n\n @active_backend.setter\n def active_backend(self, name):\n self._active_backend = self.construct_backend(name)\n\n def construct_backend(self, name):\n \"\"\"Constructs and returns a backend.\n\n If the backend already exists in previously constructed backends then\n the existing object is returned.\n\n Args:\n name (str): Name of the backend to construct.\n See ``available_backends`` for the list of supported names.\n\n Returns:\n Backend object.\n \"\"\"\n if name not in self.constructed_backends:\n if name not in self.available_backends:\n available = [\" - {}: {}\".format(n, b.description)\n for n, b in self.available_backends.items()]\n available = \"\\n\".join(available)\n raise_error(ValueError, \"Unknown backend {}. 
Please select one of \"\n \"the available backends:\\n{}.\"\n \"\".format(name, available))\n new_backend = self.available_backends.get(name)()\n if self.active_backend is not None:\n new_backend.set_precision(self.active_backend.precision)\n self.constructed_backends[name] = new_backend\n return self.constructed_backends.get(name)\n\n def __getattr__(self, x):\n return getattr(self.active_backend, x)\n\n def __str__(self):\n return self.active_backend.name\n\n def __repr__(self):\n return str(self)\n\n def show_config(self):\n log.info(\n f\"Using {self.active_backend.name} backend on {self.active_backend.default_device}\")\n\n @staticmethod\n def check_availability(module_name):\n \"\"\"Check if module is installed.\n\n Args:\n module_name (str): module name.\n\n Returns:\n True if the module is installed, False otherwise.\n \"\"\"\n from pkgutil import iter_modules\n return module_name in (name for _, name, _ in iter_modules())\n\n\nK = Backend()\nnumpy_matrices = K.qnp.matrices\n\n\ndef set_backend(backend=\"qibojit\"):\n \"\"\"Sets backend used for mathematical operations and applying gates.\n\n The following backends are available:\n 'qibojit': Numba/cupy backend with custom operators for applying gates,\n 'qibotf': Tensorflow backend with custom operators for applying gates,\n 'tensorflow': Tensorflow backend that applies gates using ``tf.einsum``,\n 'numpy': Numpy backend that applies gates using ``np.einsum``.\n\n Args:\n backend (str): A backend from the above options.\n \"\"\"\n if not config.ALLOW_SWITCHERS and backend != K.name:\n log.warning(\"Backend should not be changed after allocating gates.\")\n K.active_backend = backend\n K.show_config()\n\n\ndef get_backend():\n \"\"\"Get backend used to implement gates.\n\n Returns:\n A string with the backend name.\n \"\"\"\n return K.name\n\n\ndef set_precision(dtype=\"double\"):\n \"\"\"Set precision for states and gates simulation.\n\n Args:\n dtype (str): possible options are 'single' for single precision\n (complex64) and 'double' for double precision (complex128).\n \"\"\"\n if not config.ALLOW_SWITCHERS and dtype != K.precision:\n log.warning(\"Precision should not be changed after allocating gates.\")\n for bk in K.constructed_backends.values():\n bk.set_precision(dtype)\n\n\ndef get_precision():\n \"\"\"Get precision for states and gates simulation.\n\n Returns:\n A string with the precision name ('single', 'double').\n \"\"\"\n return K.precision\n\n\ndef set_device(name):\n \"\"\"Set default execution device.\n\n Args:\n name (str): Device name. 
Should follow the pattern\n '/{device type}:{device number}' where device type is one of\n CPU or GPU.\n \"\"\"\n if not config.ALLOW_SWITCHERS and name != K.default_device:\n log.warning(\"Device should not be changed after allocating gates.\")\n K.set_device(name)\n for bk in K.constructed_backends.values():\n if bk.name != \"numpy\" and bk != K.active_backend:\n bk.set_device(name)\n\n\ndef get_device():\n return K.default_device\n\n\ndef set_threads(nthreads):\n \"\"\"Set number of CPU threads.\n\n Args:\n nthreads (int): number of threads.\n \"\"\"\n K.set_threads(nthreads)\n\n\ndef get_threads():\n \"\"\"Returns number of threads.\"\"\"\n return K.nthreads\n","sub_path":"src/qibo/backends/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527853670","text":"#Stephen Lekko\r\n#Computing and Informatics Capstone\r\n#This is the file used for making a connection to RAWG.io\r\n#key = 24cad30c5b3949ff9140d8617254ff06\r\n\r\nimport requests\r\nimport json\r\nimport mysql.connector\r\n\r\n#START DB\r\nuserin = input(\"Enter the Game Name: \")\r\nmydb = mysql.connector.connect(\r\n host = \"localhost\",\r\n user = \"stephen\", \r\n passwd = \"cowAttorney48\",\r\n database = \"capstone_db\"\r\n)\r\n\r\nmycursor = mydb.cursor()\r\nmycursor.execute(\"use capstone_db\")\r\n\r\nurl = \"https://api.rawg.io/api/games?key=24cad30c5b3949ff9140d8617254ff06&page_size=1&search=\" + userin;\r\n\r\nresultCount = 0\r\n\r\nwhile resultCount < 1:\r\n r = requests.get(url)\r\n # print(url)\r\n data = json.loads(r.text)\r\n url = data['next']\r\n for game in data['results']:\r\n sql = \"INSERT INTO rawg_api (rawg_id, rawg_name, rawg_slug, rawg_release_date, rawg_metacritic) VALUES (%s, %s, %s, %s, %s )\"\r\n val = (game['id'], game['name'], game['slug'], game['released'], game['metacritic'])\r\n mycursor.execute(sql,val)\r\n mydb.commit()\r\n print(mycursor.rowcount,\"record inserted.\")\r\n print('Game Name: ',game['name'])\r\n print('Game Slug: ',game['slug'])\r\n print('Release Date: ',game['released'])\r\n print('Game ID: ', game['id'])\r\n print('Metacritic Score: ',game['metacritic'])\r\n print('-------------------------------------')\r\n resultCount += 1\r\n \r\n#END DB\r\n\r\n\r\n\r\n \r\n","sub_path":"rawg.py","file_name":"rawg.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"553149386","text":"\nimport sys\n\nimport classifier_commands\nimport collection1_parser\nimport files_parser\nimport misc\nimport news20_parser\nimport parameters\nimport reuters_parser\nimport wui\n\n\ndef main (arguments) :\n\tif len (arguments) < 1 :\n\t\traise Exception ()\n\tcommand_identifier = arguments[0]\n\tcommand_arguments = arguments[1:]\n\tif command_identifier not in commands :\n\t\traise Exception ()\n\tcommand_handler = commands[command_identifier]\n\tcommand_handler (command_arguments)\n\n\ndef run_command_handler (arguments) :\n\tif len (arguments) < 1 :\n\t\traise Exception ()\n\tfor rc_path in arguments :\n\t\tparameter = parameters.parse_file (run_parameter_definition, rc_path)\n\t\tnew_arguments = list (parameter['arguments'])\n\t\tfor index in xrange (len (new_arguments)) :\n\t\t\tif new_arguments[index] == '__rc__' :\n\t\t\t\tnew_arguments[index] = rc_path\n\t\tmain (new_arguments)\n\n\nrun_parameter_definition = {\n\t'arguments' : parameters.list_type\n}\n\n\ncommands = 
{\n\t\t'run' : run_command_handler,\n\t\t'classifiers' : classifier_commands.main_command_handler,\n\t\t'wui' : wui.main_command_handler,\n\t\t'parse-collection1' : collection1_parser.main_command_handler,\n\t\t'parse-news20' : news20_parser.main_command_handler,\n\t\t'parse-reuters' : reuters_parser.main_command_handler,\n\t\t'parse-files' : files_parser.main_command_handler,\n\t\t'list-categories' : misc.list_categories_command_handler,\n}\n\n\nif __name__ == '__main__' :\n\tmain (sys.argv[1:])\n","sub_path":"mindsoft/py/mindsoft.py","file_name":"mindsoft.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380810944","text":"### TD02 - stacks\n# question 1: est_vide function\ndef est_vide(pile):\n return len(pile)==0\n \n# >>> est_vide((1,2,3))\n# False\n# >>> est_vide(())\n# True \n\n# question 2: est_pleine function\ndef est_pleine(pile,nb):\n return len(pile)==nb\n \n# >>> est_pleine((1,2,3),3)\n# True\n# >>> est_pleine((1,2,3),6)\n# False\n\n# question 3: add an element (a bit far-fetched)\ndef push(pile,el):\n pile=pile+(el,0)#I cannot concatenate a tuple with a single element, two minimum\n pile=pile[:-1]\n return(pile)\n \n# >>> push((1,2,3),(94))\n# (1, 2, 3, 94)\n\ndef pop(pile):\n dernier=pile[-1]\n pile=pile[:-1] #I cannot modify the tuple, which is immutable\n return dernier,pile\n \n# >>> pop((1,2,3,4,5))\n# 5\n# >>> pop((1,2,3,4,5))\n# (5, (1, 2, 3, 4))\n\n### Exercise 2: reverse Polish notation using lists\n# is this a random element?\n# the stack is a tuple of strings\ndef est_nombre(pile,i):\n return pile[i] not in ['+','-','*','/']\n \n# >>> est_nombre(('+','1','3','*'),1)\n# True \n\ndef est_operation(pile,i):\n return pile[i] in ['+','-','*','/']\n \n# >>> est_operation(('+','1','3','*'),0)\n# True\n# >>> est_operation(('+','1','3','*'),1)\n# False\n\n\ndef evaluer_tuple(exp):# working with a list\n ''' the expression exp must be in postfix notation '''\n pile=()\n for element in exp:\n pile=push(pile,element)\n# return pile result OK\n res=()\n for elt in pile:\n if elt == '+':\n b = float(pop(res)[0])\n res=pop(res)[1]\n a=float(pop(res)[0])\n res=pop(res)[1]\n res=push(res,(a+b))\n elif elt == '*':\n b = float(pop(res)[0])\n res=pop(res)[1]\n a=float(pop(res)[0])\n res=pop(res)[1]\n res=push(res,(a*b))\n elif elt == '-':\n b = float(pop(res)[0])\n res=pop(res)[1]\n a=float(pop(res)[0])\n res=pop(res)[1]\n res=push(res,(a-b))\n elif elt == '/':\n b = float(pop(res)[0])\n res=pop(res)[1]\n a=float(pop(res)[0])\n res=pop(res)[1]\n res=push(res,(a/b))\n else:\n res=push(res,(float(elt)))\n return res[0]\n \ndef evaluer_liste(exp):\n reserve=[]\n if est_nombre(exp,0) and est_operation(exp,1):\n exp=exp+(exp[0:2])\n del exp[0]\n del exp[0]\n for element in exp:\n if element=='+':\n reserve.append(reserve.pop()+reserve.pop())\n elif element=='-':\n a=reserve.pop()\n reserve.append(reserve.pop()-a)\n elif element=='*':\n reserve.append(reserve.pop()*reserve.pop())\n elif element=='/':\n reserve.append(1/reserve.pop()*reserve.pop())\n else:\n reserve.append(element)\n return reserve[0]\n\n# >>> evaluer_liste([1,2,'+',4,'*',3,'-',5,'+'])\n# 14 \n\n# >>> evaluer_liste([5,'+',1,2,'+',4,'*',3,'-'])\n# 14 \n \n\n# Question 4: '12+4*3-5+'\n\n### Exercise 3 - road intersection\n#creating random lists\nimport random as rd\nf1=[rd.randint(0,1) for i in range(10)]\nf2=[rd.randint(0,1) for i in range(8)]\n\n
def croisement(f1,f2):\n f3=[]\n while len(f1)!=0 and len(f2)!=0:\n if f1[-1]==1: # if there is a vehicle in lane 1 it has priority\n f3.append(1) # lane 3 receives the vehicle from lane 1\n f1.pop() #lane 1 is popped\n if f2[-1]==0:\n f2.pop() #if there is no car in lane 2 at the stop sign, advance one vehicle\n else: # if there is no vehicle in lane 1, pop lane 2\n if f2[-1]==1: \n f3.append(1)\n f1.pop()\n f2.pop()\n else:\n f3.append(0)\n f1.pop()\n f2.pop()\n if len(f1)!=0: #when one lane is empty, the vehicles of the remaining lane empty into lane 3\n for i in range(len(f1)):\n f3.append(f1.pop())\n else:\n for i in range(len(f2)):\n f3.append(f2.pop())\n f3.reverse() #reverse lane 3 to get the vehicles in arrival order\n return f3\n \n# >>> croisement([0, 1, 1, 0, 0, 1, 1, 0, 1, 1],[0, 1, 0, 1, 1, 1, 1, 0])\n# [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n \n \n \n \n","sub_path":"P_05_AlgorithmiqueProgrammation/02_Piles/TD_02/programmes/TD02_piles_Patricia_02.py","file_name":"TD02_piles_Patricia_02.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186453747","text":"from glumpy import app, gloo, gl\n\nfilestream = open('vertex_program.glsl')\nprogram_vertex = str(filestream.readlines())\n\nfilestream.close()\n\nfilestream = open('fragment_program.glsl')\nprogram_fragment = str(filestream.readlines())\n\nwindow = app.Window()\n\nquad = gloo.Program(program_vertex, program_fragment, count=4)\n\nquad['position'] = (-1, +1), (+1, +1), (-1, -1), (+1, -1)\n\n@window.event\ndef on_draw(dt):\n window.clear()\n quad.draw(gl.GL_TRIANGLE_STRIP)\n\napp.run()\n\n","sub_path":"example/book_example_Python&OpenGL.py","file_name":"book_example_Python&OpenGL.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"26336560","text":"from manual_auth import drive\n\n#Folder ID of the \"共有フォルダ/09.その他\" folder (the string at the end of its URL)\nfolder_id = '0B6UTBGc4fcPNLTFKYkp6U3I1VWc'\n#Workaround for restricting the ListFile method: extract only files that are inside the folder given by \"folder_id\" and have not been deleted\nquery = \"'{}' in parents and trashed=false\".format(folder_id)\n\n#Because of the settings.yaml configuration, only files uploaded through this API can be read or written, so this is a leftover from when yojirei.csv was first uploaded\n#f = drive.CreateFile({'title': 'yojirei.csv', 'mimeType': 'text/csv', 'parents': [{'kind': 'drive#fileLink', 'id':folder_id}]})\n\n#Function that downloads yojirei.csv\ndef dl_csv():\n file_list = drive.ListFile({'q': query}).GetList()\n for target_file in file_list:\n if target_file['title'] == 'yojirei.csv':\n f = target_file\n f.GetContentFile('yojirei.csv')\n\n#Function that uploads yojirei.csv\ndef ul_csv():\n file_list = drive.ListFile({'q': query}).GetList()\n for target_file in file_list:\n if target_file['title'] == 'yojirei.csv':\n f = target_file\n f.SetContentFile('yojirei.csv')\n f.Upload()\n","sub_path":"yojirei_bot/drive_file.py","file_name":"drive_file.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"147927832","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nresult = []\nwith open(\"tmp\",\"r\") as f:\n for eachline in f.readlines():\n loss = eachline.rstrip()\n result.append(float(loss))\n\nplt.plot(range(len(result)),result)\n#plt.xlabel(\"FPR\")\n#plt.ylabel(\"TPR\")\n#plt.text(0,0,\"auc:%f\" %
 auc)\n#print(auc)\nplt.savefig(\"./roi_loss.jpg\")\n","sub_path":"24net/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"151002225","text":"import requests\nimport json\nfrom unittest import mock\n\nclass GitHubAPI:\n \n def __init__(user, id):\n user.id = id \n \n\n user.repos = []\n user.commits = {}\n\n def get_repos(user):\n try:\n r = requests.get(f'https://api.github.com/users/{user.id}/repos')\n \n except requests.exceptions.RequestException as error:\n return\n for repo in r.json():\n user.repos.append(repo[\"name\"])\n return user.repos\n\n def get_commits(user):\n for repo in user.repos:\n try:\n r = requests.get(f'https://api.github.com/repos/{user.id}/{repo}/commits')\n except requests.exceptions.RequestException as error:\n return\n user.commits[repo] = len(r.json())\n return user.commits\n\n def print_data(user):\n for repo in user.repos:\n print(f'Repo: {repo} Number of commits: {user.commits[repo]}')\n\n \n\nif __name__ == \"__main__\":\n githubapi = GitHubAPI(\"ImroseSingh\")\n \n","sub_path":"Getinfo.py","file_name":"Getinfo.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588929688","text":"# data analysis and wrangling\nimport pandas as pd\nimport numpy as np\nimport random as rnd\n\n# visualization\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\ntrain_df = pd.read_csv('input/train.csv')\n\ntest_df = pd.read_csv('input/test.csv')\n\ncombine = [train_df, test_df]\n\ntrain_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)\n\n\n# grid = sns.FacetGrid(train_df, col='Pclass', hue='Survived')\ngrid = sns.FacetGrid(train_df, col='Survived', row='Pclass', size=2.2, aspect=1.6)\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\ngrid.add_legend();\n\n\ngrid = sns.FacetGrid(train_df, row='Embarked', size=2.2, aspect=1.6)\ngrid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')\ngrid.add_legend()\n\n\n\nageMean=train_df[['Age','Survived']].groupby(['Age'], as_index=False).mean()\nageCount=train_df[['Age','Survived']].groupby(['Age'], as_index=False).count()\n\nage=pd.merge(ageMean,ageCount, how='left', left_on='Age', right_on='Age', suffixes=('_ageMean', '_ageCount'))\n\n\n\ntrain_df = train_df.drop(['Ticket', 'Cabin'], axis=1)\ntest_df = test_df.drop(['Ticket', 'Cabin'], axis=1)\ncombine = [train_df, test_df]\n\n\nfor dataset in combine:\n dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\\\.', expand=False)\n\n\npd.crosstab(train_df['Title'], train_df['Sex'])\n\nfor dataset in combine:\n dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n\ntrain_df[['Title', 'Survived']].groupby(['Title'],
 as_index=False).mean()\n\n\ntitle_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\nfor dataset in combine:\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = dataset['Title'].fillna(0)\n\ntrain_df.head()\n\n\ntrain_df = train_df.drop(['Name', 'PassengerId'], axis=1)\ntest_df = test_df.drop(['Name'], axis=1)\ncombine = [train_df, test_df]\ntrain_df.shape, test_df.shape\n\n\nfor dataset in combine:\n dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\n\n\n\n\n\nX_train = train_df.drop(\"Survived\", axis=1)\nY_train = train_df[\"Survived\"]\nX_test = test_df.drop(\"PassengerId\", axis=1).copy()\nX_train.shape, Y_train.shape, X_test.shape\n\n\nlogreg = LogisticRegression()\n\n\nlogreg.fit(X_train, Y_train)\n\n\nY_pred = logreg.predict(X_test)\nacc_log = round(logreg.score(X_train, Y_train) * 100, 2)\nacc_log\n\n\n","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"143222472","text":"#!/usr/bin/env python3\n#Created by xuchao on 16/7/25.\nimport pika,time\nHOST = '192.168.56.2'\nGROUP = 'logs'\nWAY = 'fanout'\nTIME = 5\nCOMMONS = 'ls'\n\nclass Center(object):\n def __init__(self):\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(\n host=HOST))\n\n self.channel = self.connection.channel()\n\n self.channel.exchange_declare(exchange=GROUP,\n type=WAY)\n\n # declare the queue that receives reply messages\n result = self.channel.queue_declare(exclusive=True)\n # randomly generated queue name\n self.callback_queue = result.method.queue\n # register the consumer for reply messages\n self.channel.basic_consume(self.callback,\n no_ack=True,\n queue=self.callback_queue)\n\n # handler invoked when a reply message is received\n def callback(self, ch, method, props, body):\n # self.response = body\n print(body.decode())\n\n def request(self, n):\n self.response = None\n #publish the computation request and declare the reply queue # reply_to is the reply queue name\n self.channel.basic_publish(exchange=GROUP,\n routing_key=self.callback_queue,\n properties=pika.BasicProperties(\n reply_to = self.callback_queue,),\n body=str(n))\n # set the receive window\n Nowtime = time.time() + TIME\n while time.time() < Nowtime:\n # receive the returned data, waiting for replies\n self.connection.process_data_events()\n # return self.response\n # self.channel.start_consuming()\n\ncenter = Center()\n\nresponse = center.request(COMMONS)\n","sub_path":"RabMQ_队列消息/server/RPC_server.py","file_name":"RPC_server.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106817962","text":"class Graduacao:\n def __init__(self):\n self.sequencia_formacao = None\n self.nivel = None\n self.titulo_do_trabalho_de_conclusao_de_curso = None\n self.nome_do_orientador = None\n self.codigo_instituicao = None\n self.nome_instituicao = None\n self.codigo_orgao = None\n self.nome_orgao = None\n self.codigo_curso = None\n self.nome_curso = None\n self.codigo_area_curso = None\n self.status_do_curso = None\n self.ano_de_inicio = None\n self.ano_de_conclusao = None\n self.flag_bolsa = None\n self.codigo_agencia_financiadora = None\n self.nome_agencia = None\n self.numero_id_orientador = None\n self.codigo_curso_capes = None\n self.titulo_do_trabalho_de_conclusao_de_curso_ingles = None\n self.nome_curso_ingles = None\n self.formacao_academica_titulacao = None\n self.tipo_graduacao = None\n self.codigo_instituicao_grad = None\n self.nome_instituicao_grad = None\n self.codigo_instituicao_outra_grad = None\n self.nome_instituicao_outra_grad =
 None\n self.nome_orientador_grad = None\n\n def parse(self, data):\n self.sequencia_formacao = data.get('SEQUENCIA-FORMACAO')\n self.nivel = data.get('NIVEL')\n self.titulo_do_trabalho_de_conclusao_de_curso = data.get('TITULO-DO-TRABALHO-DE-CONCLUSAO-DE-CURSO')\n self.nome_do_orientador = data.get('NOME-DO-ORIENTADOR')\n self.codigo_instituicao = data.get('CODIGO-INSTITUICAO')\n self.nome_instituicao = data.get('NOME-INSTITUICAO')\n self.codigo_orgao = data.get('CODIGO-ORGAO')\n self.nome_orgao = data.get('NOME-ORGAO')\n self.codigo_curso = data.get('CODIGO-CURSO')\n self.nome_curso = data.get('NOME-CURSO')\n self.codigo_area_curso = data.get('CODIGO-AREA-CURSO')\n self.status_do_curso = data.get('STATUS-DO-CURSO')\n self.ano_de_inicio = data.get('ANO-DE-INICIO')\n self.ano_de_conclusao = data.get('ANO-DE-CONCLUSAO')\n self.flag_bolsa = data.get('FLAG-BOLSA')\n self.codigo_agencia_financiadora = data.get('CODIGO-AGENCIA-FINANCIADORA')\n self.nome_agencia = data.get('NOME-AGENCIA')\n self.numero_id_orientador = data.get('NUMERO-ID-ORIENTADOR')\n self.codigo_curso_capes = data.get('CODIGO-CURSO-CAPES')\n self.titulo_do_trabalho_de_conclusao_de_curso_ingles = data.get('TITULO-DO-TRABALHO-DE-CONCLUSAO-DE-CURSO-INGLES')\n self.nome_curso_ingles = data.get('NOME-CURSO-INGLES')\n self.formacao_academica_titulacao = data.get('FORMACAO-ACADEMICA-TITULACAO')\n self.tipo_graduacao = data.get('TIPO-GRADUACAO')\n self.codigo_instituicao_grad = data.get('CODIGO-INSTITUICAO-GRAD')\n self.nome_instituicao_grad = data.get('NOME-INSTITUICAO-GRAD')\n self.codigo_instituicao_outra_grad = data.get('CODIGO-INSTITUICAO-OUTRA-GRAD')\n self.nome_instituicao_outra_grad = data.get('NOME-INSTITUICAO-OUTRA-GRAD')\n self.nome_orientador_grad = data.get('NOME-ORIENTADOR-GRAD')\n\n def __dict__(self):\n return {\n 'sequencia_formacao': self.sequencia_formacao,\n 'nivel': self.nivel,\n 'titulo_do_trabalho_de_conclusao_de_curso': self.titulo_do_trabalho_de_conclusao_de_curso,\n 'nome_do_orientador': self.nome_do_orientador,\n 'codigo_instituicao': self.codigo_instituicao,\n 'nome_instituicao': self.nome_instituicao,\n 'codigo_orgao': self.codigo_orgao,\n 'nome_orgao': self.nome_orgao,\n 'codigo_curso': self.codigo_curso,\n 'nome_curso': self.nome_curso,\n 'codigo_area_curso': self.codigo_area_curso,\n 'status_do_curso': self.status_do_curso,\n 'ano_de_inicio': self.ano_de_inicio,\n 'ano_de_conclusao': self.ano_de_conclusao,\n 'flag_bolsa': self.flag_bolsa,\n 'codigo_agencia_financiadora': self.codigo_agencia_financiadora,\n 'nome_agencia': self.nome_agencia,\n 'numero_id_orientador': self.numero_id_orientador,\n 'codigo_curso_capes': self.codigo_curso_capes,\n 'titulo_do_trabalho_de_conclusao_de_curso_ingles': self.titulo_do_trabalho_de_conclusao_de_curso_ingles,\n 'nome_curso_ingles': self.nome_curso_ingles,\n 'formacao_academica_titulacao': self.formacao_academica_titulacao,\n 'tipo_graduacao': self.tipo_graduacao,\n 'codigo_instituicao_grad': self.codigo_instituicao_grad,\n 'nome_instituicao_grad': self.nome_instituicao_grad,\n 'codigo_instituicao_outra_grad': self.codigo_instituicao_outra_grad,\n 'nome_instituicao_outra_grad': self.nome_instituicao_outra_grad,\n 'nome_orientador_grad': self.nome_orientador_grad,\n }\n","sub_path":"cv/graduacao.py","file_name":"graduacao.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"235032988","text":"import pyaudio\nimport wave\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nfrom scipy.fftpack import fft\nfrom pylab import*\n\n#CHUNK
 = 1024\nCHUNK = 64\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nRECORD_SECONDS = 1\nWAVE_OUTPUT_FILENAME = \"output.wav\"\nDEVICE = 2\n\np = pyaudio.PyAudio()\nfile = open(\"raw_data.txt\", 'ab') # binary append mode: stream.read() returns bytes\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK,\n\t\tinput_device_index=DEVICE)\n\nprint(\"* recording\")\n\nframes = []\n\n\n\nfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n#for i in range(1):\n data = stream.read(CHUNK)\n frames.append(data)\n \nprint(\"* done recording\")\nfile.write(data)\n\nstream.stop_stream()\nstream.close()\np.terminate()\n\nwf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwf.setnchannels(CHANNELS)\nwf.setsampwidth(p.get_sample_size(FORMAT))\nwf.setframerate(RATE)\nwf.writeframes(b''.join(frames))\nwf.close()\n\nfs, input_data = wavfile.read('output.wav')\ndata = input_data.T[0]\nprint(data)\nraw_data = [(ele/2**8.)*2-1 for ele in data]\nraw_transform = fft(raw_data)\nfreqs = fftfreq(len(raw_transform))\nprint(freqs.min(), freqs.max())\n\nidx = np.argmax(np.abs(raw_transform))\nprint(freqs)\nfreq = freqs[idx]\nfreq_in_hertz = abs(freq * RATE)\nprint(freq_in_hertz)\n##fs, data = wavfile.read('output.wav')\n##a = data.T[0]\n##b = [(ele/2**8.)*2-1 for ele in a]\n##c = fft(b)\n##d = len(c)/2\n##plt.plot(abs(c[:(d-1)]), 'r')\n##plt.show()\n","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"264108990","text":"import unittest\n\nfrom math_blocks.algebra.polynomials import SimplePoly, Variable\nfrom math_blocks.algebra.core import Chain, Number\n\n\nclass poly_add(unittest.TestCase):\n\n \n def test_number_add(self):\n x = Variable(\"x\", value=1)\n main_poly = SimplePoly([1,2,3], x)\n \n n = Number(3)\n\n result = main_poly + n\n \n result_latex = \"1x^{2}+2x+3+3\"\n result_value = 9\n self.assertEqual(result.latex(), result_latex)\n self.assertEqual(result.evaluate(), result_value)\n\n\n def test_poly_eq(self):\n x = Variable(\"x\", value=1)\n main_poly = SimplePoly([1,2,3], x)\n \n a = Variable(\"a\", value=1)\n p = SimplePoly([1,2,3], a)\n\n result = main_poly + p\n \n result_latex = \"1x^{2}+2x+3+1a^{2}+2a+3\"\n result_value = 12\n \n self.assertEqual(result.latex(), result_latex)\n self.assertEqual(result.evaluate(), result_value)\n\n def test_chain_eq(self):\n x = Variable(\"x\", value=1)\n main_poly = SimplePoly([1,2,3], x)\n \n alt_chain_b = Chain([1,3,2])\n\n result = main_poly + alt_chain_b\n \n result_latex = \"1x^{2}+2x+3+(1+3+2)\"\n result_value = 12\n \n self.assertEqual(result.latex(), result_latex)\n self.assertEqual(result.evaluate(), result_value)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/algebra/polynomials/operations/__add__.py","file_name":"__add__.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460700481","text":"from flask_app_mvc_webservices.configuration import db,app\nfrom flask_app_mvc_webservices.model import*\nfrom flask import render_template,request,session\n\n@app.route('/account/',methods=['GET','POST'])\ndef save_update_account():\n if 'userinfo' in session:\n msg=''\n if request.method=='POST':\n accno=int(request.form['accno'])\n acctype = request.form['accty']\n accbal= request.form['accbal']\n dbacc=Account.query.filter_by(id=accno).first()\n if dbacc:\n dbacc.type=acctype\n dbacc.balance=accbal\n 
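# persist the field updates on the existing account\n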
 db.session.commit()\n msg='Account Updated Successfully..!'\n else:\n dbacc=Account(id=accno,type=acctype,balance=accbal)\n db.session.add(dbacc)\n db.session.commit()\n msg='Account Created Successfully..!'\n\n return render_template('account.html',\n resp=msg,user=session['userinfo'],\n account=Account.get_dummy_account(),\n acclist=Account.query.all(),\n hotlist=Hotel.query.all(),\n menulist=Menu.query.all(),\n roomlist=Room.query.all()\n )\n\n return render_template('login.html', resp='')\n\n\n\n@app.route('/edit/<acid>')\ndef edit_account(acid):\n if 'userinfo' in session:\n return render_template('account.html',user=session['userinfo'],\n account=Account.query.filter_by(id=acid).first(),\n acclist=Account.query.all(),\n hotlist=Hotel.query.all(),\n menulist=Menu.query.all(),\n roomlist=Room.query.all()\n )\n return render_template('login.html',resp='')\n\n\n@app.route('/delete/<acid>')\ndef delete_account(acid):\n if 'userinfo' in session:\n msg=''\n acc=Account.query.filter_by(id=acid).first()\n if acc:\n db.session.delete(acc)\n db.session.commit()\n msg='Account Deleted Successfully..!'\n return render_template('account.html',\n resp=msg,user=session['userinfo'],\n account=Account.get_dummy_account(),\n acclist=Account.query.all(),\n hotlist=Hotel.query.all(),\n menulist=Menu.query.all(),\n roomlist=Room.query.all()\n )\n\n return render_template('login.html',resp='')\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"flask_app_mvc_webservices/accountcontroller.py","file_name":"accountcontroller.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441471552","text":"\"\"\" Common module\nimplement commonly used functions here\n\"\"\"\n\nimport random\nimport ui\nimport gui\nimport data_manager\nfrom PyQt5.QtWidgets import QLineEdit\n\n\ndef choose(table, header, filename):\n inputs = ui.get_inputs([[\"Please enter a number\", int]], \"\")\n option = inputs[0]\n if option == \"1\":\n show_table(table, header)\n elif option == \"2\":\n add(table, header)\n elif option == \"3\":\n id_ = ui.get_inputs([[\"ID\", str]], \"To remove a record please type its ID.\")[0]\n remove(table, id_)\n elif option == \"4\":\n id_ = ui.get_inputs([[\"ID\", str]], \"To update a record please type its ID.\")[0]\n update(table, header, id_)\n elif option == \"5\":\n return 5\n elif option == \"6\":\n return 6\n elif option == \"0\":\n ui.clear_screen()\n data_manager.write_table_to_file(filename, table)\n return False\n else:\n raise KeyError(\"That is not a valid option.\")\n return True\n\n\ndef get_index_of_element(list_, element):\n \"\"\"\n Search for index in a list.\n\n Args:\n list_ (list): list to search the index\n element: element to search the index in the list\n\n Returns:\n integer: index of the element in the list\n None: the element is not in list\n \"\"\"\n for i, item in enumerate(list_):\n if item == element:\n return i\n\n\ndef count_item(table, item):\n \"\"\"\n Count an item in the table\n\n Args:\n table (list): table to count the item in it.\n item: value which has to be counted\n\n Returns:\n integer: count of value, 0 if no value in table\n \"\"\"\n count = 0\n for element in table:\n if element == item:\n count += 1\n return count\n\n\ndef order_list(list_, descending=True):\n \"\"\"\n Order a list in descending or ascending order\n\n Args:\n list_ (list): list to order.\n descending: True by default; set it to False if ascending order is needed\n\n Returns:\n list: ordered
 list\n \"\"\"\n for i in range(len(list_) - 1):\n for j in range(1, len(list_)):\n if descending:\n if list_[j] > list_[j - 1]:\n temp = list_[j - 1]\n list_[j - 1] = list_[j]\n list_[j] = temp\n else:\n if list_[j] < list_[j - 1]:\n temp = list_[j - 1]\n list_[j - 1] = list_[j]\n list_[j] = temp\n return list_\n\n\ndef show_table(table, header):\n \"\"\"\n Display a table\n\n Args:\n table (list): list of lists to be displayed.\n\n Returns:\n None\n \"\"\"\n ui.print_table(table, header)\n\n\ndef add(table, header):\n \"\"\"\n Asks user for input and adds it into the table.\n\n Args:\n table (list): table to add new record to\n header (list): header of the table, contains which data has to be requested\n\n Returns:\n list: Table with a new record\n \"\"\"\n new_row = ui.get_inputs(header[1:], \"Please provide the following data:\")\n new_row.insert(0, generate_random(table))\n table.append(new_row)\n return table\n\n\ndef remove(table, id_):\n \"\"\"\n Remove a record with a given id from the table.\n\n Args:\n table (list): table to remove a record from\n id_ (str): id of a record to be removed\n\n Returns:\n list: Table without specified record.\n \"\"\"\n list_of_ids = [row[0] for row in table]\n index = get_index_of_element(list_of_ids, id_)\n del table[index]\n return table\n\n\ndef update(table, header, id_):\n \"\"\"\n Updates specified record in the table. Ask users for new data.\n\n Args:\n table: list in which record should be updated\n id_ (str): id of a record to update\n\n Returns:\n list: table with updated record\n \"\"\"\n list_of_ids = [row[0] for row in table]\n index = get_index_of_element(list_of_ids, id_)\n new_details = ui.get_inputs(header[1:], \"Please provide the following data:\")\n new_details.insert(0, id_)\n table[index] = new_details\n return table\n\n\ndef generate_random(table):\n \"\"\"\n Generates random and unique string. Used for id/key generation:\n - at least 2 special characters (except: ';'), 2 numbers, 2 lower and 2 upper case letters\n - it must be unique in the table (first value in every row is the id)\n\n Args:\n table (list): Data table to work on. 
First columns containing the keys.\n\n Returns:\n string: Random and unique string\n \"\"\"\n # a-z: 97-122\n # 0-9: 48-57\n special_characters = [\"#\", \"&\", \"@\", \"$\", \"§\"]\n generated = ''\n for i in range(8):\n if i == 0 or i == 5:\n generated += chr(random.randint(97, 122))\n elif i == 1 or i == 4:\n generated += chr(random.randint(97, 122)).upper()\n elif i == 2 or i == 3:\n generated += chr(random.randint(48, 57))\n else:\n generated += special_characters[random.randrange(len(special_characters))]\n return generated\n\n\ndef get_gui_table():\n result = []\n module = gui.ui.windowTitle()\n table = gui.ui.storeTable\n rows = table.rowCount()\n columns = table.columnCount()\n for row in range(rows):\n rowData = []\n for column in range(columns):\n item = table.item(row, column)\n rowData.append(item.text())\n result.append(rowData)\n return result\n\n\ndef get_gui_header():\n result = []\n table = gui.ui.storeTable\n columns = table.columnCount()\n for column in range(columns):\n result.append(table.horizontalHeaderItem(column).text())\n return result\n\n\ndef get_filename(module):\n return \"{}/{}.csv\".format(module, module)\n\n\ndef save_gui_table():\n module = gui.ui.windowTitle()\n data_manager.write_table_to_file(get_filename(module), get_gui_table())\n\n\ndef delete_gui_item():\n module = gui.ui.windowTitle()\n table = gui.ui.storeTable\n row = table.currentRow()\n id = table.item(row, 0).text()\n file_data = data_manager.get_table_from_file(get_filename(module))\n new_table = remove(file_data, id)\n data_manager.write_table_to_file(get_filename(module), new_table)\n gui.show_table(new_table)\n\n\ndef add_gui_item():\n module = gui.ui.windowTitle()\n header = get_gui_header()\n new_item = [gui.ui.input1.text()]\n for column in range(2, len(header)+1):\n input_field = gui.ui.findChild(QLineEdit, 'input' + str(column))\n new_item.append(input_field.text())\n file_data = data_manager.get_table_from_file(get_filename(module))\n file_data.append(new_item)\n data_manager.write_table_to_file(get_filename(module), file_data)\n gui.show_table(file_data)\n gui. 
show_form(get_gui_header())\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536021177","text":"from math import ceil\n\nfrom Cb_constants import CbServer, DocLoading\nfrom basetestcase import ClusterSetup\nfrom collections_helper.collections_spec_constants import \\\n MetaConstants, MetaCrudParams\nfrom couchbase_helper.durability_helper import DurabilityHelper\nfrom membase.api.rest_client import RestConnection\nfrom BucketLib.BucketOperations import BucketHelper\nfrom BucketLib.bucket import Bucket\nfrom remote.remote_util import RemoteMachineShellConnection\nfrom cb_tools.cbstats import Cbstats\nfrom sdk_exceptions import SDKException\nfrom java.lang import Exception as Java_base_exception\n\n\nclass CollectionBase(ClusterSetup):\n def setUp(self):\n super(CollectionBase, self).setUp()\n self.log_setup_status(\"CollectionBase\", \"started\")\n\n self.key = 'test_collection'.rjust(self.key_size, '0')\n self.simulate_error = self.input.param(\"simulate_error\", None)\n self.error_type = self.input.param(\"error_type\", \"memory\")\n self.doc_ops = self.input.param(\"doc_ops\", None)\n # If True, creates bucket/scope/collections with simpler names\n self.use_simple_names = self.input.param(\"use_simple_names\", True)\n self.spec_name = self.input.param(\"bucket_spec\",\n \"single_bucket.default\")\n self.data_spec_name = self.input.param(\"data_spec_name\",\n \"initial_load\")\n self.remove_default_collection = \\\n self.input.param(\"remove_default_collection\", False)\n\n self.action_phase = self.input.param(\"action_phase\",\n \"before_default_load\")\n self.skip_collections_cleanup = \\\n self.input.param(\"skip_collections_cleanup\", False)\n self.validate_docs_count_during_teardown = \\\n self.input.param(\"validate_docs_count_during_teardown\", False)\n self.batch_size = self.input.param(\"batch_size\", 200)\n self.process_concurrency = self.input.param(\"process_concurrency\", 1)\n self.retry_get_process_num = \\\n self.input.param(\"retry_get_process_num\", 200)\n self.change_magma_quota = self.input.param(\"change_magma_quota\", False)\n self.crud_batch_size = 100\n self.num_nodes_affected = 1\n if self.num_replicas > 1:\n self.num_nodes_affected = 2\n\n if self.doc_ops:\n self.doc_ops = self.doc_ops.split(';')\n\n self.durability_helper = DurabilityHelper(\n self.log, len(self.cluster.nodes_in_cluster),\n self.durability_level)\n\n # Disable auto-failover to avoid failover of nodes\n status = RestConnection(self.cluster.master) \\\n .update_autofailover_settings(False, 120, False)\n self.assertTrue(status, msg=\"Failure during disabling auto-failover\")\n self.bucket_helper_obj = BucketHelper(self.cluster.master)\n self.disk_optimized_thread_settings = self.input.param(\"disk_optimized_thread_settings\", False)\n if self.disk_optimized_thread_settings:\n self.set_num_writer_and_reader_threads(num_writer_threads=\"disk_io_optimized\",\n num_reader_threads=\"disk_io_optimized\")\n\n try:\n self.collection_setup()\n except Java_base_exception as exception:\n self.handle_setup_exception(exception)\n except Exception as exception:\n self.handle_setup_exception(exception)\n self.supported_d_levels = \\\n self.bucket_util.get_supported_durability_levels()\n self.log_setup_status(\"CollectionBase\", \"complete\")\n\n def tearDown(self):\n shell = RemoteMachineShellConnection(self.cluster.master)\n cbstat_obj = Cbstats(shell)\n for bucket in 
self.cluster.buckets:\n if bucket.bucketType != Bucket.Type.MEMCACHED:\n result = cbstat_obj.all_stats(bucket.name)\n self.log.info(\"Bucket: %s, Active Resident ratio(DGM): %s%%\"\n % (bucket.name,\n result[\"vb_active_perc_mem_resident\"]))\n self.log.info(\"Bucket: %s, Replica Resident ratio(DGM): %s%%\"\n % (bucket.name,\n result[\"vb_replica_perc_mem_resident\"]))\n if not self.skip_collections_cleanup \\\n and bucket.bucketType != Bucket.Type.MEMCACHED:\n self.bucket_util.remove_scope_collections_for_bucket(\n self.cluster, bucket)\n shell.disconnect()\n if self.validate_docs_count_during_teardown:\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n if self.disk_optimized_thread_settings:\n self.set_num_writer_and_reader_threads(num_writer_threads=\"default\",\n num_reader_threads=\"default\")\n super(CollectionBase, self).tearDown()\n\n @staticmethod\n def create_sdk_clients(num_threads, master,\n buckets, sdk_client_pool, sdk_compression):\n # Fetch num_collections per bucket. Used for 'req_clients' calc\n cols_in_bucket = dict()\n for bucket in buckets:\n collections_in_bucket = 0\n for _, scope in bucket.scopes.items():\n for _, _ in scope.collections.items():\n collections_in_bucket += 1\n cols_in_bucket[bucket.name] = collections_in_bucket\n\n # Create clients in SDK client pool\n bucket_count = len(buckets)\n max_clients = num_threads\n clients_per_bucket = int(ceil(max_clients / bucket_count))\n for bucket in buckets:\n sdk_client_pool.create_clients(\n bucket=bucket, servers=[master],\n req_clients=min(cols_in_bucket[bucket.name],\n clients_per_bucket),\n compression_settings=sdk_compression)\n\n def collection_setup(self):\n self.bucket_util.add_rbac_user(self.cluster.master)\n\n # Create bucket(s) and add rbac user\n if self.bucket_storage == Bucket.StorageBackend.magma:\n # get the TTL value\n buckets_spec_from_conf = \\\n self.bucket_util.get_bucket_template_from_package(\n self.spec_name)\n bucket_ttl = buckets_spec_from_conf.get(Bucket.maxTTL, 0)\n # Blindly override the bucket spec if the backend storage is magma.\n # So, Bucket spec in conf file will not take any effect.\n self.spec_name = \"single_bucket.bucket_for_magma_collections\"\n magma_bucket_spec = \\\n self.bucket_util.get_bucket_template_from_package(\n self.spec_name)\n magma_bucket_spec[Bucket.maxTTL] = bucket_ttl\n buckets_spec = magma_bucket_spec\n else:\n buckets_spec = self.bucket_util.get_bucket_template_from_package(\n self.spec_name)\n buckets_spec[MetaConstants.USE_SIMPLE_NAMES] = self.use_simple_names\n\n doc_loading_spec = \\\n self.bucket_util.get_crud_template_from_package(\n self.data_spec_name)\n\n # Process params to over_ride values if required\n self.over_ride_bucket_template_params(buckets_spec)\n self.over_ride_doc_loading_template_params(doc_loading_spec)\n self.set_retry_exceptions_for_initial_data_load(doc_loading_spec)\n self.bucket_util.create_buckets_using_json_data(self.cluster,\n buckets_spec)\n self.bucket_util.wait_for_collection_creation_to_complete(self.cluster)\n\n # Prints bucket stats before doc_ops\n self.bucket_util.print_bucket_stats(self.cluster)\n\n # Change magma quota only for bloom filter testing\n if self.change_magma_quota:\n bucket_helper = BucketHelper(self.cluster.master)\n bucket_helper.set_magma_quota_percentage()\n self.sleep(30, \"Sleep while magma storage quota setting is taking effect\")\n\n # Init sdk_client_pool if not initialized before\n if self.sdk_client_pool is None:\n self.init_sdk_pool_object()\n\n 
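# client pool sizing happens in create_sdk_clients: each bucket gets min(collection count, threads per bucket) clients\n 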
self.log.info(\"Creating required SDK clients for client_pool\")\n self.create_sdk_clients(self.task_manager.number_of_threads,\n self.cluster.master,\n self.cluster.buckets,\n self.sdk_client_pool,\n self.sdk_compression)\n\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n doc_loading_spec,\n mutation_num=0,\n batch_size=self.batch_size,\n process_concurrency=self.process_concurrency)\n if doc_loading_task.result is False:\n self.fail(\"Initial doc_loading failed\")\n\n self.cluster_util.print_cluster_stats(self.cluster)\n\n ttl_buckets = [\n \"multi_bucket.buckets_for_rebalance_tests_with_ttl\",\n \"multi_bucket.buckets_all_membase_for_rebalance_tests_with_ttl\",\n \"volume_templates.buckets_for_volume_tests_with_ttl\",\n \"magma_dgm.5_percent_dgm.5_node_2_replica_magma_ttl_256\",\n \"magma_dgm.5_percent_dgm.5_node_2_replica_magma_ttl_512\",\n \"magma_dgm.5_percent_dgm.5_node_2_replica_magma_ttl_1024\",\n \"magma_dgm.5_percent_dgm.5_node_1_replica_magma_ttl_256_single_bucket\",\n \"magma_dgm.5_percent_dgm.5_node_1_replica_magma_ttl_512_single_bucket\",\n \"magma_dgm.10_percent_dgm.5_node_2_replica_magma_ttl_256\",\n \"magma_dgm.10_percent_dgm.5_node_2_replica_magma_ttl_512\",\n \"magma_dgm.10_percent_dgm.5_node_2_replica_magma_ttl_1024\",\n \"magma_dgm.20_percent_dgm.5_node_2_replica_magma_ttl_256\",\n \"magma_dgm.20_percent_dgm.5_node_2_replica_magma_ttl_512\",\n \"magma_dgm.20_percent_dgm.5_node_2_replica_magma_ttl_1024\",\n \"magma_dgm.40_percent_dgm.5_node_2_replica_magma_ttl_512\",\n \"magma_dgm.80_percent_dgm.5_node_2_replica_magma_ttl_512\",\n ]\n\n # Verify initial doc load count\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets,\n timeout=1200)\n if self.spec_name not in ttl_buckets:\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster, timeout=2400)\n\n # Prints bucket stats after doc_ops\n self.bucket_util.print_bucket_stats(self.cluster)\n\n def over_ride_bucket_template_params(self, bucket_spec):\n if self.bucket_storage == Bucket.StorageBackend.magma:\n # Blindly override the following params\n bucket_spec[Bucket.evictionPolicy] = \\\n Bucket.EvictionPolicy.FULL_EVICTION\n else:\n for key, val in self.input.test_params.items():\n if key == \"replicas\":\n bucket_spec[Bucket.replicaNumber] = self.num_replicas\n elif key == \"bucket_size\":\n bucket_spec[Bucket.ramQuotaMB] = self.bucket_size\n elif key == \"num_items\":\n bucket_spec[MetaConstants.NUM_ITEMS_PER_COLLECTION] = \\\n self.num_items\n elif key == \"remove_default_collection\":\n bucket_spec[MetaConstants.REMOVE_DEFAULT_COLLECTION] = \\\n self.input.param(key)\n elif key == \"bucket_storage\":\n bucket_spec[Bucket.storageBackend] = self.bucket_storage\n elif key == \"compression_mode\":\n bucket_spec[Bucket.compressionMode] = self.compression_mode\n elif key == \"flushEnabled\":\n bucket_spec[Bucket.flushEnabled] = int(self.flush_enabled)\n elif key == \"bucket_type\":\n bucket_spec[Bucket.bucketType] = self.bucket_type\n\n def over_ride_doc_loading_template_params(self, target_spec):\n for key, value in self.input.test_params.items():\n if key == \"durability\":\n target_spec[MetaCrudParams.DURABILITY_LEVEL] = \\\n self.durability_level\n elif key == \"sdk_timeout\":\n target_spec[MetaCrudParams.SDK_TIMEOUT] = self.sdk_timeout\n elif key == \"doc_size\":\n target_spec[\"doc_crud\"][MetaCrudParams.DocCrud.DOC_SIZE] \\\n = self.doc_size\n elif key == \"randomize_value\":\n 
target_spec[\"doc_crud\"][MetaCrudParams.DocCrud.RANDOMIZE_VALUE] \\\n = self.randomize_value\n\n def load_data_for_sub_doc_ops(self, verification_dict=None):\n new_data_load_template = \\\n self.bucket_util.get_crud_template_from_package(\"initial_load\")\n new_data_load_template[MetaCrudParams.DURABILITY_LEVEL] = \"\"\n new_data_load_template[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n new_data_load_template[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 50\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n new_data_load_template,\n mutation_num=0,\n batch_size=self.batch_size,\n process_concurrency=self.process_concurrency)\n if doc_loading_task.result is False:\n self.fail(\"Extra doc loading task failed\")\n\n if verification_dict:\n self.update_verification_dict_from_collection_task(\n verification_dict,\n doc_loading_task)\n\n def update_verification_dict_from_collection_task(self,\n verification_dict,\n doc_loading_task):\n for bucket, s_dict in doc_loading_task.loader_spec.items():\n for s_name, c_dict in s_dict[\"scopes\"].items():\n for c_name, _ in c_dict[\"collections\"].items():\n c_crud_data = doc_loading_task.loader_spec[\n bucket][\"scopes\"][\n s_name][\"collections\"][c_name]\n for op_type in c_crud_data.keys():\n total_mutation = \\\n c_crud_data[op_type][\"doc_gen\"].end \\\n - c_crud_data[op_type][\"doc_gen\"].start\n if op_type in DocLoading.Bucket.DOC_OPS:\n verification_dict[\"ops_%s\" % op_type] \\\n += total_mutation\n elif op_type in DocLoading.Bucket.SUB_DOC_OPS:\n verification_dict[\"ops_update\"] \\\n += total_mutation\n if c_crud_data[op_type][\"durability_level\"] \\\n in self.supported_d_levels:\n verification_dict[\"sync_write_committed_count\"] \\\n += total_mutation\n\n def validate_cruds_from_collection_mutation(self, doc_loading_task):\n # Read all the values to validate the CRUDs\n for bucket, s_dict in doc_loading_task.loader_spec.items():\n client = self.sdk_client_pool.get_client_for_bucket(bucket)\n for s_name, c_dict in s_dict[\"scopes\"].items():\n for c_name, _ in c_dict[\"collections\"].items():\n c_crud_data = doc_loading_task.loader_spec[\n bucket][\"scopes\"][\n s_name][\"collections\"][c_name]\n client.select_collection(s_name, c_name)\n for op_type in c_crud_data.keys():\n doc_gen = c_crud_data[op_type][\"doc_gen\"]\n is_sub_doc = False\n if op_type in DocLoading.Bucket.SUB_DOC_OPS:\n is_sub_doc = True\n task = self.task.async_validate_docs(\n self.cluster, bucket,\n doc_gen, op_type,\n scope=s_name, collection=c_name,\n batch_size=self.batch_size,\n process_concurrency=self.process_concurrency,\n sdk_client_pool=self.sdk_client_pool,\n is_sub_doc=is_sub_doc)\n self.task_manager.get_task_result(task)\n\n def set_retry_exceptions_for_initial_data_load(self, doc_loading_spec):\n retry_exceptions = list()\n retry_exceptions.append(SDKException.AmbiguousTimeoutException)\n retry_exceptions.append(SDKException.TimeoutException)\n retry_exceptions.append(SDKException.RequestCanceledException)\n retry_exceptions.append(SDKException.DocumentNotFoundException)\n retry_exceptions.append(SDKException.ServerOutOfMemoryException)\n if self.durability_level:\n retry_exceptions.append(SDKException.DurabilityAmbiguousException)\n retry_exceptions.append(SDKException.DurabilityImpossibleException)\n doc_loading_spec[MetaCrudParams.RETRY_EXCEPTIONS] = retry_exceptions\n\n def set_num_writer_and_reader_threads(self, 
num_writer_threads=\"default\", num_reader_threads=\"default\",\n num_storage_threads=\"default\"):\n bucket_helper = BucketHelper(self.cluster.master)\n bucket_helper.update_memcached_settings(num_writer_threads=num_writer_threads,\n num_reader_threads=num_reader_threads,\n num_storage_threads=num_storage_threads)\n","sub_path":"pytests/bucket_collections/collections_base.py","file_name":"collections_base.py","file_ext":"py","file_size_in_byte":18085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181365617","text":"\"\"\"\nThis code is written by BACHOTTI SAI KRISHNA SHANMUKH EE19B009\n EE2703 Assignment 5 Solution\n The Resistor Problem\n\"\"\"\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport scipy.linalg as slg\n\n\"\"\"\nThis block of code validates the input from the command line and raises a customized message in case of error\nIn case no input parameters are passed, the code takes default parameters as shown below\n\"\"\"\n\nif len(sys.argv)==5:\n try:\n Nx = int(sys.argv[1])\n Ny = int(sys.argv[2])\n radius = float(sys.argv[3])\n Niter = int(sys.argv[4])\n \n except ValueError:\n print(\"Input arguments must be integers. Radius can be float too\")\n exit()\n\nelif len(sys.argv)==1:\n Nx = 25\n Ny = 25\n radius = 8\n Niter = 1500\n print(\"No input parameters entered. Default parameters are chosen\")\n\nelse:\n print('\\nExpected Usage: $python3 %s <Nx> <Ny> <radius> <Niter>' % sys.argv[0])\n exit()\n\n\"\"\"\nPrint out the values entered by the user/ default values\n\"\"\"\nprint('Nx is',Nx)\nprint('Ny is',Ny)\nprint('radius is',radius)\nprint('No of iterations is',Niter)\n\n\"\"\"\nInitialization\nCreating a 2D array for potential and a meshgrid for visualization purposes\n\"\"\"\nphi = np.zeros((Ny,Nx)) \n#No of columns is Nx and No of rows is Ny\nx = np.arange(Nx) - (Nx-1)/2 \n# x contains the discrete positions along X direction (right)\ny = np.flip(np.arange(Ny) - (Ny-1)/2) \n# y contains the discrete positions along Y direction (top)\nX,Y = np.meshgrid(x,y)\nii = np.where(X*X + Y*Y <= radius**2) \n# indices /positions of points which lie within the radius\nphi[ii] =1 # This set of points always remains at potential = 1\n\n\"\"\"\nPlot for Initial Potential\n\"\"\"\nfig,ax = plt.subplots(figsize=(6,6),num =0)\nplt.xlabel(r'+ $x\\\\longrightarrow$')\nplt.ylabel(r'+ $y\\\\longrightarrow$')\nax.contour(X,Y,phi)\nax.scatter(ii[1]-(Nx-1)/2,ii[0]-(Ny-1)/2,marker = 'o', color ='r')\nplt.title(r'Contour plot of Initial Potential')\n\n\"\"\"\nFinding phi in an iterative method using the Laplace Equation in difference method\nAlso recording absolute error in each iteration\n\"\"\"\nerrors = np.zeros(Niter)\nfor k in range(Niter):\n oldphi = phi.copy() # saving a copy \n phi[1:-1,1:-1] = 0.25*(phi[1:-1,0:-2] + phi[1:-1,2:] + phi[0:-2,1:-1] + phi[2:,1:-1]) \n # estimating phi from laplace equation\n phi[ii] =1 # restoring the condition of Potential =1 inside circle\n phi[1:-1,0] = phi[1:-1,1] # Boundary conditions\n phi[1:-1,-1] = phi[1:-1,-2] # On hanging\n phi[0,1:-1] = phi[1,1:-1] #Sides\n errors[k] = np.max(np.abs(phi-oldphi)) # error\n\n\"\"\"\nUsing Least Square Method (LSTSQ) to get parameters for best fit curve for\ny = A*exp(Bx) or\nlogy = logA + B*x\n\nM = [1 x]\nv = [p1 p2]T\nc = [logy]\n\nHere our parameters p1 and p2 are logA and B respectively \n\"\"\" \ni = np.arange(1,Niter+1) \none_array = np.ones(Niter) # array with all 1s\nM = np.c_[one_array,i] # for all Niter\nM_500 =
 np.c_[one_array[500:],i[500:]] # from 500th iter\nv = slg.lstsq(M,np.log(errors))[0] #Least Squares\nv_500 = slg.lstsq(M_500,np.log(errors[500:]))[0]\n\n\"\"\"\nSemilog Plot of Error\n\"\"\"\nfig,ax = plt.subplots(num =1)\nplt.semilogy(i,errors)\nplt.xlabel(r'No. of iterations') # Labels\nplt.ylabel(r'Error')\nplt.title(r'Error in semilog plot') #Title\nplt.grid() # Grid\n\n\"\"\"\nLog Log plot of Error along with plot of every 50th iter\n\"\"\"\nfig,ax = plt.subplots(num =2)\nax.loglog(i,errors, label='Error')\nax.loglog(i[::50],errors[::50], 'ro',label ='Every 50th iter')\nplt.xlabel(r'No. of iterations') # Labels\nplt.ylabel(r'Error')\nplt.title(r'Error in log-log plot') #Title\nplt.legend() #Legend\nplt.grid() #Grid\n\n\"\"\"\nError using best fit parameters for all iterations\nand Error using best fit parameter from 500th iter\n\"\"\"\nerror_lstsq = np.exp(np.dot(M,v))\nerror500_lstsq = np.exp(np.dot(M_500,v_500))\n\n\"\"\"\nComparing the actual error with lstsq predicted error\n\"\"\"\nfig,ax = plt.subplots(num =3)\nax.loglog(errors, label='Iterative Error (True Error)')\nax.loglog(i[::50],error_lstsq[::50],'ro', label = 'Least Squares Fit')\nax.loglog(i[500::50],error500_lstsq[::50],'go', label = 'Least Squares Fit from 500th iteration')\nplt.xlabel(r'No. of iterations') #Labels\nplt.ylabel(r'Error')\nplt.title(r'Best fit for Error in log-log plot') #Title\nplt.legend() #Legend\nplt.grid() #Grid\n\n\"\"\"\nv is the solution of lstsq and the parameters are logA and B\n\"\"\"\nA = np.exp(v[0])\nB = v[1]\n\ndef cum_error(x):\n \"\"\"\n Input argument x: No. of iterations\n Output is cumulative max possible error\n \"\"\"\n return -A/B*np.exp(B*(x+0.5))\n\n\"\"\"\nLog log plot for cumulative error\n\"\"\"\nfig, ax = plt.subplots(num =4)\nax.loglog(i[100::100],cum_error(i[100::100]),'ro')\nplt.xlabel(r'No. 
of iterations')\nplt.ylabel(r'Cumulative Error')\nplt.title(r'Cumulative Error in log-log plot (Every 100th iter)')\nplt.grid()\n\n\"\"\"\nContour plot of potential\n\"\"\"\nfig,ax = plt.subplots(figsize=(6,6),num =5)\nplt.xlabel(r'+ $x\\longrightarrow$')\nplt.ylabel(r'+ $y\\longrightarrow$')\nplt.title(r'Contour Plot of Potential $\\phi$')\ncs = ax.contour(X,Y,phi)\nax.scatter(ii[1]-(Nx-1)/2,ii[0]-(Ny-1)/2,marker = 'o', color ='r')\nax.clabel(cs, inline =1, fontsize = 9) \n\n\"\"\"\nSurface plot of potential\n\"\"\"\nfig1=plt.figure(6) # open a new figure\nax=p3.Axes3D(fig1) # Axes3D is the means to do a surface plot\nplt.title('The 3-D surface plot of the potential')\nplt.xlabel(r'+ $x\\longrightarrow$')\nplt.ylabel(r'+ $y\\longrightarrow$')\nsurf = ax.plot_surface(X, Y, phi, rstride=1, cstride=1, cmap=plt.cm.jet)\n\n\n\"\"\"\nComputing Current density J from the equations\n\"\"\"\nJx = 0.5*(phi[1:-1,:-2]-phi[1:-1,2:]) # 0.5*(phi(x-1,y) - phi(x+1,y)) in cartesian\nJy = 0.5*(phi[2:,1:-1]-phi[:-2,1:-1]) # 0.5*(phi(x, y-1) - phi(x,y+1)) in cartesian\n\n\"\"\"\nPlot for Current flow\n\"\"\"\nfig,ax = plt.subplots(figsize=(6,6),num =7)\nax.quiver(X[1:-1,1:-1],Y[1:-1,1:-1],Jx,Jy)\nax.scatter(ii[1]-(Nx-1)/2,ii[0]-(Ny-1)/2,marker = 'o', color ='r')\nplt.xlabel(r'+ $x\\longrightarrow$')\nplt.ylabel(r'+ $y\\longrightarrow$')\nplt.title('Vector plot of current flow')\nplt.show()\n\n","sub_path":"week5-resistor/EE2703_Assignment5_EE19B009.py","file_name":"EE2703_Assignment5_EE19B009.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69061271","text":"from lib.base import AzureBaseADAction\n\n\nclass AzureCreateVMAction(AzureBaseADAction):\n def run(self, user_object_id, first_name, last_name):\n if not last_name and not first_name:\n return {\"updated\": False}\n\n update = {}\n\n if first_name:\n update['givenName'] = first_name\n\n if last_name:\n update['surName'] = last_name\n\n self.graphrbac_client.users.update(user_object_id, update)\n return {\"updated\": True}\n","sub_path":"actions/update_user_name.py","file_name":"update_user_name.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"77896257","text":"import os\nimport sys\n\nimport re\nimport csv\n\ndef dict2csv(dim_dict, filename):\n\twith open(os.path.join(DATA_PATH, filename), 'w', newline='') as csvfile:\n\t\tfieldnames = [\"room_size\"]\n\t\tproblem_sizes = []\n\t\tfor col_name in dim_dict:\n\t\t\tfieldnames.append(col_name)\n\t\t\tfor ps in dim_dict[col_name]:\n\t\t\t\tif ps not in problem_sizes:\n\t\t\t\t\tproblem_sizes.append(ps)\n\n\t\tproblem_sizes = sorted(problem_sizes)\n\t\t\t\t\n\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\twriter.writeheader()\n\n\t\tfor ps in problem_sizes:\n\t\t\trow = {\"room_size\": ps}\n\t\t\tfor col_name in dim_dict:\n\t\t\t\t#print(col_name, dim_dict[col_name])\n\t\t\t\tif dim_dict[col_name] is not None and ps in dim_dict[col_name]:\n\t\t\t\t\trow[col_name] = dim_dict[col_name][ps]\n\t\t\t\telse:\n\t\t\t\t\tdim_dict[col_name] = None\n\t\t\t\n\t\t\twriter.writerow(row)\n\n\ndef find_ints_in_string(string):\n\treturn list(map(int, re.findall(r'\\d+', string)))\n\ndef find_floats_in_string(string):\n\treturn list(map(float, re.findall(r'\\d+\\.\\d+', string)))\n\ndef get_avg_runtime(path, filename):\n\toutput = open(os.path.join(path, filename), \"r\").read()\n\tif not output:\n\t\treturn 
None\n\n\tstr_runtimes = \" - \".join([s.split(\"seconds\")[0] for s in output.split(\"The process took\") if \"seconds\" in s])\n\truntimes = find_floats_in_string(str_runtimes)\n\tif len(runtimes) == 0:\n\t\tprint(\"missing value for '%s'\"%filename)\n\t\treturn None\n\n\treturn sum(runtimes)/len(runtimes)\n\ndef outputs2csv():\n\tresults = {}\n\tfor filename in os.listdir(OUTPUTS_PATH):\n\t\tif filename.split(\".\")[-1] != \"dat\":\n\t\t\tprint(\"incompatible file: %s\" % filename)\n\t\t\tcontinue\n\n\t\tbasename = filename.split(\".\")[0]\n\t\tif basename.count(\"_\") == 2:\n\t\t\tprogram_type, dim, problem_size = basename.split(\"_\")\n\t\t\tname = program_type\n\t\telif basename.count(\"_\") == 4:\n\t\t\tprogram_type, dim, problem_size, slot_distribute, num_slots = basename.split(\"_\")\n\t\t\tname = \"_\".join([program_type, num_slots, slot_distribute])\n\t\telse:\n\t\t\tprint(\"unsupported_basename: %s\"%basename)\n\n\t\tproblem_size = int(problem_size)\n\t\tif dim not in results:\n\t\t\tresults[dim] = {}\n\t\tif name not in results[dim]:\n\t\t\tresults[dim][name] = {}\n\t\t#print(\"extracting data from %s\" % filename)\n\n\t\tresults[dim][name][problem_size] = get_avg_runtime(OUTPUTS_PATH, filename)\n\n\t#print(results)\n\tfor dim in results:\n\t\tprint(dim)\n\t\tdict2csv(results[dim], dim+\".csv\")\n\t\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) != 3:\n\t\tprint(\"wrong number of arguments: \"+str(sys.argv[1:]))\n\t\tsys.exit(-1)\n\n\tOUTPUTS_PATH = sys.argv[1]\n\tDATA_PATH = sys.argv[2]\n\n\toutputs2csv()\n","sub_path":"proseminar/03/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"394614283","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Problem 2: Points to map\n# \n# In this problem we continue to learn how to turn latitude and longitude coordinates into geometries.\n# \n\n\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point\n# YOUR CODE HERE 1 to read data\ndata = pd.read_csv('data/some_posts.csv')\ndata[\"geometry\"]=\"\"\n\nfor i,r in data.iterrows():\n a = Point(r[\"lon\"],r[\"lat\"]) # Point takes (x, y), i.e. (lon, lat)\n data.at[i, \"geometry\"] = a\n\n\n# CODE FOR TESTING YOUR SOLUTION\n\n# Check the result\nprint(\"Number of rows:\", len(data))\n\n\n# CODE FOR TESTING YOUR SOLUTION\n\n# Check the result\nprint(data['geometry'].head())\n\n\n# YOUR CODE HERE 2\nimport geopandas as gpd\nfrom pyproj import CRS\ngeo = gpd.GeoDataFrame(data, geometry ='geometry',crs = CRS.from_epsg(4326).to_wkt())\nfp = 'Kruger_posts.shp'\ngeo.to_file(fp)\n\n# CODE FOR TESTING YOUR SOLUTION\n\n# Check the geodataframe head\nprint(\"Number of rows:\", len(geo))\nprint(geo.head())\n\n\n# CODE FOR TESTING YOUR SOLUTION\n\n# Check that the output file exists\nimport os\nassert os.path.isfile(fp), \"output shapefile does not exist\"\n\n\n# **Finally:** \n# - **Create a simple map of the points** using the `plot()` -function. \n\n# YOUR CODE HERE 3\ngeo.plot()\n# Well done! 
Now you can move on to Exercise_9_problem_3.\n\ndef func5():\n return data\n\ndef func6():\n return geo\n\n\n","sub_path":"Exercise_9_problem_2.py","file_name":"Exercise_9_problem_2.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"647632785","text":"# Network structure #\nNN_HIDDEN_COUNT = 8\n\n# Training #\nLEARNING_RATE = 1e-4\nTRAIN_ITERATIONS = 5000\nREWARD_HISTORY_DISCOUNT = 0.99\nUPDATE_FREQUENCY = 20\nSTEP_TIMEOUT = .3\n\n# Printing #\nPRINT_TIMEOUT_ITERATIONS = 100\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80364292","text":"import torch\nimport numpy as np\nimport config\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel\n\n\ndef main(corpus):\n\n type_dict = {} # {type: index}\n with open(config.DATA_ROOT + corpus + \"type_set.txt\", \"r\") as f:\n lines = f.readlines()\n for line in lines:\n tokens = line[:-1].split(\" \")\n type_dict[tokens[1]] = int(tokens[0])\n\n # mat = get_bert_sent_embedding_by_desc(type_dict, corpus)\n mat = get_word_embedding_by_type_str(type_dict, corpus)\n torch.save(mat, config.DATA_ROOT + corpus + config.TYPE_ATTEN_FILE)\n\n\ndef get_bert_sent_embedding_by_desc(type_dict, corpus):\n with open(config.DATA_ROOT + corpus + \"type_desc.txt\", 'r') as f:\n matrix = [[]] * len(type_dict)\n line = f.readline()\n while line != \"\":\n no = line[0: line.find(\" \")]\n pair = line[line.find(\" \") + 1:-1].split(\"\\t\")\n ttype = pair[0]\n desc = pair[1]\n\n print(f\"no: {no}, pair: [{type}, {desc}]\")\n line = f.readline()\n\n device = config.CUDA\n bert = BertModel.from_pretrained(config.BERT_MODEL_PATH)\n bert.to(device)\n bert.eval()\n tokenizer = BertTokenizer.from_pretrained(config.BERT_MODEL_PATH)\n tokenized_text = tokenizer.tokenize(desc)\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n segments_ids = [0] * len(indexed_tokens)\n tokens_tensor = torch.tensor([indexed_tokens], device=device)\n segments_tensors = torch.tensor([segments_ids], device=device)\n _, sent_embedding = bert(tokens_tensor, segments_tensors)\n\n matrix[type_dict[ttype]] = sent_embedding\n\n return torch.stack(matrix)\n\n\ndef get_word_embedding_by_type_str(type_dict, corpus):\n\n with open(config.DATA_ROOT + corpus + config.TYPE_SET_INDEX_FILE) as f:\n lines = f.readlines()\n type_str = {} # type_word: idx\n test = {}\n for line in lines:\n tokens = line[:-1].split(\" \")\n idx = int(tokens[0])\n tstr = tokens[1]\n words = tstr[tstr.rindex(\"/\") + 1:].split(\"_\")\n type_str[idx] = words\n\n glove_dict = {}\n with open(config.EMBEDDING_ROOT, 'r') as glove:\n line = glove.readline()\n while line != \"\":\n wd = line[:line.index(\" \")]\n embed = line[line.index(\" \")+1: -1]\n glove_dict[wd] = embed\n line = glove.readline()\n\n matrix = [[] for _ in range(len(type_dict))]\n result = []\n for i in range(len(type_dict)):\n words = type_str[i]\n for w in words:\n if glove_dict.get(w) is not None:\n matrix[i].append([float(i) for i in glove_dict[w].split(\" \")])\n else:\n print(f\"Warning: {w} is not in the embedding dict, we will random initialize it values\")\n\n for i, row in enumerate(matrix):\n\n r = None\n if len(row) > 1:\n r = torch.from_numpy(np.mean(row, axis=0, dtype=np.float32))\n elif len(row) == 0:\n r = torch.randn(config.EMBEDDING_DIM, dtype=torch.float)\n else:\n r = 
torch.tensor(row, dtype=torch.float).squeeze()\n\n        result.append(r)\n\n    return torch.stack(result)\n\n\nif __name__ == '__main__':\n    main(config.WIKI)\n","sub_path":"preprocess/type_atten_matrix.py","file_name":"type_atten_matrix.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645576484","text":"my_foods = ['pizza', 'falafel', 'carrot cake']\nfriend_foods = my_foods[:]\n\nmy_foods.append('cannoli')\nfriend_foods.append('ice cream')\n\nfor foods in my_foods:\n\tprint(foods)\n\t\nfor foods in friend_foods:\n\tprint(foods)\n","sub_path":"Chapter4/4-12_more_loops.py","file_name":"4-12_more_loops.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530843651","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport fnmatch\nimport os\nimport time\nstart_time = time.time()\ndirpath1 = \"/home/routray/Desktop/test/slice_output/6/\"\ndirpath = \"/home/routray/Desktop/test/slice_output/7/\"\ncount=len(fnmatch.filter(os.listdir(dirpath), '*.png'))\ncounter=0\n\ndef make_ax(grid=False):\n    fig = plt.figure()\n    ax = fig.gca(projection='3d')\n    ax.set_xlabel(\"x\")\n    ax.set_ylabel(\"y\")\n    ax.set_zlabel(\"z\")\n    ax.grid(grid)\n    return ax\nprint(\"If you want to give colors to voxels, follow the rules below \\n\\r To give a single color to voxels press 0 \\n \\r To color by layers press 1\\n\\r To give different colors to voxels press 2.\\n\\r To give different colors to half of the structure press 3. \")\ndif = int(input(\" Please give the input to proceed: \"))\nprint(\"Your decision: \", dif)\n\n# read image into matrix.\n#image = cv2.imread(\"out_test/output001.png\")\n#m = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# reading images into matrix.\nprint(\"No.of Slices:\", count)\nif count > 1000:\n\timage = cv2.imread(\"slice_output/output0001.png\")\n\tm = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nelif count > 100:\n\timage = cv2.imread(\"slice_output/output001.png\")\n\tm = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nelif count > 10:\n\timage = cv2.imread(\"slice_output/output01.png\")\n\tm = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nelse:\n\timage = cv2.imread(\"slice_output/output1.png\")\n\tm = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# get image properties.\n#h,w,bpp = np.shape(m)\nh,w = np.shape(m)\nprint(np.shape(m))\n#k=[]\n# print pixel value\n#y = 2\n#x = 0\n#print(m[y][x])\nl = np.zeros(shape=(h,w))\n#Iterate all image slides output00.png to output50.png\nfor py in range(0,h):\n    for px in range(0,w):\n        l[py][px]=int(m[py][px])  # np.int was deprecated and later removed from NumPy\n        #print(m[py][px])\npy=0\npx=0\nn_images=count-1\n#print(m[y])\ng = np.zeros(shape=(h,w))\nk = np.zeros(shape=(count,h,w))\ncolors = np.array([[['#ffffffff']*w]*h]*count)\n\nk1=np.zeros(shape=(w))\nk2=np.zeros(shape=(h,w))\nfor j in range(0,count):\n\t# iterate over the entire image.\n\n\ti=count-j-1\n\tif count >= 100:\n\t\tif i<10:\n\t\t\timage_name = \"output00\"+str(i)+\".png\"\n\t\telif i<100:\n\t\t\timage_name = \"output0\"+str(i)+\".png\"\n\t\telse:\n\t\t\timage_name = \"output\"+str(i)+\".png\"\n\telif count>=10:\n\t\tif i<10:\n\t\t\timage_name = \"output0\"+str(i)+\".png\"\n\t\telse:\n\t\t\timage_name = \"output\"+str(i)+\".png\"\n\telse:\n\t\timage_name = \"output\"+str(i)+\".png\"\n\n\timage = cv2.imread(\"slice_output/\"+image_name)\n\tprint(image_name)\n\tprint(i)\n\tm = cv2.cvtColor(image, 
cv2.COLOR_BGR2GRAY)\n\t#k2=[]\n\tfor py in range(0,h):\n\t\t#k1=[]\n\t\t#print(k1[0])\n\t\tfor px in range(0,w):\n\t\t\tif (m[py][px]==255):\n\t\t\t\t#x=1,y=1\n\t\t\t\tg[py][px]=g[py][px]+1\n\t\t\t\t#(j*py*px)%2\n\t\t\t\t#counter%2 for voxel\n\t\t\t\t#colors[j][py][]=str('#ff00ffff')\n\t\t\t\tif dif==1:\n\t\t\t\t\tc=j%2\n\t\t\t\telif dif==2:\n\t\t\t\t\tc=counter%2\n\t\t\t\telif dif==3:\n\t\t\t\t\tif j < count/2:\n\t\t\t\t\t\tc=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tc=0\n\t\t\t\telse:\n\t\t\t\t\tc=1\n\n\t\t\t\tif c==1:\n\t\t\t\t\t#if dif==1:\n\t\t\t\t\tcolors[j][py][px]=str('#00ffffff')\n\t\t\t\t\tk[j][py][px]=int(1)\n\t\t\t\telse:\n\t\t\t\t\t#if dif==1:\n\t\t\t\t\tcolors[j][py][px]=str('#ff00ffff')\n\t\t\t\t\t#colors[j][py][px]=str('#ff00ffff')\n\t\t\t\t\tk[j][py][px]=int(2)\n\t\t\t\tcounter=counter+1\n\t\t\telse:\n\t\t\t\tk[j][py][px]=int(0)\t\t\n\t\tprint(k[j])\n\t\t#k2[py]=k1\n\t#k[j]=k2\n\t#filled = np.array(k)\n\t#ax = make_ax(True)\n\t#print(ax)\n\t#ax.voxels(filled,facecolors=colors, edgecolors='gray')\n\t#print(\"k:\\n\")\n\t#print(k)\n\t\n#print(k)\nprint(\"Total no.of Voxels:\")\nprint(count*h*w)\n#print(\"Your decision:\",dif)\n\n#print(m[y][x])\n#print(len(k))\n#print(k)\n#print(len(k[0]))\n#print(len(k))\n#print(g)\n#print(k[1])\n#print(\"Filled Voxel:\")\n#print(counter)\n#colors=explode(colors)\n#print(colors[j])\nfilled = np.array(k)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nax = make_ax(True)\nprint(ax)\nax.voxels(filled,facecolors=colors, edgecolors='gray')\n\nplt.show()\n\n\n","sub_path":"voxel_testing.py","file_name":"voxel_testing.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"477113340","text":"from dataset import DataBank\r\nfrom utils_for_wp import file_abs_path, DataPacker, create_mask\r\nfrom eval import update_with_gps\r\nfrom pathlib import Path\r\nfrom create_gt_gps_dist import create_dist\r\nimport argparse\r\n\r\nif __name__ == '__main__':\r\n # wp_reid_dataset = DataBank(minframes=3)\r\n\r\n parser = argparse.ArgumentParser(description=\"GPS\")\r\n parser.add_argument('--exp_name', type=str)\r\n parser.add_argument('--k', default=9, type=int)\r\n parser.add_argument('--delta', default=74, type=int)\r\n parser.add_argument('--iters', default=5, type=int)\r\n args = parser.parse_args()\r\n\r\n print('MTMCT')\r\n exp_name = args.exp_name\r\n k = args.k\r\n delta = args.delta\r\n iters = args.iters\r\n\r\n root = Path('/data4/shensj/datasets/my_files/gps/GPSReID/MOT/GPSReID/crop_images/')\r\n file_path = root / exp_name / 'gt_gps_dist.pkl'\r\n if not file_path.is_file():\r\n create_dist(exp_name)\r\n mask_path = root / exp_name / 'mask_for_timestamp.pkl'\r\n if not mask_path.is_file():\r\n create_mask(mask_path)\r\n mask_for_timestamp = DataPacker.load(mask_path)\r\n distmat = DataPacker.load(root / exp_name / 'feature_origin.pkl')['g2g_distmat']\r\n # m, n = distmat.shape\r\n # for i in range(m):\r\n # for j in range(n):\r\n # if mask_for_timestamp[i][j] == 1 and i != j:\r\n # distmat[i][j] = 1e6\r\n gt_gps_dist = DataPacker.load(root / exp_name / 'gt_gps_dist.pkl')\r\n update_with_gps(distmat, gt_gps_dist, k=k, delta=delta, iters=iters, root=root / exp_name, mask=mask_for_timestamp)","sub_path":"src/tracker/update_with_gps/WP-ReID/main_debug.py","file_name":"main_debug.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"210297136","text":"from 
datetime import datetime\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom django.core.exceptions import ValidationError\n\nfrom ..models import Delivery\nfrom ..models import deliverytypes\nfrom ..testhelper import TestHelper\n\nclass TestDelivery(TestCase, TestHelper):\n    def setUp(self):\n        TestHelper.set_memory_deliverystore()\n\n    def _create_testdata(self):\n        self.add(nodes=\"uio:admin(uioadmin).ifi:admin(ifiadmin)\",\n                 subjects=[\"inf1100\"],\n                 periods=[\"period1:admin(teacher1):begins(-5):ends(10)\"],\n                 assignments=[\"assignment1:pub(60)\"],\n                 assignmentgroups=[\"g1:candidate(student1):examiner(examiner1)\",\n                                   \"g2:candidate(student2):examiner(examiner2)\",\n                                   \"g3:candidate(student3,student2):examiner(examiner1,examiner2,examiner3)\",\n                                   \"g4:candidate(student4):examiner(examiner3)\"],\n                 deadlines=['d1:ends(1)'])\n        self.goodFile = {\"good.py\": \"print awesome\"}\n        self.add_delivery(\"inf1100.period1.assignment1.g1\", self.goodFile)\n        self.add_delivery(\"inf1100.period1.assignment1.g2\", self.goodFile)\n        self.add_delivery(\"inf1100.period1.assignment1.g3\", self.goodFile)\n\n    def test_where_is_admin(self):\n        self._create_testdata()\n        teacher1 = User.objects.get(username='teacher1')\n        self.assertEquals(Delivery.where_is_admin(teacher1).count(), 3)\n        delivery0 = self.inf1100_period1_assignment1_g1_d1.deliveries.all()[0]\n        delivery0.successful = False\n        delivery0.save()\n        self.assertEquals(Delivery.where_is_admin(teacher1).count(), 2)\n\n    def test_published_where_is_examiner(self):\n        self._create_testdata()\n        examiner1 = User.objects.get(username='examiner1')\n        deliveries = Delivery.published_where_is_examiner(examiner1)\n        self.assertEquals(deliveries.count(), 2)\n        delivery0 = deliveries.all()[0]\n        delivery0.successful = False\n        delivery0.save()\n        self.assertEquals(Delivery.published_where_is_examiner(examiner1).count(), 1)\n\n    def test_delivery(self):\n        self._create_testdata()\n        assignmentgroup = self.inf1100_period1_assignment1_g3\n        d = self.add_delivery(\"inf1100.period1.assignment1.g3\", self.goodFile,\n                              time_of_delivery=datetime(2005, 1, 1))\n        self.assertEquals(d.deadline.assignment_group, assignmentgroup)\n        self.assertTrue(d.successful)\n        self.assertEquals(d.number, 2)\n        self.assertEquals(d.time_of_delivery, datetime(2005, 1, 1))  # assertTrue(x, msg) never fails; compare explicitly\n\n        # TODO find a graceful way to handle this error:\n        d.number = 1\n        self.assertRaises(IntegrityError, d.save)  # pass the callable; calling d.save() here would raise too early\n\n    def test_noalias_missing_feedback(self):\n        self._create_testdata()\n        deadline = self.inf1100_period1_assignment1_g3_d1\n        delivery = deadline.deliveries.create(successful=True,\n                                              delivery_type=deliverytypes.ALIAS,\n                                              alias_delivery=None)\n        with self.assertRaises(ValidationError):\n            delivery.clean()\n\n    def test_noalias_with_feedback(self):\n        self._create_testdata()\n        deadline = self.inf1100_period1_assignment1_g3_d1\n        delivery = deadline.deliveries.create(successful=True,\n                                              delivery_type=deliverytypes.ALIAS,\n                                              alias_delivery=None)\n        delivery.feedbacks.create(\n            grade = 'A',\n            is_passing_grade = True,\n            points = 100,\n            rendered_view = '',\n            saved_by = self.examiner1\n        )\n        delivery.clean()\n\n    def test_with_alias(self):\n        self._create_testdata()\n        deadline = self.inf1100_period1_assignment1_g3_d1\n        otherdelivery = deadline.deliveries.create(successful=True)\n        delivery = deadline.deliveries.create(successful=True,\n                                              delivery_type=deliverytypes.ALIAS,\n                                              alias_delivery=otherdelivery)\n        delivery.clean()\n\n    def test_delete_delivered_by_candidate(self):\n        self._create_testdata()\n        
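        # Deleting the candidate should leave the delivery in place but reset
        # delivered_by to None; the delivery is re-fetched from the DB after
        # each step so the assertions do not run against a stale in-memory object.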
delivery = self.add_delivery(\"inf1100.period1.assignment1.g2\", self.goodFile)\n delivery = Delivery.objects.get(id=delivery.id) # Re-get from DB just to be doubly sure we are using the same delivery below\n self.assertEquals(delivery.delivered_by.student, self.student2)\n group = self.inf1100_period1_assignment1_g2\n group.candidates.all()[0].delete()\n delivery = Delivery.objects.get(id=delivery.id) # Re-get from DB\n self.assertEquals(delivery.delivered_by, None)\n\n def test_delivery_numbering(self):\n self._create_testdata()\n deadline = self.inf1100_period1_assignment1_g1_d1\n self.assertEquals(deadline.deliveries.count(), 1)\n self.assertEquals(deadline.deliveries.all()[0].number, 1)\n d2 = Delivery(deadline=deadline,\n delivered_by=deadline.assignment_group.candidates.all()[0])\n d2.save()\n d3 = Delivery(deadline=deadline,\n delivered_by=deadline.assignment_group.candidates.all()[0])\n d3.save()\n self.assertEquals(d2.number, 0)\n self.assertEquals(d3.number, 0)\n d3.successful = True\n d3.save()\n self.assertEquals(d3.number, 2)\n d2.successful = True\n d2.save()\n self.assertEquals(d2.number, 3)\n\n def test_published_where_is_candidate(self):\n self._create_testdata()\n # Add 2 on g1\n d = self.add_delivery(\"inf1100.period1.assignment1.g1\", self.goodFile)\n d = self.add_delivery(\"inf1100.period1.assignment1.g1\", self.goodFile)\n # Add 3 on g2\n d = self.add_delivery(\"inf1100.period1.assignment1.g2\", self.goodFile)\n d = self.add_delivery(\"inf1100.period1.assignment1.g2\", self.goodFile)\n d = self.add_delivery(\"inf1100.period1.assignment1.g2\", self.goodFile)\n # Add 2 on g3\n d = self.add_delivery(\"inf1100.period1.assignment1.g3\", self.goodFile)\n d = self.add_delivery(\"inf1100.period1.assignment1.g3\", self.goodFile)\n\n self.assertEquals(Delivery.published_where_is_candidate(self.student1).count(), 3)\n self.assertEquals(Delivery.published_where_is_candidate(self.student2).count(), 7)\n self.assertEquals(Delivery.published_where_is_candidate(self.student3).count(), 3)\n self.assertEquals(Delivery.published_where_is_candidate(self.student4).count(), 0)\n\n delivery = Delivery.published_where_is_candidate(self.student3)[0]\n delivery.successful = False\n delivery.save()\n self.assertEquals(Delivery.published_where_is_candidate(self.student3).count(), 2)\n\n\n def test_hard_deadline(self):\n self._create_testdata()\n self.add_to_path('uni.ifi;inf1100.period1.assignment0.g1:candidate(student1):examiner(examiner1).d0:ends(1)')\n\n # Soft deadlines work without any errors\n deadline = self.inf1100_period1_assignment0_g1_d0\n self.assertTrue(deadline.deadline < datetime.now())\n self.add_delivery(\"inf1100.period1.assignment0.g1\", self.goodFile)\n\n # Hard deadlines\n assignment = self.inf1100_period1_assignment0\n assignment.deadline_handling = 1\n assignment.save()\n with self.assertRaises(ValidationError):\n self.add_delivery(\"inf1100.period1.assignment0.g1\", self.goodFile)\n\n\n def test_override_autoset(self):\n self.add(nodes=\"uni\",\n subjects=[\"sub\"],\n periods=[\"p1\"],\n assignments=['a1'])\n self.add_to_path('uni;sub.p1.a1.g1:candidate(student1):examiner(examiner1).d1')\n d1 = self.sub_p1_a1_g1_d1\n\n time_of_delivery = datetime(2005, 1, 1, 0, 0, 0)\n delivery = Delivery(deadline=d1,\n number=10,\n successful=False,\n time_of_delivery=time_of_delivery)\n delivery.full_clean()\n delivery.save(autoset_number=False,\n autoset_time_of_delivery=False)\n self.assertEquals(delivery.number, 10)\n self.assertEquals(delivery.successful, False)\n 
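        # With both autoset flags disabled, save() must keep the explicitly
        # supplied number and time_of_delivery instead of regenerating them,
        # which is what these assertions verify.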
self.assertEquals(delivery.time_of_delivery, time_of_delivery)\n\n\n def _create_copydata(self):\n self.add(nodes=\"uni\",\n subjects=[\"sub\"],\n periods=[\"p1\"],\n assignments=['a1'])\n self.add_to_path('uni;sub.p1.a1.g1:candidate(student1):examiner(examiner1).d1')\n\n # Create delivery for alias_delivery\n self.add_to_path('uni;sub.p_old.a1.g1:candidate(student1):examiner(examiner1).d1')\n old_delivery = self.add_delivery(\"sub.p_old.a1.g1\", {\"secondtry.py\": \"print second\"})\n\n # Make a delivery without any of the default/generated values, so when\n # we check that they are copied, we get no generate stuff\n g1 = self.sub_p1_a1_g1\n d1 = self.sub_p1_a1_g1_d1\n time_of_delivery = datetime(2005, 1, 1, 0, 0, 0)\n delivery = Delivery(deadline=d1,\n number=10,\n successful=False,\n delivery_type=1,\n delivered_by=g1.candidates.all()[0],\n alias_delivery=old_delivery,\n time_of_delivery=time_of_delivery)\n delivery.full_clean()\n delivery.save(autoset_number=False,\n autoset_time_of_delivery=False)\n delivery.add_file('test.txt', ['Hello', ' world'])\n return delivery, old_delivery\n\n\n def test_copy_own_attributes(self):\n delivery, old_delivery = self._create_copydata()\n self.add_to_path('uni;sub.p1.a1.g2:candidate(student2).d1')\n newdeadline = self.sub_p1_a1_g2_d1\n copy = delivery.copy(newdeadline)\n\n self.assertEquals(copy.deadline, newdeadline)\n self.assertEquals(copy.delivery_type, 1)\n self.assertEquals(copy.number, 10)\n self.assertEquals(copy.successful, False)\n self.assertEquals(copy.time_of_delivery, datetime(2005, 1, 1, 0, 0, 0))\n self.assertEquals(copy.delivered_by.student, self.student1)\n self.assertEquals(copy.alias_delivery, old_delivery)\n\n # Check copy_of and the virtual reverse relationship\n self.assertEquals(copy.copy_of, delivery)\n self.assertEquals(list(delivery.copies.all()),\n [copy])\n\n def test_copy_filemetas(self):\n delivery, old_delivery = self._create_copydata()\n self.add_to_path('uni;sub.p1.a1.g2:candidate(student2).d1')\n newdeadline = self.sub_p1_a1_g2_d1\n copy = delivery.copy(newdeadline)\n\n self.assertEquals(delivery.filemetas.count(), 1)\n self.assertEquals(copy.filemetas.count(), 1)\n copied_filemeta = copy.filemetas.all()[0]\n self.assertEquals(copied_filemeta.get_all_data_as_string(),\n 'Hello world')\n\n\n def test_copy_feedbacks(self):\n delivery, old_delivery = self._create_copydata()\n self.add_feedback(delivery=delivery,\n verdict={'grade': 'F', 'points':10, 'is_passing_grade':False},\n rendered_view='This was bad',\n timestamp=datetime(2005, 1, 1, 0, 0, 0))\n self.add_feedback(delivery=delivery,\n verdict={'grade': 'C', 'points':40, 'is_passing_grade':True},\n rendered_view='Better',\n timestamp=datetime(2010, 1, 1, 0, 0, 0))\n\n self.add_to_path('uni;sub.p1.a1.g2:candidate(student2).d1')\n newdeadline = self.sub_p1_a1_g2_d1\n self.assertEquals(delivery.feedbacks.count(), 2)\n copy = delivery.copy(newdeadline)\n\n self.assertEquals(delivery.feedbacks.count(), 2)\n feedbacks = copy.feedbacks.order_by('save_timestamp')\n self.assertEquals(len(feedbacks), 2)\n self.assertEquals(feedbacks[0].grade, 'F')\n self.assertEquals(feedbacks[0].points, 10)\n self.assertEquals(feedbacks[0].is_passing_grade, False)\n self.assertEquals(feedbacks[0].save_timestamp, datetime(2005, 1, 1, 0, 0, 0))\n self.assertEquals(feedbacks[0].rendered_view, 'This was bad')\n\n self.assertEquals(feedbacks[1].grade, 'C')\n self.assertEquals(feedbacks[1].points, 40)\n self.assertEquals(feedbacks[1].is_passing_grade, True)\n 
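        # copy() is expected to duplicate the full feedback history: both
        # feedback rows should exist on the copy, ordered by save_timestamp,
        # with grade, points and rendered_view preserved verbatim.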
self.assertEquals(feedbacks[1].save_timestamp, datetime(2010, 1, 1, 0, 0, 0))\n        self.assertEquals(feedbacks[1].rendered_view, 'Better')\n","sub_path":"src/devilry/devilry/apps/core/tests/delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":12602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"210297136","text":"'''\r\n<Simple variable use>\r\n 3. Iteration\r\n 3-5. Population calculation program (2020 population: 49,550,000; growth rate -0.02)\r\n - In which year does the population fall below 0?\r\n 1626035 이주호\r\n'''\r\n\r\n# initialize variables\r\npop = 49550000 # population\r\nratio = -0.02 # population growth rate\r\nn = 0 # number of years elapsed since 2020\r\n\r\n## calculation\r\nwhile(pop > 1):\r\n    pop = pop + (pop * ratio)\r\n    n = n + 1\r\n## end of calculation\r\n\r\n# print the result\r\nprint(\"The population falls below 1 in the year %d.\" % (2020 + n))\r\n","sub_path":"paper/Paper03-05.py","file_name":"Paper03-05.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27620422","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 21 10:14:11 2018\r\n\r\n@author: NTPU\r\n\"\"\"\r\nimport time\r\nfrom mcpi.minecraft import Minecraft\r\nHank=Minecraft.create()\r\n\r\n\r\n\r\nx,y,z=Hank.player.getTilePos()\r\n\r\nwidth=80\r\nlength=40\r\nheight=8\r\n\r\nblock=5\r\nair=0\r\n\r\nHank.setBlocks(x,y,z,x+width,y+height,z+length,block)\r\nHank.setBlocks(x+1,y+1,z+1,x+width-1,y+height-1,z+length-1,air)\r\n","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"262785935","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages  # messages.error is used below but was never imported\nfrom . import models\nfrom .models import User, Wish\nimport bcrypt\n\n\ndef home_page(request):\n    return render(request, \"index.html\")\n\n\ndef register(request):\n    if request.method == 'POST':\n        name = request.POST['name']\n        username = request.POST['username']\n        errors = models.register_validator(request.POST)\n        if len(errors) > 0:\n            for key, value in errors.items():\n                messages.error(request, value)\n            return redirect('/')\n        user = models.add_user(request.POST)\n        if 'user' not in request.session:\n            request.session['username'] = username\n            request.session['name'] = name\n            request.session['user_id'] = user.id\n        return redirect('/welcome/'+str(user.id))\n\n\ndef login(request):\n    if request.method == 'POST':\n        username = request.POST['username']\n        user = models.get_user(username)\n        print(user)\n        if bcrypt.checkpw(request.POST['password'].encode(), user.password.encode()):\n            if 'user' not in request.session:\n                request.session['username'] = username\n                request.session['name'] = user.name  # 'name' was undefined in this scope\n                request.session['user_id'] = user.id\n            return redirect('/welcome/'+str(user.id))\n        errors = {\n            'login': 'Incorrect username or password'\n        }\n        for key, value in errors.items():\n            messages.error(request, value)\n        return redirect('/')\n\n\ndef logout(request):\n    del request.session['username']\n    del request.session['name']\n    del request.session['user_id']\n    return redirect('/')\n\n\ndef welcome(request, id):\n    all_users = User.objects.all()\n    all_wishes = Wish.objects.all()\n    user = User.objects.get(id=id)\n    all_user_wishes = user.wishes.all()\n    context = {\n        \"all_users\": all_users,\n        \"all_wishes\": all_wishes,\n        \"user\": user,\n        \"all_user_wishes\": all_user_wishes\n    }\n    return render(request, 'welcome.html', context)\n\n\ndef new_wish_page(request):\n    return render(request, 'new_wish.html')\n\n\ndef new_wish(request, user_id):\n    if request.method == 'POST':\n        
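        # Same validation flow as register(): collect errors from the model
        # layer, flash them with messages.error, and redirect back to the form
        # when anything is invalid.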
title = request.POST['title']\n        desc = request.POST['desc']\n        errors = models.wish_validator(request.POST)\n        if len(errors) > 0:\n            for key, value in errors.items():\n                messages.error(request, value)\n            return redirect('/new_wish/'+str(user_id))\n        new_wish = models.add_new_wish(user_id, title, desc)\n    return redirect('/welcome/'+str(user_id))  # was returning a bare string instead of an HTTP redirect\n\n\ndef add(request, user_id, wish_id):\n    models.add_wish(user_id, wish_id)\n    return redirect('/welcome/'+str(user_id))\n\n\ndef remove(request, user_id, wish_id):\n    models.remove_wish(user_id, wish_id)\n    return redirect('/welcome/'+str(user_id))\n","sub_path":"Django/wishlist/wishlist_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485278378","text":"import cv2\r\n\r\ndef showbitmap(row,col,bg,h,w):\r\n    for y in range(row,row+h):\r\n        print(str('{:0>2d}') .format(y)+\":\" ,end=\"\")\r\n        for x in range(col,col+w):\r\n            print(bg[y][x],end=\",\")\r\n        print()\r\n    print() \r\n    \r\ndef area(row, col):\r\n    global nn\r\n    if bg[row][col] != 255:\r\n        return\r\n    bg[row][col] = lifearea # record the number of this life area \r\n    if col>1: # left\r\n        if bg[row][col-1]==255:\r\n            nn +=1\r\n            area(row,col-1)\r\n    if col< w-1: # right\r\n        if bg[row][col+1]==255:\r\n            nn +=1\r\n            area(row,col+1) \r\n    if row>1: # above\r\n        if bg[row-1][col]==255:\r\n            nn+=1 \r\n            area(row-1,col)\r\n    if row>> y_m(1)\r\n    5\r\n    \"\"\"\r\n    return (x**4+4**x)\r\nx=int(input('Enter x = '))\r\nprint('Function = ', y_m(x))\r\nif __name__==\"__main__\":\r\n    import doctest\r\n    doctest.testmod()\r\n","sub_path":"doctest_7.5.py","file_name":"doctest_7.5.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"276144612","text":"\nimport sqlite3\nimport numpy as np\n\n# initialize the SQLite database connection, get list of existing tables\ncon = sqlite3.connect('db_wiki.sqlite')\ncursor = con.cursor()\ntitles = [x[0] for x in cursor.execute(\"\"\"SELECT title FROM wiki_attrs\"\"\")]\ncon.close()\nprint('{} pages in the database'.format(len(titles)))","sub_path":"data-incubator 2016/wikipedia/db_count.py","file_name":"db_count.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"371353693","text":"from keras.datasets import mnist\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Activation\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.optimizers import Adam\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.layers.core import Reshape, Dense, Dropout, Flatten\nfrom keras import initializers\n\n# exp5: batch size 128, add two layers to generator\n\n# Load data\n(X_train, _), (_, _) = mnist.load_data()\n\n# Preprocessing\nX_train = X_train.reshape(-1, 784)\nX_train = X_train.astype('float32')/255  # note: with a tanh generator, scaling to [-1, 1] is the more usual pairing\n\n# Set the dimensions of the noise\nz_dim = 100\n\n# Optimizer\nadam = Adam(lr=0.0002, beta_1=0.5)\n\n# #demo G\n# g = Sequential()\n# g.add(Dense(256, input_dim=z_dim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))\n# g.add(LeakyReLU(0.2))\n# g.add(Dense(512))\n# g.add(LeakyReLU(0.2))\n# g.add(Dense(1024))\n# g.add(LeakyReLU(0.2))\n# g.add(Dense(784, activation='tanh'))\n# g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n\n# Generator\ng = Sequential()\ng.add(Dense(units=256,input_dim = 
z_dim))\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(units=512))\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(units=1024))\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(784, activation='tanh'))\ng.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n\n\n\n# Discrinimator\nd = Sequential()\nd.add(Dense(1024, input_dim=784, kernel_initializer=initializers.RandomNormal(stddev=0.02)))\nd.add(LeakyReLU(0.2))\nd.add(Dropout(0.3))\nd.add(Dense(512))\nd.add(LeakyReLU(0.2))\nd.add(Dropout(0.3))\nd.add(Dense(256))\nd.add(LeakyReLU(0.2))\nd.add(Dropout(0.3))\nd.add(Dense(1, activation='sigmoid'))\nd.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n\n# GAN\nd.trainable = False\ninputs = Input(shape=(z_dim, ))\nhidden = g(inputs)\noutput = d(hidden)\ngan = Model(inputs, output)\ngan.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n\n# Training\ndef train(epochs=1, plt_frq=1, BATCH_SIZE=128):\n batchCount = int(X_train.shape[0] / BATCH_SIZE)\n print('Epochs:', epochs)\n print('Batch size:', BATCH_SIZE)\n print('Batches per epoch:', batchCount)\n\n\n for i in range(batchCount):\n # Create a batch by drawing random index numbers from the training set\n image_batch = X_train[np.random.randint(0, X_train.shape[0], size=BATCH_SIZE)]\n # Create noise vectors for the generator\n noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))\n\n # Generate the images from the noise\n generated_images = g.predict(noise)\n X = np.concatenate((image_batch, generated_images))\n\n # Create labels\n y = np.zeros(2 * BATCH_SIZE)\n y[:BATCH_SIZE] = 1\n\n # Train discriminator on generated images\n d.trainable = True\n d_loss = d.train_on_batch(X, y)\n\n\n # Train generator\n noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))\n y2 = np.ones(BATCH_SIZE)\n d.trainable = False\n g_loss = gan.train_on_batch(noise, y2)\n\n if i == batchCount-1:\n return [g_loss[0],d_loss[0]]\n\n\nepochCount = 200\ng_loss_arr = []\nd_loss_arr = []\n\nfor i in range(epochCount):\n loss_arr = train(i)\n g_loss_arr.append(loss_arr[0])\n d_loss_arr.append(loss_arr[1])\n\n# plot loss\nLossFigure = plt.figure()\nplt.xlabel(u'Epochs')\n\nplt.ylabel(u'Loss')\n\nmy_x_ticks = np.arange(0,epochCount,20)\nplt.xticks(my_x_ticks)\n\nplt.plot(np.arange(0,epochCount),g_loss_arr)\nplt.plot(np.arange(0,epochCount),d_loss_arr)\nplt.legend([\"g__training_loss\",\"d_training_loss\"],loc=2)\nLossFigure.savefig(\"LossFigures/exp5_loss.png\")\nplt.show()\n\n#================================================\n# 测试阶段\n#================================================\n# Generate images\nnp.random.seed(504)\nh = w = 28\nnum_gen = 25\n\nz = np.random.normal(size=[num_gen, z_dim])\ngenerated_images = g.predict(z)\n\n# plot of generation\nn = np.sqrt(num_gen).astype(np.int32)\nI_generated = np.empty((h*n, w*n))\nfor i in range(n):\n for j in range(n):\n I_generated[i*h:(i+1)*h, j*w:(j+1)*w] = generated_images[i*n+j, :].reshape(28, 28)\n\nGeneratedImages = plt.figure(figsize=(4, 4))\nplt.axis(\"off\")\nplt.imshow(I_generated, cmap='gray')\nGeneratedImages.savefig(\"GeneratedImages/exp5_res.png\")\nplt.show()\n\n\n# # serialize model to JSON\n# model_json = g.to_json()\n# with open(\"generator.json\", \"w\") as json_file:\n# json_file.write(model_json)\n# # serialize weights to HDF5\n# g.save_weights(\"generator.h5\")\n# print(\"Saved model to disk\")\n# for i in range(1,20):\n# 
train(epochs=i)","sub_path":"exp5.py","file_name":"exp5.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330962825","text":"\"\"\"\nspark.py\n~~~~~~~~\nModule containing helper function for use with Apache Spark\n\"\"\"\n\nfrom pyspark.sql import SparkSession\nfrom utils import logging\nfrom os import listdir, path\nimport json\nfrom pyspark import SparkFiles\n\n\ndef start_spark(app_name='etl_app', master='local[*]',\n                files=[], spark_config={}):\n\n    spark_builder = (\n        SparkSession\n        .builder\n        .master(master)  # was hard-coded to \"local\", silently ignoring the master argument\n        .appName(app_name))\n\n    spark_builder.config('spark.files', files)\n    spark_builder.config(\"spark.sql.autoBroadcastJoinThreshold\", -1)\n\n    for key, val in spark_config.items():\n        spark_builder.config(key, val)\n\n    # create session and retrieve Spark logger object\n    spark_sess = spark_builder.getOrCreate()\n    spark_logger = logging.Log4j(spark_sess)\n\n    # get config file if sent to cluster with --files\n    spark_files_dir = SparkFiles.getRootDirectory()\n    config_files = [filename\n                    for filename in listdir(spark_files_dir)\n                    if filename.endswith('config.json')]\n\n    config_dict = None  # default so the return below cannot raise NameError\n    try:\n        path_to_config_file = path.join(spark_files_dir, config_files[0])\n        with open(path_to_config_file, 'r') as config_file:\n            config_dict = json.load(config_file)\n    except Exception as e:\n        spark_logger.warn(\"no config file found\" + str(e))\n\n    return spark_sess, spark_logger, config_dict","sub_path":"utils/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334287963","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 27 19:05:38 2018\n@author: xiaoqian\n\"\"\"\nimport os\nimport os.path as op\nfrom glob import glob\nimport re\nimport pandas as pd\nimport numpy as np\nimport nibabel as ni\nimport nilearn\nimport subprocess\nfrom sklearn.covariance import GraphLassoCV\nimport shutil\nfrom scipy import stats\nfrom scipy.special import comb\nfrom scipy.stats import pearsonr\n\nProjDir = '/Users/xiaoqian/Projects/aTBS'\ndataDir = ProjDir\nbehavDir = os.path.join(dataDir, 'source/behavioral')\nresultDir = os.path.join(dataDir, 'cor_addROI')\nsub_list = np.array([1,2,3,4,11,14,16,18,19,21,24,26,28,29])\n# data expected to be in scope before the loop below:\n#FC\n#mat_baseline\n#mat_1w\n#disMat_1wBaseline\n#behav\n#HAMD\n#MADRS\n\n#load behavioral data\nbaseline_file = os.path.join(behavDir, 'Behavior_baseline.csv')\nb_baseline = pd.read_csv(baseline_file)\nb1w_file = os.path.join(behavDir, 'Behavior_1w.csv')\nb_1w = pd.read_csv(b1w_file)\nbase_hamd = b_baseline.loc[:,\"hamd21_total\"]\nw1_hamd = b_1w.loc[:,\"hamd21_total\"]\nbase_madrs = b_baseline.loc[:,\"madrs_score\"]  # was hamd21_total, a copy-paste slip\nw1_madrs = b_1w.loc[:,\"madrs_score\"]\ndiff_hamd = b_baseline.loc[:,\"hamd21_total\"] - b_1w.loc[:,\"hamd21_total\"]\ndiff_madrs = b_baseline.loc[:,\"madrs_score\"] - b_1w.loc[:,\"madrs_score\"]\n\nbaseFC_baseH = pd.DataFrame()\nbaseFC_baseM = pd.DataFrame()\nbaseFC_1wH = pd.DataFrame()\nbaseFC_1wM = pd.DataFrame()\nbaseFC_diffH = pd.DataFrame()\nbaseFC_diffM = pd.DataFrame()\nw1FC_baseH = pd.DataFrame()\nw1FC_baseM = pd.DataFrame()\nw1FC_1wH = pd.DataFrame()\nw1FC_1wM = pd.DataFrame()\nw1FC_diffH = pd.DataFrame()\nw1FC_diffM = pd.DataFrame()\ndiffFC_baseH = pd.DataFrame()\ndiffFC_baseM = pd.DataFrame()\ndiffFC_1wH = pd.DataFrame()\ndiffFC_1wM = pd.DataFrame()\ndiffFC_diffH = pd.DataFrame()\ndiffFC_diffM = pd.DataFrame()\nfor i in 
range(mat_1w.shape[0]):\n base_FC = pd.DataFrame(np.transpose(mat_baseline[i,:,:]))\n w1_FC = pd.DataFrame(np.transpose(mat_1w[i,:,:]))\n diff_FC = w1_FC - base_FC \n baseFC_baseH[i] = base_FC.corrwith(base_hamd)\n baseFC_baseM[i] = base_FC.corrwith(base_madrs)\n baseFC_1wH[i] = base_FC.corrwith(w1_hamd)\n baseFC_1wM[i] = base_FC.corrwith(w1_madrs)\n baseFC_diffH[i] = base_FC.corrwith(diff_hamd)\n baseFC_diffM[i] = base_FC.corrwith(diff_madrs)\n w1FC_baseH[i] = w1_FC.corrwith(base_hamd)\n w1FC_baseM[i] = w1_FC.corrwith(base_madrs)\n w1FC_1wH[i] = w1_FC.corrwith(w1_hamd)\n w1FC_1wM[i] = w1_FC.corrwith(w1_madrs)\n w1FC_diffH[i] = w1_FC.corrwith(diff_hamd)\n w1FC_diffM[i] = w1_FC.corrwith(diff_madrs) \n diffFC_baseH[i] = diff_FC.corrwith(base_hamd)\n diffFC_baseM[i] = diff_FC.corrwith(base_madrs)\n diffFC_1wH[i] = diff_FC.corrwith(w1_hamd)\n diffFC_1wM[i] = diff_FC.corrwith(w1_madrs)\n diffFC_diffH[i] = diff_FC.corrwith(diff_hamd)\n diffFC_diffM[i] = diff_FC.corrwith(diff_madrs)\n\nzbaseFC_baseH = 0.5*(np.log(1+baseFC_baseH)-np.log(1-baseFC_baseH))\nzbaseFC_baseM = 0.5*(np.log(1+baseFC_baseM)-np.log(1-baseFC_baseM))\nzbaseFC_1wH = 0.5*(np.log(1+baseFC_1wH)-np.log(1-baseFC_1wH))\nzbaseFC_1wM = 0.5*(np.log(1+baseFC_1wM)-np.log(1-baseFC_1wM))\nzbaseFC_diffH = 0.5*(np.log(1+baseFC_diffH)-np.log(1-baseFC_diffH))\nzbaseFC_diffM = 0.5*(np.log(1+baseFC_diffM)-np.log(1-baseFC_diffM))\nzw1FC_baseH = 0.5*(np.log(1+w1FC_baseH)-np.log(1-w1FC_baseH))\nzw1FC_baseM = 0.5*(np.log(1+w1FC_baseM)-np.log(1-w1FC_baseM))\nzw1FC_1wH = 0.5*(np.log(1+w1FC_1wH)-np.log(1-w1FC_1wH))\nzw1FC_1wM = 0.5*(np.log(1+w1FC_1wM)-np.log(1-w1FC_1wM))\nzw1FC_diffH = 0.5*(np.log(1+w1FC_diffH)-np.log(1-w1FC_diffH))\nzw1FC_diffM = 0.5*(np.log(1+w1FC_diffM)-np.log(1-w1FC_diffM))\nzdiffFC_baseH = 0.5*(np.log(1+diffFC_baseH)-np.log(1-diffFC_baseH))\nzdiffFC_baseM = 0.5*(np.log(1+diffFC_baseM)-np.log(1-diffFC_baseM))\nzdiffFC_1wH = 0.5*(np.log(1+diffFC_1wH)-np.log(1-diffFC_1wH))\nzdiffFC_1wM = 0.5*(np.log(1+diffFC_1wM)-np.log(1-diffFC_1wM))\nzdiffFC_diffH = 0.5*(np.log(1+diffFC_diffH)-np.log(1-diffFC_diffH))\nzdiffFC_diffM = 0.5*(np.log(1+diffFC_diffM)-np.log(1-diffFC_diffM))\n\nnp.where(zbaseFC_baseH>=0.6)\n","sub_path":"FC/AromaO/FC_behave.py","file_name":"FC_behave.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333079569","text":"\nimport os\nimport sys\nimport numpy as np\nimport math\n\nfrom shapely.geometry import mapping, shape\nimport rasterio\nfrom rasterio.mask import mask\nfrom pygeoprocessing import routing\nimport pandas as pd\nimport geopandas as gpd\nfrom scipy.signal import medfilt\nimport pickle\n\nfrom skimage import img_as_float\nfrom skimage.feature import peak_local_max\nfrom shapely.geometry import Point, Polygon, MultiPolygon\nfrom shapely.ops import cascaded_union\n\n\ndef _raster_mask(input_dem, polygon, out_name):\n \"\"\" mask a raster file along the polygon and saves the new raster\n\n Parameters\n ----------\n input_dem : str\n path to the raster file\n polygon : shapely.geometry.Polygon instance\n outline polygon\n out_name : str\n name of the output raster\n\n Returns\n -------\n path to the output raster\n \"\"\"\n\n output_dem = os.path.join(os.path.dirname(input_dem), out_name+'.tif')\n\n geoms = [mapping(polygon)]\n\n with rasterio.open(input_dem) as src:\n out_image, out_transform = mask(src, geoms, nodata=np.nan, crop=False)\n out_meta = src.meta.copy()\n\n out_meta.update({\"driver\": \"GTiff\",\n 
\"height\": out_image.shape[1],\n \"width\": out_image.shape[2],\n \"nodata\": np.nan,\n \"transform\": out_transform})\n with rasterio.open(output_dem, \"w\", **out_meta) as dest:\n dest.write(out_image)\n\n return output_dem\n\n\ndef _convert_to_polygon(gpd_obj):\n \"\"\"convert geopandas object, which could include Multipolygons to a new\n geopandas object containing only Polygons\n\n Parameters\n ----------\n gpd_obj : gpd.GeoDataFrame\n\n Returns\n -------\n gpd.GeoDataFrame\n \"\"\"\n for i in gpd_obj.index:\n if gpd_obj.loc[i, 'geometry'].type is not 'Polygon':\n geom = gpd_obj.loc[i, 'geometry']\n area = [glac.area for glac in geom]\n gpd_obj.set_value(i, 'geometry', geom[np.argmax(area)])\n for j in range(len(geom)):\n if not j == np.argmax(area) and geom[j].area > 1:\n gpd_obj, bool = _merge_sliver(gpd_obj, geom[j])\n return gpd_obj\n\n\ndef _compactness(polygon):\n \"\"\" check if polygon satisfy glacier compactness (Allen,1998)\n\n Parameters\n ----------\n polygon: shapely.geometry.Polygon instance\n\n Returns\n -------\n bool\n \"\"\"\n coord = np.array(polygon.exterior.coords)\n\n y_min = Point(coord[np.argmin(coord[:, 1])])\n y_max = Point(coord[np.argmax(coord[:, 1])])\n x_min = Point(coord[np.argmin(coord[:, 0])])\n x_max = Point(coord[np.argmax(coord[:, 0])])\n # calculate max distance(perimeter)\n max_dist = y_min.distance(y_max)\n x_dist = x_min.distance(x_max)\n if x_dist > max_dist:\n max_dist = x_dist\n if max_dist*math.pi/polygon.boundary.length > 0.5:\n return True\n else:\n return False\n\n\ndef _compute_altitude(dem, polygon):\n \"\"\" compute altitude range for a polygon\n\n Parameters\n ----------\n dem : str\n path to a DEM file\n polygon : shapely.geometry.Polygon\n polygon of the glacier divide\n Returns\n -------\n float : altitude range\n \"\"\"\n geoms = [mapping(polygon)]\n with rasterio.open(dem) as src:\n out_image, out_transform = mask(src, geoms, nodata=np.nan, crop=False)\n altitude = np.nanmax(out_image)-np.nanmin(out_image)\n return altitude\n\n\ndef _filter_divides(gpd_obj, filter_area, filter_alt_range,\n filter_perc_alt_range):\n \"\"\" filter divides\n\n Parameters\n ----------\n gpd_obj : gpd.GeoDataFrame\n filter_area : bool\n (True: keep a divide only if it's area is not\n smaller than 2% of the largest divide)\n filter_alt_range : bool\n (True: keep a divide only if the absolute\n altitude range of the divide is larger than 100m\n filter_perc_alt_range : bool\n (True: keep a divide only if the altitude range\n of the divide is larger than 10% of the glaciers\n total altitude range\n\n Returns\n -------\n filtered gpd.GeoDataFrame object\n \"\"\"\n\n # initialise nokeep\n nokeep = pd.Series(np.zeros(len(gpd_obj), dtype=np.bool))\n if filter_area is True:\n nokeep = nokeep | (gpd_obj['Perc_Area'] < 0.02)\n if filter_alt_range is True:\n nokeep = nokeep | (gpd_obj['Alt_Range'] < 100)\n if filter_perc_alt_range is True:\n nokeep = nokeep | (gpd_obj['Perc_Alt_Range'] < 0.1)\n\n gpd_obj['keep'] = ~nokeep\n\n if np.sum(gpd_obj['keep']) in [0, 1]:\n # Nothing to do! 
The divide should be ignored\n return gpd_obj, False\n\n while not gpd_obj['keep'].all():\n geom = gpd_obj.loc[~gpd_obj['keep']].iloc[0]\n gpd_obj = gpd_obj.drop(geom.name)\n gpd_obj, bool = _merge_sliver(gpd_obj, geom.geometry)\n return gpd_obj, True\n\n\ndef _check_contain_divides(glacier_poly, id):\n \"\"\"\n check if any object from glacier_poly contains glacier_poly.loc[id,\n 'geometry'] and correct it\n\n Parameters\n ----------\n glacier_poly : gpd.GeoDataFrame\n id : int\n\n Returns\n -------\n gpd.GeoDataFrame\n \"\"\"\n exterior = glacier_poly.copy()\n for i in exterior.index:\n coord = exterior.loc[i, 'geometry'].exterior\n exterior.loc[i, 'geometry'] = Polygon(coord)\n contain = glacier_poly[exterior.contains(glacier_poly.loc[id, 'geometry'])]\n glacier_fid = [(glacier_poly.loc[j].FID) for j in glacier_poly.index]\n for i in contain.index.drop(id):\n if contain.loc[i].FID in glacier_fid:\n to_merge = glacier_poly.loc[id, 'geometry']\n\n glacier_poly = glacier_poly.loc[glacier_poly.index.drop(id), :]\n glacier_poly, bool = _merge_sliver(glacier_poly, to_merge)\n return glacier_poly\n\n\ndef _fill_pits_with_saga(dem, saga_cmd=None):\n \"\"\" fill pits with SAGA GIS\n\n Parameters\n ----------\n dem : str\n path to the raster file\n saga_cmd: str\n path to saga_cmd.exe (only needed on windows system)\n\n Returns\n -------\n path to the new raster file\n \"\"\"\n saga_dir = os.path.join(os.path.dirname(dem), 'saga')\n if not os.path.exists(saga_dir):\n # create directory for saga_output\n os.makedirs(saga_dir)\n saga_filled = os.path.join(saga_dir, 'filled.sdat')\n filled_dem = os.path.join(os.path.dirname(dem), 'filled_dem.tif')\n if sys.platform.startswith('linux'):\n os.system('saga_cmd ta_preprocessor 4 -ELEV:' + dem + ' -FILLED:'\n + saga_filled + ' > /dev/null')\n os.system('gdal_translate ' + saga_filled + ' ' + filled_dem\n + ' > /dev/null')\n elif sys.platform.startswith('win'):\n os.system('\"'+saga_cmd+' ta_preprocessor 4 -ELEV:'+dem+' -FILLED:'\n + saga_filled+' \"')\n os.system('\" gdal_translate '+saga_filled+' '+filled_dem\n + ' \"')\n return filled_dem\n\n\ndef _flowacc(input_dem):\n \"\"\"create a raster which only contains flowaccumulation at the gutter,\n used for pour_point identification\n\n Parameters\n ----------\n input_dem : str\n path to a raster file\n\n Returns\n -------\n path to raster file with the flow accumulation gutter\n \"\"\"\n flow_direction = os.path.join(os.path.dirname(input_dem), 'flow_dir.tif')\n flow_accumulation = os.path.join(os.path.dirname(input_dem),\n 'flow_accumulation.tif')\n # calculate flow direction\n routing.flow_direction_d_inf(input_dem, flow_direction)\n # calculate flow_accumulation\n routing.flow_accumulation(flow_direction, input_dem, flow_accumulation)\n # mask along gutter\n gutter_shp = os.path.join(os.path.dirname(input_dem), 'gutter.shp')\n gutter = gpd.read_file(gutter_shp)['geometry'][0]\n _raster_mask(flow_accumulation, gutter, 'flow_gutter')\n\n return os.path.join(os.path.dirname(input_dem), 'flow_gutter.tif')\n\n\ndef flowshed_calculation(dem, shp):\n \"\"\" calculate flowsheds\n\n Parameters\n ----------\n dem : str\n path to a raster file\n shp : str\n path to a shape file\n\n Returns\n -------\n path to the shape file containing all the flowsheds\n (shapely.geometry.Polygon)\n \"\"\"\n dir = os.path.dirname(dem)\n watershed_dir = os.path.join(dir, 'all_watersheds.shp')\n flowshed_dir = os.path.join(dir, 'flowshed.shp')\n # calculate watersheds\n routing.delineate_watershed(dem, shp, 1, 100, watershed_dir,\n 
os.path.join(dir, 'snapped_outlet_points.shp'),\n os.path.join(dir, 'stream_out_uri.tif'))\n watersheds = gpd.read_file(watershed_dir).buffer(0)\n pour_points = gpd.read_file(dir + '/pour_points.shp')\n flowsheds = gpd.GeoSeries(watersheds.intersection(co), crs=crs)\n # remove empty objects\n\n flowsheds = flowsheds[(~flowsheds.is_empty) & (flowsheds.type != 'Point')\n & (flowsheds.type != 'MultiPoint')\n & (flowsheds.type != 'LineString')\n & (flowsheds.type != 'MultiLineString')]\n collections = flowsheds[flowsheds.type == 'GeometryCollection']\n for index in collections.index:\n multi = MultiPolygon()\n for geo in collections.loc[index]:\n if geo.type is 'Polygon':\n multi = multi.union(geo)\n flowsheds.loc[index] = multi\n # if object is Multipolygon split it\n for i, shed in enumerate(flowsheds):\n if shed.type is 'MultiPolygon':\n # find polygon with minimal distance to pour point\n dist = []\n for s0 in shed:\n dist.append(s0.distance(pour_points.loc[i, 'geometry']))\n # add each polygon to all_watershed.shp\n for j, s1 in enumerate(shed):\n # polygon nearest to PP get current id\n if j == np.argmin(dist):\n flowsheds.loc[i] = shape(s1)\n # all other poylgons were added at the end\n else:\n s3 = gpd.GeoSeries(s1)\n flowsheds = flowsheds.append(s3, ignore_index=True)\n result = gpd.GeoDataFrame(geometry=flowsheds)\n result.to_file(flowshed_dir)\n return flowshed_dir\n\n\ndef _gutter(masked_dem, depth):\n \"\"\" create a new raster, with lowered values at the gutter (beyond\n the outlines\n Parameters\n ----------\n masked_dem : str\n path to a raster file\n depth : int\n raster will be lowered by dept along gutter\n\n Returns\n -------\n path to the output raster\n\n \"\"\"\n # create gutter shp\n gutter_shp = os.path.join(os.path.dirname(masked_dem), 'gutter.shp')\n outline_exterior = Polygon(co.exterior)\n gutter_shape = outline_exterior.buffer(pixelsize * 2).difference(\n outline_exterior.buffer(pixelsize))\n gpd.GeoSeries(gutter_shape, crs=crs).to_file(gutter_shp)\n\n # lower dem along gutter\n gutter_dem = _raster_mask(masked_dem, gutter_shape, 'gutter')\n gutter2_dem = os.path.join(os.path.dirname(gutter_dem), 'gutter2.tif')\n with rasterio.open(masked_dem) as src1:\n mask_band = np.array(src1.read(1))\n with rasterio.open(gutter_dem) as src:\n mask_band = np.float32(mask_band - depth * (~np.isnan(np.array(\n src.read(1)))))\n with rasterio.open(gutter2_dem, \"w\", **src.meta.copy()) as dest:\n dest.write_band(1, mask_band)\n\n return gutter2_dem\n\n\ndef identify_pour_points(dem):\n \"\"\" create flow accumulation gutter and identify pour points\n\n Parameters\n ----------\n dem : str\n path to the raster file\n\n Returns\n -------\n path to a shape file containing all pour points\n \"\"\"\n # calculation of flow accumulation and flow direction\n flow_gutter = _flowacc(dem)\n\n # identify Pour Points\n pour_points_shp = _pour_points(flow_gutter)\n\n return pour_points_shp\n\n\ndef _intersection_of_glaciers(gpd_obj, index):\n \"\"\" create a GeoDataFrame object including all intersection areas between\n gpd_obj and gpd_obj[index]\n\n Parameters\n ----------\n gpd_obj : gpd.GeoDataFrame object\n for that intersection should be identified\n index : int\n index of one element in gpd_obj\n\n Returns\n -------\n gpd.GeoDataFrame object with intersection areas\n \"\"\"\n gpd_obj = gpd_obj.intersection(gpd_obj.loc[index, 'geometry'])\n gpd_obj = gpd.GeoDataFrame(geometry=gpd_obj[gpd_obj.index != index],\n crs=crs)\n if not gpd_obj.empty:\n gpd_obj = _make_polygon(gpd_obj)\n return 
gpd_obj\n\n\ndef _create_p_glac(shp):\n    \"\"\"\n\n    Parameters\n    ----------\n    shp : str\n        path to a shape file containing pour points\n\n    Returns\n    -------\n    gpd.GeoDataFrame with the P_glac areas\n    \"\"\"\n    a = 14.3\n    b = 0.5  # the trailing comma here made b a tuple and crashed f ** b\n    c = 3500\n    pp = gpd.read_file(shp)\n    geoms = [co.intersection(pp.loc[i, 'geometry'].buffer(\n        _p_glac_radius(a, b, c, pp.loc[i, 'flowacc']))) for i in pp.index]\n    p_glac = gpd.GeoDataFrame(geometry=geoms, crs=crs)\n\n    # delete empty objects\n    p_glac = p_glac[~p_glac.is_empty]\n\n    # if p_glac is Multipolygon choose only nearest polygon\n    for i in p_glac.index:\n        if p_glac.loc[i, 'geometry'].type == 'MultiPolygon':\n            point = pp.loc[i, 'geometry']\n            dist = [j.distance(point) for j in p_glac.loc[i, 'geometry']]\n            min_dist = np.argmin(dist)\n            p_glac.loc[i, 'geometry'] = p_glac.loc[i, 'geometry'][min_dist]\n    return p_glac\n\n\ndef merge_flows(shed_shp, pour_point_shp):\n    \"\"\"merge the flowsheds together. First, P_glac (a circle whose radius depends\n    on the flow accumulation) is calculated for each pour point. If one or more\n    flowsheds are overlaid by the area of this circle, they are merged together.\n    Sliver polygons are merged to the polygon with the longest shared boundary.\n    Finally, overlaps are corrected and the resulting divides will be saved in\n    separate folders.\n\n    Parameters\n    ----------\n    shed_shp : str\n        path to the shape file containing the flowsheds, that should be merged\n    pour_point_shp : str\n        path to the shape file containing the pour points\n\n    Returns\n    -------\n    number of glaciers\n\n    \"\"\"\n    import time\n    start = time.time()\n    p_glac_dir = os.path.join(os.path.dirname(pour_point_shp), 'p_glac.shp')\n    p_glac = _create_p_glac(pour_point_shp)\n    p_glac.to_file(p_glac_dir)\n\n    flows = gpd.read_file(shed_shp)\n    # merge overlaps (p_glac, flowsheds)\n    for j in p_glac.index:\n        overlaps = flows[flows.intersects(p_glac.loc[j, 'geometry'])]\n        if len(overlaps) > 1:\n            union = cascaded_union(overlaps.loc[:, 'geometry'])\n            flows.set_value(overlaps.index[0], 'geometry', union)\n            del_ids = overlaps.index.drop(overlaps.index[0])\n            flows = flows.loc[flows.index.difference(del_ids)]\n    # add gaps\n    all_flows = cascaded_union(flows.geometry)\n    if all_flows.type == 'Polygon':  # .type is a string, never the Polygon class\n        difference = co.difference(all_flows)\n    else:\n        difference = co.difference(all_flows.simplify(0.00001))\n\n    # gpd.GeoDataFrame(geometry=[difference], crs=crs).plot()\n    last_sliver = []\n    if (difference.type == 'Polygon') and (difference.area > 0.1):\n        flows, done = _merge_sliver(flows, difference)\n        if not done:\n            last_sliver.append(difference)\n\n    if difference.type == 'MultiPolygon':\n        for polygon in difference:\n            if polygon.area > 0.1:\n                flows, done = _merge_sliver(flows, polygon)\n                if not done:\n                    last_sliver.append(polygon)\n    slivers = flows[flows.geometry.apply(_is_sliver)]\n    glaciers = flows.loc[flows.index.difference(slivers.index)]\n    if len(glaciers) <= 1:\n        return 1\n    glaciers.to_file(os.path.join(os.path.dirname(pour_point_shp),\n                                  'glaciers.shp'))\n    if not slivers.empty:\n        slivers.to_file(os.path.join(os.path.dirname(pour_point_shp),\n                                     'slivers.shp'))\n    # merge slivers to glaciers\n    for k in slivers.index:\n        sliver = slivers.loc[k, 'geometry']\n        glaciers, done = _merge_sliver(glaciers, sliver)\n        if not done:\n            last_sliver.append(sliver)\n\n    for polygon in last_sliver:\n        glaciers, done = _merge_sliver(glaciers, polygon)\n    # correct overlapping of glaciers\n    for id in glaciers.index:\n        glaciers = _merge_overlaps(glaciers, id)\n\n    for id in glaciers.index:\n        if id in glaciers.index:\n            glaciers = _split_overlaps(glaciers, id)\n\n    
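    # At this point every divide should be a single Polygon again;
    # _convert_to_polygon keeps the largest part of any MultiPolygon and
    # re-merges the smaller parts as slivers before the per-divide
    # statistics (altitude range, area shares) are computed below.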
glaciers = _convert_to_polygon(glaciers)\n\n    # check if divide is inside another divide\n    for id in glaciers.index:\n        if id in glaciers.index:\n            glaciers = _check_contain_divides(glaciers, id)\n        # compute altitude range\n        if id in glaciers.index:\n            poly = glaciers.loc[id, 'geometry']\n            glaciers.loc[id, 'Alt_Range'] = _compute_altitude(filled_dem, poly)\n    # compute percentage altitude range\n    max_alt = np.max(glaciers.loc[:, 'Alt_Range'])\n    per_alt_range = glaciers.loc[:, 'Alt_Range']/max_alt\n    glaciers.loc[:, 'Perc_Alt_Range'] = per_alt_range\n\n    glaciers.loc[:, 'Area'] = glaciers.geometry.area/10**6\n    glaciers = glaciers.sort_values('Area', ascending=False)\n    glaciers = glaciers.reset_index()\n\n    glaciers['Perc_Area'] = glaciers.Area / glaciers.loc[0].Area\n\n    # save glaciers\n    glaciers.to_file(os.path.join(os.path.dirname(pour_point_shp),\n                                  'divides.shp'))\n    return len(glaciers)\n\n\ndef merge_sliver_poly(glacier_poly, polygon):\n    \"\"\"Sliver polygon will be merged to the polygon of glacier_poly with the\n    longest shared boundary.\n\n    Parameters\n    ----------\n    glacier_poly: gpd.GeoDataFrame\n        contains all glacier geometries\n    polygon : shapely.geometry.Polygon instance\n        geometry of the sliver polygon\n\n    Returns\n    -------\n    new gpd.GeoDataFrame, where sliver is merged\n    \"\"\"\n    max_b = 0\n    max_b_id = -1\n    for i, glac in glacier_poly.iteritems():\n        if polygon.boundary.intersection(glac).length > max_b:\n            max_b_id = i\n            max_b = polygon.boundary.intersection(glac).length\n    if not max_b_id == -1:\n        glacier_poly[max_b_id] = glacier_poly[max_b_id].union(shape(polygon))\n    return glacier_poly\n\n\ndef _make_polygon(gpd_obj):\n    \"\"\"select geometry which are from the type Polygon, MultiPolygon or\n    GeometryCollection. The last one is converted to a Polygon/Multipolygon\n    (Line Strings/MultiLineStrings are removed)\n\n    Parameters\n    ----------\n    gpd_obj : gpd.GeoDataFrame\n\n    Returns\n    -------\n    gpd.GeoDataFrame, that only contains Polygons or MultiPolygons\n    \"\"\"\n\n    gpd_obj = gpd_obj[(gpd_obj.type == 'GeometryCollection') |\n                      (gpd_obj.type == 'Polygon') |\n                      (gpd_obj.type == 'MultiPolygon')]\n    if not gpd_obj.empty:\n        collection = gpd_obj[(~gpd_obj.is_empty) &\n                             (gpd_obj.type == 'GeometryCollection')]\n        # choose only polygons or multipolygons\n        for c in collection.index:\n            geo = collection.loc[c, 'geometry']\n            new = MultiPolygon()\n            for obj in geo:\n                if obj.type in ['Polygon', 'MultiPolygon']:\n                    new = new.union(obj)\n            gpd_obj = gpd_obj.copy()\n            gpd_obj.loc[c, 'geometry'] = new\n    return gpd_obj\n\n\ndef _p_glac_radius(a, b, c, f):\n    \"\"\"calculate the radius of P_glac\n\n    Parameters\n    ----------\n    a : float (14.3)\n    b : float (0.5)\n    c : float (3500m)\n        constrain the radius\n    f : int\n        flowaccumulation value\n\n    Returns\n    -------\n\n    \"\"\"\n    if a * (f ** b) < c:\n        return a * (f ** b)\n    else:\n        return c\n\n\ndef _is_sliver(polygon):\n    \"\"\"check if polygon is a sliver polygon\n\n    Parameters\n    ----------\n    polygon : shapely.geometry.Polygon instance\n\n    Returns\n    -------\n    bool\n    \"\"\"\n    if polygon.type != 'Polygon':\n        max_poly = np.argmax([poly.area for poly in polygon])  # argmax needs a sequence, not a generator\n        polygon = polygon[max_poly]\n\n    if polygon.area < 100000 or (polygon.area < 200000 and _compactness(\n            polygon)):\n        return True\n    else:\n        return False\n\n\ndef _merge_sliver(gpd_obj, polygon):\n    \"\"\"merge sliver polygon to the glacier with the longest shared boundary.\n    If polygon does not touch the glaciers, False will be returned.\n\n    Parameters\n    ----------\n    gpd_obj : gpd.GeoDataFrame\n        contains the geometry of 
each glacier\n polygon : shapely.geometry.Polygon instance\n sliver polygon, which should be merged\n\n Returns\n -------\n new gpd.GeoDataFrame,\n bool\n \"\"\"\n\n intersection_array = gpd_obj.intersection(polygon.boundary).length\n\n if np.max(intersection_array) != 0:\n max_b = np.argmax(intersection_array)\n poly = gpd_obj.loc[max_b, 'geometry'].simplify(0.01).buffer(0)\n geom = poly.union(polygon.buffer(0)).buffer(0)\n if geom.type is not 'Polygon':\n geom = geom.buffer(-0.01).buffer(0.01)\n gpd_obj.set_value(max_b, 'geometry', geom)\n merged = True\n # sliver does not touch glacier at the moment. Try again in the end\n else:\n merged = False\n return [gpd_obj, merged]\n\n\ndef _merge_overlaps(gpd_obj, l):\n \"\"\"correct glacier overlaps from gpd_obj.loc[l, 'geometry]\n\n Parameters\n ----------\n gpd_obj : gpd.GeoDataFrame\n l : int\n\n Returns\n -------\n\n \"\"\"\n if l in gpd_obj.index:\n inter = _intersection_of_glaciers(gpd_obj, l)\n # if intersection area > 50%\n merge = inter[inter.area / gpd_obj.loc[l, 'geometry'].area > 0.5]\n while not merge.empty:\n if len(merge.index) == 1:\n poly = gpd_obj.loc[l, 'geometry'].union(\n gpd_obj.loc[merge.index[0], 'geometry'])\n gpd_obj.set_value(l, 'geometry', poly)\n gpd_obj = gpd_obj.loc[gpd_obj.index.difference(merge.index)]\n inter = _intersection_of_glaciers(gpd_obj, l)\n to_merge = inter.area / gpd_obj.loc[l, 'geometry'].area > 0.5\n merge = inter[to_merge]\n\n if len(merge.index) > 1:\n cascaded = cascaded_union(gpd_obj.loc[merge.index, 'geometry'])\n gpd_obj.set_value(l, 'geometry', gpd_obj.loc[l, 'geometry'].\n union(cascaded))\n gpd_obj = gpd_obj.loc[gpd_obj.index.difference(merge.index)]\n inter = _intersection_of_glaciers(gpd_obj, l)\n to_merge = inter.area / gpd_obj.loc[l, 'geometry'].area > 0.5\n merge = inter[to_merge]\n return gpd_obj\n\n\ndef _split_overlaps(gpd_obj, l):\n \"\"\"\n if glaciers overlaps just a little bit (not more than 50 % of one of them),\n the glacier will be split. 
The overlapping are will be related to the\n bigger one.\n\n Parameters\n ----------\n gpd_obj : gpd.GeoDataFrame object\n l : int\n\n Returns\n -------\n gpd.GeoDataFrame object\n \"\"\"\n split = _intersection_of_glaciers(gpd_obj, l)\n for k in split.index:\n if l in gpd_obj.index and k in gpd_obj.index:\n if gpd_obj.loc[l, 'geometry'].area > gpd_obj.loc[k,\n 'geometry'].area:\n gpd_obj = _split_glacier(gpd_obj, k, split.loc[k, 'geometry'])\n else:\n gpd_obj = _split_glacier(gpd_obj, l, split.loc[k, 'geometry'])\n return gpd_obj\n\n\ndef _split_glacier(gpd_obj, index, polygon):\n \"\"\"\n split the object\n\n Parameters\n ----------\n gpd_obj : gpd.GeoDataFrame\n index : int\n polygon : shapely.geometry object\n\n Returns\n -------\n gpd.GeoDataFrame object\n \"\"\"\n diff = gpd_obj.loc[index, 'geometry'].difference(polygon)\n\n if diff.type is 'Polygon':\n if not _is_sliver(diff):\n gpd_obj.loc[index, 'geometry'] = diff\n else:\n gpd_obj = gpd_obj[gpd_obj.index != index]\n gpd_obj, done = _merge_sliver(gpd_obj, diff)\n\n else:\n # choose largest polygon\n max_poly = np.argmax([obj.area for obj in diff])\n if not _is_sliver(diff[max_poly]):\n gpd_obj.loc[index, 'geometry'] = diff[max_poly]\n else:\n gpd_obj = gpd_obj[gpd_obj.index != index]\n gpd_obj, done = _merge_sliver(gpd_obj, diff[max_poly])\n # rest merged as sliver polygon\n rest = diff.difference(diff[max_poly])\n gpd_obj, done = _merge_sliver(gpd_obj, rest)\n return gpd_obj\n\n\ndef _smooth_dem(dem):\n \"\"\"\n smooth the dem file (5x5 median filter is applied)\n\n Parameters\n ----------\n dem : str\n path to the dem file\n\n Returns\n -------\n str to the smoothed dem file\n \"\"\"\n smoothed_dem = os.path.join(os.path.dirname(dem), 'smoothed.tif')\n with rasterio.open(dem) as src:\n array = src.read()\n profile = src.profile\n # apply a 5x5 median filter to each band\n filtered = medfilt(array, (1, 5, 5)).astype('int16')\n with rasterio.open(smoothed_dem, 'w', **profile) as dst:\n dst.write(filtered)\n return smoothed_dem\n\n\ndef _transform_coord(tupel, transform):\n \"\"\"\n transform pixel numbers to coordinates\n Parameters\n ----------\n tupel : [int,int]\n transform : transform object from rasterio\n\n Returns\n -------\n shapely.geometry.Point object\n \"\"\"\n new_x = transform[0]+(tupel[1]+1)*transform[1]-transform[1]/2\n new_y = transform[3]+tupel[0]*transform[-1]-transform[1]/2\n\n return Point(new_x, new_y)\n\n\ndef _pour_points(dem):\n \"\"\"\n\n Parameters\n ----------\n dem : str\n path to a dem file\n\n Returns\n -------\n path to a shapefile containing all pour points\n \"\"\"\n # open gutter with flow accumulation\n with rasterio.open(dem) as src:\n # TODO: new rasterio version will return affine.Affine()\n # --> order will change\n transform = src.transform\n band = np.array(src.read(1))\n im = img_as_float(band)\n nan = np.where(np.isnan(im))\n # set nan to zero\n im[nan] = 0\n # calculate maxima\n coordinates = peak_local_max(im, min_distance=1)\n # transform maxima to (flowaccumulation,coordinates)\n new_coord = []\n new = []\n dtype = [('flowaccumulation', float), ('coordinates', object)]\n # transform coordinates\n for x, y in coordinates:\n new_coord.append((im[x][y], _transform_coord([x, y], transform)))\n new.append(Point(_transform_coord([x, y], transform)))\n new_coord = np.array(new_coord, dtype=dtype)\n # sort array by flowaccumulation\n new_coord = np.sort(new_coord, order='flowaccumulation')\n # reverse array\n new_coord = new_coord[::-1]\n pp = gpd.GeoDataFrame({'flowacc': 
new_coord['flowaccumulation']},\n geometry=new_coord['coordinates'], crs=crs)\n pp_shp = os.path.join(os.path.dirname(dem), 'pour_points.shp')\n pp.to_file(pp_shp)\n return pp_shp\n\n\ndef preprocessing(dem, shp, saga_cmd=None):\n\n \"\"\" Run all preprocessing tasks:\n\n fill pits from DEM,\n mask DEM along buffer1,\n lower it by 100 m along buffer2\n\n Parameters\n ----------\n dem : str\n path to the DEM file\n shp : str\n path to the shape file (outlines.shp)\n saga_cmd: str\n path to the SAGA GIS executable file (needed on win system)\n\n Returns\n -------\n path to the output raster\n \"\"\"\n\n # global outlines\n global crs\n global schema\n global pixelsize\n global co\n global out1\n global filled_dem\n pixelsize = 40\n\n smoothed_dem = _smooth_dem(dem)\n # fill pits\n filled_dem = _fill_pits_with_saga(smoothed_dem, saga_cmd=saga_cmd)\n\n # read outlines with gdal\n out1 = gpd.read_file(shp)\n crs = out1.crs\n co = out1.loc[0, 'geometry'].buffer(0)\n\n # mask dem along buffer1\n masked_dem = _raster_mask(filled_dem, co.buffer(4*pixelsize), 'masked')\n\n # lower dem by l_gutter along gutter\n gutter_dem = _gutter(masked_dem, 100)\n\n return gutter_dem\n\n\ndef dividing_glaciers(input_dem, input_shp, saga_cmd=None):\n \"\"\" This is the main structure of the algorithm\n\n Parameters\n ----------\n input_dem : str\n path to the raster file(.tif) of the glacier, resolution has to be 40m!\n input_shp : str\n path to the shape file of the outline of the glacier\n\n Returns\n -------\n number of divides (int)\n \"\"\"\n if sys.platform.startswith('win'):\n saga_cmd = 'C:\\\\\"Program Files\"\\\\SAGA-GIS\\\\saga_cmd.exe'\n gutter_dem = preprocessing(input_dem, input_shp, saga_cmd=saga_cmd)\n else:\n gutter_dem = preprocessing(input_dem, input_shp)\n pour_points_dir = identify_pour_points(gutter_dem)\n flowsheds_dir = flowshed_calculation(gutter_dem, pour_points_dir)\n no_glaciers = merge_flows(flowsheds_dir, pour_points_dir)\n\n # merge_flowsheds(p_glac, flowsheds_dir)\n\n # Allocation of flowsheds to individual glaciers\n # & Identification of sliver polygons\n # no_glaciers, all_polygon = merge_flowsheds(p_glac, watersheds)\n\n # delete files which are not needed anymore\n '''\n for file in os.listdir(os.path.dirname(input_shp)):\n for word in ['P_glac', 'all', 'flow', 'glaciers', 'gutter', 'p_glac',\n 'pour', 'smoothed', 'snapped', 'stream', 'masked',\n 'slivers', 'filled']:\n if file.startswith(word):\n os.remove(os.path.join(os.path.dirname(input_shp), file))\n '''\n return no_glaciers\n","sub_path":"partitioning/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":29890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"173462063","text":"from odoo import models, fields, api\nfrom odoo.exceptions import except_orm, Warning, RedirectWarning\nfrom datetime import date, datetime, timedelta\nimport calendar\nimport time\n\nclass report_ar_mutation_pdf(models.AbstractModel):\n\n _name = 'report.kg_account.report_ar_mutation_pdf'\n\n @api.model\n def get_report_values(self, docids, data=None): \n # month_range = calendar.monthrange(int(data['year']),data['period'])\n # start_date = str(data['year']) + '-' + str(data['period']).rjust(2,'0') + '-' + str(month_range[0]).rjust(2,'0')\n # end_date = str(data['year']) + '-' + str(data['period']).rjust(2,'0') + '-' + str(month_range[1]).rjust(2,'0')\n \n start_date = data['start_date']\n end_date = data['end_date']\n company_id = data['company_id']\n partner_list = []\n\n 
company_ids = self.env['res.company'].browse(int(data['company_id']))\n\n        # search beginning folio\n        bg_folio_ids = self.env['account.invoice'].search([\n            ('type', '=', 'out_invoice'),\n            ('state', 'in', ['open', 'paid']),\n            ('date_invoice', '<', start_date),\n            ('company_id', '=', company_id),\n        ])\n\n        for inv in bg_folio_ids:\n            if inv.partner_id.id not in partner_list:\n                partner_list.append(inv.partner_id.id)\n\n        # search beginning credit note\n        bg_creditnote_ids = self.env['account.invoice'].search([\n            ('type', '=', 'out_refund'),\n            ('state', 'in', ['open', 'paid']),\n            ('date_invoice', '<', start_date),\n            ('company_id', '=', company_id),\n        ])\n\n        for cn in bg_creditnote_ids:\n            if cn.partner_id.id not in partner_list:\n                partner_list.append(cn.partner_id.id)\n\n        # search beginning adjustment/write off\n        bg_adjustment_ids = self.env['account.payment'].search([\n            # ('payment_type', '=', 'inbound'),\n            ('partner_type', '=', 'customer'),\n            ('writeoff_account_id', '<>', False),\n            ('state', 'in', ['posted', 'sent', 'reconciled']),\n            ('payment_date', '<', start_date),\n            ('company_id', '=', company_id),\n        ])\n\n        bg_adjustment_ids = bg_adjustment_ids.filtered(lambda payment: 'POS' not in payment.name and 'pos' not in payment.name)\n\n        for adj in bg_adjustment_ids:\n            if adj.partner_id.id not in partner_list:\n                partner_list.append(adj.partner_id.id)\n\n        # search beginning payment\n        bg_payment_ids = self.env['account.payment'].search([\n            # ('payment_type', '=', 'inbound'),\n            ('partner_type', '=', 'customer'),\n            ('state', 'in', ['posted', 'sent', 'reconciled']),\n            ('payment_date', '<', start_date),\n            ('company_id', '=', company_id),\n            ('is_advance_payment', '=', False),\n        ])\n\n        bg_payment_ids = bg_payment_ids.filtered(lambda payment: 'POS' not in payment.name and 'pos' not in payment.name)\n\n        for inv in bg_payment_ids:\n            if inv.partner_id.id not in partner_list:\n                partner_list.append(inv.partner_id.id)\n\n        # search new invoice (current period)\n        folio_ids = self.env['account.invoice'].search([\n            ('type', '=', 'out_invoice'),\n            ('state', 'in', ['open', 'paid']),\n            ('date_invoice', '>=', start_date),\n            ('date_invoice', '<=', end_date),\n            ('company_id', '=', company_id),\n        ])\n\n        for inv in folio_ids:\n            if inv.partner_id.id not in partner_list:\n                partner_list.append(inv.partner_id.id)\n\n        # search adjustment/write off (current period)\n        adjustment_ids = self.env['account.payment'].search([\n            # ('payment_type', '=', 'inbound'),\n            ('partner_type', '=', 'customer'),\n            ('writeoff_account_id', '<>', False),\n            ('state', 'in', ['posted', 'sent', 'reconciled']),\n            ('payment_date', '>=', start_date),\n            ('payment_date', '<=', end_date),\n            ('company_id', '=', company_id),\n        ])\n\n        adjustment_ids = adjustment_ids.filtered(lambda payment: 'POS' not in payment.name and 'pos' not in payment.name)\n\n        for adj in adjustment_ids:\n            if adj.partner_id.id not in partner_list:\n                partner_list.append(adj.partner_id.id)\n\n        # search payment (current period)\n        payment_ids = self.env['account.payment'].search([\n            # ('payment_type', '=', 'inbound'),\n            ('partner_type', '=', 'customer'),\n            ('state', 'in', ['posted', 'sent', 'reconciled']),\n            ('payment_date', '>=', start_date),\n            ('payment_date', '<=', end_date),\n            ('company_id', '=', company_id),\n            ('is_advance_payment', '=', False),\n        ])\n\n        payment_ids = payment_ids.filtered(lambda payment: 'POS' not in payment.name and 'pos' not in payment.name)\n\n        for p in payment_ids:\n            if p.partner_id.id not in partner_list:\n                partner_list.append(p.partner_id.id)\n\n        # search for credit note\n        creditnote_ids = self.env['account.invoice'].search([\n            ('type', '=',
'out_refund'),\n ('state', 'in', ['open', 'paid']),\n ('date_invoice', '>=', start_date),\n ('date_invoice', '<=', end_date),\n ('company_id', '=', company_id),\n ])\n\n for cn in creditnote_ids:\n if cn.partner_id.id not in partner_list:\n partner_list.append(cn.partner_id.id)\n\n\n #START LOOPING\n total_bg_folio = 0\n total_bg_payment = 0\n total_bg_creditnote = 0\n total_bg_adjustment = 0\n total_bg_balance = 0\n total_folio = 0\n total_payment = 0\n total_creditnote = 0\n total_adjustment = 0\n total_all = 0\n\n\n data_report = []\n\n for p in partner_list:\n partner_code = '-'\n partner_name = 'NO NAME'\n partner_ids = self.env['res.partner'].browse(p)\n if partner_ids:\n partner_name = partner_ids.name\n partner_code = partner_ids.id\n\n #BEGINNING BALANCE\n lines = bg_folio_ids.filtered(lambda r: r.partner_id.id == p)\n amount_bg_folio = sum(l.amount_total for l in lines)\n\n bg_cn = bg_creditnote_ids.filtered(lambda r: r.partner_id.id == p)\n amount_bg_creditnote = sum(l.amount_total for l in bg_cn)\n\n bg_adj_in = bg_adjustment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'inbound')\n amount_bg_adjustment_in = sum(p.writeoff_amount for p in bg_adj_in)\n\n bg_adj_out = bg_adjustment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'outbound')\n amount_bg_adjustment_out = sum(p.writeoff_amount for p in bg_adj_out)\n\n bg_payment_in = bg_payment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'inbound')\n amount_bg_payment_in = sum(p.amount for p in bg_payment_in)\n\n bg_payment_out = bg_payment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'outbound')\n amount_bg_payment_out = sum(p.amount for p in bg_payment_out)\n\n amount_bg_balance = amount_bg_folio - amount_bg_creditnote - amount_bg_payment_in + amount_bg_payment_out - amount_bg_adjustment_in + amount_bg_adjustment_out\n\n #AMOUNT BALANCE\n # custom code by andi\n invoices = folio_ids.filtered(lambda r: r.partner_id.id == p)\n amount_folio = sum(l.amount_total for l in invoices)\n # end of custom code\n\n #PAYMENT BALANCE\n # custom code by andi\n # amount_adv_payments = 0.0\n # amount_total_invoices = sum(invoice.amount_total for invoice in invoices) or 0.0\n # amount_residual_invoices = sum(invoice.residual for invoice in invoices) or 0.0\n\n # code for compute the amount of adv payment used for the invoice if someday needed\n # for invoice in invoices:\n # adv_payment_amount_used = 0.0\n # for adv_payment in invoice.advance_payment_ids:\n # adv_payment_amount_used = adv_payment.amount - adv_payment.residual_temp\n # amount_adv_payments += adv_payment_amount_used\n # end of code\n\n # amount_payment = amount_total_invoices - amount_residual_invoices\n payments_in = payment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'inbound')\n amount_payment_in = sum(payment.amount for payment in payments_in) or 0.0\n\n payments_out = payment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'outbound')\n amount_payment_out = sum(payment.amount for payment in payments_out) or 0.0\n\n amount_payment = amount_payment_in - amount_payment_out\n\n # end of custom code\n\n # origin code by mas mario\n # lines = payment_ids.filtered(lambda r: r.partner_id.id == p)\n # amount_payment = sum(l.amount for l in lines)\n # end of origin code\n \n # CreditNote\n lines = creditnote_ids.filtered(lambda r: r.partner_id.id == p)\n amount_creditnote = sum(l.amount_total for l in lines)\n\n # Adjustment\n adj_in = adjustment_ids.filtered(lambda r: 
r.partner_id.id == p and r.payment_type == 'inbound')\n amount_adjustment_in = sum(p.writeoff_amount for p in adj_in) or 0.0\n\n adj_out = adjustment_ids.filtered(lambda r: r.partner_id.id == p and r.payment_type == 'outbound')\n amount_adjustment_out = sum(p.writeoff_amount for p in adj_out) or 0.0\n\n amount_adjustment = amount_adjustment_in - amount_adjustment_out\n \n data_report.append({\n 'partner_code' : partner_code,\n 'partner_name' : partner_name,\n 'amount_bg_balance' : amount_bg_balance,\n 'amount_folio' : amount_folio,\n 'amount_payment' : amount_payment,\n 'amount_creditnote' : amount_creditnote,\n 'amount_adjustment' : amount_adjustment,\n 'amount_total' : amount_bg_balance + amount_folio - amount_creditnote - amount_adjustment - amount_payment,\n })\n\n total_bg_balance += amount_bg_balance\n total_folio += amount_folio\n total_payment += amount_payment\n total_creditnote += amount_creditnote\n total_adjustment += amount_adjustment\n total_all = total_bg_balance + total_folio - total_creditnote - total_payment - total_adjustment\n\n data_total = []\n data_total.append(total_bg_balance)\n data_total.append(total_folio)\n data_total.append(total_creditnote)\n data_total.append(total_payment)\n data_total.append(total_adjustment)\n data_total.append(total_all)\n\n # custom code by andi to sort data_report by its partner_name\n if data_report:\n sorted_data_report = sorted(data_report, key=lambda data: (data.get('partner_name') or ''))\n data_report = sorted_data_report\n # end of custom code\n\n return {\n 'docs' : data_report,\n 'data_total': data_total,\n 'start_date': start_date,\n 'end_date' : end_date,\n 'company_id': company_ids.name,\n 'printed_by': self.env.user.name,\n 'printed_on': (datetime.now() + timedelta(hours=7)).strftime(\"%Y-%m-%d %H:%M:%S\"),\n }\n\n","sub_path":"local/kg_account/reports/report_ar_mutation_pdf.py","file_name":"report_ar_mutation_pdf.py","file_ext":"py","file_size_in_byte":11724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"252687188","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/dms/edufileitem/utils.py\n\n.. 
contains helper functions for files in learning archives\n    Django content management system\n\nHans Rauch\nhans.rauch@gmx.net\n\nThe programs of the dms system can be used freely and adapted\nto specific needs.\n\n0.01 11.09.2007 start of work\n\"\"\"\n\nfrom django.utils.translation import ugettext as _\n\nfrom dms.settings import DOWNLOAD_URL\n\nfrom dms.queries import get_item_container_data_object_by_id\n\nfrom dms_ext.extension import * # override dms functions\n\n# -----------------------------------------------------\ndef get_edu_file_url(item_container):\n    \"\"\" returns the URL of the actual file \"\"\"\n    # --- PROBLEM: embedded display\n    if item_container.is_data_object:\n        file_url = DOWNLOAD_URL + item_container.container.path\n    else:\n        real_item_container = get_item_container_data_object_by_id(item_container.item.id)\n        if real_item_container != None:\n            file_url = DOWNLOAD_URL + real_item_container[0].container.path\n        else:\n            file_url = ''\n    return file_url + item_container.item.name\n    ## --- ..[:-5] removes '.html' from \n    #return file_url + item_container.item.name[:-5]\n","sub_path":"edufileitem/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"127819412","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'index'\n\nurlpatterns = [\n    url(r'^$', views.home, name='home'),\n    url(r'^(?P\\d+)/$', views.detail_game, name='detail_game'),\n    url(r'^(?P\\d+)/delete/$', views.delete, name='delete'),\n    url(r'^(?P\\d+)/NewOrder/$', views.order_create, name='order_create'),\n\n]\n","sub_path":"index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"204131163","text":"import click\nfrom os import walk\nfrom pathlib import Path, PurePosixPath\nfrom dropbox import Dropbox\nfrom dropbox.exceptions import AuthError, BadInputError\n\n\ndef validate_dropbox_path(ctx, dropbox_path, scope_path):\n    dropbox_path = Path(dropbox_path)\n    if not dropbox_path.is_dir():\n        ctx.fail(\"The dropbox path must be a valid directory.\")\n\n    return dropbox_path\n\n\ndef validate_scope_path(ctx, dropbox_path, scope_path):\n    scope_path = PurePosixPath(scope_path)\n    if not scope_path.is_absolute():\n        scope_path = \"/\" / scope_path\n\n    target_path = dropbox_path / scope_path.relative_to(\"/\")\n    if not target_path.is_dir():\n        ctx.fail(\"The scope must be a valid directory inside the dropbox directory.\")\n\n    return scope_path, target_path\n\n\ndef validate_access_token(ctx, access_token):\n    try:\n        db = Dropbox(access_token)\n        db.users_get_current_account()\n    except AuthError:\n        ctx.fail(\"The OAuth2 access token is invalid.\")\n    except BadInputError:\n        ctx.fail(\"The OAuth2 access token is malformed.\")\n\n    return db\n\n\ndef os_files_and_folders(dropbox_path, target_path):\n    for path, folders, files in walk(target_path):\n        entries = folders + files\n        for entry in entries:\n            yield Path(path, entry).relative_to(dropbox_path)\n\n\n# TODO: Handle scope_path doesn't exist and other errors.\n# TODO: Could show the progress of how many pages have been retrieved.\ndef db_files_and_folders(db, scope_path):\n    if scope_path == \"/\":\n        scope_path = \"\"\n\n    entries = []\n\n    result = db.files_list_folder(scope_path, recursive=True)\n    entries.extend(result.entries)\n\n    while result.has_more:\n        result =
db.files_list_folder_continue(result.cursor)\n entries.extend(result.entries)\n\n entries = list(map(lambda entry: PurePosixPath(entry.path_display), entries))\n\n return entries\n\n\n# TODO: Handle collision and other errors.\ndef db_rename(db, old_path, new_path, temporary_suffix=\"_\"):\n intermediate_path = old_path + temporary_suffix\n db.files_move(old_path, intermediate_path)\n db.files_move(intermediate_path, new_path)\n\n\ndef create_path_lookup(paths):\n lookup = {}\n\n for path in paths:\n lookup[path.as_posix().lower()] = path\n\n return lookup\n\n\n# TODO: Could create a progress bar.\n# TODO: Could rename using the batch API call. Make the script faster.\ndef run(ctx, access_token, dropbox_path, scope_path, mode, dry_run):\n dropbox_path = validate_dropbox_path(ctx, dropbox_path, scope_path)\n scope_path, target_path = validate_scope_path(ctx, dropbox_path, scope_path)\n db = validate_access_token(ctx, access_token)\n\n db_paths = db_files_and_folders(db, scope_path.as_posix())\n db_lookup = create_path_lookup(db_paths)\n\n for os_path in os_files_and_folders(dropbox_path, target_path):\n\n lookup_path = (\"/\" / os_path).as_posix().lower()\n if lookup_path in db_lookup:\n db_path = db_lookup[lookup_path]\n\n if os_path.name != db_path.name:\n if mode == \"push\":\n new_db_path = \"/\" / os_path\n\n click.echo(\"PUSH: \" + str(db_path) + \" -> \" + os_path.name)\n\n if not dry_run:\n db_rename(db, db_path.as_posix(), new_db_path.as_posix())\n\n if mode == \"pull\":\n\n click.echo(\"PULL: \" + str(os_path) + \" -> \" + db_path.name)\n\n if not dry_run:\n old_os_path = dropbox_path / os_path\n new_os_path = old_os_path.with_name(db_path.name)\n old_os_path.rename(new_os_path)\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213837126","text":"from appium import webdriver\n\nfrom test_appium.page.basepage import BasePage\nfrom test_frame.page.main_page import MainPage\n\n\nclass App(BasePage):\n def start(self): #启动APP\n if self.driver == None:\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '6.0'\n desired_caps['deviceName'] = '127.0.0.1:7555'\n desired_caps['appPackage'] = 'com.tencent.wework'\n desired_caps['appActivity'] = '.launch.WwMainActivity'\n desired_caps['autoGrantPermissions'] = True # 自动点掉弹框\n desired_caps['dontStopAppOnReset'] = True #不停止App,继续运行\n # \"appActivity\": \".view.WelcomeActivityAlias\",\n desired_caps['noReset'] = 'true' # 此项的意思为,1,当界面有弹框,关闭界面,当再次打开应用时,弹框会消失\n # 2,是否在测试前后重置相关环境(例如首次打开弹框,参考第一条注释,或者是登录信息)\n # desired_caps['dontStopAppOnReset'] = \"true\" # 首次启动App的时候,不停止App(可以调试或者运行的时候提升运行速度)\n desired_caps['skipDeviceInitialization'] = \"true\" # 跳过安装,权限设置等操作(可以调试或者运行的时候提升运行速度)\n desired_caps['unicodeKeyBoard'] = 'true' # 设置输入框可以进行中文输入\n desired_caps['resetKeyBoard'] = 'true'\n desired_caps['newCommandTimeout'] = 300\n self.driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", desired_caps)\n self.driver.implicitly_wait(80)\n else:\n self.driver.launch_app()\n return self\n\n def restart(self): #重启App\n self.driver.quit()\n self.driver.launch_app()\n return self\n\n def stop(self): #关闭App\n self.driver.quit()\n return self\n\n def goto_main(self)->MainPage: #定义进入页面\n return MainPage(self.driver) 
#跳转到主页面\n\n\n","sub_path":"test_frame/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"265075765","text":"#!/usr/bin/env python\n# Connection.py\n# Importing the Required Libraries.\nimport time\nimport datetime\nimport pymongo\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pymongo import MongoClient\nfrom sklearn.cross_validation import train_test_split\n# Connecting with Local MongoDB Database.\nMONGO_HOST = \"139.59.66.172\"\nMONGO_PORT = 27017\nMONGO_DB = \"sodelhi\"\n# MONGO_USER = \"Username\"\n# MONGO_PASS = \"password\"\nconnection = MongoClient(MONGO_HOST, MONGO_PORT)\ndb = connection[MONGO_DB]\n# Data_Retrival.py\n# Retriving Data from users collection of the MongoDb Database.\nfinal_data=[]\nfor obj in db.users.find({\"feed._id\":{'$exists':True},\"feed.data.like.status\":{'$exists':True},\"interests\":{'$exists':True},\"feed.data.share.status\":{'$exists':True},\"feed.data.bookmark.status\":{'$exists':True}},{\"_id\":1,\"interests\":1,\"username\":1,\"feed.unique_id\":1,\"feed.data.read.duration\":1,\"feed.data.share.status\":1,\"feed.data.like.status\":1,\"feed.data.bookmark.status\":1}):#.skip(3741 - 500):\n for feed_id in obj['feed']:\n final_obj = {}\n final_obj['_id']=obj['_id']\n final_obj['users_name'] = obj['username']\n final_obj['interests']=obj['interests']\n######## test on original dataset \n# if 'read' in feed_id['data'] and len(feed_id['data']['read'])>0 and 'duration' in feed_id['data']['read'][0]:\n# final_obj['timestamp']= feed_id['data']['read'][0]['duration']\n if 'bookmark' in feed_id['data'] and 'status' in feed_id['data']['bookmark']:\n final_obj['bookmark']= feed_id['data']['bookmark']['status']\n if 'share' in feed_id['data'] and len(feed_id['data']['share'])>0 and 'status' in feed_id['data']['share'][0]:\n final_obj['share']= feed_id['data']['share'][0]['status']\n if 'like' in feed_id['data'] and 'status' in feed_id['data']['like']:\n final_obj['like']= feed_id['data']['like']['status']\n if 'unique_id' in feed_id:\n final_obj['article_id'] = feed_id['unique_id']\n final_data.append(final_obj)\n# print final_data \nDataset=pd.DataFrame(final_data)\nDataset.set_index('_id', inplace=True)\n# Filtering out the a_users_name,unique_id from the Dataset.\nFinal_Dataset=Dataset.filter(['_id','users_name','article_id','interests'])\n#Convert the millisecond to date and time.\n# Final_Dataset[''] = pd.to_datetime(Final_Dataset[''], unit='ms')\n#Getting the date of the user\n# Final_Dataset[''] = [d.date() for d in Final_Dataset['']]\n# Filtering out the timestamp,bookmark_status,like_status,share_status from the Dataset.\nrating=Dataset.filter(['bookmark','like','share'])#Add Timestamp\n# Repalcing the NaN value with zero's.\nrating=rating.fillna(0)\n# Asigning Weight to the Timestamp,Like,Share,Bookmark.\n# like=0.75\n# share=3\n# bookmarks=1.25\n#Timestamp=?\nWeight=(1.25,0.75,3)\n# Finding the weighted Ratings.\nweighted_ratings=rating * Weight\n# weighted_ratings.\n# weighted_ratings.head()\n# Adding Ratings Field in the Final_Dataset.\nFinal_Dataset['rating']=weighted_ratings.sum(1)\n# Final_Dataset.\n#Getting Feed_id from the Unique_id\nFinal_Dataset[\"article_id\"] = Final_Dataset[\"article_id\"].map(lambda d: d.split(\"-\",2)[2:][0])\n# Final_Dataset['article_id'] = Final_Dataset['article_id'].str.extract('(......-\\d\\d\\d\\d)', 
expand=True)\nFinal_Dataset['interests']=Final_Dataset.interests.apply(' | '.join)\nusers = Final_Dataset['users_name'].unique()\ntrain_data, test_data = train_test_split(Final_Dataset, test_size = 0.20, random_state=0)\nFinal_Dataset.to_csv('/home/shaktisocity/Desktop/khwahish2/data.csv',encoding='utf-8')\n#test_data.to_csv('/home/shaktisocity/Desktop/khwahish2/test_rating_data.csv',encoding='utf-8')\n#train_data.to_csv('/home/shaktisocity/Desktop/khwahish2/train_rating_data.csv',encoding='utf-8')\n\n","sub_path":"data_c_c.py","file_name":"data_c_c.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"56356266","text":"from typing import List, Optional\nfrom uuid import uuid4\n\nfrom lnbits.db import open_db\nfrom lnbits.settings import DEFAULT_WALLET_NAME\n\nfrom .models import User, Wallet, Payment\n\n\n# accounts\n# --------\n\n\ndef create_account() -> User:\n with open_db() as db:\n user_id = uuid4().hex\n db.execute(\"INSERT INTO accounts (id) VALUES (?)\", (user_id,))\n\n new_account = get_account(user_id=user_id)\n assert new_account, \"Newly created account couldn't be retrieved\"\n\n return new_account\n\n\ndef get_account(user_id: str) -> Optional[User]:\n with open_db() as db:\n row = db.fetchone(\"SELECT id, email, pass as password FROM accounts WHERE id = ?\", (user_id,))\n\n return User(**row) if row else None\n\n\ndef get_user(user_id: str) -> Optional[User]:\n with open_db() as db:\n user = db.fetchone(\"SELECT id, email FROM accounts WHERE id = ?\", (user_id,))\n\n if user:\n extensions = db.fetchall(\"SELECT extension FROM extensions WHERE user = ? AND active = 1\", (user_id,))\n wallets = db.fetchall(\n \"\"\"\n SELECT *, COALESCE((SELECT balance FROM balances WHERE wallet = wallets.id), 0) AS balance_msat\n FROM wallets\n WHERE user = ?\n \"\"\",\n (user_id,),\n )\n\n return (\n User(**{**user, **{\"extensions\": [e[0] for e in extensions], \"wallets\": [Wallet(**w) for w in wallets]}})\n if user\n else None\n )\n\n\ndef update_user_extension(*, user_id: str, extension: str, active: int) -> None:\n with open_db() as db:\n db.execute(\n \"\"\"\n INSERT OR REPLACE INTO extensions (user, extension, active)\n VALUES (?, ?, ?)\n \"\"\",\n (user_id, extension, active),\n )\n\n\n# wallets\n# -------\n\n\ndef create_wallet(*, user_id: str, wallet_name: Optional[str] = None) -> Wallet:\n with open_db() as db:\n wallet_id = uuid4().hex\n db.execute(\n \"\"\"\n INSERT INTO wallets (id, name, user, adminkey, inkey)\n VALUES (?, ?, ?, ?, ?)\n \"\"\",\n (wallet_id, wallet_name or DEFAULT_WALLET_NAME, user_id, uuid4().hex, uuid4().hex),\n )\n\n new_wallet = get_wallet(wallet_id=wallet_id)\n assert new_wallet, \"Newly created wallet couldn't be retrieved\"\n\n return new_wallet\n\n\ndef delete_wallet(*, user_id: str, wallet_id: str) -> None:\n with open_db() as db:\n db.execute(\n \"\"\"\n UPDATE wallets AS w\n SET\n user = 'del:' || w.user,\n adminkey = 'del:' || w.adminkey,\n inkey = 'del:' || w.inkey\n WHERE id = ? 
AND user = ?\n \"\"\",\n (wallet_id, user_id),\n )\n\n\ndef get_wallet(wallet_id: str) -> Optional[Wallet]:\n with open_db() as db:\n row = db.fetchone(\n \"\"\"\n SELECT *, COALESCE((SELECT balance FROM balances WHERE wallet = wallets.id), 0) AS balance_msat\n FROM wallets\n WHERE id = ?\n \"\"\",\n (wallet_id,),\n )\n\n return Wallet(**row) if row else None\n\n\ndef get_wallet_for_key(key: str, key_type: str = \"invoice\") -> Optional[Wallet]:\n with open_db() as db:\n row = db.fetchone(\n \"\"\"\n SELECT *, COALESCE((SELECT balance FROM balances WHERE wallet = wallets.id), 0) AS balance_msat\n FROM wallets\n WHERE adminkey = ? OR inkey = ?\n \"\"\",\n (key, key),\n )\n\n if not row:\n return None\n\n if key_type == \"admin\" and row[\"adminkey\"] != key:\n return None\n\n return Wallet(**row)\n\n\n# wallet payments\n# ---------------\n\n\ndef get_wallet_payment(wallet_id: str, checking_id: str) -> Optional[Payment]:\n with open_db() as db:\n row = db.fetchone(\n \"\"\"\n SELECT payhash as checking_id, amount, fee, pending, memo, time\n FROM apipayments\n WHERE wallet = ? AND payhash = ?\n \"\"\",\n (wallet_id, checking_id),\n )\n\n return Payment(**row) if row else None\n\n\ndef get_wallet_payments(\n wallet_id: str, *, complete: bool = False, pending: bool = False, outgoing: bool = False, incoming: bool = False\n) -> List[Payment]:\n \"\"\"\n Filters payments to be returned by complete | pending | outgoing | incoming.\n \"\"\"\n\n clause = \"\"\n if complete and pending:\n clause += \"\"\n elif complete:\n clause += \"AND ((amount > 0 AND pending = 0) OR amount < 0)\"\n elif pending:\n clause += \"AND pending = 1\"\n else:\n raise TypeError(\"at least one of [complete, pending] must be True.\")\n\n if outgoing and incoming:\n clause += \"\"\n elif outgoing:\n clause += \"AND amount < 0\"\n elif incoming:\n clause += \"AND amount > 0\"\n else:\n raise TypeError(\"at least one of [outgoing, incoming] must be True.\")\n\n with open_db() as db:\n rows = db.fetchall(\n f\"\"\"\n SELECT payhash as checking_id, amount, fee, pending, memo, time\n FROM apipayments\n WHERE wallet = ? {clause}\n ORDER BY time DESC\n \"\"\",\n (wallet_id,),\n )\n\n return [Payment(**row) for row in rows]\n\n\ndef delete_wallet_payments_expired(wallet_id: str, *, seconds: int = 86400) -> None:\n with open_db() as db:\n db.execute(\n \"\"\"\n DELETE\n FROM apipayments WHERE wallet = ? AND pending = 1 AND time < strftime('%s', 'now') - ?\n \"\"\",\n (wallet_id, seconds),\n )\n\n\n# payments\n# --------\n\n\ndef create_payment(\n *, wallet_id: str, checking_id: str, amount: int, memo: str, fee: int = 0, pending: bool = True\n) -> Payment:\n with open_db() as db:\n db.execute(\n \"\"\"\n INSERT INTO apipayments (wallet, payhash, amount, pending, memo, fee)\n VALUES (?, ?, ?, ?, ?, ?)\n \"\"\",\n (wallet_id, checking_id, amount, int(pending), memo, fee),\n )\n\n new_payment = get_wallet_payment(wallet_id, checking_id)\n assert new_payment, \"Newly created payment couldn't be retrieved\"\n\n return new_payment\n\n\ndef update_payment_status(checking_id: str, pending: bool) -> None:\n with open_db() as db:\n db.execute(\"UPDATE apipayments SET pending = ? 
WHERE payhash = ?\", (int(pending), checking_id,))\n\n\ndef delete_payment(checking_id: str) -> None:\n with open_db() as db:\n db.execute(\"DELETE FROM apipayments WHERE payhash = ?\", (checking_id,))\n","sub_path":"lnbits/core/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346044491","text":"\"\"\"\nTop-level classes and functions to help running MomentsML on GREAT3.\n\"\"\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom astropy.table import Table\nimport os\n\nimport utils\nfrom momentsml import tools\n\n\n\nclass GREAT3Run(utils.Branch):\n\t\"\"\"\n\tThis is a simple class to group frequently used variables, on top of the Branch class.\n\tUnlike Branch, it does specify paths to MomentsML-internal temporary files and directories, and handles a workdir.\n\t\"\"\"\n\t\n\tdef __init__(self, experiment, obstype, sheartype, datadir, truthdir, workdir, g3publicdir, subfields=None, ncpu=None, skipdone=False):\n\t\t\n\t\tutils.Branch.__init__(self, experiment, obstype, sheartype, datadir, truthdir)\n\t\tlogger.info(\"Getting ready to work on branch %s-%s-%s\" % (experiment, obstype, sheartype))\n\n\t\tself.workdir=workdir\n\t\tif self.workdir == None:\n\t\t\tlogger.warning(\"Better specify a workdir, I think.\")\n\t\t\tself.workdir = \"./%s\" % (self.get_branchacronym())\n\t\tself.mkdirs()\n\t\t\n\t\tself.g3publicdir = g3publicdir\n\t\t\n\t\tself.subfields=subfields\n\t\tif self.subfields is None:\n\t\t\tself.subfields=range(200)\n\t\t\t\n\t\tself.ncpu = ncpu\n\t\tif ncpu is None:\n\t\t\tself.ncpu = 1\n\t\t\t\n\t\tself.skipdone = skipdone\n\n\t\t# Those, and further variables, can be wildly added later:\n\t\tself.simparams_name = None\n\t\tself.trainparams_name = None\n\t\t\n\n\t\n\tdef __str__(self):\n\t\t\"\"\"\n\t\tA tiny self-description, for logging\n\t\t\"\"\"\n\t\treturn \"GREAT3Run on branch %s in workdir '%s'\" % (self.get_branchacronym(), self.workdir)\n\n\t\t\n\tdef mkdirs(self, subfield=None):\n\t\t\"\"\"\n\t\tCreates the working directories. 
\n\t\t\"\"\"\n\n\t\tif not os.path.isdir(self.workdir):\n\t\t\tos.makedirs(self.workdir)\n\t\n\t\tif subfield is not None:\n\t\t\tdirpath = self.subpath(subfield)\n\t\t\tif not os.path.isdir(dirpath):\n\t\t\t\tos.makedirs(dirpath)\n\t\t\t\t\n\t\t\t# Now must create the sub-directories:\n\t\t\tfor subfolder in [\"obs\",\"sim\",\"ml\",\"pred\",\"out\",\"val\"]:\n\t\t\t\tdirpath = self.subpath(subfield, subfolder)\n\t\t\t\tif not os.path.isdir(dirpath):\n\t\t\t\t\tos.makedirs(dirpath)\n\n\n\tdef path(self,*args):\n\t\t\"\"\"\n\t\tA helper function that returns a filepath within the working directory.\n\t\t\n\t\t:param args: strings, must be in order of the filepath, similar to os.path.join()\n\t\t\n\t\tExample usage::\n\t\t\n\t\t\t>>> self.path(\"obs\",\"catalogue_000.fits\")\n\t\t\t\n\t\twill return the filepath: self.workdir/obs/catalogue_000.fits\n\t\t\"\"\"\n\t\treturn os.path.join(self.workdir,\"/\".join(args))\n\t\n\t\n\tdef subpath(self, subfield, *args):\n\t\t\"\"\"\n\t\tSimilar, but first argument is a subfield number\n\t\t\"\"\"\n\t\t\n\t\t\n\t\t\n\t\treturn os.path.join(self.workdir, \"%03i\" % subfield, \"/\".join(args))\n\t\n\n","sub_path":"wrappers/momentsmlgreat3/great3.py","file_name":"great3.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532149498","text":"#EJEMPLO 13\r\n#Nombre: Carlos Homero Vacacela Velez\r\n#Aula: Software A1\r\n\r\n#Ejercicio 6: Dado el sueldo de un empleado, encontrar el nuevo sueldo si obtiene \r\n#un aumento del 10% si su sueldo es inferior a $600, en caso contrario no tendrá aumento.\r\n\r\nclass Aumento:\r\n def Dinero(self):\r\n SUELDO=float(input(\"Ingrese el sueldo del empleado:\"))\r\n if SUELDO < 600:\r\n NS=SUELDO + SUELDO*0.1\r\n else:\r\n NS=SUELDO \r\n print(\"Sueldo a recibir:\",NS)\r\n print(\"\\n\")\r\n print(\"**FIN DE LA EJECUCIÓN**\")\r\n\r\naumento= Aumento()\r\naumento.Dinero()","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403591894","text":"import os\r\nimport csv\r\n\r\nbank_csv = os.path.join('..', 'Resources', 'PyBank_File.csv')\r\n\r\nProfitLoss = []\r\n\r\nAverage = 0\r\n\r\nLines = 0\r\n\r\nLastmonth = []\r\n\r\ndate= []\r\n\r\nwith open(bank_csv, 'r') as csvfile:\r\n\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n\r\n header = next(csvreader)\r\n\r\n for row in csvreader:\r\n ProfitLoss.append(float(row[1]))\r\n Lines += 1\r\n date.append(row[0])\r\n \r\n for i in range(1,Lines):\r\n Lastmonth.append(ProfitLoss[i] - ProfitLoss[i-1]) \r\n Average = round(sum(Lastmonth)/(Lines-1),2)\r\n\r\n max_date = str(date[Lastmonth.index(max(Lastmonth))])\r\n min_date = str(date[Lastmonth.index(min(Lastmonth))])\r\n\r\n\r\nprint(\"Financial Analysis\")\r\nprint(\"----------------------------\")\r\nprint(f'Total Months: {str(Lines)}')\r\nprint(f'Total: ${sum(ProfitLoss)}')\r\nprint(f'Average Change: ${str(Average)}')\r\nprint(f'Greatest increase in profits: {max_date} (${max(Lastmonth)})')\r\nprint(f'Greatest decrease in profits: {min_date} (${min(Lastmonth)})')\r\n\r\noutput_file = os.path.join(\"analysis_results.txt\")\r\nwith open(output_file, \"w\", newline=\"\") as datafile:\r\n datafile.write('Financial Analysis\\n')\r\n datafile.write(f'Total Months: {str(Lines)}\\n')\r\n datafile.write(f'Total: ${sum(ProfitLoss)}\\n')\r\n datafile.write(f'Average Change: ${str(Average)}\\n')\r\n 
datafile.write(f'Greatest increase in profits: {max_date} (${max(Lastmonth)})\\n')\r\n datafile.write(f'Greatest decrease in profits: {min_date} (${min(Lastmonth)})\\n')\r\n\r\n\r\n\r\n","sub_path":"python-challenge/PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185013290","text":"import os,shutil,logging,sys,pathlib\nlogging.basicConfig(level=logging.INFO,format=' %(asctime)s - %(levelname)s - %(message)s')\n\nclass MyCopyFile:\n def __init__(self, src,dst):\n self.src = src\n if not os.path.exists(self.src):\n error_message = \"源文件或文件夹不存在——\" + src\n logging.error(error_message)\n sys.exit()\n self.dst = dst\n self.dst_type = self.judge_file_dir( self.dst )\n\n def copy(self):\n if os.path.isdir(self.src):\n if self.judge_file_dir(self.dst) == \"dir\":\n logging.debug(\"拷贝文件夹至文件夹\" + \"Source: \" + self.src + \". Destination: \" + self.dst)\n self.copy_dir_to_dir(self.src, self.dst)\n self.status = True\n elif self.judge_file_dir(self.dst) == \"file\":\n error_message = \"不能将一个文件夹拷贝至文件!\" + '\\t' + \"源文件: \" + self.src + '\\t' + \"目的文件:\" + self.dst\n logging.error(error_message)\n self.status = False\n else:\n error_message = \"目的路径非文件夹或文件!\"\n logging.error(error_message)\n self.status = False\n elif os.path.isfile(self.src):\n if self.judge_file_dir(self.dst) == \"dir\":\n logging.debug(\"拷贝文件至文件夹——\" + \"Source: \" + self.src + \". Destination: \" + self.dst)\n self.copy_file_to_dir(self.src, self.dst)\n self.dst_file = os.path.join(self.dst,os.path.basename(self.src))\n self.status = True\n elif self.judge_file_dir(self.dst) == \"file\":\n dirpath = os.path.dirname(self.dst)\n logging.info(dirpath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n logging.debug(\"拷贝文件——\" + \"Source: \" + self.src + \". 
Destination: \" + self.dst)\n shutil.copyfile(self.src, self.dst)\n self.dst_file = self.dst\n self.status = True\n else:\n error_message = \"请检查目的地址是否存在\"\n logging.error(error_message)\n self.status = False\n else:\n error_message = \"源文件或文件夹不正确——\" + src\n logging.error(error_message)\n self.status = False\n\n def judge_file_dir(self,path):\n if type(path) != str:\n error_message = \"路径非字符串形式,请检查。\"\n logging.error(error_message)\n sys.exit()\n paths = path.split(\"\\\\\")\n lastelement = paths[len(paths) - 1]\n logging.debug(lastelement)\n if len(lastelement.split(\".\")) > 1:\n return \"file\"\n else:\n return \"dir\"\n\n def copy_dir_to_dir(self,dst):\n base = os.path.basename(self.src)\n dst = os.path.join(dst, base)\n names = os.listdir(self.src)\n if not os.path.exists(dst):\n os.makedirs(dst)\n for name in names:\n srcname = os.path.join(self.src, name)\n if os.path.isdir(srcname):\n self.copy_dir_to_dir(srcname, dst)\n else:\n shutil.copy2(srcname, dst)\n\n def copy_file_to_dir(self,src, dst):\n base = os.path.basename(src)\n if not os.path.exists(dst):\n os.makedirs(dst)\n dst = os.path.join(dst, base)\n shutil.copyfile(src, dst)\n\n def list_all_member(self):\n for name,value in vars(self).items():\n print('%s=%s'%(name,value))\n\n","sub_path":"Work/CopyFile/MyCopyFile.py","file_name":"MyCopyFile.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309497953","text":"#Input Script template \n#Used mainly on Hackerrank.com\n#Author: pr0x1ma\ndef inputScript():\n\tA = []\n\t#Gets string of numbers\n\tstrng = input()\n\n\t#trims whitespace\n\ttrimmed = strng.replace(\" \", \"\")\n\n\t#converts to integers then to a list of integers \n\t#to be processed\n\tind = list(map(int, trimmed))\n\tprint(ind)\n\n\ninputScript()","sub_path":"inputScript.py","file_name":"inputScript.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230770251","text":"#!/usr/bin/env python\r\n\r\nimport LFPy\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Define cell parameters\r\ncell_parameters = { # various cell parameters,\r\n 'morphology' : 'stick.hoc',\r\n 'cm' : 1.0, # membrane capacitance\r\n 'Ra' : 150, # axial resistance\r\n 'passive_parameters':dict(g_pas=1/30000., e_pas=-65),\r\n 'v_init' : -65., # initial crossmembrane potential\r\n 'passive' : True, # switch on passive mechs\r\n 'nsegs_method' : 'lambda_f',\r\n 'lambda_f' : 500.,\r\n 'dt' : 2.**-7, # [ms] dt's should be in powers of 2 for both,\r\n 'tstart' : 0., # start time of simulation, recorders start at t=0\r\n 'tstop' : 3., # stop simulation at 200 ms. These can be overridden\r\n # by setting these arguments i cell.simulation()\r\n \"extracellular\": True,\r\n}\r\n\r\ncell = LFPy.Cell(**cell_parameters)\r\ncell.set_pos(z=-500)\r\nn_tsteps = int(cell.tstop / cell.dt + 1)\r\nt = np.arange(n_tsteps) * cell.dt\r\n\r\n# Make a linear external field\r\next_field = np.vectorize(lambda z: 0. 
+ z/np.max(cell.zmid) * 10.)\r\n\r\npulse = np.zeros(n_tsteps)\r\npulse[100:] = 1.\r\n\r\n# Calculate time dependent field for each cell compartment\r\nv_cell_ext = np.zeros((cell.totnsegs, n_tsteps))\r\nv_cell_ext[:, :] = ext_field(cell.zmid).reshape(cell.totnsegs, 1) * pulse.reshape(1, n_tsteps)\r\ncell.insert_v_ext(v_cell_ext, t)\r\n\r\nzs = np.linspace(np.min(cell.zmid), np.max(cell.zmid), 4)\r\ncell_plot_idxs = [cell.get_closest_idx(0, 0, z) for z in zs]\r\n\r\n# This function is used to color code compartments for plotting\r\ncell_pos_clr = lambda z: plt.cm.jet(1.0 * (z - np.min(zs)) / (np.max(zs) - np.min(zs)))\r\n\r\n\r\n# Run simulation, electrode object argument in cell.simulate\r\nprint(\"running simulation...\")\r\ncell.simulate(rec_imem=True, rec_vmem=True)\r\n\r\n# Plot results\r\nplt.close('all')\r\nfig = plt.figure(figsize=[12, 5])\r\nfig.subplots_adjust(wspace=0.6, top=0.83)\r\nfig.suptitle(\"Stick cell in linear external potential\")\r\n\r\nax1 = plt.subplot(151, title=\"Stick cell\", aspect=1, frameon=False, xlim=[-100, 100], xlabel=\"x [$\\mu$m]\", ylabel=\"y [$\\mu$m]\")\r\nax2 = plt.subplot(152, title=\"External potential\\n(Ue)\", xlim=[0, cell.tstop], xlabel=\"Time [ms]\", ylabel=\"mV\")\r\nax3 = plt.subplot(153, title=\"Membrane potential\\n(Vm)\", sharex=ax2, xlabel=\"Time [ms]\", ylabel=\"[mV]\")\r\nax4 = plt.subplot(154, title=\"Ui = Ue + Vm\", sharex=ax3, xlabel=\"Time [ms]\", ylabel=\"[mV]\")\r\nax5 = plt.subplot(155, title=\"Transmembrane currents\", sharex=ax3, xlabel=\"Time [ms]\", ylabel=\"nA\")\r\n\r\n[ax1.plot([cell.xstart[idx], cell.xend[idx]],\r\n [cell.zstart[idx], cell.zend[idx]], '-',\r\n c='k', clip_on=False) for idx in range(cell.totnsegs)]\r\n\r\nfor num, idx in enumerate(cell_plot_idxs):\r\n ax1.plot(cell.xmid[idx], cell.zmid[idx], 'D', c=cell_pos_clr(cell.zmid[idx]))\r\n ax2.plot(cell.tvec, v_cell_ext[idx], c=cell_pos_clr(cell.zmid[idx]))\r\n ax3.plot(cell.tvec, cell.vmem[idx, :], c=cell_pos_clr(cell.zmid[idx]))\r\n ax4.plot(cell.tvec, v_cell_ext[idx] + cell.vmem[idx, :], c=cell_pos_clr(cell.zmid[idx]))\r\n ax5.plot(cell.tvec, cell.imem[idx, :], c=cell_pos_clr(cell.zmid[idx]))\r\n\r\nplt.savefig('example_stick.png')\r\n","sub_path":"ephaptic_example_stick.py","file_name":"ephaptic_example_stick.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160534142","text":"import time\n\nimport phonenumbers\nimport random\nimport timeit\nimport math\nimport os\n\nif __name__ == \"__main__\":\n print(\"executed as main\")\nelse:\n\n #sum\n def sum(number1, number2):\n sum = number1 + number2\n return sum\n\n #rest\n def remainder(number1, number2):\n try:\n if number2 == 0:\n throwing_error()\n else:\n return int(number1 % number2)\n except ZeroDivisionError:\n return \"divisor is less than 1\"\n\n\n #division\n def division(number1, number2):\n try:\n if number2 == 0:\n throwing_error()\n else:\n return int(number1 / number2)\n except ZeroDivisionError:\n return \"divisor is less than 1\"\n\n\n #difference\n def difference(number1, number2):\n diff = number1 - number2\n return diff\n\n #raise Exception for asserRaises tests\n def throwing_error():\n raise Exception\n\n #power\n def power(b, p):\n try:\n if b == p == 0 or b < 0 or p < 0:\n throwing_error()\n else:\n return int(math.pow(int(b), int(p)))\n except OverflowError:\n throwing_error()\n return \"too big number\" #I guess this is not reachable\n\n\n\n # write to file 2 numbers each on a different 
line\n    def writing_to_file(path, b, p):\n        f = open(path, \"w\")\n        f.write(str(b) + '\\n')\n        f.write(str(p) + '\\n')\n        f.close()\n        pass\n\n    # try:\n    #     return math.pow(b, p)\n    # except OverflowError:\n    #     return \"too big number\"\n\n    #read from a file\n    def power_from_file(path):\n        f = None\n        try:\n            if os.path.isfile(path):\n                f = open(path, \"r\")\n                numbers = verify_file(path)\n                base = int(numbers[0]) #\"integerize\" the elements, otherwise they are str type\n                power = int(numbers[1])\n                if base == power == 0 or base < 0 or power < 0: #check for corner cases\n                    throwing_error()\n                else:\n                    return int(math.pow(base, power))\n            else:\n                throwing_error()\n\n        except OverflowError:\n            throwing_error()\n            return \"too big number\"\n        finally:\n            if f is not None:\n                f.close()\n\n    #verify if there are only digits on the file + stripping the \\n character\n    def verify_file(path):\n        try:\n            f = open(path, \"r\")\n            numbers = []\n            for number in f:\n                number = number.rstrip() #strip \\n\n                numbers.append(number) #form the list\n            for number in numbers:\n                if number.isdigit() == True:\n                    pass\n                else:\n                    throwing_error()\n            return numbers\n        finally:\n            f.close()\n\n\n\n\n\n    # def power_from_file():\n    #     f = open(\"file.txt\", \"r\")\n    #     for line in f:\n    #         line = line.rstrip()\n    #         return line\n\n    def raise_number_parse_exception():\n        raise phonenumbers.NumberParseException\n\n\n    #verify a string\n    def phoneValidator(phone_number):\n        phone_number_parsed = phonenumbers.parse(phone_number)\n        if phonenumbers.is_possible_number(phone_number_parsed) == True:\n            return True\n        else:\n            raise Exception\n\n\n    #working with values from a defined range --- build an increasing list of a fixed length\n    def rangedListFabric(number):\n        list = [*range(int(number))]\n        for i in list:\n            list[i] = list[i]+10\n        return list\n\n\n    #poor sorting algorithm ----> selection sort\n    def random_generator_and_selection_sort(values):\n        argument = random_list_generator(values)\n        for i in range(len(argument)):\n            min_idx = i\n            for j in range(i + 1, len(argument)):\n                if argument[min_idx] > argument[j]:\n                    min_idx = j\n            argument[i], argument[min_idx] = argument[min_idx], argument[i]\n        return argument\n\n    #poor sorting algorithm ----> selection sort -----> passing a list instead of value\n    def selection_sort_list(list):\n        argument = list\n        for i in range(len(argument)):\n            min_idx = i\n            for j in range(i + 1, len(argument)):\n                if argument[min_idx] > argument[j]:\n                    min_idx = j\n            argument[i], argument[min_idx] = argument[min_idx], argument[i]\n        return argument\n\n\n    def partition(start, end, array):\n        # Initializing pivot's index to start\n        pivot_index = start\n        pivot = array[pivot_index]\n\n        while start < end:\n            while start < len(array) and array[start] <= pivot:\n                start += 1\n            while array[end] > pivot:\n                end -= 1\n            if (start < end):\n                array[start], array[end] = array[end], array[start]\n        array[end], array[pivot_index] = array[pivot_index], array[end]\n        return end\n\n\n    def quick_sort(start, end, array):\n        if (start < end):\n            p = partition(start, end, array)\n            quick_sort(start, p - 1, array)\n            quick_sort(p + 1, end, array)\n        return array\n\n\n    #generate random list and sort it with quicksort - no shit sherlock\n    def random_generator_and_quicksort(argument):\n        list = random_list_generator(argument)\n        return quick_sort(0, len(list)-1, list)\n\n    def random_list_generator(argument):\n        list = random.sample(range(10, 100), argument)\n        return list\n\n\n    #measure time elapsed when sorting a list\n    def timer_function_quicksort(argument):\n        start = timeit.timeit()\n        random_generator_and_quicksort(argument)\n        end 
= timeit.timeit()\n dif = start - end\n if dif < 0:\n dif = dif * (-1)\n return dif\n\n def timer_function_selection_sort(argument):\n start = timeit.timeit()\n random_generator_and_selection_sort(argument)\n end = timeit.timeit()\n dif = start - end\n if dif < 0:\n dif = dif * (-1)\n return dif\n\n #pass a list as argument and sort with quicksort ---- maybe for future purposes\n def quick_sort_list(list):\n return quick_sort(0, len(list)-1, list)\n","sub_path":"Methods.py","file_name":"Methods.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64537873","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nclass ConvNorm(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,\n padding=None, dilation=1, bias=True, w_init_gain='linear'):\n super(ConvNorm, self).__init__()\n if padding is None:\n assert(kernel_size % 2 == 1)\n padding = int(dilation * (kernel_size - 1) / 2)\n\n self.conv = torch.nn.Conv1d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation,\n bias=bias)\n\n torch.nn.init.xavier_uniform_(\n self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))\n\n def forward(self, signal):\n conv_signal = self.conv(signal)\n return conv_signal\n \nclass Discriminator(nn.Module):\n \"\"\"discriminator model\"\"\"\n def __init__(self, dim_deck=80, dim_pre=128):\n super(Discriminator, self).__init__()\n convolutions = []\n for i in range(3):\n conv_layer = nn.Sequential(\n ConvNorm(dim_deck if i==0 else dim_pre,\n dim_pre,\n kernel_size=5, stride=1,\n padding=2,\n dilation=1, w_init_gain='relu'),\n nn.InstanceNorm1d(dim_deck) if i==0 else nn.InstanceNorm1d(dim_pre))\n convolutions.append(conv_layer)\n self.convolutions = nn.ModuleList(convolutions)\n self.dense = nn.Linear(dim_pre,1)\n def forward(self, x):\n for conv in self.convolutions:\n x = F.relu(conv(x))\n patch_size = 32\n x_sample = torch.randint(low = 0, high = x.size(2) - patch_size, size = (1,))\n \n x = x[:,:,x_sample:x_sample+patch_size]\n x = torch.mean(x,dim=2)\n mean_val = self.dense(x)\n #mean_val = torch.clamp(mean_val, 0, 1, out=None) \n return mean_val\n\n \n\n \nclass LatentClassifier(nn.Module):\n \"\"\"discriminator model\"\"\"\n def __init__(self, nc = 376, ns=0.2, dim_pre=128, dim_deck=64):\n super(LatentClassifier, self).__init__()\n self.ns = ns\n self.nc = nc\n \n self.lstm1 = nn.LSTM(dim_deck, dim_pre, 2, batch_first=True)\n convolutions = []\n for i in range(3):\n conv_layer = nn.Sequential(\n ConvNorm(dim_deck if i==0 else dim_pre,\n dim_pre,\n kernel_size=3, stride=1,\n padding=2,\n dilation=1, w_init_gain='relu'))\n convolutions.append(conv_layer)\n self.convolutions = nn.ModuleList(convolutions)\n #self.lstm2 = nn.LSTM(dim_pre, 32, 2, batch_first=True)\n self.softmax = nn.LogSoftmax(dim=1)\n self.dense = nn.Linear(dim_pre,nc)\n def forward(self, x):\n #x,_ = self.lstm1(x)\n #x = x.transpose(1,2)\n for conv in self.convolutions:\n x = F.relu(conv(x))\n patch_size = 32\n x_sample = torch.randint(low = 0, high = x.size(2) - patch_size, size = (1,))\n \n x = x[:,:,x_sample:x_sample+patch_size]\n \n x = torch.mean(x,dim=2)\n x = self.dense(x)\n x = self.softmax(x)\n \n return x\nclass Speakernet(nn.Module):\n def __init__(self, dim_pre=128, dim_deck=64):\n super(Speakernet, self).__init__()\n \n self.lstm1 = nn.LSTM(dim_deck, dim_pre, 2, batch_first=True)\n convolutions = []\n for i in 
range(3):\n conv_layer = nn.Sequential(\n ConvNorm(dim_pre,\n dim_pre,\n kernel_size=3, stride=1,\n padding=1,\n dilation=1, w_init_gain='relu'))\n convolutions.append(conv_layer)\n self.convolutions = nn.ModuleList(convolutions)\n \n def forward(self, x):\n x = x.transpose(1,2)\n x,_ = self.lstm1(x)\n x = x.transpose(1,2)\n for conv in self.convolutions:\n x = F.relu(conv(x))\n \n return x\n\nclass Postnet(nn.Module):\n \"\"\"Postnet\n - Five 1-d convolution with 512 channels and kernel size 5\n \"\"\"\n\n def __init__(self):\n super(Postnet, self).__init__()\n self.convolutions = nn.ModuleList()\n\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(80, 512,\n kernel_size=5, stride=1,\n padding=2,\n dilation=1, w_init_gain='tanh'),\n nn.BatchNorm1d(512))\n )\n\n for i in range(1, 5 - 1):\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(512,\n 512,\n kernel_size=5, stride=1,\n padding=2,\n dilation=1, w_init_gain='tanh'),\n nn.BatchNorm1d(512))\n )\n\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(512, 80,\n kernel_size=5, stride=1,\n padding=2,\n dilation=1, w_init_gain='linear'),\n nn.BatchNorm1d(80))\n )\n\n def forward(self, x):\n for i in range(len(self.convolutions) - 1):\n x = torch.tanh(self.convolutions[i](x))\n\n x = self.convolutions[-1](x)\n\n return x ","sub_path":"model/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562673061","text":"__author__ = \"Cyril Jaquier\"\n__version__ = \"$Revision: 1.1 $\"\n__date__ = \"$Date: 2010-07-25 12:46:56 $\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier\"\n__license__ = \"GPL\"\nimport unittest\nfrom client.jailreader import JailReader\nclass JailReaderTest(unittest.TestCase):\n\tdef setUp(self):\n\t\t\"\"\"Call before every test case.\"\"\"\n\tdef tearDown(self):\n\t\t\"\"\"Call after every test case.\"\"\"\n\tdef testSplitAction(self):\n\t\taction = \"mail-whois[name=SSH]\"\n\t\texpected = ['mail-whois', {'name': 'SSH'}]\n\t\tresult = JailReader.splitAction(action)\n\t\tself.assertEquals(expected, result)\n\t\t\n","sub_path":"fstmerge/examples/Fail2ban/rev579-732/right-branch-732/testcases/clientreadertestcase.py","file_name":"clientreadertestcase.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321475405","text":"from stable_baselines3.common.env_checker import check_env\nfrom stable_baselines3 import SAC, PPO\nfrom stable_baselines3.common.vec_env import VecNormalize, VecFrameStack, VecTransposeImage\nimport os.path as osp\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback, EventCallback, BaseCallback\nfrom sb_code.Logger import Logger\nfrom sys import platform\nimport torch\nimport gym\nfrom stable_baselines3.common.evaluation import evaluate_policy\n# Below is training on linux and if GPU is available\nfrom stable_baselines3.sac.policies import MlpPolicy\n\nPROJECT_PATH = osp.abspath(osp.dirname(osp.dirname(__file__)))\n\n# Step 1. Initialize the environment\nenv = gym.make('CartPole-v1')\n\n# Step 2. Check the custom environment. Must do it before any wrappers\ncheck_env(env)\n\n\n#eval_env = env # Make a seperate evaluation environment without vectorized version as in train env\n\n# Step 3.b. 
To make Vectorized Environment to be able to use Normalize or FramStack (Optional)\nenv = make_vec_env(lambda: env, n_envs=1)\n# Step 3.b Passing through Normalization and stack frame (Optional)\n\nenv = VecFrameStack(env, n_stack=1) # Use 1 for now because we use image\n#env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.) # If using normalize, must save\n\n# Step 4. Make Logger corrsponding to the name of algorithm\nlogger = Logger(\"ppo\")\n\n# Step 5. Creating callbacks\nclass SaveNormalization(BaseCallback):\n \"\"\"\n Base class for triggering callback on event.\n\n :param callback: (Optional[BaseCallback]) Callback that will be called\n when an event is triggered.\n :param verbose: (int)\n \"\"\"\n def __init__(self, env = None, save_path=None):\n super(SaveNormalization, self).__init__()\n self.save_path = save_path\n\n def _on_step(self) -> bool:\n if self.model.get_vec_normalize_env() is not None:\n self.model.get_vec_normalize_env().save(self.save_path)\n if self.verbose > 1:\n print(f\"Saving VecNormalize to {self.save_path}\")\n return True\n\ncheckpoint_callback = CheckpointCallback(save_freq=30000, save_path=logger.output_dir,\n name_prefix='rl_model')\n\nsavestats_callback = SaveNormalization(save_path=osp.join(logger.output_dir, \"vec_normalization.pkl\")) # If using normalize, must create this callback\n\neval_callback = EvalCallback(eval_env = env, n_eval_episodes=5, callback_on_new_best=savestats_callback,\n eval_freq=1000,\n best_model_save_path=osp.join(logger.output_dir, \"best_model\"),\n log_path=osp.join(logger.output_dir, \"results\"))\n\ncallback = CallbackList([checkpoint_callback, eval_callback])\n\nmodel = PPO('MlpPolicy', env=env, verbose=1)\n\nmean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=100)\n\nprint(f\"Before mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}\")\n\n\nmodel.learn(total_timesteps=150000, log_interval=5, callback=callback) # Log_interval = number of episodes\n","sub_path":"sb_code/check_my_impl.py","file_name":"check_my_impl.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"196841669","text":"import numpy as np\nimport torch\nimport os\n\nclass AttrPredictor(object):\n\n def __init__(self, cfg, tops_type=[3, 5, 10]):\n \"\"\"Create the empty array to count true positive(tp),\n true negative(tn), false positive(fp) and false negative(fn).\n\n Args:\n class_num : number of classes in the dataset\n tops_type : default calculate top3, top5 and top10\n \"\"\"\n\n attr_cloth_file = open(cfg.attr_cloth_file).readlines()\n self.attr_idx2name = {}\n for i, line in enumerate(attr_cloth_file[2:]):\n self.attr_idx2name[i] = line.strip('\\n').split()[0]\n\n def print_attr_name(self, pred_idx):\n for idx in pred_idx: \n #print(self.attr_idx2name[idx],idx)\n with open('results.txt','a') as f:\n f.write(str(idx)+',')\n with open('results.txt','a') as f:\n f.write('\\n')\n for idx in pred_idx:\n with open('results.txt','a') as f:\n f.write(str(self.attr_idx2name[idx])+',')\n \n def show_prediction(self, pred,filename):\n if isinstance(pred, torch.Tensor):\n data = pred.data.cpu().numpy()\n elif isinstance(pred, np.ndarray):\n data = pred\n else:\n raise TypeError('type {} cannot be calculated.'.format(type(pred)))\n with open('results.txt','a') as f:\n f.write('\\n'+str(filename)+'\\n')\n for i in range(pred.size(0)):\n indexes = np.argsort(data[i])[::-1]\n idx5= indexes\n \n #print('[ Top5 Prediction ]')\n 
self.print_attr_name(idx5)\n","sub_path":"mmfashion/core/evaluation/attr_predict_demo.py","file_name":"attr_predict_demo.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548246229","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim\nimport torch.nn.functional as F\nimport os\n\nimport models\nfrom datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot, miniImageNet_few_shot, tiered_ImageNet_few_shot\n\nfrom tqdm import tqdm\nimport pandas as pd\nimport argparse\nimport random\nimport copy\nimport warnings\nfrom utils import to_one_hot, AverageMeter, loss_calc\nimport utils\nfrom methods.baselinetrain import BaselineTrain\n\n\n\n\ndef evaluate(dataloader, params): \n print(\"Loading Model: \", params.embedding_load_path)\n if params.embedding_load_path_version == 0:\n state = torch.load(params.embedding_load_path)['state']\n state_keys = list(state.keys())\n #print(state_keys)\n for _, key in enumerate(state_keys):\n if \"feature.\" in key:\n # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'\n newkey = key.replace(\"feature.\", \"\")\n state[newkey] = state.pop(key)\n else:\n state.pop(key)\n \n sd = state\n elif params.embedding_load_path_version == 1:\n sd = torch.load(params.embedding_load_path)\n\n if 'epoch' in sd:\n print(\"Model checkpointed at epoch: \", sd['epoch'])\n sd = sd['model']\n # elif params.embedding_load_path_version == 3:\n # state = torch.load(params.embedding_load_path)\n # print(\"Model checkpointed at epoch: \", state['epoch'])\n # state = state['model']\n # state_keys = list(state.keys())\n # for _, key in enumerate(state_keys):\n # if \"module.\" in key:\n # # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'\n # newkey = key.replace(\"module.\", \"\")\n # state[newkey] = state.pop(key)\n # else:\n # state.pop(key)\n # sd = state\n else:\n raise ValueError(\"Invalid load path version!\")\n\n if params.model == 'resnet10':\n pretrained_model = models.ResNet10()\n feature_dim = pretrained_model.final_feat_dim\n elif params.model == 'resnet12':\n pretrained_model = models.Resnet12(width=1, dropout=0.1)\n feature_dim = pretrained_model.output_size\n elif params.model == 'resnet18':\n pretrained_model = models.resnet18(remove_last_relu=False, \n input_high_res=True)\n feature_dim = 512\n elif params.model == 'vgg11':\n pretrained_model = models.vgg11_bn()\n pretrained_model.final_feat_dim = 512\n feature_dim = 512\n else:\n raise ValueError(\"Invalid model!\")\n\n\n pretrained_model.load_state_dict(sd)\n\n model = BaselineTrain(pretrained_model, 64)\n model.load_state_dict(torch.load(params.embedding_load_path)['state'])\n pretrained_model = model\n\n\n acc_all = []\n\n pretrained_model.cuda()\n total = 0\n correct = 0.0\n\n meters = utils.AverageMeterSet()\n for i, (x, y) in tqdm(enumerate(dataloader)):\n x = x.cuda()\n y = y.cuda()\n\n pretrained_model.eval()\n scores = pretrained_model(x)\n \n \n perf = utils.accuracy(scores.data,\n y.data, topk=(1, 5))\n meters.update('top1', perf['average'][0].item(), len(x))\n meters.update('top5', perf['average'][1].item(), len(x))\n\n #_, pred = torch.max(scores.data, 1)\n #total += y.size(0)\n #correct += (pred==y).sum().item()\n \n 
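# --- Illustrative aside, not part of the original record ---
# utils.accuracy above is project-internal; a minimal top-k accuracy in plain PyTorch
# might look roughly like this (shapes and names are my assumptions, not the project's API):
import torch

def topk_accuracy(scores, targets, ks=(1, 5)):
    # scores: (batch, num_classes) logits; targets: (batch,) integer class ids
    _, pred = scores.topk(max(ks), dim=1)    # (batch, max_k) predicted class ids
    hits = pred.eq(targets.unsqueeze(1))     # broadcast-compare against the labels
    return [hits[:, :k].any(dim=1).float().mean().item() for k in ks]
# ------------------------------------------------------------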
###############################################################################################\n # print('Test Acc = %d %%' %\n # (100 * correct/total))\n #print(correct, total)\n\n print(\"Top1 Avg {:f}\".format(meters.__getitem__('top1')))\n\ndef main(params):\n\n #if params.target_dataset == 'ISIC':\n # datamgr = ISIC_few_shot\n #elif params.target_dataset == 'EuroSAT':\n # datamgr = EuroSAT_few_shot\n #elif params.target_dataset == 'CropDisease':\n # datamgr = CropDisease_few_shot\n #elif params.target_dataset == 'ChestX':\n # datamgr = Chest_few_shot\n #elif params.target_dataset == 'miniImageNet_train':\n # datamgr = miniImageNet_few_shot\n #elif params.target_dataset == 'miniImageNet_val':\n # datamgr = miniImageNet_few_shot\n #else:\n # print(params.target_dataset)\n # raise ValueError(\"Invalid Dataset!\")\n\n for i in ['miniImageNet_val', 'miniImageNet_train' ]:\n params.target_dataset = i\n datamgr = miniImageNet_few_shot\n \n results = {}\n shot_done = []\n print(params.target_dataset)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(params.seed)\n torch.random.manual_seed(params.seed)\n torch.cuda.manual_seed(params.seed)\n random.seed(params.seed)\n dataloader = datamgr.SimpleDataManager(params.image_size, params.batch_size).get_data_loader(aug=False, train_or_val=params.target_dataset=='miniImageNet_train')\n evaluate(dataloader, params)\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser(\n description='Evaluation script')\n parser.add_argument('--target_dataset', default='miniImagenet',\n help='test target dataset')\n parser.add_argument('--batch_size', type=int, default=128,\n help='Size of batch')\n parser.add_argument('--image_size', type=int, default=224,\n help='Resolution of the input image')\n parser.add_argument('--train_aug', action='store_true',\n help='perform data augmentation or not during training ')\n parser.add_argument('--model', default='resnet10',\n help='backbone architecture')\n parser.add_argument('--seed', default=1, type=int, help='random seed')\n parser.add_argument('--embedding_load_path', type=str,\n help='path to load embedding')\n parser.add_argument('--embedding_load_path_version', type=int, default=1, \n help='how to load the embedding')\n\n \n params = parser.parse_args()\n main(params)\n \n","sub_path":"evaluation_norm/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"3007460","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import partial\n\nimport tf_euler\n\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\nimport tf_context as ctx\nimport base_runner\nimport graph_tag\nimport numpy as np\n\nfrom pareto_mtl import ParetoMTL\nimport tf_sort\n\n \n\nnation_sp = ctx.get_config(\"nation\")\n \n\nprint('nation',nation_sp)\n# print('flag_exp_loss',flag_exp_loss)\ngm_loss_weight = 36.\nflag_dist = 1.\nflag_RI = 0\nvar_weight=1e-5\ngate_weight = 1.\ngate_th=0.8\nflag_gate_above=True\nsame_nation_weight=0.0\nflag_uniform_nation_weight=False\ncate_align_weight=0.1\n 
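# --- Illustrative aside, not part of the original record ---
# The finetune script above seeds torch/numpy/random inline before building the loader;
# a reusable helper could bundle the same calls (a sketch; the cudnn flags trade
# throughput for run-to-run reproducibility):
import random
import numpy as np
import torch

def seed_everything(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)    # safe no-op without a GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# ------------------------------------------------------------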
\nprint('gm_loss_weight',gm_loss_weight)\nprint('flag_dist',flag_dist)\nprint('flag_RI',flag_RI)\nprint('var_weight',var_weight)\nprint('gate_weight',gate_weight)\nprint('gate_th',gate_th)\nprint('flag_gate_above',flag_gate_above)\nprint('same_nation_weight',same_nation_weight)\nprint('flag_uniform_nation_weight',flag_uniform_nation_weight)\nprint('cate_align_weight',cate_align_weight)\n \nif flag_RI:\n from util_pretrain_RI import *\nelse:\n from util_pretrain import *\n\nif flag_dist:\n from graph_matching_pretrain_dist import *\nelse:\n from graph_matching_pretrain_prod import *\n \n\n\ndef greater_than_zero(x):\n x=tf.maximum(0.0*x,x)\n x = tf.sign(x)\n return x\n\n\n \n \n\ndef random_randint(limit,n_row):\n a = tf.random_uniform((n_row,limit))\n a = tf.argmax(a,axis=1)\n return a\n\n \n\n\ndef get_grad_norm(grad_r):\n grad_norm = 0.0\n for i_grad_r,e_grad_r in enumerate(grad_r):\n if e_grad_r is None:\n break\n else:\n grad_norm += tf.reduce_sum(tf.square(e_grad_r))\n grad_norm = tf.maximum(0.0*grad_norm + 1e-7,grad_norm)\n grad_norm = tf.sqrt(grad_norm)\n return grad_norm\n\ndef get_grad_prod(grad_1,grad_2):\n grad_prod = 0.0\n for i in range(len(grad_1)):\n if grad_1[i] is None or grad_2[i] is None :\n break\n else:\n grad_prod += tf.reduce_sum(grad_1[i]*grad_2[i])\n \n return grad_prod\n \n\ndef stop_grad_list(x_list):\n \n return [tf.stop_gradient(x) for x in x_list]\n\ndef gen_paired_meta_loss(main_loss,aux_loss, weights, flag_stop_gradient):\n \n lm,ls = main_loss,aux_loss \n \n grad_m = tf.gradients(lm, weights)\n grad_norm_m = get_grad_norm(grad_m)\n grad_s = tf.gradients(ls, weights)\n grad_norm_s = get_grad_norm(grad_s)\n \n if flag_stop_gradient:\n grad_m = stop_grad_list(grad_m)\n grad_norm_m = tf.stop_gradient(grad_norm_m)\n \n loss_grad_match = 0.\n \n grad_prod = get_grad_prod(grad_m, grad_s)\n loss_grad_match += - grad_prod / (grad_norm_m * grad_norm_s)\n \n return tf.reduce_mean(loss_grad_match)\n\ndef get_variable_list(regex_pattern):\n import re\n# regex_pattern = 'hidden[12]'\n train_vars = []\n var_names = []\n size_list = []\n for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n if re.search(regex_pattern, var.op.name):\n train_vars.append(var)\n var_names.append(var.op.name)\n size_list.append(tf.shape(var))\n print('----------------------------- Begin: var_names of %s -----------------------------'%regex_pattern)\n print(var_names)\n print('----------------------------- End: var_names of %s -----------------------------'%regex_pattern)\n \n return train_vars,size_list\n\ndef grad_flatten(grads,size_list):\n grad_list = []\n for i,e in enumerate(grads):\n if e is not None:\n x =e \n else:\n x = tf.zeros(size_list[i])\n grad_list.append(tf.reshape(x,[-1]))\n return tf.concat(grad_list,0)\n\n\n# from epo_lp import EPO_LP\n\ndef get_high64bit(tensor):\n # 1-D tensor\n # input: [1, 2, 3, 4, 5, 6], return: [1, 3, 5]\n tensor = tf.reshape(tensor, [-1, 2])\n return tensor[:, 0]\n\n\ndef tohash128(sp_tensors):\n for i in range(len(sp_tensors)):\n sp = sp_tensors[i]\n\n indices = sp.indices\n n = tf.shape(indices)[0]\n dim = tf.shape(indices)[1]\n indices = tf.multiply(indices, [1, 2])\n zero_indices = tf.add(indices, [0, 1])\n new_indices = tf.stack([indices, zero_indices], 0)\n new_indices = tf.transpose(new_indices, perm=[1, 0, 2])\n new_indices = tf.reshape(new_indices, [2 * n, dim])\n\n values = sp.values\n zero = tf.zeros_like(values)\n new_values = tf.stack([values, zero])\n new_values = tf.transpose(new_values)\n new_values = tf.reshape(new_values, 
[-1])\n\n shape = sp.dense_shape\n new_shape = tf.multiply(shape, [1, 2])\n\n sp_tensors[i] = tf.SparseTensor(indices=new_indices, values=new_values, dense_shape=new_shape)\n\n\ndef sparse_embedding(sp_tensors, input_dimensions, embedding_dimension, ps_num, init_val, stddev, names):\n l = []\n print('len(sp_tensors)',len(sp_tensors))\n print('len(names)',len(names))\n for i in range(len(sp_tensors)):\n with tf.variable_scope(names[i]):\n print(\"sparse_embedding name:\" + names[i] + \" dim:\" + str(input_dimensions[i]))\n rst = full_connect_sparse(sp_tensors[i], [input_dimensions[i], embedding_dimension[i]], None, True, ps_num,\n None, init_val, stddev)\n l.append(rst)\n return l\n\ndef init_graph():\n zk_addr = ctx.get_config('euler', 'zk_addr')\n zk_path = '/euler/{}'.format(ctx.get_app_id())\n shard_num = ctx.get_config('euler', 'shard_num')\n tf_euler.initialize_graph({\n 'mode': 'remote',\n 'zk_server': zk_addr,\n 'zk_path': zk_path,\n 'shard_num': shard_num,\n 'num_retries':10\n })\n\ndef sample_by_walk(src, edge_type, walk_len=1, walk_p=1, walk_q=1):\n walk_path = [edge_type]\n walk_path.extend([['1'] for i in range(walk_len - 1)])\n path = tf_euler.random_walk(\n src, walk_path,\n p=walk_p,\n q=walk_q)\n\n path_s = tf.slice(path, [0,1], [-1, walk_len])\n pos = tf.reshape(path_s, [-1])\n negs_copy = tf_euler.sample_node_with_src(pos, 6 * walk_len)\n\n src_s = tf.reshape(src, [-1, 1])\n src_copy = tf.concat([src_s for i in range(walk_len)], 1)\n #have some problems\n neg_nodes = tf.reshape(negs_copy, [-1])\n src_nodes = tf.reshape(src_copy, [-1])\n return src_nodes, pos, neg_nodes\n\n\ndef get_neighbors(src, pos, negs,q_part_features,i_part_features, i_nei_cnt=5, q_nei_cnt=5):\n # q_part_features = ['query_norm']\n # q_features = \"query_sorted_norm_keyword\"\n\n # i_part_features = ['item_id']\n\n src_i_nodes, _, _ = tf_euler.sample_neighbor(src, edge_types=['1', '3'], count=i_nei_cnt)\n print(\"======== neighbor count: {}\".format(i_nei_cnt))\n src_i_nodes_filled = tf_euler.get_sparse_feature(src_i_nodes, i_part_features)\n src_q_nodes, _, _ = tf_euler.sample_neighbor(src, edge_types=['2'], count=q_nei_cnt)\n src_q_nodes_filled = tf_euler.get_sparse_feature(src_q_nodes, q_part_features)\n\n pos_i_nodes, _, _ = tf_euler.sample_neighbor(pos, edge_types=['1', '3'], count=i_nei_cnt)\n pos_i_nodes_filled = tf_euler.get_sparse_feature(pos_i_nodes, i_part_features)\n pos_q_nodes, _, _ = tf_euler.sample_neighbor(pos, edge_types=['2'], count=q_nei_cnt)\n pos_q_nodes_filled = tf_euler.get_sparse_feature(pos_q_nodes, q_part_features)\n\n neg_i_nodes, _, _ = tf_euler.sample_neighbor(negs, edge_types=['1', '3'], count=i_nei_cnt)\n neg_i_nodes_filled = tf_euler.get_sparse_feature(neg_i_nodes, i_part_features)\n neg_q_nodes, _, _ = tf_euler.sample_neighbor(negs, edge_types=['2'], count=q_nei_cnt)\n neg_q_nodes_filled = tf_euler.get_sparse_feature(neg_q_nodes, q_part_features)\n\n tohash128(src_i_nodes_filled)\n tohash128(src_q_nodes_filled)\n tohash128(pos_i_nodes_filled)\n tohash128(pos_q_nodes_filled)\n tohash128(neg_i_nodes_filled)\n tohash128(neg_q_nodes_filled)\n\n return src_i_nodes_filled, src_q_nodes_filled, pos_i_nodes_filled, \\\n pos_q_nodes_filled, neg_i_nodes_filled, neg_q_nodes_filled\n\ndef get_feature(src,full_features):\n src_filled = tf_euler.get_sparse_feature(src, full_features)\n tohash128(src_filled)\n return src_filled\n\ndef get_neighbor(x,q_part_features,i_part_features,c_part_features, q_nei_cnt=5,i_nei_cnt=5,c_nei_cnt=5 ):\n \n\n q_nodes, _, _ = 
tf_euler.sample_neighbor(x, edge_types=['2','5'], count=i_nei_cnt)\n i_nodes, _, _ = tf_euler.sample_neighbor(x, edge_types=['0','1', '3','7'], count=i_nei_cnt)\n c_nodes, _, _ = tf_euler.sample_neighbor(x, edge_types=['46','66', '9'], count=i_nei_cnt)\n \n# q_nodes, _, _ = tf_euler.sample_neighbor(x, edge_types=['2' ], count=i_nei_cnt)\n# i_nodes, _, _ = tf_euler.sample_neighbor(x, edge_types=['1', '3' ], count=i_nei_cnt)\n# c_nodes, _, _ = tf_euler.sample_neighbor(x, edge_types=['46','66', '9'], count=i_nei_cnt)\n \n \n q_f = get_feature(q_nodes,q_part_features)\n i_f = get_feature(i_nodes,i_part_features)\n c_f = get_feature(c_nodes,c_part_features)\n \n return q_f,i_f,c_f\n \ndef sample_pos_n_feature(source,full_features,q_part_features,i_part_features,c_part_features):\n node_list = []\n src, pos, neg = sample_by_walk(source, ['0','1', '3'])\n node_list.append({'src':src,'pos':pos,'neg':neg})\n \n for e_level in ['6','5','4','3','2','1']:\n _, pos, neg = sample_by_walk(source, ['4'+e_level, '6'+e_level])\n node_list.append({'src':src,'pos':pos,'neg':neg})\n \n feature_dic = {'self':[],'q':[],'i':[],'c':[]}\n for e in node_list:\n dic_self,dic_q,dic_i,dic_c ={},{},{},{}\n for e_type in e:\n self_f = get_feature(e[e_type],full_features)\n q_f,i_f,c_f = get_neighbor(e[e_type],q_part_features,i_part_features,c_part_features)\n dic_self[e_type],dic_q[e_type],dic_i[e_type],dic_c[e_type]=self_f,q_f,i_f,c_f\n feature_dic['self'].append(dic_self)\n feature_dic['q'].append(dic_q)\n feature_dic['i'].append(dic_i)\n feature_dic['c'].append(dic_c)\n \n \n return feature_dic\n\n\ndef fake_convolution(node, q_node_nei,i_node_nei,c_node_nei, q_num=5, i_num=5,c_num=5, q_dim=8,i_dim=8,c_dim=8):\n q_node_nei_s = tf.reshape(q_node_nei, [-1, q_num, q_dim])\n q_nei = tf.reduce_mean(q_node_nei_s, 1)\n i_node_nei_s = tf.reshape(i_node_nei, [-1, i_num, i_dim])\n i_nei = tf.reduce_mean(i_node_nei_s, 1)\n c_node_nei_s = tf.reshape(c_node_nei, [-1, c_num, c_dim])\n c_nei = tf.reduce_mean(c_node_nei_s, 1)\n \n\n res = tf.concat([node, q_nei, i_nei, c_nei], 1)\n return res\n\n\ndef inference(tensors, keep_prob=1.0, ps_num=1):\n id_embedding_dim = 24\n\n embedding_ini_val = 1\n embedding_stddev = 0.0002\n\n dense_ini_val = 2\n dense_stddev = 0.36\n \n \n \n nation_list=['TH','VN','SG','MY','ID','PH']\n feature_list=[\n {\n 'feature_name':'query_feature1',\n 'feature_dim':int(182309924*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'query_feature2',\n 'feature_dim':int(13286936*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature1',\n 'feature_dim':int(18357245*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_id',\n 'feature_dim':int(405929642*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'global_category_id',\n 'feature_dim':int(4273*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature9',\n 'feature_dim':int(175863*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature10',\n 'feature_dim':int(1025024*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature11',\n 'feature_dim':int(461654*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature12',\n 'feature_dim':int(1030492*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature13',\n 'feature_dim':3,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature14',\n 'feature_dim':9,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature15',\n 'feature_dim':4,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature16',\n 'feature_dim':9,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature17',\n 'feature_dim':7,\n 'emb_dim':8\n },\n {\n 
'feature_name':'item_feature18',\n 'feature_dim':3,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature2',\n 'feature_dim':int(10683948*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature3',\n 'feature_dim':int(21677772*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature5',\n 'feature_dim':int(117184*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature6',\n 'feature_dim':int(687583*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature7',\n 'feature_dim':int(339428*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'category_feature2',\n 'feature_dim':int(3425*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'query_feature3',\n 'feature_dim':int(14318897*1.1),\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature19',\n 'feature_dim':2,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature10',\n 'feature_dim':11,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature11',\n 'feature_dim':4,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature12',\n 'feature_dim':4,\n 'emb_dim':8\n },\n {\n 'feature_name':'item_feature13',\n 'feature_dim':2,\n 'emb_dim':8\n },\n {\n 'feature_name':'level',\n 'feature_dim':7,\n 'emb_dim':8\n },\n {\n 'feature_name':'is_leaf',\n 'feature_dim':2,\n 'emb_dim':8\n },\n {\n 'feature_name':'nation',\n 'feature_dim':6,\n 'emb_dim':8\n }\n ]\n\n \n # dic = {\n # 'ID':\t6191,\n # 'MY':\t4632,\n # 'PH':\t6036,\n # 'SG':\t4224,\n # 'TH':\t7153,\n # 'VN':\t4115\n # }\n # for e_nation in nation_list:\n # feature_list.append(\n # {\n # 'feature_name':'%s_venture_category_name_l10n_seg' % e_nation,\n # 'feature_dim':int(dic[e_nation]*1.1),\n # 'emb_dim':8\n # }\n # )\n \n dic = {\n '1':\t53,\n '2':\t361,\n '3':\t1614,\n '4':\t2159,\n '5':\t7033,\n '6':\t4380\n }\n for e_level in ['1','2','3','4','5','6']:\n feature_list.append(\n {\n 'feature_name':'global_category_id_level%s' % e_level,\n 'feature_dim':int(dic[e_level]*1.1),\n 'emb_dim':8\n }\n )\n\n dic = {\n '6':\t276,\n '7':\t2708,\n '8':\t4791,\n '9':\t5432,\n '10':\t3903,\n '11':\t2534\n }\n \n for e_level in ['6','7','8','9','10','11']:\n feature_list.append(\n {\n 'feature_name':'category_feature%s' % e_level,\n 'feature_dim':int(dic[e_level]*1.1),\n 'emb_dim':8\n }\n )\n\n dic = {\n '12':\t569,\n '13':\t3985,\n '14':\t9136,\n '15':\t11316,\n '16':\t8008,\n '17':\t4697\n }\n for e_level in ['12','13','14','15','16','17']:\n feature_list.append(\n {\n 'feature_name':'category_feature%s' % e_level,\n 'feature_dim':int(dic[e_level]*1.1),\n 'emb_dim':8\n }\n )\n \n \n \n # dic = {\n # 'ID':\t6192,\n # 'MY':\t4632,\n # 'PH':\t6036,\n # 'SG':\t4224,\n # 'TH':\t7153,\n # 'VN':\t4115\n # }\n # for e_nation in nation_list:\n # feature_list.append(\n # {\n # 'feature_name':'%s_venture_category_name_l10n_seg_alllevel' % e_nation,\n # 'feature_dim':int(dic[e_nation]*1.1),\n # 'emb_dim':8\n # }\n # )\n \n full_feature_dims = []\n full_feature_names = []\n full_emb_dim = []\n\n for e in feature_list:\n full_feature_names.append(e['feature_name'])\n full_feature_dims.append(e['feature_dim'])\n full_emb_dim.append(e['emb_dim'])\n\n full_feature_names = (full_feature_names)\n \n part_q_feature_names = ['query_norm' ]\n part_q_feature_dims = [ int(182309924*1.1) ]\n part_q_emb_dim = [8 ]\n\n part_i_feature_names = ['item_id' ]\n part_i_feature_dims = [int(405929642*1.1) ]\n part_i_emb_dim = [8 ]\n \n part_c_feature_names = ['venture_category_name_en' ]\n part_c_feature_dims = [int(4273*1.1) ]\n part_c_emb_dim = [8 ]\n \n 
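# --- Illustrative aside, not part of the original record ---
# feature_list above repeats a few names ('item_feature10' through 'item_feature13' occur
# twice with different dims); whether that is intended is unclear, so a quick check helps:
from collections import Counter
name_counts = Counter(f['feature_name'] for f in feature_list)   # feature_list from above
print('duplicated feature names:', {n: c for n, c in name_counts.items() if c > 1})
# ------------------------------------------------------------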
full_emb_dim_sum=np.sum(full_emb_dim)+np.sum(part_q_emb_dim)+np.sum(part_i_emb_dim)+np.sum(part_c_emb_dim)\n part_q_emb_dim_sum=np.sum(part_q_emb_dim)\n part_i_emb_dim_sum=np.sum(part_i_emb_dim)\n part_c_emb_dim_sum=np.sum(part_c_emb_dim)\n \n dic_info={\n 'self':\n {\n 'feature_names':full_feature_names,\n 'feature_dims':full_feature_dims,\n 'emb_dim':full_emb_dim\n },\n 'q':\n {\n 'feature_names':part_q_feature_names,\n 'feature_dims':part_q_feature_dims,\n 'emb_dim':part_q_emb_dim\n },\n 'i':\n {\n 'feature_names':part_i_feature_names,\n 'feature_dims':part_i_feature_dims,\n 'emb_dim':part_i_emb_dim\n },\n 'c':\n {\n 'feature_names':part_c_feature_names,\n 'feature_dims':part_c_feature_dims,\n 'emb_dim':part_c_emb_dim\n }\n }\n \n \n neg_num = 6\n walk_len=1\n\n type_cnt = 2\n\n swd_dim = 128\n swd_dim_gwd = 8\n \n swd_limit = 512\n swd_limit_gwd = 128\n\n # model_config = ctx.get_config('reader')\n batch_size = ctx.get_config('batch_size')\n\n init_graph()\n\n source=tensors.data[0][0].values\n print('len(tensors.data[0])',len(tensors.data[0]))\n print('len(tensors.data)',len(tensors.data))\n # print('source[:5]',source[:5])\n print('tensors.data[1]',tensors.data[1])\n print('tensors.data[2]',tensors.data[2])\n print('tensors.data[3]',tensors.data[3])\n print('tensors.data[4]',tensors.data[4])\n # print('tensors.data[0][1].values[:5]',tensors.data[0][1].values[:5])\n # print('tensors.data[0][2].values[:5]',tensors.data[0][2].values[:5])\n # negs = tensors.data[1][0].values\n\n \n \n print('tf.__version__', tf.__version__)\n\n import sys\n print('sys.version',sys.version)\n\n dic_nation2int={'TH':1,'VN':2,'SG':3,'MY':4,'ID':5,'PH':6}\n nation_vec = tensors.data[1]\n nation_onehot = []\n nation_idx_where = []\n \n for e_nation in nation_list:\n eq_nation = tf.to_float(tf.equal(tf.cast(nation_vec,tf.int32), tf.cast(dic_nation2int[e_nation],tf.int32)))\n nation_onehot.append(eq_nation)\n idx_where = tf.cast(tf.where(tf.equal(eq_nation,1))[:,0],dtype=tf.int32)\n idx_where = tf.concat([idx_where,[tf.shape(eq_nation)[0],tf.shape(eq_nation)[0]+1]],0)\n nation_idx_where.append(idx_where)\n \n nation_onehot = tf.concat(nation_onehot,1)\n nation_exist = tf.to_float(tf.greater(tf.reduce_sum(nation_onehot,0),0)) \n \n wd_idx = tf.range(swd_limit)\n wd_idx_norm = tf.to_float(wd_idx)/tf.to_float(swd_limit)\n \n gwd_idx_left = random_randint(swd_limit_gwd,swd_limit_gwd)\n gwd_idx_right = random_randint(swd_limit_gwd,swd_limit_gwd)\n gwd_idx = [gwd_idx_left,gwd_idx_right]\n gwd_idx_norm = [tf.to_float(gwd_idx_left)/tf.to_float(swd_limit),\n tf.to_float(gwd_idx_right)/tf.to_float(swd_limit)]\n \n cgwd_idx_left = random_randint(swd_limit_gwd,swd_limit_gwd*(walk_len+neg_num))\n cgwd_idx_right = random_randint(swd_limit_gwd*(walk_len+neg_num),swd_limit_gwd*(walk_len+neg_num))\n cgwd_idx = [cgwd_idx_left,cgwd_idx_right]\n cgwd_idx_norm = [tf.to_float(cgwd_idx_left)/tf.to_float(swd_limit),\n tf.to_float(cgwd_idx_right)/tf.to_float(swd_limit*(walk_len+neg_num))]\n \n\n Theta=tf.random_normal((swd_dim,32))\n Theta=tf.nn.l2_normalize(Theta,axis=1)\n \n Theta_gwd=tf.random_normal((swd_dim_gwd,32))\n Theta_gwd=tf.nn.l2_normalize(Theta_gwd,axis=1)\n \n\n source = get_high64bit(source)\n # negs = get_high64bit(negs)\n\n \n\n feature_dic = sample_pos_n_feature(source,full_feature_names,part_q_feature_names,part_i_feature_names,part_c_feature_names)\n\n # contain_same_cate ------------\n cate_vec = tensors.data[4]\n cate_vec = tf.to_float(cate_vec)\n cate_vec = tf.reshape(cate_vec,[-1,1])\n 
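# --- Illustrative aside, not part of the original record ---
# The nested loop below builds an (n_nation, n_nation) 0/1 matrix marking nation pairs
# whose rows share at least one category id; the same idea in NumPy on toy data:
import numpy as np
cats = {'TH': {1, 2}, 'VN': {2, 3}, 'SG': {9}}   # toy nation -> category-id sets
names = list(cats)
same_cate = np.array([[float(bool(cats[a] & cats[b])) for b in names] for a in names])
# ------------------------------------------------------------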
n_nation=len(nation_list)\n cate_mat = nation_onehot * cate_vec\n contain_same_cate = []\n for i_nation in range(n_nation):\n i_cate = tf.reshape(cate_mat[:,i_nation],[-1,1])\n i_mask = tf.reshape(nation_onehot[:,i_nation],[-1,1])\n for j_nation in range(n_nation):\n j_cate = tf.reshape(cate_mat[:,j_nation],[-1,1])\n j_mask = tf.reshape(nation_onehot[:,j_nation],[-1,1])\n ij_mask = tf.matmul(i_mask,j_mask,transpose_b=True)\n cate_join = tf.to_float(tf.equal(i_cate, tf.transpose(j_cate))) * ij_mask\n cate_join = tf.reduce_sum(cate_join,keepdims=True)\n contain_same_cate.append(cate_join)\n contain_same_cate = tf.concat(contain_same_cate,0)\n contain_same_cate = tf.reshape(contain_same_cate,[n_nation,n_nation])\n contain_same_cate = greater_than_zero(contain_same_cate)\n #--------------------------------\n\n with tf.variable_scope(\"embedding\", reuse=tf.AUTO_REUSE) as scope:\n embedding_dic={'self':[],'q':[],'i':[],'c':[]}\n for e_part in feature_dic:\n e_list = feature_dic[e_part]\n for e in e_list:\n dic={}\n for e_type in e:\n node_embed = sparse_embedding(e[e_type], dic_info[e_part]['feature_dims'],\n dic_info[e_part]['emb_dim'],\n ps_num,\n init_val=embedding_ini_val,\n stddev=embedding_stddev,\n names=dic_info[e_part]['feature_names'])\n node_embed_c = tf.concat(node_embed, 1)\n dic[e_type]=node_embed_c\n embedding_dic[e_part].append(dic)\n \n input_list = [] \n n_level = len(embedding_dic['self'])\n for i_level in range(n_level):\n dic={}\n for e_type in embedding_dic['self'][i_level]:\n e_input = fake_convolution(embedding_dic['self'][i_level][e_type],\n embedding_dic['q'][i_level][e_type],\n embedding_dic['i'][i_level][e_type],\n embedding_dic['c'][i_level][e_type],\n q_dim=part_q_emb_dim_sum,i_dim=part_i_emb_dim_sum,c_dim=part_c_emb_dim_sum)\n dic[e_type]=e_input\n input_list.append(dic)\n\n \n \n with tf.variable_scope(\"gate_current_dnn\", reuse=tf.AUTO_REUSE):\n gate_current_id_deep_list = []\n gate_current_id_deep_list_swd=[]\n current_id_ays_list_swd=[]\n current_id_ays_list_sgwd=[]\n for i_dic,e_dic in enumerate(input_list):\n current_id_deep = gate_feature_net(e_dic['src'],\n full_emb_dim_sum, ps_num,\n dense_ini_val,\n dense_stddev)\n current_id_deep_gate = gate_wd_net(current_id_deep, ps_num, dense_ini_val, dense_stddev)\n gate_current_id_deep_list.append(current_id_deep)\n \n if flag_RI:\n current_id_deep_for_sort = mapping_fun(current_id_deep, ps_num, dense_ini_val, dense_stddev,nation_list)\n else:\n current_id_deep_for_sort = current_id_deep\n x_list_wd,idx_list_wd=swd_proj_src(current_id_deep_for_sort, Theta,nation_idx_where)\n x_list_gwd,idx_list_gwd=swd_proj_src(current_id_deep_for_sort, Theta_gwd,nation_idx_where)\n \n gate_feature_list=swd_proj_src_with_idx(current_id_deep, idx_list_gwd,nation_idx_where)\n gate_list=swd_proj_src_with_idx(current_id_deep_gate, idx_list_wd,nation_idx_where)\n gate_current_id_deep_list_swd.append([gate_feature_list,gate_list])\n \n current_id_ays_list_swd.append([x_list_wd,idx_list_wd])\n current_id_ays_list_sgwd.append([x_list_gwd,idx_list_gwd])\n \n \n with tf.variable_scope(\"gate_node_dnn\", reuse=tf.AUTO_REUSE) as scope:\n gate_pos_id_deep_list,gate_node_neg_id_deep_list = [],[]\n gate_node_id_ays_list_swd=[]\n node_id_ays_list_swd = []\n for i_dic,e_dic in enumerate(input_list):\n pos_id_deep = gate_feature_net(e_dic['pos'], full_emb_dim_sum, ps_num,\n dense_ini_val,\n dense_stddev)\n node_neg_id_deep = gate_feature_net(e_dic['neg'], full_emb_dim_sum, ps_num,\n dense_ini_val,\n dense_stddev)\n 
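# --- Illustrative aside, not part of the original record ---
# The pos/neg embeddings built in this loop are later scored against the source embedding
# with a batched cosine (see batch_cosine_fun further down); the core computation in NumPy:
import numpy as np
src = np.random.rand(4, 1, 32)     # (batch, 1, dim) source embeddings
cand = np.random.rand(4, 7, 32)    # (batch, walk_len + neg_num, dim) candidates
dots = np.matmul(src, cand.transpose(0, 2, 1))[:, 0, :]              # (batch, 7)
norms = np.linalg.norm(src, axis=2) * np.linalg.norm(cand, axis=2)   # (batch, 7)
cosine = dots / norms
# ------------------------------------------------------------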
gate_pos_id_deep_list.append(pos_id_deep)\n gate_node_neg_id_deep_list.append(node_neg_id_deep)\n \n pos_id_deep_gate = gate_wd_net(pos_id_deep, ps_num, dense_ini_val, dense_stddev)\n node_neg_id_deep_gate = gate_wd_net(node_neg_id_deep, ps_num, dense_ini_val, dense_stddev)\n \n if flag_RI:\n pos_id_deep_for_sort = mapping_fun(pos_id_deep, ps_num, dense_ini_val, dense_stddev,nation_list)\n node_neg_id_deep_for_sort = mapping_fun(node_neg_id_deep, ps_num, dense_ini_val, dense_stddev,nation_list)\n else:\n pos_id_deep_for_sort = pos_id_deep\n node_neg_id_deep_for_sort = node_neg_id_deep\n\n \n x_list_gwd,idx_list_gwd=swd_proj_pos_neg(pos_id_deep_for_sort,node_neg_id_deep_for_sort, Theta_gwd, nation_idx_where,neg_num)\n \n gate_feature_list=swd_proj_pos_neg_with_idx(pos_id_deep,node_neg_id_deep, idx_list_gwd, nation_idx_where,neg_num)\n gate_list=swd_proj_pos_neg_with_idx(pos_id_deep_gate,node_neg_id_deep_gate, idx_list_gwd, nation_idx_where,neg_num)\n gate_node_id_ays_list_swd.append([gate_feature_list,gate_list])\n \n node_id_ays_list_swd.append([x_list_gwd,idx_list_gwd])\n \n \n #for inference\n gate_node_src_context_ays_swd = []\n node_src_context_ays_swd = []\n node_src_context_ays_sgwd = []\n gate_node_src_context_deep = gate_feature_net(input_list[0]['src'], full_emb_dim_sum, ps_num,\n dense_ini_val,\n dense_stddev)\n gate_node_src_context_deep_gate = gate_wd_net(gate_node_src_context_deep, ps_num, dense_ini_val, dense_stddev)\n \n if flag_RI:\n gate_node_src_context_deep_for_sort = mapping_fun(gate_node_src_context_deep, ps_num, dense_ini_val, dense_stddev,nation_list)\n else:\n gate_node_src_context_deep_for_sort = gate_node_src_context_deep\n\n x_list_wd,idx_list_wd=swd_proj_src(gate_node_src_context_deep_for_sort, Theta,nation_idx_where)\n x_list_gwd,idx_list_gwd=swd_proj_src(gate_node_src_context_deep_for_sort, Theta_gwd,nation_idx_where)\n \n gate_feature_list=swd_proj_src_with_idx(gate_node_src_context_deep, idx_list_gwd,nation_idx_where)\n gate_list=swd_proj_src_with_idx(gate_node_src_context_deep_gate, idx_list_wd,nation_idx_where)\n gate_node_src_context_ays_swd.append([gate_feature_list,gate_list])\n \n node_src_context_ays_swd.append([x_list_wd,idx_list_wd])\n node_src_context_ays_sgwd.append([x_list_gwd,idx_list_gwd])\n \n \n with tf.variable_scope(\"gate_att_sim\", reuse=tf.AUTO_REUSE) as scope:\n n_level = len(gate_current_id_deep_list)\n gate_att_sim_list = []\n for i_level in range(n_level):\n current_id_ays=gate_current_id_deep_list[i_level]\n pos_id_ays = gate_pos_id_deep_list[i_level]\n node_neg_id_ays = gate_node_neg_id_deep_list[i_level]\n \n current_id_ays = tf.reshape(current_id_ays, [-1, 1, 32])\n pos_id_ays = tf.reshape(pos_id_ays, [-1, walk_len, 32])\n node_neg_id_ays_re = tf.reshape(node_neg_id_ays, [-1, neg_num, 32])\n node_id_ays = tf.concat([pos_id_ays, node_neg_id_ays_re], 1)\n att_sim_con = batch_cosine_fun(current_id_ays, node_id_ays)\n gate_att_sim_list.append(att_sim_con)\n \n att_sim_list = gate_att_sim_list\n\n print('tensors.data[1]',tensors.data[1])\n print('tensors.data[2]',tensors.data[2])\n print('tensors.data[3]',tensors.data[3])\n print('tensors.data[4]',tensors.data[4])\n print('tf.__version__', tf.__version__)\n import sys\n print('sys.version',sys.version)\n \n # n_tasks,n_params,preference = n_level,1,np.array([1,0,0,0,0,0,0])\n # epo_lp = EPO_LP(m=n_tasks, n=n_params, r=preference)\n\n align_info = [current_id_ays_list_swd,node_id_ays_list_swd,node_src_context_ays_swd,\\\n current_id_ays_list_sgwd,node_src_context_ays_sgwd,\\\n 
wd_idx,wd_idx_norm,gwd_idx,gwd_idx_norm,cgwd_idx,cgwd_idx_norm,nation_exist,\\\n gate_current_id_deep_list_swd,gate_node_id_ays_list_swd,gate_node_src_context_ays_swd,\\\n ps_num, dense_ini_val, dense_stddev,\\\n gate_att_sim_list,contain_same_cate]\n\n \n return att_sim_list,nation_onehot,align_info\n\n\ndef id_dnn_net(id_embedding, input_dim, ps_num, init_val, stddev):\n with tf.variable_scope(\"layer1\"):\n layer1_output0 = full_connect(id_embedding, [input_dim, 256], [256], False, ps_num, init_val, stddev)\n layer1_output = tf.nn.elu(layer1_output0)\n\n with tf.variable_scope(\"layer2\"):\n layer2_output0 = full_connect(layer1_output, [256, 256], [256], False, ps_num, init_val, stddev)\n layer2_output = tf.nn.elu(layer2_output0)\n\n return layer2_output\n\n\ndef id_ays_net(id_deep, ps_num, init_val, stddev):\n with tf.variable_scope(\"layer1\"):\n x = full_connect(id_deep, [256, 32], [32], False, ps_num, init_val, stddev)\n x = tf.nn.elu(x)\n x=tf.nn.l2_normalize(x,axis=1)\n return x\n\ndef gate_feature_net(id_embedding, input_dim, ps_num, init_val, stddev):\n with tf.variable_scope(\"gate_feature_layer1\"):\n x = full_connect(id_embedding, [input_dim, 256], [256], False, ps_num, init_val, stddev)\n x = tf.nn.elu(x)\n\n with tf.variable_scope(\"gate_feature_layer2\"):\n x = full_connect(x, [256, 256], [256], False, ps_num, init_val, stddev)\n x = tf.nn.elu(x)\n \n with tf.variable_scope(\"gate_feature_layer3\"):\n x = full_connect(x, [256, 32], [32], False, ps_num, init_val, stddev)\n x = tf.nn.elu(x)\n x=tf.nn.l2_normalize(x,axis=1)\n return x\n\ndef gate_wd_net(id_deep, ps_num, init_val, stddev):\n with tf.variable_scope(\"wd_layer1\"):\n x = full_connect(tf.stop_gradient(id_deep), [32, 6], [6], False, ps_num, init_val, stddev)\n x = tf.sigmoid(x)\n return x\n\ndef gate_gwd_net(id_deep, ps_num, init_val, stddev, sp_name):\n with tf.variable_scope(\"gwd_layer1_%s\"%sp_name):\n x = full_connect(tf.stop_gradient(id_deep), [96, 32], [32], False, ps_num, init_val, stddev)\n x = tf.nn.elu(x)\n \n with tf.variable_scope(\"gwd_layer2_%s\"%sp_name):\n x = full_connect(x, [32, 6], [6], False, ps_num, init_val, stddev)\n x = tf.sigmoid(x)\n \n return x\n\ndef gate_cgwd_net(id_deep, ps_num, init_val, stddev, sp_name):\n with tf.variable_scope(\"cgwd_layer1_%s\"%sp_name):\n x = full_connect(tf.stop_gradient(id_deep), [96, 32], [32], False, ps_num, init_val, stddev)\n x = tf.nn.elu(x)\n \n with tf.variable_scope(\"cgwd_layer2_%s\"%sp_name):\n x = full_connect(x, [32, 6], [6], False, ps_num, init_val, stddev)\n x = tf.sigmoid(x)\n \n return x\n\ndef batch_cosine_fun(ays_src, ays_dst):\n # ays_src [batch, 1, dim]\n # ays_dst [batch, num, dim]\n src_norm = tf.sqrt(tf.reduce_sum(tf.square(ays_src), 2, True)) # [batch, 1, 1]\n src_norm = tf.squeeze(src_norm, -1) # [batch, 1]\n dst_norm = tf.sqrt(tf.reduce_sum(tf.square(ays_dst), 2, True)) # [batch, num, 1]\n dst_norm = tf.squeeze(dst_norm, -1) # [batch, num]\n \n prod = tf.matmul(ays_src, ays_dst, transpose_b=True) # [batch, 1, num]\n prod = tf.squeeze(prod, 1) # [batch, num]\n norm_prod = src_norm * dst_norm # [batch, num]\n cosine = tf.truediv(prod, norm_prod)\n return cosine\n\ndef cosine_fun(ays_src, ays_dst):\n src_norm = tf.sqrt(tf.reduce_sum(tf.square(ays_src), 1, True))\n dst_norm = tf.sqrt(tf.reduce_sum(tf.square(ays_dst), 1, True))\n\n prod = tf.reduce_sum(tf.multiply(ays_src, ays_dst), 1, True)\n norm_prod = tf.multiply(src_norm, dst_norm)\n\n cosine = tf.truediv(prod, norm_prod)\n return cosine\n\ndef get_initializer(init_val=1, 
dtype=tf.float32, stddev=0.1, value=0.0):\n if init_val == 0:\n return tf.constant_initializer(value=value, dtype=dtype)\n elif init_val == 1:\n return tf.truncated_normal_initializer(dtype=dtype, stddev=stddev)\n elif init_val == 2:\n # factor*[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]\n # stddev=factor/sqrt(N)\n # where factor=input stddev!!!!!!!!!!!!!!!\n return tf.uniform_unit_scaling_initializer(factor=stddev, seed=10, dtype=tf.float32)\n else:\n return None\n\ndef full_connect_sparse(train_inputs, weights_shape, biases_shape, no_biases, ps_num, sp_weights, init_val, stddev):\n # weights\n from tf_ps.ps_context import variable_info\n with variable_info(batch_read=3000, var_type='hash'):\n if ps_num > 1:\n weights = tf.get_variable(\"weights\",\n weights_shape,\n initializer=get_initializer(init_val=init_val, stddev=stddev),\n partitioner=tf.min_max_variable_partitioner(max_partitions=ps_num))\n else:\n weights = tf.get_variable(\"weights\",\n weights_shape,\n initializer=get_initializer(init_val=init_val, stddev=stddev))\n\n train = tf.nn.embedding_lookup_sparse(weights, sp_ids=train_inputs, sp_weights=sp_weights, combiner=\"sum\") #\"mean\", \"sqrtn\" and \"sum\"\n if not no_biases:\n # biases\n biases = tf.get_variable(\"biases\",\n biases_shape,\n initializer=get_initializer(init_val=init_val, stddev=stddev))\n train = train + biases\n return train\n\n\ndef full_connect(train_inputs, weights_shape, biases_shape, no_biases, ps_num, init_val, stddev):\n # weights\n if ps_num > 1:\n weights = tf.get_variable(\"weights\",\n weights_shape,\n initializer=get_initializer(init_val=init_val, stddev=stddev),\n regularizer=tf.nn.l2_loss,\n partitioner=tf.min_max_variable_partitioner(max_partitions=ps_num))\n else:\n weights = tf.get_variable(\"weights\",\n weights_shape,\n regularizer=tf.nn.l2_loss,\n initializer=get_initializer(init_val=init_val, stddev=stddev))\n\n if no_biases:\n # matmul\n train = tf.matmul(train_inputs, weights)\n return train\n else:\n # biases\n biases = tf.get_variable(\"biases\",\n biases_shape,\n initializer=get_initializer(init_val=0, value=0.0002))\n # matmul\n train = tf.matmul(train_inputs, weights) + biases\n return train\n\n\ndef sigmoid_loss(input_list, weight_decay=0.0001, gama=5.0):\n# sim_list,epo_lp = input_list\n# m = len(sim_list)\n \n sim_list,nation_onehot,align_info = input_list\n \n current_id_ays_list_swd,node_id_ays_list_swd,node_src_context_ays_swd,\\\n current_id_ays_list_sgwd,node_src_context_ays_sgwd,\\\n wd_idx,wd_idx_norm,gwd_idx,gwd_idx_norm,cgwd_idx,cgwd_idx_norm,nation_exist,\\\n gate_current_id_deep_list_swd,gate_node_id_ays_list_swd,gate_node_src_context_ays_swd,\\\n ps_num, init_val, stddev,\\\n gate_att_sim_list,contain_same_cate= align_info\n \n \n \n \n\n \n nation_mean = nation_onehot/(tf.reduce_sum(nation_onehot, 0, keep_dims=True)+1e-6)\n \n solver=ParetoMTL()\n\n \n \n loss_sum = 0.0\n cont_loss = 0.0\n for i,sim in enumerate(sim_list):\n if i > 1 and i < len(sim_list)-1:\n continue\n one_labels = tf.ones([tf.shape(sim)[0], 1], dtype=tf.float32)\n zero_labels = tf.zeros([tf.shape(sim)[0], 6], dtype=tf.float32)\n label = tf.concat([one_labels, zero_labels], -1)\n\n if i==0:\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=label, logits=gama*sim, pos_weight=2.0)\n # loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=gama*sim, labels=label)\n else:\n # loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=gama*sim, labels=label)\n prob = tf.nn.softmax(gama * sim)\n loss_pos = -label*tf.log(prob+1e-6)\n # loss_neg 
= label*tf.log(1.-prob+1e-6)\n loss = loss_pos \n loss = tf.reduce_sum(loss, 1, keep_dims=True)\n loss = nation_mean*loss\n # loss = nation_onehot*loss\n\n \n \n loss = tf.reduce_sum(loss, 0) \n # loss = tf.reduce_mean(loss, 0) \n cont_loss += 1.0\n \n loss_each = loss\n \n if i == 0:\n loss = loss_each\n \n nation_sp = ctx.get_config(\"nation\")\n dic_nation2exp = {'TH':2,'VN':3,'SG':1,'MY':3,'ID':4,'PH':4}\n flag_exp = dic_nation2exp[nation_sp]\n\n flag_exp=3\n\n if flag_exp == 1 or flag_exp == 2:\n if flag_exp == 1:\n pref_vecs = tf.eye(6)\n pref_vecs = tf.concat([tf.random_uniform((1,6)),tf.ones((1,6))/6.],0)\n pref_vecs = pref_vecs/(tf.reduce_sum(pref_vecs,1,keep_dims=True)+1e-6)\n if flag_exp == 2:\n pref_vecs = tf.eye(6)\n pref_vecs = tf.concat([tf.ones((1,6))/6.,tf.ones((1,6))/6.],0)\n pref_vecs = pref_vecs/(tf.reduce_sum(pref_vecs,1,keep_dims=True)+1e-6)\n\n weights,size_list=get_variable_list('gate_feature_layer')\n grad_list = []\n for j in range(6):\n grad = tf.gradients(loss[j], weights)\n grad=grad_flatten(grad,size_list)\n grad=tf.stop_gradient(grad)\n grad_list.append(grad)\n print('grad.get_shape()',grad.get_shape())\n grads = tf.stack(grad_list)\n \n loss_value = tf.stop_gradient(loss)\n\n i_task = 0\n\n # flag, weight_init=solver.get_d_paretomtl_init(grads,loss_value,pref_vecs,i_task)\n weight=solver.get_d_paretomtl(grads,loss_value,pref_vecs,i_task)\n\n # flag = tf.cast(flag,dtype=weight_init.dtype)\n # weight = flag * weight + (1.-flag)*weight_init\n\n if flag_exp == 3:\n weight = tf.random_uniform((1,6))\n weight = weight/(tf.reduce_sum(weight)+1e-6)\n \n if flag_exp == 4:\n weight = tf.ones((1,6))/6.\n\n print('flag_exp',flag_exp)\n \n \n weight = tf.stop_gradient(tf.reshape(weight,[-1]))\n\n loss_vec = loss\n \n loss = tf.reduce_sum(loss * weight) \n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n loss_param_regu = cont_loss * weight_decay * tf.add_n(reg_losses)\n # loss_sum += loss + loss_param_regu\n loss_sum += loss \n elif i == 1:\n loss = loss_each\n \n loss_vec2 = loss\n loss = tf.reduce_mean(loss )\n loss_param_regu = cont_loss* weight_decay * tf.add_n(reg_losses)\n loss_sum += cate_align_weight * loss + loss_param_regu\n break\n# break\n\n # graph matching loss -------------------------------------\n \n loss_swd,show_list_swd = swd_loss(current_id_ays_list_swd,node_src_context_ays_swd,nation_exist,wd_idx,wd_idx_norm,\n gate_current_id_deep_list_swd,gate_node_src_context_ays_swd,nation_weights=weight,gm_loss_weight=gm_loss_weight,\n flag_dist=flag_dist,var_weight=var_weight,gate_weight=gate_weight,gate_th=gate_th,flag_gate_above=flag_gate_above,\n same_nation_weight=same_nation_weight,flag_uniform_nation_weight=flag_uniform_nation_weight,\n contain_same_cate=contain_same_cate)\n \n with tf.variable_scope(\"gate_pairwise_dnn\", reuse=tf.AUTO_REUSE) as scope:\n loss_sgwd,show_list_sgwd = sgwd_loss(current_id_ays_list_sgwd,node_src_context_ays_sgwd,nation_exist,gwd_idx,gwd_idx_norm,\n gate_current_id_deep_list_swd,gate_node_src_context_ays_swd,\n ps_num, init_val, stddev,\n mode='gwd',nation_weights=weight,gm_loss_weight=gm_loss_weight,flag_dist=flag_dist,var_weight=var_weight,\n gate_weight=gate_weight,gate_th=gate_th,flag_gate_above=flag_gate_above,\n same_nation_weight=same_nation_weight,flag_uniform_nation_weight=flag_uniform_nation_weight,\n contain_same_cate=contain_same_cate)\n \n loss_scgwd,show_list_scgwd = sgwd_loss(current_id_ays_list_sgwd,node_id_ays_list_swd,nation_exist,cgwd_idx,cgwd_idx_norm,\n 
gate_current_id_deep_list_swd,gate_node_id_ays_list_swd,\n ps_num, init_val, stddev,\n mode='cgwd',nation_weights=weight,gm_loss_weight=gm_loss_weight,flag_dist=flag_dist,var_weight=var_weight,\n gate_weight=gate_weight,gate_th=gate_th,flag_gate_above=flag_gate_above,\n same_nation_weight=same_nation_weight,flag_uniform_nation_weight=flag_uniform_nation_weight,\n contain_same_cate=contain_same_cate)\n \n \n loss_regu = loss_swd + loss_sgwd + loss_scgwd\n\n #-----------------------------------------------------------\n \n global_step = tf.contrib.framework.get_or_create_global_step()\n global_step = tf.to_float(global_step)\n flag_global_step = greater_than_zero(global_step-500000.)\n\n loss_sum = loss_sum + 0.1 * loss_regu\n \n return loss_sum, [loss,loss_param_regu,loss_regu,loss_vec,weight,loss_vec2,\n show_list_swd,\n show_list_sgwd,\n show_list_scgwd,flag_global_step]\n\ndef auc(input_list, num_thresholds=200, decay_rate=1):\n sim_list,nation_onehot,align_info = input_list\n\n sim=sim_list[0]\n neg_num = tf.shape(sim)[1] - 1\n sample_num = tf.shape(sim)[0]\n labels_matrix = tf.concat([tf.ones([sample_num, 1], tf.int32), tf.zeros([sample_num, neg_num], tf.int32)], axis=1)\n labels = tf.reshape(labels_matrix, [-1, 1])\n\n predictions = tf.reshape(tf.nn.sigmoid(sim, name='sigmoid_auc'), [-1, 1])\n _, auc_op = tf.contrib.metrics.streaming_auc(predictions, labels, num_thresholds=num_thresholds,\n decay_rate=decay_rate)\n return auc_op","sub_path":"International_Entity_Graphs/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":44631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176624090","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 30 00:43:06 2017\n\n@author: paulo\n\"\"\"\n\nimport tkinter as tk\nimport ttk\nimport tkMessageBox\nfrom MN.NewtonRapson import NewtonRapson\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nimport sympy as sp\nimport cmath as cm\n\nclass GUI(tk.Frame):\n \"\"\" Interfaz grafica de usuario \"\"\"\n\n def __init__(self, parent=None):\n tk.Frame.__init__(self, parent)\n self.parent = parent\n self.graph = tk.Toplevel(self.parent)\n self.initComponent()\n self.initGraph()\n self.graph.withdraw()\n self.parent.mainloop()\n\n def initComponent(self):\n \"\"\" Componentes de la aplicación \"\"\"\n self.parent.title(\"Método de Newton-Rapson (raices imaginarias)\")\n self.parent.resizable(0, 0)\n self.parent.config(bg=\"white\")\n self.renderingWindow(self.parent)\n \n # Opciones\n font = ('Verdana', 14)\n bg = \"white\"\n vcmd = (self.parent.register(self.validate),\n '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')\n \n # Objetos\n lblEcuacion = tk.Label(self.parent, text=\"Ingrese la ecuación:\",\n anchor=tk.W, justify=tk.LEFT)\n lblEcuacion.place(x=10, y=10, width=300, height=20)\n lblEcuacion.config(font=font, bg=bg)\n \n txtInstruccion = \"Ingrese un valor inicial para calcular la raiz:\"\n lblInstruccion = tk.Label(self.parent, text=txtInstruccion,\n anchor=tk.W, justify=tk.LEFT)\n lblInstruccion.place(x=10, y=50, width=700, height=20)\n lblInstruccion.config(font=font, bg=bg)\n \n lblInferior = tk.Label(self.parent, text=\"V. 
Inicial:\",\n anchor=tk.W, justify=tk.LEFT)\n lblInferior.place(x=10, y=90, width=140, height=20)\n lblInferior.config(font=font, bg=bg)\n \n lblError = tk.Label(self.parent, text=\"Error:\",\n anchor=tk.W, justify=tk.LEFT)\n lblError.place(x=540, y=90, width=110, height=20)\n lblError.config(font=font, bg=bg)\n \n lblRes = tk.Label(self.parent, text=\"Resultados:\",\n anchor=tk.W, justify=tk.LEFT)\n lblRes.place(x=10, y=170, width=700, height=20)\n lblRes.config(font=font, bg=bg)\n \n self.txtEcuacion = tk.Entry(self.parent)\n self.txtEcuacion.place(x=320, y=10, width=200, height=20)\n self.txtEcuacion.focus()\n \n v = tk.StringVar(self.parent, value='0')\n self.txtInferior = tk.Entry(self.parent, state='disabled',\n textvariable=v)\n self.txtInferior.place(x=160, y=90, width=130, height=20)\n \n self.txtError = tk.Entry(self.parent, validate = 'key',\n validatecommand = vcmd)\n self.txtError.place(x=660, y=90, width=130, height=20)\n \n btnGrafica = tk.Button(self.parent, text=\"Graficar\",\n command=lambda: self.graficar())\n btnGrafica.place(x=670, y=10, width=120, height=20)\n btnGrafica.config(font=font)\n \n btnRaiz = tk.Button(self.parent, text=\"Obtener raices\",\n command=lambda: self.raiz())\n btnRaiz.place(x=10, y=130, width=200, height=20)\n btnRaiz.config(font=font)\n \n self.tree = ttk.Treeview(self.parent)\n vsb = ttk.Scrollbar(self.parent, orient=\"vertical\",\n command=self.tree.yview)\n vsb.place(x=775, y=210, height=380)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree[\"columns\"]=(\"Xi\", \"f(Xi)\", \"f'(Xi)\", \"g(x)\")\n self.tree.column(\"#0\", width=50)\n self.tree.column(\"Xi\", width=170)\n self.tree.column(\"f(Xi)\", width=180)\n self.tree.column(\"f'(Xi)\", width=180)\n self.tree.column(\"g(x)\", width=185)\n self.tree.heading(\"Xi\", text=\"Xi\")\n self.tree.heading(\"f(Xi)\", text=\"f(Xi)\")\n self.tree.heading(\"f'(Xi)\", text=\"F'(Xi)\")\n self.tree.heading(\"g(x)\", text=\"g(x)\")\n self.tree.place(x=10, y=210, width=765, height=380)\n self.tree.insert(\"\" , 0, text=\"1\", values=(\"\",\"\",\"\",\"\"))\n \n def initGraph(self):\n \"\"\" Componentes de la aplicación \"\"\"\n self.graph.title(\"Grafica\")\n self.graph.resizable(0, 0)\n self.graph.config(bg=\"white\")\n self.renderingWindow(self.graph)\n #self.graph.focus_set() # Focus\n self.graph.protocol(\"WM_DELETE_WINDOW\", \"onexit\") # quitamos cerrar\n \n def renderingWindow(self, frame):\n \"\"\" Le da tamaño a la ventana y la centra en pantall \"\"\"\n w = 800\n h = 600\n ws = self.parent.winfo_screenwidth()\n hs = self.parent.winfo_screenheight()\n x = (ws/2) - (w/2)\n y = (hs/2) - (h/2)\n frame.geometry('%dx%d+%d+%d' % (w, h, x, y))\n \n def raiz(self):\n # Obtenemos los valores\n ec = self.txtEcuacion.get()\n inicial = cm.sqrt(-1)\n err = self.txtError.get()\n text = \"f(x) = \"+str(ec)+\"\\n\\n\"\n \n # verificamos que todos los campos esten llenos\n if len(ec)!=0 and len(err)!=0 :\n # Limpiamos el treeView\n self.tree.delete(*self.tree.get_children())\n \n # Creamos un objeto de la clase Regla Falsa\n nr = NewtonRapson(inicial, ec, err, self.tree)\n \n # Obtenemos el total de raices\n nr.roots()\n \n # comprobamos el numero de raices\n if (nr.root + len(nr.complex)) == 0 :\n # Mensaje a mostrar\n text = \"La función dada no tiene raices\"\n \n # Mostramos el mensaje\n tkMessageBox.showerror(title=\"Error\", message=text)\n else :\n textTemp = \"f(x) = \"+str(ec)+\"\\n\\n\"\n textTemp += \"Tiene \"+str(len(nr.complex))+\" raices imaginarias\\n\"\n textTemp += \"El valor inicial se 
calculara automaticamente\"\n tkMessageBox.showwarning(title=\"Atención\", message=textTemp)\n # Contador de raices\n count = 1\n \n # verificamos si tiene raices complejas\n if len(nr.complex) > 0:\n nr.inicial = (nr.complex[0] - 1j)\n raiz = nr.estimate(1)\n \n control = 2\n \n while len(nr.complex) >= control:\n nr.inicial = (nr.complex[control-1] - 1j)\n raiz = nr.estimate(1)\n \n control += 1\n \n # Obtenemos las raices reales\n for r in nr.real:\n text += \"Raiz \"+str(count)+\": \"+str(r)+\"\\n\"\n count += 1\n \n # Obtenemos las raices imaginarias\n for r in nr.complex:\n text += \"Raiz \"+str(count)+\": \"+str(r)+\"\\n\"\n count += 1\n \n # Mostramos el mensaje\n tkMessageBox.showinfo(title=\"Resultados\", message=text)\n else :\n # Mensaje de error\n text = \"Por favor llene todos los campos\"\n # Mostramos el mensaje\n tkMessageBox.showerror(title=\"Error\", message=text)\n \n def validate(self, action, index, value_if_allowed,\n prior_value, text, validation_type, trigger_type, widget_name):\n if action == \"1\":\n if text in '0123456789.\\-\\+':\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False\n else:\n return False\n else:\n return True\n \n def graficar(self):\n \"\"\"\n Graficamos la funcion con ayuda de matplotlib\n \"\"\"\n \n font = ('Verdana', 14)\n ec = self.txtEcuacion.get().lower()\n \n # verificamos que todos los campos esten llenos\n if len(ec) != 0 :\n # Limpiamos la ventana\n for widget in self.graph.winfo_children():\n widget.destroy()\n \n self.graph.deiconify()\n nr = NewtonRapson(0,ec)\n \n f = Figure(figsize=(5,5), dpi=100)\n a = f.add_subplot(111)\n # Valores del eje X que toma el gráfico.\n _x = range(-10, 15)\n # Graficar ambas funciones.\n a.plot(_x, [nr.f(i) for i in _x])\n # Establecer el color de los ejes.\n a.axhline(0, color=\"black\")\n a.axvline(0, color=\"black\")\n # Limitar los valores de los ejes.\n a.set_xlim(-10, 10)\n a.set_ylim(-50, 50)\n # Etiquetas de los ejes\n a.set_xlabel('X')\n a.set_ylabel('Y')\n a.set_title('f(x) = '+str(ec))\n a.grid(True)\n \n canvas = FigureCanvasTkAgg(f, self.graph)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.X,\n expand=True)\n \n btnClose = tk.Button(self.graph, text=\"Cerrar\",\n command=lambda: self.close())\n btnClose.config(font=font)\n btnClose.pack(side=tk.BOTTOM, fill=tk.X)\n \n toolbar = NavigationToolbar2TkAgg(canvas, self.graph)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP)\n else:\n # Mensaje de error\n text = \"Por favor ingrese la ecuacion\"\n tkMessageBox.showerror(title=\"Error\", message=text)\n \n def close(self):\n self.graph.withdraw();","sub_path":"Metodos numericos/Método Newton-Rapson (imaginarios)/principal/MN/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":10029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620237810","text":"#Calcular o juros de depósitos ao longo de 24 meses.\n\ntaxa_de_juros = float(input(\"Digite o valor da taxa de juros dos 24 meses: \"))\nmeses = 1\ntotal = 0\ncontrole = 0\n\nwhile meses <= 24:\n depósito = float(input(\"Digite o depósito do %d mês: \" % meses))\n controle += depósito\n total += depósito * taxa_de_juros / 100\n print(\"%s° mês foi depositado R$%5.2f e já foi cobrado R$%d em juros no total.\" % (meses,depósito,total))\n meses += 1\nprint(\"O total depositado nesses 24 meses foi R$%5.2f e o total de juros a ser pago é: R$%5.2f\" % (controle, total))","sub_path":"Capítulo 5 - 
Repetições/exe5-12.py","file_name":"exe5-12.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"201737311","text":"from discord.ext import commands\n\nfrom datetime import date\nimport config\n\ncogs = [ \"minigames\" ]\n\nclass testbot(commands.Bot):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for package in cogs:\n            self.load_extension(package)\n\n    async def on_ready(self):\n        channel = self.get_channel(config.test_channel())\n        await channel.send(f\"logged in on {date.today():%B} {date.today().day} {date.today().year}\")\n\n    async def on_command_error(self, ctx, error):\n        ignored = (commands.CommandNotFound, commands.CheckFailure)\n        error = getattr(error, 'original', error)\n\n        if isinstance(error, ignored):\n            return\n        elif isinstance(error, commands.MissingRequiredArgument):\n            return await ctx.send(\"Missing required argument: \" + error.param.name)\n\n        return await ctx.send(\"An exception has occurred: `{}`\".format(error.__class__.__name__))\n\nbot = testbot(command_prefix = commands.when_mentioned_or('.'))\ntoken = config.discord_token()\nbot.run(token)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"565295264","text":"# rename_file.py : load the files in a folder and rename them\n# the original folder > (hood_T/long_T/...) folders > contain files such as hood_T_1.jpg.\n# rename them sequentially, e.g. hood_T_1 (2).jpg -> hood_T_2.jpg\nimport os\n\nfolder_path = \"D:\\\\Vehicle\"  # files are renamed after their folder name, so point this at the parent folder (original)\n\nfolder_list = os.listdir(folder_path)  # each folder name (hood_T/long_T/...)\n# folder_list = [\"hood_T\", \"long_T\"]\ncount = 1\nfor folder_name in folder_list:  # get the file names inside each folder (hood_T)\n    file_path = folder_path + \"\\\\\" + folder_name  # file_path : D:\\\\python_D\\\\fashion_data\\\\original\\\\hood_T\n\n    file_list = os.listdir(file_path)  # file_list[0] : D:\\\\python_D\\\\fashion_data\\\\original\\\\hood_T\\\\hood_T_1.jpg\n    # file_list.sort()\n\n    print(\"-------------------------------------------------\")\n    print(folder_name + \" has \" + str(len(file_list)) + \" files.\")\n    print(\"file_path : \" + file_path)\n\n    # (hood_T_1 (2).jpg -> 1.jpg) renaming straight to a name that already exists raises an error,\n    # so first rename everything to bare numbers, then assign the final names below.\n    for file_name in file_list:\n        old_name = file_path + \"\\\\\" + file_name  # old_name = D:\\\\python_D\\\\fashion_data\\\\original\\\\hood_T\\\\hood_T_1 (2).jpg\n        new_name = file_path + \"\\\\\" + str(count) + \".jpg\"  # new_name = D:\\\\python_D\\\\fashion_data\\\\original\\\\hood_T\\\\1.jpg\n\n        try:\n            os.rename(old_name, new_name)\n            print(\"success : \" + file_name + \" -> \" + str(count) + \".jpg\")\n        except:\n            print(\"fail : \" + file_name + \" -> \" + str(count) + \".jpg\")\n            print(\"=========files already renamed original(abcde.jpg) to number(1.jpg)===========\")\n\n        count = count + 1\n\n    # (1.jpg -> hood_T00001.jpg) : reset count and assign the final names.\n    file_list = os.listdir(file_path)  # reload, because the 1.jpg-style names must be picked up\n    count = 1\n    for file_name in file_list:\n        countstr = str(count).zfill(5)  # pad the number to 5 digits\n        old_name = file_path + \"\\\\\" + file_name  # old_name = 1.jpg\n        new_name = file_path + \"\\\\\" + folder_name + countstr + \".jpg\"  # new_name = hood_T00001.jpg\n\n        try:\n            os.rename(old_name, new_name)\n            print(\"success : \" + file_name + \" -> \" + folder_name + countstr + \".jpg\")\n        except:\n            
print(\"fail : \" + file_name + \" -> \" + folder_name + countstr + \".jpg\")\n            print(\"=========files already renamed number(1.jpg) to new_name(hood_T00001.jpg)===========\")\n            break\n        count = count + 1","sub_path":"ModifyData/modify_filename.py","file_name":"modify_filename.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"428251032","text":"import pandas as pd\nimport datetime as dt\n\n#sample both files\n#file1 = pd.read_csv('profileIDs_file1.csv', nrows=200000)\n#file2 = pd.read_csv('profileIDs_file2.csv', names = [\"Profile ID\", \"Last Updated\"], nrows=300000)\n\n#read entire files\nfile1 = pd.read_csv('profileIDs_file1.csv')\nfile2 = pd.read_csv('profileIDs_file2.csv', names = [\"Profile ID\", \"Last Updated\"])\n\nprint(\"---------------\")\n#print(\"Raw file\")\n#print(file)\n\ndf1 = pd.DataFrame(file1)\ndf2 = pd.DataFrame(file2)\ndf3 = pd.concat([df1, df2])\n#print(df)\n\n#print(\"---------------\")\n#print(\"groupby month and year\")\n#df1['Last Updated'] = pd.to_datetime(df1['Last Updated'])\n#df2['Last Updated'] = pd.to_datetime(df2['Last Updated'])\n\n#print(\"-------- File 1 --------\")\n#print(df1.groupby([df1['Last Updated'].dt.year.rename('year'), df1['Last Updated'].dt.month.rename('month')]).agg({'count'}))\n#print(\"-------- File 2 --------\")\n#print(df2.groupby([df2['Last Updated'].dt.year.rename('year'), df2['Last Updated'].dt.month.rename('month')]).agg({'count'}))\n\nprint(\"------ whole file ------ \")\ndf3['Last Updated'] = pd.to_datetime(df3['Last Updated'])\n#print(df3)\nprint(df3.groupby([df3['Last Updated'].dt.year.rename('year'), df3['Last Updated'].dt.month.rename('month')]).agg({'count'}))\n","sub_path":"profilereport.py","file_name":"profilereport.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"502926439","text":"# copy the code from sum1.py into this file, THEN:\n# change your program so it keeps reading numbers until it gets a -1, then prints the sum of all numbers read\nsum = 0\n\n\nNotDoneNumber = True\nwhile NotDoneNumber:\n    response = input(\"Please type a number\")\n    if response == \"-1\":  # exact match, so inputs like \"-13\" keep accumulating\n        print(sum)\n        NotDoneNumber = False\n\n    else:\n        sum = int(response) + sum","sub_path":"tests/t1/sum2.py","file_name":"sum2.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"519201153","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, f1_score\nfrom sklearn.base import BaseEstimator\nfrom typing import Text, Dict\n\n\ndef evaluate(df: pd.DataFrame, target_column: Text, clf: BaseEstimator) -> Dict:\n    \"\"\"Evaluate classifier on a dataset\n\n    Args:\n        df {pandas.DataFrame}: dataset\n        target_column {Text}: target column name\n        clf {sklearn.base.BaseEstimator}: classifier (trained model)\n\n    Returns:\n        Dict: Dict of reported metrics\n            'f1' - F1 score\n            'cm' - Confusion Matrix\n            'actual' - true values for test data\n            'predicted' - predicted values for test data\n    \"\"\"\n\n    # Get X and Y\n    y_test = df.loc[:, target_column].values.astype('int32')\n    X_test = df.drop(target_column, axis=1).values.astype('float32')\n\n    prediction = clf.predict(X_test)\n    f1 = f1_score(y_true=y_test, y_pred=prediction, average='macro')\n    cm = confusion_matrix(y_test, prediction)  # sklearn expects (y_true, y_pred)\n\n    return {\n        'f1': f1,\n        'cm': cm,\n        
'actual': y_test,\n 'predicted': prediction\n }\n","sub_path":"src/evaluate/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495957241","text":"\nimport logging\nimport sys\nimport pandas as pd\n\n\nfrom go_overlap.go.terms_to_annotation import get_and_insert_annotations\nfrom go_overlap.compute_set_overlaps.set_overlaps import \\\n compute_go_overlap_matrix, compute_non_go_overlaps\nfrom go_overlap.create_term_subtree_gene_matrix.term_subtree_gene_matrix import \\\n create_gene_subtree_matrix, create_gene_list_bool_array\nfrom go_overlap.prepare_input_lists.lists_to_bool_vectors import \\\n prepare_gene_vectors_for_computation\n\nfrom biomartian.bm_queries.bm_query import get_attributes, get_datasets, get_marts, get_bm\n\nfrom go_overlap.statistics.stats import compute_statistics, \\\nfind_appropriate_statistical_test\n\n\ndef get_all_genes(dataset, experiment_genes):\n\n \"\"\"Return all gene names from biomart.\"\"\"\n\n logging.info(\"Getting all genes from biomart for \" + dataset)\n df = get_bm(\"external_gene_name\", \"go_id\", dataset, \"ensembl\")\n df = df[\"external_gene_name\"].drop_duplicates()\n\n # this is only true if experiment genes was given\n if isinstance(experiment_genes, pd.Series):\n df = df[df.str.upper().isin(experiment_genes.str.upper())]\n\n return df\n\n\ndef reorder_df(df):\n\n \"\"\"Reorder the columns for final output.\n\n The cols in the df are different depending on which stats method was used\n to compare the difference between input lists.\n \"\"\"\n\n if \"chi_sq\" in df:\n reordered_columns = [\"go_root\", \"ontology\", \"chi_sq_fdr\", \"a_hpgm_go_fdr\", \"a_odds\", \"b_hpgm_go_fdr\",\n \"b_odds\", \"a_I_b_I_go\", \"a_M_b_I_go\", \"b_M_a_I_go\", \"go_subtree\", \"U\", \"a_I_b\",\n \"a_M_b\", \"b_M_a\", \"chi_sq\", \"test_obs\", \"b_hpgm_go\", \"a_hpgm_go\", \"go_annotation\"]\n df = df.sort(\"chi_sq_fdr\")\n\n elif \"fisher\" in df:\n reordered_columns = [\"go_root\", \"ontology\", \"fisher_fdr\", \"a_hpgm_go_fdr\", \"a_odds\", \"b_hpgm_go_fdr\",\n \"b_odds\", \"a_I_b_I_go\", \"a_M_b_I_go\", \"b_M_a_I_go\", \"go_subtree\", \"U\", \"a_I_b\",\n \"a_M_b\", \"b_M_a\", \"fisher\", \"b_hpgm_go\", \"a_hpgm_go\", \"go_annotation\"]\n df = df.sort(\"fisher_fdr\")\n\n else:\n reordered_columns = [\"go_root\", \"ontology\", \"a_hpgm_go_fdr\", \"a_odds\", \"a\", \"a_I_go\", \"go_subtree\",\n \"U\", \"a_hpgm_go\", \"go_annotation\"]\n df = df.sort(\"a_hpgm_go_fdr\")\n\n df = df[reordered_columns]\n\n return df\n\ndef _remove_go_terms_with_too_many_genes(df, max_genes_prct_limit):\n\n if max_genes_prct_limit == 0:\n return df\n\n go_genes_prct_of_u = df.go_subtree / df.U\n go_terms_with_too_many_genes = go_genes_prct_of_u > max_genes_prct_limit\n\n df = df.drop(df[go_terms_with_too_many_genes].index)\n\n return df\n\n\n\ndef main(ontologies, dataset, gene_vectors, experiment_genes, nb_cpus, max_genes_prct_limit):\n\n # writes command run as first line to stdout\n print(\"# \" + \" \".join(sys.argv))\n\n all_genes_vector = get_all_genes(dataset, experiment_genes)\n\n bool_vectors = prepare_gene_vectors_for_computation(gene_vectors,\n all_genes_vector)\n\n gene_subtree_df = create_gene_subtree_matrix(ontologies, dataset,\n all_genes_vector, nb_cpus)\n\n\n go_overlap_matrix = compute_go_overlap_matrix(gene_subtree_df, bool_vectors,\n nb_cpus)\n\n logging.debug(\"The overlap data looks like:\\n {}\".format(go_overlap_matrix))\n\n 
go_overlap_matrix = _remove_go_terms_with_too_many_genes(go_overlap_matrix, max_genes_prct_limit)\n\n    df = compute_statistics(go_overlap_matrix, gene_vectors)\n\n    logging.debug(\"Df with stats:\\n {}\".format(df))\n\n    df = get_and_insert_annotations(df, dataset, nb_cpus)\n\n    logging.debug(\"Df with annotations:\\n {}\".format(df))\n\n    df = reorder_df(df)\n\n    logging.debug(\"Reordered df:\\n {}\".format(df))\n\n    df.to_csv(sys.stdout, index=False, header=True, sep=\"\\t\", quotechar=\"'\")\n","sub_path":"go_overlap/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60412960","text":"from flask_sqlalchemy import SQLAlchemy\nfrom flask import current_app\n\ndb = SQLAlchemy(session_options=dict(expire_on_commit=False))\n\n\ndef persistence(objs):\n    try:\n        if isinstance(objs, (list, tuple)):\n            db.session.add_all(objs)\n        else:\n            db.session.add(objs)\n        db.session.commit()\n        return True\n    except Exception as err:\n        current_app.logger.error(err)\n        db.session.rollback()\n        db.session.flush()\n        return False\n\n\ndef remove(objs):\n    try:\n        if isinstance(objs, (list, tuple)):\n            for obj in objs:\n                db.session.delete(obj)\n        else:\n            db.session.delete(objs)\n        db.session.commit()\n        return True\n    except Exception as err:\n        current_app.logger.error(err)\n        db.session.rollback()\n        db.session.flush()\n        return False\n","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462596313","text":"from __future__ import unicode_literals\n\nfrom copy import deepcopy\n\nfrom .util import LocalParams, X, make_fq, _pop_from_kwargs\nfrom .types import instantiate, get_to_python\nfrom .pysolr import Solr\n\n\ndef zip_counts(counts, n, over=0):\n    acc = ()\n    for v in counts:\n        acc = acc + (v,)\n        if len(acc) % (n + over) == 0:\n            yield acc\n            acc = acc[n:]\n    if len(acc) == n:\n        yield acc + (None,) * over\n\n\nclass FacetField(object):\n    def __init__(self, field, local_params=None, instance_mapper=None,\n                 type=None, **facet_params):\n        self.field = field\n        self.local_params = LocalParams(local_params)\n        self.key = self.local_params.get('key', self.field)\n        self.type = instantiate(type)\n        self.to_python = get_to_python(self.type)\n        self.facet_params = facet_params\n        self.values = []\n        self._instance_mapper = instance_mapper\n\n    def __deepcopy__(self, memodict):\n        # Fix for http://bugs.python.org/issue1515\n        # self._instance_mapper can be instance method\n        obj = type(self)(self.field, local_params=self.local_params,\n                         instance_mapper=self._instance_mapper,\n                         type=self.type, **self.facet_params)\n        return obj\n\n    def instance_mapper(self, ids):\n        if self._instance_mapper:\n            return self._instance_mapper(ids)\n        return {}\n    \n    def get_params(self):\n        params = {}\n        params['facet'] = True\n        params['facet.field'] = [make_fq(X(self.field), self.local_params)]\n        for p, v in self.facet_params.items():\n            params['f.{}.facet.{}'.format(self.field, p)] = v\n        return params\n\n    def process_data(self, results):\n        self.values = []\n        raw_facet_fields = results.raw_results.facets['facet_fields']\n        facet_data = raw_facet_fields[self.key]\n        for val, count in zip_counts(facet_data, 2):\n            self.values.append(\n                FacetValue(self.to_python(val), count, facet=self))\n\n    def _populate_instances(self):\n        values = [fv.value for fv in self.values]\n        instances_map = self.instance_mapper(values)\n        for fv in self.values:\n            fv._instance = 
instances_map.get(fv.value)\n\n\nclass FacetValue(object):\n def __init__(self, value, count, facet=None):\n self.value = value\n self.count = count\n self.facet = facet\n self.pivot = None\n\n @property\n def instance(self):\n if not hasattr(self, '_instance'):\n if self.facet:\n self.facet._populate_instances()\n else:\n self._instance = None\n return self._instance\n\n\nclass FacetRange(object):\n def __init__(self, field, start, end, gap,\n local_params=None, type=None, **facet_params):\n self.field = field\n self.orig_start = self.start = start\n self.orig_end = self.end = end\n self.orig_gap = self.gap = gap\n self.local_params = LocalParams(local_params)\n self.key = self.local_params.get('key', self.field)\n self.type = instantiate(type)\n self.to_python = get_to_python(self.type)\n self.facet_params = facet_params\n self.values = []\n\n def get_params(self):\n params = {}\n params['facet'] = True\n params['facet.range'] = [make_fq(X(self.field), self.local_params)]\n params['f.{}.facet.range.start'.format(self.field)] = self.orig_start\n params['f.{}.facet.range.end'.format(self.field)] = self.orig_end\n gap = self.orig_gap\n if isinstance(gap, (list, tuple)):\n gap = ','.join(gap)\n params['f.{}.facet.range.gap'.format(self.field)] = gap\n for p, v in self.facet_params.items():\n params['f.{}.facet.range.{}'.format(self.field, p)] = v\n return params\n\n def process_data(self, results):\n raw_facet_data = results.raw_results.facets \\\n .get('facet_ranges', {}) \\\n .get(self.key, {})\n self.start = self.to_python(raw_facet_data.get('start', self.start))\n self.end = self.to_python(raw_facet_data.get('end', self.end))\n self.gap = raw_facet_data.get('gap', self.gap)\n facet_counts = raw_facet_data.get('counts', [])\n for start, count, end in zip_counts(facet_counts, 2, 1):\n start = self.to_python(start)\n if end is None:\n end = self.end\n else:\n end = self.to_python(end)\n self.values.append(\n FacetRangeValue(start, end, count, facet=self))\n\n\nclass FacetRangeValue(object):\n def __init__(self, start, end, count, facet=None):\n self.count = count\n self.start = start\n self.end = end\n self.facet = facet\n\n \nclass FacetQuery(object):\n def __init__(self, fq, local_params=None):\n self.fq = fq\n self.local_params = LocalParams(local_params)\n self.key = self.local_params.get('key',\n make_fq(self.fq, self.local_params))\n self.count = None\n\n def get_params(self):\n params = {}\n params['facet'] = True\n params['facet.query'] = [make_fq(self.fq, self.local_params)]\n return params\n \n def process_data(self, results):\n raw_facet_queries = results.raw_results.facets['facet_queries']\n self.count = raw_facet_queries[self.key]\n\n\nclass FacetPivot(object):\n def __init__(self, *fields, **kwargs):\n self.fields = []\n self.instance_mappers = {}\n self.types = {}\n self.to_pythons = {}\n self.facet_params = {}\n for field in fields:\n kw = {}\n if isinstance(field, (list, tuple)):\n if len(field) == 1:\n field = field[0]\n elif len(field) == 2:\n field, kw = field\n self.instance_mappers[field] = _pop_from_kwargs(kw, 'instance_mapper')\n self.types[field] = _pop_from_kwargs(kw, 'type')\n self.to_pythons[field] = get_to_python(self.types[field])\n self.facet_params[field] = kw\n self.fields.append(field)\n self.field = self.fields[0]\n self.name = ','.join(self.fields)\n self.local_params = LocalParams(\n _pop_from_kwargs(kwargs, 'local_params'))\n self.key = self.local_params.get('key', self.name)\n self.values = []\n\n def get_params(self):\n params = {}\n params['facet'] = 
True\n params['facet.pivot'] = [make_fq(X(self.name), self.local_params)]\n for field, facet_params in self.facet_params.items():\n for p, v in facet_params.items():\n params['f.{}.facet.{}'.format(field, p)] = v\n return params\n\n def get_value(self, value):\n for fv in self.values:\n if fv.value == value:\n return fv\n \n def process_data(self, results):\n self.values = []\n raw_pivot = results.raw_results.facets.get('facet_pivot', {}).get(self.key, {})\n self.process_pivot(raw_pivot, self)\n\n def process_pivot(self, raw_pivot, root_pivot):\n self.root_pivot = root_pivot\n for facet_data in raw_pivot:\n to_python = root_pivot.to_pythons[self.fields[0]]\n fv = FacetValue(\n to_python(facet_data['value']), facet_data['count'], facet=self)\n if 'pivot' in facet_data:\n fv.pivot = FacetPivot(*self.fields[1:])\n fv.pivot.process_pivot(facet_data['pivot'], root_pivot)\n self.values.append(fv)\n \n def _populate_instances(self, field=None):\n if field is None:\n return self.root_pivot._populate_instances(field=self.field)\n \n facet_values = []\n pivots = [self]\n while pivots:\n next_pivots = []\n for cur_pivot in pivots:\n for fv in cur_pivot.values:\n if fv.pivot:\n next_pivots.append(fv.pivot)\n if cur_pivot.field == field:\n facet_values.append(fv)\n pivots = next_pivots\n\n values = set([fv.value for fv in facet_values])\n instance_mapper = self.instance_mappers.get(field)\n instances_map = instance_mapper(values) if instance_mapper else {}\n for fv in facet_values:\n fv._instance = instances_map.get(fv.value)\n","sub_path":"solar/facets.py","file_name":"facets.py","file_ext":"py","file_size_in_byte":8534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379176633","text":"import torch\nfrom torch import nn\nfrom utils.vgg import vgg16\nimport numpy as np\nfrom my_packages.VOSProjection import VOSProjectionModule\n\nclass _SR_loss(nn.Module):\n def __init__(self):\n super(_SR_loss, self).__init__()\n vgg = vgg16()\n loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()\n for param in loss_network.parameters():\n param.requires_grad = False\n self.loss_network = loss_network\n self.mse_loss = nn.MSELoss()\n self.tv_loss = TVLoss()\n\n def forward(self, output, target):\n perception_loss = self.mse_loss(self.loss_network(output), self.loss_network(target))\n image_loss = self.mse_loss(output, target)\n tv_loss = self.tv_loss(output)\n return image_loss + 0.006 * perception_loss + 2e-8 * tv_loss\n\n\nclass TVLoss(nn.Module):\n def __init__(self, tv_loss_weight=1):\n super(TVLoss, self).__init__()\n self.tv_loss_weight = tv_loss_weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self.tensor_size(x[:, :, 1:, :])\n count_w = self.tensor_size(x[:, :, :, 1:])\n h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()\n return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n\n @staticmethod\n def tensor_size(t):\n return t.size()[1] * t.size()[2] * t.size()[3]\n\nclass _Flow_loss(nn.Module):\n def __init__(self):\n super(_Flow_loss, self).__init__()\n self.mse_loss = nn.MSELoss()\n self.SR_loss = _SR_loss()\n\n def forward(self, outputs):\n flow_loss = []\n for i in range(len(outputs) - 1):\n '''need to test which one is better'''\n flow_loss.append(self.SR_loss(outputs[i], outputs[i + 1]))\n # flow_loss.append(self.mse_loss(outputs[i], outputs[i + 1]))\n return 0.005 * 
np.mean(flow_loss)\n\nclass _loss4object(nn.Module):\n    def __init__(self):\n        super(_loss4object, self).__init__()\n        self.VOS = VOSProjectionModule()\n\n    def forward(self, frames, target=None):\n        if target != None:\n            # for SRloss\n            obj_segmentation = self.VOS(frames[0], frames[1])\n            num_objects = len(np.unique(obj_segmentation.flatten()))\n            masks = [obj_segmentation==i for i in range(num_objects)]\n            masked = [(np.bitwise_and(frames[1], mask), np.bitwise_and(target, mask)) for mask in masks]\n            return masked\n\n        else:\n            # for Flowloss\n            obj_segmentation = self.VOS(frames[0], frames[1])\n            num_objects = len(np.unique(obj_segmentation.flatten()))\n            masks = [obj_segmentation==i for i in range(num_objects)]\n            masked_outputs = [[np.bitwise_and(output, mask) for output in frames] for mask in masks]\n            return masked_outputs\n","sub_path":"loss_function.py","file_name":"loss_function.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108446138","text":"#!/bin/python3\n\nfrom os import environ\nfrom itertools import combinations\nfrom functools import reduce\nfrom operator import mul\n\ndef prod (iterable):\n    #res = 1\n    #for v in iterable:\n    #    res *= v\n    #return res\n    return reduce (mul, iterable, 1)\n\n# Complete the solve function below.\ndef solve (a_, days):\n    a_.sort () # stay only with smallest values - remove their multiples\n    a = []\n    for e in a_:\n        if not any (e % x == 0 for x in a):\n            a.append (e)\n    result = []\n    for l, r in days:\n        # get unique count of multiples of coins for right margin ucr\n        n = len (a)\n        l -= 1 # subtract all sets up to l - 1\n        ucr = 0\n        for c in a: ucr += r // c - l // c\n        for i in range (2, n + 1):\n            for cmbprd in map (prod, combinations (a, i)):\n                ucr += (-1) ** (i - 1) * (r // cmbprd - l // cmbprd)\n        \"\"\"\n        # get unique count of multiples of coins for left margin ucl\n        ucl = 0\n        for c in a: ucl += l // c\n        for i in range (2, n + 1):\n            for cmbprd in map (prod, combinations (a, i)):\n                ucl += (-1) ** (i - 1) * (l // cmbprd)\n        \"\"\"\n        result.append (ucr)\n    return result\n\nif __name__ == '__main__':\n    fptr = open (environ ['OUTPUT_PATH'], 'w')\n    a_count = int (input ())\n    a = []\n    for _ in range(a_count): a.append (int (input ()))\n    d = int (input ())\n    days = []\n    for _ in range (d):\n        days.append (list (map (int, input ().rstrip ().split ())))\n    result = solve (a, days)\n    fptr.write ('\\n'.join (map (str, result)))\n    fptr.write ('\\n')\n    fptr.close ()\n","sub_path":"_metha_typ_supermarket.py","file_name":"_metha_typ_supermarket.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"57842025","text":"import time\n\ncnt = 0\n\n# keep appending to the end of the file, to test that tailing the file works correctly\nwhile(True):\n    cnt += 1\n    with open(\"./file_model/Myfun/test.txt\",mode=\"at\",encoding=\"utf-8\") as f:\n        f.write(f\"{cnt} {cnt} {cnt}\\n\")\n    time.sleep(1)\n    print(\"written successfully\")\n","sub_path":"file_learn/Myfun/tail_test.py","file_name":"tail_test.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494978391","text":"# depyct/color/parser.py\n# Copyright (c) 2012-2017 the Depyct authors and contributors \n#\n# This module is part of Depyct and is released under the MIT License:\n# http://www.opensource.org/licenses/mit-license.php\n\"\"\"\nFormats:\n\n    l(1)\n    la(1, 2)\n    rgb(1, 2, 3)\n    hsv(1, 2, 3)\n    hsl(1, 2, 3)\n    rgba(1, 2, 3, 4)\n    
cmyk(1, 2, 3, 4)\n #aaa\n #aaaaaa\n\n\"\"\"\nimport re\n\nl_re = re.compile(r\"^l\\((\\d+)\\)$\")\nla_re = re.compile(r\"^la\\((\\d+),(\\d+)\\)$\")\nrgb_re = re.compile(r\"^rgb\\((\\d+),(\\d+),(\\d+)\\)$\")\nhsv_re = re.compile(r\"^hsv\\((\\d+),(\\d+),(\\d+)\\)$\")\nhsl_re = re.compile(r\"^hsl\\((\\d+),(\\d+),(\\d+)\\)$\")\nrgba_re = re.compile(r\"^rgba\\((\\d+),(\\d+),(\\d+),(\\d+)\\)$\")\ncmyk_re = re.compile(r\"^cmyk\\((\\d+),(\\d+),(\\d+),(\\d+)\\)$\")\nweb_short_re = re.compile(r\"^#([\\da-fA-F])([\\da-fA-F])([\\da-fA-F])$\")\nweb_long_re = re.compile(r\"^#([\\da-fA-F]{2})([\\da-fA-F]{2})([\\da-fA-F]{2})$\")\n\ncolor_regexes = {\n \"l\": l_re,\n \"la\": la_re,\n \"rgb\": rgb_re,\n \"hsv\": hsv_re,\n \"hsl\": hsl_re,\n \"rgba\": rgba_re,\n \"cmyk\": cmyk_re\n }\n\ntest_values = [\n (l_re, \"l(10)\"),\n (la_re, \"la(10,100)\"),\n (rgb_re, \"rgb(10,20,30)\"),\n (hsv_re, \"hsv(10,20,30)\"),\n (hsl_re, \"hsl(10,20,30)\"),\n (rgba_re, \"rgba(10,20,30,40)\"),\n (cmyk_re, \"cmyk(10,20,30,40)\"),\n (web_short_re, \"#341\"),\n (web_long_re, \"#a7882d\"),\n ]\n\n\ndef parse_color_string(color):\n try:\n if color.startswith(\"#\"):\n if len(color) == 4:\n m = web_short_re.match(color)\n res = tuple(int(c*2, 16) for c in m.groups())\n else:\n m = web_long_re.match(color)\n res = tuple(int(c, 16) for c in m.groups())\n return \"web\", res\n else:\n prefix = color.split(\"(\")[0]\n regex = color_regexes[prefix]\n m = regex.match(color)\n return prefix, tuple(int(c) for c in m.groups())\n except:\n raise ValueError(\"Not a valid color\")\n\n\nif __name__ == \"__main__\":\n for _, color in test_values:\n try:\n print(parse_color_string(color))\n except ValueError:\n print(\"{} didn't parse\".format(color))\n","sub_path":"lib/depyct/color/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"234247037","text":"\"\"\"\n163\nmissing ranges\neasy\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:\n\n\n res = []\n\n def add_range(left, right):\n if left == right:\n res.append(str(left))\n elif left < right:\n res.append(str(left) + \"->\" + str(right))\n\n if not nums:\n add_range(lower, upper)\n return res\n\n for i in range(len(nums)):\n if i == 0:\n left, right = lower, nums[i]-1\n else:\n left, right = nums[i-1]+1, nums[i]-1\n\n add_range(left, right)\n\n add_range(nums[-1]+1, upper)\n\n return res\n\n\nsol = Solution()\nnums = [0,1,3,50,75]\nlower = 0\nupper = 99\nprint(sol.findMissingRanges(nums, lower, upper))","sub_path":"Q163.py","file_name":"Q163.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240077343","text":"from datetime import datetime\r\nimport io\r\nimport requests\r\nfrom sqlalchemy.engine import create_engine\r\n\r\nimport pandas as pd\r\nimport psycopg2 as pg\r\nfrom DBUtil.option_data import get_list\r\n\r\n\r\n\n\r\ndef get_date():\r\n return str(datetime.today().strftime('%d%m%Y'))\r\n\r\n\r\n \r\n \r\n \r\ndef extract_volatility():\r\n conn = pg.connect(database=\"optiondata\", user = \"postgres\", password = \"superuser\", host = \"127.0.0.1\", port = \"5432\")\r\n df1=pd.DataFrame()\r\n engine = create_engine('postgresql://postgres:superuser@localhost:5432/optiondata')\r\n csv_url = 
'https://www.nseindia.com/archives/nsccl/volt/CMVOLT_'+str(datetime.today().strftime('%d%m%Y'))+'.CSV'\r\n req = requests.get(csv_url).content\r\n df= pd.read_csv(io.StringIO(req.decode(encoding = 'UTF-8',errors = 'strict')))\r\n df1['Date']=df['Date']\r\n df1['symbol']=df['Symbol']\r\n df1['Previous Volatility']=df['Previous Day Underlying Volatility (D)']\r\n df1['Daily Volatility']=df['Current Day Underlying Daily Volatility (E) = Sqrt(0.94*D*D + 0.06*C*C)']\r\n df1['Annualised Volatility']=df['Underlying Annualised Volatility (F) = E*Sqrt(365)'] \r\n df1.to_sql(\"volatility_data\", engine,if_exists='append')\r\n conn.cursor().execute(\"delete from volatility_data where volatility_data.symbol not in (\"+str(get_list()).strip(\"[\").strip(\"]\")+\")\")\r\n conn.commit()\r\n\r\n","sub_path":"DataExtract/original/volatility.py","file_name":"volatility.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149916423","text":"#!/usr/bin/python3\n\nimport os\nimport subprocess\nimport config\nimport threading\nimport time\nimport random\nimport work\n\nclass SSHClient:\n def __init__(self, user, host):\n self.ssh_cmd_base = 'ssh -q -o StrictHostKeyChecking=no -o PasswordAuthentication=no'\n self.user = user\n self.host = host\n self.ssh_cmd = '{0} {1}@{2}'.format(self.ssh_cmd_base, self.user, self.host)\n\n def reachable(self):\n reachable_cmd = '{0} -n'.format(self.ssh_cmd)\n with open(os.devnull, 'w') as devnull:\n return_code = subprocess.call(reachable_cmd.split(' '), stdout = devnull, stderr = devnull)\n return return_code == 0\n\n def run_script(self, script_code):\n remote_python_ssh_cmd = '{0} -t python'.format(self.ssh_cmd)\n remote_python_process = subprocess.Popen(remote_python_ssh_cmd.split(' '), stdin=subprocess.PIPE)\n remote_python_process.communicate(input=bytes(script_code, 'UTF-8'))\n remote_python_process.wait()\n return remote_python_process.returncode == 0\n\nclass Worker(threading.Thread):\n def __init__(self, user, host):\n threading.Thread.__init__(self)\n self.user = user\n self.host = host\n self.task = None\n\n def assign_task(self, task):\n self.task = task\n\n def set_pools(self, worker_pool, task_pool):\n self.worker_pool = worker_pool\n self.task_pool = task_pool\n\n def run(self):\n success = SSHClient(self.user, self.host).run_script(\\\n 'import subprocess; import sys; sys.exit(subprocess.call({0}))'.format(self.task))\n if success:\n self.task_pool.completed(self.task)\n else:\n self.task_pool.put_back(self.task)\n self.worker_pool.put(self)\n\nclass WorkerPool:\n def __init__(self, config_stations):\n self.stations = {}\n self.global_lock = threading.Lock()\n self._build_station_dict(config_stations)\n self._filter_available_stations()\n self.free_workers_sem_max = sum(self.stations.values())\n self.free_workers_sem = threading.Semaphore(self.free_workers_sem_max)\n print('Station count: {0}'.format(len(self.stations)))\n print('Thread count: {0}'.format(sum(self.stations.values())))\n\n def _build_station_dict(self, config_stations):\n for user, host, max_threads in config_stations:\n self.stations[user, host] = max_threads\n\n def _filter_available_stations(self):\n for user, host in list(self.stations.keys()):\n if not SSHClient(user, host).reachable():\n print(user + '@' + host + ' *OFFLINE*')\n del self.stations[user, host]\n else:\n print(user + '@' + host + ' *ONLINE*')\n\n def get(self):\n self.free_workers_sem.acquire()\n self.global_lock.acquire()\n 
worker = None\n for (user_host, available_threads) in self.stations.items():\n if available_threads > 0:\n self.stations[user_host] -= 1\n (user, host) = user_host\n worker = Worker(user, host)\n break\n self.global_lock.release()\n return worker\n\n def put(self, worker):\n self.global_lock.acquire()\n self.stations[worker.user, worker.host] += 1\n self.free_workers_sem.release()\n self.global_lock.release()\n\n def wait_all(self):\n for i in range(0, self.free_workers_sem_max):\n self.free_workers_sem.acquire()\n print('WorkerPool: waited all workers, semaphore is now on zero.')\n\nclass UnorderedTaskPool:\n def __init__(self, work_todo):\n self.count_to_work = len(work_todo)\n self.count_done = 0\n self.todo_list = work_todo\n self.global_lock = threading.Lock()\n\n def done(self):\n self.global_lock.acquire()\n remaining = self.count_to_work - self.count_done\n self.global_lock.release()\n return remaining == 0\n\n def get(self):\n task = None\n self.global_lock.acquire()\n if self.todo_list:\n task = self.todo_list.pop()\n self.global_lock.release()\n return task\n\n def put_back(self, task):\n self.global_lock.acquire()\n self.todo_list.append(task)\n self.global_lock.release()\n\n def completed(self, task):\n self.global_lock.acquire()\n self.count_done += 1\n self.global_lock.release()\n\n def print_stats(self):\n done = self.count_done\n total = self.count_to_work\n percent = 100.0 * done / total\n print('UnorderedTaskPool: progress {0:.2f}% ({1}/{2})'.format(percent, done, total))\n\ndef wait_a_bit():\n time.sleep(1)\n\ndef do_work(workers, tasks):\n while not tasks.done():\n task = tasks.get()\n if task is not None:\n worker = workers.get()\n worker.assign_task(task)\n worker.set_pools(workers, tasks)\n worker.start()\n else:\n wait_a_bit()\n tasks.print_stats()\n print()\n workers.wait_all()\n\ndef main():\n workers = WorkerPool(config.stations)\n tasks = UnorderedTaskPool(work.todo)\n do_work(workers, tasks)\n\nif __name__ == '__main__':\n main()\n","sub_path":"osm/processing/ssh-cluster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284858971","text":"# The purpose of this code is to break the components of the atmospheric mass loss calculation into functions\nimport numpy as np\nimport matplotlib.pyplot as pp\nfrom astropy.io import ascii\nfrom astropy.table import Table\n\n\n\n\n# Global parameters\n_bol_luminosity = 1e46\n_potential_r_0 = 3000 # in pc\n_potential_z_0 = 300 # in pc\n_potential_v_0 = 3e-4 # in pc/yr\n\n# The first function we will define is the emission spectrum of the AGN\ndef emission_spec():\n richards = ascii.read('/Users/Jenna/Documents/Matt Mechtley Quasar Research/Richards_Mean_Quasar_Data.txt')\n wavelength = 3e14 / 10**richards['LogF']\n flux = richards['All']\n flux = 10**flux\n flux = flux / (10**richards['LogF'])\n my_table = Table([wavelength, flux], names=['wavelength', 'flux']) \n my_table['wavelength'].unit = 'microns'\n my_table['flux'].unit = 'erg / s / Hz'\n print(my_table)\n return my_table\n # Need wavelength and flux columns --> wavelength v. flux before dust & wavelength v. 
flux after dust\n\n# Calculate flux at planet using luminosity\ndef spec_in_flux(spec_in_luminosity, distance):\n distance_cm = distance * 3.086e18\n spec_in_luminosity['flux'] = spec_in_luminosity['flux'] / (4*np.pi*distance_cm**2)\n return spec_in_luminosity\n\n# The next function we will define is the luminosity (the normalization of the brightness of the spectrum)\ndef luminosity():\n # Essentially a constant value that we can plug in\n # Multiply table column by constant\n # Bolometric correction at 5100 angstroms = 12.17\n\n return\n\n# The third function we will define is the distance from the quasar as a function of the orbital parameters\ndef r_phi_z_position(current_position, current_velocity, ang_momentum, q, delta_time):\n # Parameters will include eccentricity of orbit and angular momentum\n radius, phi, height = current_position # Vector unpacking\n phi_dot = ang_momentum / radius ** 2\n vel_r_dot = ang_momentum ** 2 / radius ** 3 \\\n - radius / (radius ** 2 + height ** 2)\n vel_z_dot = -height / q / (radius ** 2 + height ** 2)\n\n # Calculate all new positions from old velocity, then calculate new velocity\n r_phi_z_dot = np.array((current_velocity[0], phi_dot, current_velocity[1] / q))\n new_position = current_position + r_phi_z_dot * delta_time\n # Calculates the change in the position with the time step\n new_velocity = current_velocity + np.array((vel_r_dot, vel_z_dot)) * delta_time\n\n return new_position, new_velocity\n\n# The fourth function we will define is the metallicity of the host star system\ndef metallicity_host():\n # Z component of the original calculation - do we need to manipulate the original equation to get this?\n # We need to talk to Patrick Young on this one\n # This will consider photochemistry as well - talk to Ariel Anbar \n\n return \n\ndef torus_obscuration(position, luminosity, r_in=20, r_out=600, tau_e=10, beta=1.1, gamma=1):\n # Units for radius: AU; min. 20, max. 600\n theta_h = np.arccos((1 + 3*luminosity / 10**42.65)**(1 - 2*0.44))\n # Values for theta_h come from Lusso et al paper and generate the dashed line from figure 19 right panel\n equatorial_density = r_out**(1-beta) / (1-beta) - r_in**(1-beta) / (1-beta)\n density_constant = tau_e / equatorial_density\n x_y_z_position = np.array((position[0]*np.cos(position[1]), position[0]*np.sin(position[1]), position[2]))\n polar_angle = np.dot(x_y_z_position, (0,0,1)) / np.sqrt(np.dot(x_y_z_position, x_y_z_position))\n\n if np.arccos(np.abs(polar_angle)) < theta_h:\n return 0\n else:\n return 0.61*density_constant*equatorial_density*np.exp(-gamma*np.abs(polar_angle))\n\n# The last function we will define is the dust obscurity as a function of where the planet lies in the taurus\ndef dust_obscurity(spectrum, a_v):\n # dust_obscurity will essentially calculate the angle between the planet and the axis of the quasar\n # k_lam is only valid for UV wavelengths (0.12 - 0.63 microns)\n spectrum = spectrum.copy()\n lam = spectrum['wavelength']\n k_lam = np.zeros_like(lam)\n uv_opt = (lam < 0.63) & (lam > 0.12)\n k_lam[uv_opt] = 2.569*(-2.156+(1.509/lam[uv_opt])-(0.198/lam[uv_opt]**2)+(0.011/lam[uv_opt]**3))+4.05\n opt_nir = (lam >= 0.63) & (lam < 2.2)\n k_lam[opt_nir] = 2.659*(-1.857+(1.040/lam[opt_nir]))+4.05\n # X-ray UV eventually\n # 4.05 is the Calzetti R_V value\n a_lam = k_lam * a_v / 4.05\n # Converting attentuation in magnitudes to attenuation in flux\n spectrum['flux'] = spectrum['flux'] * 10**(-0.4 * a_lam)\n return spectrum\n\ndef main_loop(): # Change function name? 
Play around with initial conditions\n step_size = 1e-3\n steps = int(30 / step_size)\n # Initial conditions\n r_phi_z_0 = np.array((2.67, 0.0, 0.17))\n vel_0 = np.array((0.0, 0.0))\n q = _potential_z_0 / _potential_r_0\n # Angular momentum of the orbit\n Lz = 2.5\n\n spectrum = emission_spec()\n\n # Calculate velocities at t=1/2 (leapfrog integration)\n pos_half, vel_half = r_phi_z_position(r_phi_z_0, vel_0, Lz, q, step_size / 2.0)\n # 2.0 rather than 2 because we want a floating point value rather than an integer\n\n current_position = r_phi_z_0\n current_velocity = vel_half\n\n all_positions = np.zeros((steps, 3))\n all_avs = np.zeros(steps)\n\n for step in range(steps):\n current_position, current_velocity = r_phi_z_position(current_position, current_velocity, Lz, q, step_size)\n current_position_phys = current_position * (_potential_r_0, 1, _potential_z_0) # Redimensionalizing units\n all_positions[step, :] = current_position_phys\n all_avs[step] = torus_obscuration(current_position_phys, _bol_luminosity)\n red_spec = dust_obscurity(spectrum, all_avs[step])\n flux_spec = spec_in_flux(red_spec, np.sqrt(current_position_phys[0]**2 + current_position_phys[2]**2))\n # Flux_spec is the thing that actually interacts with the atmosphere\n\n\n pp.plot(all_positions[:, 0], all_positions[:, 2])\n pp.xlabel('Radius')\n pp.ylabel('Disk Height')\n # No title on plots if you're putting them in a journal\n pp.show()\n\n# Plot y vs. x\n x = all_positions[:, 0]*np.cos(all_positions[:, 1])\n y = all_positions[:, 0]*np.sin(all_positions[:, 1])\n pp.plot(x, y, label='$\\~L_z = %0.2g$, $q = %0.2g$' % (Lz, q))\n pp.xlabel('$x$')\n pp.ylabel('$y$')\n pp.legend(loc='upper right')\n pp.title('X/Y Position Plot')\n pp.show()\n\n# Plot av vs. time\n steps_myr = np.arange(steps) * step_size * _potential_r_0/_potential_v_0 / 1e6\n pp.plot(steps_myr, all_avs)\n pp.xlabel('Time (Myr)')\n pp.ylabel('$A_V$')\n pp.show()\n\n# Find zero crossings (in terms of radius and radial velocity)\n# Zero crossings are where the star crosses the zero point in the z-plane (when it moves down); tells us if the star is\n# moving toward or away from the center\n#up_zc_mask = np.diff(np.sign(heights), 1) > 0\n\n# START EDITING HERE\n#plot_title = r'$\\~v_{r,\\tau=0} = %0.2g$, $\\~v_{z,\\tau=0} = %0.2g$,' \\\n #r'$\\~r_{\\tau=0} = %0.2g$, $\\~z_{\\tau=0} = %0.2g$' % \\\n #(vel_r0, vel_z0, rad0, height0) #r_phi_z_0, vel_half\n\n# Plot upward zero crossings, r vs. v_r\n#ax.plot(radii[1:][up_zc_mask], vel_rs[1:][up_zc_mask], 'o',\n #label='$\\~L_z = %0.2g$, $q = %0.2g$' % (Lz, q)) # Might need to fix this too... 
Check once other calculations\n# are complete\n#ax.set_xlabel('$\\~r$')\n#ax.set_ylabel('$\\~v_r$')\n#ax.legend(loc='lower right')\n#ax.set_title(plot_title)\n#pp.show()\n\n\nif __name__ == \"__main__\":\n main_loop()\n # spectrum = emission_spec()\n # pp.loglog(spectrum['wavelength'], spectrum['flux'], color=\"blue\")\n # pp.xlabel('Wavelength ($\\mu$m)')\n # pp.ylabel('Flux (erg / s / Hz)')\n # spectrum = dust_obscurity(spectrum, 2.0) # a_v here is a free variable\n # pp.loglog(spectrum['wavelength'], spectrum['flux'], color=\"red\")\n # pp.xlabel('Wavelength ($\\mu$m)')\n # pp.ylabel('Flux (erg / s / Hz)')\n # spectrum = spec_in_flux(spectrum, distance=10) # distance here is also a free variable\n # pp.loglog(spectrum['wavelength'], spectrum['flux'], color=\"green\")\n # pp.xlabel('Wavelength ($\\mu$m)')\n # pp.ylabel(r'Flux ($f_\\nu$, arbitrary)')\n # pp.show() ","sub_path":"Atmospheric Mass Loss Functions.py","file_name":"Atmospheric Mass Loss Functions.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197213541","text":"import logging\nfrom typing import Optional\n\nimport dpkt\nfrom dpkt.dns import DNS\nfrom dpkt.ip import IP\nfrom dpkt.udp import UDP\nfrom munch import Munch\n\nfrom core.configuration.data import ConfigurationData\nfrom core.packet_parsers.base import PacketParserInterface\nfrom core.lib.ip_utils import IpAddrUtils\n\n\nclass DnsPacketParser(PacketParserInterface):\n def __init__(self, config: ConfigurationData):\n self.config = config\n self.ip_utils = IpAddrUtils()\n\n @staticmethod\n def load_dns_packet_from_ip_packet(ip_packet: IP) -> Optional[DNS]:\n try:\n udp_packet = UDP(ip_packet.data)\n return DnsPacketParser.load_dns_packet_from_udp_packet(udp_packet)\n\n except BaseException as ex:\n logging.warning('Can not extract DNS packet from UDP packet. Error: `%s`', ex)\n raise ex\n\n @staticmethod\n def load_dns_packet_from_udp_packet(udp_packet: UDP) -> Optional[DNS]:\n try:\n return DNS(udp_packet.data)\n\n except Exception as ex:\n logging.warning('Can not extract DNS packet from UDP packet. Error: `%s`', ex)\n raise ex\n\n def extract_data(self, packet: DNS) -> Munch:\n data = Munch()\n try:\n data.dns_type = packet.qr\n data.dns_op = packet.op\n data.dns_rcode = packet.rcode\n\n if data.dns_type == dpkt.dns.DNS_Q:\n # This is a DNS query\n data.update(self.extract_data_from_dns_query(packet))\n\n elif data.dns_type == dpkt.dns.DNS_R:\n # This is a DNS response\n data.update(self.extract_data_from_dns_response(packet))\n\n except BaseException as ex:\n logging.warning('Unable to extract DNS from `%s`. Error: `%s`', type(packet), ex)\n raise ex\n\n return data\n\n def extract_data_from_dns_query(self, dns_packet: DNS) -> Munch:\n data = Munch()\n try:\n if len(dns_packet.qd) > 1:\n if self.config.use_numeric_values is True:\n data.dns.query_multiple_domains = 1\n else:\n data.dns_query_multiple_domains = True\n\n data.dns_query_domain = self.config.FieldDelimiter.join([q.name for q in dns_packet.qd])\n data.dns_query_type = self.config.FieldDelimiter.join([str(q.type) for q in dns_packet.qd])\n data.dns_query_cls = self.config.FieldDelimiter.join([str(q.cls) for q in dns_packet.qd])\n\n except BaseException as ex:\n logging.warning('Unable to extract DNS from `%s`. 
Error: `%s`', type(dns_packet), ex)\n raise ex\n\n return data\n\n def extract_data_from_dns_response(self, dns_packet: DNS) -> Munch:\n data = Munch()\n # Process and get responses based on record types listed in\n # http://en.wikipedia.org/wiki/List_of_DNS_record_types\n dns_ans_ip_list = []\n dns_ans_name_list = []\n dns_ans_ttl = []\n\n try:\n for answer in dns_packet.an:\n data.dns_ans_type = answer.type\n if answer.type == dpkt.dns.DNS_CNAME:\n data.dns_ans_cname = answer.name\n data.dns_ans_cname_ttl = answer.ttl\n\n elif answer.type == dpkt.dns.DNS_A or answer.type == dpkt.dns.DNS_AAAA:\n if hasattr(answer, 'ip'):\n dns_ans_ip_list.append(self.ip_utils.inet_to_str(answer.ip))\n dns_ans_name_list.append(answer.name)\n dns_ans_ttl.append(answer.ttl)\n # TODO: Handle other types of dns answers:\n # Ref: https://engineering-notebook.readthedocs.io/en/latest/engineering/dpkt.html#dns-answer\n\n data.dns_ans_name = self.config.FieldDelimiter.join(dns_ans_name_list)\n # We are using only max value because in experience ttl is same even if there is separate ttl for each IP\n # address in DNS response\n if dns_ans_ttl:\n data.dns_ans_ttl = max(dns_ans_ttl)\n else:\n data.dns_ans_ttl = None\n\n if self.config.use_numeric_values is True:\n dns_ans_ip_list = map(self.ip_utils.ip_to_int, dns_ans_ip_list)\n data.dns_ans_ip = self.config.FieldDelimiter.join([str(ip) for ip in dns_ans_ip_list])\n\n except Exception as ex:\n logging.error('Unable to process dns answers packet. Error: `%s`', ex)\n raise ex\n\n return data\n","sub_path":"core/packet_parsers/dns_parser.py","file_name":"dns_parser.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225106059","text":"#!/usr/bin/env python3\n\nimport sys\nimport socket\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\nimport matplotlib as mpl\nfrom geolite2 import geolite2\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nclass GUI():\n\tdef __init__(self, data):\n\t\tmpl.rcParams['toolbar'] = 'None'\n\t\tmpl.rcParams.update({'font.size': 14})\n\t\tself.rows = [\"Dns\", \"Minimum latency\", \"Average latency\",\"Maximum latency\", \"errors\"]\n\t\tself.data = data\n\t\tself.cellsText = []\n\t\tself.columns = ()\n\t\tself.corners = (data[\"header\"][\"lowCorner\"], data[\"header\"][\"highCorner\"])\n\n\tdef addGraph(self):\n\t\ta = plt.figure(num=\"QuarksrouteGraph\")\n\t\tself.columns = tuple([(hop[\"responses\"][0][\"ip\"]) for hop in self.data[\"hops\"] if \"ip\" in hop[\"responses\"][0]])\n\t\tminimumLat = [hop[\"responses\"][0][\"min\"] for hop in self.data[\"hops\"] if \"min\" in hop[\"responses\"][0]]\n\t\tmaximumLat = [hop[\"responses\"][0][\"max\"] for hop in self.data[\"hops\"] if \"max\" in hop[\"responses\"][0]]\n\t\tdiff = [hop[\"responses\"][0][\"max\"] - hop[\"responses\"][0][\"min\"] + (max(maximumLat) / 20) for hop in self.data[\"hops\"] if \"max\" in hop[\"responses\"][0] and \"min\" in hop[\"responses\"][0]]\n\t\tb_color = ['b'] * len(self.rows)\n\t\tindex = np.arange(len(self.columns))\n\t\tplt.bar(index, diff, 0.3, bottom=minimumLat, color=b_color)\n\t\tplt.subplots_adjust(left=0.09, bottom=0.57, right=0.99, top=0.93)\n\n\tdef addTable(self):\n\t\ta = plt.figure(num=\"QuarksrouteGraph\")\n\t\tif self.columns:\n\t\t\tplt.title('Results for: {}.\\n{} device(s) did not responded'.format(self.data[\"header\"][\"target\"], 
self.data[\"header\"][\"timeouts\"]))\n\t\t\tself.cellsText.append([hop[\"responses\"][0][\"dns\"][:16] for hop in self.data[\"hops\"]])\n\t\t\tself.cellsText.append([\"{:.2f}\".format(float(hop[\"responses\"][0][\"min\"])) for hop in self.data[\"hops\"]])\n\t\t\tself.cellsText.append([\"{:.2f}\".format(float(hop[\"responses\"][0][\"avg\"])) for hop in self.data[\"hops\"]])\n\t\t\tself.cellsText.append([\"{:.2f}\".format(float(hop[\"responses\"][0][\"max\"])) for hop in self.data[\"hops\"]])\n\t\t\tself.cellsText.append([\"{}\".format(hop[\"responses\"][0][\"errors\"]) for hop in self.data[\"hops\"]])\n\t\t\tc_color = ['c'] * len(self.rows)\n\t\t\ttable = plt.table(cellText=self.cellsText,\n\t\t\t rowLabels=self.rows,\n\t\t\t rowColours=c_color,\n\t\t\t colLabels=self.columns,\n\t\t\t cellLoc='center',\n\t\t\t bbox=[0, -1.50, 1., 1.5],\n\t\t\t loc='bottom')\n\t\t\tfor cell in table._cells:\n\t\t\t\tif cell[1] != -1:\n\t\t\t\t\ttable._cells[cell].get_text().set_rotation(50)\n\t\t\t\t\ttable._cells[cell].get_text().set_wrap(True)\n\t\t\ttable.auto_set_font_size(False)\n\t\t\ttable.set_fontsize(9)\n\t\t\tmaximumLat = [res[\"max\"] for hop in self.data[\"hops\"] for hop in self.data[\"hops\"] for res in hop[\"responses\"]]\t\t\n\t\t\tplt.ylabel(\"Latency\")\n\t\t\tplt.yticks(np.arange(0, max(maximumLat) * 1.1, step=max(maximumLat) / 10))\n\t\t\tplt.xticks([])\t\t\n\t\telse:\n\t\t\tplt.title('No results found for: {}'.format(self.data[\"header\"][\"target\"]))\n\n\tdef addMap(self):\n\t\tplt.figure(num=\"QuarksrouteMap\")\n\t\tm=Basemap(llcrnrlon=self.corners[0][0], llcrnrlat=self.corners[0][1],urcrnrlon=self.corners[1][0],urcrnrlat=self.corners[1][1])\n\t\tm.drawmapboundary(fill_color='#A6CAE0', linewidth=0)\n\t\tm.fillcontinents(color='grey', alpha=0.6, lake_color='blue')\n\t\tm.drawcountries(color=\"white\")\n\t\tm.drawcoastlines(linewidth=0.1, color=\"white\")\n\t\thead = ()\n\t\tindex = 0\n\t\tfor hop in self.data[\"hops\"]:\n\t\t\tif hop[\"geo\"]:\n\t\t\t\tif head is not ():\n\t\t\t\t\tm.drawgreatcircle(head[0], head[1], hop[\"geo\"][\"location\"][\"longitude\"], hop[\"geo\"][\"location\"][\"latitude\"], linewidth=2, color='blue')\n\t\t\t\thead = (hop[\"geo\"][\"location\"][\"longitude\"], hop[\"geo\"][\"location\"][\"latitude\"])\n\t\t\t\tm.plot(head[0], head[1], linestyle='none', marker=\"o\", markersize=8, alpha=0.6, c=\"cyan\", markeredgecolor=\"black\", markeredgewidth=1)\n\t\t\t\tplt.annotate(index, xy=(head[0], head[1] + 2))\n\t\t\t\tindex=index+1\n\n\tdef addTree(self):\n\t\tplt.figure(num=\"QuarksrouteTree\")\n\t\tG = nx.Graph()\n\t\tlightData = []\n\t\tfor hops in self.data[\"hops\"]:\n\t\t\thop = []\n\t\t\tfor resp in hops[\"responses\"]:\n\t\t\t\tif resp[\"ip\"] and not [True for h in hop if h[\"ip\"] == resp[\"ip\"]]:\n\t\t\t\t\thop.append(resp)\n\t\t\tif hop: lightData.append(hop)\n\t\tcpt = 0\n\t\ttmp = {}\n\t\tedgeLabels = {}\n\t\tfor hop in lightData:\n\t\t\tfirst = True\n\t\t\tfor resp in hop:\n\t\t\t\tif not tmp:\n\t\t\t\t\ttmp = resp\n\t\t\t\tif first:\n\t\t\t\t\tif cpt == 0 or cpt == len(lightData) - 1:\n\t\t\t\t\t\tG.add_node(resp[\"ip\"], ends=False, start=True)\n\t\t\t\t\telse:\n\t\t\t\t\t\tG.add_node(resp[\"ip\"], ends=True, start=False,)\n\t\t\t\t\tfirst = False\n\t\t\t\t\tif tmp:\n\t\t\t\t\t\tG.add_edge(resp[\"ip\"], tmp[\"ip\"], path=True, label=\"toto\")\n\t\t\t\t\t\tedgeLabels[(tmp[\"ip\"],resp[\"ip\"])] = resp[\"avg\"]\n\t\t\t\t\t\ttmp = resp\n\t\t\t\telse:\n\t\t\t\t\tG.add_node(resp[\"ip\"], ends=False, start=False,)\n\t\t\t\t\tif 
tmp:\n\t\t\t\t\t\tG.add_edge(tmp[\"ip\"], resp[\"ip\"], path=False, label=\"toto\")\n\t\t\t\t\t\tedgeLabels[(tmp[\"ip\"],resp[\"ip\"])] = resp[\"avg\"]\n\t\t\tcpt = cpt + 1\n\t\tpos = nx.spring_layout(G,k=0.2)\n\t\tstartNodes = [node[0] for node in G.nodes(data=True) if node[1][\"start\"]]\n\t\tendsNodes = [node[0] for node in G.nodes(data=True) if node[1][\"ends\"] and not node[1][\"start\"]]\n\t\totherNodes = [node[0] for node in G.nodes(data=True) if not node[1][\"ends\"] and not node[1][\"start\"]]\n\t\tpathEdges = [(u, v) for (u, v, d) in G.edges(data=True) if d['path']]\n\t\totherEdges = [(u, v) for (u, v, d) in G.edges(data=True) if not d['path']]\n\t\tnx.draw_networkx_nodes(G, pos, nodelist=endsNodes, node_color='g',node_shape='.')\n\t\tnx.draw_networkx_nodes(G, pos, nodelist=otherNodes, node_color='r', node_shape='.')\n\t\tnx.draw_networkx_nodes(G, pos, nodelist=startNodes, node_color='g', node_shape='s')\n\t\tnx.draw_networkx_edges(G, pos, edgelist=pathEdges, width=1)\n\t\tnx.draw_networkx_edges(G, pos, edgelist=otherEdges, alpha=0.5, edge_color='b', style='dashed')\n\n\t\tpos_higher = {}\n\t\ty_off = 0.06\n\t\tfor k, v in pos.items():\n\t\t\tpos_higher[k] = (v[0], v[1]+y_off)\n\t\tplt.axis('off')\n\t\tnx.draw_networkx_labels(G, pos_higher, font_size=10, font_family='sans-serif')\n\n\n\tdef render(self):\n\t\tself.addMap()\n\t\tself.addTree()\n\t\tself.addGraph()\n\t\tself.addTable()\n\t\tplt.show()\n\ndef parseFile(filename):\n\ttry:\n\t\twith open(filename, \"r\") as file: \n\t\t\troot = ET.parse(file).getroot()\n\texcept IOError as e:\n\t\tprint(e)\n\texcept:\n\t\tprint(\"Your file is probably not a correct XML file\")\n\telse:\n\t\tdata = {\"header\":{}, \"hops\":[]}\n\t\theader = root.find('header')\n\t\tdata[\"header\"][\"numQueries\"] = header.get('numQueries')\n\t\tdata[\"header\"][\"target\"] = header.get('target')\n\t\tfor entry in root.findall('hops/entry'):\n\t\t\thop = {\"responses\":[], \"id\": entry.get(\"id\")}\n\t\t\tresponseData = {\"queries\": []}\n\t\t\tfor response in entry.findall('response'):\n\t\t\t\tresponseData = {\"queries\": []}\n\t\t\t\tresponseData[\"ip\"] = response.get(\"ip\") if response.get(\"ip\") else \"\"\n\t\t\t\tresponseData[\"dns\"] = response.get(\"dns\") if response.get(\"dns\") else \"\"\n\t\t\t\tresponseData[\"errors\"] = response.get(\"errors\") if response.get(\"errors\") else \"\"\n\t\t\t\tqueries = []\n\t\t\t\tfor query in response.findall('queries/query'):\n\t\t\t\t\tqueryData = {}\n\t\t\t\t\tqueryData[\"value\"] = query.get(\"value\")\n\t\t\t\t\tqueryData[\"unit\"] = query.get(\"unit\")\n\t\t\t\t\tqueries.append(queryData)\n\t\t\t\tresponseData[\"queries\"] = queries\n\t\t\t\thop[\"responses\"].append(responseData)\n\t\t\tdata[\"hops\"].append(hop)\n\t\treturn data\n\treturn None\n\ndef getHostAddress():\n\ttarget_host = \"api.ipify.org\"\n\trequest = \"GET / HTTP/1.1\\r\\nHost:%s\\r\\n\\r\\n\" % target_host\n\ttarget_port = 80\n\tclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tclient.settimeout(3.0) \n\ttry:\n\t\tclient.connect((target_host,target_port)) \n\t\tclient.send(request.encode()) \n\t\tresponse = client.recv(4096)\n\texcept:\n\t\treturn None\n\tfinally:\n\t\tclient.close()\n\treturn response.decode().split(\"\\r\\n\\r\\n\")[1]\n\ndef calcGeopos(data):\n\tlowCorner = highCorner = (None, None)\n\tfor hop in data[\"hops\"]:\n\t\tif hop[\"geo\"] is not None:\n\t\t\tlatitude = hop[\"geo\"][\"location\"][\"latitude\"]\n\t\t\tlongitude = hop[\"geo\"][\"location\"][\"longitude\"]\n\t\t\tif lowCorner[0] is None or 
longitude < lowCorner[0]:\n\t\t\t\tlowCorner = (longitude, lowCorner[1])\n\t\t\tif lowCorner[1] is None or latitude < lowCorner[1]:\n\t\t\t\tlowCorner = (lowCorner[0], latitude)\n\t\t\tif highCorner[0] is None or longitude > highCorner[0]:\n\t\t\t\thighCorner = (longitude, highCorner[1])\n\t\t\tif highCorner[1] is None or latitude > highCorner[1]:\n\t\t\t\thighCorner = (highCorner[0], latitude)\n\tdata[\"header\"][\"lowCorner\"] = (lowCorner[0] + ((-180 + lowCorner[0]) / 5), lowCorner[1] + ((-85 + lowCorner[1]) / 1.5) )\n\tdata[\"header\"][\"highCorner\"] = (highCorner[0] + ((180 - highCorner[0]) / 5), highCorner[1] + ((85 - highCorner[1]) / 1.5)) \n\treturn data\n\ndef findTimeouts(data):\n\tcpt = 0\n\tfor hop in data[\"hops\"]:\n\t\tfor response in hop[\"responses\"]:\n\t\t\tif not response[\"ip\"] and not response[\"dns\"] and not response[\"queries\"]: cpt = cpt + 1\n\tdata[\"header\"][\"timeouts\"] = str(cpt)\n\treturn data\n\ndef calcStats(data):\n\tdata = findTimeouts(data)\n\t#data[\"hops\"] = [hop for hop in data[\"hops\"] if hop[\"ip\"]]\n\treader = geolite2.reader()\n\tfor hop in data[\"hops\"]:\n\t\thop[\"geo\"] = None\n\t\tif hop[\"id\"] == \"1\":\n\t\t\thop[\"geo\"] = reader.get(getHostAddress()) \n\t\telif hop[\"responses\"][0][\"ip\"]:\n\t\t\thop[\"geo\"] = reader.get(hop[\"responses\"][0][\"ip\"])\n\t\tfor response in hop[\"responses\"]:\n\t\t\tresponse[\"min\"] = response[\"delta\"] = response[\"max\"] = response[\"avg\"] = response[\"unit\"] = sum = 0.\n\t\t\tfor query in response[\"queries\"]:\n\t\t\t\tif response[\"max\"] == 0 or float(query[\"value\"]) > response[\"max\"]: response[\"max\"] = float(query[\"value\"])\n\t\t\t\tif response[\"min\"] == 0 or float(query[\"value\"]) < response[\"min\"]: response[\"min\"] = float(query[\"value\"])\n\t\t\t\tsum = sum + float(query[\"value\"])\n\t\t\t\tresponse[\"unit\"] = query[\"unit\"]\n\t\t\tif len(response[\"queries\"]) > 0: response[\"avg\"] = \"{0:.2f}\".format(sum / len(response[\"queries\"]))\n\t\t\tresponse[\"delta\"] = (response[\"max\"] - response[\"min\"]) / 2\n\treturn(calcGeopos(data))\n\ndef main(filename):\n\tdata = parseFile(filename)\n\tif data is not None:\n\t\tdata = calcStats(data)\t\n\t\tgui = GUI(data)\n\t\tgui.render()\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) < 2:\n\t\tprint(\"Usage: {} ./results.xml\".format(sys.argv[0]))\n\telse:\n\t\tmain(sys.argv[1])\n","sub_path":"analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":10109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52825481","text":"from Model import Model\r\nfrom scipy.integrate import solve_ivp\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom inspect import isfunction\r\n\r\nclass GeneralModel(Model):\r\n def __init__(self, times, B, A, K, iv_list, input_fluxes, xi=1):\r\n self.times = times\r\n self.B = B\r\n self.A = A\r\n self.K = K\r\n self.iv_list = iv_list\r\n self.input_fluxes = input_fluxes\r\n self.xi = xi\r\n self.pool_n = self.get_pool_n()\r\n\r\n self.Y = self.ode_solver()\r\n self.create_headers()\r\n self.df = self.get_df()\r\n self.get_c_input()\r\n self.get_diagnostic_variables()\r\n\r\n def ode_solver(self):\r\n t = [self.times[0], self.times[-1]]\r\n y = solve_ivp(self.right_hand_equation, t, self.iv_list, t_eval = self.times, vectorized = True)\r\n return y\r\n\r\n def get_input(self, t, y):\r\n idx = (np.abs(self.times - t)).argmin()\r\n if self.times[idx] > t:\r\n idx = idx - 1\r\n\r\n if isfunction(self.input_fluxes):\r\n 
self.tmp_input_fluxes = self.input_fluxes(t, y)\r\n else:\r\n if type(self.input_fluxes) == np.ndarray:\r\n self.tmp_input_fluxes = self.input_fluxes[idx]\r\n else:\r\n self.tmp_input_fluxes = self.input_fluxes\r\n\r\n if isfunction(self.B):\r\n self.tmp_B = self.B(t, y)\r\n else:\r\n if type(self.B) == np.ndarray:\r\n if self.B.shape[1] != 1 and self.B.shape[2] != 1:\r\n self.tmp_B = self.B[idx].reshape(self.pool_n, 1)\r\n else:\r\n self.tmp_B = self.B\r\n\r\n if isfunction(self.A):\r\n self.tmp_A = self.A(t, y)\r\n else:\r\n self.tmp_A = self.A\r\n\r\n if isfunction(self.K):\r\n self.tmp_K = self.K(t, y)\r\n else:\r\n self.tmp_K = self.K\r\n\r\n if isfunction(self.xi):\r\n self.tmp_xi = self.xi(t, y)\r\n else:\r\n if type(self.xi) == np.ndarray:\r\n self.tmp_xi = self.xi[idx]\r\n else:\r\n self.tmp_xi = self.xi\r\n\r\n def right_hand_equation(self, t, y):\r\n self.get_input(t, y)\r\n dydt = np.multiply(self.tmp_B, self.tmp_input_fluxes) + np.matmul(np.matmul(self.tmp_xi * self.tmp_A, self.tmp_K), y)\r\n return dydt\r\n\r\n def get_x(self):\r\n return self.Y.y\r\n\r\n def get_df(self):\r\n res = self.Y.y.T\r\n df = pd.DataFrame(res)\r\n df.columns = self.create_headers()\r\n return df\r\n\r\n def write_output(self, filename):\r\n self.df.to_csv(filename, index=False)\r\n\r\n def get_diagnostic_variables(self):\r\n\r\n carbon_storage_capacity = []\r\n carbon_storage_potential = []\r\n residence_time = []\r\n carbon_storage = []\r\n baseline_residence_time = []\r\n\r\n dimensions = self.Y.y.T.shape\r\n X = np.zeros(dimensions)\r\n Xc = np.zeros(dimensions)\r\n Xp = np.zeros(dimensions)\r\n \r\n for i in range(0, len(self.times)):\r\n\r\n t = self.times[i]\r\n Y = self.Y.y\r\n y = Y[:, i]\r\n y = y.reshape([y.shape[0], 1])\r\n\r\n dydt = self.right_hand_equation(t, y)\r\n matrix_AK = np.matmul(self.tmp_xi * self.tmp_A, self.tmp_K)\r\n inverse_AK = np.linalg.inv(matrix_AK)\r\n matrix_Residence = np.matmul(-inverse_AK, self.tmp_B)\r\n Residence_time = np.sum(matrix_Residence)\r\n\r\n baseline_matrix_AK = np.matmul(self.tmp_A, self.tmp_K)\r\n baseline_inverse_AK = np.linalg.inv(baseline_matrix_AK)\r\n matrix_baseline_Residence = np.matmul(-baseline_inverse_AK, self.tmp_B)\r\n baseline_Residence_time = np.sum(matrix_baseline_Residence)\r\n\r\n Xc[i] = np.multiply(matrix_Residence, self.tmp_input_fluxes).reshape(dimensions[1])\r\n X[i] = y.reshape(dimensions[1])\r\n Xp[i] = -np.matmul(inverse_AK, dydt).reshape(dimensions[1])\r\n\r\n carbon_storage_capacity.append(np.sum(np.multiply(matrix_Residence, self.tmp_input_fluxes)))\r\n carbon_storage_potential.append(-np.sum(np.matmul(inverse_AK, dydt)))\r\n residence_time.append(Residence_time)\r\n carbon_storage.append(np.sum(y))\r\n baseline_residence_time.append(np.sum(baseline_Residence_time))\r\n\r\n self.df[\"Tres\"] = residence_time\r\n self.df[\"X\"] = carbon_storage\r\n self.df[\"Xc\"] = carbon_storage_capacity\r\n self.df[\"Xp\"] = carbon_storage_potential\r\n self.df[\"Tbres\"] = baseline_residence_time\r\n self.df[\"xi\"] = self.xi\r\n\r\n self.Xc = pd.DataFrame(Xc)\r\n self.X = pd.DataFrame(X)\r\n self.Xp = pd.DataFrame(Xp)\r\n\r\n def sasu_spinup(self):\r\n #Xss = -(AK)^(-1)BU\r\n dimensions = self.Y.y.T.shape\r\n X = np.zeros(dimensions)\r\n\r\n for i in range(0, len(self.times)):\r\n t = self.times[i]\r\n Y = self.Y.y\r\n y = Y[:, i]\r\n y = y.reshape([y.shape[0], 1])\r\n self.get_input(t, y)\r\n dydt = 0\r\n matrix_AK = np.matmul(self.tmp_xi * self.tmp_A, self.tmp_K)\r\n inverse_AK = np.linalg.inv(matrix_AK)\r\n matrix_BU = 
np.multiply(self.tmp_B, self.tmp_input_fluxes)\r\n X[i] = np.matmul(-inverse_AK, matrix_BU).reshape(dimensions[1])\r\n\r\n col_headers = self.create_headers()\r\n for i in range(0, dimensions[1]):\r\n self.df[col_headers[i]] = X[:, i]\r\n\r\n def create_headers(self):\r\n col_headers = []\r\n rows = self.Y.y.shape[0]\r\n for i in range(0, rows):\r\n col_headers.append(\"X\" + str(i+1))\r\n return col_headers\r\n\r\n def get_x_df(self):\r\n return self.df\r\n\r\n def get_pool_n(self):\r\n return len(self.iv_list)\r\n\r\n def get_c_input(self):\r\n c_input = []\r\n if type(self.input_fluxes) == np.ndarray or type(self.input_fluxes) == list:\r\n c_input = self.input_fluxes\r\n if isfunction(self.input_fluxes):\r\n for i in range(0, len(self.times)):\r\n t = self.times[i]\r\n Y = self.Y.y\r\n y = Y[:, i]\r\n y = y.reshape([y.shape[0], 1])\r\n c_input.append(self.input_fluxes(t, y))\r\n else:\r\n c_input = self.input_fluxes\r\n self.df[\"C_input\"] = c_input\r\n headers = [\"C_input\"] + self.create_headers()\r\n self.df = self.df[headers]","sub_path":"Source_code/unit_2/GeneralModel.py","file_name":"GeneralModel.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561294168","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport pandas as pd\nimport numpy as np\nimport awswrangler as wr\nfrom flask import Flask, render_template, request, session, url_for, redirect, jsonify\nimport os\n\napp = Flask(__name__)\nBUCKET_NAME = os.getenv(\"BUCKET_NAME\", \"covid19-analysis-test\")\nDATABASE_NAME = os.getenv(\"DATABASE_NAME\", \"covid19-database\")\n\n@app.route(\"/process\", methods=[\"GET\", \"POST\"])\ndef process():\n if request.method == 'GET':\n return jsonify({\"message\": \"POST request expected\"})\n json_data = request.json\n\n file_name = json_data[\"file_name\"]\n # Change for read data from S3 using aws wrangler\n try:\n print(\"[INFO] Reading Data from S3\")\n path1 = f\"s3://{BUCKET_NAME}/to_process/{file_name}\"\n df_covid = wr.s3.read_csv([path1])\n # data_path = f\"./{file_name}\"\n # df_covid = pd.read_csv(data_path, sep=',')\n except Exception as e:\n print(f\"[ERROR] {e}\")\n raise e\n \n print(\"[INFO] Building new Dataset\")\n # Get only the lastest cases of Covid from each State\n df_recent_cases = df_covid.loc[df_covid['is_last'] == True]\n df_state_dataset_last = df_recent_cases.loc[df_recent_cases[\"place_type\"] == \"state\"]\n state_cases_death = df_state_dataset_last[['state', 'confirmed', 'deaths']]\n\n # Creating Dataset from dataframe cases\n df_datetime_cases = df_covid.loc[df_covid['is_last'] == False]\n df_datetime_cases['date'] = pd.to_datetime(df_datetime_cases['date'], format='%Y-%m-%d')\n df_datetime_cases_state = df_datetime_cases.loc[df_datetime_cases[\"place_type\"] == \"state\"]\n df_datetime_cases_state = df_datetime_cases_state[['date','state', 'confirmed', 'deaths']]\n\n\n # Upload to Amazon S3 and create Athena Table with AWS Wrangler\n # Storing data on Data Lake\n\n if DATABASE_NAME not in wr.catalog.databases().values:\n wr.catalog.create_database(DATABASE_NAME)\n \n print(\"[INFO] Writing data in S3\")\n wr.s3.to_parquet(\n df=state_cases_death,\n path=f\"s3://{BUCKET_NAME}/processed/agreggated-table\",\n dataset=True,\n database=DATABASE_NAME,\n table=\"covid-brazil-state\",\n mode=\"overwrite\"\n )\n\n wr.s3.to_parquet(\n df=df_datetime_cases_state,\n path=f\"s3://{BUCKET_NAME}/processed/dataframe-table\",\n dataset=True,\n 
database=DATABASE_NAME,\n        table=\"covid-brazil-datetime\",\n        mode=\"overwrite\"\n    )\n\n    return jsonify({\"message\" : f\"File {file_name} processed\"})\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host='0.0.0.0')","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"93460011","text":"import ontology_wrapper\nimport file_interface\nimport time\nimport threading\nimport sys\n\n# The executable file needs to have extra quotation marks to be able to be executed by the os.system command\nGRAPH_EXECUTABLE=r'\"C:\\Users\\Dan\\AppData\\Local\\GraphDB Free\\GraphDB Free.exe\"'\nGRAPH_URL=\"http://192.168.0.19:7200/repositories/company_ownership_ontology\"\n\n# This is the utility interface for access to the ontology outside of the searches\ngraphInterface=ontology_wrapper.Interface(GRAPH_URL)\ngraphInterface.connectToGraph(graphExecutable=GRAPH_EXECUTABLE, graphURL=GRAPH_URL)\n\n# The fringe is a nested dictionary of expanded and unexpanded nodes; it is implemented as a dictionary of\n# string keys and another dictionary stating the parent and whether it has been explored\nnode = {\"parentName\":\"\",\n        \"companyID\" : \"\"}\nfringe={\"nodeName\":node}\n\n# The path is the list of people and companies used to get from the starting node to the goal node\n# The path will be ordered such that it is a list of three strings, as shown below:\n# Index DirectorName      Company Child's Company\n# 0     SULLIVAN DANIEL J CTG     SNDR\n# 1     SULLIVAN DANIEL J SNDR    \n# 1     ....              ...     ...\npossiblePath=[]\npaths=[]\n\nqueriesMade = 0\n\n# Create a threading lock for use when the search needs to query the ontology\nqueryLock = threading.Lock()\nkillLock = threading.Lock()\nkillRequest = 0\n\ndef constructPath(halfwayNode):\n    parent = fringe[halfwayNode[\"name\"]][\"parentName\"]\n    while parent in fringe.keys():\n        possiblePath.append([ parent, possiblePath[-1][2], fringe[parent][\"parentCompany\"] ])\n        parent = fringe[parent][\"parentName\"]\n    possiblePath[-1][1] = possiblePath[-2][2]\n\ndef breadthFirstSearch(ontologyInterface, currentNode={\"name\":\"N/A\",\"companyID\":\"N/A\"}, goalNode=\"N/A\", manageFringe=False):\n    \"\"\"\n    This function implements a breadth first search on the ontology\n    \n    Keyword Arguments:\n        currentNode {dict} -- the name of the starting node (default: {{str:str,str:str,str:str}})\n        goalNode {str} -- the name of the goal node (default: {str})\n        manageFringe {bool} -- parameter used to determine if this search will add the unexpanded nodes to the fringe used in the bidirectional search (default: {bool})\n\n    Returns:\n        results {bool} -- returns True if a solution is found, returns False if there is no path between the nodes\n    \"\"\"\n    global killRequest\n    global possiblePath\n    global paths\n    global queriesMade\n    # Add the starting Node to the fringe\n    if manageFringe:\n        fringe[currentNode[\"name\"]] = {\"parentName\": \"\",\n                                        \"parentCompany\": \"N/A\"}\n\n    # Check if the starting node is the goal node\n    if currentNode[\"name\"] == goalNode:\n        return True\n    frontier=[currentNode]\n    explored = {}\n\n    while True:\n        # Check if the other thread has finished the search\n        if killRequest == 1 or killLock.locked():\n            sys.exit()\n\n        # Check if we have explored all available nodes\n        if frontier==[]:\n            return False\n        currentNode=frontier.pop()\n        explored[currentNode[\"name\"]] = currentNode\n        \n        # Acquire the ontology lock and return all the directors connected to the current node 
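For reference, the core filtering step of the `/process` handler above is plain pandas once the CSV is loaded. A toy sketch with an in-memory frame standing in for the S3 read — the column names follow the layout the handler assumes:

```python
# Toy stand-in for the S3 CSV read in /process; columns follow the
# layout assumed by the handler above.
import pandas as pd

df_covid = pd.DataFrame({
    "state":      ["SP", "SP", "RJ"],
    "place_type": ["state", "state", "state"],
    "is_last":    [True, False, True],
    "confirmed":  [300, 250, 120],
    "deaths":     [20, 18, 9],
})

df_recent = df_covid.loc[df_covid["is_last"] == True]
state_cases_death = df_recent.loc[df_recent["place_type"] == "state",
                                  ["state", "confirmed", "deaths"]]
print(state_cases_death)
```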
by one intermediate company\n queryLock.acquire()\n children=ontologyInterface.queryOntology(currentNode)\n queriesMade+=2\n queryLock.release()\n\n # Add the children to the fringe as they are discovered\n if manageFringe:\n for child in children:\n fringe[child[\"name\"]] = {\"parentName\":currentNode[\"name\"],\n \"parentCompany\": child[\"companyID\"]}\n \n\n # For all of the children of the currently selected node, check if it is the goal node\n for child in children:\n child[\"parent\"] = currentNode[\"name\"]\n # Only check the child nodes if we have not previously explored them\n if not(child[\"name\"] in frontier or child[\"name\"] in explored.keys()):\n if child[\"name\"] == goalNode:\n # The goal node has been found, create the path from the goal node back to the start node\n possiblePath.insert(0, [child[\"name\"], child[\"companyID\"], \"N/A\"])\n parent = explored[child[\"parent\"]]\n while parent != None:\n possiblePath.insert(0, [parent[\"name\"], parent[\"companyID\"], possiblePath[0][1]])\n if \"parent\" in (explored[parent[\"name\"]].keys()):\n parent = explored[parent[\"parent\"]]\n else:\n parent=None\n return True\n\n # Check if the current node is in the fringe, if true then construct the path from the halfway point and return true\n elif child[\"name\"] in fringe.keys() and manageFringe == False:\n # Kill the thread if we have already found a path\n if len(paths) > 1:\n # Flag the kill request for the other thread\n killLock.acquire()\n killRequest = 1\n killLock.release()\n\n # Construct the end of the path from the halfway node\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"], fringe[child[\"name\"]][\"parentCompany\"] ] )\n constructPath(child)\n possiblePath[0][2] = possiblePath[1][1] \n\n # Construct the path from the halfway point back to the start node\n parent = explored[child[\"parent\"]]\n while parent != None:\n possiblePath.insert(0, [parent[\"name\"], parent[\"companyID\"], possiblePath[0][1]])\n if \"parent\" in (explored[parent[\"name\"]].keys()):\n parent = explored[parent[\"parent\"]]\n else:\n parent=None\n paths.append(possiblePath)\n possiblePath = []\n \n # If not the goal node, add the child to the front of the frontier\n frontier.insert(0, child)\n\ndef recursiveDLS(ontologyInterface, currentNode={\"name\":\"N/A\",\"compnayID\":\"N/A\"}, goalNode=\"N/A\", limit=0, manageFringe=False):\n \"\"\"\n Implement the recursive function of a deepening search\n \n Arguments:\n currentNode {dict} -- Denotes the name of the current node selected for expansion in the search (default: {{str:str, str:str}})\n goalNode {str} -- Denotes the name of the goal node of the search (default: {str})\n limit {int} -- The depth limit (how many layers down of expansion from this node left) (default: {int})\n search {bool} -- Denotes which of the two searches being implemented we are in, used to know\n whether to add to the fringe or not (default: {bool})\n \n Returns:\n results {bool/string} -- returns whether the search was successful, the search failed or reached a cutoff point\n \"\"\"\n # Check if the other tread has finished the search\n global killRequest\n global possiblePath\n global paths\n global queriesMade\n if killRequest == 1 or killLock.locked():\n sys.exit()\n\n # Check if the current node is the goal node\n if currentNode[\"name\"] == goalNode:\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"],\"N/A\"])\n return True\n\n # Check if the current node is in the fringe, if true then construct the path 
from the halfway point and return true\n elif currentNode[\"name\"] in fringe.keys() and manageFringe == False:\n if len(paths) > 1:\n # Flag the kill request for the other thread\n killLock.acquire()\n killRequest = 1\n killLock.release()\n\n # Construct the end of the path from the other search\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"], fringe[currentNode[\"name\"]][\"parentCompany\"] ] )\n constructPath(currentNode)\n possiblePath[0][2] = possiblePath[1][1]\n paths.append(possiblePath)\n possiblePath=[]\n\n # Check if we are at the cutoff\n elif limit==0:\n return \"cutoff\"\n else:\n cuttoffOccurred=False\n # The goal node has not yet been found and we are within the current set limit so we\n # iterate deeper, if there are no children then cutoff never occurred --> failure is returned\n queryLock.acquire()\n children=ontologyInterface.queryOntology(currentNode)\n queriesMade+=2\n queryLock.release()\n\n # Add the children to the fringe as they are discovered\n if manageFringe:\n for child in children:\n fringe[child[\"name\"]] = {\"parentName\":currentNode[\"name\"],\n \"parentCompany\": child[\"companyID\"]}\n\n # Process the discovered children\n for child in children:\n result=recursiveDLS(ontologyInterface, child, goalNode, limit-1, manageFringe)\n # If the child is beyond the depth limit\n if result==\"cutoff\":\n cuttoffOccurred=True\n\n # If the child is the goal node, as the recursive function propagates up we add the current node to the front of the path\n elif result==True:\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"], possiblePath[0][1]])\n return result\n\n # If the goal node has not been found within the depth limit\n if cuttoffOccurred:\n return \"cutoff\"\n\n # If the goal has not been found and we are within the depth limit\n else:\n return False\n\ndef iterativeDeepening(ontologyInterface, startNode=\"N/A\", goalNode=\"N/A\", maxDepth=15, manageFringe=False):\n \"\"\"\n This function is used to implement a iterative deepening search on the ontology, \n up to the maximum depth of the ontology.\n \n Arguments:\n startNode {str} -- Denotes the name of the starting node of the search (default: {str})\n goalNode {str} -- Denotes the name of the goal node of the search (default: {str})\n maxDepth {int} -- The maximum depth of the ontology, retrieved from the ontology connections file (default: {int})\n manageFringe {bool} -- Boolean variable used to indicate which of the two searches is currently being run (default: {bool})\n \n Returns:\n result {boolean} -- Returns whether or not the search was successful, if it was then the Path list will contain the optimal path\n \"\"\"\n # If this search is managing the fringe, add the starting node to the fringe\n if manageFringe:\n fringe[startNode] = {\"parentName\": \"\",\n \"parentCompany\": \"N/A\"}\n\n startNode={\"name\":startNode, \"companyID\": \"N/A\"}\n for depth in range(1, maxDepth):\n result = recursiveDLS(ontologyInterface, startNode, goalNode, depth, manageFringe)\n if result != \"cutoff\":\n return result\n # Need to reset the expanded companies at each iteration\n ontologyInterface.resetExpandedCompanies()\n\ndef calculateCost(node={\"name\":\"N/A\",\"compnayID\":\"N/A\"}, depth=0):\n \"\"\"\n This function calculates the estimated cost of the node that it is given, based on \n the number of intermediaries between it and the starting node and the connectivity \n of the node. 
This function uses the utility graph interface, so as not to interfere\n with the other searches expanded companies and filtering in place\n \n Keyword Arguments:\n node {dict} -- the name and company ID of the current node selected for cost \n estimation (default: {{str:str, str:str}})\n depth {int} -- The current depth of the search (default: {int})\n \n Returns:\n int -- The estimated cost of the node\n \"\"\"\n global queriesMade\n # Acquire the ontology lock and get the connectivity of the next node\n queryLock.acquire()\n graphInterface.resetExpandedCompanies()\n connections=len(graphInterface.queryOntology(node))\n queriesMade+=2\n queryLock.release()\n \n depthFactor = (1 - depth/6) if depth < 6 else 1\n connectionFactor = connections/50 \n cost = 1 - 0.6*connectionFactor + 0.4*depthFactor\n return cost\n\ndef RBFS(ontologyInterface, currentNode={\"name\":\"N/A\",\"companyID\":\"N/A\"}, goalNode=\"N/A\", fLimit=15, depth=0, parentCompanies=[], manageFringe=False):\n \"\"\"\n Implement the recursive function of the best first search\n \n Keyword Arguments:\n currentNode {dict} -- Denotes the name of the current node selected for \n expansion in the search (default: {{str:str, str:str}})\n goalNode {str} -- Denotes the name of the goal node of the search (default: {str})\n fLimit {int} -- The maximum cost of the node we are willing to expand (default: {int})\n manageFringe {bool} -- Boolean variable used to indicate which of the two searches is currently being run (default: {bool})\n\n Returns:\n result {boolean} -- Returns whether or not the search was successful, if it was \n then the Path list will contain the optimal path between the nodes\n \"\"\"\n # Check if the other tread has finished the search\n global killRequest\n global possiblePath\n global paths\n global queriesMade\n if killRequest == 1 or killLock.locked():\n sys.exit()\n\n # Check if the currentNode is the goal node\n if currentNode[\"name\"] == goalNode:\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"], \"N/A\"] )\n return True\n # Check if the current node is in the fringe, if true then construct the path \n # from the halfway point and return true\n elif currentNode[\"name\"] in fringe.keys() and manageFringe == False:\n # Flag the kill request for the other thread\n killLock.acquire()\n killRequest = 1\n killLock.release()\n\n # Construct the end of the path from the other search\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"], fringe[currentNode[\"name\"]][\"parentCompany\"] ] )\n constructPath(currentNode)\n possiblePath[0][2] = possiblePath[1][1]\n paths.append(possiblePath)\n possiblePath=[]\n \n newList = parentCompanies[:]\n newList.append(currentNode[\"companyID\"])\n ontologyInterface.setExpandedCompanies(newList)\n # Acquire the ontology lock and return all the people connected to the current \n # node by one intermidiate company\n queryLock.acquire()\n successors=ontologyInterface.queryOntology(currentNode)\n queriesMade+=2\n queryLock.release()\n depth += 1\n if successors == []:\n return False, 1000000\n \n # Set the cost of the successors to the maximum between the cost of the current \n # node and the sum of the cost so far (i.e. 
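The heuristic in `calculateCost` is easier to judge with numbers: more connections lower the cost (the 0.6 weight), and depth discounts the cost until the 6-hop cap resets the depth factor to 1. A stand-alone evaluation of the same formula:

```python
# The cost formula from calculateCost(), evaluated at a few points.
def node_cost(connections, depth):
    depth_factor = (1 - depth / 6) if depth < 6 else 1
    connection_factor = connections / 50
    return 1 - 0.6 * connection_factor + 0.4 * depth_factor

for connections, depth in [(0, 0), (50, 0), (10, 3), (10, 8)]:
    print(connections, depth, round(node_cost(connections, depth), 3))
# -> 1.4, 0.8, 1.08, 1.28: well-connected nodes score cheaper, and
#    depth reduces the cost until the 6-hop cap kicks back in.
```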
the depth) and their estimated cost\n for s in successors:\n s[\"cost\"] = max(currentNode[\"cost\"], depth+calculateCost(s, depth))\n # If this search is required to manage the fringe, add all of the successors \n # to the fringe as they are generated\n if manageFringe:\n fringe[s[\"name\"]] = {\"parentName\":currentNode[\"name\"],\n \"parentCompany\": s[\"companyID\"]}\n\n # Sort the successors into cost order\n successors=sorted(successors, key=lambda x: (x[\"cost\"]))\n\n # Check if cost of all children is greater than the fLimit\n if successors[0][\"cost\"] > fLimit:\n return False, successors[0][\"cost\"]\n\n best=successors.pop(0)\n alternative=successors[0]\n while True:\n # Recursively explore the graph\n result = RBFS(ontologyInterface, best, goalNode, min(fLimit, alternative[\"cost\"]), depth, newList, manageFringe)\n # If the goal node has been found, generate the path as the search ascends\n # the recursion calls\n if result == True:\n possiblePath.insert(0, [currentNode[\"name\"], currentNode[\"companyID\"], possiblePath[0][1]] )\n return True\n elif result[1] != 1000000:\n # Set best node cost to the cost of their children\n best[\"cost\"] = result[1]\n successors.append(best)\n successors=sorted(successors, key=lambda x: (x[\"cost\"]))\n best = successors.pop(0)\n alternative=successors[0]\n\ndef recursiveBestFirstSearch(ontologyInterface, startName=\"N/A\", goalName=\"N/A\", fLimit=100, manageFringe=False):\n \"\"\"\n This function implements a version of the best first search algorithm, the base cost is 1 as there is no actual distance between any node and the goal node\n \n Keyword Arguments:\n startNode {str} -- Denotes the name of the starting node of the search (default: {str})\n goalNode {str} -- Denotes the name of the goal node of the search (default: {str})\n fLimit {int} -- The Maximum cost of a node that we are willing to expand (default: {int})\n manageFringe {bool} -- Boolean variable used to indicate if this search should be adding unexplored nodes to the fringe (default: {bool})\n\n Returns:\n result {boolean} -- Returns whether or not the search was successful, if it was then the Path list will contain the optimal path\n \"\"\"\n # If this search is managing the fringe, add the starting node to the fringe\n if manageFringe:\n fringe[startName] = {\"parentName\": \"\",\n \"parentCompany\": \"N/A\"}\n initialNode = {\"name\":startName, \"companyID\":\"N/A\"}\n initialNode[\"cost\"] = 0\n return RBFS(ontologyInterface, initialNode, goalName, fLimit=fLimit, depth=0, manageFringe=manageFringe)\n\ndef bidirectionalSearch():\n \"\"\"\n The purpose of this function is to run two searches concurrently, one starting from the start node, the other from\n the goal node. 
With an aim to meet in the middle, in order to reduce the execution time of the search.\n\n At least one search must manage a fringe.\n \"\"\"\n global possiblePath\n global paths\n global killRequest\n global fringe\n global queriesMade\n ontology_1=ontology_wrapper.Interface(GRAPH_URL)\n ontology_2=ontology_wrapper.Interface(GRAPH_URL)\n totalTime = 0\n\n startName = \"Pyle Robert D\"\n goalNames = [\"Wallace Mark E\",\n \"WOLFE ROBERT H\",\n \"WOOD PHOEBE A\",\n \"WHITE MILES D\",\n \"HERNANDEZ ENRIQUE JR\",\n \"FABRIKANT CHARLES\",\n \"WEBSTER STEVEN A\"]\n startNode={\"name\" : startName,\"companyID\" : \"N/A\"}\n for i in range(0, len(goalNames)):\n goalNode={\"name\" : goalNames[i],\"companyID\" : \"N/A\"}\n # Start running two searches concurrently, with each search starting from the opposite end of the relationship\n # Dual Iterative deepening searches\n # searchA = \"IDS\"\n # searchB = \"IDS\"\n # t1 = threading.Thread(target=iterativeDeepening, args=[ontology_1, startName, goalNames[i], 10, False])\n # t2 = threading.Thread(target=iterativeDeepening, args=[ontology_2, goalNames[i], startName, 10, True])\n # Dual breadth first searches\n # searchA = \"BFS\"\n # searchB = \"BFS\"\n # t1 = threading.Thread(target=breadthFirstSearch, args=[ontology_1, startNode, goalNames[i], False])\n # t2 = threading.Thread(target=breadthFirstSearch, args=[ontology_2, goalNode, startName, True])\n # Dual best first searches\n # searchA = \"RBFS\"\n # searchB = \"RBFS\"\n # t1 = threading.Thread(target=recursiveBestFirstSearch, args=[ontology_1, startName, goalNames[i], 10, False])\n # t2 = threading.Thread(target=recursiveBestFirstSearch, args=[ontology_2, goalNames[i], startName, 10, True])\n # 1 IDS 1 BFS\n # searchA = \"BFS\"\n # searchB = \"IDS\"\n # t1 = threading.Thread(target=iterativeDeepening, args=[ontology_1, startName, goalNames[i], 10, False])\n # t2 = threading.Thread(target=breadthFirstSearch, args=[ontology_2, goalNode, startName, True])\n # searchA = \"IDS\"\n # searchB = \"BFS\"\n # t1 = threading.Thread(target=iterativeDeepening, args=[ontology_1, startName, goalNames[i], 10, True])\n # t2 = threading.Thread(target=breadthFirstSearch, args=[ontology_2, goalNode, startName, False])\n # 1 IDS 1 RBFS\n # searchA = \"RBFS\"\n # searchB = \"IDS\"\n # t1 = threading.Thread(target=iterativeDeepening, args=[ontology_1, startName, goalNames[i], 10, False])\n # t2 = threading.Thread(target=recursiveBestFirstSearch, args=[ontology_2, goalNames[i], startName, 10, True])\n # t1 = threading.Thread(target=iterativeDeepening, args=[ontology_1, startName, goalNames[i], 10, True])\n # t2 = threading.Thread(target=recursiveBestFirstSearch, args=[ontology_2, goalNames[i], startName, 10, False])\n # # 1 BFS 1 RBFS\n # searchA = \"RBFS\"\n # searchB = \"BFS\"\n # t2 = threading.Thread(target=breadthFirstSearch, args=[ontology_2, goalNode, startName, True])\n t1 = threading.Thread(target=recursiveBestFirstSearch, args=[ontology_1, startName, goalNames[i], 10, True])\n t2 = threading.Thread(target=breadthFirstSearch, args=[ontology_2, goalNode, startName, False])\n startTime = time.time()\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n totalTime += (time.time() - startTime)\n paths.append(possiblePath)\n print(queriesMade)\n queriesMade=0\n ontology_1.resetExpandedCompanies()\n ontology_2.resetExpandedCompanies()\n possiblePath=[]\n paths=[]\n fringe={}\n killRequest=0\n # time.sleep(2)\n\n # averageTime = totalTime/25\n # file_interface.writeSearchTimes(searchA, searchB, averageTime)\n 
time.sleep(10)\n\ndef singleSearch():\n global paths\n global possiblePath\n global fringe\n global queriesMade\n ontology_1=ontology_wrapper.Interface(GRAPH_URL)\n totalTime = 0\n search = \"\"\n startName = \"Pyle Robert D\"\n goalNames = [\"Wallace Mark E\",\n \"WOLFE ROBERT H\",\n \"WOOD PHOEBE A\",\n \"WHITE MILES D\",\n \"HERNANDEZ ENRIQUE JR\"]\n startNode={\"name\" : startName,\"companyID\" : \"N/A\"}\n goalNode={\"name\" : goalNames[0],\"companyID\" : \"N/A\"}\n for i in range(0,len(goalNames)):\n startTime = time.time()\n # # Iterarative Deepening Search\n # iterativeDeepening(ontology_1, startName, goalNames[i], 10, False)\n # search = \"IDS\"\n # Breadth First Search\n # breadthFirstSearch(ontology_1, startNode, goalNames[i], False)\n # search = \"BFS\"\n # Best First Search\n recursiveBestFirstSearch(ontology_1, startName, goalNames[i], 10, False)\n search = \"RBFS\"\n totalTime += (time.time() - startTime)\n paths.append(possiblePath)\n print(queriesMade)\n queriesMade=0\n ontology_1.resetExpandedCompanies()\n possiblePath=[]\n paths = []\n # averageTime = totalTime/25\n # file_interface.writeSearchTimes(search, i+2, averageTime)\n time.sleep(30)\n\nif __name__==\"__main__\":\n bidirectionalSearch()\n # singleSearch()","sub_path":"Software/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":23417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211331615","text":"# TO-DO: Complete the selection_sort() function below \ndef selection_sort( arr ):\n # loop through n-1 elements\n for i in range(0, len(arr) - 1):\n cur_index = i\n smallest_index = cur_index\n # TO-DO: find next smallest element\n # (hint, can do in 3 loc) \n for x in range(i,len(arr)):\n if(arr[x] < arr[smallest_index]):\n smallest_index = x\n \n # TO-DO: swap\n t = arr[cur_index]\n arr[cur_index] = arr[smallest_index]\n arr[smallest_index] = t\n\n return arr\n\n\n# TO-DO: implement the Bubble Sort function below\ndef bubble_sort( arr ):\n for i in range(0, len( arr )-1):\n for x in range(0, len( arr )-1):\n if(arr[x] > arr[x+1]):\n t = arr[x]\n arr[x] = arr[x+1]\n arr[x+1] = t\n\n return arr\n\n\n# STRETCH: implement the Count Sort function below\ndef count_sort( arr, maximum=-1 ):\n if(len(arr) == 0):\n return arr\n elif(any([True if x < 0 else False for x in arr])):\n return \"Error, negative numbers not allowed in Count Sort\"\n \n # Make counting list of length = largest integer in arr\n count_arr = [0]*(max(arr)+1)\n out_arr = [0]*(len(arr))\n\n # Count instances of each numberin the counting list\n for x in arr:\n count_arr[x] = count_arr[x] + 1\n\n for x in range(0,len(count_arr)-1):\n count_arr[x+1] = count_arr[x+1] + count_arr[x]\n\n for x in arr[::-1]:\n index = count_arr[x] - 1\n out_arr[index] = x\n\n arr = out_arr\n\n return arr","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397687748","text":"###################################################################\n# Author : Sivaprakash.B #\n# Email : sivaprakash674@gmail.com #\n# Purpose : Android or ADB log analysing tool on python #\n###################################################################\n\n\n# Validating the import as python versions < 3 supports Tkinter ( T - Upper Case )\n# And python versions >3 supports tkinter ( T - Lower Case )\n\ntry:\n import Tkinter as tkin # This is 
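The record above ends the bidirectional driver. Its essential meet-in-the-middle mechanic — one search populating a shared fringe, the other terminating when it touches it — can be shown on a toy in-memory graph. A self-contained sketch; the adjacency dict is hypothetical, and stitching the parent chains into a full path (what `constructPath` does) is left as a comment:

```python
# Meet-in-the-middle BFS on a hypothetical adjacency dict; real queries
# go through ontology_wrapper.Interface instead.
from collections import deque

edges = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": ["E"], "E": []}
undirected = {n: set() for n in edges}
for n, nbrs in edges.items():
    for m in nbrs:
        undirected[n].add(m)
        undirected[m].add(n)

def bidirectional_bfs(start, goal):
    if start == goal:
        return start
    parents = {start: None}                 # forward search bookkeeping
    fringe = {goal: None}                   # backward search's shared fringe
    frontier_f, frontier_b = deque([start]), deque([goal])
    while frontier_f and frontier_b:
        for frontier, seen, other in ((frontier_f, parents, fringe),
                                      (frontier_b, fringe, parents)):
            if not frontier:
                continue
            node = frontier.popleft()
            for nxt in undirected[node]:
                if nxt in seen:
                    continue
                seen[nxt] = node
                if nxt in other:            # the two searches met here;
                    return nxt              # stitch the parent chains from
                frontier.append(nxt)        # both sides to recover the path,
    return None                             # as constructPath() does above

print(bidirectional_bfs("A", "E"))          # -> "D", the halfway node
```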
for python2\n    import tkFileDialog                      # For providing dialog box option for selecting files\n    import tkMessageBox                      # For displaying message box alerts\n    import webbrowser                        # For converting text into clickable links\nexcept:\n    import tkinter as tkin                   # This is for python3\n    from tkinter import filedialog as tkFileDialog   # For providing dialog box option for selecting files\n    from tkinter import messagebox as tkMessageBox   # For displaying message box alerts\n    import webbrowser                        # For converting text into clickable links\n\nimport subprocess                            # For validating the output on the search query\n\n\n# Declaring variables required for the code \n\nm=tkin.Tk()                                  # Creating root object ( m ) of the Tkinter or tkinter module\nm.title('LogPad')                            # Adding title to the root object or the GUI Window.\nm.resizable(0,0)                             # Setting the window not to be resized due to python limitations\n\nfilename =\"\"                                 # Variable to hold the file path of the file to be analysed\nbgcolor='black'                              # Variable to hold Background color for the theme\nfgcolor='green'                              # Variable to hold foreground or text color for the theme\n\n# Variable to hold the color code for the logs based on the log type filter. \n\nlogscolorcode = {'E':'red', 'W':'yellow','D':'green', 'I':'white','V':'blue','all':'white'} \n\n# Variable to hold the text with a link to reach the GitHub page.\n\ntextforrating = \"If you like the project mark the star at https://github.com/sivapraksh674/logpad \\n\"\n\n# Top frame holds the file selection and file path display section \n\ntopframe = tkin.Frame(m)\ntopframe.configure(bg=bgcolor)\ntopframe.pack( side = tkin.TOP, expand=tkin.TRUE)\n\n# Search frame is used to set the search filters and has a search box to enter the text for search\n\nsearchframe = tkin.Frame(m)\nsearchframe.configure(bg=bgcolor)\nsearchframe.pack( side = tkin.TOP,expand=tkin.TRUE )\n\n# Bottom frame holds the Log Area to display the logs.\n\nbottomframe = tkin.Frame(m)\nbottomframe.configure(bg=bgcolor)\nbottomframe.pack( side = tkin.TOP, expand=tkin.TRUE )\n\n\n# Function to select the file to parse the logs.\n\ndef SelectFile():\n    global filename\n    print (textforrating)\n    filename = tkFileDialog.askopenfilename(initialdir = \".\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n    \n    # Validation to check if a file is selected or not \n    \n    if len(filename) == 0 :\n        return\n    \n    filepatharea.delete('1.0', tkin.END)\n    filepatharea.insert(tkin.INSERT,filename)\n    searchquery = \"awk '{ print;}' \"+ filename\n    logarea.config(fg=logscolorcode[logtype.get()]) \n    logarea.insert(tkin.INSERT,subprocess.check_output(searchquery,shell=True))\n\n# Search function to filter the logs based on selection \n\ndef SearchFunction():\n    global filename,logtype,category\n    print (textforrating)\n\n    # Validation to check if a file is selected or not \n    if len(filename) == 0 :\n        tkMessageBox.showerror(\"Error\", \"Please select the log file to perform search\")\n        return\n    \n    # Clearing Log Area for new search results\n    \n    logarea.delete('1.0', tkin.END)\n\n    # Validation to check if the search should be performed only on the class name or not.\n\n    if category.get() :\n        if logtype.get() == \"all\" :\n            searchquery = \"awk '{ print $6;}' \"+ filename +\" | sort -u \"\n        else :\n            searchquery = \"awk '{ if( $5 == \\\"\"+logtype.get()+\"\\\" ) print $6;}' \" + filename + \" | sort -u \"\n    else :\n        if logtype.get() == \"all\" :\n            searchquery = \"awk '{ print;}' \"+ filename\n        else :\n            searchquery = \"awk '{ if( $5 == \\\"\"+logtype.get()+\"\\\" ) print;}' \"+ filename  \n    if searchbox.get(\"1.0\",'end-1c') :\n        
searchquery = searchquery+\" | grep \\\"\"+ searchbox.get(\"1.0\",'end-1c') + \"\\\" \"\n    print ( \"\\n\" + searchquery ) \n    \n    logarea.config(fg=logscolorcode[logtype.get()]) \n    logarea.insert(tkin.INSERT,subprocess.check_output(searchquery,shell=True))\n\n# Event callback for key release events in the search box\n\ndef HandleKeyRelease (e) :\n    print ( \"Keypressed\" , e.char )\n    SearchFunction()\n\n# Callback function to call a web URL\n\ndef callback(url):\n    webbrowser.open_new(url)\n\n# File Path label creation and packing to the view area\n\nfilepathlabel = tkin.Label(topframe,justify = tkin.LEFT)\nfilepathlabel.config(text = \"File Path :\",bg=bgcolor,fg=fgcolor)\nfilepathlabel.pack(anchor=tkin.W)\n\n# File Path creation and packing to the view area\n\nfilepatharea = tkin.Text(topframe,wrap=tkin.WORD,width=200, height= 2)\nfilepatharea.configure(bg=bgcolor,fg=fgcolor,highlightbackground=bgcolor)\nfilepatharea.pack(fill=\"none\", expand=tkin.TRUE)\n\n# Select File action button creation and packing to the view area\n\nselectfilebutton = tkin.Button(topframe, text='SELECT FILE',command=SelectFile)\nselectfilebutton.pack(side=tkin.BOTTOM)\n\n# ( Deprecated ) Get Logs action button creation and packing to the view area\n\n#searchbutton = tkin.Button(topframe, text='Search', bg='green',fg='white', command=SearchFunction)\n#searchbutton.pack(fill=\"none\",side=tkin.RIGHT)\n\n# Search box label creation and packing to the view area\n\nsearchboxlabel = tkin.Label(searchframe,justify = tkin.LEFT)\nsearchboxlabel.config(text = \"Search Area\",bg=bgcolor,fg=fgcolor)\nsearchboxlabel.pack(side=tkin.LEFT)\n \n# Search box text area creation and packing to the view area\n\nsearchbox=tkin.Text(searchframe, height=2, width=50, borderwidth=2, relief=tkin.GROOVE)\nsearchbox.configure(highlightbackground=bgcolor)\nsearchbox.bind('<KeyRelease>',HandleKeyRelease)\nsearchbox.pack(side=tkin.LEFT)\n\n\n# Class name Category selection Check Box creation\n\ncategory = tkin.IntVar()\nclassnamecheckbox = tkin.Checkbutton(searchframe,variable=category,command=SearchFunction)\nclassnamecheckbox.config(bg=bgcolor)\nclassnamecheckbox.pack(side=tkin.LEFT)\n\n# Check box label creation and packing to the view area\n\nclassnamecheckboxlabel = tkin.Label(searchframe,justify = tkin.LEFT)\nclassnamecheckboxlabel.config(text = \"Search Class Name Alone\",bg=bgcolor,fg=fgcolor)\nclassnamecheckboxlabel.pack(side=tkin.LEFT)\n\n\n# Logs Type ( E - Error, W - Warning, D - Debug, I - Information, V - Verbose , ALL ) Category selection \n# Radio box creation, packing and default option selection.\n\nlogtype= tkin.StringVar()\ntkin.Radiobutton(searchframe, \n                value='E',           # E - Error Category Radio Button \n                variable=logtype, \n                command=SearchFunction,\n                bg=bgcolor,fg=fgcolor).pack(side=tkin.LEFT)\ntkin.Label(searchframe,justify = tkin.LEFT, text = \"E\",bg=bgcolor,fg=logscolorcode['E'], padx = 20).pack(side=tkin.LEFT)\n\ntkin.Radiobutton(searchframe, \n                value='W',           # W - Warning Category Radio Button \n                variable=logtype, \n                command=SearchFunction,\n                bg=bgcolor,fg=fgcolor).pack(side=tkin.LEFT)\ntkin.Label(searchframe,justify = tkin.LEFT, text = \"W\",bg=bgcolor,fg=logscolorcode['W'], padx = 20).pack(side=tkin.LEFT)\n\ntkin.Radiobutton(searchframe, \n                value='D',           # D - Debug Category Radio Button \n                variable=logtype, \n                command=SearchFunction,\n                bg=bgcolor,fg=fgcolor).pack(side=tkin.LEFT)\ntkin.Label(searchframe,justify = tkin.LEFT, text = \"D\",bg=bgcolor,fg=logscolorcode['D'], padx = 20).pack(side=tkin.LEFT)\n\ntkin.Radiobutton(searchframe, \n                value='I',           # I - 
Info Category Radio Button \n                variable=logtype,\n                command=SearchFunction, \n                bg=bgcolor,fg=fgcolor).pack(side=tkin.LEFT)\ntkin.Label(searchframe,justify = tkin.LEFT, text = \"I\",bg=bgcolor,fg=logscolorcode['I'], padx = 20).pack(side=tkin.LEFT)\n\ntkin.Radiobutton(searchframe, \n                value='V',           # V - Verbose Category Radio Button \n                variable=logtype, \n                command=SearchFunction,\n                bg=bgcolor,fg=fgcolor).pack(side=tkin.LEFT)\ntkin.Label(searchframe,justify = tkin.LEFT, text = \"V\",bg=bgcolor,fg=logscolorcode['V'], padx = 20).pack(side=tkin.LEFT)\n\nR1=tkin.Radiobutton(searchframe, \n                value='all',         # ALL Category Radio Button \n                variable=logtype, \n                command=SearchFunction,\n                bg=bgcolor,fg=fgcolor)\nR1.pack(side=tkin.LEFT)\nR1.select()\ntkin.Label(searchframe,justify = tkin.LEFT, text = \"ALL\",bg=bgcolor,fg=logscolorcode['all'], padx = 20).pack(side=tkin.LEFT)\n\n\n# Log Area Label creation and packing to the view area\n\nlogarealabel = tkin.Label(bottomframe,justify = tkin.LEFT)\nlogarealabel.config(text = \"Log Area\", bg=bgcolor,fg=fgcolor)\nlogarealabel.pack(anchor=tkin.W)\n\n# Log Area creation and packing to the view area\n\nlogarea = tkin.Text(bottomframe,wrap=tkin.WORD,width=200, height= 30, borderwidth=2, relief=tkin.GROOVE, bg=bgcolor, fg=fgcolor)\nlogarea.pack(fill=\"none\", expand=tkin.TRUE)\n\n# Added hyperlink to the GitHub Repository \n\ngithublink = tkin.Label(m, text=\"If you like the project click here to mark the star at github \\n https://github.com/sivapraksh674/logpad\", bg=bgcolor, fg=fgcolor, cursor=\"hand2\")\ngithublink.pack()\ngithublink.bind(\"<Button-1>\", lambda e: callback(\"https://github.com/sivapraksh674/logpad\"))\n\n# Adding config to the Root Element ( m )\n\nm.config(bg=bgcolor)\nm.mainloop()\n","sub_path":"logpad.py","file_name":"logpad.py","file_ext":"py","file_size_in_byte":9868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"17687995","text":"import numpy as np\nimport random\n\n\ndef gaussX(N, variance=1):\n    \"\"\"\n    a) Write a function named “gaussX” that returns samples drawn from the\n    distribution along with the corresponding class labels.\n\n    Inputs:\n    N - desired number of samples\n    variance - variance of desired circular symmetric Gaussian\n    \"\"\"\n    # split N approximately equally\n    first_half = int(N/2)\n    second_half = N - first_half\n\n    # identify labels for the classes\n    label_13 = -1\n    label_24 = 1\n\n    # generate samples of circular symmetric Gaussian for 2nd and 4th quadrants\n    samples_24_r = np.sqrt(variance)*np.random.randn(first_half,1)\n    samples_24_theta = -np.random.rand(first_half,1)*(np.pi/2)\n    samples_24_x1 = samples_24_r*np.cos(samples_24_theta)\n    samples_24_x2 = samples_24_r*np.sin(samples_24_theta)\n\n    # generate samples of circular symmetric Gaussian for 1st and 3rd quadrants\n    # (use second_half so the sample count matches the label vector when N is odd)\n    samples_13_r = np.sqrt(variance)*np.random.randn(second_half,1)\n    samples_13_theta = np.random.rand(second_half,1)*(np.pi/2)\n    samples_13_x1 = samples_13_r*np.cos(samples_13_theta)\n    samples_13_x2 = samples_13_r*np.sin(samples_13_theta)\n\n    # combine both classes into single vectors for each coordinate\n    samples_x1 = np.append(samples_24_x1, samples_13_x1)\n    samples_x2 = np.append(samples_24_x2, samples_13_x2)\n\n    # generate label vector\n    classes = np.append(label_24*np.ones((first_half, 1)),\n                        label_13*np.ones((second_half, 1)))\n\n    # combine all into single Nx3 array\n    samples = np.stack((samples_x1,samples_x2,classes),axis=-1)\n\n    return 
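LogPad's `SearchFunction` shells out to `awk`/`grep`/`sort -u`, which ties it to a Unix userland. A pure-Python equivalent of the same field filters — `$5` is the log level and `$6` the tag in the whitespace-split line format the tool assumes — would look roughly like this sketch:

```python
# Pure-Python version of the awk/grep/sort -u pipeline built above.
def filter_log(path, level="all", class_only=False, needle=""):
    out = []
    with open(path, encoding="utf-8", errors="replace") as f:
        for line in f:
            fields = line.split()
            if len(fields) < 6:
                continue
            if level != "all" and fields[4] != level:          # awk's $5
                continue
            text = fields[5] if class_only else line.rstrip()  # awk's $6
            if needle and needle not in text:                  # grep
                continue
            out.append(text)
    return sorted(set(out)) if class_only else out             # sort -u
```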
samples\n\n","sub_path":"Project1/codes/requiredFunctions/gaussX.py","file_name":"gaussX.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626807179","text":"import requests\nfrom lxml import etree \nfrom eagle_demo import Chaojiying_Client\n#编码流程:\n#1.验证码识别,获取验证码图片的文字数据\n#2.对post请求进行发送(处理请求参数)\n#3.对响应数据进行持久化存储\ndef getCodeText():\n chaojiying = Chaojiying_Client('635072437', 'wujiawei', '917557')\t#用户中心>>软件ID 生成一个替换 96001\n im = open('code.jpg', 'rb').read()\t\t\t\t\t\t\t\t\t\t\t\t\t#本地图片文件路径 来替换 a.jpg 有时WIN系统须要//\n return chaojiying.PostPic(im, 1902)\t\n#1.对验证码图片进行捕获和识别\nurl = \"https://so.gushiwen.cn/user/login.aspx?from=http://so.gushiwen.cn/user/collect.aspx\"\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15(KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)'\n}\n#创建一个session对象\nsession = requests.session()\npage_text = session.get(url=url,headers=headers).text\ntree = etree.HTML(page_text)\ncode_img_src = 'https://so.gushiwen.cn'+tree.xpath('//*[@id=\"imgCode\"]/@src')[0]\nimg_data = session.get(url=code_img_src,headers=headers).content \n#将验证码图片保存到本地\nwith open('./code.jpg','wb') as fp:\n fp.write(img_data)\n#调用打码平台的实例程序进行验证码图片数据识别\nresult = str(getCodeText()['pic_str'])\nprint(result)\nprint(type(result))\n#post请求的发送(模拟登陆)\nlogin_url = \"https://so.gushiwen.cn/user/login.aspx?from=http%3a%2f%2fso.gushiwen.cn%2fuser%2fcollect.aspx\"\ndata = {\n 'email': '17671725832',\n 'pwd': 'wujiawei',\n 'code': result,\n 'denglu': '登录'\n}\nresp = session.post(url=login_url,headers=headers,data=data)\n#打印响应状态码 200即为成功\nprint(resp.status_code)\n#爬取当前用户个人主页对应的页面数据\ndetail_url = 'https://so.gushiwen.cn/user/collect.aspx'\n#使用携带cookies的session进行get请求发送\ndetail_page_text = session.get(url=detail_url,headers=headers).text \nwith open('robin.html','w',encoding='utf-8') as fp:\n fp.write(detail_page_text)","sub_path":"5_requests模块高级/1.模拟登陆古诗词网.py","file_name":"1.模拟登陆古诗词网.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"372370219","text":"import math\nimport traceback\nfrom enum import Enum\nfrom threading import Thread\n\nimport cv2\nimport logging\nimport time\n\nfrom fishy.constants import libgps, fishyqr, lam2\nfrom fishy.engine.fullautofisher.qr_detection import get_values_from_image, get_qr_location\nfrom fishy.engine.semifisher.fishing_mode import FishingMode\n\nfrom fishy.engine import SemiFisherEngine\nfrom fishy.engine.common.window import WindowClient\nfrom fishy.engine.semifisher import fishing_mode, fishing_event\n\nfrom fishy.engine.common.IEngine import IEngine\nfrom pynput import keyboard, mouse\n\nfrom fishy.helper import hotkey, helper\nfrom fishy.helper.helper import sign\n\nmse = mouse.Controller()\nkb = keyboard.Controller()\n\n\ndef image_pre_process(img):\n scale_percent = 100 # percent of original size\n width = int(img.shape[1] * scale_percent / 100)\n height = int(img.shape[0] * scale_percent / 100)\n dim = (width, height)\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n return img\n\n\nclass State(Enum):\n NONE = 0\n PLAYING = 1\n RECORDING = 2\n OTHER = 3\n\n\nclass FullAuto(IEngine):\n rotate_by = 30\n state = State.NONE\n\n def __init__(self, gui_ref):\n from fishy.engine.fullautofisher.controls import Controls\n from fishy.engine.fullautofisher import controls\n from fishy.engine.fullautofisher.calibrator 
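A quick usage check for `gaussX` above (assuming the function is in scope): for label 1 the polar construction keeps x1·x2 ≤ 0 — a negative radius mirrors a 4th-quadrant draw into the 2nd quadrant — and symmetrically x1·x2 ≥ 0 for label -1:

```python
# Assumes gaussX() from the record above is already importable/in scope.
import numpy as np

samples = gaussX(1001, variance=1)      # shape (1001, 3): x1, x2, label
labels = samples[:, 2]
pos = samples[labels == 1]              # 2nd/4th-quadrant class
neg = samples[labels == -1]             # 1st/3rd-quadrant class
print(pos.shape, neg.shape)             # (500, 3) (501, 3)
print(np.all(pos[:, 0] * pos[:, 1] <= 0),   # x1*x2 <= 0 for label  1
      np.all(neg[:, 0] * neg[:, 1] >= 0))   # x1*x2 >= 0 for label -1
```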
import Calibrator\n from fishy.engine.fullautofisher.test import Test\n\n super().__init__(gui_ref)\n self._hole_found_flag = False\n self._curr_rotate_y = 0\n\n self.fisher = SemiFisherEngine(None)\n self.calibrator = Calibrator(self)\n self.test = Test(self)\n self.controls = Controls(controls.get_controls(self))\n self.show_crop = False\n\n def run(self):\n\n addons_req = [libgps, lam2, fishyqr]\n for addon in addons_req:\n if not helper.addon_exists(*addon):\n helper.install_addon(*addon)\n\n FullAuto.state = State.NONE\n\n self.gui.bot_started(True)\n self.window = WindowClient(color=cv2.COLOR_RGB2GRAY, show_name=\"Full auto debug\")\n\n try:\n self.window.crop = get_qr_location(self.window.get_capture())\n if self.window.crop is None:\n logging.warning(\"FishyQR not found\")\n self.start = False\n raise Exception(\"FishyQR not found\")\n\n if not self.calibrator.all_callibrated():\n logging.error(\"you need to calibrate first\")\n\n self.fisher.toggle_start()\n fishing_event.unsubscribe()\n\n self.controls.initialize()\n while self.start and WindowClient.running():\n if self.show_crop:\n self.window.show(self.show_crop, func=image_pre_process)\n else:\n time.sleep(0.1)\n except:\n traceback.print_exc()\n\n if self.window.get_capture() is None:\n logging.error(\"Game window not found\")\n\n self.gui.bot_started(False)\n self.controls.unassign_keys()\n self.window.show(False)\n logging.info(\"Quitting\")\n self.window.destory()\n self.fisher.toggle_start()\n\n def get_coods(self):\n img = self.window.processed_image(func=image_pre_process)\n return get_values_from_image(img)\n\n def move_to(self, target):\n if target is None:\n logging.error(\"set target first\")\n return\n\n if not self.calibrator.all_callibrated():\n logging.error(\"you need to callibrate first\")\n return\n\n current = self.get_coods()\n print(f\"Moving from {(current[0], current[1])} to {target}\")\n move_vec = target[0] - current[0], target[1] - current[1]\n\n dist = math.sqrt(move_vec[0] ** 2 + move_vec[1] ** 2)\n print(f\"distance: {dist}\")\n if dist < 5e-05:\n print(\"distance very small skipping\")\n return\n\n target_angle = math.degrees(math.atan2(-move_vec[1], move_vec[0])) + 90\n from_angle = current[2]\n\n self.rotate_to(target_angle, from_angle)\n\n walking_time = dist / self.calibrator.move_factor\n print(f\"walking for {walking_time}\")\n kb.press('w')\n time.sleep(walking_time)\n kb.release('w')\n print(\"done\")\n\n def rotate_to(self, target_angle, from_angle=None):\n if from_angle is None:\n _, _, from_angle = self.get_coods()\n\n if target_angle < 0:\n target_angle = 360 + target_angle\n while target_angle > 360:\n target_angle -= 360\n print(f\"Rotating from {from_angle} to {target_angle}\")\n\n angle_diff = target_angle - from_angle\n\n if abs(angle_diff) > 180:\n angle_diff = (360 - abs(angle_diff)) * sign(angle_diff) * -1\n\n rotate_times = int(angle_diff / self.calibrator.rot_factor) * -1\n\n print(f\"rotate_times: {rotate_times}\")\n\n for _ in range(abs(rotate_times)):\n mse.move(sign(rotate_times) * FullAuto.rotate_by * -1, 0)\n time.sleep(0.05)\n\n def look_for_hole(self):\n self._hole_found_flag = False\n\n if FishingMode.CurrentMode == fishing_mode.State.LOOKING:\n return True\n\n def found_hole(e):\n if e == fishing_mode.State.LOOKING:\n self._hole_found_flag = True\n\n fishing_mode.subscribers.append(found_hole)\n\n t = 0\n while not self._hole_found_flag and t <= 1.25:\n mse.move(0, FullAuto.rotate_by)\n time.sleep(0.05)\n t += 0.05\n while not self._hole_found_flag and t > 0:\n 
mse.move(0, -FullAuto.rotate_by)\n time.sleep(0.05)\n t -= 0.05\n\n self._curr_rotate_y = t\n fishing_mode.subscribers.remove(found_hole)\n return self._hole_found_flag\n\n def rotate_back(self):\n while self._curr_rotate_y > 0.01:\n mse.move(0, -FullAuto.rotate_by)\n time.sleep(0.05)\n self._curr_rotate_y -= 0.05\n\n def toggle_start(self):\n if self.start and FullAuto.state != State.NONE:\n logging.info(\"Please turn off RECORDING/PLAYING first\")\n return\n\n self.start = not self.start\n if self.start:\n self.thread = Thread(target=self.run)\n self.thread.start()\n\n\nif __name__ == '__main__':\n logging.getLogger(\"\").setLevel(logging.DEBUG)\n hotkey.initalize()\n # noinspection PyTypeChecker\n bot = FullAuto(None)\n bot.toggle_start()\n","sub_path":"fishy/engine/fullautofisher/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315254500","text":"import numpy as np\nimport pytest\nimport xarray as xr\n\nsdba = pytest.importorskip(\"xclim.sdba\") # noqa\n\nfrom xclim.sdba.base import Grouper\nfrom xclim.sdba.base import Parametrizable\nfrom xclim.sdba.processing import normalize\n\n\ndef test_param_class():\n gr = Grouper(group=\"time.month\")\n in_params = dict(\n anint=4, abool=True, astring=\"a string\", adict={\"key\": \"val\"}, group=gr\n )\n obj = Parametrizable(**in_params)\n\n assert obj.parameters == in_params\n\n repr(obj).startswith(\n \"ParametrizableClass(anint=4, abool=True, astring='a string', adict={'key': 'val'}, \"\n \"group=Grouper(dim='time',\"\n )\n\n\n@pytest.mark.parametrize(\n \"group,window,nvals\",\n [(\"time\", 1, 366), (\"time.month\", 1, 31), (\"time.dayofyear\", 5, 1)],\n)\ndef test_grouper_group(tas_series, group, window, nvals):\n tas = tas_series(np.ones(366), start=\"2000-01-01\")\n\n grouper = Grouper(group, window=window)\n grpd = grouper.group(tas)\n\n if window > 1:\n assert \"window\" in grpd.dims\n\n assert grpd.count().max() == nvals\n\n\n@pytest.mark.parametrize(\n \"group,interp,val90\",\n [(\"time\", False, True), (\"time.month\", False, 3), (\"time.month\", True, 3.5)],\n)\ndef test_grouper_get_index(tas_series, group, interp, val90):\n tas = tas_series(np.ones(366), start=\"2000-01-01\")\n grouper = Grouper(group, interp=interp)\n indx = grouper.get_index(tas)\n # 90 is March 31st\n assert indx[90] == val90\n\n\ndef test_grouper_apply(tas_series):\n tas1 = tas_series(np.arange(366), start=\"2000-01-01\")\n tas0 = tas_series(np.zeros(366), start=\"2000-01-01\")\n tas = xr.concat((tas1, tas0), dim=\"lat\")\n\n grouper = Grouper(\"time.month\")\n out = grouper.apply(\"mean\", tas)\n assert out.isel(month=0, lat=0) == 15.0\n out = normalize(tas, group=grouper)\n\n grouper = Grouper(\"time.month\", add_dims=[\"lat\"])\n out = grouper.apply(\"mean\", tas)\n assert out.ndim == 1\n assert out.isel(month=0,) == 7.5\n assert out.attrs[\"group\"] == \"time.month\"\n assert out.attrs[\"group_compute_dims\"] == [\"time\", \"lat\"]\n assert out.attrs[\"group_window\"] == 1\n\n grouper = Grouper(\"time.month\", window=5)\n out = grouper.apply(\"mean\", tas)\n np.testing.assert_almost_equal(out.isel(month=0, lat=0), 15.32236842)\n\n tas = tas.chunk({\"lat\": 1})\n out = grouper.apply(\"mean\", tas)\n assert out.chunks == ((1, 1), (12,))\n\n out = normalize(tas, group=grouper)\n assert out.chunks == ((1, 1), (366,))\n\n def mixed_reduce(grdds, dim=None):\n tas1 = grdds.tas1.mean(dim=dim)\n tas0 = grdds.tas0 / 
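The wrap-around heading arithmetic inside `rotate_to` above is easy to get wrong; isolated as a pure function it can be unit-checked directly. A sketch using the same 360° logic, with a local stand-in for the `sign` helper the engine imports:

```python
def sign(x):
    # local stand-in for the sign() imported from fishy.helper.helper
    return 1 if x >= 0 else -1

def shortest_rotation(from_angle, target_angle):
    # signed shortest rotation (degrees), as computed in rotate_to()
    target_angle %= 360
    diff = target_angle - from_angle
    if abs(diff) > 180:
        diff = (360 - abs(diff)) * sign(diff) * -1
    return diff

assert shortest_rotation(350, 10) == 20    # 350° -> 10° is +20°, not -340°
assert shortest_rotation(10, 350) == -20
```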
grdds.tas0.mean(dim=dim)\n tas1.attrs[\"_group_apply_reshape\"] = True\n return xr.Dataset(data_vars={\"tas1_mean\": tas1, \"norm_tas0\": tas0})\n\n tas1 = tas1.chunk({\"time\": -1})\n out = grouper.apply(mixed_reduce, {\"tas1\": tas1, \"tas0\": tas0})\n assert \"month\" not in out.norm_tas0.dims\n assert \"month\" in out.tas1_mean.dims\n\n assert out.tas1_mean.chunks == ((12,),)\n assert out.norm_tas0.chunks == ((366,),)\n","sub_path":"tests/test_sdba/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411909759","text":"# -*- coding: utf-8 -*-\nimport os\nimport datetime\nimport const as ct\nfrom pathlib import Path\nfrom scrapy import signals\nfrom datetime import datetime\nfrom scrapy import FormRequest\nfrom base.clog import getLogger\nfrom dspider.myspider import BasicSpider\nfrom dspider.items import MyDownloadItem, ChinaSecurityIndustryValuationItem\nclass ChinaSecurityIndustryValuationSpider(BasicSpider):\n #name = 'chinaSecurityIndustryValuationSpider'\n name = 'cSIValuationSpider'\n file_name = ''\n logger = getLogger(__name__)\n custom_settings = {\n 'ROBOTSTXT_OBEY': False,\n 'SPIDERMON_ENABLED': True,\n 'DOWNLOAD_DELAY': 1.0,\n 'CONCURRENT_REQUESTS_PER_IP': 10,\n 'CONCURRENT_REQUESTS_PER_DOMAIN': 1,\n 'RANDOMIZE_DOWNLOAD_DELAY': False,\n 'FILES_STORE': ct.CHINA_SECURITY_INDUSTRY_VALUATION_PATH,\n 'SPIDERMON_VALIDATION_ADD_ERRORS_TO_ITEMS': True,\n 'SPIDERMON_VALIDATION_ERRORS_FIELD': ct.SPIDERMON_VALIDATION_ERRORS_FIELD,\n 'SPIDERMON_EXPECTED_FINISH_REASONS': ct.SPIDERMON_EXPECTED_FINISH_REASONS,\n 'SPIDERMON_VALIDATION_MODELS': {\n ChinaSecurityIndustryValuationItem: 'dspider.validators.PlateValuationModel',\n },\n 'EXTENSIONS': {\n 'spidermon.contrib.scrapy.extensions.Spidermon': 500,\n },\n 'ITEM_PIPELINES': {\n 'dspider.pipelines.PlateValuationDownloadPipeline': 100,\n 'dspider.pipelines.ChinaSecurityIndustryValuationHandlePipeline': 200,\n },\n 'SPIDERMON_UNWANTED_HTTP_CODES': ct.DEFAULT_ERROR_CODES,\n 'SPIDERMON_SPIDER_CLOSE_MONITORS': (\n 'dspider.monitors.SpiderCloseMonitorSuite',\n )\n }\n allowed_domains = ['47.97.204.47']\n start_url = 'http://47.97.204.47/syl/'\n def start_requests(self):\n mformat = 'csi%Y%m%d.zip'\n end_date = datetime.now().strftime(mformat)\n self.file_name = end_date\n start_date = self.get_nday_ago(end_date, 10, dformat = mformat)\n while start_date <= end_date:\n furl = self.start_url + start_date\n yield FormRequest(url = furl, method = 'GET', callback = self.parse, errback=self.errback_httpbin)\n start_date = self.get_tomorrow_date(sdate = start_date, dformat = mformat)\n\n def parse(self, response):\n try:\n if response.status == 200:\n fname = os.path.basename(response.url)\n yield MyDownloadItem(file_urls = [response.url], file_name = fname)\n else:\n self.logger.error(\"get china security industry valuation failed url:{} status:{}\".format(response.url, response.status))\n except Exception as e:\n self.logger.error(\"get china security industry valuation exception:{}\".format(e))\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = super(ChinaSecurityIndustryValuationSpider, cls).from_crawler(crawler, *args, **kwargs)\n crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)\n return spider\n\n def spider_closed(self, spider, reason):\n mdate = datetime.now().strftime('%Y-%m-%d')\n file_path = 
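For readers without xclim handy: the `Grouper("time.month").apply("mean", ...)` calls exercised in these tests are, in the 1-D case, conceptually a plain xarray groupby (minus the window/`add_dims` machinery). A minimal reproduction of the 15.0 January mean asserted in `test_grouper_apply` above:

```python
# Plain-xarray sketch of what Grouper("time.month").apply("mean", tas)
# computes for a simple 1-D series.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range("2000-01-01", periods=366, freq="D")
tas = xr.DataArray(np.arange(366.0), coords={"time": time}, dims="time")
monthly = tas.groupby("time.month").mean()
print(monthly.sel(month=1).item())   # 15.0: mean of days 0..30 in January
```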
Path(ct.CHINA_SECURITY_INDUSTRY_VALUATION_PATH)/\"{}\".format(self.file_name)\n if file_path.exists():\n message = \"download china security industry valuation {} at {} succeed\".format(file_path, mdate)\n self.status = True\n else:\n message = \"download china security industry valuation {} at {} failed\".format(file_path, mdate)\n self.status = False\n self.message = message\n self.collect_spider_info()\n","sub_path":"crawler/dspider/spiders/chinaSecurityIndustryValuationSpider.py","file_name":"chinaSecurityIndustryValuationSpider.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103396677","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/12/11 21:24\n# @Author : hyy\n# @Email : hyywestwood@zju.edu.cn\n# @File : spider_2.py\n# @Software: PyCharm\nimport configparser\nimport schedule\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport os\nimport time\nimport random\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nimport logging\n\n\nclass Spider:\n def __init__(self, url):\n self.url = url\n self.flag = 1 # 用于控制发邮件进行通知的时间\n self.retry_counts = 3\n # self.report_time = None\n self.djdh = None\n self.dxsk = None\n self.zdysq = None\n self.qgryl = None\n self.path = os.path.abspath(os.path.join('.', '水利部-新版数据')) # 数据文件存储路径\n self.folder = os.path.exists(self.path) # 判断存储路径文件夹是否存在,没有则创建\n if not self.folder:\n os.makedirs(self.path)\n self.driver = self.getdriver(self.url)\n self.logger = self.log_setting()\n\n def run(self):\n # schedule.every(10).minutes.do(self.single_process)\n schedule.every().day.at(\"09:00\").do(self.single_process)\n schedule.every().day.at(\"21:00\").do(self.single_process)\n text = '水利数据爬取完成'\n subject = '水利数据'\n schedule.every(3).day.at(\"22:00\").do(self.email_send, text, subject)\n # schedule.every(2).day.at(\"22:00\").do(self.email_send, text, subject)\n while True:\n schedule.run_pending()\n\n def single_process(self):\n time.sleep(10)\n self.driver.refresh()\n\n # 获取水利部官网数据\n self.djdh = None\n while self.djdh is None:\n click_btn = self.driver.find_element_by_xpath('//a[li=\"大江大河\"]')\n ActionChains(self.driver).click(click_btn).perform()\n self.djdh = self.get_data()\n self.write_data(self.djdh, '大江大河') # 将获取的数据写入文件,简单起见,不设置新旧数据对比\n self.logger.info(\"大江大河数据抓取完成\")\n\n self.dxsk = None\n while self.dxsk is None:\n click_btn = self.driver.find_element_by_xpath('//a[li=\"大型水库\"]')\n ActionChains(self.driver).click(click_btn).perform()\n self.dxsk = self.get_data()\n self.write_data(self.dxsk, '大型水库')\n # time.sleep(10)\n self.logger.info(\"大型水库数据抓取完成\")\n\n self.zdysq = None\n while self.zdysq is None:\n click_btn = self.driver.find_element_by_xpath('//a[li=\"重点雨水情\"]')\n ActionChains(self.driver).click(click_btn).perform()\n self.zdysq = self.get_data()\n self.write_data(self.zdysq, '重点雨水情')\n self.logger.info(\"重点雨水情数据抓取完成\")\n\n click_btn = self.driver.find_element_by_xpath('//a[li=\"全国日雨量\"]')\n ActionChains(self.driver).click(click_btn).perform()\n self.get_qgryl('全国日雨量')\n self.logger.info(\"全国日雨量数据抓取完成\")\n\n # self.driver.close() # 浏览器关闭\n # self.write_data(self.djdh, '大江大河') # 将获取的数据写入文件,简单起见,不设置新旧数据对比\n # self.write_data(self.dxsk, '大型水库')\n # 
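The spider above builds a ten-day window of `csi%Y%m%d.zip` URLs via project-specific date helpers (`get_nday_ago`, `get_tomorrow_date`). With plain `datetime` the same window looks like this (illustrative only):

```python
# Same ten-day URL window with the standard library instead of the
# spider's own date helpers.
from datetime import datetime, timedelta

mformat = "csi%Y%m%d.zip"
start_url = "http://47.97.204.47/syl/"
today = datetime.now()
urls = [start_url + (today - timedelta(days=d)).strftime(mformat)
        for d in range(10, -1, -1)]         # 10 days ago .. today
print(urls[0], "...", urls[-1])
```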
self.write_data(self.zdysq, '重点雨水情')\n\n def getdriver(self, url, Headless=True):\n profile = webdriver.FirefoxOptions()\n user_agent = 'Mozilla/5.0 (Linux; Android 7.0; BND-AL10 Build/HONORBND-AL10; wv) ' \\\n 'AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 ' \\\n 'MQQBrowser/6.2 TBS/044304 Mobile Safari/537.36 MicroMessenger/6.7.3.1340(0x26070331) ' \\\n 'NetType/4G Language/zh_CN Process/tools'\n if Headless:\n profile.add_argument('-headless') # 设置无头模式\n # profile.set_preference('network.proxy.type', 1)\n profile.set_preference('general.useragent.override', user_agent)\n driver = webdriver.Firefox(options=profile)\n driver.get(url)\n return driver\n\n def log_setting(self):\n logger = logging.getLogger(__name__)\n logger.setLevel(level=logging.INFO)\n handler = logging.FileHandler(\"log.txt\")\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(formatter)\n logger.addHandler(handler)\n logger.addHandler(console)\n return logger\n\n def get_qgryl(self, f_str):\n path2 = os.path.join(self.path, f_str)\n folder = os.path.exists(path2)\n if not folder:\n os.makedirs(path2)\n\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept - Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Connection': 'Keep-Alive',\n 'Host': 'xxfb.mwr.cn',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0',\n 'Referer': 'http://xxfb.mwr.cn/sq_qgryl.html',\n 'Upgrade-Insecure-Requests': '1'}\n time.sleep(5)\n bf = BeautifulSoup(self.driver.page_source, 'html.parser')\n img = bf.find('div', id='hdcontent').find_all('img')\n url = 'http://xxfb.mwr.cn' + img[0].attrs['src']\n r = requests.get(url, headers=headers, stream=True)\n if r.status_code == 200:\n open(os.path.join(path2, time.strftime(\"%Y-%m-%d\", time.localtime()) + '.png'), 'wb').write(r.content) # 将内容写入图片\n # print(self.report_time+'.png 已成功保存')\n time.sleep(10)\n # open(r'D:\\pycharm\\pachong\\水利部-新版数据\\全国日雨量\\test2.png', 'wb').write(r.content) # 将内容写入图片\n\n def get_data(self):\n while self.retry_counts > 0:\n try:\n time.sleep(random.uniform(80, 100)) # 需要停留足够长的时间确保数据加载出来\n WebDriverWait(self.driver, 60).until(EC.presence_of_element_located((By.ID, 'hdcontent')))\n retrytimes = 10\n while retrytimes > 0:\n time.sleep(30*1)\n html = self.driver.page_source\n bf = BeautifulSoup(html, 'html.parser')\n # self.report_time = str(bf.find('span', id='hddate').contents[0])\n data_hd = self.trans(bf)\n retrytimes -= 1\n if data_hd:\n return data_hd\n return None\n except Exception:\n print('错误发生,重新尝试获取,剩余次数{}'.format(self.retry_counts-1))\n self.retry_counts -= 1\n return None\n\n def trans(self, a):\n hd = a.find('div',id='hdtable').find_all('tr')\n data_hd = []\n for hang in hd:\n zhandian = []\n for item in hang.contents:\n if item.name == 'td':\n d_str = item.text\n d_str = d_str.replace('↑', '')\n d_str = d_str.replace('↓', '')\n # d_str = d_str.replace('—', '')\n d_str = d_str.replace('*', '')\n d_str = d_str.replace('?', '')\n d_str = d_str.replace('/', '')\n d_str = d_str.replace('|', '')\n # d_str = d_str.replace(':', '')\n zhandian.append(d_str.strip())\n data_hd.append(zhandian)\n return data_hd\n\n def write_data(self, data, f_str):\n path2 = os.path.join(self.path, 
f_str)\n folder = os.path.exists(path2)\n if not folder:\n os.makedirs(path2)\n\n if f_str == '大江大河':\n for hang in data:\n path3 = os.path.join(self.path, f_str, hang[0])\n folder = os.path.exists(path3)\n if not folder:\n os.makedirs(path3)\n with open(path3 + '\\\\{}-{}-{}.txt'.format(hang[1], hang[2], hang[3]),\n 'a+',encoding='utf-8') as f:\n f.write('{}\\t{}\\t{}\\t{} \\n'.format(time.strftime(\"%Y-\", time.localtime()) + hang[4],\n hang[5], hang[6], hang[7]))\n\n if f_str == '大型水库':\n for hang in data:\n path3 = os.path.join(self.path, f_str, hang[0])\n folder = os.path.exists(path3)\n if not folder:\n os.makedirs(path3)\n with open(path3 + '\\\\{}-{}-{}.txt'.format(hang[1], hang[2], hang[3]),\n 'a+',encoding='utf-8') as f:\n f.write('{}\\t{}\\t{}\\t{} \\n'.format(time.strftime(\"%Y-%m-%d\", time.localtime()), hang[4],\n hang[5], hang[6], hang[7]))\n\n if f_str == '重点雨水情':\n for hang in data:\n path3 = os.path.join(self.path, f_str, hang[0])\n folder = os.path.exists(path3)\n if not folder:\n os.makedirs(path3)\n with open(path3 + '\\\\{}-{}-{}.txt'.format(hang[1], hang[2], hang[3]),\n 'a+',encoding='utf-8') as f:\n f.write('{}\\t{}\\t{} \\n'.format(hang[4],hang[5], hang[6]))\n\n def email_send(self, text, subject):\n # 读取email配置\n config = configparser.ConfigParser()\n config.read(\"./config.cfg\")\n conf_email = config['email_setting']\n\n sender = conf_email['sender']\n receivers = conf_email['receivers'].split(',') # 接收邮件,可设置为你的QQ邮箱或者其他邮箱\n mail_host = conf_email['mail_host'] # 设置服务器\n mail_user = conf_email['mail_user'] # 用户名\n mail_pass = conf_email['mail_pass'] # 口令\n\n # 三个参数:第一个为文本内容,第二个 plain 设置文本格式,第三个 utf-8 设置编码\n message = MIMEText(text, 'plain', 'utf-8')\n message['From'] = Header(\"水利数据\", 'utf-8') # 发送者\n message['To'] = Header(\"hyy\", 'utf-8') # 接收者\n message['Subject'] = Header(subject, 'utf-8')\n\n try:\n smtpObj = smtplib.SMTP(mail_host, 25)\n # smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号\n smtpObj.login(mail_user, mail_pass)\n smtpObj.sendmail(sender, receivers, message.as_string())\n print(\"邮件发送成功\")\n except smtplib.SMTPException as e:\n print(\"Error: 无法发送邮件\", e)\n\n\nif __name__ == '__main__':\n url = 'http://xxfb.mwr.cn/sq_djdh.html'\n Web_spider = Spider(url)\n Web_spider.run()\n\n\n\n\n\n\n","sub_path":"spider_2.py","file_name":"spider_2.py","file_ext":"py","file_size_in_byte":11440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380999518","text":"import torch\nfrom torchvision import datasets, transforms\n\nfrom deeprobust.image.attack.onepixel import Onepixel\nimport deeprobust.image.netmodels.resnet as resnet\nimport deeprobust.image.netmodels.CNN as CNN\nfrom deeprobust.image.config import attack_params\nimport matplotlib.pyplot as plt\n\nmodel = resnet.ResNet18().to('cuda')\nprint(\"Load network\")\n\nmodel.load_state_dict(torch.load(\"./trained_models/CIFAR10_ResNet18_epoch_50.pt\"))\nmodel.eval()\n\ntransform_val = transforms.Compose([\n transforms.ToTensor(),\n ])\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('deeprobust/image/data', train = False, download=True,\n transform = transform_val),\n batch_size = 1, shuffle=True) #, **kwargs)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nxx, yy = next(iter(test_loader))\nxx = xx.to('cuda').float()\n\nonepixel_params = {\n 'pixels':1\n}\nprint(xx.size())\nattack = Onepixel(model,'cuda')\nsuccess, rate = attack.generate(image = xx, label = yy, 
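`Spider.run` above drives everything through the `schedule` library. Reduced to its essentials, with a stub job in place of `single_process`, the skeleton is (note the original loop calls `run_pending` in a tight busy-wait; sleeping between polls is the usual pattern):

```python
# Minimal skeleton of the schedule-driven loop used in Spider.run().
import time
import schedule

def job():
    print("scraping ...")       # stands in for single_process()

schedule.every().day.at("09:00").do(job)
schedule.every().day.at("21:00").do(job)

while True:
    schedule.run_pending()
    time.sleep(60)              # avoid the busy-wait in the original loop
```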
**onepixel_params)\nprint(success, rate)\n","sub_path":"examples/image/test_onepixel.py","file_name":"test_onepixel.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651358660","text":"def parse_youtube_video(video, data):\n video.etag = data[\"etag\"]\n video.id = data[\"id\"]\n\n # snippet\n snippet_data = data.get(\"snippet\", False)\n if snippet_data:\n video.published_at = snippet_data.get(\"publishedAt\", None)\n video.channel_id = snippet_data.get(\"channelId\", None)\n video.title = snippet_data.get(\"title\", None)\n video.description = snippet_data.get(\"description\", None)\n video.channel_title = snippet_data.get(\"channelTitle\", None)\n video.tags = snippet_data.get(\"tags\", None)\n video.category_id = snippet_data.get(\"categoryId\", None)\n video.live_broadcast_content = snippet_data.get(\n \"liveBroadcastContent\", None\n )\n video.default_language = snippet_data.get(\"defaultLanguage\", None)\n video.default_audio_language = snippet_data.get(\n \"defaultAudioLanguage\", None\n )\n\n # contentDetails\n content_details_data = data.get(\"contentDetails\", False)\n if content_details_data:\n video.duration = content_details_data.get(\"duration\", None)\n video.dimension = content_details_data.get(\"dimension\", None)\n video.definition = content_details_data.get(\"definition\", None)\n video.caption = content_details_data.get(\"caption\", None)\n video.licensed_content = content_details_data.get(\n \"licensedContent\", None\n )\n video.projection = content_details_data.get(\"projection\", None)\n video.has_custom_thumbnail = content_details_data.get(\n \"hasCustomThumbnail\", None\n )\n\n # status\n status_data = data.get(\"status\", False)\n if status_data:\n video.upload_status = status_data.get(\"uploadStatus\", None)\n video.failure_reason = status_data.get(\"failureReason\", None)\n video.rejection_reason = status_data.get(\"rejectionReason\", None)\n video.privacy_status = status_data.get(\"privacyStatus\", None)\n video.publish_at = status_data.get(\"publishAt\", None)\n video.license = status_data.get(\"license\", None)\n video.embeddable = status_data.get(\"embeddable\", None)\n video.public_stats_viewable = status_data.get(\n \"publicStatsViewable\", None\n )\n\n # statistics\n statistics_data = data.get(\"statistics\", False)\n if statistics_data:\n video.view_count = statistics_data.get(\"viewCount\", None)\n video.like_count = statistics_data.get(\"likeCount\", None)\n video.dislike_count = statistics_data.get(\"dislikeCount\", None)\n video.favorite_count = statistics_data.get(\"favoriteCount\", None)\n video.comment_count = statistics_data.get(\"commentCount\", None)\n\n # player\n player_data = data.get(\"player\", False)\n if player_data:\n video.embed_html = player_data.get(\"embedHtml\", None)\n video.embed_height = player_data.get(\"embedHeight\", None)\n video.embed_width = player_data.get(\"embedWidth\", None)\n\n # topicDetails\n topic_details_data = data.get(\"topicDetails\", False)\n if topic_details_data:\n video.topic_ids = topic_details_data.get(\"topicIds\", None)\n video.relevant_topic_ids = topic_details_data.get(\n \"relevantTopicIds\", None\n )\n video.topic_categories = topic_details_data.get(\"topicCategories\", None)\n\n # recordingDetails\n recording_details_data = data.get(\"recordingDetails\", False)\n if recording_details_data:\n video.recording_date = recording_details_data.get(\"recordingDate\", None)\n\n # fileDetails\n 
file_details_data = data.get(\"fileDetails\", False)\n if file_details_data:\n video.file_name = file_details_data.get(\"fileName\", None)\n video.file_size = file_details_data.get(\"fileSize\", None)\n video.file_type = file_details_data.get(\"fileType\", None)\n video.container = file_details_data.get(\"container\", None)\n video.video_streams = file_details_data.get(\"videoStreams\", None)\n video.audio_streams = file_details_data.get(\"audioStreams\", None)\n video.duration_ms = file_details_data.get(\"durationMs\", None)\n video.bitrate_bps = file_details_data.get(\"bitrateBps\", None)\n video.creation_time = file_details_data.get(\"creationTime\", None)\n\n # processingDetails\n processing_details_data = data.get(\"processingDetails\", False)\n if processing_details_data:\n video.processing_status = processing_details_data.get(\n \"processingStatus\", None\n )\n video.processing_failure_reason = processing_details_data.get(\n \"processingFailureReason\", None\n )\n video.file_details_availability = processing_details_data.get(\n \"fileDetailsAvailability\", None\n )\n video.processing_issues_availability = processing_details_data.get(\n \"processingIssuesAvailability\", None\n )\n video.tag_suggestions_availability = processing_details_data.get(\n \"tagSuggestionsAvailability\", None\n )\n video.editor_suggestions_availability = processing_details_data.get(\n \"editorSuggestionsAvailability\", None\n )\n video.thumbnails_availability = processing_details_data.get(\n \"thumbnailsAvailability\", None\n )\n\n # suggestions\n suggestions_data = data.get(\"suggestions\", False)\n if suggestions_data:\n video.processing_errors = suggestions_data.get(\"processingErrors\", None)\n video.processing_warnings = suggestions_data.get(\n \"processingWarnings\", None\n )\n video.processing_hints = suggestions_data.get(\"processingHints\", None)\n video.tag_suggestions = suggestions_data.get(\"tagSuggestions\", None)\n video.editor_suggestions = suggestions_data.get(\n \"editorSuggestions\", None\n )\n\n # liveStreamingDetails\n live_streaming_details_data = data.get(\"liveStreamingDetails\", False)\n if live_streaming_details_data:\n video.actual_start_time = live_streaming_details_data.get(\n \"actualStartTime\", None\n )\n video.actual_end_time = live_streaming_details_data.get(\n \"actualEndTime\", None\n )\n video.scheduled_start_time = live_streaming_details_data.get(\n \"scheduledStartTime\", None\n )\n video.scheduled_end_time = live_streaming_details_data.get(\n \"scheduledEndTime\", None\n )\n video.concurrent_viewers = live_streaming_details_data.get(\n \"concurrentViewers\", None\n )\n video.active_live_chat_id = live_streaming_details_data.get(\n \"activeLiveChatId\", None\n )\n\n # localizations is a top-level field, so read it outside the block above and\n # use .get so a missing key cannot raise\n video.localizations = data.get(\"localizations\", None)\n\n return video\n","sub_path":"tools/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"34453667","text":"import numpy as np\nfrom tqdm import tqdm\nfrom gensim.models import word2vec, Word2Vec\nimport matplotlib\nfrom typing import Any, List\nimport csv\nfrom matplotlib import pyplot as plt\nfrom numpy.core.multiarray import ndarray\n\n\ndef positions(positiondata_PATH):\n positions = []\n with open(positiondata_PATH, \"r\") as f:\n reader = csv.reader(f)\n for row in tqdm(reader):\n positions.append([float(data) for data in row])\n return np.array(positions)\n\n\ndef inpeek(x, y, box): # args: box = bbox position [Xmin, Xmax, Ymin, Ymax]; returns: True if (x, y) falls inside the box, otherwise False\n if box[0] < x < box[1] and box[2] < y < box[3]:\n return True\n else:\n return False\n\nwords = []\nx_positions = positions('shuseibumtext_posi.csv')\nwith open('result170_1201.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in reader:\n words.append(row[-1])\nmodel = word2vec.Word2Vec.load(\"sample2.model\")\n\nx1 = -0.532\nx2 = 0.769\n# NOTE: the original referenced undefined names xy_positions and obj_word_positions;\n# assuming here that the loaded array holds the (x, y) pair in its first two columns\n# and the two 200-dim word vectors in the remaining columns.\nxy_positions = x_positions[:, :2]\nobj_word_positions = x_positions[:, 2:]\nmat = np.c_[xy_positions, obj_word_positions]\ns = []\nfor row in mat:\n if x1 < row[0] < x2:\n s.append(row)\na = []\nfor row in s:\n a.append(row[2:])\n\n\n\nl = []\nfor row in tqdm(a):\n l.append([model.most_similar([np.array(row[0:200])], [], 1)[0][0],\n model.most_similar([np.array(row[200:400])], [], 1)[0][0]])\n\nwith open('./x_{}-{}_in_hist.csv'.format(x1, x2), 'w') as file:\n writer = csv.writer(file, lineterminator='\\n')\n writer.writerows(l)\n","sub_path":"sceen_text_hist_peel.py","file_name":"sceen_text_hist_peel.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"178453298","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport requests\nimport itchat\nimport os\nimport re\nimport shutil\nimport time\nfrom functions import get_functions, get_example, AUTO_REPLAY\nfrom threading import Timer\n\nfrom itchat.content import *\n\n# KEY = '8edce3ce905a4c1dbb965e6b35c3834d'\nKEY = 'b545f73ce82c45f5b63f3f4b20e6f4a9'\n\n\ndef get_response(msg):\n # build the payload sent to the Tuling robot API\n api_url = 'http://www.tuling123.com/openapi/api'\n data = {\n 'key': KEY,\n 'info': msg,\n 'userid': 'wechat-robot',\n }\n try:\n r = requests.post(api_url, data=data).json()\n # dict.get returns None instead of raising when 'text' is missing\n text = r.get('text')\n url = r.get('url', None)\n if url is not None:\n return text + '\\n' + url\n else:\n return text\n # catch I/O errors so the program does not crash when the server misbehaves\n # (a non-JSON response or connection failure falls through to the return below)\n except IOError:\n # implicitly returns None\n return\n\n\ndef is_msg_from_myself(msgFromUserName):\n # check whether the message sender is this account\n global myName\n return myName == msgFromUserName\n\n\n# # register the text-message reply handler (kept for reference; superseded by revocation below)\n# @itchat.msg_register(itchat.content.TEXT)\n# def tuling_reply(msg):\n# global autoReplyFlag, timerSet, noReply, t # state flags\n# print(msg['Text'])\n# if is_msg_from_myself(msg['FromUserName']):\n# print(\"Replied!!\")\n# autoReplyFlag = False\n# noReply = False\n# try:\n# t.cancel()\n# print(\"Timer Canceled\")\n# timerSet = False\n# except:\n# pass\n# return None\n#\n# if autoReplyFlag:\n# # default reply in case the Tuling key fails\n# default_reply = 'I received: ' + msg['Text']\n# # reply will be None if the Tuling key fails\n# reply = get_response(msg['Text'])\n# # 'a or b' returns a when a is truthy, otherwise b; parenthesised so a None reply cannot raise\n# return '小K: ' + (reply or default_reply)\n# else:\n# noReply = True\n# if not timerSet:\n# # if time.time()-noReplyStartTime >= 120:\n# print(\"Timer setting\")\n# t = Timer(12, send_busy_status, [msg['FromUserName']])\n# t.start()\n# timerSet = True\n#\n\ndef send_busy_status(user_name):\n global noReply, autoReplyFlag, timerSet\n print(\"Timer Working!\")\n if noReply:\n content = \"您好,我是主人的机器人小K\\n注意:以'小K:'\\t开头的都是自动回复的哦(^o^)\\n\"\n off_auto = r'如果不希望小K自动回复请在消息前面加上@符号' + '\\n'\n functions = '您可以回复: 所有功能 来查看小K的功能\\n'\n if AUTO_REPLAY:\n itchat.send(content + functions + off_auto, user_name)\n autoReplyFlag = True\n timerSet = False\n\n\n#\n# @itchat.msg_register([PICTURE, MAP, CARD, NOTE, SHARING, RECORDING, ATTACHMENT, VIDEO])\n# def 
text_reply(msg):\n# if msg['Type'] == 'Text':\n# reply_content = msg['Text']\n# elif msg['Type'] == 'Picture':\n# reply_content = r\"图片: \" + msg['FileName']\n# elif msg['Type'] == 'Card':\n# reply_content = r\" \" + msg['RecommendInfo']['NickName'] + r\" 的名片\"\n# elif msg['Type'] == 'Map':\n# x, y, location = re.search(\"\" + x.__str__() + \" 经度->\" + y.__str__()\n# else:\n# reply_content = r\"位置: \" + location\n# elif msg['Type'] == 'Note':\n# reply_content = r\"通知\"\n# elif msg['Type'] == 'Sharing':\n# reply_content = r\"分享\"\n# elif msg['Type'] == 'Recording':\n# reply_content = r\"语音\"\n# elif msg['Type'] == 'Attachment':\n# reply_content = r\"文件: \" + msg['FileName']\n# elif msg['Type'] == 'Video':\n# reply_content = r\"视频: \" + msg['FileName']\n# else:\n# reply_content = r\"消息\"\n#\n# friend = itchat.search_friends(userName=msg['FromUserName'])\n# itchat.send(r\"Friend:%s -- %s \"\n# r\"Time:%s \"\n# r\" Message:%s\" % (friend['NickName'], friend['RemarkName'], time.ctime(), reply_content),\n# toUserName='filehelper')\n#\n# itchat.send(r\"小K已经收到你在【%s】发送的消息【%s】主人稍后回复。\" % (time.ctime(), reply_content),\n# toUserName=msg['FromUserName'])\n\n\n# 处理群聊消息\n@itchat.msg_register(itchat.content.TEXT, isGroupChat=True)\ndef text_reply(msg):\n # print(msg)\n if msg['isAt']:\n content_message = msg['Content']\n print(content_message)\n start_at = content_message.find('\\u2005')\n if start_at == -1:\n start_at = 0\n print(start_at)\n content = content_message[start_at:]\n print(content)\n # 'Content': '@阿宝\\u2005宝宝真棒',\n smart_reply = '小K: ' + get_response(content)\n print(smart_reply)\n if AUTO_REPLAY:\n itchat.send(smart_reply, msg['FromUserName'])\n\n # itchat.send(u'@%s\\u2005I received: %s' % (msg['ActualNickName'], msg['Content']), msg['FromUserName'])\n\n\n# {msg_id:(msg_from,msg_to,msg_time,msg_time_touser,msg_type,msg_content,msg_url,msg_from_user_name)}\nmsg_dict = {}\n\n\n# ClearTimeOutMsg用于清理消息字典,把超时消息清理掉\n# 为减少资源占用,此函数只在有新消息动态时调用\ndef clear_timeout_msg():\n if msg_dict.__len__() > 0:\n for msgid in list(msg_dict): # 由于字典在遍历过程中不能删除元素,故使用此方法\n if time.time() - msg_dict.get(msgid, None)[\"msg_time\"] > 130.0: # 超时两分钟\n item = msg_dict.pop(msgid)\n # print(\"超时的消息:\", item['msg_content'])\n # 可下载类消息,并删除相关文件\n if item['msg_type'] == \"Picture\" \\\n or item['msg_type'] == \"Recording\" \\\n or item['msg_type'] == \"Video\" \\\n or item['msg_type'] == \"Attachment\":\n print(\"要删除的文件:\", item['msg_content'])\n os.remove(item['msg_content'])\n\n\n# 将接收到的消息存放在字典中,当接收到新消息时对字典中超时的消息进行清理\n# 没有注册note(通知类)消息,通知类消息一般为:红包 转账 消息撤回提醒等,不具有撤回功能\n@itchat.msg_register([TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO, FRIENDS])\ndef revocation(msg):\n mytime = time.localtime() # 这儿获取的是本地时间\n # 获取用于展示给用户看的时间 2017/03/03 13:23:53\n msg_time_touser = mytime.tm_year.__str__() \\\n + \"/\" + mytime.tm_mon.__str__() \\\n + \"/\" + mytime.tm_mday.__str__() \\\n + \" \" + mytime.tm_hour.__str__() \\\n + \":\" + mytime.tm_min.__str__() \\\n + \":\" + mytime.tm_sec.__str__()\n msg_from_user_name = msg['FromUserName']\n msg_id = msg['MsgId'] # 消息ID\n msg_time = msg['CreateTime'] # 消息时间\n msg_from = itchat.search_friends(userName=msg['FromUserName'])['NickName'] # 消息发送人昵称\n msg_type = msg['Type'] # 消息类型\n msg_content = None # 根据消息类型不同,消息内容不同\n msg_url = None # 分享类消息有url\n # 图片 语音 附件 视频,可下载消息将内容下载暂存到当前目录\n wx_msg_text = msg['Text']\n if msg['Type'] == 'Text':\n\n msg_content = wx_msg_text\n\n global autoReplyFlag, timerSet, noReply, t # 状态标志位\n print(wx_msg_text)\n if 
is_msg_from_myself(msg['FromUserName']):\n print(\"Replied!!\")\n autoReplyFlag = False\n noReply = False\n try:\n t.cancel()\n print(\"Timer Canceled\")\n timerSet = False\n except:\n pass\n return None\n\n if autoReplyFlag:\n # list the available functions\n if wx_msg_text == '所有功能':\n return '您可以使用下列功能:\\n' + get_functions()\n elif wx_msg_text[:1] == r'#':\n return get_example(wx_msg_text[1:])\n elif wx_msg_text[:1] == r'@':\n print('do nothing')\n else:\n # default reply in case the Tuling key fails\n default_reply = 'I received: ' + wx_msg_text\n # reply will be None if the Tuling key fails\n reply = get_response(wx_msg_text)\n # 'a or b' returns a when a is truthy, otherwise b; parenthesised so a\n # None reply cannot raise a TypeError during concatenation\n if AUTO_REPLAY:\n return '小K: ' + (reply or default_reply)\n else:\n noReply = True\n if not timerSet:\n # if time.time()-noReplyStartTime >= 120:\n print(\"Timer setting\")\n t = Timer(12, send_busy_status, [msg['FromUserName']])\n t.start()\n timerSet = True\n\n elif msg['Type'] == 'Picture':\n msg_content = msg['FileName']\n\n try:\n # download the picture; for media messages msg['Text'] is itchat's download function\n wx_msg_text(msg['FileName'])\n except OSError:\n pass\n elif msg['Type'] == 'Card':\n msg_content = msg['RecommendInfo']['NickName'] + r\" 的名片\"\n elif msg['Type'] == 'Map':\n # NOTE: the original pattern was mangled when angle-bracketed text was stripped;\n # reconstructed here from the standard itchat location XML.\n x, y, location = re.search('x=\"(.*?)\" y=\"(.*?)\".*label=\"(.*?)\"', msg['OriContent']).group(1, 2, 3)\n if location is None:\n msg_content = r\"纬度->\" + x.__str__() + \" 经度->\" + y.__str__()\n else:\n msg_content = r\"位置: \" + location\n elif msg['Type'] == 'Sharing':\n msg_content = wx_msg_text\n msg_url = msg['Url']\n elif msg['Type'] == 'Recording':\n msg_content = msg['FileName']\n wx_msg_text(msg['FileName'])\n elif msg['Type'] == 'Attachment':\n msg_content = r\"\" + msg['FileName']\n wx_msg_text(msg['FileName'])\n elif msg['Type'] == 'Video':\n msg_content = msg['FileName']\n wx_msg_text(msg['FileName'])\n elif msg['Type'] == 'Friends':\n msg_content = wx_msg_text\n\n # friend = itchat.search_friends(userName=msg['FromUserName'])\n # itchat.send(r\"Friend:%s -- %s \"\n # r\"Time:%s \"\n # r\" Message:%s\" % (friend['NickName'], friend['RemarkName'], msg_time_touser, msg_content),\n # toUserName='filehelper')\n\n if msg['Type'] != 'Text':\n if AUTO_REPLAY:\n itchat.send(\"小K已收到您的消息,主人稍后回复!\\n\\t时间: %s\\n\\t内容: %s\" % (msg_time_touser, msg_content),\n toUserName=msg['FromUserName'])\n\n # cache the message\n # {msg_id:(msg_from,msg_time,msg_time_touser,msg_type,msg_content,msg_url)}\n msg_dict.update(\n {msg_id: {\"msg_from\": msg_from, \"msg_time\": msg_time, \"msg_time_touser\": msg_time_touser, \"msg_type\": msg_type,\n \"msg_content\": msg_content, \"msg_url\": msg_url, \"msg_from_user_name\": msg_from_user_name}})\n # purge expired entries\n clear_timeout_msg()\n\n\n# on Note messages, decide whether this is a recall notice and react accordingly\n@itchat.msg_register([NOTE])\ndef save_msg(msg):\n # print(msg)\n # create the folder for downloadable content and move files cached in the current directory into it\n if not os.path.exists(\"Revocation\"):\n os.mkdir(\"Revocation\")\n if re.search(r\"\\<\\!\\[CDATA\\[.*撤回了一条消息\\]\\]\\>\\<\\/replacemsg\\>\", msg['Content']) is not None:\n\n # opening \\<msgid\\> tag restored; it was eaten by tag stripping\n old_msg_id = re.search(\"\\<msgid\\>(.*?)\\<\\/msgid\\>\", msg['Content']).group(1)\n\n old_msg = msg_dict.get(old_msg_id, {})\n\n # print(old_msg_id, old_msg)\n\n msg_send = r'您的好友:' \\\n + old_msg.get('msg_from', '') \\\n + r\" 在 [\" + old_msg.get('msg_time_touser', '') \\\n + r\"], 撤回了一条 [\" + old_msg.get('msg_type', '') + \"] 消息, 内容如下:\" \\\n + old_msg.get('msg_content', '')\n if old_msg.get('msg_type') == \"Sharing\":\n msg_send += r\", 链接: \" + old_msg.get('msg_url', '')\n elif old_msg.get('msg_type') == 'Picture' \\\n or old_msg.get('msg_type') == 'Recording' \\\n or old_msg.get('msg_type') == 'Video' \\\n or old_msg.get('msg_type') == 'Attachment':\n msg_send += r\", 存储在当前目录下Revocation文件夹中\"\n\n # print('************' + old_msg['msg_content'])\n\n itchat.send(msg_send, toUserName='filehelper') # forward the recall notice and its details to the file helper\n if AUTO_REPLAY:\n itchat.send('小K : 还想撤回 ? too naive!!! 消息已经保存啦' + r'[偷笑]',\n toUserName=old_msg.get('msg_from_user_name')) # tell the friend that the recalled message was saved\n try:\n shutil.move(old_msg['msg_content'], \"Revocation\")\n except FileNotFoundError:\n print('move error')\n msg_dict.pop(old_msg_id)\n\n clear_timeout_msg()\n\n\n# hot reload so that code changes do not force another QR-code scan\nitchat.auto_login(hotReload=True)\n\nautoReplyFlag, timerSet, noReply = False, False, False\nt = 0 # global timer handle used as a trigger; admittedly ugly, but it keeps the handler code simple\nmyName = itchat.get_friends(update=True)[0]['UserName']\nitchat.run()\n","sub_path":"robot_smart.py","file_name":"robot_smart.py","file_ext":"py","file_size_in_byte":14567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"550769914","text":"import argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('s1')\n parser.add_argument('s2')\n return parser.parse_args()\n\ndef stringOneEdit(s1, s2):\n print('Comparing {0} & {1}'.format(s1, s2))\n if abs(len(s1) - len(s2)) > 1:\n print('Different by more than 1')\n return\n\n # always make s1 the longer string\n if len(s1) < len(s2):\n s1, s2 = s2, s1\n\n diff = 0\n i = 0\n j = 0\n while i < len(s1) and j < len(s2) and diff < 2:\n # print('s1[{0}]{1} ?== s2[{2}]{3}'.format(i, s1[i], j, s2[j]))\n if s1[i] != s2[j]:\n diff += 1\n i += 1\n if len(s1) == len(s2):\n j += 1\n else:\n i += 1\n j += 1\n\n if diff < 2:\n print('Same or different by at most 1')\n else:\n print('Different by more than 1')\n\n\ndef main():\n args = parse_args()\n stringOneEdit(args.s1, args.s2)\n\nif __name__ == '__main__':\n main()","sub_path":"py/leetcode/string_one_edit.py","file_name":"string_one_edit.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"345325418","text":"import tensorflow as tf\nimport numpy as np\n\n# Load the data\nX = np.load('X_Test.npy')\nY = np.load('Y_Test.npy')\n\n# initial value of weights\n\nlayer_size_1 = 200 # put the same number as it is in the neural network code\nlayer_size_2 = 10\n\n# Load the trained weights (cast to float32 to match the placeholder dtype)\nw_1 = np.load('W1.npy').astype(np.float32)\nw_2 = np.load('W2.npy').astype(np.float32)\nw_3 = np.load('W3.npy').astype(np.float32)\n\n# Model\ndef model(X, w_1, w_2, w_3):\n h1 = tf.nn.tanh(tf.matmul(X, w_1))\n h2 = tf.nn.tanh(tf.matmul(h1, w_2))\n return tf.matmul(h2, w_3)\n\n# Build the graph: the loaded numpy weights are folded in as constants,\n# so only the input needs to be fed\nX_in = tf.placeholder(tf.float32, shape=[None, X.shape[1]])\nprobs = tf.nn.sigmoid(model(X_in, w_1, w_2, w_3))\n\n# Run the forward pass\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n Predicted = sess.run(probs, feed_dict={X_in: X})\n Predicted = np.round(Predicted)\n print('Testing', np.mean(Predicted == Y), sum(Y) / len(Y))\n\n\n","sub_path":"Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"283535901","text":"def addForceMomentProbe(context):\n\tobjList = ExtAPI.DataModel.Tree.ActiveObjects\n\tanalysis = objList[0].Parent\n\tfor obj in objList:\n\t\tif obj.InternalObject.Class == 403:\t\t\t# Load & BC\n\t\t\tif obj.InternalObject.AnsBCType in [13, 14, 15, 16, 17, 19, 20, 29]:\n\t\t\t\tprobe = analysis.Solution.AddForceReaction()\n\t\t\t\tprobe.Name = 'F_' + obj.Name\n\t\t\t\tprobe.BoundaryConditionSelection = obj\n\t\t\tif obj.InternalObject.AnsBCType in [13, 14, 15, 16, 17, 19, 20, 28, 29]:\n\t\t\t\tprobe 
= analysis.Solution.AddMomentReaction()\n\t\t\t\tprobe.Name = 'M_' + obj.Name\n\t\t\t\tprobe.BoundaryConditionSelection = obj\n# Class = 403\n# AnsBCType:\n# Frictionless Support 13\n# Compression Only Support 14\n# Cylindrical Support 15\n# Simply Supported 16\n# Elastic Support 17\n# Displacement 19\n# Remote Displacement 20\n# Fixed Rotation 28\n# Fixed Support 29\n# Velocity 54 (ej)","sub_path":"ForceMomentProbe/forceMomentProbe.py","file_name":"forceMomentProbe.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"581229929","text":"from math import ceil\nimport random\nimport sys\nimport time\nfrom pyfiglet import Figlet\nfrom plumbum import cli\nimport questionary\nimport nltk\nfrom nltk.corpus import words\nnltk.download('words') # Delete me after first time running\n\n\ndef delete_last_line():\n \"\"\"Delete the last line\"\"\"\n sys.stdout.write('\\x1b[1A')\n sys.stdout.write('\\x1b[2K')\n\n\nLETTERS = \"QWERTYUIOPASDFGHJKLZXCVBNM\"\nDIFFICULTIES = {'Easy': '5 12', 'Medium': '7 16', 'Hard': '10 20'}\nDIRECTIONS = {\n 0: '-1 0',\n 1: '-1 1',\n 2: '0 1',\n 3: '1 1',\n 4: '1 0',\n 5: '1 -1',\n 6: '0 -1',\n 7: '-1 -1',\n}\nINSTR = \"\"\"\nThis is a standard word search with 3 difficulties:\n -Easy: 5 words, 12x12\n -Medium: 7 words, 16x16\n -Hard: 10 words, 20x20\nThe words are imported from NLTK.\n\nIf you find a word, enter the coordinates of the first letter, separated by a space.\nAlthough the margins only show the units digit, enter the full coordinate.\nThe row goes first, and then the column.\nThe numbering starts at 1 1, which represents the top left corner.\nYou can always select 'exit program' to exit the program.\nEnjoy!\n\"\"\"\n\ndef print_block(text):\n \"\"\"Print the title\"\"\"\n print(Figlet(font='doom', justify='center').renderText(text))\n\ndef select_difficulty():\n \"\"\"Have user choose a difficulty\"\"\"\n return questionary.select(\n \"Choose a difficulty:\",\n choices=[\n 'Easy',\n 'Medium',\n 'Hard',\n ]).ask()\n\ndef found_words(to_find):\n \"\"\"Display words left to find\"\"\"\n return questionary.select(\n 'Check off when found:',\n choices=to_find\n ).ask()\n\ndef check_found_word(word):\n \"\"\"Check if the user really found a word\"\"\"\n return questionary.text(f\"Enter coordinates of the first letter of {word}:\").ask()\n\ndef create_letter_list_2d(board_size: int, random_words):\n \"\"\"Create the 2d list of letters to \"paste\" onto board\"\"\"\n def find_space(word, start_row, start_col, direction):\n del_row = int(DIRECTIONS[direction].split()[0])\n del_col = int(DIRECTIONS[direction].split()[1])\n has_space = True\n\n curr_row = start_row\n curr_col = start_col\n for i in word:\n if letter_list[curr_row][curr_col] != ' ' and letter_list[curr_row][curr_col] != i:\n has_space = False\n break\n curr_row += del_row\n curr_col += del_col\n\n if has_space:\n # Add to dictionary only if there is space\n start_pos[word] = f'{start_row + 1} {start_col + 1}'\n\n curr_row = start_row\n curr_col = start_col\n for let in word:\n # Add to 2d list only if there is space\n letter_list[curr_row][curr_col] = let\n\n curr_row += del_row\n curr_col += del_col\n return has_space\n\n letter_list = [[' ' for i in range(board_size)] for j in range(board_size)]\n start_pos = {}\n for word in random_words:\n success = False\n while not success:\n direction = random.randrange(8)\n start_coords = \"\"\n start_row = \"\"\n start_col = \"\"\n\n # N\n if direction == 0:\n 
start_row = random.randrange(len(word) - 1, board_size)\n start_col = random.randrange(board_size)\n # NE\n elif direction == 1:\n start_row = random.randrange(len(word) - 1, board_size)\n start_col = random.randrange(board_size - len(word) + 1)\n # E\n elif direction == 2:\n start_row = random.randrange(board_size)\n start_col = random.randrange(board_size - len(word) + 1)\n # SE\n elif direction == 3:\n start_row = random.randrange(board_size - len(word) + 1)\n start_col = random.randrange(board_size - len(word) + 1)\n # S\n elif direction == 4:\n start_row = random.randrange(board_size - len(word) + 1)\n start_col = random.randrange(board_size)\n # SW\n elif direction == 5:\n start_row = random.randrange(board_size - len(word) + 1)\n start_col = random.randrange(len(word) - 1, board_size)\n # W\n elif direction == 6:\n start_row = random.randrange(board_size)\n start_col = random.randrange(len(word) - 1, board_size)\n # NW\n else:\n start_row = random.randrange(len(word) - 1, board_size)\n start_col = random.randrange(len(word) - 1, board_size)\n\n start_coords = f'{start_row} {start_col}'\n success = find_space(word, int(start_coords.split()[0]),\n int(start_coords.split()[1]), direction)\n\n for i in range(board_size):\n for j in range(board_size):\n if letter_list[i][j] == ' ':\n letter_list[i][j] = random.choice(LETTERS)\n return [letter_list, start_pos]\n\ndef print_board(input_list):\n \"\"\"Paste the 2d list of letters onto a square board\"\"\"\n v_spacing_list = [f'{(i + 1) % 10} ' for i,_ in enumerate(input_list)]\n print(f\"\\n{''.join(v_spacing_list).rjust(2 * len(v_spacing_list) + 20)}\\n\")\n for i,_ in enumerate(input_list):\n h_spacing = f'{(i + 1) % 10} '\n print(h_spacing.rjust(20), end='')\n for j,_ in enumerate(input_list[i]):\n print(f'{input_list[i][j]} ', end='')\n print(f' {h_spacing} ')\n print(f\"\\n{''.join(v_spacing_list).rjust(2 * len(v_spacing_list) + 20)}\\n\")\n\ndef generate_words(num_words: int, board_size: int):\n \"\"\"Generate random words of valid length\"\"\"\n my_words = []\n while len(my_words) < num_words:\n random_word = random.choice(words.words()).upper()\n if len(random_word) in range(ceil(board_size / 4), board_size + 1):\n my_words.append(random_word)\n return my_words\n\nclass Game(cli.Application):\n VERSION = \"1.0\"\n instructions = cli.Flag(['i', 'instructions'], help=\"Read the instructions\")\n\n def main(self):\n print_block(\"Word Search\")\n\n if self.instructions:\n print(INSTR)\n time.sleep(10)\n\n diff = DIFFICULTIES[select_difficulty()].split()\n random_list = generate_words(int(diff[0]), int(diff[1]))\n random_letter_list = create_letter_list_2d(int(diff[1]), random_list)\n my_list = random_letter_list[0]\n my_pos = random_letter_list[1]\n\n print_board(my_list)\n random_list.append('exit program')\n\n print('Remaining words:')\n while len(random_list) > 1:\n has_found = found_words(random_list)\n if has_found == 'exit program':\n sys.exit()\n if check_found_word(has_found) == my_pos[has_found]:\n print(\"Nice!\")\n random_list.remove(has_found)\n else:\n print(\"Try again.\")\n time.sleep(0.5)\n for _ in range(3):\n delete_last_line()\n print(\"\\nCongrats! 
You win!\")\n\nif __name__ == \"__main__\":\n Game()\n\ndef test_create_letter_list_2d():\n \"\"\"Test the `create_letter_list_2d` method\"\"\"\n my_words = generate_words(5, 12)\n assert len(my_words) == 5\n assert len(create_letter_list_2d(12, my_words)[1]) == 5\n\ndef test_generate_words():\n \"\"\"Test the `generate_words` method\"\"\"\n my_words = generate_words(3, 8)\n for word in my_words:\n assert len(word) in range(2, 9)\n assert len(my_words) == 3\n","sub_path":"wordsearch.py","file_name":"wordsearch.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125965716","text":"\"\"\" Functions and Operations for analysing a cluster's tidal tails\n\n\"\"\"\n__author__ = \"Jeremy J Webb\"\n__all__ = [\n \"to_tail\",\n \"tail_path\",\n \"tail_path_match\",\n]\n\nfrom galpy.util import bovy_conversion,_rotate_to_arbitrary_vector\nfrom galpy import potential\nfrom galpy.potential import MWPotential2014\n\nimport numpy as np\n\nfrom .orbit import orbital_path, orbital_path_match\nfrom .operations import *\nfrom ..util.recipes import binmaker\n\nfrom ..util.plots import *\n\n\ndef to_tail(cluster):\n \"\"\"Calculate positions and velocities of stars when rotated such that clusters velocity vector\n points along x-axis\n\n - no change to coordinates in StarCluster\n\n Parameters\n ----------\n cluster : class\n StarCluster\n\n Returns\n -------\n x_tail,y_tail,z_tail,vx_tail,vy_tail,vz_tail : float\n rotated coordinates with cluster's velocity vector point along x-axis\n\n History:\n -------\n 2018 - Written - Webb (UofT)\n\n \"\"\"\n units0, origin0 = cluster.units, cluster.origin\n\n cluster.to_centre()\n\n v_vec = np.array([cluster.vxgc, cluster.vygc, cluster.vzgc])\n new_v_vec = np.array([1.0, 0.0, 0.0])\n\n rot = _rotate_to_arbitrary_vector(\n np.atleast_2d(v_vec), new_v_vec, inv=False, _dontcutsmall=False\n )\n\n x_tail = (\n cluster.x * rot[:, 0, 0] + cluster.y * rot[:, 1, 0] + cluster.z * rot[:, 2, 0]\n )\n y_tail = (\n cluster.x * rot[:, 0, 1] + cluster.y * rot[:, 1, 1] + cluster.z * rot[:, 2, 1]\n )\n z_tail = (\n cluster.x * rot[:, 0, 2] + cluster.y * rot[:, 1, 2] + cluster.z * rot[:, 2, 2]\n )\n vx_tail = (\n cluster.vx * rot[:, 0, 0] + cluster.vy * rot[:, 1, 0] + cluster.vz * rot[:, 2, 0]\n )\n vy_tail = (\n cluster.vx * rot[:, 0, 1] + cluster.vy * rot[:, 1, 1] + cluster.vz * rot[:, 2, 1]\n )\n vz_tail = (\n cluster.vx * rot[:, 0, 2] + cluster.vy * rot[:, 1, 2] + cluster.vz * rot[:, 2, 2]\n )\n\n cluster.to_origin(origin0)\n\n return x_tail,y_tail,z_tail,vx_tail,vy_tail,vz_tail\n\ndef tail_path(\n cluster, dt=0.1, nt=100, pot=MWPotential2014, from_centre=False, ro=8.0, vo=220.0,\n plot=False\n):\n \"\"\"Calculate tail path +/- dt Gyr around the cluster\n\n Parameters\n ----------\n cluster : class\n StarCluster\n dt : float\n timestep that StarCluster is to be moved to\n nt : int\n number of timesteps\n pot : class\n galpy Potential that orbit is to be integrate in (default: MWPotential2014)\n from_centre : bool\n genrate orbit from cluster's exact centre instead of its assigned galactocentric coordinates (default: False)\n ro :float \n galpy distance scale (Default: 8.)\n vo : float\n galpy velocity scale (Default: 220.)\n plot : bool\n plot a snapshot of the cluster in galactocentric coordinates with the orbital path (defualt: False)\n\n Returns\n -------\n t : float\n times for which path is provided\n x,y,z : float\n tail path positions\n vx,vy,vz : float\n tail path velocities\n 
History\n    -------\n    2018 - Written - Webb (UofT)\n    2019 - Implemented numpy array preallocation to minimize runtime - Nathaniel Starkman (UofT)\n    \"\"\"\n\n    units0, origin0 = save_cluster(cluster)\n    cluster.to_galaxy()\n    cluster.to_kpckms()\n\n    to, xo, yo, zo, vxo, vyo, vzo, o = orbital_path(\n        cluster,\n        dt=dt,\n        nt=nt,\n        pot=pot,\n        from_centre=from_centre,\n        initialize=True,\n        ro=ro,\n        vo=vo,\n    )\n    tstar, dprog, dpath = orbital_path_match(\n        cluster=cluster, dt=dt, nt=nt, pot=pot, from_centre=from_centre, ro=ro, vo=vo\n    )\n\n    t_lower, t_mid, t_upper, t_hist = binmaker(to, nbin=nt)\n    ttail = []\n    xtail = []\n    ytail = []\n    ztail = []\n    vxtail = []\n    vytail = []\n    vztail = []\n\n    for i in range(0, len(t_mid)):\n        indx = (tstar >= t_lower[i]) * (tstar <= t_upper[i])\n        if np.sum(indx) > 0:\n            ttail = np.append(ttail, t_mid[i])\n            xtail = np.append(xtail, np.mean(cluster.x[indx]))\n            ytail = np.append(ytail, np.mean(cluster.y[indx]))\n            ztail = np.append(ztail, np.mean(cluster.z[indx]))\n            vxtail = np.append(vxtail, np.mean(cluster.vx[indx]))\n            vytail = np.append(vytail, np.mean(cluster.vy[indx]))\n            vztail = np.append(vztail, np.mean(cluster.vz[indx]))\n\n    if plot:\n        # NOTE: the signature of tail_path does not accept **kwargs, so the original\n        # kwargs.pop calls raised a NameError; fall back to the defaults directly\n        filename = None\n        overplot = False\n        starplot(cluster, coord='xy', overplot=overplot)\n        _lplot(xtail, ytail, overplot=True)\n\n        if filename is not None:\n            plt.savefig(filename)\n\n    return_cluster(cluster, units0, origin0)\n\n    return ttail, xtail, ytail, ztail, vxtail, vytail, vztail\n\n\ndef tail_path_match(\n    cluster,\n    dt=0.1,\n    nt=100,\n    pot=MWPotential2014,\n    from_centre=False,\n    to_path=False,\n    do_full=False,\n    ro=8.0,\n    vo=220.0,\n    plot=False,\n    **kwargs\n):\n    \"\"\"Match stars to a position along the tail path of the cluster\n\n    Parameters\n    ----------\n    cluster : class\n        StarCluster\n    dt : float\n        timestep that StarCluster is to be moved to\n    nt : int\n        number of timesteps\n    pot : class\n        galpy Potential that orbit is to be integrated in (default: MWPotential2014)\n    from_centre : bool\n        generate orbit from cluster's exact centre instead of its assigned galactocentric coordinates (default: False)\n    to_path : bool\n        measure distance to the path itself instead of distance to central point along the path (default: False)\n    do_full : bool\n        calculate dpath all at once in a single numpy array (can be memory intensive) (default: False)\n    ro : float\n        galpy distance scale (Default: 8.)\n    vo : float\n        galpy velocity scale (Default: 220.)\n    plot : bool\n        plot a snapshot of the cluster in galactocentric coordinates with the orbital path (default: False)\n    kwargs : dict\n        keyword arguments passed to the plotting routines (e.g. filename, overplot)\n\n    Returns\n    -------\n    tstar : float\n        orbital time associated with star\n    dprog : float\n        distance along the path to the progenitor\n    dpath :\n        distance to centre of the tail path bin (default) or the tail path (to_path = True)\n\n    History\n    -------\n    2018 - Written - Webb (UofT)\n    \"\"\"\n    units0, origin0 = save_cluster(cluster)\n    cluster.to_galaxy()\n    cluster.to_kpckms()\n\n    ts, x, y, z, vx, vy, vz = tail_path(\n        cluster, dt=dt, nt=nt, pot=pot, from_centre=from_centre, ro=ro, vo=vo\n    )\n    pindx = np.argmin(np.fabs(ts))\n\n    dx = np.tile(x, cluster.ntot).reshape(cluster.ntot, len(ts)) - np.repeat(\n        cluster.x, len(ts)\n    ).reshape(cluster.ntot, len(ts))\n    dy = np.tile(y, cluster.ntot).reshape(cluster.ntot, len(ts)) - np.repeat(\n        cluster.y, len(ts)\n    ).reshape(cluster.ntot, len(ts))\n    dz = np.tile(z, cluster.ntot).reshape(cluster.ntot, len(ts)) - np.repeat(\n        cluster.z, len(ts)\n    ).reshape(cluster.ntot, len(ts))\n    dr = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 
2.0)\n\n indx = np.argmin(dr, axis=1)\n dpath = np.amin(dr, axis=1)\n tstar = ts[indx] # *bovy_conversion.time_in_Gyr(ro=ro,vo=vo)\n\n dxo = x[1:] - x[0:-1]\n dyo = y[1:] - y[0:-1]\n dzo = z[1:] - z[0:-1]\n\n dprogx = np.cumsum(np.fabs(dxo))\n dprogy = np.cumsum(np.fabs(dyo))\n dprogz = np.cumsum(np.fabs(dzo))\n\n dprogx = np.insert(dprogx, 0, 0.0)\n dprogy = np.insert(dprogy, 0, 0.0)\n dprogz = np.insert(dprogz, 0, 0.0)\n\n dprogr = np.sqrt(dprogx ** 2.0 + dprogy ** 2.0 + dprogz ** 2.0)\n dprog = dprogr[indx] - dprogr[pindx]\n\n # Find distance to path instead of to central point\n if to_path:\n dxo = np.append(dxo, dxo[-1])\n dyo = np.append(dyo, dyo[-1])\n dzo = np.append(dzo, dzo[-1])\n\n if do_full:\n # Typically it is too expensive to calculate dpath all at once, but will allow option via do_full\n\n ovec = np.column_stack([dxo, dyo, dzo])\n mag_ovec = np.sqrt(dxo ** 2.0 + dyo ** 2.0 + dzo ** 2.0)\n svec = np.column_stack([dx[:, indx], dy[:, indx], dz[:, indx]])\n mag_svec = dr[:, indx]\n theta = np.arccos(np.dot(ovec[indx], svec) / (mag_ovec[indx] * mag_svec))\n dpath = mag_svec * np.sin(theta)\n else:\n # Need to optimize this via numba\n dpath = np.array([])\n for i in range(0, cluster.ntot):\n ovec = [dxo[indx[i]], dyo[indx[i]], dzo[indx[i]]]\n mag_ovec = np.sqrt(\n dxo[indx[i]] ** 2.0 + dyo[indx[i]] ** 2.0 + dzo[indx[i]] ** 2.0\n )\n\n svec = [dx[i, indx[i]], dy[i, indx[i]], dz[i, indx[i]]]\n mag_svec = dr[i, indx[i]]\n\n theta = np.arccos(\n (ovec[0] * svec[0] + ovec[1] * svec[1] + ovec[2] * svec[2])\n / (mag_ovec * mag_svec)\n )\n dpath = np.append(dpath, mag_svec * np.sin(theta))\n\n # Assign negative to stars with position vectors in opposite direction as local angular momentum vector\n rgc = np.column_stack([x[indx], y[indx], z[indx]])\n vgc = np.column_stack([vx[indx], vy[indx], vz[indx]])\n lz = np.cross(rgc, vgc)\n\n rstar = np.column_stack(\n [cluster.x - x[indx], cluster.y - y[indx], cluster.z - z[indx]]\n )\n\n ldot = np.sum(rstar * lz, axis=1)\n dpath[ldot < 0] *= -1\n\n if plot:\n filename = kwargs.pop(\"filename\", None)\n overplot = kwargs.pop(\"overplot\", False)\n _scatter(dprog,dpath,xlabel=\"Dprog\",ylabel=\"Dpath\",overplot=overplot)\n\n if filename != None:\n plt.savefig(filename)\n\n return_cluster(cluster, units0, origin0)\n\n return np.array(tstar), np.array(dprog), np.array(dpath)","sub_path":"clustertools/analysis/tails.py","file_name":"tails.py","file_ext":"py","file_size_in_byte":9705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"592830275","text":"#!/usr/bin/env python3\n\n########################################################## \ndef getFloats(string):\n numbers_str = string.split()\n numbers_float = [float(x) for x in numbers_str] \n return numbers_float\n\n\n######################################################## \ndef getProb(kmer, probs):\n prod = 1.0\n ll = len(kmer)\n for i in range(0,ll):\n ch = kmer[i]\n prod = prod * probs[ch][i] \n \n return prod \n######################################################################## \ndef getKMers(s, k):\n ll = len(s)\n kmers = [ ] \n for i in range(0,ll-k+1):\n kmers.append(s[i:i+k])\n\n return kmers\n \n\n######################################################## \ndef mostProb(s, probs, t):\n mostp = \"\"\n pp = 0\n allkm = getKMers(s, t)\n \n for km in allkm:\n pp1 = getProb(km, probs) \n if pp1 > pp:\n pp = pp1\n mostp = km \n\n return mostp\n\n######################################################## \n \n\nfin = 
open(\"input.dat\",\"r\")\ndna2 = fin.readline()\ndna = dna2.strip()\n\nts = fin.readline()\nt = int(ts.strip())\n\ncharss = fin.readline()\ncharss2 = charss.strip()\n\nchars = charss2.split() # split the stripped header line into symbols\nprint(chars)\nprobs = {}\n\nfor ch in chars:\n probs[ch] = []\n\nfor i in range(0,t):\n line = fin.readline()\n floats = getFloats(line)\n ff = 0\n for ch in chars:\n probs[ch].append(floats[ff])\n ff = ff + 1\n \nprint(mostProb(dna, probs, t))\n \n","sub_path":"p3-3.py","file_name":"p3-3.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"425863156","text":"import operator\n\nfull_text = input(\"Give me some text: \")\n\nremove_these = [',','.','?','(',')','-']\nfor thing in remove_these:\n    full_text = full_text.replace(thing,'')\n\n\nword_list = full_text.lower().split()\n\nword_count = {}\n\nfor word in word_list:\n    if word in word_count:\n        word_count[word] += 1\n    else:\n        word_count[word] = 1\n\nsorted_count = sorted(word_count.items(), key=operator.itemgetter(1), reverse=False)\n    \n\n\nfor word_tuple in sorted_count:\n    print(f'{word_tuple[0].capitalize()} - {word_tuple[1]}')\n\nprint(f'\\nThere are {len(sorted_count)} unique words!')","sub_path":"inputs.py","file_name":"inputs.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"563480127","text":"from telegram import KeyboardButton, InlineKeyboardButton\n################################################\n# ADMIN\n################################################\n\n# MAILING\nmailing_photo = [[InlineKeyboardButton(\"✅ Да\", callback_data='mailing_photo_yes'), InlineKeyboardButton(\"❌ Нет\", callback_data='mailing_photo_no')],\n                 [InlineKeyboardButton('Отмена', callback_data='mail_decline')]]\n\nmailing_send = [[InlineKeyboardButton(\"❌ Отмена\", callback_data='mailing_send_no')], [InlineKeyboardButton(\"📨 Начать\", callback_data='mailing_send_yes')]]\n# MAILING\n################################################\n# USER\n################################################\n\n# BUTTONS\nd_language = {'ru': \"🇷🇺 Русский\",\n              'uz': \"🇺🇿 Узбекский\"\n              }\n\nd_menu = {'ru': {'products': \"💪 Продукция\",\n                 'about': \"☎ О нас\",\n                 'news': \"❓ Новости\",\n                 'actions': \"🎁 Акции\",\n                 'main_menu': \"⬅ Главное меню\",\n                 'change_language': \"🇷🇺🔄🇺🇿 Сменить язык\",\n                 'basket': \"🛒 Корзина\",\n                 'partnership': \"🤝 Партнерка\",\n                 'cancel': \"❌ Отмена\"},\n\n          'uz': {'products': \"💪 Продукция\",\n                 'about': \"☎ О нас\",\n                 'news': \"❓ Новости\",\n                 'actions': \"🎁 Акции\",\n                 'main_menu': \"⬅ Главное меню\",\n                 'change_language': \"🇺🇿🔄🇷🇺 Сменить язык\",\n                 'basket': \"🛒 Корзина\",\n                 'partnership': \"🤝 Партнерка\",\n                 'cancel': \"❌ Отмена\"}\n          }\n\nd_action = {'ru': {'next': \"Далее ➡\", 'back': \"⬅ Назад\"},\n            'uz': {'next': \"Далее ➡\", 'back': \"⬅ Назад\"}\n            }\n\nd_products = {'ru': {'healthy': \"🥦 Здоровое питание\", 'sport': \"🏋️‍♂️Спортивное питание\"},\n              'uz': {'healthy': \"🥦 Здоровое питание\", 'sport': \"🏋️‍♂️Спортивное питание\"}\n              }\n\nback = {'ru': \"⬅ Назад\",\n        'uz': \"⬅ Назад\"}\n\nadd_basket = {'ru': '''🛒 Добавить в корзину''',\n              'uz': '''🛒 Добавить в корзину'''}\n\n\nd_basket = {'ru': {'clear_basket': \"❌ Очистить корзину\", 'order': \"🚗 Заказать\", 'promocode': \"🔲 Промокод\"},\n            'uz': {'clear_basket': \"❌ Очистить корзину\", 'order': \"🚗 Заказать\", 'promocode': \"🔲 Промокод\"}\n            }\n\nd_partnership = {'ru': {'statistic': \"📊 Статистика\", 'application': \"✏ 
Оставить заявку\", 'info': \"❓ О партнерстве\"},\n 'uz': {'statistic': \"📊 Статистика\", 'application': \"✏ Оставить заявку\", 'info': \"❓ О партнерстве\"}\n }\n\nd_contact = {'ru': {'location': \"📍 Поделиться локацией\", 'contact': \"📞 Поделиться контактом\"},\n 'uz': {'location': \"📍 Поделиться локацией\", 'contact': \"📞 Поделиться контактом\"}\n }\n\nd_type_delivery = {'ru': {'self': \"✋ Самовывоз\", 'delivery': \"🚗 Доставка\"},\n 'uz': {'self': \"✋ Самовывоз\", 'delivery': \"🚗 Доставка\"}\n }\n\nd_type_payment = {'ru': {'cash': \"💵 Наличные\", 'payme': \"💳 PayMe\"},\n 'uz': {'cash': \"💵 Наличные\", 'payme': \"💳 PayMe\"}}\n\nd_status_order = {'accept_order': \"✅ Принять\", 'decline_order': \"❌ Отклонить\"}\n# КНОПКИ\n\n################################################\n\n# ЯЗЫК\nlanguage = [[d_language['uz']],\n [d_language['ru']]\n ]\n# ЯЗЫК\n\n\n# МЕНЮ\nmain_menu = {'ru': [[d_menu['ru']['products']],\n [d_menu['ru']['about'], d_menu['ru']['basket']],\n [d_menu['ru']['news'], d_menu['ru']['actions']],\n [d_menu['ru']['partnership']],\n [d_menu['ru']['change_language']]\n ],\n\n 'uz': [[d_menu['uz']['products']],\n [d_menu['uz']['about'], d_menu['uz']['basket']],\n [d_menu['uz']['news'], d_menu['uz']['actions']],\n [d_menu['uz']['partnership']],\n [d_menu['uz']['change_language']]\n ],\n }\n# МЕНЮ\n\n\n# ПРОДУКТЫ\nproducts = {'ru': [[d_products['ru']['healthy'], d_products['ru']['sport']],\n [d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[d_products['ru']['healthy'], d_products['ru']['sport']],\n [d_menu['ru']['main_menu']]\n ],\n }\n\nback_category_healthy = {'ru': [InlineKeyboardButton(back['ru'], callback_data='back_category_healthy')],\n 'uz': [InlineKeyboardButton(back['uz'], callback_data='back_category_healthy')]\n }\n\nback_category_sport = {'ru': [InlineKeyboardButton(back['ru'], callback_data='back_category_sport')],\n 'uz': [InlineKeyboardButton(back['uz'], callback_data='back_category_sport')]\n }\n# ПРОДУКТЫ\n\n\n# ДОБАВЛЕНИЕ ПРОДУКТА\ncount_product = [['1', '2', '3'],\n ['4', '5', '6'],\n ['7', '8', '9']]\n\nbasket = {'ru': [[d_basket['ru']['order']],\n [d_basket['ru']['clear_basket'], d_basket['ru']['promocode']],\n [d_menu['ru']['main_menu']]],\n\n 'uz': [[d_basket['uz']['order']],\n [d_basket['uz']['clear_basket'], d_basket['uz']['promocode']],\n [d_menu['uz']['main_menu']]]\n }\n\ncancel = {'ru': [[d_menu['ru']['cancel']]\n ],\n\n 'uz': [[d_menu['uz']['cancel']]\n ],\n }\n# ДОБАВЛЕНИЕ ПРОДУКТА\n\n\n# ПАРТНЕРКА\npartnership = {'ru': [[d_partnership['ru']['application']],\n [d_partnership['ru']['info']],\n [d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[d_partnership['uz']['application']],\n [d_partnership['uz']['info']],\n [d_menu['uz']['main_menu']]\n ]\n }\n\n\npartnership_already = {'ru': [[d_partnership['ru']['info']],\n [d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[d_partnership['uz']['info']],\n [d_menu['uz']['main_menu']]\n ]\n }\n\npartnership_statistic = {'ru': [[d_partnership['ru']['statistic']],\n [d_partnership['ru']['info']],\n [d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[d_partnership['ru']['statistic']],\n [d_partnership['uz']['info']],\n [d_menu['uz']['main_menu']]\n ]\n }\n# ПАРТНЕРКА\n\n\n# КОНТАКТЫ\ncontact = {'ru': [[KeyboardButton(d_contact['ru']['contact'], request_contact=True)],\n [d_menu['ru']['main_menu']]\n ],\n 'uz': [[KeyboardButton(d_contact['uz']['contact'], request_contact=True)],\n [d_menu['uz']['main_menu']]\n ]\n }\n\nlocation = {'ru': [[KeyboardButton(d_contact['ru']['location'], request_location=True)],\n [d_menu['ru']['basket']],\n 
[d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[KeyboardButton(d_contact['uz']['location'], request_location=True)],\n [d_menu['ru']['basket']],\n [d_menu['uz']['main_menu']]\n ]\n }\n# КОНТАКТЫ\n\n\n# ЗАКАЗ\ntype_delivery = {'ru': [[d_type_delivery['ru']['self'], d_type_delivery['ru']['delivery']],\n [d_menu['ru']['basket']],\n [d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[d_type_delivery['uz']['self'], d_type_delivery['uz']['delivery']],\n [d_menu['uz']['basket']],\n [d_menu['uz']['main_menu']]\n ]\n }\n\ntype_payment = {'ru': [[d_type_payment['ru']['cash'], d_type_payment['ru']['payme']],\n [d_menu['ru']['basket']],\n [d_menu['ru']['main_menu']]\n ],\n\n 'uz': [[d_type_payment['uz']['cash'], d_type_payment['uz']['payme']],\n [d_menu['ru']['basket']],\n [d_menu['ru']['main_menu']]\n ]\n }\n\nstatus_order = [[InlineKeyboardButton(d_status_order['accept_order'], callback_data='accept_order')],\n [InlineKeyboardButton(d_status_order['decline_order'], callback_data='decline_order')]]\n# ЗАКАЗ","sub_path":"bot_core/keyboardbot.py","file_name":"keyboardbot.py","file_ext":"py","file_size_in_byte":9434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390459042","text":"# -*- encoding: utf-8 -*-\nimport json\nimport requests\nfrom app.configuration import config\n\n\ndef send_message(recipient_id, message_text):\n\n params = {\"access_token\": config['default'].PAGE_ACCESS_TOKEN\n }\n headers = {\"Content-Type\": \"application/json\"\n }\n data = json.dumps({\"recipient\": {\n \"id\": recipient_id\n },\n \"message\": {\n \"text\": message_text\n }\n })\n requests.post(\"https://graph.facebook.com/v2.6/me/messages\", params=params, headers=headers, data=data)\n\n\ndef send_quick_reply(recipient_id, message_text, replies=('Yes!', 'No!')):\n\n params = {\"access_token\": config['default'].PAGE_ACCESS_TOKEN\n }\n headers = {\"Content-Type\": \"application/json\"\n }\n quick_replies = [{\"content_type\": \"text\",\n \"title\": word,\n \"payload\": \"DEVELOPER_DEFINED_PAYLOAD_FOR_PICKING_GREEN\"\n } for word in replies]\n\n data = json.dumps({\n \"recipient\": {\n \"id\": recipient_id\n },\n \"message\": {\n \"text\": message_text,\n \"quick_replies\": quick_replies\n\n }\n })\n requests.post(\"https://graph.facebook.com/v2.6/me/messages\", params=params, headers=headers, data=data)\n\n\ndef reply(user_id, msg):\n data = {\n \"recipient\": {\"id\": user_id},\n \"message\": {\"text\": msg}\n }\n requests.post(\"https://graph.facebook.com/v2.6/me/messages?access_token=\" + config['default'].PAGE_ACCESS_TOKEN,\n json=data)","sub_path":"app/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231377894","text":"from django.shortcuts import render\nfrom django.views import View\nfrom Tin.models import Tin, LoaiTin, TheLoai, Comment\nfrom django.http import HttpResponse\n\nfrom chung.khongDau import convert, convert1\n\n\n# Create your views here.\n\n\nclass getTrangchu(View):\n\n def get(self, request):\n theloai_base = TheLoai.objects.all()\n lt_base = dict()\n for tl in theloai_base:\n lt_base[tl] = LoaiTin.objects.filter(idTheLoai=tl.pk)\n tinxemnhieu_trangChu = Tin.objects.order_by('-view')[:5]\n tin_trangChu = Tin.objects.all()[:5]\n return render(request, 'homepage/trangchu.html',\n {'theloai_base': theloai_base, 'lt_base': lt_base, 'tinxemnhieu_trangChu': tinxemnhieu_trangChu,\n 'tin_trangChu': 
tin_trangChu})\n\n\nclass getTheloai(View):\n    def get(self, request, theloaiPost):\n        theloai_base = TheLoai.objects.all()\n        lt_base = dict()\n        for tl in theloai_base:\n            lt_base[tl] = LoaiTin.objects.filter(idTheLoai=tl.pk)\n        tinxemnhieu = Tin.objects.order_by('-view')[:5]\n\n        tl = TheLoai.objects.filter(tenKhongDau=theloaiPost).first()\n        tin = Tin.objects.filter(id_tl=tl.pk).order_by('-create_at')\n        # remove this cap once pagination is added\n        if (len(tin) > 5):\n            tin = tin[:5]\n        return render(request, 'homepage/theloai.html',\n                      {'theloai_base': theloai_base, 'lt_base': lt_base, 'tinxemnhieu': tinxemnhieu\n                          , 'tin_theLoai': tin, 'theloai': tl})\n\n\nclass getTin(View):\n    # note: images are not displayed on this view\n\n    def get(self, request, tentin):\n        theloai_base = TheLoai.objects.all()\n        lt_base = dict()\n        for tl in theloai_base:\n            lt_base[tl] = LoaiTin.objects.filter(idTheLoai=tl.pk)\n        tinxemnhieu = Tin.objects.order_by('-view')[:5]\n\n        tin = Tin.objects.filter(tieuDeKhongDau=tentin).first()\n        return render(request, 'homepage/tin.html',\n                      {'theloai_base': theloai_base, 'lt_base': lt_base, 'tinxemnhieu': tinxemnhieu, 'tin': tin})\n\n\nclass getLoaiTin(View):\n    def get(self, request, tenloaitin):\n        theloai_base = TheLoai.objects.all()\n        lt_base = dict()\n        for tl in theloai_base:\n            lt_base[tl] = LoaiTin.objects.filter(idTheLoai=tl.pk)\n        tinxemnhieu = Tin.objects.order_by('-view')[:5]\n\n        tl = LoaiTin.objects.filter(tenKhongDau=tenloaitin).first()\n        tin = Tin.objects.filter(id_lt=tl.pk).order_by('-create_at')\n        # remove this cap once pagination is added\n        if (len(tin) > 5):\n            tin = tin[:5]\n        return render(request, 'homepage/theloai.html',\n                      {'theloai_base': theloai_base, 'lt_base': lt_base, 'tinxemnhieu': tinxemnhieu\n                          , 'tin_theLoai': tin, 'theloai': tl})\n","sub_path":"Authen/Tin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"371645277","text":"# Slowest of the approaches, but it demonstrates numpy vectorization (np.vectorize still loops in Python under the hood)\nimport numpy as np\n\ndef myfunc(a):\n    if a % 3 == 0 or a % 5 == 0:\n        return a\n    else:\n        return 0\n\narray = np.arange(1000)\nvfunc = np.vectorize(myfunc)\ns = np.sum(vfunc(array))\n","sub_path":"problem1/multiplevector.py","file_name":"multiplevector.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"275964441","text":"Import('env')\n\nif not env['GLUT']:\n    Return()\n\nenv = env.Clone()\n\nenv.Prepend(CPPPATH = [\n\t'../util',\n])\n\nenv.Prepend(LIBS = ['$GLUT_LIB'])\n\nprogs = [\n    'fp-tri',\n    'tri-depth',\n    'tri-depth2',\n    'tri-depthwrite',\n    'tri-depthwrite2',\n    'tri-inv',\n    'tri-param',\n    'tri-tex',\n    'point-position',\n]\n\nfor prog in progs:\n    env.Program(\n        target = prog,\n        source = [prog + '.c'],\n        )\n","sub_path":"progs/fp/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"5720667","text":"import flask\n\nfrom .. 
import auth\n\n\n@auth.bp.route(\"/logout\",methods=(\"GET\",\"POST\"))\ndef logout():\n    if flask.request.method==\"GET\":\n        # NOTE: the original HTML here was stripped during extraction; a minimal\n        # logout form is assumed.\n        return \"\"\"<form method=\"post\"><input type=\"submit\" value=\"Log out\"></form>\"\"\"\n    else:\n        if auth.check_client_session():\n            conn=auth.connectDB()\n            cur=conn.cursor()\n            \n            cur.execute(\"update users set session=%s where id=%s;\",(auth.generate_salt(),flask.session.get(\"user_id\")))\n            \n            conn.commit()\n            conn.close()\n            \n            flask.session.pop(\"user_id\")\n            flask.session.pop(\"session\")\n        \n        return \"{}\",{\"Content-Type\":\"application/json\"}","sub_path":"src/auth/logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"154931848","text":"import datetime, time, os\nfrom datetime import timedelta\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog\n\nclass Buttons():\n\tdef __init__(self):\n\t\tw = tk.Tk(screenName = None, baseName = None, className = \" Main Control Panel\", useTk = 1)\n\t\tw.title(\"Control Panel\")\n\n\t\tdef opentl():\n\t\t\tw2 = Toplevel(w)\n\t\t\tw2.title(\"Select Time Period\")\n\t\t\tw2.withdraw()\n\n\t\t\tframe1 = Frame(w2)\n\t\t\tframe1.pack()\n\n\t\t\tself.v = tk.IntVar()\n\t\t\trbyes = Radiobutton(frame1, text = \"Yesterday\", variable = self.v, value = '1', command = self.v.set(\"1\"))\n\t\t\trbyes.grid(row = 1, column = 1)\n\t\t\trblw = Radiobutton(frame1, text = \"Last week\", variable = self.v, value = '2', command = self.v.set(\"2\"))\n\t\t\trblw.grid(row = 1, column = 2)\n\t\t\trblm = Radiobutton(frame1, text = \"Last month\", variable = self.v, value = '3', command = self.v.set(\"3\"))\n\t\t\trblm.grid(row = 1, column = 3)\n\t\t\tframe2 = Frame(w2)\n\t\t\tframe2.pack()\n\t\t\tself.but4 = tk.Button(frame2, text = \"Graph\", command = self.processRb)\n\t\t\tself.but4.grid(row = 1, column = 2, padx = \"10\")\n\t\t\tw2.mainloop()\n\n\n\t\tself.lab1 = tk.Label(w, text = \"Close the program.\", wraplength = \"150\")\n\t\tself.lab1.grid(row = 2, column = 3, padx = \"10\")\n\t\tself.lab2 = tk.Label(w, text = \"Graphs a chosen dataset\", wraplength = \"150\")\n\t\tself.lab2.grid(row = 2, column = 2, padx = \"10\")\n\t\tself.lab3 = tk.Label(w, text = \"See previous trends, ending with yesterday's full data set.\", wraplength = \"150\")\n\t\tself.lab3.grid(row = 2, column = 1, padx = \"10\")\n\t\tself.but1 = tk.Button(w, text = \"Close Window\", command = w.destroy)\n\t\tself.but1.grid(row = 1, column = 3, pady = \"10\", padx = \"10\")\n\t\tself.but2 = tk.Button(w, text = \"Graph 1 Day\", command = self.plot)\n\t\tself.but2.grid(row = 1, column = 2, padx = \"10\")\n\t\tself.but3 = tk.Button(w, text = \"Show Previous Trends\", command = opentl)\n\t\tself.but3.grid(row = 1, column = 1, padx = \"10\")\n\n\n\t\tw.mainloop()\n\n\tdef processRb(self):\n\t\tprint(self.v.get())\n\n\t\tif self.v.get() == 1:\n\t\t\tself.prev(1)\n\t\telif self.v.get() == 2:\n\t\t\tself.prev(7)\n\t\telif self.v.get() == 3:\n\t\t\tself.prev(30)\n\n\n\tdef prev(self, val):\n\t\tatemps = []\n\t\tahums = []\n\t\ttimes = []\n\n\t\tdate = datetime.date.today()\n\t\tlength = []\n\t\tdelta = timedelta(days = 1)\n\n\t\tfor i in range(val):\n\t\t\tdate = date - delta\n\t\t\tlength.append(date)\n\n\t\tfor j in range(len(length)):\n\t\t\tfdate = length[j].strftime(\"%Y-%m-%d\")\n\t\t\tdf = pd.DataFrame(pd.read_csv((os.environ['HOME'] + \"/TempHum_Results/\" + fdate + \"_results.csv\"), sep=',', 
index_col=1))\n\t\t\tdf.set_axis(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'], axis='columns', inplace=True)\n\n\t\t\ttime = df.index\n\t\t\tcounttime = len(time)\n\t\t\tdivtime = counttime // 8\n\t\t\ttt = 0\n\n\t\t\tfdate2 = length[j].strftime(\"%d %b\")\n\t\t\ttimes.append(time[tt] + \", \" + fdate2)\n\t\t\tatemps.append(df.iloc[tt]['B'])\n\t\t\tahums.append(df.iloc[tt]['C'])\n\n\t\t\tfor k in range(7):\n\t\t\t\ttt += divtime\n\t\t\t\ttimes.append(time[tt] + \", \" + fdate2)\n\t\t\t\tatemps.append(df.iloc[tt]['B'])\n\t\t\t\tahums.append(df.iloc[tt]['C'])\n\n\t\ttimes.reverse()\n\n\t\tplt.figure()\n\t\tax1 = plt.subplot(211)\n\t\tax1.plot(times, atemps, color = \"black\", label = \"Average\", linewidth = 3.0)\n\t\tplt.ylabel('Temperature (*C)')\n\t\tplt.xticks(self.xtickval(times))\n\t\tplt.setp(ax1.get_xticklabels(), rotation = -25, ha = \"left\")\n\t\tplt.grid(True)\n\n\n\t\tax2 = plt.subplot(212)\n\t\tax2.plot(times, ahums, color = \"black\", label = \"Average\", linewidth = 3.0)\n\t\tplt.ylabel('Humidity (%)')\n\t\tplt.xticks(self.xtickval(times))\n\t\tplt.setp(ax2.get_xticklabels(), rotation = -25, ha = \"left\")\n\t\tplt.grid(True)\n\t\tval -= 1\n\t\tplt.suptitle(\"Temperature and Humidity for \" + str(length[val]) + \" to \" + str(length[0]))\n\n\n\t\tmng = plt.get_current_fig_manager()\n\t\tmng.resize(*mng.window.maxsize())\n\n\t\tplt.show()\n\n\tdef xtickval(self, value):\n\t\ttoUse = []\n\t\tfor i in range(len(value)):\n\t\t\tif len(value) <= 25:\n\t\t\t\ttoUse.append(value[i])\n\t\t\telif len(value) > 25 and len(value) < 300:\n\t\t\t\tif (i % 8 == 0) == True:\n\t\t\t\t\ttoUse.append(value[i])\n\t\t\telif len(value) >= 300:\n\t\t\t\tif (i % 50 == 0) == True:\n\t\t\t\t\ttoUse.append(value[i])\n\t\treturn toUse\n\n\tdef plot(self):\n\t\tdf = pd.DataFrame(self.newfile())\n\t\ttimes = df.index\n\t\tdf.set_axis(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'], axis='columns', inplace=True)\n\t\tatemps = df['B']\n\t\tahums = df['C']\n\n\t\tplt.figure()\n\t\tax1 = plt.subplot(211)\n\t\tax1.plot(times, atemps, color = \"black\", label = \"Average\", linewidth = 3.0)\n\t\tax1.plot(times, df['D'], color = \"green\", label = \"Sensor 1\")\n\t\tax1.plot(times, df['F'], color = \"blue\", label = \"Sensor 2\")\n\t\tax1.plot(times, df['H'], color = \"purple\", label = \"Sensor 3\")\n\t\tplt.ylabel('Temperature (*C)')\n\t\tplt.xticks(self.xtickval(times))\n\t\tplt.setp(ax1.get_xticklabels(), rotation = -25, ha = \"left\")\n\t\toffset = -72\n\t\tbbox = dict(boxstyle=\"round\", fc=\"0.8\")\n\t\tarrowprops = dict(arrowstyle = \"->\", connectionstyle = \"angle, angleA = 0, angleB = 90, rad = 10\")\n\t\tax1.annotate((\n\t\t\t\"Maximum \" + str(atemps.max()) + '*C at' + str(atemps.idxmax())),\n\t\t\txy=(atemps.idxmax(), atemps.max()),\n\t\t\txytext=(offset, 2.5*offset), textcoords='offset points',\n\t\t\tbbox=bbox, arrowprops = arrowprops)\n\t\tplt.grid(True)\n\t\tplt.legend(bbox_to_anchor = (1.001, 1), loc = 'upper left', borderaxespad = 0)\n\n\t\tax2 = plt.subplot(212)\n\t\tax2.plot(times, ahums, color = \"black\", label = \"Average\", linewidth = 3.0)\n\t\tax2.plot(times, df['E'], color = \"green\", label = \"Sensor 1\")\n\t\tax2.plot(times, df['G'], color = \"blue\", label = \"Sensor 2\")\n\t\tax2.plot(times, df['I'], color = \"purple\", label = \"Sensor 3\")\n\t\tplt.xlabel('Time between' + str(min(times)) + 'and' + str(max(times)))\n\t\tplt.ylabel('Humidity (%)')\n\t\tplt.xticks(self.xtickval(times))\n\t\tplt.setp(ax2.get_xticklabels(), rotation = -25, ha = 
\"left\")\n\t\tax2.annotate((\n\t\t\t\"Maximum \" + str(ahums.max()) + '% at' + str(ahums.idxmax())),\n\t\t\txy=(ahums.idxmax(), ahums.max()),\n\t\t\txytext=(offset, 2.5*offset), textcoords='offset points',\n\t\t\tbbox = bbox, arrowprops = arrowprops)\n\t\tplt.grid(True)\n\t\tplt.legend(bbox_to_anchor = (1.001, 1), loc = 'upper left', borderaxespad = 0)\n\t\tplt.suptitle(\"Temperature and Humidity for \" + df.iloc[1]['A'])\n\n\t\tmng = plt.get_current_fig_manager()\n\t\tmng.resize(*mng.window.maxsize())\n\n\t\tplt.show()\n\n\tdef newfile(self):\n\t\tfile = pd.read_csv(filedialog.askopenfilename(\n\t\t\tinitialdir = (os.environ['HOME'] + \"/TempHum_Results\"), \n\t\t\t\ttitle = \"Select file\", \n\t\t\t\tfiletypes = [(\"Spreadsheets\", \"*.csv\")]),\n\t\t\tsep=',', index_col=1)\n\n\t\treturn file\n\nButtons()\n\n","sub_path":"Python_test_codes/weeklyplot.py","file_name":"weeklyplot.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357913302","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nfrom vocab_utils import Vocab\nimport namespace_utils\n\nimport tensorflow as tf\nimport TriMatchTrainer_v2 as TriMatchTrainer\nfrom TriMatchModelGraph_v2 import TriMatchModelGraph\nimport sys\n\n'''\nScript for testing.\n'''\n\ntf.logging.set_verbosity(tf.logging.ERROR) # DEBUG, INFO, WARN, ERROR, and FATAL\nnum_options=4\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_prefix', type=str, required=True, help='Prefix to the models.')\n parser.add_argument('--in_path', type=str, required=True, help='the path to the test file.')\n parser.add_argument('--out_path', type=str, required=True, help='The path to the output file.')\n parser.add_argument('--word_vec_path', type=str, required=True, help='word embedding file for the input file.')\n parser.add_argument('--mode', type=str, default=\"prediction\", help='prediction or probs')\n parser.add_argument('--batch_size', default=None, help='test batch size')\n parser.add_argument('--output_gate_probs',default=False, help='output RL gates probs.', action='store_true')\n\n\n\n args, unparsed = parser.parse_known_args()\n \n model_prefix = args.model_prefix\n in_path = args.in_path\n out_path = args.out_path\n word_vec_path = args.word_vec_path\n mode = args.mode\n out_json_path = None\n dump_prob_path = None\n\n # load the configuration file\n print('Loading configurations.')\n FLAGS = namespace_utils.load_namespace(model_prefix + \".config.json\")\n print(FLAGS)\n\n with_POS=False\n if hasattr(FLAGS, 'with_POS'): with_POS = FLAGS.with_POS\n with_NER=False\n if hasattr(FLAGS, 'with_NER'): with_NER = FLAGS.with_NER\n wo_char = False\n if hasattr(FLAGS, 'wo_char'): wo_char = FLAGS.wo_char\n\n wo_left_match = False\n if hasattr(FLAGS, 'wo_left_match'): wo_left_match = FLAGS.wo_left_match\n\n wo_right_match = False\n if hasattr(FLAGS, 'wo_right_match'): wo_right_match = FLAGS.wo_right_match\n\n wo_full_match = False\n if hasattr(FLAGS, 'wo_full_match'): wo_full_match = FLAGS.wo_full_match\n\n wo_maxpool_match = False\n if hasattr(FLAGS, 'wo_maxpool_match'): wo_maxpool_match = FLAGS.wo_maxpool_match\n\n wo_attentive_match = False\n if hasattr(FLAGS, 'wo_attentive_match'): wo_attentive_match = FLAGS.wo_attentive_match\n\n wo_max_attentive_match = False\n if hasattr(FLAGS, 'wo_max_attentive_match'): wo_max_attentive_match = FLAGS.wo_max_attentive_match\n\n max_hyp_length = 100\n if hasattr(FLAGS, 'max_hyp_length'): 
max_hyp_length = FLAGS.max_hyp_length\n\n max_choice_length=None\n if hasattr(FLAGS, 'max_choice_length'): max_choice_length = FLAGS.max_choice_length\n\n matching_option=0\n if hasattr(FLAGS,'matching_option'): matching_option=FLAGS.matching_option\n\n use_options=False\n if hasattr(FLAGS,'use_options'): use_options=FLAGS.use_options\n\n cond_training=False\n if hasattr(FLAGS,'cond_training') or FLAGS.matching_option==7:\n cond_training=True\n\n if args.batch_size is not None:\n FLAGS.batch_size=args.batch_size\n gen_concat_mat=False\n gen_split_mat=False\n if FLAGS.matching_option==7:\n gen_concat_mat=True\n FLAGS.cond_training=True\n if FLAGS.concat_context:\n gen_split_mat=True\n reasonet_training=False\n if hasattr(FLAGS,'reasonet_training'):\n reasonet_training=FLAGS.reasonet_training\n rl_matches=FLAGS.rl_matches\n concat_context=FLAGS.concat_context\n print('concat_context=',concat_context)\n tied_aggre=False\n if hasattr(FLAGS,'tied_aggre'):\n tied_aggre=FLAGS.tied_aggre\n tied_match=False\n if hasattr(FLAGS,'tied_match'):\n tied_match=FLAGS.tied_match\n rl_training_method=FLAGS.rl_training_method\n efficient=FLAGS.efficient\n reasonet_steps=5\n reasonet_hidden_dim=128\n reasonet_lambda=10\n reasonet_terminate_mode='original'\n reasonet_keep_first=True\n reasonet_logit_combine='sum' \n if reasonet_training:\n reasonet_steps=FLAGS.reasonet_steps\n reasonet_hidden_dim=FLAGS.reasonet_hidden_dim\n reasonet_lambda=FLAGS.reasonet_lambda\n reasonet_terminate_mode=FLAGS.reasonet_terminate_mode\n reasonet_keep_first=FLAGS.reasonet_keep_first\n reasonet_logit_combine=FLAGS.reasonet_logit_combine\n\n\n\n\n\n # load vocabs\n print('Loading vocabs.')\n word_vocab = Vocab(word_vec_path, fileformat='txt3',tolower=FLAGS.use_lower_letter)\n label_vocab = Vocab(model_prefix + \".label_vocab\", fileformat='txt2',tolower=FLAGS.use_lower_letter)\n print('word_vocab: {}'.format(word_vocab.word_vecs.shape))\n print('label_vocab: {}'.format(label_vocab.word_vecs.shape))\n num_classes = label_vocab.size()\n \n POS_vocab = None\n NER_vocab = None\n char_vocab = None\n if with_POS: POS_vocab = Vocab(model_prefix + \".POS_vocab\", fileformat='txt2',tolower=FLAGS.use_lower_letter)\n if with_NER: NER_vocab = Vocab(model_prefix + \".NER_vocab\", fileformat='txt2',tolower=FLAGS.use_lower_letter)\n char_vocab = Vocab(model_prefix + \".char_vocab\", fileformat='txt2')\n print('char_vocab: {}'.format(char_vocab.word_vecs.shape))\n \n print('Build TriMatchDataStream ... 
')\n testDataStream = TriMatchTrainer.TriMatchDataStream(in_path, word_vocab=word_vocab, char_vocab=char_vocab, \n POS_vocab=POS_vocab, NER_vocab=NER_vocab, label_vocab=label_vocab, \n batch_size=FLAGS.batch_size, isShuffle=False, isLoop=True, isSort=False, \n max_char_per_word=FLAGS.max_char_per_word, \n max_sent_length=FLAGS.max_sent_length, max_hyp_length=max_hyp_length, \n max_choice_length=max_choice_length,tolower=FLAGS.use_lower_letter,\n gen_concat_mat=gen_concat_mat, gen_split_mat=gen_split_mat,\n efficient=efficient)\n print('Number of instances in testDataStream: {}'.format(testDataStream.get_num_instance()))\n print('Number of batches in testDataStream: {}'.format(testDataStream.get_num_batch()))\n\n if wo_char: char_vocab = None\n\n init_scale = 0.01\n best_path = model_prefix + \".best.model\"\n print('Decoding on the test set:')\n with tf.Graph().as_default():\n initializer = tf.random_uniform_initializer(-init_scale, init_scale)\n with tf.variable_scope(\"Model\", reuse=False, initializer=initializer):\n if matching_option!=7:\n valid_graph = TriMatchModelGraph(num_classes, word_vocab=word_vocab, char_vocab=char_vocab,POS_vocab=POS_vocab, NER_vocab=NER_vocab, \n dropout_rate=0.0, learning_rate=FLAGS.learning_rate, optimize_type=FLAGS.optimize_type,\n lambda_l2=FLAGS.lambda_l2, char_lstm_dim=FLAGS.char_lstm_dim, context_lstm_dim=FLAGS.context_lstm_dim, \n aggregation_lstm_dim=FLAGS.aggregation_lstm_dim, is_training=False, MP_dim=FLAGS.MP_dim, \n context_layer_num=FLAGS.context_layer_num, aggregation_layer_num=FLAGS.aggregation_layer_num, \n fix_word_vec=FLAGS.fix_word_vec, with_highway=FLAGS.with_highway,\n word_level_MP_dim=FLAGS.word_level_MP_dim,\n with_match_highway=FLAGS.with_match_highway, with_aggregation_highway=FLAGS.with_aggregation_highway,\n highway_layer_num=FLAGS.highway_layer_num,\n match_to_question=FLAGS.match_to_question, match_to_passage=FLAGS.match_to_passage, match_to_choice=FLAGS.match_to_choice,\n with_full_match=(not FLAGS.wo_full_match), with_maxpool_match=(not FLAGS.wo_maxpool_match), \n with_attentive_match=(not FLAGS.wo_attentive_match), with_max_attentive_match=(not FLAGS.wo_max_attentive_match), \n use_options=use_options, num_options=num_options, with_no_match=FLAGS.with_no_match, matching_option=matching_option,\n cond_training=cond_training)\n else:\n valid_graph = TriMatchModelGraph(num_classes, word_vocab=word_vocab, char_vocab=char_vocab,POS_vocab=POS_vocab, NER_vocab=NER_vocab, \n dropout_rate=FLAGS.dropout_rate, learning_rate=FLAGS.learning_rate, optimize_type=FLAGS.optimize_type,\n lambda_l2=FLAGS.lambda_l2, char_lstm_dim=FLAGS.char_lstm_dim, context_lstm_dim=FLAGS.context_lstm_dim, \n aggregation_lstm_dim=FLAGS.aggregation_lstm_dim, is_training=False, MP_dim=FLAGS.MP_dim, \n context_layer_num=FLAGS.context_layer_num, aggregation_layer_num=FLAGS.aggregation_layer_num, \n fix_word_vec=FLAGS.fix_word_vec, with_highway=FLAGS.with_highway,\n word_level_MP_dim=FLAGS.word_level_MP_dim,\n with_match_highway=FLAGS.with_match_highway, with_aggregation_highway=FLAGS.with_aggregation_highway,\n highway_layer_num=FLAGS.highway_layer_num,\n match_to_question=FLAGS.match_to_question, match_to_passage=FLAGS.match_to_passage, match_to_choice=FLAGS.match_to_choice,\n with_full_match=(not FLAGS.wo_full_match), with_maxpool_match=(not FLAGS.wo_maxpool_match), \n with_attentive_match=(not FLAGS.wo_attentive_match), with_max_attentive_match=(not FLAGS.wo_max_attentive_match), \n use_options=use_options, num_options=num_options, 
with_no_match=FLAGS.with_no_match, \n matching_option=matching_option, concat_context=concat_context, \n tied_aggre=tied_aggre, rl_training_method=rl_training_method, rl_matches=rl_matches, \n cond_training=cond_training,reasonet_training=reasonet_training, reasonet_steps=reasonet_steps, \n reasonet_hidden_dim=reasonet_hidden_dim, reasonet_lambda=reasonet_lambda, \n reasonet_terminate_mode=reasonet_terminate_mode, reasonet_keep_first=reasonet_keep_first, \n efficient=efficient, tied_match=tied_match, reasonet_logit_combine=reasonet_logit_combine)\n\n# saver = tf.train.Saver()\n # remove word _embedding\n vars_ = {}\n for var in tf.global_variables():\n if \"word_embedding\" in var.name: continue\n if not var.name.startswith(\"Model\"): continue\n vars_[var.name.split(\":\")[0]] = var\n saver = tf.train.Saver(vars_)\n \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True \n sess = tf.Session(config=config)\n\n sess.run(tf.global_variables_initializer())\n step = 0\n saver.restore(sess, best_path)\n\n accuracy = TriMatchTrainer.evaluate(testDataStream, valid_graph, sess, outpath=out_path, label_vocab=label_vocab,mode=args.mode,\n char_vocab=char_vocab, POS_vocab=POS_vocab, NER_vocab=NER_vocab, use_options=use_options, \n cond_training=cond_training,output_gate_probs=args.output_gate_probs,efficient=efficient)\n print(\"Accuracy for test set is %.2f\" % accuracy)\n\n\n","sub_path":"src/TriMatchDecoder_v2.py","file_name":"TriMatchDecoder_v2.py","file_ext":"py","file_size_in_byte":11409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462775549","text":"# -*- coding: utf-8 -*-\nfrom __future__ import annotations\nfrom typing import Dict, List, Tuple\n\nfrom fcatools import EMPTY_VALUE\nfrom .dyadic_link import DyadicLink\nfrom .dyadic_concept import DyadicConcept\nfrom .dyadic_generator import DyadicGenerator\nfrom .dyadic_association_rule import DyadicAssociationRule\n\nclass DyadicLattice:\n def __init__(self):\n self.lattice: Dict[frozenset, DyadicConcept] = {}\n\n def add_connection(self, intent_1, intent_2, concepts):\n concepts_reversed = {i.attrs: i.objects for i in concepts}\n\n intent_1 = frozenset(intent_1)\n intent_2 = frozenset(intent_2)\n\n if intent_1 in concepts_reversed and intent_2 not in concepts_reversed:\n extent = concepts_reversed[intent_1]\n if extent not in self.lattice:\n self.lattice[extent] = DyadicConcept(extent, intent_1)\n\n elif intent_2 in concepts_reversed and intent_1 not in concepts_reversed:\n extent = concepts_reversed[intent_2]\n if extent not in self.lattice:\n self.lattice[extent] = DyadicConcept(extent, intent_2)\n\n elif intent_2 in concepts_reversed and intent_1 in concepts_reversed:\n extent_1 = concepts_reversed[intent_1]\n extent_2 = concepts_reversed[intent_2]\n\n if extent_1 in self.lattice and extent_2 in self.lattice:\n self.lattice[extent_1].add_connection(self.lattice[extent_2])\n elif extent_1 not in self.lattice and extent_2 in self.lattice:\n c = DyadicConcept(extent_1, intent_1)\n c.add_connection(self.lattice[extent_2])\n self.lattice[extent_1] = c\n elif extent_1 in self.lattice and extent_2 not in self.lattice:\n c = DyadicConcept(extent_2, intent_2)\n c.add_connection(self.lattice[extent_1])\n self.lattice[extent_2] = c\n else:\n c1 = DyadicConcept(extent_1, intent_1)\n c2 = DyadicConcept(extent_2, intent_2)\n c1.add_connection(c2)\n self.lattice[extent_1] = c1\n self.lattice[extent_2] = c2\n\n def get_objects_count(self) -> int:\n unique_objects = []\n for 
objects, concept in self.lattice.items():\n for o in objects:\n if o not in unique_objects:\n unique_objects.append(o)\n\n if EMPTY_VALUE in unique_objects:\n unique_objects.remove(EMPTY_VALUE)\n\n return len(unique_objects)\n\n def compute_association_rules(self, generators: List[DyadicGenerator]) -> List[DyadicAssociationRule]:\n rules = []\n objects_count = self.get_objects_count()\n for extent, concept in self.lattice.items():\n\n for g in generators:\n if concept.attrs == g.attrs:\n gen = g\n\n ant_support = len(concept.objects) / objects_count\n children = self.lattice[concept.objects].children\n\n for attrs in gen.generator:\n if len(children) != 0 and len(concept.attrs) != 0:\n for child in children:\n\n cons_support = len(child.objects - frozenset({EMPTY_VALUE})) / objects_count\n potential_cons = [i for i in child.attrs if i not in concept.attrs]\n\n rule_conf = cons_support / ant_support\n\n if len(potential_cons) != 0:\n rules.append(DyadicAssociationRule(attrs, potential_cons, cons_support, rule_conf))\n\n rule_support = len(concept.objects - frozenset({EMPTY_VALUE})) / objects_count\n potential_cons = [i for i in concept.attrs if i not in attrs]\n\n if len(potential_cons) != 0:\n rules.append(DyadicAssociationRule(attrs, potential_cons, rule_support, 1.0))\n\n return rules\n\n def compute_generators(self) -> List[DyadicGenerator]:\n generators: List[DyadicGenerator] = []\n\n for extent, concept in self.lattice.items():\n gen = []\n\n if len(concept.attrs) > 0:\n parents = concept.parents\n faces = []\n for p in parents:\n faces.append(concept.attrs - p.attrs)\n\n if len(faces) > 0:\n first_face = faces.pop(0)\n for f in first_face:\n gen.append(frozenset({f}))\n\n if len(faces) > 0:\n for f in faces:\n min_blockers = []\n blockers = []\n\n for g in gen:\n if len(g & f) == 0:\n for element in f:\n union = frozenset({element}) | g\n if union not in blockers:\n blockers.append(union)\n else:\n if frozenset(g) not in min_blockers:\n min_blockers.append(frozenset(g))\n\n if len(blockers) == 0:\n gen = min_blockers\n elif len(min_blockers) == 0:\n gen = blockers\n else:\n result = []\n for b in blockers:\n for min_b in min_blockers:\n if min_b <= b:\n if b not in result:\n result.append(b)\n break\n gen = list(frozenset(min_blockers) | (frozenset(blockers) - frozenset(result)))\n else:\n gen = [frozenset({i}) for i in concept.attrs]\n\n generators.append(DyadicGenerator(concept.attrs, gen))\n\n return generators\n \n @staticmethod\n def build_lattice_iPred(concepts: List[DyadicConcept]) -> Tuple[DyadicLattice, List[DyadicLink]]:\n attributes = [c.attrs for c in concepts]\n\n attributes.sort(key=len)\n\n if frozenset({EMPTY_VALUE}) not in attributes:\n attributes.insert(0, frozenset({EMPTY_VALUE}))\n\n empty_set = {EMPTY_VALUE}\n faces = {}\n links = []\n concepts_lattice = DyadicLattice()\n\n for i in attributes:\n faces[i] = empty_set\n\n border = attributes.pop(0)\n\n for Ci in attributes:\n candidates = set({})\n\n for element in border:\n candidates = candidates | frozenset({(Ci & frozenset(element))})\n\n candidates = (candidates - frozenset({frozenset({})})) | empty_set\n\n for element in candidates:\n delta_intersection = Ci & faces[frozenset(element)]\n if len(delta_intersection) == 0 or delta_intersection == empty_set:\n concepts_lattice.add_connection(Ci, set(element), concepts)\n\n links.append(\n DyadicLink(\n DyadicLattice.__getObjectFromAttr(Ci, concepts),\n DyadicLattice.__getObjectFromAttr(frozenset(element), concepts)\n )\n )\n faces[frozenset(element)] = 
(faces[frozenset(element)] | (Ci - set(element))) - empty_set\n border = (border - frozenset({element})) - empty_set\n\n border = border | {Ci}\n\n return concepts_lattice, links\n\n @staticmethod\n def __getObjectFromAttr(attrs: frozenset, concepts: List[DyadicConcept]) -> frozenset:\n for c in concepts:\n if c.attrs == attrs:\n return c.objects\n\n return frozenset(EMPTY_VALUE)\n\n def __repr__(self) -> str:\n return f'DyadicLattice({self.lattice})'\n","sub_path":"src/fcatools/dyadic/dyadic_lattice.py","file_name":"dyadic_lattice.py","file_ext":"py","file_size_in_byte":8153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384131513","text":"import numpy as np\nfrom madmom.utils.midi import MIDIFile\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Activation\n\nimport random\nimport time, os\n\n\nnum_songs = 500\nnum_timesteps = 1000\nnote_vec_size = 5\n\nnum_epochs = 100\ncheckpoint_distance = 10\n\narchitecture = [50,50,50]\n\ndataset = \"1\"\nlayers = \"\"\nfor layer in architecture:\n\tlayers += str(layer) + \"x\"\nactivation = 'relu'\n\nrun_name = \"ds:_\" + dataset + \"__arch:_\" + layers + \"__act:_\" + activation\n\nprint(run_name)\n\n\n################################################# Import music files\n\nmusic_files = []\n\ndirectory = \"datasets/\" + dataset + \"/\"\n\nfor filename in os.listdir(directory):\n\t# generate a MIDIFile object from a midi file\n\tmusic_files.append(MIDIFile.from_file(directory + filename).notes())\n\t#note structure: (onset time, pitch, duration, velocity, channel)\n\nmusic_files = np.array(music_files)\n\n#print(music_files[0,:10])\nimport sys\n#sys.exit(0)\n#################################################\n\n\nmodel = Sequential()\nfirst = True\nfor layer in architecture:\n\tif(first):\n\t\tmodel.add(LSTM(50, batch_input_shape = (1, 1, note_vec_size), stateful=True, return_sequences=True))#, init=init),\n\t\tmodel.add(Activation(activation))\n\t\tfirst = False\n\telse:\n\t\tmodel.add(LSTM(50, stateful=True, return_sequences=True))\n\t\tmodel.add(Activation(activation))\nmodel.add(LSTM(note_vec_size, stateful=True, return_sequences=True))\n\n\nmodel.compile('adam', loss='mse')\n\n#save network architecture\nmodel.save(\"models/\" + run_name)\n#create checkpoint folder if it doesn't already exist\ncheckpoint_path = \"model_checkpoints/\" + run_name\nif not os.path.exists(checkpoint_path):\n\tos.makedirs(checkpoint_path)\n\nloss_list = []\n\n# training loop\nfor epoch in range(num_epochs):\t\t\t\t\t# epochs through all songs\n\tloss_this_epoch = 0\n\tprint(\"\\n\\nEPOCH:\\t\"+str(epoch)+\" / \" + str(num_epochs) + \"\\n\\n\")\n\tfor i in range(music_files.shape[0]):\t\t# songs\n\t\tfor note in range(music_files.shape[1]-1):\t# timesteps\n\t\t\tprint(\"\\nnote:\\t\"+str(note)+\" / \"+str(music_files.shape[1]-1))\n\n\t\t\tloss_this_epoch += model.train_on_batch(np.reshape(music_files[i, note],(1,1,5)), np.reshape(music_files[i, note+1], (1,1,5)))\n\n\t\tmodel.reset_states()\n\tprint(loss_this_epoch)\n\tloss_list.append(loss_this_epoch)\n\n\tif(epoch % checkpoint_distance == 0):\n\n\t\tmodel.save_weights(checkpoint_path + \"/\" + str(epoch) + \"_epochs_weights\")\n\n\t\tgenerated_song = []\n\n\t\tgenerated_song.append( model.predict_on_batch( np.reshape(music_files[0,0],(1,1,5)) ) )#np.random.randint(2, size=num_notes) ) )\n\n\t\tfor i in range(len(music_files[0])):\n\t\t\tgenerated_song.append(model.predict_on_batch(np.reshape(generated_song[-1], 
(1,1,5))))\n\n\t\t#convert to np array and round pitch, velocity, and channel to integer values, then clip them to the appropriate range\n\t\tgenerated_song = np.array(generated_song)\n\n\t\t\n\t\tgenerated_song = generated_song[:,0,0,:] #manual reshaping\n\t\t\n\t\tgenerated_song[:, (1,3,4)] = np.round(generated_song[:,(1,3,4)])\n\t\tgenerated_song[:, (1,3,4)] = np.clip(generated_song[:,(1,3,4)], 0, 127)\n\t\tgenerated_song = np.nan_to_num(generated_song)\n\n\t\tprint(generated_song[:10])\n\n\t\tnew_midifile = MIDIFile.from_notes(generated_song)\n\n\t\tnew_midifile.write(\"generated_song_attempts/\" + run_name + str(epoch) + \".mid\")\n\nnp.savetxt((\"models/\" + run_name + \"__loss_trend\"), loss_list)\n\n# T O D O\n#\t\t\t\tdetermine which data to feed in\n#\t\t\t\ttweak neural net\n#\t\t\t\tget and save error rates\n\n#\t\t\t\tset up reinforcement learning","sub_path":"Desktop/music_generation_AI-master/music_generating_RNN.py","file_name":"music_generating_RNN.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"93585146","text":"import allure\nimport pytest\nfrom seleniumbase import BaseCase\nfrom pages.store_page import StorePage\nfrom pages.login_page import LoginPage\nfrom pages.popup_page import PopupPage\nfrom pages.home_page import HomePage  # NOTE: assumed page object; the original referenced self.home_page without ever defining it\nimport os\n\n\n@allure.feature(\"Home page\")\nclass TestHome:\n @classmethod\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(cls, base_driver):\n \"\"\"\n Setup steps\n :return:\n \"\"\"\n cls.login_page = LoginPage(base_driver)\n cls.popup_page = PopupPage(base_driver)\n cls.store_page = StorePage(base_driver)\n cls.home_page = HomePage(base_driver)\n cls.driver = BaseCase(base_driver)\n\n @allure.story(\"Check that an existing popup can be saved successfully\")\n def test_popup_save(self):\n self.login_page.login_in()\n self.driver.sleep(2)\n self.home_page.go_to_popup_page()\n self.home_page.go_to_popup_edit()\n self.home_page.go_to_popup_test()\n self.home_page.reset_popup_test()\n\n @allure.story(\"Check that the popup on the client-side page matches the configuration\")\n def test_toc_popup(self):\n self.login_page.login_in()\n self.driver.sleep(2)\n self.home_page.go_to_store()\n self.store_page.login_in()\n self.store_page.check_popup_header()\n self.store_page.check_popup_description()\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-s\", os.path.abspath(__file__)])\n","sub_path":"tests/test_popup.py","file_name":"test_popup.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152029937","text":"# Author: wonder\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nimport numpy as np\nfrom xgboost import plot_importance\nfrom sklearn.preprocessing import Imputer\n\nfrom sklearn.metrics import mean_absolute_error\n\n\ndef loadDataset():\n dataset_path = './dataset/train'\n noises = [1, 5, 6, 12, 13, 19, 20]\n datas = []\n for i in range(25):\n if i + 1 in noises: continue\n fpath = os.path.join(dataset_path, '{}.npy'.format(i + 1))\n data = np.load(fpath) # shape: 144 * 81 * 2\n data = data.swapaxes(0,1)\n #data = data.reshape((81*144,2))\n #print(\"sssss\", data.shape)\n datas.append(data)\n #datas = np.concatenate(datas, axis=0)\n #print(datas.shape)\n return np.array(datas)\n\n\ndef loadTrainData(data):\n data_num = len(data)\n #print(data_num)\n #print(data.shape)\n X = []\n Y = []\n for row in range(0, data_num-1):\n X.append(data[row, :, :, :])\n Y.append(data[row+1, :, :, :])\n X = np.array(X)\n Y = np.array(Y)\n X = X.reshape((-1, 144, 2))\n Y = 
Y.reshape((-1, 144, 2))\n X = X.reshape(-1, 288)\n Y = Y.reshape(-1, 288)\n return X, Y\n\ndef loadTestData():\n dataset_path = './dataset/h_train'\n fpath = os.path.join(dataset_path, '28.npy')\n data = np.load(fpath) \n data = data.swapaxes(0,1)\n\n data[:,:,0], data[:,:,1] = data[:,:,1], data[:,:,0]\n return data\n \ndef train(X, Y):\n # XGBoost training procedure\n models = []\n for i in range(288):\n model = xgb.XGBRegressor(max_depth=5, learning_rate=0.1, n_estimators=160, silent=False, objective='reg:gamma')\n models.append(model)\n \n for t in range(144):\n in_id = 2 * t\n out_id = 2 * t + 1\n models[in_id].fit(X, Y[:, in_id])\n models[out_id].fit(X, Y[:, out_id])\n # predict on the test set\n return models\n\n #pred = model.predict(val)\n \n # evaluate the prediction results \n #print(\"MAE: %.2f%%\" % (mean_absolute_error(Y_val,Y_pred)))\ndef test(models, X):\n X = X.reshape(81, 288)\n Y = np.zeros((81,144,2))\n for t in range(144):\n in_id = 2 * t\n out_id = 2 * t + 1\n in_pred = models[in_id].predict(X)\n out_pred = models[out_id].predict(X)\n Y[:,t,0] = in_pred\n Y[:,t,1] = out_pred\n return Y\n #print(\"MAE: %.2f%%\" % (mean_absolute_error(Y_val,Y_pred)))\n \nif __name__ == '__main__':\n #data = loadDataset()\n #X, Y = loadTrainData(data)\n X_val = loadTestData()\n #models = train(X, Y)\n\n import pickle\n #with open('xgb_model.pkl', 'wb') as f:\n # pickle.dump(models, f)\n with open('xgb_model.pkl','rb') as f:\n models = pickle.load(f)\n res = test(models,X_val)\n\n\n import time, datetime\n def time2str(id, date):\n dt = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n t1 = time.mktime(dt.timetuple()) + int(id) * 10 * 60\n t2 = t1 + 10 * 60\n t1_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(t1))\n t2_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(t2))\n\n return t1_str, t2_str\n\n date = '2019-01-29'\n with open('./results/xgb-ly/29.csv', 'w') as f:\n title = 'stationID,startTime,endTime,inNums,outNums'\n print(title, file=f)\n x, y, z = res.shape\n print(res[0][0])\n for j in range(y):\n for i in range(x):\n t1, t2 = time2str(i, date)\n in_num, out_num = res[i][j] \n print(j, t1, t2, in_num, out_num, sep=',', file=f)\n","sub_path":"xgb.py","file_name":"xgb.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"274459742","text":"class Solution:\n def networkDelayTime(self, times, N, K):\n from collections import defaultdict\n import math\n g = defaultdict(dict)\n for s, d, t in times:\n g[s][d] = t\n\n distances = dict()\n queue = []\n for n in range(1, N+1):\n distances[n] = math.inf\n queue.append(n)\n distances[K] = 0\n\n while queue:\n queue = list(reversed(sorted(queue, key=lambda k: distances[k])))\n node = queue.pop()\n\n for neighbor, d in g[node].items():\n new_distance = min(\n d + distances[node],\n distances[neighbor]\n )\n distances[neighbor] = new_distance\n return max(distances.values())\n\nif __name__ == '__main__':\n inputs = [[2,1,1],[2,3,1],[3,4,1]]\n N = 4\n K = 2\n print(Solution().networkDelayTime(inputs, N, K))\n","sub_path":"leetcode/743_network_delay_time.py","file_name":"743_network_delay_time.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"293794610","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 5 14:12:38 2017\n\n@author: admin\n\"\"\"\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\ndef numerical_diff(f, x):\n h = 1e-4\n return (f(x+h) - f(x-h)) / (2*h)\n \ndef 
function_1(x):\n return 0.01 * x **2 + 0.1*x\n \ndef function_2(x):\n return x[0]**2 + x[1]**2 \n\ndef numerical_gradient(f, x):\n h = 1e-4\n grad = np.zeros_like(x)\n \n for idx in range(x.size):\n tmp_val = x[idx]\n \n # compute f(x+h)\n x[idx] = tmp_val + h\n fxh1 = f(x)\n \n # compute f(x-h)\n x[idx] = tmp_val - h\n fxh2 = f(x)\n \n grad[idx] = (fxh1 - fxh2) / (2*h)\n x[idx] = tmp_val\n \n return grad\n\ndef gradient_descent(f, init_x, lr=0.01, step_num=100):\n x = init_x\n \n for i in range(step_num):\n grad = numerical_gradient(f,x)\n x -= lr * grad\n print (grad, x)\n return x\n\n \nx = np.arange(0.0, 20.0, 0.1)\ny = function_1(x)\nplt.xlabel(\"x\")\nplt.ylabel(\"f(x)\")\nplt.plot(x,y)\nplt.show()\n\n\nprint(numerical_gradient(function_2, np.array([3.0, 4.0])))\nprint(numerical_gradient(function_2, np.array([0.0, 2.0])))\nprint(numerical_gradient(function_2, np.array([3.0, 0.0])))","sub_path":"ch04/num_diff.py","file_name":"num_diff.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333357847","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom builtins import str\nfrom builtins import object\nfrom qgis.core import QgsProject,QgsMapLayer,QgsVectorLayer,QgsDataProvider,QgsDataSourceUri\nfrom PyQt5.QtCore import Qt\n\nclass run(object):\n def __init__(self, id, gtotool, config, debug):\n super(run, self).__init__()\n\n try:\n #tool data\n activelayer = config.get('active_layer',None)\n layers = config['layers']\n #init\n iface= gtotool.iface\n prj = QgsProject.instance()\n root = prj.layerTreeRoot()\n for g in layers:\n try:\n group=g['name']\n #state=g['state']\n state = g['visible']\n if debug: gtotool.info.log(group + \"/\" + str(state))\n node=prj.layerTreeRoot().findGroup(group)#QgsLayerTreeNode\n if node: node.setItemVisibilityCheckedRecursive(state)\n except ValueError as e:\n gtotool.info.err(e)\n #set layers\n for lyr in layers:\n layer = prj.mapLayersByName(lyr['name'])\n if layer:\n layer = layer[0] # duplicate names => take the first\n visible = lyr['visible']\n if visible:\n prj.layerTreeRoot().findLayer(layer.id()).setItemVisibilityCheckedParentRecursive(True)\n else:\n prj.layerTreeRoot().findLayer(layer.id()).setItemVisibilityChecked(False)\n #symbology\n symbology = lyr.get(\"symbology\",None)#list\n if symbology is not None:\n if isinstance(symbology,list):\n ltl = root.findLayer(layer.id())#QgsLayerTreeLayer\n ltm = iface.layerTreeView().model()\n legendNodes = ltm.layerLegendNodes(ltl)\n for ln in legendNodes: # QgsLayerTreeModelLegendNode\n for d in symbology:#dic\n symLayerName= d.get(\"name\")\n if symLayerName == ln.data(0):\n if d.get(\"visible\",True):\n ln.setData(Qt.Checked, Qt.CheckStateRole)\n else:\n ln.setData(Qt.Unchecked, Qt.CheckStateRole)\n break\n else:\n ltl = root.findLayer(layer.id())#QgsLayerTreeLayer\n ltm = iface.layerTreeView().model()\n legendNodes = ltm.layerLegendNodes(ltl)\n for ln in legendNodes: # QgsLayerTreeModelLegendNode\n if symbology:\n ln.setData(Qt.Checked, Qt.CheckStateRole)\n else:\n ln.setData(Qt.Unchecked, Qt.CheckStateRole)\n #set active layer:\n if activelayer:\n layer = prj.mapLayersByName(activelayer)\n if layer:\n layer = layer[0] # duplicate names => take the first\n prj.layerTreeRoot().findLayer(layer.id()).setItemVisibilityCheckedParentRecursive(True)\n iface.setActiveLayer(layer)\n except Exception as e:\n 
gtotool.info.err(e)\n\n","sub_path":"GZP_GTO_QGIS/INSTALLATION/GeoTaskOrganizer/mActionGTOmas.py","file_name":"mActionGTOmas.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"513597732","text":"import run_full\r\nimport cv2\r\nimport os, shutil\r\n\r\ndef clear_folder():\r\n folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','pic')\r\n for the_file in os.listdir(folder):\r\n file_path = os.path.join(folder, the_file)\r\n try:\r\n if os.path.isfile(file_path):\r\n os.unlink(file_path)\r\n #elif os.path.isdir(file_path): shutil.rmtree(file_path)\r\n except Exception as e:\r\n print(e)\r\n\r\nvidcap = cv2.VideoCapture('../../../4/vid4.mp4')\r\nsuccess,image = vidcap.read()\r\ncount = 0\r\n\r\nwhile success:\r\n cv2.imwrite(\"../../pic/frame%s.jpg\" % str(count).zfill(5), image)\r\n if count%40 == 0 and count != 0:\r\n run_full.demo_images()\r\n clear_folder()\r\n success,image = vidcap.read()\r\n print(' Read a new frame: ', success)\r\n count += 1","sub_path":"src/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197301780","text":"# name = \"Siddhesh\"\n\n# new_list = [letter.upper() for letter in name]\n\n# print(new_list)\n\n# double_list = [2*n for n in range(1,25)]\n\n# print(double_list)\n\n# names = [\"Alex\", \"Annemarie\", \"Jeaneatte\", \"Sarah\", \"Robert\", \"Patrick\", \"Jenny\"]\n\n# long_names = [name.upper() for name in names if len(name) > 5]\n\n# print(long_names)\n\nnumbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n# 🚨 Do Not Change the code above 👆\n\n#Write your 1 line code 👇 below:\n\nsquared_numbers = [n**2 for n in numbers]\n\n\n\n#Write your code 👆 above:\n\nprint(squared_numbers)\n\n\n","sub_path":"day_26/main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126791030","text":"import os.path\nimport re\n\nimport PyQt5.uic\nfrom PyQt5 import QtCore, QtWidgets\n\nROOT = QtCore.QFileInfo(__file__).absolutePath()\n\ndef getImgPath(name):\n return os.path.join(ROOT, \"resources\", \"img\", name)\n\ndef getTranslationPath(name):\n return os.path.join(ROOT, \"resources\", \"ts\", name)\n\ndef getOpenFilename(parent=None, root=\".\", title=\"\"):\n if not title: \n title = translate(\"Global\", \"Open\")\n \n return QtWidgets.QFileDialog.getOpenFileName(parent, title, root)\n\ndef getSaveFilename(parent=None, root=\".\", title=\"\"):\n if not title: \n title = translate(\"Global\", \"Save\")\n \n return QtWidgets.QFileDialog.getSaveFileName(parent, title, root)\n\ndef loadUi(widget):\n basepath = os.path.join(ROOT, \"views\")\n\n basename = [a for a in re.split(\n r\"([A-Z][a-z]*)\", \n widget.__class__.__name__\n ) if a] # Split on capital letters\n basename = \"_\".join(basename).lower()\n\n uifile = os.path.join(basepath, \"{basename}.ui\".format(basename=basename))\n uipath = os.path.dirname(uifile)\n\n currdir = QtCore.QDir.currentPath()\n QtCore.QDir.setCurrent(uipath)\n\n PyQt5.uic.loadUi(uifile, widget)\n\n QtCore.QDir.setCurrent(currdir)\n\ndef showPopup(msg, parent=None):\n popup = QtWidgets.QMessageBox(parent)\n popup.setText(msg)\n popup.show()\n\ntranslate = 
QtCore.QCoreApplication.translate\n","sub_path":"orchardtreatmentcreator/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"107886753","text":"# EX 2.3\ndef Mode(Li):\n '''\n takes a list and return the most freq value\n \n >>> L = [1,1,2,2,3,4,4,3,3]\n '''\n val_c = {}\n for i in Li:\n if i in val_c.keys():\n val_c[i] += 1\n else:\n val_c[i] = 1\n # now we have a histogram\n # want to return key in histo w the greatest val\n high_freq = 0\n modes = []\n for key in val_c:\n if val_c[key] > high_freq:\n high_freq = val_c[key]\n modes = [(key, high_freq)]\n elif val_c[key] == high_freq:\n modes.append((key, high_freq))\n #mode = (modes[:][1].sum()) / len(modes)\n '''\n modes[:][1]\n (5, 3)\n modes[:]\n [(2, 3), (5, 3), (6, 3)]'''\n # there can be > 1 mode\n # thus =>\n # if known beforehand would have been easier to use dic and call keys()\n # but since were already this far\n mode = []\n for i in modes:\n mode.append(i[0])\n return mode\n\ndef histo(Li):\n val_c = {}\n for i in Li:\n if i in val_c.keys():\n val_c[i] += 1\n else:\n val_c[i] = 1 \n return val_c\n \ndef AllModes(Li):\n '''\n Returns a list val/freq pairs in descending order of freq\n <=> greatest to least\n '''\n hist = histo(Li)\n #return sorted(hist.items(), reverse=True)\n # this is true for sorting by value size but not freq\n # lets just flip the tuples sort and then flip back\n L = []\n for i in hist.items():\n L.append((i[1],i[0]))\n M = sorted(L) \n modes = []\n for j in M:\n #j[0],j[1] = j[1],j[0]\n #'tuple' object does not support item assignment\n modes.append((j[1],j[0]))\n return modes\n\n# EX 2.4\nimport nsfg\nimport math\n# using totalwgt_lb investigate whether first babies are heavier or lighter than others\n\ndf = nsfg.ReadFemPreg()\n# now were in business\n\n# lets begin by creating 2 DFs one for first babes one for others\n#df_1st = df[pregordr == 1]\n#df_oth = df[pregordr != 1]\ndf_1st = df[df.pregordr == 1]\ndf_oth = df[df.pregordr != 1]\ndiff_in_means = df_1st.totalwgt_lb.mean() - df_oth.totalwgt_lb.mean()\n# lets begin with the mean for each\nprint('the mean for 1st babes is',\ndf_1st.totalwgt_lb.mean(), 'mean for others is', df_oth.totalwgt_lb.mean(), \n'thus => on avg difference between weight of 1st babies vs others is', diff_in_means)\n\n# lets eval the stds of each\nprint('std for 1st babes is', df_1st.totalwgt_lb.std(), 'and for other babies is', df_oth.totalwgt_lb.std())\n\n# => barely any deviation either, so we can conclude that 1st babies are \n# likely to weigh less than others\n\n# lets compute Cohen's d now\n# which is diff in means / pooled std\n# and is used to compare difference between groups according to the \n# variability between groups\n\ndef Coh_d(col1, col2):\n diff_in_u = col1.mean() - col2.mean()\n EV1_var = col1.var() * (len(col1) / (len(col1) + len(col2)))\n EV2_var = col2.var() * (len(col2) / (len(col1) + len(col2)))\n pooled_var = (EV1_var + EV2_var)\n d = diff_in_u / (math.sqrt(pooled_var))\n return d\n\nd_diff = Coh_d(df_1st.totalwgt_lb, df_oth.totalwgt_lb)\n\nprint('Cohens d between 1st and others for baby weight is', d_diff)\nprint('Cohens d between 1st and others for preg length is', \n Coh_d(df_1st.prglngth, df_oth.prglngth))\n","sub_path":"brydon_chap02ex.py","file_name":"brydon_chap02ex.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"65384104","text":"\nimport logging\nimport authorized\nfrom base_request_handler import BaseRequestHandler\n\nclass AddBlogReaction(BaseRequestHandler):\n def post(self,randomID):\n\n self.session = True\n user = UserInfo()\n user.whoIs(self)\n\n\n # this key is actually of fixed length\n theKey = \"-\" + randomID + Sketch.generateKey()\n sketchComment = SketchComment(key_name = theKey)\n sketchComment.randomID = randomID\n sketchComment.body = self.request.get('text_input')\n\n #sketchComment.author_user = user.user\n sketchComment.author_email = user.email\n sketchComment.author_user_id = user.user_id\n sketchComment.author_string_user_id = util.convDecToBase(string._long(user.user_id),62)\n sketchComment.author_nickname = user.nickname\n\n # This is necessary cause we need to store in the comment who is the author of the sketch\n # cause we'll have the client browser to independently check whether to allow the author of the sketch to delete any of the comments\n # we could do this check on the server side without storing the sketch author in the comment \n # BUT we can't do the check on the client side without passing a parameter accross three pages... \n sketch = Sketch.get_by_randomID(randomID)\n if sketch is None: self.redirect(\"/403.html\")\n sketchComment.sketch_author_user_id = sketch.author_user_id\n \n sketchComment.save()\n self.redirect('/view/'+randomID+\"/\")\n\n\n\nclass DeleteBlogReaction(BaseRequestHandler):\n @authorized.role(\"user\")\n def get(self,commentId):\n\n\t\t logging.info('got in')\n\n\t\t util.insertUsersideCookies(self)\n\n\t\t self.session = True\n\t\t user = UserInfo()\n\t\t user.whoIs(self)\n\n\t\t # anonymous users can't delete comments or sketches\n\t\t if not user.user:\n\t\t \tself.redirect(\"/403.html\")\n\t\t \treturn\n\n\t\t # does the comment exist?\n\t\t q = SketchComment.get_by_key_name(commentId)\n\t\t if not q:\n\t\t \tlogging.info('no such comment')\n\t\t \tself.redirect(\"/403.html?no such comment\")\n\t\t \treturn\n\n\t\t # is the user a) an admin b) the owner of the sketch c) the owner of the comment?\n\t\t if ((user.user_id == q.author_user_id) or (user.is_current_user_admin) or (user.user_id == q.sketch_author_user_id)):\n\t\t \tlogging.info('ok, deleting now')\n\t\t \tq.delete()\n\t\t else:\n\t\t \tlogging.info('wrong permissions')\n\t\t \tself.redirect(\"/403.html?you cant do that\")\n\t\t \treturn\n\n\t\t logging.info('redirecting to: ' + self.request.get(\"backTo\"))\n\t\t self.redirect(self.request.get(\"backTo\"))\n\t\t return\n\n\n","sub_path":"blog_reactions.py","file_name":"blog_reactions.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53693647","text":"#! 
/usr/bin/python\n\"\"\"\n\n@author: Pablo ALINGERY for IAS 07-05-2013\n\"\"\"\nfrom gaia_client_idoc import *\n\nd1 = datetime(2012,8,10,0,0,0)\nd2 = d1 + timedelta(days=1)\n\ngaia_data_list = search( DATES=[d1,d2], nb_res_max=1 ) \n\n#for item in gaia_data_list :\n#\tprint item\n\n#the fastest way to retrieve data\n#PS : The directory 'results' has to be created !\nget(GAIA_LIST=gaia_data_list, TARGET_DIR=\"results\")\n\n#specify the TYPE you want to retrieve ; it should be in the list 'temp','em','width','chi2' (TYPE=['all'] will do as well ), FILENAME would be the default one \n#get(GAIA_LIST=gaia_data_list, TARGET_DIR=\"results\", TYPE=['temp','em'])\n\n#specify the FILENAME you want to retrieve ; it should be a dictionary with keys within 'temp','em','width','chi2' and the value can be whatever you want\n#get(GAIA_LIST=gaia_data_list, TARGET_DIR=\"results\", FILENAME={'temp' :'temp.fits','em':'em.fits'})\n\n#Need to do it quietly \n#get(GAIA_LIST=gaia_data_list, TARGET_DIR=\"results\",QUIET=True)\n\n#########################Warning###########################\n#specifying both FILENAME and TYPE is not allowed \n#get(GAIA_LIST=gaia_data_list, TARGET_DIR=\"results\", FILENAME={'temp' :'temp.fits','em':'em.fits'}, TYPE=['temp','em'])\n###########################################################\n\n#Need to get a tar ball, do something like :\n#get_selection(GAIA_LIST=gaia_data_list,DOWNLOAD_TYPE=\"tar\", target_dir=\"results\" ,FILENAME=\"my_dowload_file.tar\")\n\n","sub_path":"example_gaia.py","file_name":"example_gaia.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4161595","text":"import sys\nimport psycopg2\nfrom filmie2ML import *\n##\n## Script to add a USER & MOVIE_ID and RATINGS to end of a datafile\n##\n##USAGE ::\n## $ python3 dumpUser2File.py filename User_ID\n##\n## DOES NOT NORMALIZE THE ADDED USERS RATINGS\n\n\nfilename = sys.argv[1]\nuser_id = sys.argv[2]\n\n\nf_data = open(filename,'a')\n\ncurML = openMovieLensDB()\nmovies = loadDBData(curML, \"movies_large\")\n\ncurFilmie = openFilmieDB()\n\n\n### GRAB USERS MOVIES\n\nuser_filmie_Movies = grabLikes(curFilmie,[int(user_id)])\nwrong_count = 0\nfor item in user_filmie_Movies:\n movie_id = item[1]\n try:\n print(str(user_id) + \",\" + str(movie_id) + \",\" + \"4.5\" + \",\" + \"123456\", file = f_data)\n except:\n print(\"Failed out of something\", movie_id)\n\n\nuser_filmie_Movies = grabDisLikes(curFilmie,[int(user_id)])\nfor item in user_filmie_Movies:\n movie_id = item[1]\n try:\n print(str(user_id) + \",\" + str(movie_id) + \",\" + \"1.5\" + \",\" + \"123456\", file = f_data)\n except:\n print(\"Failed out of something negative\", movie_id)\n \n\ncurML.close()\ncurFilmie.close()\n\nf_data.close()\n","sub_path":"dumpUsers2CSVFile.py","file_name":"dumpUsers2CSVFile.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40608208","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n # Recursive\n def inorderTraversal1(self, root: TreeNode):\n nums = []\n return self.method(root, nums)\n\n def method(self, root: TreeNode, nums):\n if root is not None:\n self.method(root.left, nums)\n nums.append(root.val)\n self.method(root.right, nums)\n return nums\n\n # Stack\n def inorderTraversal2(self, root: TreeNode):\n res = []\n if not root:\n return res\n stack = []\n 
cur = root\n while stack or cur:\n while cur:\n stack.append(cur)\n cur = cur.left\n cur = stack.pop()\n res.append(cur.val)\n cur = cur.right\n return res\n\n # Color-marking method\n def inorderTraversal(self, root: TreeNode):\n stack = list()\n stack.append((0, root))\n result = list()\n while stack:\n color, cursor = stack.pop()\n if cursor is None:\n continue\n if color == 0:\n stack.append((0, cursor.right))\n # Color 1 marks a node as already visited; the next time it is popped, its value is stored\n stack.append((1, cursor))\n # Since a stack is used, push in reverse order here so the left node is popped first\n stack.append((0, cursor.left))\n else:\n result.append(cursor.val)\n\n return result\n\n\nif __name__ == '__main__':\n x = Solution()\n a = TreeNode(1)\n b = TreeNode(2)\n c = TreeNode(3)\n a.left = b\n a.right = c\n x.inorderTraversal(a)\n print(x.inorderTraversal(a))\n","sub_path":"94. 二叉树的中序遍历.py","file_name":"94. 二叉树的中序遍历.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548684991","text":"\n\"\"\"\nA resource defines an object that exposes functionality. It allows the\ndefinition of handlers that can be called on the resource.\n\"\"\"\nimport six\nfrom whistle.request import Request\nfrom whistle.response import Response\nfrom functools import partial\n\n\nclass ResourceOptions(object):\n \"\"\"\n A configuration class for the resource. Provides sane defaults for options\n that can be over-ridden by the Meta class in a resource.\n\n @handlers: a map of handler_name to a list of callables.\n resource.handler_name will call the callable with a request object.\n The handler is expected to return a response object\n\n @serializer (Class): object of this class should be able to serialize the\n response from the handlers\n\n @validator (Class): object of this class should be able to validate the\n request. Authorization, update and create request data validation,\n business validations (is this update even allowed etc) are all expected\n to be handled by a validator\n \"\"\"\n\n def __init__(self, options):\n self.options = options\n self.handlers = {}\n self.serializer = None # set this to default serializer later on\n self.validator = None\n\n for opt_name in dir(options):\n if not opt_name.startswith('_'):\n setattr(self, opt_name, getattr(options, opt_name))\n\n\nclass ResourceMetaClass(type):\n\n def __new__(cls, name, bases, attrs):\n new_class = super(ResourceMetaClass, cls).__new__(\n cls, name, bases, attrs)\n options = getattr(new_class, 'Meta', None)\n new_class._meta = ResourceOptions(options)\n\n return new_class\n\n\nclass Resource(six.with_metaclass(ResourceMetaClass)):\n\n def get_validation_object(self, request):\n if self._meta.validator:\n return self._meta.validator(request=request)\n else:\n return None\n\n def validate_and_call(self, fn, validation_object, request):\n \"\"\"\n validation_object.pre_function_name and\n validation_object.post_function_name are called before and after the\n actual function call. If pre/post return a response, it is immediately\n returned. 
If the function call returns a response, it is passed to the\n post_validator to modify the response if necessary.\n \"\"\"\n if validation_object is not None:\n fn_name = fn.__name__\n pre_validator = getattr(validation_object,\n \"pre_%s\" % fn_name, None)\n post_validator = getattr(validation_object,\n \"post_%s\" % fn_name, None)\n else:\n pre_validator = None\n post_validator = None\n\n if pre_validator:\n # can raise a validation error and exit\n pre_validator_response = pre_validator(request=request)\n if pre_validator_response is not None:\n return pre_validator_response\n\n response = fn(request=request)\n\n if post_validator:\n if isinstance(response, Response):\n post_validator_response = post_validator(response=response)\n else:\n post_validator_response = post_validator(request=request)\n\n if post_validator_response is not None:\n return post_validator_response\n\n return response\n\n def call_handler(self, pipeline, call, user=None, **kwargs):\n \"\"\"\n used when \"resource_obj\".\"handler_name\" is accessed. A partial of this\n function is returned on attribute access.\n \"\"\"\n pipeline = [fn for fn in pipeline if callable(fn)]\n request = Request(user=user, params=kwargs, call=call)\n validation_object = self.get_validation_object(request)\n\n for fn in pipeline:\n response = self.validate_and_call(fn, validation_object, request)\n if isinstance(response, Response):\n # fn has returned a response. Break and return the value\n break\n\n return response # this should be serialized and returned later on\n\n def __getattr__(self, name):\n \"\"\"\n check if a handler is being accessed. If yes check if it is a callable\n and return a partial that will construct the request and call the\n handler with it.\n \"\"\"\n handlers = self._meta.handlers\n if name in handlers:\n handler_pipeline = handlers[name]\n return partial(self.call_handler,\n pipeline=handler_pipeline,\n call=name)\n else:\n return object.__getattribute__(self, name)\n","sub_path":"whistle/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181856829","text":"import os\n\nfrom house_page_processor import HousePageProcessor\nfrom requester import Requester, RequestsLibrary, RetryError\nfrom time_util import timeit\n\n\nclass HouseDownloader:\n \"\"\"Class that takes care of downloading the images for the given range of houses from the API.\n\n It does so by iterating through the first 10 pages of the API endpoint and processing each page\n to download the images.\n \"\"\"\n NUMBER_OF_PAGES = 10\n API_URL = \"http://app-homevision-staging.herokuapp.com/api_project/houses\"\n\n def __init__(self, requests_library: Requester, house_page_processor: HousePageProcessor):\n self._requests_library = requests_library\n self._house_page_processor = house_page_processor\n\n @classmethod\n def build(cls):\n return cls(RequestsLibrary(), HousePageProcessor.build())\n\n @timeit\n def download_houses(self):\n self._ensure_dir()\n\n results = []\n for i in range(1, self.NUMBER_OF_PAGES+1):\n # I think this could also be parallelized as a further improvement, no real reason we\n # have to access the pages sequentially\n url = f\"{self.API_URL}?page={i}\"\n try:\n response = self._requests_library.get(url)\n except RetryError: # We allow for 5 retries which should always be enough but if not we log it\n print(f\"Page {i} could not be loaded after the maximum amount of retries.\")\n continue\n\n 
page_results = self._house_page_processor.download_all(response.json())\n for page_result in page_results:\n results.append(page_result)\n\n return results\n\n def _ensure_dir(self):\n if not os.path.isdir(\"output\"):\n os.mkdir(\"output\")\n","sub_path":"src/house_downloader.py","file_name":"house_downloader.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502712280","text":"LEVE_ITEMS = {\"name\": {}, \"id\": {}}\nQUEST_TEXT = {\"text\": {}}\nQUESTS = {\"name\": {}, \"genre_name\": {}, \"npc_name\": {}, \"patch\": {\"name\": {}, \"number\": {}}}\nITEM_CLASSES = {} # TODO: Add all item classes.\n\ndef items(args):\n d = {}\n for cls in ITEM_CLASSES:\n sd = {}\n for subcls in cls:\n sd[subcls] = args\n d[cls] = sd\n return d\n\ntable = {\n # TODO: Gathering, crafting\n\t\"achievement\": {\n\t\t\"url\": \"https://api.xivdb.com/achievement\",\n\t\t\"keys\": {\n\t\t\t'name': {},\n 'name_ja': {},\n\t\t\t'patch': {'number': {}},\n\t\t\t'kind_name': {},\n 'type_name': {},\n 'category_name': {},\n 'help': {},\n 'item': {},\n 'title': {}\n\t\t}\n\t},\n \"action\": {\n \"url\": \"https://api.xivdb.com/action\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'help_en': {},\n 'level': {},\n 'cost_cp': {},\n 'cost_hp': {},\n 'cost_mp': {},\n 'cost_tp': {},\n 'cast_range': {},\n 'cast_time': {},\n 'recast_time': {},\n 'is_trait': {},\n 'type_name': {},\n 'patch': {'number': {}},\n },\n },\n \"emote\": {\n \"url\": \"https://api.xivdb.com/emote\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'text_command': {'command_1': {}, 'command_2': {}, 'command_3': {}, 'command_4': {}},\n 'patch': {'number': {}},\n },\n },\n \"enemy\": {\n \"url\": \"https://api.xivdb.com/enemy\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'map_data': {\n 'maps': {\"placename_name\": {}, \"zone_name\": {}},\n 'stats': {'hpAvg': {}, 'hpMax': {}, 'hpMin': {}, 'levelMax': {}, 'levelMin': {}, 'mpAvg': {}, 'mpMax': {}, 'mpMin': {}},\n },\n 'patch': {'number': {}},\n },\n },\n \"fate\": {\n \"url\": \"https://api.xivdb.com/fate\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'help_en': {},\n 'class_level': {},\n 'class_level_max': {},\n 'map': {'placename': {'name': {}}},\n 'patch': {'number': {}},\n },\n },\n \"instance\": {\n \"url\": \"https://api.xivdb.com/instance\",\n \"keys\": {\n 'name': {},\n 'name_ja': {},\n 'help_en': {},\n 'level': {},\n 'level_sync': {},\n 'time_limit': {},\n 'tanks_per_party': {},\n 'healers_per_party': {},\n 'dps_per_party': {},\n 'item_level': {},\n 'item_level_sync': {},\n 'content_name': {},\n 'patch': {'number': {}},\n },\n },\n \"item\": {\n \"url\": \"https://api.xivdb.com/item\",\n \"keys\": {\n 'name': {},\n 'name_ja': {},\n 'help_en': {},\n 'classjob_category': {},\n 'level_equip': {},\n 'level_item': {},\n 'category_name': {},\n 'attributes_base': {'auto_attack': {}, 'auto_attack_hq': {}, 'block_rate': {}, 'block_rate_hq': {}, 'block_strength': {}, 'block_strength_hq': {},\n 'damage': {}, 'damage_hq': {}, 'defense': {}, 'defense_hq': {}, 'delay': {}, 'delay_hq': {}, 'magic_damage': {}, 'magic_damage_hq': {},\n 'magic_defense': {}, 'magic_defense_hq': {}},\n 'attributes_params': {\"name\": {}, \"value\": {}, \"value_hq\": {}},\n 'patch': {'number': {}},\n },\n },\n \"leve\": {\n \"url\": \"https://api.xivdb.com/leve\",\n \"keys\": {\n 'name': {},\n 'name_ja': {},\n 'help_en': {},\n 'gil_reward': {},\n 'exp_reward': {},\n 'leve_client': {},\n 'class_level': {},\n 'classjob_category': {},\n 
'time_limit': {},\n 'placename': {'name': {}},\n 'patch': {'number': {}},\n #'items': items(LEVE_ITEMS),\n #TODO: This\n },\n },\n \"minion\": {\n \"url\": \"https://api.xivdb.com/minion\",\n \"keys\": {\n 'name': {},\n 'name_ja': {},\n 'info1': {},\n 'info2': {},\n 'summon': {},\n 'behavior': {},\n 'action': {},\n 'attack': {},\n 'cost': {},\n 'hp': {},\n 'defense': {},\n 'skill_cost': {},\n 'speed': {},\n 'patch': {'number': {}},\n },\n },\n \"npc\": {\n \"norepeat\": True,\n \"url\": \"https://api.xivdb.com/npc\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'title': {},\n 'title_de': {},\n 'title_fr': {},\n 'title_jp': {},\n 'patch': {'number': {}},\n },\n },\n \"placename\": {\n \"url\": \"https://api.xivdb.com/placename\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'enemies': {\"name\": {}, \"positions\": {\"hp\": {}, \"level\": {}, \"x\": {}, \"y\": {}}},\n 'instances': {\"name\": {}, \"level\": {}, \"content_type\": {}, \"help\": {}},\n 'npcs': {\"name\": {}, \"position\": {\"x\": {}, \"y\": {}}},\n 'quests': QUESTS,\n 'patch': {'number': {}},\n },\n },\n \"quest\": {\n \"url\": \"https://api.xivdb.com/quest\",\n \"keys\": {\n 'category_name': {},\n 'exp_reward': {},\n 'genre_name': {},\n 'gil_reward': {},\n 'name': {},\n 'name_jp': {},\n 'npc_start': {'name': {}},\n 'npc_end': {'name': {}},\n 'patch': {'number': {}},\n 'pre_quests': QUESTS,\n 'post_quests': QUESTS,\n 'text': {'journal': QUEST_TEXT, 'todo': QUEST_TEXT},\n },\n },\n \"status\": {\n \"url\": \"https://api.xivdb.com/status\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'help': {},\n 'patch': {'number': {}},\n },\n },\n \"title\": {\n \"url\": \"https://api.xivdb.com/title\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n 'name_female': {},\n 'name_female_jp': {},\n 'achivements': {\"name\": {}},\n 'patch': {'number': {}},\n },\n },\n \"weather\": {\n \"url\": \"https://api.xivdb.com/weather\",\n \"keys\": {\n 'name': {},\n 'name_jp': {},\n },\n },\n}\n\ndef databases():\n\treturn table\n","sub_path":"core/scripts/databases/ffxiv.py","file_name":"ffxiv.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349727913","text":"# -*- coding: utf-8 -*-\n\nfrom random import randint\n\n# Rework the practical part of the lesson lesson_007/python_snippets/08_practice.py\n\n# You need to create a cat class. 
The cat has attributes - fullness and a house (which it lives in).\n# The cat lives with a human in the house.\n# For the cat, the house is characterized by a food bowl and dirt.\n# Initially there is no cat food and no dirt in the house.\n\n# Extend the human class by adding methods\n# pick up a cat - the cat gets a house.\n# buy cat food - the cat food in the house increases by 50, money decreases by 50.\n# clean the house - the dirt level in the house decreases by 100, the human's fullness decreases by 20.\n# Increase the amount of money the human earns to 150 (he learned Python and got a good job :)\n\n# The cat can eat, sleep and shred wallpaper - the corresponding methods must be implemented.\n# When the cat sleeps - fullness decreases by 10\n# When the cat eats - fullness increases by 20, cat food in the house decreases by 10.\n# When the cat shreds wallpaper - fullness decreases by 10, the dirt level in the house increases by 5\n# If the fullness level is < 0, the cat dies.\n# An \"act\" method also has to be implemented for the cat, in which it decides\n# what it will do today\n\n# The human and the cat have to live together for 365 days.\n\n# TODO Changed the initial conditions for more plausibility: the human picks up well-fed animals,\n# TODO but comes home to an empty house. Changed the starvation logic - an animal dies at the end of the day\n# TODO and no longer takes part in further \"life\". The dog starts a fight, but only with a well-fed cat\n\nfrom random import randint\n\n# We model a human.\n# The human can eat, work, play and go to the store.\n# The human has a fullness level, a little food and money.\n# If fullness is < 0 units, the human dies.\n# The human has to live for 365 days.\nfrom termcolor import cprint\n\n\nclass House:\n def __init__(self):\n self.food = 0\n self.animal_food = 0\n self.money = 0\n self.debris = 0\n self.number_fighting_cat = 0\n\n def __str__(self):\n return 'The house has {} human food left, {} animal food, {} money and {} dirt'.format(\n self.food, self.animal_food, self.money, self.debris)\n\n\nclass Man:\n\n def __init__(self, name):\n self.name = name\n self.fullness = 50\n self.house = None\n self.living = True\n\n def __str__(self):\n return 'I am {}, fullness {}'.format(\n self.name, self.fullness)\n\n def eat(self):\n if self.house.food >= 10:\n cprint('{} ate'.format(self.name), color='yellow')\n self.fullness += 10\n self.house.food -= 10\n else:\n cprint('{} has no food'.format(self.name), color='red')\n\n def work(self):\n cprint('{} went to work'.format(self.name), color='blue')\n self.house.money += 150\n self.fullness -= 10\n\n def watch_MTV(self):\n cprint('{} watched MTV all day'.format(self.name), color='green')\n self.fullness -= 10\n\n def shopping(self):\n if self.house.money >= 50:\n cprint('{} went to the store for his own food'.format(self.name), color='magenta')\n self.house.money -= 50\n self.house.food += 50\n else:\n cprint('{} does not have enough money to buy his own food!'.format(self.name), color='red')\n\n def shopping_animal_food(self):\n if self.house.money >= 50:\n cprint('{} went to the store for animal food'.format(self.name), color='magenta')\n self.house.money -= 50\n self.house.animal_food += 50\n else:\n cprint('{} does not have enough money to buy animal food!'.format(self.name), color='red')\n\n def clean_house(self):\n self.house.debris -= 100\n self.fullness -= 20\n cprint('{} cleaned the house'.format(self.name), color='cyan')\n\n def go_to_the_house(self, house):\n self.house = house\n self.fullness -= 10\n cprint('{} moved into the house'.format(self.name), 
\n\n    def pick_up_cat(self, cat):\n        cat.house = self.house\n        cprint('The cat {} was picked up by {} and brought home'.format(cat.name, self.name), color='cyan')\n\n    def pick_up_dog(self, dog):\n        dog.house = self.house\n        cprint('The dog {} was picked up by {} and brought home'.format(dog.name, self.name), color='cyan')\n\n    def act(self):\n        if self.living:\n            dice = randint(1, 6)\n            if self.fullness < 0:\n                cprint('{} died...'.format(self.name), color='red')\n                self.living = False\n                return\n            if self.fullness < 20:\n                self.eat()\n            elif self.house.money <= 50:\n                self.work()\n            elif self.house.food <= 10:\n                self.shopping()\n            elif self.house.animal_food <= 30:\n                self.shopping_animal_food()\n            elif self.house.debris >= 100:\n                self.clean_house()\n            elif dice == 1:\n                self.work()\n            elif dice == 2:\n                self.eat()\n            else:\n                self.watch_MTV()\n        else:\n            cprint('==================', color='red')\n\n\nclass Cat:\n\n    def __init__(self, name):\n        self.name = name\n        self.fullness = 50\n        self.house = None\n        self.living = True\n\n    def __str__(self):\n        if self.living:\n            if self.fullness < 0:\n                cprint('Cat {} died...'.format(self.name), color='red')\n                self.living = False\n\n            str_print = 'I am cat {}, fullness {}'.format(self.name, self.fullness)\n        else:\n            str_print = '========================='\n        return str_print\n\n    def eat(self):\n        if self.house.animal_food >= 10:\n            cprint('Cat {} ate'.format(self.name), color='yellow')\n            self.fullness += 20\n            self.house.animal_food -= 10\n        else:\n            self.fullness -= 10\n            cprint('Cat {} has no food. It stayed hungry all day'.format(self.name), color='red')\n\n    def sleep(self):\n        self.fullness -= 10\n        cprint('Cat {} slept all day'.format(self.name), color='blue')\n\n    def strip_wallpaper(self):\n        self.fullness -= 10\n        self.house.debris += 5\n        cprint('Cat {} stripped the wallpaper'.format(self.name), color='red')\n\n    def fight_with_dog(self, cat, dog):\n        self.fullness -= 10\n        self.house.debris += 5\n        self.house.number_fighting_cat = 0\n        cprint('Cat {} fought with dog {}'.format(cat.name, dog.name), color='red')\n\n    def act(self, cat=None):\n        if self.living:\n            dice = randint(1, 6)\n            if self.house.number_fighting_cat == 1 and cat == cat1:\n                self.fight_with_dog(cat=cat1, dog=dog1)\n            elif self.house.number_fighting_cat == 2 and cat == cat2:\n                self.fight_with_dog(cat=cat2, dog=dog1)\n            else:\n                if self.fullness < 20:\n                    self.eat()\n                elif dice in (1, 3, 5):\n                    self.sleep()\n                else:\n                    self.strip_wallpaper()\n        else:\n            cprint('=======================', color='red')\n\n\nclass Dog:\n\n    def __init__(self, name):\n        self.name = name\n        self.fullness = 50\n        self.house = None\n        self.living = True\n\n    def __str__(self):\n        if self.living:\n            if self.fullness < 0:\n                cprint('Dog {} died...'.format(self.name), color='red')\n                self.living = False\n            str_print = 'I am dog {}, fullness {}'.format(self.name, self.fullness)\n        else:\n            str_print = '========================='\n        return str_print\n\n    def eat(self):\n        if self.house.animal_food >= 10:\n            cprint('Dog {} ate'.format(self.name), color='yellow')\n            self.fullness += 20\n            self.house.animal_food -= 10\n        else:\n            self.fullness -= 10\n            cprint('Dog {} has no food, it stayed hungry all day'.format(self.name), color='red')\n\n    def chew_furniture(self):\n        self.fullness -= 10\n        self.house.debris += 5\n        cprint('Dog {} chewed the furniture'.format(self.name), color='red')\n\n    def fight_with_cat(self, cat):\n        self.fullness -= 10\n        self.house.debris += 5\n        cprint('Dog {} fought with cat {}'.format(self.name, cat.name), color='red')\n\n    def act(self):\n        if self.living:\n            dice = randint(1, 4)
\n            self.house.number_fighting_cat = 0\n            if self.fullness < 20:\n                self.eat()\n            elif dice == 1 and cat1.living and cat1.fullness > 10:\n                self.house.number_fighting_cat = 1\n                self.fight_with_cat(cat=cat1)\n            elif dice == 2 and cat2.living and cat2.fullness > 10:\n                self.house.number_fighting_cat = 2\n                self.fight_with_cat(cat=cat2)\n            else:\n                self.chew_furniture()\n        else:\n            cprint('=======================', color='red')\n\n\nmark = Man(name='Марк')\nmy_sweet_home = House()\ndog1 = Dog(name='Жучка')\ncat1 = Cat(name='Мурзик')\ncat2 = Cat(name='Барсик')\nmark.go_to_the_house(house=my_sweet_home)\nmark.pick_up_dog(dog=dog1)\nmark.pick_up_cat(cat=cat1)\nmark.pick_up_cat(cat=cat2)\nfor day in range(1, 366):\n    print('================ day {} =================='.format(day))\n    mark.act()\n    dog1.act()\n    cat1.act(cat=cat1)\n    cat2.act(cat=cat2)\n    print('--- end of day ---')\n    print(mark)\n    print(dog1)\n    print(cat1)\n    print(cat2)\n    print(my_sweet_home)\n\n# Advanced task (optional)\n# Create several (2-3) cats and move them into the person's house.\n# All of them together also have to live for 365 days.\n# (You can determine the critical number of cats that one person can feed...)\n","sub_path":"lesson_007/03_man_ans_cat.py","file_name":"03_man_ans_cat.py","file_ext":"py","file_size_in_byte":11829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"161882479","text":"import math\nimport random\n\n# returns the probability of the value given the mean and standard\n# deviation of a gaussian distribution\ndef gaussianProbability(mean, standardDeviation, value):\n    variance = standardDeviation * standardDeviation\n    coef = 1.0 / math.sqrt(2.0 * math.pi * variance)\n    diffFromMean = float(value) - float(mean)\n    innerExp = (-1.0 * diffFromMean * diffFromMean) / (2.0 * variance)\n    return coef * math.exp(innerExp)\n\ndef mean(values):\n    if len(values) > 0:\n        num = float(len(values))\n        return sum(values) / num\n    else:\n        return 0\n\n# law of large numbers: the sample variance of standard gaussians approaches 1.0:\n# variance([randomGaussian(0.0, 1.0) for n in range(0, 200)]) => 1.1129958700263398\n# variance([randomGaussian(0.0, 1.0) for n in range(0, 200000)]) => 0.99666458910502709\ndef variance(values):\n    avg = mean(values)\n    sumOfSquareDistancesToMean = 0.0\n    for v in values:\n        delta = avg - v\n        deltaSqr = delta * delta\n        sumOfSquareDistancesToMean += deltaSqr\n    return sumOfSquareDistancesToMean / float(len(values))\n\n# returns a random number distributed according to the normal distribution with\n# the provided mean and sigma\ndef randomGaussian(mean, standardDeviation):\n    # From Knuth v2, 3rd ed, p122\n    while True:\n        x1 = 2.0 * random.random() - 1.0\n        x2 = 2.0 * random.random() - 1.0\n        w = x1 * x1 + x2 * x2  # squared radius of the point (x1, x2)\n        if w < 1.0 and w > 0.0:\n            break\n    return mean + standardDeviation * x1 * math.sqrt(-2.0 * math.log(w) / w)\n\n# given an array of N pairs (where first is mean and second is sigma), returns\n# a list of length N with N random gaussians\ndef randomMultivariateGaussian(arrayOfMeansAndSigmas):\n    return [randomGaussian(u, sigma) for [u, sigma] in arrayOfMeansAndSigmas]\n\n\ndef randomUniform(lo, hi):\n    return (hi - lo) * random.random() + lo\n\ndef randomMultivariateUniform(arrayOfMinMaxes):\n    return [randomUniform(lo, hi) for [lo, hi] in arrayOfMinMaxes]\n\ndef ex():\n    randomMultivariateGaussian([(1.0, 10.0), [-5.0, 2.0], [6.0, .001]])\n    return randomGaussian(0.0, 1.0)\n\n# usage:
lowVarianceSample([\"Common\", \"Uncommon\"], [10.0, 1.0], 10)]\n# draws newSize # of objects from objects with corresponding weights\n# yields for each object\n# credit: http://www.google.com/codesearch/p?hl=en&sa=N&cd=2&ct=rc#D6Z80snicPI/trunk/fvision_modules/prob/src/fvision/prob/resampling.cpp&q=low%20variance%20resampling\ndef lowVarianceSample2(objects, weights, newSize):\n if newSize > 0:\n sumWeights = float(sum(weights))\n weights = [float(x) / sumWeights for x in weights]\n invSize = 1.0 / float(newSize)\n r = random.random() * invSize\n i = 0\n c = weights[0]\n for m in xrange(0, newSize):\n u = r + float(float(m) - 1.0 ) * invSize\n while u > c:\n i = i + 1\n c = c + weights[i]\n yield objects[i]\n\ndef lowVarianceSampleBuggy(objects, samples = None):\n if not samples or samples <= 0:\n if samples <= 0 or len(objects) == 0:\n raise Exception(\"Invalid number of samples provided.\")\n samples = len(objects)\n totalWeight = sum([o.weight for o in objects])\n result = []\n offset = random.random()*totalWeight/float(samples)\n increment = float(totalWeight)/float(samples)\n cumulativeWeight = objects[0].weight\n nextSample = offset\n index = 0\n for i in range(samples):\n while nextSample > cumulativeWeight:\n index += 1\n cumulativeWeight += objects[index].weight\n nextSample += increment\n result.append(objects[index])\n assert(samples == len(result)) \n for r in result:\n r.weight = 1.0 / float(len(result))\n return result\n","sub_path":"hw4/statutil.py","file_name":"statutil.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462101218","text":"try:\n class Card:\n def __init__(self,suit,number):\n self._suit = suit\n self._number = number\n def __repr__(self):\n return self.number + \" of\" + self.suit\n @property\n def suit(self):\n return self._suit\n @property\n def number(self):\n return self._number\n @suit.setter\n def suit(self,suit):\n if suit in [\"hearts\", \"clubs\", \"diamonds\",\"spades\"]:\n self._suit=suit\n else:\n print(\"Invalid Suit\")\n @number.setter\n def number(self,number):\n if number in [\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"Jack\",\"Queen\",\"King\",\"Ace\"]:\n self._number= number\n\n else:\n print(\"Invalid Face Value\")\n\n my_card = Card(\"spades\",\"Ace\")\n my_card.suit = \"dinosaur\"\n my_card.number = 3.67\n print(my_card)\nexcept:\n pass\n\n\n","sub_path":"cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379369324","text":"# Tests of obsolete or not-yet-used functionality\n# To run this test, you must be at the package root directory and run:\n# pytest tests/test_aux.py\n\n# This is necessary only when using \"pytest.raises\" tests\n# import pytest\n\nimport os\nimport warnings\n\ntestid='ci37511872'\nconfigfile='tests/testconfig.yml'\n\ndef test_aggregate():\n from dyfi import aggregate\n\n assert aggregate.myCeil(17,10)==20\n\n\ndef test_ipe():\n from dyfi import ipes\n\n func=ipes.aww2014wna # Test this ipe\n\n assert 8.3