diff --git "a/3622.jsonl" "b/3622.jsonl" new file mode 100644--- /dev/null +++ "b/3622.jsonl" @@ -0,0 +1,304 @@ +{"seq_id":"9697799899","text":"import sys\ninput = sys.stdin.readline\n# [BOJ] 16198 에너지 모으기 / 재귀, 백트래킹\nn = int(input())\nx = list(map(int, input().split()))\nans = []\nchk = 0\n\ndef solve(e):\n if len(x) == 2:\n ans.append(e)\n return\n \n for i in range(1, len(x) - 1):\n chk = x[i - 1] * x[i + 1]\n temp = x.pop(i)\n solve(e + chk)\n x.insert(i, temp)\nsolve(0)\nprint(max(ans))","repo_name":"Jsplix/Algorithms","sub_path":"BOJ/BOJ_16198.py","file_name":"BOJ_16198.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"23320691390","text":"\n# code for \"Simulation Research on Data Stream Driven Decision Making in Dynamic Environments\"\n# code by Joseph W. Clark, University of Nebraska at Omaha, 2013\n# joeclark77@hotmail.com\n\n# this file contains the rough-cut \"Environment\" object with the following features:\n# 1. takes initialization parameters for the # of classes (n), # of x variables (m), and relative proportions of the population for each class (p, a list)\n# 2. stores, for each class, a reward distribution R and m x-variable distributions M\n# 3. implements some form of environmental change\n# 4. can generate an individual or a list of individuals\n\n\nimport random\n\ndef weighted_choice(weights):\n \"cribbed from http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/\"\n totals = []\n running_total = 0\n for w in weights:\n running_total += w\n totals.append(running_total)\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i\n\n \n \n \nclass CClass:\n # one of the n classes/type of customers or agents in the environment\n def __init__(self,C,R,m):\n self._m = m # the number of X variables\n self._C = C # the class knows its number/name\n self._R = R # reward \"distribution\" -- in this case a reward fixed at initialization\n self._M = [ random.betavariate(2,2) for i in range(m) ] # x variable distributions -- in this case randomly generated probabilities of a 1 or 0 value (aka Bernoulli distribution)\n #print( \"CClass initialized with reward (\", self._R, \") and x-variable distributions M1-Mn\", self._M )\n def get_individual(self):\n # generate a random individual of this class\n xvars = [ 1 if random.random()\"\n return(description)\n\n \n \nclass BasicEnvironment:\n # the simple environment model described in the 2013 AMCIS paper\n def __init__(self,n,p,m,q,z):\n self._n = n\n self._p = p\n self._m = m\n self._z = z\n # initialize _C as a list of n CClass objects (in this case, two classes, one \"good\" one \"bad\")\n self._C = [ CClass(\"1\",1,m), CClass(\"2\",-1,m) ]\n # check for errors\n if len(self._p) != len(self._C): print(\"ERROR: there are\",len(self._C),\"CClasses but\",len(self._p),\"values of p given\")\n #print( \"Environment initialized with n =\",self._n,\", p =\",self._p,\", m =\",self._m ) \n\n def generate_individuals(self,q):\n individuals = []\n for i in range(q):\n # based on population proportions (p), choose a class\n c = weighted_choice(self._p) \n # return class number, reward, and x variables\n individuals.append( self._C[c].get_individual() )\n return(individuals) \n\n def concept_drift(self):\n # implement the effects of concept drift on the environment\n # (calling the concept_drift() method of CClasses as needed)\n # print(\"concept drifting...\")\n if 
random.random() < self._z:\n for c in self._C: c.concept_drift()\n \n \n \n \n \n# the following is test code; it will only run if this module is launched directly \n# and not when it is imported by another script\nif __name__ == \"__main__\":\n be = BasicEnvironment(2,(0.5,0.5),5,10,0.02)\n #data = be.generate_individuals(10)\n #print(data)\n for C in be._C: print(C._M)\n be.concept_drift()\n\n","repo_name":"joeclark-phd/dsddm","sub_path":"python/rough_cut_environment.py","file_name":"rough_cut_environment.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"18446828435","text":"# determines if a string is a palindrome\n\n# Option 1:\nstring = 'level'\npalindrome = string == string[::-1]\n\nprint(palindrome) # True\n\n# Option 2:\nphrase.find(phrase[::-1])\n\n# Option 3:\ndef palindrome_check(str):\n a = str.lower() # normalize string\n return a == a[::-1]\n\n\nprint(palindrome_check(\"hannah\")) # True\nprint(palindrome_check('Hannah')) # True \nprint(palindrome_check(\"madaa\")) # False\n\n# Option 4:\ndef checkPalindrome(inputString):\n check_string = inputString.lower()\n return True if check_string == check_string[::-1] else False\n","repo_name":"matthewfduffy/coding-challenges-python","sub_path":"_Common_Snippets/string/palindrome_check.py","file_name":"palindrome_check.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"42519307565","text":"#coding=utf-8\r\nimport sys\r\nsys.path.append ('C:\\pythoncode')\r\nfrom util.read_ini import ReadIni\r\nfrom selenium import webdriver\r\nimport time\r\nclass FoundElement ():\r\n def __init__(self):\r\n self.driver = webdriver.Chrome()\r\n self.driver.get(\"http://www.5itest.cn/register\")\r\n self.init= ReadIni()\r\n time.sleep(5)\r\n\r\n def getElement(self,value):\r\n\r\n data=self.init.get_value(value)\r\n by=data.split('>')[0]\r\n element=data.split('>')[1]\r\n if by=='id':\r\n self.driver.find_element_by_id(element).send_keys('111')\r\n\r\n def close(self):\r\n self.driver.close() \r\n\r\nif __name__ == \"__main__\":\r\n element= FoundElement()\r\n element.getElement('user_email') \r\n time.sleep(5)\r\n element.close()\r\n\r\n\r\n\r\n ","repo_name":"chenxiaoao0429/Selenium","sub_path":"pythoncode/login/find_element.py","file_name":"find_element.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"12158794635","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.files.storage import FileSystemStorage\nimport cv2\nimport imutils\nimport numpy as np\nimport pytesseract\nimport re\n\nfrom .models import LicensePlate,Driver\n\npytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'\n\ndef extract_license_plate(request):\n if request.method == 'POST' and request.FILES['image']:\n uploaded_image = request.FILES['image']\n fs = FileSystemStorage()\n image_path = fs.save(uploaded_image.name, uploaded_image)\n\n img = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (600, 400))\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray = cv2.bilateralFilter(gray, 13, 15, 15)\n\n edged = cv2.Canny(gray, 30, 200)\n contours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n contours = 
sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n screenCnt = None\n\n for c in contours:\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.018 * peri, True)\n\n if len(approx) == 4:\n screenCnt = approx\n break\n\n if screenCnt is None:\n detected = 0\n result = \"No contour detected\"\n else:\n detected = 1\n cv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)\n\n if detected == 1:\n mask = np.zeros(gray.shape, np.uint8)\n new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1,)\n new_image = cv2.bitwise_and(img, img, mask=mask)\n\n (x, y) = np.where(mask == 255)\n (topx, topy) = (np.min(x), np.min(y))\n (bottomx, bottomy) = (np.max(x), np.max(y))\n cropped = gray[topx:bottomx + 1, topy:bottomy + 1]\n\n else:\n cropped = gray\n\n text = pytesseract.image_to_string(cropped, config='--psm 11')\n\n text = re.search(r'([A-Z]{3}-\\w{5})|([A-Z]{3}-\\w{5})', text)\n\n if text:\n plate_number = text.group(0)\n try:\n # check if license plate exists in database\n plate_obj = LicensePlate.objects.filter(plate_number=plate_number).first()\n \n if plate_obj:\n driver_obj = Driver.objects.filter(license_plate=plate_obj).first()\n result= \"Detected license plate Number is: \" + plate_number + \"\\nDriver Name: \" + driver_obj.name + \"\\nAddress: \" + driver_obj.address\n \n else:\n # create new LicensePlate object if not found in database\n plate_obj = LicensePlate(image=uploaded_image, plate_number=plate_number)\n plate_obj.save()\n result = \"Detected license plate Number is: \" + plate_number\n except Exception as e:\n result = \"An error occurred while saving the license plate: \" + str(e)\n else:\n result = \"License plate not found in the required format.\"\n\n\n img = cv2.resize(img, (500, 300))\n cropped = cv2.resize(cropped, (400, 200))\n cv2.imwrite(image_path, img)\n cv2.imwrite('cropped.jpg', cropped)\n\n context = {'result': result, 'image_path': image_path}\n\n return render(request, 'upload.html', context)\n\n return render(request, 'upload.html')\n","repo_name":"alaminmagaga/license","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"34885017530","text":"\"\"\"\n-----------------------------\nCP460 (Fall 2020)\nName: Keven Iskander\nID: 160634540\nAssignment 2\n-----------------------------\n\"\"\"\n\n\"\"\"\n-----------------------------\nCP460 (Fall 2020)\nAssignment 2 Solution\n-----------------------------\n\"\"\"\nimport utilities\nimport math\n\n\"\"\"--------- Constants ----------- \"\"\"\nDICT_FILE = 'engmix.txt'\nPAD = 'q'\nBLOCK_MAX_SIZE = 20\n\n\"\"\"\n----------------------------------------------------\n Task 1: Utilities\n----------------------------------------------------\n\"\"\"\n\n\"\"\"\n----------------------------------------------------\nParameters: text (str)\n base (str)\nReturn: positions (2D list)\nDescription: Analyzes a given text for any occurrence of base characters\n Returns a 2D list with characters and their respective positions\n format: [[char1,pos1], [char2,pos2],...]\n Example: get_positions('I have 3 cents.','c.h') -->\n [['h',2],['c',9],['.',14]]\n Text and base are not changed\nAsserts: text and base are strings\n---------------------------------------------------\n\"\"\"\ndef get_positions(text,base):\n \n assert type(text) == str\n assert type(base) == str\n\n positions = []\n\n for i in range(len(text)):\n for j in range(len(base)):\n if text[i] == base[j]:\n 
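# record the matched character together with its index so insert_positions() can restore it later\n                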
positions.append([base[j], i])\n\n return positions\n\n\"\"\"\n----------------------------------------------------\nParameters: text (str)\n base (str)\nReturn: updated_text (str)\nDescription: Removes all base characters from text\nAsserts: text and base are strings\n---------------------------------------------------\n\"\"\"\ndef clean_text(text,base):\n \n assert type(text) == str\n assert type(base) == str\n\n updated_text = text\n\n for i in range(len(text)):\n for j in range(len(base)):\n if text[i] == base[j]:\n \n updated_text = updated_text.replace(base[j], '')\n\n\n return updated_text\n\n\"\"\"\n----------------------------------------------------\nParameters: text (str)\n positions (lsit): [[char1,pos1],[char2,pos2],...]]\nReturn: updated_text (str)\nDescription: Inserts all characters in the positions 2D list into their respective\nAsserts: text is a string and positions is a list\n---------------------------------------------------\n\"\"\"\ndef insert_positions(text, positions):\n \n assert type(text) == str\n assert type(positions) == list\n\n updated_text = text\n\n for i in range(len(positions)):\n updated_text = updated_text[:positions[i][1]] + positions[i][0] + updated_text[positions[i][1]:]\n return updated_text\n\n\"\"\"\n----------------------------------------------------\nParameters: text (str)\n block_size (int)\n padding (bool): False/default = no padding, True = padding\n pad (str): default = PAD\nReturn: blocks (list)\nDescription: Create a list containing strings each of given block size\n if padding flag is set, pad using given padding character\n if no padding character given, use global PAD\nAsserts: text is a string and b_size is a positive integer\n---------------------------------------------------\n\"\"\"\ndef text_to_blocks(text,b_size,padding = 0,pad =PAD):\n \n assert type(text) == str\n assert type(b_size) == int\n assert b_size > 0\n\n blocks = []\n i = 0\n \n while i < (len(text)):\n if len(text) - i < b_size and padding == 1:\n \n blocks.append(text[i:i+b_size] + (pad * (b_size-(len(text)-i))))\n \n else:\n blocks.append(text[i:i+b_size])\n i+=b_size\n return blocks\n\n\"\"\"\n----------------------------------------------------\nParameters: text (string): input string\n shifts (int): number of shifts\n direction (str): 'l' or 'r'\nReturn: update_text (str)\nDescription: Shift a given string by given number of shifts (circular shift)\n If shifts is a negative value, direction is changed\n If no direction is given or if it is not 'l' or 'r' set to 'l'\nAsserts: text is a string and shifts is an integer\n---------------------------------------------------\n\"\"\"\ndef shift_string(text,shifts,direction='l'):\n \n assert type(text) == str\n assert type(shifts) == int\n\n updated_text = text\n accepted_directions = 'rRlL'\n\n if direction not in accepted_directions:\n direction = 'l'\n\n if abs(shifts) > len(text):\n\n if shifts > 0:\n shifts = shifts % len(text)\n else:\n shifts = abs(shifts) % len(text)\n shifts = -shifts\n\n if shifts > 0:\n\n if direction == 'r' or direction == 'R':\n \n updated_text = updated_text[-shifts:] + updated_text[:-shifts]\n # print('r1')\n if direction == 'l' or direction == 'L':\n \n updated_text = updated_text[shifts:] + updated_text[:shifts]\n # print('l1')\n\n elif shifts < 0:\n if direction == 'r' or direction == 'R':\n \n updated_text = updated_text[-shifts:] + updated_text[:-shifts]\n # print('r2')\n if direction == 'l' or direction == 'L':\n \n updated_text = updated_text[shifts:] + updated_text[:shifts]\n # 
print('l2')\n\n return updated_text\n\n\"\"\"\n----------------------------------------------------\nParameters: input_list (list): 2D list\n item (?)\nReturn: i,j (int,int)\nDescription: Performs linear search on input list to find \"item\"\n returns i,j, where i is the row number and j is the column number\n if not found returns -1,-1\nAsserts: input_list is a list\n---------------------------------------------------\n\"\"\"\ndef index_2d(input_list,item):\n \n assert type(input_list) == list\n\n for i in range(len(input_list)):\n j = 0\n while j < len(input_list[i]):\n if input_list[i][j] == item:\n return i, j\n j+=1\n \n return -1,-1\n\n\"\"\"\n----------------------------------------------------\n Task 2: Block Rotation Cipher\n----------------------------------------------------\n\"\"\"\n\n\"\"\"\n----------------------------------------------------\nParameters: key (b,r): tuple(int,int)\nReturn: updated_key (b,r): tuple(int,int)\nDescription: Private helper function for block rotate cipher\n Update the key to smallest positive value\n if an invalid key return (0,0)\nAsserts: None\n---------------------------------------------------\n\"\"\"\ndef _adjust_key_block_rotate(key):\n\n updated_key = 0,0\n \n if type(key) == tuple:\n if isinstance(key[0], int) and isinstance(key[1], int) and key[0]>0:\n temp0 = key[0]\n temp1 = key[1]\n \n while (temp1>temp0):\n temp1 = temp1 - temp0\n while (temp1<0):\n if temp0 + temp1 < temp0:\n temp1 = temp1 + temp0\n updated_key = (temp0, temp1)\n \n\n return updated_key\n\n\"\"\"\n----------------------------------------------------\nParameters: plaintext (str)\n key (tuple(int,int))\nReturn: ciphertext (str)\nDescription: Encryption using Block Rotation Cipher\n Uses left circular rotation + padding\nAsserts: plaintext is a string\nErrors: if invalid key: \n print: \"Error(e_block_rotate): invalid key\"\n return empty string\n---------------------------------------------------\n\"\"\"\ndef e_block_rotate(plaintext,key):\n \n assert type(plaintext) == str\n\n ciphertext = ''\n \n space_positions = get_positions(plaintext, '\\n')\n plaintext = clean_text(plaintext, '\\n')\n\n key = _adjust_key_block_rotate(key)\n if key == (0,0):\n print(\"Error(e_block_rotate): invalid key\")\n \n plainblocks = text_to_blocks(plaintext, key[0], padding=1, pad=PAD)\n\n for i in range(len(plainblocks)):\n plainblocks[i] = shift_string(plainblocks[i], key[1], direction='l')\n ciphertext = ciphertext + plainblocks[i]\n\n ciphertext = insert_positions(ciphertext, space_positions)\n \n return ciphertext\n\n\"\"\"\n----------------------------------------------------\nParameters: ciphertext (str)\n key (tuple(int,int))\nReturn: plaintext (str)\nDescription: Decryption using Block Rotation Cipher\n Removes padding if it exist\nAsserts: ciphertext is a string\nErrors: if invalid key: \n print: \"Error(d_block_rotate): invalid key\" \n return empty string\n---------------------------------------------------\n\"\"\"\ndef d_block_rotate(ciphertext,key):\n \n assert type(ciphertext) == str\n\n plaintext = ''\n \n space_positions = get_positions(ciphertext, '\\n')\n ciphertext = clean_text(ciphertext, '\\n')\n\n key = _adjust_key_block_rotate(key)\n if key == (0,0):\n print(\"Error(e_block_rotate): invalid key\")\n\n cipherblocks = text_to_blocks(ciphertext, key[0])\n\n for i in range(len(cipherblocks)):\n cipherblocks[i] = shift_string(cipherblocks[i], key[1], direction='r')\n plaintext = plaintext + cipherblocks[i]\n\n plaintext = insert_positions(plaintext, space_positions)\n 
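# strip the trailing pad characters ('q') that e_block_rotate added to fill out the final block\n    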
plaintext = plaintext.rstrip(PAD)\n\n return plaintext\n\n\"\"\"\n----------------------------------------------------\nParameters: ciphertext (string)\n arguments (list): [b0,bn,r] default = [0,0,0]\n b0: minimum block size\n bn: maximum block size\n r: rotations\nReturn: key,plaintext\nDescription: Cryptanalysis of Block Rotate Cipher\n Returns plaintext and key (r,b)\n Attempts block sizes from b1 to b2 (inclusive)\n If bn is invalid or unspecified use BLOCK_MAX_SIZE\n Minimum valid value for b0 is 1\n---------------------------------------------------\n\"\"\"\ndef cryptanalysis_block_rotate(ciphertext,arguments=[0,0,0]):\n\n dict_list = utilities.load_dictionary(DICT_FILE)\n \n if (arguments[0] > 0 and arguments[1]> 0) and (arguments[0] == arguments[1]) and arguments[2] == 0:\n for i in range(arguments[1]):\n \n text = d_block_rotate(ciphertext, (arguments[1], i))\n\n if utilities.is_plaintext(text, dict_list) == True:\n return (arguments[1],i), text\n \n\n if (arguments[0] == 0 and arguments[1] == 0) and (arguments[2] > 0):\n \n for i in range(arguments[0], arguments[1]):\n\n text = d_block_rotate(ciphertext, (i, arguments[2]))\n if utilities.is_plaintext(text, dict_list) == True:\n return (i, arguments[2]), text\n\n if (arguments[0] > 0 or arguments[1]> 0) and arguments[2] == 0:\n \n if arguments[1] == 0:\n arguments[1] = BLOCK_MAX_SIZE\n if arguments[0] == 0:\n arguments[0] = 1\n\n for i in range(arguments[0], arguments[1]): \n\n for j in range(arguments[1]):\n \n text = d_block_rotate(ciphertext, (i, j))\n if utilities.is_plaintext(text, dict_list) == True:\n\n return (i, j), text\n\n \n if (arguments[0] == 0 and arguments[1] == 0 and arguments[2] == 0):\n if arguments[1] == 0:\n arguments[1] = BLOCK_MAX_SIZE\n if arguments[0] == 0:\n arguments[0] = 1\n\n for i in range(arguments[0], arguments[1]): \n\n for j in range(arguments[1]):\n \n text = d_block_rotate(ciphertext, (i, j))\n if utilities.is_plaintext(text, dict_list) == True:\n return (i, j), text\n \n\n return '',''\n\n\"\"\"\n----------------------------------------------------\n Task 3: Wheatstone Playfair Cipher\n----------------------------------------------------\n\"\"\"\n\n\"\"\"\n----------------------------------------------------\nParameters: text (str)\nReturn: f_text (str): formatted text\nDescription: Formats a plaintext\n 1- Every W/w is converted to VV/vv\n 2- Append an x if the text length is odd (excluding non-alpha chars)\n 3- Convert every double character pair ## to #x or #X\n----------------------------------------------------\n\"\"\"\ndef _format_playfair(plaintext):\n \n special = utilities.get_base('nonalpha')\n\n plaintext = plaintext.replace('w', 'vv')\n plaintext = plaintext.replace('W', 'VV')\n\n positions = []\n for a in range(len(plaintext)):\n if plaintext[a] == ' ':\n positions.append([' ',a])\n if plaintext[a] in special:\n positions.append([plaintext[a],a])\n if plaintext[a] == '\\n':\n positions.append([plaintext[a],a])\n\n plaintext = clean_text(plaintext,' ')\n plaintext = clean_text(plaintext,special)\n plaintext = clean_text(plaintext,'\\n')\n\n i=0\n while i < len(plaintext)-1:\n \n if plaintext[i] == plaintext[i+1] and plaintext[i].isupper() == True:\n plaintext = plaintext[:i+1] + 'X' + plaintext[i+2:]\n if plaintext[i] == plaintext[i+1] and plaintext[i].islower() == True:\n plaintext = plaintext[:i+1] + 'x' + plaintext[i+2:]\n\n \n i += 2\n\n\n if len(plaintext) % 2 != 0:\n plaintext = plaintext + 'x'\n \n \n plaintext = insert_positions(plaintext, positions)\n\n\n\n return 
plaintext\n\n\"\"\"\n----------------------------------------------------\nParameters: text (str)\nReturn: r_text (str): restored text\nDescription: Restores a plaintext by:\n 1- Converting VV/vv back to W/w\n 2- Append an x if the text length is odd (excluding non-alpha chars)\n 3- Convert every double character pair ## to #x or #X\nAsserts: None\n----------------------------------------------------\n\"\"\"\ndef _restore_playfair(text): \n # print(text)\n # r_text = clean_text(text, utilities.get_base('nonalpha'))\n r_text = text\n \n dict_list = utilities.load_dictionary(DICT_FILE)\n\n new_text = ''\n\n\n\n i = 0\n # print(r_text)\n \n while i < len(r_text)-1:\n \n if r_text[i] == 'V' and r_text[i+1] == 'X' and r_text[i-1]!='V':\n r_text = r_text[:i+1] + 'V' + r_text[i+2:]\n # r_text = r_text.replace('VV', 'W')\n if r_text[i] == 'v' and r_text[i+1] == 'x'and r_text[i-1]!='v':\n r_text = r_text[:i+1] + 'v' + r_text[i+2:]\n # r_text = r_text.replace('vv', 'w')\n # print(r_text)\n \n i += 1\n # print(r_text)\n r_text = r_text.replace('VV', 'W')\n r_text = r_text.replace('vv', 'w')\n # print(r_text)\n\n specials = utilities.get_base('nonalpha')\n positions = []\n\n for a in range(len(r_text)):\n if r_text[a] == ' ':\n positions.append([' ',a])\n if r_text[a] in specials:\n positions.append([r_text[a],a])\n if r_text[a] == '\\n':\n positions.append([r_text[a],a])\n\n \n\n if len(r_text)-1 % 2 != 0:\n r_text = r_text.rstrip('x')\n # print(r_text)\n # print('YES')\n\n # if is_plaintext(_restore_word_playfair(r_text.rstrip('x'), dict_list), dict_list):\n # r_text = r_text.strip('x')\n\n word_list = utilities.text_to_words(r_text)\n # print(word_list)\n \n\n for i in range(len(word_list)):\n word_list[i] = _restore_word_playfair(word_list[i], dict_list)\n new_text = new_text + word_list[i]\n \n # print(new_text)\n\n r_text = new_text\n\n # print(r_text)\n\n r_text = clean_text(r_text,' ')\n r_text = clean_text(r_text, specials)\n r_text = clean_text(r_text, '\\n')\n if len(r_text) % 2 != 0:\n r_text = r_text.rstrip('x')\n \n\n # print('W placed', r_text)\n\n \n r_text = insert_positions(r_text, positions)\n return r_text\n\n\"\"\"\n----------------------------------------------------\nParameters: word (str)\n dict_list (list): 2d dictionary list\nReturn: r_word (str): restored word\nDescription: Restores a word by removing the 'x' character whenever necessary\n Assumes a word has no more than two x's\n Assumes word is either lower, UPPER or Capitalized\nAsserts: None\n----------------------------------------------------\n\"\"\"\ndef _restore_word_playfair(word,dict_list):\n \n new_word = word\n x_count = 0\n i = 0\n x_char = 'x'\n if new_word.isupper()==True:\n x_char = 'X'\n if utilities.is_plaintext(new_word, dict_list, 1) == True:\n return new_word\n else:\n while i < len(new_word):\n if new_word[i] == x_char and i != 0 and x_count < 2:\n # print('test1')\n new_word = new_word[:i] + new_word[i-1] + new_word[i+1:]\n if utilities.is_plaintext(word, dict_list, 1) == True:\n return new_word\n x_count+=1\n i+=1\n\n if utilities.is_plaintext(new_word, dict_list, 1) == False:\n j = 0\n x_count = 0\n x_skip = 1\n new_word = word\n while j < len(new_word):\n if new_word[j] == x_char and j != 0 and x_count < 2:\n # print('test2')\n if x_skip == 0:\n x_skip-=1\n new_word = new_word[:j] + new_word[j-1] + new_word[j+1:]\n if utilities.is_plaintext(word, dict_list, 1) == True:\n return new_word\n else:\n x_skip-=1\n x_count+=1\n j+=1\n\n if utilities.is_plaintext(new_word, dict_list, 1) == False:\n k = 0\n 
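# third attempt: rescan the original word and undo only the first 'x' candidate (x_skip starts at 1)\n        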
x_count = 0\n x_skip = 1\n new_word = word\n while k < len(new_word):\n if new_word[k] == x_char and k != 0 and x_count < 2:\n # print('test3')\n if x_skip == 1:\n x_skip-=1\n new_word = new_word[:k] + new_word[k-1] + new_word[k+1:]\n if utilities.is_plaintext(word, dict_list, 1) == True:\n return new_word\n else:\n x_skip-=1\n x_count+=1\n k+=1\n\n \n return new_word\n\n\"\"\"\n----------------------------------------------------\nParameters: plaintext(str)\n key (list): Playfair Square\nReturn: ciphertext (str)\nDescription: Encryption using Wheatstone Playfair Cipher\n Preserves all non-alpha characters\n Preserves case of characters\n Uses vv for w\n Invokes _format_playfair utility function\nAsserts: plaintext is a string and key is a list\n----------------------------------------------------\n\"\"\"\ndef e_playfair(plaintext, key):\n \n assert type(plaintext) == str\n assert type(key) == list\n \n ciphertext = _format_playfair(plaintext)\n # print(ciphertext)\n\n specials = utilities.get_base('nonalpha')\n positions = []\n\n for a in range(len(ciphertext)):\n if ciphertext[a] == ' ':\n positions.append([' ',a])\n if ciphertext[a] in specials:\n positions.append([ciphertext[a],a])\n if ciphertext[a] == '\\n':\n positions.append([ciphertext[a],a])\n\n ciphertext = clean_text(ciphertext,' ')\n ciphertext = clean_text(ciphertext, specials)\n ciphertext = clean_text(ciphertext, '\\n')\n\n cipherblock = text_to_blocks(ciphertext, 2)\n # print(cipherblock, ciphertext)\n # print(cipherblock)\n\n new_text = ''\n\n \n for i in range(len(cipherblock)):\n\n first = index_2d(key, cipherblock[i][0].upper())\n second = index_2d(key, cipherblock[i][1].upper())\n\n fi = first[0]\n fj = first[1]\n si = second[0]\n sj = second[1]\n\n # print(cipherblock, fi, fj, si, sj, ciphertext)\n # print(cipherblock)\n # print(fi,fj,si,sj)\n if fi == si and fi != -1 and si != -1:\n # print('TEST1')\n if fj != len(key)-1:\n fj+=1\n else:\n fj = 0\n if sj != len(key)-1:\n sj+=1\n else:\n sj = 0\n \n elif fj == sj and fj != -1 and sj != -1:\n # print('TEST2')\n if fi != len(key)-1:\n fi +=1\n else:\n fi = 0\n if si != len(key)-1:\n si+=1\n else:\n si = 0\n\n elif sj < fj and sj != -1 and fj != -1:\n # print('TEST3')\n if first[1] - (first[1]-second[1])>=0:\n fj = fj - (first[1]-second[1])\n else:\n fj = len(key)-1\n if second[1] + (first[1]-second[1]) < 5:\n sj = sj + (first[1]-second[1])\n else:\n sj = len(key)-1\n\n elif fj < sj and fj != -1 and sj != -1:\n # print('TEST4')\n if first[1] - (first[1]-second[1]) >= 0:\n fj = fj - (first[1]-second[1])\n else:\n fj = 0\n if second[1] - (second[1]-first[1]) >= 0:\n sj = sj - (second[1]-first[1])\n else:\n # print('TEST')\n\n sj=0 #+ (second[1]-first[1])\n \n # print(cipherblock[i], fi, fj, si, sj)\n if fi < 0 or fj < 0:\n if cipherblock[i][1].islower():\n cipherblock[i] = ' ' + key[si][sj].lower()\n else:\n cipherblock[i] = ' ' + key[si][sj]\n elif si < 0 or sj < 0:\n if cipherblock[i][0].islower():\n cipherblock[i] = key[fi][fj].lower() + ' '\n else:\n cipherblock[i] = key[fi][fj] + ' '\n else:\n if cipherblock[i].islower():\n cipherblock[i] = key[fi][fj].lower() + key[si][sj].lower()\n elif cipherblock[i][0].islower()==False and cipherblock[i][1].islower()==True:\n cipherblock[i] = key[fi][fj] + key[si][sj].lower()\n elif cipherblock[i][0].islower()==True and cipherblock[i][1].islower()==False:\n cipherblock[i] = key[fi][fj].lower() + key[si][sj]\n else:\n cipherblock[i] = key[fi][fj] + key[si][sj]\n new_text+=cipherblock[i]\n\n \n ciphertext = new_text\n ciphertext = 
insert_positions(ciphertext, positions)\n\n # print(cipherblock)\n # print(ciphertext)\n\n return ciphertext\n\n\"\"\"\n----------------------------------------------------\nParameters: ciphertext(str)\n key (list): Playfair Square\nReturn: plaintext (str)\nDescription: Decryption using Wheatstone Playfair Cipher\n Invokes _restore_playfair function to restore plaintext\nAsserts: ciphertext is a string and key is a list\n----------------------------------------------------\n\"\"\"\ndef d_playfair(ciphertext, key):\n \n assert type(ciphertext) == str\n assert type(key) == list\n\n plaintext = ciphertext\n\n specials = utilities.get_base('nonalpha')\n positions = []\n\n # this position population loop can be replaced by get_positions function\n for a in range(len(plaintext)):\n if plaintext[a] == ' ':\n positions.append([' ',a])\n if plaintext[a] in specials:\n positions.append([plaintext[a],a])\n if plaintext[a] == '\\n':\n positions.append([plaintext[a],a])\n\n plaintext = clean_text(plaintext,' ')\n plaintext = clean_text(plaintext, specials)\n plaintext = clean_text(plaintext, '\\n')\n\n plainblock = text_to_blocks(plaintext, 2)\n # print(cipherblock, ciphertext)\n # print(plainblock)\n\n new_text = ''\n\n for i in range(len(plainblock)):\n\n first = index_2d(key, plainblock[i][0].upper())\n second = index_2d(key, plainblock[i][1].upper())\n\n fi = first[0]\n si = second[0]\n fj = first[1]\n sj = second[1]\n\n if fi == si and fi != -1 and si != -1:\n # print('TEST1')\n if fj != 0:\n fj-=1\n else:\n fj = len(key)-1\n if sj != 0:\n sj-=1\n else:\n sj = len(key)-1\n\n elif fj == sj and fj != -1 and sj != -1:\n # print('TEST2')\n if fi != 0:\n fi -=1\n else:\n fi = len(key)-1\n if si != 0:\n si-=1\n else:\n si = len(key)-1\n\n elif fj < sj and sj != -1 and fj != -1:\n # print('TEST3')\n if first[1] - (first[1]-second[1])>=0:\n fj = fj - (first[1]-second[1])\n else:\n fj = len(key)-1\n if second[1] + (first[1]-second[1]) < 5:\n sj = sj + (first[1]-second[1])\n else:\n sj = len(key)-1\n\n elif sj < fj and fj != -1 and sj != -1:\n # print('TEST4')\n if first[1] - (first[1]-second[1]) >= 0:\n fj = fj - (first[1]-second[1])\n else:\n fj = 0\n if second[1] - (second[1]-first[1]) >= 0:\n sj = sj - (second[1]-first[1])\n else:\n # print('TEST')\n sj = 0\n # sj=len(key)-1 #+ (second[1]-first[1])\n\n if fi < 0 or fj < 0:\n if plainblock[i][1].islower():\n plainblock[i] = ' ' + key[si][sj].lower()\n else:\n plainblock[i] = ' ' + key[si][sj]\n elif si < 0 or sj < 0:\n if plainblock[i][0].islower():\n plainblock[i] = key[fi][fj].lower() + ' '\n else:\n plainblock[i] = key[fi][fj] + ' '\n else:\n if plainblock[i].islower():\n plainblock[i] = key[fi][fj].lower() + key[si][sj].lower()\n elif plainblock[i][0].islower()==False and plainblock[i][1].islower()==True:\n plainblock[i] = key[fi][fj] + key[si][sj].lower()\n elif plainblock[i][0].islower()==True and plainblock[i][1].islower()==False:\n plainblock[i] = key[fi][fj].lower() + key[si][sj]\n else:\n plainblock[i] = key[fi][fj] + key[si][sj]\n new_text+=plainblock[i]\n\n # print(new_text)\n plaintext = new_text\n plaintext = insert_positions(plaintext, positions)\n plaintext = _restore_playfair(plaintext)\n # print(plaintext)\n \n \n\n\n return plaintext\n\n\"\"\"\n----------------------------------------------------\n Task 4: Columnar Transposition Cipher\n----------------------------------------------------\n\"\"\"\n\n\"\"\"\n----------------------------------------------------\nParameters: key (str) \nReturn: key_order (list)\nDescription: Returns key 
order, e.g. [face] --> [1,2,3,0]\n If invalid key --> return []\n Applies to all ASCII characters from space to ~\nAsserts: None\n----------------------------------------------------\n\"\"\"\ndef _get_order_ct(key):\n\n if type(key)!= str:\n return []\n \n key_order = []\n\n key = ''.join([j for i,j in enumerate(key) if j not in key[:i]])\n # key = clean_text(key, ' ')\n\n all_char = ' ' + utilities.get_base('all')\n # print(key)\n\n if len(key) == 1:\n return [0]\n else:\n\n for i in range(len(all_char)):\n for j in range(len(key)):\n if key[j] == all_char[i] and key.index(key[j]) not in key_order:\n # print(key.index(key[j]),key[j])\n key_order.append(key.index(key[j]))\n\n return key_order\n\n\"\"\"\n----------------------------------------------------\nParameters: plaintext (str)\n kye (str)\nReturn: ciphertext (list)\nDescription: Encryption using Columnar Transposition Cipher\n Does not include whitespaces in encryption\n Uses padding\nAsserts: plaintext is a string\nErrors: if key is invalid:\n print: Error(e_ct): invalid key\n----------------------------------------------------\n\"\"\"\ndef e_ct(plaintext,key):\n \n assert type(plaintext) == str\n\n if _get_order_ct(key) == []:\n print('Error(e_ct): invalid key')\n return ''\n \n positions = []\n ciphertext = plaintext\n for a in range(len(ciphertext)):\n if ciphertext[a] == ' ':\n positions.append([ciphertext[a], a])\n if ciphertext[a] == '\\n':\n positions.append([ciphertext[a], a])\n if ciphertext[a] == '\\t':\n positions.append([ciphertext[a], a])\n \n ciphertext = clean_text(ciphertext, ' ')\n ciphertext = clean_text(ciphertext, '\\n')\n ciphertext = clean_text(ciphertext, '\\t')\n\n key_order = _get_order_ct(key)\n columnar_table = utilities.new_matrix(math.ceil(len(ciphertext)/len(key_order)),len(key_order), PAD)\n \n # for i in range(len(columnar_table)):\n # print(columnar_table[i])\n\n k = 0\n for i in range(len(columnar_table)):\n for j in range(len(columnar_table[i])):\n if k < len(ciphertext):\n columnar_table[i][j] = ciphertext[k]\n k+=1\n else:\n columnar_table[i][j] = PAD\n\n \n # print(key_order)\n # for i in range(len(columnar_table)):\n # print(columnar_table[i])\n \n ciphertext = ''\n for i in range(len(key_order)):\n for j in range(len(columnar_table)):\n ciphertext = ciphertext + columnar_table[j][key_order[i]]\n # print(key_order[i],j)\n # print(ciphertext)\n\n # print(ciphertext)\n\n ciphertext = insert_positions(ciphertext,positions)\n ciphertext = ciphertext.rstrip(PAD)\n\n return ciphertext\n\n\"\"\"\n----------------------------------------------------\nParameters: ciphertext (str)\n kye (str)\nReturn: plaintext (list)\nDescription: Decryption using Columnar Transposition Cipher\nAsserts: ciphertext is a string\nErrors: if key is invalid:\n print: Error(d_ct): invalid key\n----------------------------------------------------\n\"\"\"\ndef d_ct(ciphertext,key):\n \n assert type(ciphertext) == str\n\n if _get_order_ct(key) == []:\n print('Error(d_ct): invalid key')\n return ''\n\n positions = []\n plaintext = ciphertext\n for a in range(len(plaintext)):\n if plaintext[a] == ' ':\n positions.append([plaintext[a], a])\n if plaintext[a] == '\\n':\n positions.append([plaintext[a], a])\n if plaintext[a] == '\\t':\n positions.append([plaintext[a], a])\n \n plaintext = clean_text(plaintext, ' ')\n plaintext = clean_text(plaintext, '\\n')\n plaintext = clean_text(plaintext, '\\t') \n\n key_order = _get_order_ct(key)\n columnar_table = utilities.new_matrix(math.ceil(len(plaintext)/len(key_order)),len(key_order), 
PAD)\n\n k = 0\n for i in range(len(columnar_table[k])):\n for j in range(len(columnar_table)):\n if k < len(plaintext):\n columnar_table[j][key_order[i]] = plaintext[k]\n k+=1\n else:\n columnar_table[j][i] = PAD\n # print(key_order)\n \n # for i in range(len(columnar_table)):\n # print(columnar_table[i])\n\n plaintext = ''\n for i in range(len(columnar_table)):\n for j in range(len(key_order)):\n plaintext = plaintext + columnar_table[i][j]\n # print(key_order[i],j)\n # print(ciphertext)\n\n # print(ciphertext)\n\n plaintext = insert_positions(plaintext,positions)\n plaintext = plaintext.rstrip(PAD)\n\n\n\n return plaintext","repo_name":"keveniskander/Cryptography_Cipher_Collection_2","sub_path":"A2_solution.py","file_name":"A2_solution.py","file_ext":"py","file_size_in_byte":31505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"72445132171","text":"from __future__ import print_function\nimport re\nimport argparse\nimport pandas as pandas\nimport os\nfrom tqdm import tqdm\nfrom functools import partial\nimport numpy as np\n\ntemplate_keys = [\"#VORNAME\", \"#NACHNAME\", \"#AUFGABE\"]\nregexes = {key: \"({})+\".format(key) for key in template_keys}\n\n\ndef main(excel_file, template_svg):\n def conv_func(input):\n output = str(input)\n return output\n df = pandas.read_excel(excel_file, converters={key : conv_func for key in template_keys}, na_filter=False)\n value_dict = { key: [row[key] for _, row in df.iterrows()] for key in template_keys }\n pointer_dict = { key: 0 for key, _ in value_dict.items() }\n\n #prepare current outile\n i_outfile = 0\n done = False\n\n while not done:\n i_outfile += 1\n with open(next_out_file(i_outfile, template_svg), 'w') as output_file:\n with open(template_svg, 'r') as template_file:\n for line in template_file:\n new_line = line\n\n # search line and replace\n for key_to_replace, regex in regexes.items():\n cur_pointer = pointer_dict[key_to_replace]\n value_list = value_dict[key_to_replace]\n if cur_pointer < len(value_list):\n cur_value = str(value_list[cur_pointer])\n if re.search(regex, new_line):\n if len(cur_value) == 0:\n cur_value = str(\"\")\n\n print(str(\"{} {} {}\").format(key_to_replace, cur_pointer, cur_value))\n new_line = re.sub(regex, cur_value, new_line)\n pointer_dict[key_to_replace] += 1\n else:\n pass\n # print new line\n print(new_line.encode(\"utf-8\"), file=output_file)\n if np.all([len(lst) == pointer_dict[key] for key, lst in value_dict.items()]):\n done = True\n \n\n if cur_pointer == 0:\n print(\"no matches were found. please check your options again\")\n done = True\n \n summary(i_outfile, pointer_dict)\n\n\ndef next_out_file(i_outfile, template_svg):\n print('creating new output file number {}'.format(i_outfile))\n path, ext = os.path.splitext(template_svg)\n path = \"{}_output_{}\".format(path, i_outfile)\n output_svg = ''.join([path, ext])\n return output_svg\n\n\ndef summary(i_outfile, pointer_dict):\n print(\"---Summary---\")\n print(\"number of output files created : {}\".format(i_outfile))\n template = \"number of {} inserted : {}\"\n for key, count_value in pointer_dict.items():\n print(template.format(key, count_value))\n\n\nif __name__ == '__main__':\n import argparse\n epilog = \"\"\"\n SVG generator. 
Finds template values within the SVG and replaces them with the fields from the excel file, one at a time.\n Template keys:\n template_keys = [\"#VORNAME\", \"#NACHNAME\", \"#AUFGABE\"]\n\n If there are more fields in the excel table than the template has fields, multiple output files will be generated\n\n example usage:\n python badge_maker.py --excel-file example_table.xlsx --template-svg template_grid.svg\n\n\n Note: excel file values must not contain & characters or other characters that need to be escaped.\n \"\"\"\n parser = argparse.ArgumentParser(epilog=epilog)\n parser.add_argument('--excel-file', default='example.xlsx', help=\"path to .xlsx-file\")\n parser.add_argument('--template-svg', default='example_template.svg', help=\"template.svg\")\n args = parser.parse_args()\n main(args.excel_file, args.template_svg)","repo_name":"hackundsoehne/hs-toolkit","sub_path":"badge_maker/badge_maker.py","file_name":"badge_maker.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"40037535090","text":"from __future__ import annotations\n\nERROR_FEATURE_NOT_SUPPORTED = '0A000'\n\nERROR_CARDINALITY_VIOLATION = '21000'\n\n# Class 22 — Data Exception\nERROR_DATA_EXCEPTION = '22000'\nERROR_NUMERIC_VALUE_OUT_OF_RANGE = '22003'\nERROR_INVALID_DATETIME_FORMAT = '22007'\nERROR_DATETIME_FIELD_OVERFLOW = '22008'\nERROR_DIVISION_BY_ZERO = '22012'\nERROR_INTERVAL_FIELD_OVERFLOW = '22015'\nERROR_CHARACTER_NOT_IN_REPERTOIRE = '22021'\nERROR_INVALID_PARAMETER_VALUE = '22023'\nERROR_INVALID_TEXT_REPRESENTATION = '22P02'\nERROR_INVALID_REGULAR_EXPRESSION = '2201B'\nERROR_INVALID_LOGARITHM_ARGUMENT = '2201E'\nERROR_INVALID_POWER_ARGUMENT = '2201F'\nERROR_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'\nERROR_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'\n\n# Class 23 — Integrity Constraint Violation\nERROR_INTEGRITY_CONSTRAINT_VIOLATION = '23000'\nERROR_RESTRICT_VIOLATION = '23001'\nERROR_NOT_NULL_VIOLATION = '23502'\nERROR_FOREIGN_KEY_VIOLATION = '23503'\nERROR_UNIQUE_VIOLATION = '23505'\nERROR_CHECK_VIOLATION = '23514'\nERROR_EXCLUSION_VIOLATION = '23P01'\n\n# Class 25 - Invalid Transaction State\nERROR_IDLE_IN_TRANSACTION_TIMEOUT = '25P03'\nERROR_READ_ONLY_SQL_TRANSACTION = '25006'\n\nERROR_INVALID_SQL_STATEMENT_NAME = '26000'\n\n# Class 28 - Invalid Authorization Specification\nERROR_INVALID_AUTHORIZATION_SPECIFICATION = '28000'\nERROR_INVALID_PASSWORD = '28P01'\n\nERROR_INVALID_CATALOG_NAME = '3D000'\nERROR_INVALID_CURSOR_NAME = '34000'\n\nERROR_SERIALIZATION_FAILURE = '40001'\nERROR_DEADLOCK_DETECTED = '40P01'\n\n# Class 42 - Syntax Error or Access Rule Violation\nERROR_WRONG_OBJECT_TYPE = '42809'\nERROR_INSUFFICIENT_PRIVILEGE = '42501'\nERROR_UNDEFINED_TABLE = '42P01'\nERROR_DUPLICATE_DATABASE = '42P04'\nERROR_SYNTAX_ERROR = '42601'\nERROR_DUPLICATE_CURSOR = '42P03'\nERROR_DUPLICATE_PREPARED_STATEMENT = '42P05'\nERROR_INVALID_COLUMN_REFERENCE = '42P10'\n\nERROR_PROGRAM_LIMIT_EXCEEDED = '54000'\n\nERROR_OBJECT_IN_USE = '55006'\n\nERROR_QUERY_CANCELLED = '57014'\nERROR_CANNOT_CONNECT_NOW = '57P03'\n\n# Class 08 - Connection Exception\nERROR_CONNECTION_CLIENT_CANNOT_CONNECT = '08001'\nERROR_CONNECTION_DOES_NOT_EXIST = '08003'\nERROR_CONNECTION_REJECTION = '08004'\nERROR_CONNECTION_FAILURE = '08006'\nERROR_PROTOCOL_VIOLATION = '08P01'\n\nERROR_INTERNAL_ERROR = 'XX000'\n\nCONNECTION_ERROR_CODES = [\n ERROR_CANNOT_CONNECT_NOW,\n ERROR_CONNECTION_CLIENT_CANNOT_CONNECT,\n ERROR_CONNECTION_DOES_NOT_EXIST,\n 
ERROR_CONNECTION_REJECTION,\n ERROR_CONNECTION_FAILURE,\n]\n\n\nclass BackendError(Exception):\n\n def __init__(self, *, fields: dict[str, str]) -> None:\n msg = fields.get('M', f'error code {fields[\"C\"]}')\n self.fields = fields\n super().__init__(msg)\n\n def code_is(self, code: str) -> bool:\n return self.fields[\"C\"] == code\n\n def get_field(self, field: str) -> str | None:\n return self.fields.get(field)\n\n\ndef get_error_class(fields: dict[str, str]) -> type[BackendError]:\n return error_class_map.get(fields[\"C\"], BackendError)\n\n\nclass BackendQueryCancelledError(BackendError):\n pass\n\n\nclass BackendConnectionError(BackendError):\n pass\n\n\nclass BackendPrivilegeError(BackendError):\n pass\n\n\nclass BackendCatalogNameError(BackendError):\n pass\n\n\nerror_class_map = {\n ERROR_CANNOT_CONNECT_NOW: BackendConnectionError,\n ERROR_CONNECTION_CLIENT_CANNOT_CONNECT: BackendConnectionError,\n ERROR_CONNECTION_DOES_NOT_EXIST: BackendConnectionError,\n ERROR_CONNECTION_REJECTION: BackendConnectionError,\n ERROR_CONNECTION_FAILURE: BackendConnectionError,\n ERROR_INSUFFICIENT_PRIVILEGE: BackendPrivilegeError,\n ERROR_QUERY_CANCELLED: BackendQueryCancelledError,\n ERROR_INVALID_CATALOG_NAME: BackendCatalogNameError,\n}\n\n\ndef _build_fields(code, message, severity=\"ERROR\", detail=None, hint=None):\n fields = {\n \"S\": severity,\n \"V\": severity,\n \"C\": code,\n \"M\": message,\n }\n if detail is not None:\n fields[\"D\"] = detail\n if hint is not None:\n fields[\"H\"] = hint\n return fields\n\n\ndef new(\n code, message, severity=\"ERROR\", detail=None, hint=None, **extra_fields\n):\n fields = _build_fields(code, message, severity, detail, hint)\n fields.update(extra_fields)\n return get_error_class(fields)(fields=fields)\n\n\nclass FeatureNotSupported(BackendError):\n def __init__(self, message=\"feature not supported\", **kwargs):\n super().__init__(fields=_build_fields(\n ERROR_FEATURE_NOT_SUPPORTED, message, **kwargs\n ))\n\n\nclass ProtocolViolation(BackendError):\n def __init__(self, message=\"protocol violation\", **kwargs):\n super().__init__(fields=_build_fields(\n ERROR_PROTOCOL_VIOLATION, message, **kwargs\n ))\n\n\nclass CannotConnectNowError(BackendError):\n def __init__(self, message=\"cannot connect now\", **kwargs):\n super().__init__(fields=_build_fields(\n ERROR_CANNOT_CONNECT_NOW, message, **kwargs\n ))\n\n\nclass InvalidAuthSpec(BackendError):\n def __init__(\n self, message=\"invalid authorization specification\", **kwargs\n ):\n super().__init__(fields=_build_fields(\n ERROR_INVALID_AUTHORIZATION_SPECIFICATION, message, **kwargs\n ))\n","repo_name":"edgedb/edgedb","sub_path":"edb/server/pgcon/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":11828,"dataset":"github-code","pt":"15"} +{"seq_id":"41369768346","text":"from typing import Dict\n\nfrom pysis.exceptions import ProcessError\n\nfrom isis_server.ISISRequest import ISISInputFile\nfrom pysis import IsisPool\nfrom copy import deepcopy\nfrom multiprocessing import Lock\nfrom .logger import get_logger\n\n\nclass ISISCommand:\n def __init__(self, command_name, extra_args: Dict[str, str] = None, disable_to_arg=False):\n self._command_name = command_name\n self._disable_to_arg = disable_to_arg\n self._logger = get_logger(command_name)\n if extra_args is None:\n self._extra_args = dict()\n else:\n self._extra_args = extra_args\n\n def run(self, *files: ISISInputFile):\n errors = list()\n proc_lock = Lock()\n\n 
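# invoke the ISIS command once per input file inside the IsisPool; any ProcessError is logged and collected in errors rather than raised\n        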
self._logger.debug(\"Running {}...\".format(self._command_name))\n\n with IsisPool() as isis:\n isis_cmd = getattr(isis, self._command_name)\n\n for file in files:\n args = deepcopy(self._extra_args)\n args[\"from\"] = file.input_target\n\n if not self._disable_to_arg:\n args[\"to\"] = file.output_target\n\n try:\n isis_cmd(**args)\n except ProcessError as e:\n err = e.stderr.decode(\"utf-8\")\n with proc_lock:\n self._logger.error(\n \"{} error: {}\".format(self._command_name, err)\n )\n errors.append(err)\n\n self._logger.debug(\"{} complete\".format(self._command_name))\n\n return errors\n","repo_name":"OsirisCapstone2020/ISIS-Server","sub_path":"isis_server/ISISCommand.py","file_name":"ISISCommand.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"41093013173","text":"from categorias.models import Categoria\n\nfrom produtos.models import Produto\n\nfrom django.db.models.fields import UUIDField\nfrom django.test import TestCase\nimport uuid\n\n\nclass CategoriaModelTest(TestCase):\n def test_se_os_atributos_da_model_estao_corretos(self):\n id = Categoria._meta.get_field(\"id\")\n nome = Categoria._meta.get_field(\"nome\")\n\n self.assertIs(id.get_internal_type(), UUIDField().get_internal_type())\n self.assertEqual(id.primary_key, True)\n self.assertEqual(id.default, uuid.uuid4)\n self.assertEqual(id.editable, False)\n self.assertEqual(nome.max_length, 20)\n self.assertEqual(nome.unique, True)\n\n def test_se_os_atributos_da_model_nao_sao_nulaveis(self):\n nome = Categoria._meta.get_field(\"nome\")\n\n self.assertEqual(nome.null, False)\n","repo_name":"erickmarchetti/izamaravilha-panificadora","sub_path":"categorias/tests/tests_model.py","file_name":"tests_model.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"21524016277","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\n\nfrom .forms import CoverLetterForm, DocumentForm\nfrom .models import CoverLetter, Document_CV\n\n# Create your views here.\n\n@login_required\ndef manager(request):\n user = request.user\n return render(request, 'manager/manager.html')\n\n@login_required\ndef biography(request):\n saved = None\n if request.method == 'POST':\n form = CoverLetterForm(request.POST, prefix='coverletter')\n if form.is_valid():\n cover_letter, created = CoverLetter.objects.get_or_create(user=request.user)\n cover_letter_content = form.cleaned_data['cover_letter']\n cover_letter.cover_letter = cover_letter_content\n cover_letter.save()\n saved = True\n else:\n form = CoverLetterForm(prefix='coverletter')\n form2 = DocumentForm(prefix='document') \n saved = False\n return render(request, 'manager/biography.html', {'form': form, 'form2': form2, 'saved': saved})\n\n@login_required\ndef documentupload(request):\n saved = None\n if request.method == 'POST':\n form2 = DocumentForm(request.POST, request.FILES, prefix='document')\n if form2.is_valid():\n cv, created = Document_CV.objects.get_or_create(user=request.user)\n cv_document = form2.cleaned_data['document']\n cv.document = cv_document\n cv.save()\n saved = True\n else:\n form2 = DocumentForm(prefix='document') \n saved = False \n return render(request, 'manager/upload_done.html', {'form': form2, 'saved': saved})\n\n\n@login_required\ndef architect(request):\n return 
render(request, 'manager/architect.html')\n\n@login_required\ndef deactivate(request):\n return render(request, 'manager/deactivate.html') \n\n@login_required\ndef deactivate_account(request):\n user = request.user\n user.is_active = False\n user.save()\n\n return redirect('home:index') ","repo_name":"Hisham-Pak/job-portal","sub_path":"job-portal/manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"17125142671","text":"import os\nimport testinfra.utils.ansible_runner\n\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_vsftpd_config_exists(host):\n ftp_conf = host.file('/etc/vsftpd.conf')\n assert ftp_conf.exists\n\n\ndef test_vsftpd_is_running(host):\n assert host.service('vsftpd').is_running\n\ndef test_ftp_users(host):\n ftp_user = host.file('/etc/group')\n users = ['Rand', 'chistov', 'Bychkov', 'Chernov', \n 'Chernyshova', 'Dmitriev', 'Kuritsyn', \n 'Lantsov', 'Makhaev_A', 'Shevtsev',\n 'Dolotkazina', 'Ann', 'Antonovich', \n 'Pavel', 'solovyev', 'Emelyanov',\n 'DSBobrus', 'Alimov', 'dep4', 'klimov',\n 'Petr', 'Shutov', 'KavUpdate']\n assert ftp_user.contains('ftp_users')\n for user in users:\n assert ftp_user.contains(user)\n\ndef test_net_services_work(host):\n assert host.socket(\"tcp://0.0.0.0:21\").is_listening\n\ndef bin_false_in_shells(host):\n shells = host.file('/etc/shells')\n assert shells.contains('/bin/false')\n","repo_name":"Randsw/server_m","sub_path":"roles/set_up_ftp/molecule/default/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"19413799952","text":"\"\"\"Este módulo contém funções para realizar a extração dos dados das fontes da\ncatarinense.\"\"\"\n\nimport os\nimport ftplib\n\n\ndef extract() -> None:\n \"\"\"Realiza a etapa de extração dos dados.\n \n Returns\n None\n \"\"\"\n path_prefix = 'data/catarinense'\n\n if not os.path.exists(path_prefix):\n os.mkdir(path_prefix)\n\n print('Iniciando download dos dados')\n download_data(path_prefix)\n print('Download dos dados terminado')\n\n # Os dados da catarinense, apesar de vir com extensão .TXT, são dados\n # separados por vírgula, então vamos apenas renomear os arquivos.\n rename_data(path_prefix)\n\n\ndef download_data(dest_dir: str) -> None:\n \"\"\"Realiza o download dos dados do FTP da catarinense.\n \n Args\n dest_dir (str) -- O diretório de destino dos dados baixados.\n\n Returns\n None.\n \"\"\"\n ftp = ftplib.FTP((os.getenv('FTP_HOST')), (os.getenv('FTP_USER')), (os.getenv('FTP_PASSWORD')))\n ftp.cwd(\"/\")\n filematch = '*.TXT'\n\n for filename in ftp.nlst(filematch):\n target_file_name = os.path.join(dest_dir, os.path.basename(filename))\n with open(target_file_name, 'wb') as fhandle:\n ftp.retrbinary('RETR %s' % filename, fhandle.write)\n\n\ndef rename_data(path_prefix) -> None:\n \"\"\"Renomeia os arquivos baixados e padroniza o texto.\n \n Args\n path_prefix (str) -- O diretório onde os dados se encontram.\n\n Returns\n None.\n \"\"\"\n for filename in os.listdir(path_prefix):\n formatted_filename = filename \\\n .lower() \\\n .replace('txt', 'csv')\n\n src = f'{path_prefix}/{filename}'\n dst = f'{path_prefix}/{formatted_filename}'\n os.rename(src, 
dst)\n","repo_name":"bigadriva/cdp-etl","sub_path":"src/catarinense/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"42219730240","text":"import graphene\nfrom graphene_django import DjangoObjectType\n\n\nmodel_to_graphene = {\n \"CharField\": {\"type\": graphene.String, \"operators\": [\"icontains\"]},\n \"TextField\": {\"type\": graphene.String, \"operators\": [\"icontains\"]},\n \"DateField\": {\"type\": graphene.Date, \"operators\": [\"lt\", \"gt\", \"lte\", \"gte\"]},\n \"DateTimeField\": {\"type\": graphene.DateTime, \"operators\": [\"lt\", \"gt\", \"lte\", \"gte\"]},\n \"BigAutoField\": {\"type\": graphene.Int, \"operators\": [\"lt\", \"gt\", \"lte\", \"gte\"]},\n \"IntegerField\": {\"type\": graphene.Int, \"operators\": [\"lt\", \"gt\", \"lte\", \"gte\"]},\n \"URLField\": {\"type\": graphene.String, \"operators\": [\"icontains\"]},\n \"BooleanField\": {\"type\": graphene.Boolean, \"operators\": []}\n}\n\nbanned_fields = [\n \"ImageField\",\n \"FileField\",\n \"ManyToManyField\",\n \"OneToOneField\"\n]\n\n\nclass BaseType(DjangoObjectType):\n class Meta:\n abstract = True\n\n @classmethod\n def resolve(cls, root, info, **kwargs):\n fields = []\n for i in cls._meta.model._meta.get_fields():\n if i.get_internal_type() == \"ForeignKey\":\n fields.append(i.name+\"__id\")\n elif i.get_internal_type() not in banned_fields:\n fields.append(i.name)\n for j in model_to_graphene[i.get_internal_type()][\"operators\"]:\n fields.append(f\"{i.name}__{j}\")\n\n filters = {}\n for i in kwargs:\n if i is not None and i in fields:\n filters[i] = kwargs[i]\n\n return cls._meta.model.objects.filter(**filters)\\\n .prefetch_related(*cls._meta.model.related_fields)\\\n .all()\n\n @classmethod\n def get_filter_fields(cls) -> dict:\n dic = {}\n\n for i in cls._meta.model._meta.get_fields():\n t = i.get_internal_type()\n\n if t == \"ForeignKey\":\n dic[f\"{i.related_model.__name__.lower()}__id\"] = graphene.Argument(graphene.Int, default_value=None)\n\n if t not in banned_fields and t != \"ForeignKey\":\n dic_result = model_to_graphene[t]\n dic[i.name] = graphene.Argument(dic_result[\"type\"], default_value=None)\n\n for j in dic_result[\"operators\"]:\n dic[f\"{i.name}__{j}\"] = graphene.Argument(dic_result[\"type\"], default_value=None)\n\n return dic\n\n","repo_name":"ninnroot/duwin_backend","sub_path":"duwin_backend/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"6250330685","text":"from urllib.request import urlopen\r\nimport time\r\n\r\n\r\ndef get_load_time(url):\r\n\r\n if (\"https\" or \"http\") in url: \r\n open_this_url = urlopen(url)\r\n else:\r\n open_this_url = urlopen(\"https://\" + url)\r\n start_time = time.time()\r\n open_this_url.read()\r\n end_time = time.time()\r\n open_this_url.close()\r\n time_to_load = end_time - start_time\r\n\r\n return time_to_load\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n url = input(\"Saisissez l'url dont vous souhaitez vérifier le temps de chargement : \")\r\n print(f\"\\nLe temps de chargement de {url} est de {get_load_time(url):.2} secondes.\")\r\n except:\r\n print(\"Erreur dans la 
requête\")","repo_name":"GabinCleaver/Time_Go_WebSite","sub_path":"time_loader.py","file_name":"time_loader.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"fr","doc_type":"code","stars":3,"dataset":"github-code","pt":"15"} +{"seq_id":"17425863152","text":"a = int(input(\"Enter the first number:\"))\nb = int(input(\"Enter the second number:\"))\nc = int(input(\"Enter the third number:\"))\nif a <= b and a <= c :\n min = a\nelif b <= a and b <= c :\n min = b\nelif c <= a and c <= b :\n min = c\n#checking for the minumum value\nif a >= b and a >= c:\n max = a\nelif b >= a and b >= c :\n max = b\nelif c >= a and c >= b:\n max = c\n#checking for the maximux value\nprint(min,max)\n#print (\"The minimum number is:\", min , \" and the maximum is:\", max)\n#other method for printing the result\n","repo_name":"MohammedAtef0/Python_Problem_Sovling_PPS","sub_path":"Sheet1- Data Types and Conditions/E. Max_and_Min.py","file_name":"E. Max_and_Min.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"15"} +{"seq_id":"31834501231","text":"# At a basic level, Pandas objects can be thought of as enhanced versions of\n# NumPy structured arrays in which the rows/cols are ID'd with labels instead of int indices\n\n# Pandas provides many useful tools, methods, and functionality on top of basic data structures,\n# but first you must understand the structures (objects)\n\n# three fundamental Pandas data structures:\n # Series\n # DataFrame\n # Index\n\nimport numpy as np\nimport pandas as pd\n\n##############################\n### the Pandas Series object\n# a Series is a one-dimensional array of indexed data\n# can construct as follows:\ndata = pd.Series([0.25, 0.5, 0.75, 1.0])\ndata\n\n# as seen in output, Series wraps both sequence of vals and sequence of indices\n# can access with 'values' and 'index' attributes\n\n# 'values' are simply a NumPy array:\ndata.values\n\n# 'index' is an array-like object of type pd.Index:\ndata.index\n\n# also like NP array, data can be accessed via associated index:\ndata[1]\ndata[1:3]\n\n\n## 'Series' as a generalized NumPy array\n# diff b/w series and np arr:\n# presence of index:\n # NP arr has an implicitly defined integer index\n # PD series has an explicitly defined index\n\n# we can make the explicit index a value of any type (not just int):\ndata = pd.Series([0.25, 0.5, 0.75, 1.0], index=['a', 'b', 'c', 'd'])\ndata\ndata['b']\n\n# or make the indeces non-sequential:\ndata = pd.Series([0.25, 0.5, 0.75, 1.0], index=['2', '3', '5', '7'])\ndata\ndata[5]\n\n\n## 'Series' as a specialized dictionary\n# It's more accurrate to think of Series as a special Python dictionary\n # dict maps arbitrary keys to arbitrary values\n # series maps typed keys to a set of typed values\n\n# the typing is important: the static typing is why PD arrs are more efficient than dicts\n # NP arrays > Python lists\n # PD Series > Python dicts\n\n# an example to clarify the relationship --- can create PD Series from dict\npopulation_dict = {'California': 38332521,\n 'Texas': 26448193,\n 'New York': 19651127,\n 'Florida': 19552860,\n 'Illinois': 12882135}\npopulation = pd.Series(population_dict)\npopulation\n\n# can then perform typical dict-style item access on the new PD Series:\npopulation['California']\n\n# additionally, Series supports array-style slicing (while dicts do NOT)\npopulation['California':'Illinois']\n\n\n## Constructing Series objects\n# all constructors so far have been a 
version of following statement:\npd.Series(data, index='index')\n\n# data can be a NP Array, 'index' defaults to int sequence\npd.Series([2, 4, 6])\n\n# data can be a scalar - behavior is: scaled to fill specified indeces\npd.Series(5, index=[100, 200, 300])\n\n# data can be a dictionary - 'index' defaults to sorted dictionary keys:\npd.Series({2:'a', 1:'b', 3:'c'})\n # NOTE: my iPython terminal is not displaying 1, 2, 3 as book shows.\n # it shows in the order entered, even when stored in var.\n\n# can set index explicitly for dict if you want, but it's a bit odd to say the least\npd.Series({2:'a', 1:'b', 3:'c'}, index=[3, 2])\n\n\n##############################\n### the Pandas DataFrame object\n# If 'Series' is an analog of 1D array w/ flexible indeces\n# Then 'DataFrame' is an analog of 2D array w/ flexible row/col indeces\n\n# i.e. if you think of\n # 2d array as ordered sequence of aligned (sharing index) 1d columns\n# then\n # DataFrame as ordered sequence of aligned Series objects\n\n# Demonstration: new Series lisiting area of 5 states mentioned prior\narea_dict = {'California': 423967, 'Texas': 695662, 'New York': 141297,\n 'Florida': 170312, 'Illinois': 149995}\narea = pd.Series(area_dict)\narea\n\n# Can use this in conjunction with \"popularion\" series from before\nstates = pd.DataFrame({'population': population, 'area': area})\nstates\n\n# like Series, DataFrame has an index attribute that gives access to ind labels\nstates.index\n\n# Additionally, DataFrame has a 'columns' attribute, which is an Index object\n# containing column labels\nstates.columns\n\n\n## DataFrame as a specialized dictionary\n# Similarly, we can think of DataFrame as a specialized dictionary\n # Dictionary maps keys to values\n # DataFrame maps column name to a Series of column data\n\n# demonstrated by asking for 'area' attribute, which returns the Series obj\nstates['area']\n\n# sticking point! in a 2D NP array, data[0] returns first ROW\n# in a DataFrame, data['col0'] returns first COLUMN\n# thus, we prefer to think of DataFrames as generalized dicts for the most part\n\n\n## Constructing DataFrame objects\n\n# from a single Series object: (i.e. 
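# NOTE (editor): a small demonstration of the row-vs-column "sticking point"
# mentioned above -- NumPy 2-D indexing is row-first, while DataFrame bracket
# indexing is column-first. All values are illustrative.
import numpy as np
import pandas as pd

arr = np.array([[1, 2], [3, 4]])
df = pd.DataFrame(arr, columns=['a', 'b'])
print(arr[0])      # [1 2]  -> the first ROW of the array
print(df['a'])     # the COLUMN labeled 'a', returned as a Series
print(df.iloc[0])  # the first row, when a row really is what you want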
1-column DataFrame)\npd.DataFrame(population, columns=['population'])\n\n# from a list of dicts (using a list comprehension to create data)\ndata = [{'a': i, 'b': 2 * i}\n for i in range(3)]\npd.DataFrame(data)\n\n# **NOTE: if keys in dict are missing, PD fills in with \"NaN\" vals **\npd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}])\n\n# from a dictionary of Series objects\npd.DataFrame({'population': population, 'area': area})\n\n# from a 2d NP array (if omitting col names, int indeces used for each)\npd.DataFrame(np.random.rand(3, 2), \n columns=['foo', 'bar'], index=['a', 'b', 'c'])\n\n# from a NP structured array\nA = np.zeros(3, dtype=[('A', 'i8'), ('B', 'f8')])\nA\npd.DataFrame(A)\n\n\n##############################\n### The Pandas Index object\n# In both 'Series' and 'Dataframes', we have explicit 'Index' to reference/modify\n\n# Index object is an interesting structure\n# can be thought of as either an immutable array or an ordered set \n # (technically multi-set due to repeated vals)\n\n# Consequences: certain operations available on Index objects\nind = pd.Index([2, 3, 5, 7, 11])\nind\n\n\n## Index as immutable array\n# like array, can use standard Python indexing notation to get vals/slices\nind[1]\nind[::2]\n\n# similar attributes familiar to NP arrs\nprint(ind.size, ind.shape, ind.ndim, ind.dtype)\n\n# as they are immutable, we cannot modify via normal shorthand\nind[1] = 0 # produces runtime error 'Index does not support mutable operations'\n\n\n## Index as ordered set\n# PD objects are designed to facilitate operations such as joins across datasets\n# thus, set arithmetic is often very useful\n\n# Index object follows many conventions used by pythons built-in \"set\" data structure\n# unions, intersections, differences, and other combinations can be computed:\nindA = pd.Index([1, 3, 5, 7, 9])\nindB = pd.Index([2, 3, 5, 7, 11])\n\nindA & indB # intersection\nindA | indB # union\nindA ^ indB # symmetric difference\n\n# can also equivalently access via object methods if you prefer\nindA.intersection(indB)","repo_name":"pgiardiniere/notes-PythonDataScienceHandbook","sub_path":"3.01-PandasObjects.py","file_name":"3.01-PandasObjects.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"12963887609","text":"from torch import nn, tensor\nimport torch\nfrom model.varianceadaptor.lengthregulator import LengthRegulator\nfrom model.varianceadaptor.variancepredictor import VariancePredictor\n\nimport yaml\nimport os\nimport json\n\nfrom utils.mask_embedding import get_mask_from_lengths\n\nclass VarianceAdaptor(nn.Module):\n\n def __init__(self, model_config: dict, preprocess_config: dict) -> None:\n \"\"\"\n Initializes the variance adapter using the given config. \n \"\"\"\n super(VarianceAdaptor, self).__init__()\n\n # Define duration predictor\n self.duration = VariancePredictor(model_config) \n self.length_regulator = LengthRegulator # A function\n\n # maybe write code, which extracts those features\n # self.features = config['model']['variance-adaptor']['features']\n # self.features.sorted(key= lambda f: config['model']['variance-adaptor'][f]['order'])\n # self.feature_predictors = {feature: VariancePredictor(config) for feature in self.features}\n\n ## define pitch predictor and energy predictor\n self.pitch = VariancePredictor(model_config)\n self.energy = VariancePredictor(model_config) \n\n ## loads file, shouldnt it just be passed in? 
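# NOTE (editor): the operator spellings above (indA & indB, indA | indB,
# indA ^ indB) no longer perform set operations in current pandas -- they were
# deprecated and are now elementwise/bitwise. The method forms are the stable API:
import pandas as pd

indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([2, 3, 5, 7, 11])
print(indA.intersection(indB))          # Index([3, 5, 7], dtype='int64')
print(indA.union(indB))                 # Index([1, 2, 3, 5, 7, 9, 11], dtype='int64')
print(indA.symmetric_difference(indB))  # Index([1, 2, 9, 11], dtype='int64')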
and shouldn't it be closed again?\n variance_config = model_config['variance-adaptor']\n\n with open(\n os.path.join(preprocess_config[\"path\"][\"preprocessed_path\"], \"stats.json\")\n ) as f:\n stats = json.load(f)\n pitch_min, pitch_max = stats[\"pitch\"][:2]\n energy_min, energy_max = stats[\"energy\"][:2]\n\n ## Get bins and embeddings for pitch and energy \n self.pitch_bins = self.get_bin(pitch_min, pitch_max, variance_config['pitch']['n_bins'], variance_config['pitch']['type'])\n self.pitch_embedding = nn.Embedding(variance_config['pitch']['n_bins'], model_config['transformer']['encoder']['hidden'])\n self.pitch_preprocess_type = preprocess_config['preprocessing']['pitch']['feature'] # phoneme or frame\n\n self.energy_bins = self.get_bin(energy_min, energy_max, variance_config['energy']['n_bins'], variance_config['energy']['type'])\n self.energy_embedding = nn.Embedding(variance_config['energy']['n_bins'], model_config['transformer']['encoder']['hidden'])\n self.energy_preprocess_type = preprocess_config['preprocessing']['energy']['feature'] # phoneme or frame\n\n\n # setup embedding here based on config.\n\n \n def get_bin(self, low: float, high: float, n: int, type: str) -> nn.Parameter:\n \"\"\"\n Finds n (int) bins between low (flaot) and high (float) using linspace. If type=log, then logspace is used instead.\n \"\"\"\n if type == 'log':\n return nn.Parameter(torch.linspace(low.log(), high.log(), n).exp(), requires_grad=False)\n else: # In this case type == 'linear' // potentially add more types? Klaus\n return nn.Parameter(torch.linspace(low, high, n), requires_grad=False)\n \n\n def get_feature_embedding(self, predictor: VariancePredictor, bins: torch.Tensor, embedding: nn.Embedding, x: torch.Tensor, target: torch.Tensor, mask: torch.Tensor, scale: int = 1):\n \"\"\"\n Finds the predicted values and embedding using the given predictor and some data.\\n\n\n if a target is given, it will scale the prediction and calculate the embeddings using the prediction,\\n\n else it will calculate the embedding useing the true values.\n \"\"\"\n prediction = predictor(x, mask)\n\n if target is None: # Inference\n prediction = prediction * scale\n embeddings = embedding(torch.bucketize(prediction, bins)) # bucketize takes some values (continous) and an ordered list containing bounderies. For each value find the interval in the bounderies, where the value fits in and replace the value wit the larger (right) boundery. Example: if we have value 2.2 and bounderies [1,4,6,22], then we would return 4 (as 2.2 is between 1 and 4), if we had value [6.001,21] and the same bounderies we would return [22,22] as both of these numbers fall between 6 and 22.\n # lolno it would return [3,3] // Klaus\n else: # Training\n embeddings = embedding(torch.bucketize(target, bins))\n\n return prediction, embeddings\n\n\n def forward(self, hidden_phoneme_sequence: torch.Tensor, sequence_mask: torch.Tensor, frame_masks: torch.Tensor, targets: torch.Tensor, scales: int) -> 'tuple[tensor]':\n\n \"\"\"\n Arguments:\n hidden_phoneme_sequence: A Tensor of size [B, 𝕃, E] \n sequence_mask: A Tensor of shape [B, 𝕃] telling us which phoneme embeddings to mask. 
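# NOTE (editor): standalone check of the torch.bucketize discussion above --
# bucketize returns bucket *indices* (which nn.Embedding then consumes), so
# the corrected "[3,3]" comment is the right reading, not the boundary values:
import torch

boundaries = torch.tensor([1.0, 4.0, 6.0, 22.0])
values = torch.tensor([2.2, 6.001, 21.0])
print(torch.bucketize(values, boundaries))  # tensor([1, 3, 3])
# Also note: get_bin() above calls low.log()/high.log() on plain floats read
# from stats.json; floats have no .log(), so the 'log' branch would need
# math.log(low) / math.log(high) (or torch scalars) to actually run.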
\n frame_mask: A Tensor of shape [B, 𝕄] telling us which frames to mask.\n\n Output:\n variance_embedding: A Tensor of size [B, 𝕄, E]\n duration_prediction: A Tensor of size [B, 𝕃, 1]\n energy_prediction: A Tensor of size [B,𝕃] (phoneme) or [B, 𝕄] (frame)\n pitch_prediction: A Tensor of size [B,𝕃] (phoneme) or [B, 𝕄] (frame)\n\n Description:\n The hidden_phoneme_sequence is passed to the length_duration predictor.\n The hidden_phoneme_sequence is then length regulated\n The length regulated hidden_phoneme_sequence is then passed to the pitch predictor,\n which outputs a pitch prediction and embedding of that pitch prediction. \n The output is then added to the input passed to the embeddings, whose result is passed to\n the energy predictor, which outputs the same as the pitch predictor and lets the final resulting\n embedding be the energy embedding plus the input passed to the energy predictor.\n\n Pseudo-Code:\n x = hidden_phoneme_sequence\n x = length_regulator(x, length_duration(x))\n pitch, pitch_embedding = pitch(x)\n x = pitch_embedding + x\n energy, energy_embedding = energy(x)\n variance_embedding = energy_embedding + x\n\n Abbreviations:\n B = Batch\n L = Sequence Length\n L (Length Regulated) = Sequence Length cloned in accordance with the predicted duration \n E = Embedding Dimension\n\n \"\"\"\n ## takes initial hidden phoneme sequence\n x = hidden_phoneme_sequence\n\n ## pass through duration predictor\n log_duration = self.duration(x, sequence_mask)\n rounded_duration = torch.clamp((log_duration.exp() * scales['duration']).round(), min = 0)\n if self.pitch_preprocess_type == 'phoneme_level':\n pitch, pitch_embedding = self.get_feature_embedding(self.pitch, self.pitch_bins, self.pitch_embedding, x, targets['pitch'], sequence_mask, scales['pitch'])\n x = pitch_embedding + x\n\n ## get enegy embedding and perform skip layer\n if self.energy_preprocess_type == 'phoneme_level':\n energy, energy_embedding = self.get_feature_embedding(self.energy, self.energy_bins, self.energy_embedding, x, targets['energy'], sequence_mask, scales['energy'])\n x = energy_embedding + x\n\n ## length regulation\n if targets['duration'] is None:\n x, mel_lens, _pad_lens = self.length_regulator(x, rounded_duration) \n # Generate the frame_masks from the predicted lengths.\n frame_masks = get_mask_from_lengths(mel_lens)\n\n \n else:\n # We don't multiply with scales['duration'], since this is expected to be incorperated into the target\n x, mel_lens, _pad_lens = self.length_regulator(x, targets['duration'])\n\n\n ## get pitch embedding and perform skip layer\n if self.pitch_preprocess_type == 'frame_level':\n pitch, pitch_embedding = self.get_feature_embedding(self.pitch, self.pitch_bins, self.pitch_embedding, x, targets['pitch'], frame_masks, scales['pitch'])\n x = pitch_embedding + x\n\n ## get enegy embedding and perform skip layer\n if self.energy_preprocess_type == 'frame_level':\n energy, energy_embedding = self.get_feature_embedding(self.energy, self.energy_bins, self.energy_embedding, x, targets['energy'], frame_masks, scales['energy'])\n x = energy_embedding + x\n\n variance_out = x\n # Also return the pitch and energy without embedding them, as we need these for optimization during training\n return log_duration, pitch, energy, variance_out, frame_masks, mel_lens 
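# NOTE (editor): LengthRegulator is imported from another module and not shown
# here; a minimal sketch of the expansion it performs (repeating each phoneme's
# hidden vector by its duration) via torch.repeat_interleave -- illustrative
# only, not the repo's implementation:
import torch

hidden = torch.randn(4, 8)              # [L=4 phonemes, E=8]
durations = torch.tensor([1, 3, 2, 0])  # frames per phoneme (0 drops a phoneme)
frames = torch.repeat_interleave(hidden, durations, dim=0)
print(frames.shape)                     # torch.Size([6, 8]); M = durations.sum()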
","repo_name":"Skroko/02466-Corti-AI-Project","sub_path":"model/varianceadaptor/varianceadaptor.py","file_name":"varianceadaptor.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"39017777511","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass CrwalipItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n s_ip = scrapy.Field()\n dt_crwalTime = scrapy.Field()\n\n def get_insert_sql(self):\n # 书写insert语句\n s_insertSql = \"\"\"\n insert into ipProxyPool(IP, crawl_time)\n VALUES (%s, %s)\n \"\"\"\n params = (self['s_ip'], self['dt_crwalTime'])\n\n return s_insertSql, params\n\n def get_select_sql(self):\n # 书写查询语句\n s_selectSql = \"\"\"\n select id from ipProxyPool\n where IP = %s\n \"\"\"\n params = (self['s_ip'], )\n return s_selectSql, params\n","repo_name":"JasonmerND/crwalIp","sub_path":"crwalIp/crwalIp/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"15891807347","text":"# -*- coding = utf-8 -*-\n# @TIME :2022/2/24 15:20\n# @Author :CrescentLove\n# @Software :PyCharm\nimport datetime\nimport json\nimport os\nimport threading\nimport tkinter as tk\nimport tkinter.messagebox # 这个是消息框,对话框的关键\nfrom tkinter import ttk\n\nfrom CrescentRes.Mail import Mail\n# from tkinter import filedialog\nfrom NewSeizor import Auto\n\n\nclass App(ttk.Frame):\n def __init__(self, parent):\n ttk.Frame.__init__(self)\n # Make the app responsive\n self.recr = None\n self.passw = None\n self.user = None\n self.host = None\n self.usag = None\n self.bcoo = None\n self.lcoo = None\n self.field = None\n self.timeI = None\n self.venue = None\n self.sport = None\n for index in [0, 1, 2]:\n self.columnconfigure(index=index, weight=1, )\n self.rowconfigure(index=index, weight=1)\n\n self.option_menu_list = [\"\", \"羽毛球\", \"篮球\"]\n self.var_4 = tk.StringVar(value=self.option_menu_list[1])\n self.option_menu_listVenue = [\"\", \"气膜\", \"霍体\"]\n self.var_5 = tk.StringVar(value=self.option_menu_listVenue[1])\n self.option_menu_listTime = [\"\", \"07:00-08:00\", \"08:00-09:00\", \"09:00-10:00\", \"10:00-11:00\", \"11:00-12:00\",\n \"12:00-13:00\",\n \"13:00-14:00\", \"14:00-15:00\", \"15:00-16:00\", \"16:00-17:00\", \"17:00-18:00\",\n \"18:00-19:00\",\n \"19:00-20:00\", \"20:00-21:00\", \"21:00-22:00\"]\n self.var_6 = tk.StringVar(value=self.option_menu_listTime[1])\n self.option_menu_listfield = [\"\", \"全选\", \"场地1\", \"场地2\", \"��地3\", \"场地4\", \"场地5\", \"场地6\", \"场地7\", \"场地8\", \"场地9\", \"场地10\",\n \"场地11\", \"场地12\"]\n self.var_7 = tk.StringVar(value=self.option_menu_listfield[1])\n self.option_menu_listconfig = [\"\", \"方案1\", \"方案2\", \"方案3\"]\n self.var_8 = tk.StringVar(value=self.option_menu_listconfig[1])\n self.datelist = [(datetime.date.today() + datetime.timedelta(days=i)).strftime(\"%m-%d\") for i in range(8)]\n self.var_isam = tk.BooleanVar(value=True)\n self.var_tod = tk.BooleanVar(value=True)\n self.var_isfast = tk.BooleanVar(value=True)\n self.var_isfood = tk.BooleanVar(value=True)\n self.var_d1 = tk.StringVar()\n self.var_d2 = tk.StringVar()\n self.var_d3 = tk.StringVar()\n self.var_d4 = tk.StringVar()\n self.var_d5 = tk.StringVar()\n self.var_d6 = tk.StringVar()\n self.var_d7 = 
tk.StringVar()\n self.var_d8 = tk.StringVar()\n\n self.setup_widgets()\n\n ############################################################################\n\n def start(self):\n if not os.path.exists('mailConfig.json'):\n ismail = tkinter.messagebox.askokcancel('提示', '是否配置邮箱?')\n if ismail:\n self.clickyouxiang()\n if not os.path.exists('venueConfig.json'):\n isven = tkinter.messagebox.showwarning('警告', '请配置场地信息')\n self.clickchangdi()\n if not os.path.exists('venueConfig.json'):\n isven = tkinter.messagebox.showwarning('警告', '请配置网络参数')\n self.clickwangluo()\n with open('venueConfig.json') as f:\n ve = json.loads(f.read())\n self.sport = ve['sport']\n self.venue = ve['venue']\n self.timeI = ve['time']\n self.field = ve['ci']\n\n with open('netConfig.json') as f:\n ve = json.loads(f.read())\n self.lcoo = ve['loginCookie']\n self.bcoo = ve['confirmCookie']\n self.usag = ve['userAgent']\n with open('mailConfig.json') as f:\n ve = json.loads(f.read())\n self.host = ve['host']\n self.user = ve['user']\n self.passw = ve['password']\n self.recr = ve['recr']\n a = self.var_d3\n print(self.var_d1.get(), self.var_d2.get(), self.var_d3.get())\n daylist = [self.var_d1.get(), self.var_d2.get(), self.var_d3.get(), self.var_d4.get(), self.var_d5.get(),\n self.var_d6.get(), self.var_d7.get(), self.var_d8.get()]\n # 执行关键程序\n mainApp = Auto(self.var_isfast.get(), self.var_isam.get(), self.var_isfood.get(), self.var_tod.get(), daylist,\n self.sport, self.venue,\n self.timeI, self.field, self.lcoo, self.bcoo, self.usag)\n\n self.thread_it(mainApp.sta)\n\n mailnow = Mail(self.host, self.user, self.passw, self.recr)\n self.thread_it(mailnow.mail2me, mainApp.st_B)\n\n # 推出功能\n\n def thread_it(self, func, *args):\n '''将函数打包进线程'''\n # 创建\n t = threading.Thread(target=func, args=args)\n # 守护 !!!\n t.setDaemon(True)\n # 启动\n t.start()\n # 阻塞--卡死界面!\n # t.join()\n\n def endProgam(self):\n raise SystemExit\n\n ####################################################################################################\n def delnet(self):\n self.Netentry1.delete(0, \"end\")\n self.Netentry2.delete(0, \"end\")\n self.Netentry3.delete(0, \"end\")\n\n def savenet(self):\n with open(file='netConfig.json', mode='w+', encoding='utf-8') as n:\n data = {\"loginCookie\": self.Netentry1.get(), \"confirmCookie\": self.Netentry2.get(),\n \"userAgent\": self.Netentry3.get()}\n json.dump(data, n)\n\n def delmail(self):\n self.Mailentry1.delete(0, \"end\")\n self.Mailentry2.delete(0, \"end\")\n self.Mailentry3.delete(0, \"end\")\n self.Mailentry4.delete(0, \"end\")\n\n def savemail(self):\n with open(file='mailConfig.json', mode='w+', encoding='utf-8') as n:\n data = {\"host\": self.Mailentry1.get(), \"user\": self.Mailentry2.get(), \"password\": self.Mailentry3.get(),\n \"recr\": self.Mailentry4.get()}\n json.dump(data, n)\n\n def saveven(self):\n with open(file='venueConfig.json', mode='w+', encoding='utf-8') as n:\n data = {\"sport\": self.var_4.get(), \"venue\": self.var_5.get(), \"time\": self.var_6.get(),\n \"ci\": self.var_7.get(), \"proj\": self.var_8.get()}\n json.dump(data, n)\n\n #####################################################################################################\n def clickshouce(self):\n os.system(r\".\\userBook.md\")\n\n def clickyouxiang(self):\n self.mailPage = ttk.LabelFrame(self, text=\"配置\", padding=(10, 5), labelanchor=\"n\") # 修改按钮和边框宽度\n self.mailPage.grid(row=0, column=1, padx=(20, 10), pady=(20, 10), sticky=\"nsew\")\n self.mailPage.columnconfigure(index=0, weight=1)\n\n self.hostname = 
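# NOTE (editor): the thread_it wrapper above is the usual "keep Tk responsive"
# pattern -- daemon thread, started but never joined from the UI thread. A
# standalone equivalent (setDaemon() is deprecated since Python 3.10 in favor
# of the daemon= keyword):
import threading, time

def thread_it(func, *args):
    t = threading.Thread(target=func, args=args, daemon=True)
    t.start()  # no t.join() here; joining would freeze the GUI event loop

thread_it(time.sleep, 0.1)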
ttk.Label(self.mailPage, text=\"Mail Host\")\n self.hostname.grid(row=1, column=0, padx=5, pady=(0, 10), sticky=\"ew\")\n self.Mailentry1 = ttk.Entry(self.mailPage)\n self.Mailentry1.grid(row=1, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n self.username = ttk.Label(self.mailPage, text=\"User Name\")\n self.username.grid(row=2, column=0, padx=5, pady=(0, 10), sticky=\"ew\")\n self.Mailentry2 = ttk.Entry(self.mailPage)\n self.Mailentry2.grid(row=2, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n self.password = ttk.Label(self.mailPage, text=\"Password\")\n self.password.grid(row=3, column=0, padx=5, pady=(0, 10), sticky=\"ew\")\n self.Mailentry3 = ttk.Entry(self.mailPage, show='*')\n self.Mailentry3.grid(row=3, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n self.receiver = ttk.Label(self.mailPage, text=\"Receiver\")\n self.receiver.grid(row=4, column=0, padx=5, pady=(0, 10), sticky=\"ew\")\n self.Mailentry4 = ttk.Entry(self.mailPage)\n self.Mailentry4.grid(row=4, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n try:\n with open(\"mailConfig.json\") as fmail:\n k = json.loads(fmail.read())\n\n self.Mailentry1.insert(0, k[\"host\"])\n self.Mailentry2.insert(0, k[\"user\"])\n self.Mailentry3.insert(0, k[\"password\"])\n self.Mailentry4.insert(0, k[\"recr\"])\n\n except Exception as e:\n print(e)\n self.Mailentry1.insert(0, '邮箱host/如smtp.163.com')\n self.Mailentry2.insert(0, '邮箱账号')\n self.Mailentry3.insert(0, '授权码,非登陆密码')\n self.Mailentry4.insert(0, '需要发送提醒的邮箱')\n\n self.qingkongM = ttk.Button(self.mailPage, text=\"清空\", command=self.delmail)\n self.qingkongM.grid(row=5, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.baocunM = ttk.Button(self.mailPage, text=\"保存\", style=\"Accent.TButton\", command=self.savemail)\n self.baocunM.grid(row=5, column=1, padx=5, pady=10, sticky=\"nsew\")\n\n def clickwangluo(self):\n self.netPage = ttk.LabelFrame(self, text=\"配置\", padding=(10, 5), labelanchor=\"n\") # 修改按钮和边框宽度\n self.netPage.grid(row=0, column=1, padx=(20, 10), pady=(20, 10), sticky=\"nsew\")\n self.netPage.columnconfigure(index=0, weight=1)\n\n self.cookielogin = ttk.Label(self.netPage, text=\"登录Cookies\")\n self.cookielogin.grid(row=1, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.Netentry1 = ttk.Entry(self.netPage)\n # self.Netentry1.insert(0, '复制登录时Cookies')\n self.Netentry1.grid(row=1, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n self.cookieConfirm = ttk.Label(self.netPage, text=\"下单Cookies\")\n self.cookieConfirm.grid(row=2, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.Netentry2 = ttk.Entry(self.netPage)\n # self.Netentry2.insert(0, '复制下单时Cookies')\n self.Netentry2.grid(row=2, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n self.userAgent = ttk.Label(self.netPage, text=\"User Agent\")\n self.userAgent.grid(row=3, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.Netentry3 = ttk.Entry(self.netPage)\n # self.Netentry3.insert(0, '复制任意请求时user-agent参数')\n self.Netentry3.grid(row=3, column=1, padx=5, pady=(0, 10), sticky=\"ew\")\n\n try:\n with open(\"netConfig.json\") as fnet:\n k = json.loads(fnet.read())\n\n self.Netentry1.insert(0, k[\"loginCookie\"])\n self.Netentry2.insert(0, k[\"confirmCookie\"])\n self.Netentry3.insert(0, k[\"userAgent\"])\n\n except Exception as e:\n print(e)\n self.Netentry1.insert(0, '复制登录时Cookies')\n self.Netentry2.insert(0, '复制下单时Cookies')\n self.Netentry3.insert(0, '复制任意请求时user-agent参数')\n\n self.qingkongN = ttk.Button(self.netPage, text=\"清空\", command=self.delnet)\n self.qingkongN.grid(row=4, column=0, 
padx=5, pady=10, sticky=\"nsew\")\n\n self.baocunN = ttk.Button(self.netPage, text=\"保存\", style=\"Accent.TButton\", command=self.savenet)\n self.baocunN.grid(row=4, column=1, padx=5, pady=10, sticky=\"nsew\")\n\n # self.netPage2 = ttk.LabelFrame(self, text=\"试试这些\", padding=(0, 0, 0, 10), labelanchor=\"n\")\n # self.netPage2.grid(row=1, column=1, padx=10, pady=(5, 10), sticky=\"nsew\")\n # self.netPage2.columnconfigure(index=0, weight=1)\n #\n # self.storeNet = ttk.Button(self.netPage2, text=\"保存方案\", style=\"Accent.TButton\")\n # self.storeNet.grid(row=0, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n def clickchangdi(self):\n try:\n with open(\"venueConfig.json\") as fven:\n k = json.loads(fven.read())\n self.var_4 = tk.StringVar(value=k[\"sport\"])\n self.var_5 = tk.StringVar(value=k[\"venue\"])\n self.var_6 = tk.StringVar(value=k[\"time\"])\n self.var_7 = tk.StringVar(value=k[\"ci\"])\n self.var_8 = tk.StringVar(value=k[\"proj\"])\n except Exception as e:\n print(e)\n\n self.venuePage = ttk.LabelFrame(self, text=\"配置\", padding=(10, 5), labelanchor=\"n\") # 修改按钮和边框宽度\n self.venuePage.grid(row=0, column=1, padx=(20, 10), pady=(20, 10), sticky=\"nsew\")\n self.venuePage.columnconfigure(index=0, weight=1)\n\n self.yundongming = ttk.Label(self.venuePage, text=\"体育项目\")\n self.yundongming.grid(row=1, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.yundongmenu = ttk.OptionMenu(self.venuePage, self.var_4, *self.option_menu_list)\n self.yundongmenu.grid(row=1, column=1, padx=5, pady=(0, 10), sticky=\"w\" + \"n\" + \"s\" + \"e\")\n\n self.changdiming = ttk.Label(self.venuePage, text=\"体育场馆\")\n self.changdiming.grid(row=2, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.changdimenu = ttk.OptionMenu(self.venuePage, self.var_5, *self.option_menu_listVenue)\n self.changdimenu.grid(row=2, column=1, padx=5, pady=(0, 10), sticky=\"w\" + \"n\" + \"s\" + \"e\")\n\n self.timeming = ttk.Label(self.venuePage, text=\"期望时间\")\n self.timeming.grid(row=3, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.timemenu = ttk.OptionMenu(self.venuePage, self.var_6, *self.option_menu_listTime)\n self.timemenu.grid(row=3, column=1, padx=5, pady=(0, 10), sticky=\"w\" + \"n\" + \"s\" + \"e\")\n\n self.fieldming = ttk.Label(self.venuePage, text=\"期望场地\")\n self.fieldming.grid(row=4, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.fieldmenu = ttk.OptionMenu(self.venuePage, self.var_7, *self.option_menu_listfield)\n self.fieldmenu.grid(row=4, column=1, padx=5, pady=(0, 10), sticky=\"w\" + \"n\" + \"s\" + \"e\")\n\n self.configming = ttk.Label(self.venuePage, text=\"配置方案\")\n self.configming.grid(row=5, column=0, padx=5, pady=(0, 10), sticky=\"ewsn\")\n self.configmenu = ttk.OptionMenu(self.venuePage, self.var_8, *self.option_menu_listconfig)\n self.configmenu.grid(row=5, column=1, padx=5, pady=(0, 10), sticky=\"w\" + \"n\" + \"s\" + \"e\")\n\n self.baocunV = ttk.Button(self.venuePage, text=\"保存\", style=\"Accent.TButton\", command=self.saveven)\n self.baocunV.grid(row=6, column=0, padx=5, pady=10, sticky=\"nsew\", columnspan=3)\n\n # 设置选项区\n # self.venuePage2 = ttk.LabelFrame(self, text=\"试试这些\", padding=(0, 0, 0, 10), labelanchor=\"n\")\n # self.venuePage2.grid(row=1, column=1, padx=10, pady=(5, 10), sticky=\"nsew\")\n # self.venuePage2.columnconfigure(index=0, weight=1)\n #\n #\n # self.storeVenue = ttk.Button(self.venuePage2, text=\"保存方案\", style=\"Accent.TButton\")\n # self.storeVenue.grid(row=0, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n def clickzhuye(self):\n self.rootPage = 
ttk.LabelFrame(self, text=\"菜单\", padding=(10, 5), labelanchor=\"n\") # 修改按钮和边框宽度\n self.rootPage.grid(row=0, column=1, padx=(20, 10), pady=(20, 10), sticky=\"nsew\")\n self.rootPage.columnconfigure(index=0, weight=1)\n\n self.isAm = ttk.Checkbutton(self.rootPage, text=\"不抢上午\", style=\"Switch.TCheckbutton\", variable=self.var_isam)\n self.isAm.grid(row=1, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.isToday = ttk.Checkbutton(self.rootPage, text=\"不抢今日\", style=\"Switch.TCheckbutton\", variable=self.var_tod)\n self.isToday.grid(row=1, column=2, padx=5, pady=10, sticky=\"nsew\")\n\n self.isAm = ttk.Checkbutton(self.rootPage, text=\"不抢饭点\", style=\"Switch.TCheckbutton\", variable=self.var_isfood)\n self.isAm.grid(row=1, column=1, padx=5, pady=10, sticky=\"nsew\")\n\n self.isAm = ttk.Checkbutton(self.rootPage, text=\"慢速防挂\", style=\"Switch.TCheckbutton\", variable=self.var_isfast)\n self.isAm.grid(row=1, column=3, padx=5, pady=10, sticky=\"nsew\")\n\n self.d1 = ttk.Checkbutton(self.rootPage, text=self.datelist[0], style=\"Toggle.TButton\", variable=self.var_d1)\n self.d1.grid(row=2, column=0, padx=5, pady=10, sticky=\"nsew\")\n self.d2 = ttk.Checkbutton(self.rootPage, text=self.datelist[1], style=\"Toggle.TButton\", variable=self.var_d2)\n self.d2.grid(row=2, column=1, padx=5, pady=10, sticky=\"nsew\")\n self.d3 = ttk.Checkbutton(self.rootPage, text=self.datelist[2], style=\"Toggle.TButton\", variable=self.var_d3)\n self.d3.grid(row=2, column=2, padx=5, pady=10, sticky=\"nsew\")\n self.d4 = ttk.Checkbutton(self.rootPage, text=self.datelist[3], style=\"Toggle.TButton\", variable=self.var_d4)\n self.d4.grid(row=2, column=3, padx=5, pady=10, sticky=\"nsew\")\n self.d5 = ttk.Checkbutton(self.rootPage, text=self.datelist[4], style=\"Toggle.TButton\", variable=self.var_d5)\n self.d5.grid(row=3, column=0, padx=5, pady=10, sticky=\"nsew\")\n self.d6 = ttk.Checkbutton(self.rootPage, text=self.datelist[5], style=\"Toggle.TButton\", variable=self.var_d6)\n self.d6.grid(row=3, column=1, padx=5, pady=10, sticky=\"nsew\")\n self.d7 = ttk.Checkbutton(self.rootPage, text=self.datelist[6], style=\"Toggle.TButton\", variable=self.var_d7)\n self.d7.grid(row=3, column=2, padx=5, pady=10, sticky=\"nsew\")\n self.d8 = ttk.Checkbutton(self.rootPage, text=self.datelist[7], style=\"Toggle.TButton\", variable=self.var_d8)\n self.d8.grid(row=3, column=3, padx=5, pady=10, sticky=\"nsew\")\n\n self.switch = ttk.Button(self.rootPage, text=\"START\", style=\"Accent.TButton\", command=self.start)\n self.switch.grid(row=4, column=0, padx=5, pady=10, sticky=\"nsew\", columnspan=4)\n self.endp = ttk.Button(self.rootPage, text=\"QUIT\", command=self.endProgam)\n self.endp.grid(row=5, column=0, padx=5, pady=10, sticky=\"nsew\", columnspan=4)\n # self.menu_frame2 = ttk.LabelFrame(self, text=\"About\", padding=(40, 10), labelanchor=\"n\")\n # self.menu_frame2.grid(row=1, column=0, padx=(20, 10), pady=(20, 10), sticky=\"nsew\", columnspan=4)\n # 点击开始抢票!(会检查是否配置方案,网络,如果没有弹出警告框跳转,检查是否配置邮件,弹出建议配置)\n\n def setup_widgets(self):\n # Create a Frame for the Checkbuttons\n\n self.menu_frame = ttk.LabelFrame(self, text=\"菜单\", padding=(10, 5), labelanchor=\"n\") # 修改按钮和边框宽度\n self.menu_frame.grid(row=0, column=0, padx=(20, 10), pady=(20, 10), sticky=\"nsew\")\n\n # Menubuttons\n self.zhuye = ttk.Button(\n self.menu_frame, text=\"主页\", style=\"Accent.TButton\", command=self.clickzhuye)\n self.zhuye.grid(row=1, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.changdi = ttk.Button(self.menu_frame, text=\"场地配置\", 
command=self.clickchangdi)\n self.changdi.grid(row=2, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.wangluo = ttk.Button(self.menu_frame, text=\"网络配置\", command=self.clickwangluo)\n self.wangluo.grid(row=3, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.youxiang = ttk.Button(self.menu_frame, text=\"邮件配置\", command=self.clickyouxiang)\n self.youxiang.grid(row=4, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.shouce = ttk.Button(self.menu_frame, text=\"使用手册\", command=self.clickshouce)\n self.shouce.grid(row=5, column=0, padx=5, pady=10, sticky=\"nsew\")\n\n self.menu_frame2 = ttk.LabelFrame(self, text=\"About\", padding=(40, 10), labelanchor=\"n\")\n self.menu_frame2.grid(row=1, column=0, padx=(20, 10), pady=(20, 10), sticky=\"nsew\", columnspan=4)\n\n self.mailA = ttk.Label(self.menu_frame2, text=\"Mail: markdowndir@foxmail.com\", padding=(40, 10))\n self.mailA.grid(row=1, column=0, padx=(20, 10), pady=(20, 10), sticky=\"nsew\", columnspan=4)\n\n self.clickzhuye()\n # fd = filedialog.LoadFileDialog(self.menu_frame2) # 创建打开文件对话框\n # filename = fd.go() # 显示打开文件对话框,并获取选择的文件名称\n # filedialog.Open(filename)\n\n # self.labelmail = ttk.Label(\n # self.menu_frame2,\n # image=tk.PhotoImage(file='./res/mail.png'),\n # )\n # self.labelmail.grid(row=1, column=0, pady=10)\n # self.labelgit = ttk.Label(\n # self.menu_frame2,\n # text=\"github\",\n # anchor='ne'\n # )\n # self.labelgit.grid(row=2, column=0, pady=10)\n #\n # for j in range(15):\n # for i in range(12):\n # self.changci = ttk.Checkbutton(self.widgets_frame, text=\"场地\"+str(i+1), style=\"Toggle.TButton\")\n # self.changci.grid(row=j+1, column=i, padx=5, pady=10, sticky=\"nsew\")\n\n\nif __name__ == \"__main__\":\n root = tk.Tk() # 创建tk对象\n root.title(\"气膜爷来了\")\n root.tk.call(\"source\", \"sun-valley.tcl\") # 设置主题sun-valley\n root.tk.call(\"set_theme\", \"dark\")\n\n # 设置主窗口组件,设置pack填充方式\n app = App(root)\n app.pack(fill=\"both\", expand=True)\n\n # 设置居中\n root.update()\n root.minsize(root.winfo_width(), root.winfo_height())\n x_cordinate = int((root.winfo_screenwidth() / 2) - (root.winfo_width() / 2))\n y_cordinate = int((root.winfo_screenheight() / 2) - (root.winfo_height() / 2))\n root.geometry(\"+{}+{}\".format(x_cordinate, y_cordinate))\n\n root.mainloop()\n","repo_name":"CrescentLove/SJTUvenueGUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"24549846539","text":"def isPrime(n):\r\n # Checking for cases of 2 and 3\r\n if (n <= 1):\r\n return 0\r\n if (n <= 3):\r\n return 1\r\n # skip checking middle five numbers in the loop\r\n if (n % 2 == 0 or n % 3 == 0):\r\n return 0\r\n i = 5\r\n while (i * i <= n):\r\n if (n % i == 0 or n % (i + 2) == 0):\r\n return 0\r\n i = i + 6\r\n return 1\r\n\r\nnum1,num2=map(str,input().split())\r\n\r\n# interlacing string\r\n#By using zip_longest->string = \"\".join([ x + y for x, y in itertools.zip_longest(num1, num2, fillvalue=\"\")])\r\n\r\nstring = list(num2)\r\nfor i,c in enumerate(num1):string.insert(i*2,c);#print(string)\r\nstring=\"\".join(string)\r\n\r\nif isPrime(int(string)):print(\"%s and %s Interlaced is %s is a Prime Number\"%(num1,num2,string))\r\nelse:print(\"%s and %s Interlaced is %s is Not a Prime Number\"%(num1,num2,string))\r\n","repo_name":"DANUSHRAJ/Skillrack-Daily-Challenge-And-Daily-Test","sub_path":"Prime Number by Interlacing.py","file_name":"Prime Number by 
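# NOTE (editor): self-contained check of the zip_longest alternative mentioned
# in the comment above; it produces the same interlacing as the insert loop
# (e.g. "123" and "4567" -> "1425367"):
from itertools import zip_longest

num1, num2 = "123", "4567"
interlaced = "".join(x + y for x, y in zip_longest(num1, num2, fillvalue=""))
print(interlaced)  # 1425367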
Interlacing.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"38129474955","text":"# 미로의 최단거리 통로(DFS)\n\n# 7*7 격자판 미로를 탈출하는 최단경로의 경로수를 출력하는 프로그램. 경로수는 출발점에서 도착점까지 가는데 이동한 횟수를 의미.\n# 출발점은 (1,1) 좌표이고, 탈출 도착점은 (7,7)좌표이다. 격자판의 1은 벽이도, 0은 도로이다.\n# 격자판의 움직임은 상하좌우로만 움직인다. 미로가 다음과 같다면\n# 출 0 0 0 0 0 0\n# 0 1 1 1 1 1 0\n# 0 0 0 1 0 0 0\n# 1 1 0 1 0 1 1\n# 1 1 0 1 0 0 0\n# 1 0 0 0 1 0 0\n# 1 0 1 0 0 0 도\n# 위의 지도에서 출발점에서 도착점까지 갈 수 있는 방법의 수는 8가지이다.\n\n# 입력설명\n# 7*7 격자판의 정보가 주어진다.\n# 0 0 0 0 0 0 0\n# 0 1 1 1 1 1 0\n# 0 0 0 1 0 0 0\n# 1 1 0 1 0 1 1\n# 1 1 0 1 0 0 0\n# 1 0 0 0 1 0 0\n# 1 0 1 0 0 0 0\n\n# 출력설명\n# 첫번쨰 줄에 경로의 가지수를 출력한다.\n# 8\n\n# 내풀이\nmiro = [list(map(int, input().split())) for _ in range(7)]\ncnt = 0\ndx = [1, 0, -1, 0]\ndy = [0, -1, 0, 1]\n\ndef DFS(x, y):\n global cnt\n global ch\n if x==6 and y==6:\n cnt+=1\n else:\n for i in range(4):\n nowX = x+dx[i]\n nowY = y+dy[i]\n if 0<=nowX<=6 and 0<=nowY<=6 and miro[nowX][nowY] == 0:\n miro[nowX][nowY] = 1\n DFS(nowX, nowY)\n miro[nowX][nowY] = 0\n\nmiro[0][0] = 1\nDFS(0, 0)\nprint(cnt)\n\n# 해설\ndx = [-1, 0 ,1, 0]\ndy = [0, 1, 0, -1]\ndef DFS(x, y):\n global cnt\n if x==6 and y==6:\n cnt+=1\n else:\n for i in range(4):\n xx=x+dx[i]\n yy=y+dy[i]\n if 0<=xx<=6 and 0<=yy<=6 and miro[xx][yy] == 0:\n board[xx][yy] = 1\n DFS(xx, yy)\n board[xx][yy] = 0\n\nif __name__ == \"__main__\":\n board = [list(map(int, input().split())) for _ in range(7)]\n cnt = 0\n board[0][0] = 1\n DFS(0,0)\n print(cnt)\n","repo_name":"Gyuil-Hwnag/Algorithm_Study","sub_path":"chap7/chap7-10.py","file_name":"chap7-10.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"30917329499","text":"import os, time\nfrom urllib import parse,request\nimport pygame\nimport tkinter\nimport tkinter.messagebox\n\nREQUEST_URL = 'http://61.182.134.97/wmc/dataPMC'\n\nroot = tkinter.Tk()\nroot.withdraw()\n\ndef audio_init():\n pygame.mixer.init()\n track = pygame.mixer.music.load(\"alarm.wav\")\n\ndef get_data():\n try:\n data = {\n 'tags[]': 'HBJT:HX:HX_TT|CALC_AGC2'\n }\n # response = requests.post(REQUEST_URL, data = {\n # 'tags[]': 'HBJT:HX:HX_TT|CALC_AGC2'\n # })\n req = request.Request(url = REQUEST_URL, data = parse.urlencode(data).encode(encoding='utf-8'))\n res = request.urlopen(req)\n res_data = eval(res.read().decode(encoding='utf-8'))\n if res_data and len(res_data) > 0:\n return float(res_data[0])\n else:\n return False\n except Exception as e:\n print(e)\n return False\n \n \nif __name__ == \"__main__\":\n print('初始化...')\n audio_init()\n last_agc = None\n is_lost = False\n while True:\n print('开始获取...')\n res = get_data()\n if res == False and is_lost == False:\n is_lost = True\n pygame.mixer.music.play()\n tkinter.messagebox.showerror('错误','与服务器失去连接!')\n else:\n is_lost = False\n if last_agc == None:\n last_agc = res\n else:\n diff = abs(last_agc - res)\n print('上一次AGC %s , 当前AGC %s , 差值%s' %(str(last_agc), str(res), str(diff)))\n last_agc = res\n if diff >= 1:\n pygame.mixer.music.play()\n tkinter.messagebox.showwarning('','前后5秒AGC下发值差距超过1MW!')\n time.sleep(5)","repo_name":"leeyongxin/webstudy","sub_path":"client/alarm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"17847967089","text":"from __future__ import absolute_import\nfrom __future__ 
import division\nfrom __future__ import print_function\n\nimport os\n\n\ndef _convert_watch_key_to_tensor_name(watch_key):\n return watch_key[:watch_key.rfind(\":\")]\n\n\ndef annotate_source(dump,\n source_file_path,\n do_dumped_tensors=False,\n file_stack_top=False,\n min_line=None,\n max_line=None):\n \"\"\"Annotate a Python source file with a list of ops created at each line.\n\n (The annotation doesn't change the source file itself.)\n\n Args:\n dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph\n has been loaded.\n source_file_path: (`str`) Path to the source file being annotated.\n do_dumped_tensors: (`str`) Whether dumped Tensors, instead of ops are to be\n used to annotate the source file.\n file_stack_top: (`bool`) Whether only the top stack trace in the\n specified source file is to be annotated.\n min_line: (`None` or `int`) The 1-based line to start annotate the source\n file from (inclusive).\n max_line: (`None` or `int`) The 1-based line number to end the annotation\n at (exclusive).\n\n Returns:\n A `dict` mapping 1-based line number to a list of op name(s) created at\n that line, or tensor names if `do_dumped_tensors` is True.\n\n Raises:\n ValueError: If the dump object does not have a Python graph set.\n \"\"\"\n\n py_graph = dump.python_graph\n if not py_graph:\n raise ValueError(\"Cannot perform source annotation due to a lack of set \"\n \"Python graph in the dump object\")\n\n source_file_path = os.path.normpath(source_file_path)\n\n line_to_op_names = {}\n for op in py_graph.get_operations():\n try:\n traceback = dump.node_traceback(op.name)\n except KeyError:\n pass\n\n for file_path, line_number, _, _ in reversed(traceback):\n if (min_line is not None and line_number < min_line or\n max_line is not None and line_number >= max_line):\n continue\n\n if os.path.normpath(file_path) != source_file_path:\n continue\n\n if do_dumped_tensors:\n watch_keys = dump.debug_watch_keys(op.name)\n # Convert watch keys to unique Tensor names.\n items_to_append = list(\n set(map(_convert_watch_key_to_tensor_name, watch_keys)))\n else:\n items_to_append = [op.name]\n\n if line_number in line_to_op_names:\n line_to_op_names[line_number].extend(items_to_append)\n else:\n line_to_op_names[line_number] = items_to_append\n\n if file_stack_top:\n break\n\n return line_to_op_names\n","repo_name":"DengZhuangSouthRd/simple_tensorflow","sub_path":"tensorflow/python/debug/lib/source_utils.py","file_name":"source_utils.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"15"} +{"seq_id":"1752664317","text":"from flask import Flask, render_template, jsonify, request\nimport json, os\nfrom telegram_bot import TelegramBot\n\napp = Flask(__name__)\nbot = TelegramBot()\n\ndef detect_replit():\n try:\n owner = os.environ[\"REPL_OWNER\"]\n return True\n except:\n return False\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n@app.route('/post', methods=['POST'])\ndef post():\n if request.method == 'POST':\n try:\n data = request.get_json()\n text = data[\"text\"]\n bot.send_post(text)\n data = {\"success\": True}\n except:\n data = {\"success\": False}\n else:\n data = {\"success\": False}\n return jsonify(data)\n\n@app.route('/create', methods=['GET'])\ndef create():\n data = {'text': bot.create_post()}\n return jsonify(data)\n\nif __name__ == '__main__':\n if detect_replit():\n print(f\"Hello, {os.environ['REPL_OWNER']}! 
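# NOTE (editor): hedged usage sketch for annotate_source above; DebugDumpDir
# comes from the TF-1.x tfdbg API, and the dump path, script path, and `graph`
# variable are placeholders, not values from the original code:
from tensorflow.python.debug.lib import debug_data

dump = debug_data.DebugDumpDir("/tmp/tfdbg_dump")  # hypothetical dump root
dump.set_python_graph(graph)                       # graph: the tf.Graph that was dumped
line_to_ops = annotate_source(dump, "/path/to/train_script.py",
                              do_dumped_tensors=False, min_line=1, max_line=200)
for line_number in sorted(line_to_ops):
    print(line_number, line_to_ops[line_number])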
Your bot is running.\")\n app.run(host='0.0.0.0', port=8080, debug=False)\n else:\n app.run(port=8080, debug=False)\n","repo_name":"VAhafonov/ChannelGPT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"15592109072","text":"\"\"\"\nScript to convert a CSV file to an OCL-formatted JSON file based on a provided set of CSV Resource\nDefinitions. The resulting JSON is intended for the OclFlexImporter. See\nOclStandardCsvToJsonConverter.default_csv_resource_definitions in this file for examples.\nNote that resource_fields are required unless \"required\": False or a \"default\" is included.\nNext steps:\n- Bring handling of resource IDs into alignment with updated OCLAPI ID implementation\n- Implement support for: (1) Generic Auto Concept References, (2) Generic Standalone References\n- Implement import script validation\n\"\"\"\nimport csv\nimport json\nimport re\n\nimport six\n\nfrom distutils import util\n\nfrom ocldev import oclconstants\n\n\nclass OclCsvToJsonConverter(object):\n \"\"\" Class to convert CSV file to OCL-formatted JSON flex file \"\"\"\n\n # Constants for method of processing the CSV\n PROCESS_BY_DEFINITION = 'process_by_definition'\n PROCESS_BY_ROW = 'process_by_row'\n\n # Constants for explicitly defined field definitions\n DEF_CORE_FIELDS = 'core_fields'\n DEF_SUB_RESOURCES = 'subresources'\n DEF_KEY_VALUE_PAIRS = 'key_value_pairs'\n DEF_RESOURCE_FIELD_TYPES = [\n DEF_CORE_FIELDS,\n DEF_SUB_RESOURCES,\n DEF_KEY_VALUE_PAIRS,\n ]\n\n # Constants for specific attribute names\n DEF_KEY_RESOURCE_FIELD = 'resource_field'\n DEF_KEY_RESOURCE_TYPE = 'resource_type'\n DEF_KEY_ID_COLUMN = 'id_column'\n DEF_KEY_IS_ACTIVE = 'is_active'\n DEF_KEY_SKIP_IF_EMPTY = 'skip_if_empty_column'\n DEF_KEY_TRIGGER_COLUMN = '__trigger_column'\n DEF_KEY_TRIGGER_VALUE = '__trigger_value'\n DEF_KEY_SKIP_HANDLER = 'skip_handler'\n\n # Constants for automatic resource definitions\n DEF_TYPE_AUTO_RESOURCE = 'AUTO-RESOURCE'\n DEF_AUTO_CONCEPT_NAMES = 'auto_concept_names'\n DEF_AUTO_CONCEPT_DESCRIPTIONS = 'auto_concept_descriptions'\n DEF_AUTO_ATTRIBUTES = 'auto_attributes'\n DEF_AUTO_RESOURCE_TEMPLATE = 'auto_resource_template'\n DEF_KEY_TRIGGER_COLUMN_PREFIX = '__trigger_column_prefix'\n DEF_KEY_SKIP_IF_EMPTY_PREFIX = 'skip_if_empty_column_prefix'\n DEF_KEY_AUTO_INDEX_PREFIX = 'index_prefix'\n DEF_KEY_AUTO_INDEX_POSTFIX = 'index_postfix'\n DEF_KEY_AUTO_INDEX_REGEX = 'index_regex'\n AUTO_REPLACEMENT_FIELDS = {\n DEF_KEY_TRIGGER_COLUMN_PREFIX: DEF_KEY_TRIGGER_COLUMN,\n DEF_KEY_SKIP_IF_EMPTY_PREFIX: DEF_KEY_SKIP_IF_EMPTY,\n }\n\n # Note that underscores are allowed for a concept ID and the exception is made in the code\n INVALID_CHARS = ' `~!@#$%^&*()_+-=[]{}\\\\|;:\"\\',/<>?'\n REPLACE_CHAR = '-'\n\n def __init__(self, csv_filename='', input_list=None,\n csv_resource_definitions=None, verbose=False, allow_special_characters=False):\n \"\"\"\n Initialize this object\n :param csv_filename: Filename to load CSV data from; use \"input_list\"\n if CSV already loaded into list\n :param input_list: List of dictionaries objects representing each row of the CSV file\n :param csv_resource_definitions: Properly formatted dictionary defining\n how to convert CSV to OCL-JSON\n :param verbose: 0=off, 1=some debug info, 2=all debug info\n :param allow_special_characters: concept id special characters will not be replaced by `-`\n \"\"\"\n self.allow_special_characters = 
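# NOTE (editor): minimal client-side exercise of the two endpoints above,
# assuming the app is running locally on port 8080; URL and flow illustrative:
import requests

created = requests.get("http://127.0.0.1:8080/create").json()
posted = requests.post("http://127.0.0.1:8080/post",
                       json={"text": created["text"]}).json()
print(posted)  # {'success': True} on the happy path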
allow_special_characters\n self.csv_filename = csv_filename\n self.input_list = input_list\n if csv_filename:\n self.load_csv(csv_filename)\n self.verbose = verbose\n self.set_resource_definitions(csv_resource_definitions=csv_resource_definitions)\n self.output_list = []\n self._current_row_num = 0\n self._total_rows = 0\n\n def preprocess_csv_row(self, row, attr=None):\n \"\"\" Method intended to be overwritten in classes that extend this object \"\"\"\n return row\n\n def set_resource_definitions(self, csv_resource_definitions=None):\n \"\"\" Set CSV resource definitions to use to convert to JSON \"\"\"\n self.csv_resource_definitions = csv_resource_definitions\n\n def load_csv(self, csv_filename):\n \"\"\" Load CSV file into the input_list \"\"\"\n input_list = []\n with open(csv_filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n input_list.append(row)\n self.input_list = input_list\n\n def process(self, method=PROCESS_BY_DEFINITION, num_rows=0, attr=None):\n \"\"\" Process CSV into OCL-formatted JSON \"\"\"\n if method == OclCsvToJsonConverter.PROCESS_BY_ROW:\n return self.process_by_row(num_rows=num_rows, attr=attr)\n return self.process_by_definition(num_rows=num_rows, attr=attr)\n\n def process_by_row(self, num_rows=0, attr=None):\n \"\"\" Process CSV by applying all definitions to each row before moving to the next row \"\"\"\n if self.csv_filename:\n self.load_csv(self.csv_filename)\n self._current_row_num = 0\n self._total_rows = len(self.input_list)\n self.output_list = []\n for csv_row in self.input_list:\n if num_rows and self._current_row_num >= num_rows:\n break\n self._current_row_num += 1\n csv_row = self.preprocess_csv_row(csv_row.copy(), attr)\n for csv_resource_def in self.csv_resource_definitions:\n if (self.DEF_KEY_IS_ACTIVE in csv_resource_def and not csv_resource_def[\n self.DEF_KEY_IS_ACTIVE]):\n continue\n ocl_resources = self.process_csv_row_with_definition(\n csv_row, csv_resource_def, attr=attr)\n if ocl_resources and isinstance(ocl_resources, dict): # Single OCL resource\n self.output_list.append(ocl_resources)\n elif ocl_resources and isinstance(ocl_resources, list): # List of OCL resources\n self.output_list += ocl_resources\n return self.output_list\n\n def process_by_definition(self, num_rows=0, attr=None):\n \"\"\" Process the CSV file by looping through it entirely once for each definition \"\"\"\n if self.csv_filename:\n self.load_csv(self.csv_filename)\n self.output_list = []\n self._total_rows = len(self.input_list)\n for csv_resource_def in self.csv_resource_definitions:\n if self.DEF_KEY_IS_ACTIVE in csv_resource_def and not csv_resource_def[\n self.DEF_KEY_IS_ACTIVE]:\n continue\n if self.verbose:\n six.print_(('\\n%s' % ('*' * 120)))\n six.print_(('Processing definition: %s' % csv_resource_def['definition_name']))\n # print csv_resource_def\n six.print_(('*' * 120))\n self._current_row_num = 0\n for csv_row in self.input_list:\n if num_rows and self._current_row_num >= num_rows:\n break\n self._current_row_num += 1\n csv_row = self.preprocess_csv_row(csv_row.copy(), attr)\n ocl_resources = self.process_csv_row_with_definition(\n csv_row, csv_resource_def, attr=attr)\n if ocl_resources and isinstance(ocl_resources, dict): # Single OCL resource\n self.output_list.append(ocl_resources)\n elif ocl_resources and isinstance(ocl_resources, list): # List of OCL resources\n self.output_list += ocl_resources\n return self.output_list\n\n def process_csv_row_with_definition(self, csv_row, csv_resource_def, attr=None):\n \"\"\" 
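# NOTE (editor): hedged end-to-end sketch of the converter above. The
# definition dict uses keys this class references (definition_name,
# resource_type, id_column, core_fields); the exact core_fields schema lives
# in process_resource_def / OclStandardCsvToJsonConverter, so the "column"
# key here is an assumption for illustration:
csv_rows = [{"id": "malaria-1", "name": "Malaria", "concept_class": "Diagnosis"}]
definitions = [{
    "definition_name": "Example concepts",
    "resource_type": "Concept",
    "id_column": "id",
    "core_fields": [{"resource_field": "concept_class", "column": "concept_class"}],
}]
converter = OclCsvToJsonConverter(input_list=csv_rows,
                                  csv_resource_definitions=definitions)
print(converter.process(method=OclCsvToJsonConverter.PROCESS_BY_ROW))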
Process individual CSV row with the provided CSV resource definition \"\"\"\n\n # Throw exception if resource_type not in the resource definition\n if self.DEF_KEY_RESOURCE_TYPE not in csv_resource_def:\n err_msg = 'Missing required \"%s\" in row definition: %s' % (\n self.DEF_KEY_RESOURCE_TYPE, csv_resource_def)\n raise Exception(err_msg)\n\n # TRIGGER: Skip row if the trigger column does not equal trigger_value\n if self.DEF_KEY_TRIGGER_COLUMN in csv_resource_def:\n if csv_resource_def[self.DEF_KEY_TRIGGER_COLUMN] not in csv_row:\n return None\n if csv_row[csv_resource_def[self.DEF_KEY_TRIGGER_COLUMN]] != csv_resource_def[\n self.DEF_KEY_TRIGGER_VALUE]:\n return None\n\n # SKIP_IF_EMPTY: Skip if all SKIP_IF_EMPTY columns have blank values\n is_skip_row = self.is_skip_row(csv_resource_def, csv_row)\n if is_skip_row:\n if self.verbose:\n # print 'SKIPPING: %s' % (csv_resource_def['definition_name'])\n pass\n return None\n\n # Either process batch of auto resources or build individual resource\n ocl_resource_type = csv_resource_def[self.DEF_KEY_RESOURCE_TYPE]\n if ocl_resource_type == OclCsvToJsonConverter.DEF_TYPE_AUTO_RESOURCE:\n auto_resource_def_template = csv_resource_def[\n OclCsvToJsonConverter.DEF_AUTO_RESOURCE_TEMPLATE]\n unique_auto_resource_indexes = OclCsvToJsonConverter.get_unique_csv_row_auto_indexes(\n index_prefix=auto_resource_def_template[self.DEF_KEY_AUTO_INDEX_PREFIX],\n index_postfix=auto_resource_def_template[self.DEF_KEY_AUTO_INDEX_POSTFIX],\n index_regex=auto_resource_def_template[self.DEF_KEY_AUTO_INDEX_REGEX],\n resource_def_template=auto_resource_def_template,\n csv_row=csv_row)\n ocl_resources = []\n for auto_index in unique_auto_resource_indexes:\n resource_def = OclCsvToJsonConverter.generate_resource_def_from_template(\n auto_resource_index=auto_index,\n index_prefix=auto_resource_def_template[self.DEF_KEY_AUTO_INDEX_PREFIX],\n index_postfix=auto_resource_def_template[self.DEF_KEY_AUTO_INDEX_POSTFIX],\n resource_def_template=auto_resource_def_template)\n ocl_resource = self.process_csv_row_with_definition(\n csv_row, resource_def, attr=attr)\n if ocl_resource:\n ocl_resources.append(ocl_resource)\n return ocl_resources\n return self.build_resource(csv_row, csv_resource_def, attr=attr)\n\n def is_skip_row(self, csv_resource_def, csv_row):\n \"\"\"\n Determine if a skip row based on the DEF_KEY_SKIP_IF_EMPTY columns. 
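# NOTE (editor): standalone illustration of the prefix/index/postfix column
# matching that the AUTO-RESOURCE branch above depends on; the column names
# and the \d+ index regex are illustrative:
import re

columns = ["attr_key[1]", "attr_value[1]", "attr_key[27]", "attr_value[27]", "name"]
needle = r"^attr_(?:key|value)%s(%s)%s$" % (re.escape("["), r"\d+", re.escape("]"))
indexes = sorted({m.group(1) for c in columns if (m := re.search(needle, c))})
print(indexes)  # ['1', '27'] -> one auto resource per unique index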
Returns TRUE\n only if all columns are empty.\n TODO: Provide attribute to skip if ANY column is blank instead of ALL\n \"\"\"\n is_skip_row = False\n if self.DEF_KEY_SKIP_IF_EMPTY in csv_resource_def and csv_resource_def[\n self.DEF_KEY_SKIP_IF_EMPTY]:\n has_non_empty_cell = False\n skip_columns = csv_resource_def[self.DEF_KEY_SKIP_IF_EMPTY]\n if not isinstance(skip_columns, list):\n skip_columns = [skip_columns]\n for skip_column in skip_columns:\n if skip_column in csv_row and csv_row[skip_column] != '':\n has_non_empty_cell = True\n break\n if not has_non_empty_cell:\n is_skip_row = True\n elif OclCsvToJsonConverter.DEF_KEY_SKIP_HANDLER in csv_resource_def:\n handler = getattr(self, csv_resource_def[OclCsvToJsonConverter.DEF_KEY_SKIP_HANDLER])\n if not handler:\n err_msg = \"skip_handler '%s' is not defined\" % csv_resource_def[\n OclCsvToJsonConverter.DEF_KEY_SKIP_HANDLER]\n raise Exception(err_msg)\n is_skip_row = handler(csv_resource_def, csv_row)\n return is_skip_row\n\n def build_resource(self, csv_row, csv_resource_def, attr=None):\n \"\"\" Build an OCL resource \"\"\"\n\n # Start building the resource\n ocl_resource_type = csv_resource_def[self.DEF_KEY_RESOURCE_TYPE]\n ocl_resource = {'type': ocl_resource_type}\n\n # Determine resource's ID and auto-replace invalid ID characters\n id_column = None\n if self.DEF_KEY_ID_COLUMN in csv_resource_def and csv_resource_def[self.DEF_KEY_ID_COLUMN]:\n id_column = csv_resource_def[self.DEF_KEY_ID_COLUMN]\n if id_column not in csv_row or not csv_row[id_column]:\n raise Exception('ID column %s not set or empty in row %s' % (id_column, csv_row))\n if ocl_resource_type in [\n oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT,\n oclconstants.OclConstants.RESOURCE_TYPE_MAPPING]:\n ocl_resource['id'] = self.format_identifier(\n csv_row[id_column], allow_underscore=True)\n else:\n ocl_resource['id'] = self.format_identifier(csv_row[id_column])\n\n # Set core fields, eg concept_class, datatype, external_id, etc.\n if self.DEF_CORE_FIELDS in csv_resource_def and csv_resource_def[self.DEF_CORE_FIELDS]:\n ocl_resource.update(self.process_resource_def(\n csv_row, csv_resource_def[self.DEF_CORE_FIELDS]))\n\n # Build mapping to/from concept URLs if not provided\n if ocl_resource_type == oclconstants.OclConstants.RESOURCE_TYPE_MAPPING:\n # Determine whether mapping target is internal or external\n map_target = ocl_resource.pop(\n oclconstants.OclConstants.MAPPING_TARGET,\n oclconstants.OclConstants.MAPPING_TARGET_INTERNAL)\n if map_target not in oclconstants.OclConstants.MAPPING_TARGETS:\n map_target = oclconstants.OclConstants.MAPPING_TARGET_INTERNAL\n\n # Build from_concept_url if not provided\n ocl_resource[oclconstants.OclConstants.MAPPING_FROM_CONCEPT_URL] = OclCsvToJsonConverter.get_concept_url(\n concept_url=ocl_resource.pop(oclconstants.OclConstants.MAPPING_FROM_CONCEPT_URL, ''),\n owner_id=ocl_resource.pop(oclconstants.OclConstants.MAPPING_FROM_CONCEPT_OWNER_ID, ''),\n owner_type=ocl_resource.pop(\n oclconstants.OclConstants.MAPPING_FROM_CONCEPT_OWNER_TYPE,\n oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION),\n source=ocl_resource.pop(oclconstants.OclConstants.MAPPING_FROM_SOURCE_ID, ''),\n concept_id=ocl_resource.pop(oclconstants.OclConstants.MAPPING_FROM_CONCEPT_ID, ''))\n\n # Handle to_concept_url based on Internal or External map target\n if map_target == oclconstants.OclConstants.MAPPING_TARGET_INTERNAL:\n # JSON internal mapping requires map_type, from_concept_url, and to_concept_url\n 
ocl_resource[oclconstants.OclConstants.MAPPING_TO_CONCEPT_URL] = OclCsvToJsonConverter.get_concept_url(\n concept_url=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_CONCEPT_URL, ''),\n owner_id=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_CONCEPT_OWNER_ID, ''),\n owner_type=ocl_resource.pop(\n oclconstants.OclConstants.MAPPING_TO_CONCEPT_OWNER_TYPE,\n oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION),\n source=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_SOURCE_ID, ''),\n concept_id=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_CONCEPT_ID, ''))\n elif map_target == oclconstants.OclConstants.MAPPING_TARGET_EXTERNAL:\n # JSON external mapping needs map_type, from_concept_url, to_source_url and\n # to_concept_code. to_concept_name is optional.\n if oclconstants.OclConstants.MAPPING_TO_CONCEPT_URL in ocl_resource and ocl_resource[oclconstants.OclConstants.MAPPING_TO_CONCEPT_URL]:\n err_msg = ('External mapping must not have a '\n '\"to_concept_url\": %s' % ocl_resource[oclconstants.OclConstants.MAPPING_TO_CONCEPT_URL])\n raise Exception(err_msg)\n ocl_resource[oclconstants.OclConstants.MAPPING_TO_SOURCE_URL] = OclCsvToJsonConverter._get_external_mapping_to_source_url(\n to_source_url=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_SOURCE_URL, ''),\n to_concept_owner_id=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_CONCEPT_OWNER_ID, ''),\n to_concept_owner_type=ocl_resource.pop(\n oclconstants.OclConstants.MAPPING_TO_CONCEPT_OWNER_TYPE,\n oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION),\n to_concept_source=ocl_resource.pop(oclconstants.OclConstants.MAPPING_TO_SOURCE_ID, ''))\n\n # Set sub-resources, eg concept names/descriptions\n if self.DEF_SUB_RESOURCES in csv_resource_def and csv_resource_def[self.DEF_SUB_RESOURCES]:\n for group_name in csv_resource_def[self.DEF_SUB_RESOURCES]: # eg \"names\",\"descriptions\"\n ocl_resource[group_name] = []\n for dict_def in csv_resource_def[self.DEF_SUB_RESOURCES][group_name]:\n ocl_sub_resource = self.process_resource_def(csv_row, dict_def)\n if ocl_sub_resource:\n ocl_resource[group_name].append(ocl_sub_resource)\n\n # Key value pairs, eg custom attributes\n if self.DEF_KEY_VALUE_PAIRS in csv_resource_def and csv_resource_def[\n self.DEF_KEY_VALUE_PAIRS]:\n for group_name in csv_resource_def[self.DEF_KEY_VALUE_PAIRS]:\n ocl_resource[group_name] = {}\n for kvp_def in csv_resource_def[self.DEF_KEY_VALUE_PAIRS][group_name]:\n # Key\n key = None\n if 'key' in kvp_def and kvp_def['key']:\n key = kvp_def['key']\n elif 'key_column' in kvp_def and kvp_def['key_column']:\n if kvp_def['key_column'] in csv_row and csv_row[kvp_def['key_column']]:\n key = csv_row[kvp_def['key_column']]\n else:\n err_msg = ('key_column \"%s\" must be non-empty in CSV within '\n 'key_value_pair: %s' % (kvp_def['key_column'], kvp_def))\n raise Exception(err_msg)\n else:\n err_msg = ('Expected \"key\" or \"key_column\" key in key_value_pair '\n 'definition, but neither found: %s' % kvp_def)\n raise Exception(err_msg)\n\n # Value\n if 'value' in kvp_def:\n value = kvp_def['value']\n elif 'value_column' in kvp_def and kvp_def['value_column']:\n if kvp_def['value_column'] in csv_row:\n value = csv_row[kvp_def['value_column']]\n else:\n err_msg = ('value_column \"%s\" does not exist in CSV for '\n 'key_value_pair: %s' % (kvp_def['value_column'], kvp_def))\n raise Exception(err_msg)\n else:\n err_msg = ('Expected \"value\" or \"value_column\" key in key_value_pair '\n 'definition, but neither found: %s' % kvp_def)\n raise 
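# NOTE (editor): for orientation, the concept URLs assembled by the mapping
# branch above follow OCL's /owner-type/owner/sources/source/concepts/id/
# convention, so an internal mapping resource comes out shaped roughly like
# this (all values illustrative):
mapping = {
    "type": "Mapping",
    "map_type": "SAME-AS",
    "from_concept_url": "/orgs/MyOrg/sources/MySource/concepts/12345/",
    "to_concept_url": "/orgs/WHO/sources/ICD-10/concepts/A00/",
}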
Exception(err_msg)\n\n # Set the key-value pair\n if key and (value or ('omit_if_empty_value' in kvp_def and not kvp_def[\n 'omit_if_empty_value'])):\n ocl_resource[group_name][key] = value\n\n # Handle auto-names\n if self.DEF_AUTO_CONCEPT_NAMES in csv_resource_def:\n concepts_names = self.get_auto_sub_resources(\n csv_row, csv_resource_def[self.DEF_AUTO_CONCEPT_NAMES])\n if concepts_names:\n ocl_resource['names'] = concepts_names\n\n # Handle auto-descriptions\n if self.DEF_AUTO_CONCEPT_DESCRIPTIONS in csv_resource_def:\n concepts_descriptions = self.get_auto_sub_resources(\n csv_row, csv_resource_def[self.DEF_AUTO_CONCEPT_DESCRIPTIONS])\n if concepts_descriptions:\n ocl_resource['descriptions'] = concepts_descriptions\n\n # Handle auto-attributes\n if self.DEF_AUTO_ATTRIBUTES in csv_resource_def:\n extra_attr = self.get_auto_extra_attributes(\n csv_row, csv_resource_def[self.DEF_AUTO_ATTRIBUTES])\n if extra_attr:\n ocl_resource['extras'] = extra_attr\n\n # Optionally display debug info\n if self.verbose:\n if self._current_row_num:\n six.print_(('[Row %s of %s] %s' % (self._current_row_num, self._total_rows,\n json.dumps(ocl_resource))))\n else:\n six.print_((json.dumps(ocl_resource)))\n\n return ocl_resource\n\n def get_auto_extra_attributes(self, csv_row, auto_attributes_def):\n \"\"\"\n Get a dictionary of auto-extra (\"custom\") attributes in the given CSV row.\n Two models supported:\n 1) Standard column: If column name is \"attr:Key1\", then \"Key1\" is the key, and the\n cell in each row is the value. By default, this is omitted if the value is empty\n unless \"omit_if_empty_value\" is set to False in the field definition.\n 2) Separated key/value pair: This model allows the custom attribute key to be adjusted\n for each row by separating the key and value into their own columns sharing a common\n index. E.g. if column name is \"attr_key[001]\", the cell in each row is the key. A\n separate column with the name \"attr_value[001]\" must be defined, and the cell in each\n row is the corresponding value. If the key is blank it is omitted. 
If value is blank,\n it is omitted by default, unless \"omit_if_empty_value\" is set to False in the field\n definition.\n\n Brief CSV example:\n attr:my-attribute,attr_key[1],attr_value[1],attr_key[27],attr_value[27]\n my-attribute value,This row's key,\"This row's value\",Another key,Another value\n \"\"\"\n extra_attributes = {}\n keyless_values = {}\n valueless_keys = {}\n\n # Determine whether to omit blank values (default = True)\n omit_if_empty_value = True\n if 'omit_if_empty_value' in auto_attributes_def and not auto_attributes_def[\n 'omit_if_empty_value']:\n omit_if_empty_value = False\n\n # Prepare search strings\n standard_needle = '%s%s' % (\n auto_attributes_def['standard_column_prefix'], auto_attributes_def['separator'])\n key_needle = '^%s%s(%s)%s$' % (\n auto_attributes_def['key_column_prefix'],\n re.escape(auto_attributes_def[self.DEF_KEY_AUTO_INDEX_PREFIX]),\n auto_attributes_def[self.DEF_KEY_AUTO_INDEX_REGEX],\n re.escape(auto_attributes_def[self.DEF_KEY_AUTO_INDEX_POSTFIX]))\n value_needle = '^%s%s(%s)%s$' % (\n auto_attributes_def['value_column_prefix'],\n re.escape(auto_attributes_def[self.DEF_KEY_AUTO_INDEX_PREFIX]),\n auto_attributes_def[self.DEF_KEY_AUTO_INDEX_REGEX],\n re.escape(auto_attributes_def[self.DEF_KEY_AUTO_INDEX_POSTFIX]))\n\n data_types = ['bool', 'str', 'int', 'float', 'list', 'json']\n\n # Process CSV columns\n for column_name in csv_row:\n data_type = 'str'\n if column_name.count(':') == 2:\n suffix_part = column_name.split(':')[2].strip()\n if suffix_part in data_types:\n data_type = suffix_part\n if column_name[:len(standard_needle)] == standard_needle:\n # Check if standard attr (e.g. attr:my-custom-attr)\n if not omit_if_empty_value or (omit_if_empty_value and csv_row[column_name]):\n key_name = column_name[len(standard_needle):]\n if key_name.endswith(\":\" + data_type):\n # Strip only the trailing ':datatype' suffix from the key name\n key_name = key_name[:-(len(data_type) + 1)]\n extra_attributes[key_name] = self.do_datatype_conversion(csv_row[column_name], data_type)\n else:\n key_regex_match = re.search(key_needle, column_name)\n value_regex_match = re.search(value_needle, column_name)\n if key_regex_match:\n key_index = key_regex_match.group(1)\n if not key_index:\n # Invalid (ie blank) auto index\n raise Exception(\"Auto indexes must be non-empty\")\n elif not csv_row[column_name]:\n # Skip if the key is empty\n continue\n elif key_index in keyless_values:\n # We now have a key/value pair\n extra_attributes[csv_row[column_name]] = keyless_values.pop(key_index)\n else:\n # Save and continue processing columns\n key_name = csv_row[column_name]\n valueless_keys[key_index] = key_name\n elif value_regex_match:\n value_index = value_regex_match.group(1)\n if not value_index:\n # Invalid (ie blank) auto index\n raise Exception(\"Auto indexes must be non-empty\")\n elif not csv_row[column_name] and omit_if_empty_value:\n # Optionally skip if empty value\n continue\n elif value_index in valueless_keys:\n # We now have a key/value pair\n key_name = valueless_keys.pop(value_index)\n extra_attributes[key_name] = csv_row[column_name]\n else:\n # Save and continue processing columns\n value = csv_row[column_name]\n keyless_values[value_index] = value\n\n # TODO: Handle orphaned keys and values\n\n return extra_attributes\n\n def get_auto_sub_resources(self, csv_row, auto_sub_resources_def):\n \"\"\"\n Get a list of auto-generated sub_resources for the CSV row based on the specified definition\n \"\"\"\n sub_resources = []\n if 'sub_resource_type' not in auto_sub_resources_def:\n raise Exception('Missing 
required \"sub_resource_type\" in auto_sub_resources definition')\n\n # Add primary sub resource (if defined)\n if 'primary_sub_resource' in auto_sub_resources_def:\n sub_resource = self.process_resource_def(\n csv_row, auto_sub_resources_def['primary_sub_resource'])\n is_skip_row = self.is_skip_row(auto_sub_resources_def, csv_row)\n if sub_resource and not is_skip_row:\n sub_resources.append(sub_resource)\n\n # Add auto sub resources\n if 'auto_sub_resources' in auto_sub_resources_def:\n unique_auto_resource_indexes = OclCsvToJsonConverter.get_unique_csv_row_auto_indexes(\n index_prefix=auto_sub_resources_def[self.DEF_KEY_AUTO_INDEX_PREFIX],\n index_postfix=auto_sub_resources_def[self.DEF_KEY_AUTO_INDEX_POSTFIX],\n index_regex=auto_sub_resources_def[self.DEF_KEY_AUTO_INDEX_REGEX],\n resource_def_template=auto_sub_resources_def['auto_sub_resources'],\n csv_row=csv_row)\n for auto_resource_index in unique_auto_resource_indexes:\n sub_resource_def = OclCsvToJsonConverter.generate_resource_def_from_template(\n index_prefix=auto_sub_resources_def[self.DEF_KEY_AUTO_INDEX_PREFIX],\n index_postfix=auto_sub_resources_def[self.DEF_KEY_AUTO_INDEX_POSTFIX],\n auto_resource_index=auto_resource_index,\n resource_def_template=auto_sub_resources_def['auto_sub_resources'])\n sub_resource = self.process_resource_def(csv_row, sub_resource_def)\n is_skip_row = self.is_skip_row(auto_sub_resources_def, sub_resource)\n if sub_resource and not is_skip_row:\n sub_resources.append(sub_resource)\n\n return sub_resources\n\n @staticmethod\n def replace_auto_field(prefix_field_to_replace, new_field_name, index_prefix, index_postfix,\n auto_resource_index, resource_def_template):\n \"\"\"\n Replaces prefix fields (eg. 'skip_if_empty_column_prefix': 'map_to_concept_id') with an\n indexed field (eg. 'skip_if_empty_column': 'map_to_concept_id[07]')\n \"\"\"\n if prefix_field_to_replace in resource_def_template:\n field_prefixes = resource_def_template.pop(prefix_field_to_replace)\n new_field_prefixes = []\n if not isinstance(field_prefixes, list):\n field_prefixes = [field_prefixes]\n for field_prefix in field_prefixes: # eg. 
field_prefix => 'map_to_concept_id'\n auto_field_name = '%s%s%s%s' % (\n field_prefix, index_prefix, auto_resource_index, index_postfix)\n new_field_prefixes.append(auto_field_name)\n if len(new_field_prefixes) > 1:\n resource_def_template[new_field_name] = new_field_prefixes\n elif len(new_field_prefixes) == 1:\n resource_def_template[new_field_name] = new_field_prefixes[0]\n\n @staticmethod\n def _get_external_mapping_to_source_url(to_source_url='', to_concept_owner_id='',\n to_concept_owner_type='', to_concept_source=''):\n if to_source_url:\n return to_source_url\n return oclconstants.OclConstants.get_repository_url(\n owner_id=to_concept_owner_id, repository_id=to_concept_source,\n owner_type=to_concept_owner_type, include_trailing_slash=True)\n\n @staticmethod\n def generate_resource_def_from_template(index_prefix, index_postfix, auto_resource_index,\n resource_def_template):\n \"\"\"\n Get a resource definition for the specified resource definition template and auto index\n \"\"\"\n if isinstance(resource_def_template, dict):\n resource_def_template = resource_def_template.copy()\n # Replace the resource definition prefix fields\n for prefix_field_to_replace in OclCsvToJsonConverter.AUTO_REPLACEMENT_FIELDS:\n OclStandardCsvToJsonConverter.replace_auto_field(\n prefix_field_to_replace=prefix_field_to_replace,\n new_field_name=OclCsvToJsonConverter.AUTO_REPLACEMENT_FIELDS[\n prefix_field_to_replace],\n index_prefix=index_prefix, index_postfix=index_postfix,\n auto_resource_index=auto_resource_index,\n resource_def_template=resource_def_template)\n # Replace the field definition prefix fields\n for resource_field_type in OclCsvToJsonConverter.DEF_RESOURCE_FIELD_TYPES:\n if resource_field_type in resource_def_template:\n resource_def_template[resource_field_type] = OclCsvToJsonConverter.generate_resource_def_from_template(\n index_prefix=index_prefix, index_postfix=index_postfix,\n auto_resource_index=auto_resource_index,\n resource_def_template=resource_def_template[resource_field_type])\n return resource_def_template\n elif isinstance(resource_def_template, list):\n new_field_defs = []\n for current_field_def in resource_def_template:\n new_field_def = current_field_def.copy()\n if 'column_prefix' in new_field_def:\n column_prefix = new_field_def.pop('column_prefix')\n new_column_name = '%s%s%s%s' % (\n column_prefix, index_prefix, auto_resource_index, index_postfix)\n if 'column' in new_field_def and new_field_def['column']:\n if not isinstance(new_field_def['column'], list):\n new_field_def['column'] = [new_field_def['column']]\n # Add new column to beginning of list so that it is searched first\n new_field_def['column'].insert(0, new_column_name)\n else:\n new_field_def['column'] = new_column_name\n new_field_defs.append(new_field_def)\n return new_field_defs\n else:\n err_msg = ('Invalid type \"%s\" for resource_def_template. 
'\n 'Expected dict or list.') % str(type(resource_def_template))\n raise Exception(err_msg)\n\n @staticmethod\n def get_unique_csv_row_auto_indexes(index_prefix, index_postfix, index_regex,\n resource_def_template, csv_row):\n \"\"\"\n Return list of unique auto indexes in the CSV row as defined by the resource_def_template.\n Note that resource_def_template may be either a full resource_def (dict) or a\n sub_resource_def (list).\n \"\"\"\n unique_auto_resource_indexes = []\n if isinstance(resource_def_template, dict):\n for resource_field_type in OclCsvToJsonConverter.DEF_RESOURCE_FIELD_TYPES:\n if resource_field_type in resource_def_template:\n unique_auto_resource_indexes += OclCsvToJsonConverter.get_unique_csv_row_auto_indexes(\n index_prefix=index_prefix, index_postfix=index_postfix,\n index_regex=index_regex,\n resource_def_template=resource_def_template[resource_field_type],\n csv_row=csv_row)\n # Dedup the list so that each auto-index only appears once\n unique_auto_resource_indexes = [i for n, i in enumerate(\n unique_auto_resource_indexes) if i not in unique_auto_resource_indexes[n + 1:]]\n elif isinstance(resource_def_template, list):\n for column_name in csv_row:\n for field_def in resource_def_template:\n if 'column_prefix' not in field_def:\n continue\n if column_name[:len(field_def['column_prefix'])] == field_def['column_prefix']:\n search_exp = r'^%s%s(%s)%s$' % (\n field_def['column_prefix'], re.escape(index_prefix),\n index_regex, re.escape(index_postfix))\n regex_match = re.search(search_exp, column_name)\n if regex_match:\n index = regex_match.group(1)\n if index and index not in unique_auto_resource_indexes:\n unique_auto_resource_indexes.append(index)\n else:\n err_msg = ('Invalid type \"%s\" for resource_def_template. '\n 'Expected dict or list.') % str(type(resource_def_template))\n raise Exception(err_msg)\n return unique_auto_resource_indexes\n\n def process_resource_def(self, csv_row, resource_def):\n \"\"\"\n Returns a resource by processing a resource definition. A resource definition is a\n list of field definitions.\n \"\"\"\n new_resource = {}\n for field_def in resource_def:\n value = self.process_field_def(csv_row, field_def)\n if value is not None:\n new_resource[field_def[self.DEF_KEY_RESOURCE_FIELD]] = value\n return new_resource\n\n def process_field_def(self, csv_row, field_def):\n \"\"\" Processes a single resource field definition for the given CSV row \"\"\"\n if self.DEF_KEY_RESOURCE_FIELD not in field_def:\n raise Exception(\n 'Expected key \"%s\" in subresource definition, but none found: %s' % (\n self.DEF_KEY_RESOURCE_FIELD, field_def))\n return self.get_csv_value(csv_row, field_def)\n\n def get_csv_value(self, csv_row, field_def):\n \"\"\"\n Return a value from a csv_row for the specified field definition.\n field_def must include 'resource_type' and either a 'column' or a 'value' key, or\n both 'csv_to_json_processor' and 'data_column' keys. 
If 'column' is a list, then\n the first non-empty column in the list that is present in the CSV row is used.\n Set 'skip_empty_value' to False so that empty values are returned rather than skipped.\n Optional keys include 'required', 'default', and 'datatype'\n \"\"\"\n if 'column' in field_def:\n columns = field_def['column']\n if not isinstance(field_def['column'], list):\n columns = [columns]\n skip_empty_value = True\n\n if 'skip_empty_value' in field_def:\n skip_empty_value = bool(field_def['skip_empty_value'])\n for column in columns:\n if column in csv_row and (\n skip_empty_value and csv_row[column] or not skip_empty_value):\n if 'datatype' in field_def:\n return self.do_datatype_conversion(csv_row[column], field_def['datatype'])\n return csv_row[column]\n\n # No value found from 'column', so apply default/required\n if 'default' in field_def:\n # Return 'default' if 'column' is not in CSV row\n return field_def['default']\n elif 'required' in field_def and field_def['required']:\n err_msg = 'Missing required column %s in CSV row: %s' % (\n field_def['column'], csv_row)\n raise Exception(err_msg)\n\n # Return None if no value found and not required\n return None\n elif 'value' in field_def:\n # Just return whatever is in the 'value' definition\n return field_def['value']\n elif 'csv_to_json_processor' in field_def and field_def['csv_to_json_processor']:\n # Use a custom method to generate the value\n method_to_call = getattr(self, field_def['csv_to_json_processor'])\n return method_to_call(csv_row, field_def)\n else:\n err_msg = ('Expected \"column\", \"value\", or \"csv_to_json_processor\" key in field '\n 'definition, but none found: %s' % field_def)\n raise Exception(err_msg)\n\n def do_datatype_conversion(self, value, datatype):\n \"\"\"\n Convert the value to the specified datatype, where datatype is a string of the name of the\n desired datatype (e.g. datatype=\"bool\", \"int\", \"float\").\n \"\"\"\n if datatype == 'bool':\n return bool(util.strtobool(str(value)))\n elif datatype == 'int':\n return int(value)\n elif datatype == 'float':\n return float(value)\n elif datatype == 'list':\n return [v.strip() for v in value.strip('][').split(',')]\n elif datatype == 'json':\n try:\n return json.loads(value)\n except (ValueError, TypeError):\n # Not valid JSON -- fall back to the raw value\n return value\n return value\n\n def process_auto_concept_reference(self, csv_row, field_def):\n \"\"\" Returns a concept reference expression, e.g. 
{'expressions': []} \"\"\"\n # TODO: the concept url variables are not stored in the field_def or csv_row, they're evaluated\n concept_url = OclCsvToJsonConverter.get_concept_url(\n owner_id=field_def.pop('ref_target_owner'),\n owner_type=field_def.pop('ref_target_owner_type'),\n source=field_def.pop('ref_target_source'),\n concept_id=field_def.pop('ref_target_concept_id'))\n if concept_url:\n return {'expressions': [concept_url]}\n return None\n\n def process_reference(self, csv_row, field_def):\n \"\"\" (DEPRECATED) Processes a reference in the CSV row \"\"\"\n result = None\n if ('data_column' in field_def and field_def['data_column'] and\n field_def['data_column'] in csv_row):\n result = {'expressions': [csv_row[field_def['data_column']]]}\n return result\n\n def format_identifier(self, unformatted_id, allow_underscore=False):\n \"\"\"\n Format a string according to the OCL ID rules: Everything in INVALID_CHARS goes,\n except that underscores are allowed for the concept_id\n \"\"\"\n if self.allow_special_characters:\n return unformatted_id\n\n formatted_id = list(unformatted_id)\n if allow_underscore:\n # Remove underscore from the invalid characters - Concept IDs are okay with underscores\n chars_to_remove = self.INVALID_CHARS.replace('_', '')\n else:\n chars_to_remove = self.INVALID_CHARS\n for index in range(len(unformatted_id)):\n if unformatted_id[index] in chars_to_remove:\n formatted_id[index] = self.REPLACE_CHAR\n return ''.join(formatted_id)\n\n @staticmethod\n def get_concept_url(concept_url='', owner_id='', owner_type='', source='', concept_id=''):\n \"\"\" Returns a concept URL \"\"\"\n if concept_url:\n return concept_url\n return '%s/concepts/%s/' % (oclconstants.OclConstants.get_repository_url(\n owner_id=owner_id, owner_type=owner_type, repository_id=source,\n repository_type=oclconstants.OclConstants.RESOURCE_TYPE_SOURCE), concept_id)\n\n\nclass OclStandardCsvToJsonConverter(OclCsvToJsonConverter):\n \"\"\" Standard CSV to OCL-formatted JSON converter \"\"\"\n\n # Standard auto index constants\n AUTO_INDEX_STANDARD_PREFIX = '['\n AUTO_INDEX_STANDARD_POSTFIX = ']'\n AUTO_INDEX_STANDARD_REGEX = '[a-zA-Z0-9\\\\-_]+'\n\n default_csv_resource_definitions = [\n {\n 'definition_name': 'Generic Organization',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION,\n 'skip_if_empty_column': 'id',\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'name', 'column': 'name'},\n {'resource_field': 'company', 'column': 'company', 'required': False},\n {'resource_field': 'website', 'column': 'website', 'required': False},\n {'resource_field': 'location', 'column': 'location', 'required': False},\n {'resource_field': 'public_access', 'column': 'public_access', 'default': 'View'},\n ],\n OclCsvToJsonConverter.DEF_AUTO_ATTRIBUTES: {\n 'standard_column_prefix': 'attr', # e.g. 'attr:Reporting Frequency'\n 'separator': ':',\n 'key_column_prefix': 'attr_key', # 2-digit number required, e.g. attr_key[01]\n 'value_column_prefix': 'attr_value', # 2-digit number required, e.g. 
attr_value[01]\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n }\n },\n {\n 'definition_name': 'Generic Source',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_SOURCE,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_SOURCE,\n 'skip_if_empty_column': 'id',\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'external_id', 'column': 'external_id', 'required': False},\n {'resource_field': 'short_code', 'column': ['short_code', 'id'], 'required': False},\n {'resource_field': 'name', 'column': 'name'},\n {'resource_field': 'full_name', 'column': ['full_name', 'name'], 'required': False},\n {'resource_field': 'source_type', 'column': 'source_type', 'required': False},\n {'resource_field': 'public_access', 'column': 'public_access', 'default': 'View'},\n {'resource_field': 'default_locale', 'column': 'default_locale', 'default': 'en'},\n {'resource_field': 'supported_locales', 'column': 'supported_locales',\n 'default': 'en'},\n {'resource_field': 'website', 'column': 'website', 'required': False},\n {'resource_field': 'description', 'column': 'description', 'required': False},\n {'resource_field': 'custom_validation_schema', 'column': 'custom_validation_schema',\n 'required': False},\n {'resource_field': 'owner', 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n ],\n OclCsvToJsonConverter.DEF_AUTO_ATTRIBUTES: {\n 'standard_column_prefix': 'attr', # e.g. 'attr:Reporting Frequency'\n 'separator': ':',\n 'key_column_prefix': 'attr_key', # 2-digit number required, e.g. attr_key[01]\n 'value_column_prefix': 'attr_value', # 2-digit number required, e.g. 
attr_value[01]\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n }\n },\n {\n 'definition_name': 'Generic Collection',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_COLLECTION,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_COLLECTION,\n 'skip_if_empty_column': 'id',\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'external_id', 'column': 'external_id', 'required': False},\n {'resource_field': 'short_code', 'column': ['short_code', 'id'], 'required': False},\n {'resource_field': 'name', 'column': 'name'},\n {'resource_field': 'full_name', 'column': ['full_name', 'name'], 'required': False},\n {'resource_field': 'collection_type', 'column': 'collection_type',\n 'required': False},\n {'resource_field': 'public_access', 'column': 'public_access', 'default': 'View'},\n {'resource_field': 'default_locale', 'column': 'default_locale', 'default': 'en'},\n {'resource_field': 'supported_locales', 'column': 'supported_locales',\n 'default': 'en'},\n {'resource_field': 'website', 'column': 'website', 'required': False},\n {'resource_field': 'description', 'column': 'description', 'required': False},\n {'resource_field': 'custom_validation_schema', 'column': 'custom_validation_schema',\n 'required': False},\n {'resource_field': 'owner', 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n ],\n OclCsvToJsonConverter.DEF_AUTO_ATTRIBUTES: {\n 'standard_column_prefix': 'attr', # e.g. 'attr:Reporting Frequency'\n 'separator': ':',\n 'key_column_prefix': 'attr_key', # 2-digit number required, e.g. attr_key[01]\n 'value_column_prefix': 'attr_value', # 2-digit number required, e.g. 
attr_value[01]\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n }\n },\n {\n 'definition_name': 'Generic Concept',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT,\n 'skip_if_empty_column': 'id',\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'retired', 'column': 'retired', 'required': False, 'datatype': 'bool'},\n {'resource_field': 'external_id', 'column': 'external_id', 'required': False},\n {'resource_field': 'concept_class', 'column': 'concept_class'},\n {'resource_field': 'parent_concept_urls', 'column': 'parent_concept_urls',\n 'default': None, 'datatype': 'list'},\n {'resource_field': 'datatype', 'column': 'datatype', 'default': 'None'},\n {'resource_field': 'owner', 'column': 'owner_id'},\n {'resource_field': 'update_comment', 'column': 'update_comment'},\n {'resource_field': 'owner_type', 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'source', 'column': 'source'},\n ],\n OclCsvToJsonConverter.DEF_AUTO_CONCEPT_NAMES: {\n 'sub_resource_type': 'names',\n 'skip_if_empty_column': 'name',\n 'primary_sub_resource': [\n {'resource_field': 'name', 'column': 'name'},\n {'resource_field': 'locale', 'column': 'name_locale', 'default': 'en'},\n {'resource_field': 'locale_preferred', 'column': 'name_locale_preferred',\n 'default': True},\n {'resource_field': 'name_type', 'column': 'name_type',\n 'default': 'Fully Specified'},\n {'resource_field': 'external_id', 'column': 'name_external_id',\n 'required': False},\n ],\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n 'auto_sub_resources': [\n {'resource_field': 'name', 'column_prefix': 'name'},\n {'resource_field': 'locale', 'column_prefix': 'name_locale', 'default': 'en'},\n {'resource_field': 'locale_preferred', 'column_prefix': 'name_locale_preferred',\n 'required': False},\n {'resource_field': 'name_type', 'column_prefix': 'name_type',\n 'required': False},\n {'resource_field': 'external_id', 'column_prefix': 'name_external_id',\n 'required': False},\n ]\n },\n OclCsvToJsonConverter.DEF_AUTO_CONCEPT_DESCRIPTIONS: {\n 'sub_resource_type': 'descriptions',\n 'skip_if_empty_column': 'description',\n 'primary_sub_resource': [\n {'resource_field': 'description', 'column': 'description'},\n {'resource_field': 'locale', 'column': 'description_locale', 'default': 'en'},\n {'resource_field': 'locale_preferred', 'column': 'description_locale_preferred',\n 'required': False},\n {'resource_field': 'description_type', 'column': 'description_type',\n 'required': False},\n {'resource_field': 'external_id', 'column': 'description_external_id',\n 'required': False},\n ],\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n 'auto_sub_resources': [\n {'resource_field': 'description', 'column_prefix': 'description'},\n {'resource_field': 'locale', 'column_prefix': 'description_locale',\n 'default': 'en'},\n {'resource_field': 'locale_preferred',\n 'column_prefix': 'description_locale_preferred', 'required': False},\n {'resource_field': 'description_type', 'column_prefix': 'description_type',\n 'required': False},\n {'resource_field': 'external_id', 
'column_prefix': 'description_external_id',\n 'required': False},\n ],\n },\n OclCsvToJsonConverter.DEF_AUTO_ATTRIBUTES: {\n 'standard_column_prefix': 'attr', # e.g. 'attr:Reporting Frequency'\n 'separator': ':',\n 'key_column_prefix': 'attr_key', # 2-digit number required, e.g. attr_key[01]\n 'value_column_prefix': 'attr_value', # 2-digit number required, e.g. attr_value[01]\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n }\n },\n {\n 'definition_name': 'Generic Auto Concept Internal Mappings',\n 'is_active': True,\n 'resource_type': OclCsvToJsonConverter.DEF_TYPE_AUTO_RESOURCE,\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT,\n OclCsvToJsonConverter.DEF_AUTO_RESOURCE_TEMPLATE: {\n 'definition_name': 'Generic Concept Internal Mapping',\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_MAPPING,\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n 'skip_if_empty_column_prefix': ['map_to_concept_id', 'map_to_concept_url'],\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'retired', 'column': 'retired', 'required': False, 'datatype': 'bool'},\n {'resource_field': 'map_target', 'column_prefix': 'map_target',\n 'default': oclconstants.OclConstants.MAPPING_TARGET_INTERNAL},\n {'resource_field': 'map_type', 'column_prefix': 'map_type',\n 'default': 'Same As'},\n {'resource_field': 'sort_weight', 'column': 'sort_weight', 'datatype': 'float'},\n {'resource_field': 'update_comment', 'column': 'update_comment'},\n {'resource_field': 'external_id', 'column_prefix': 'map_external_id',\n 'required': False},\n {'resource_field': 'from_concept_url', 'column_prefix': 'map_from_concept_url',\n 'required': False},\n {'resource_field': oclconstants.OclConstants.MAPPING_FROM_CONCEPT_ID,\n 'column_prefix': 'map_from_concept_id',\n 'column': 'id', 'required': False},\n {'resource_field': 'from_concept_owner_id', 'column': 'owner_id',\n 'column_prefix': 'map_from_concept_owner_id', 'required': False},\n {'resource_field': 'from_concept_owner_type', 'column': 'owner_type',\n 'column_prefix': 'map_from_concept_owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'from_concept_source', 'column': 'source',\n 'column_prefix': 'map_from_concept_source', 'required': False},\n {'resource_field': 'to_concept_url', 'column_prefix': 'map_to_concept_url',\n 'required': False},\n {'resource_field': 'to_concept_code', 'column_prefix': 'map_to_concept_id',\n 'required': False},\n {'resource_field': 'to_concept_name', 'column_prefix': 'map_to_concept_name',\n 'required': False},\n {'resource_field': 'to_concept_owner_id', 'column': 'owner_id',\n 'column_prefix': 'map_to_concept_owner_id', 'required': False},\n {'resource_field': 'to_concept_owner_type', 'column': 'owner_type',\n 'column_prefix': 'map_to_concept_owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'to_concept_source', 'column': 'source',\n 'column_prefix': 'map_to_concept_source', 'required': False},\n {'resource_field': 'owner', 'column_prefix': 'map_owner_id',\n 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column_prefix': 'map_owner_type',\n 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'source', 'column_prefix': 'map_source', 
'column': 'source'},\n ],\n },\n },\n {\n 'definition_name': 'Generic Auto Concept External Mappings',\n 'is_active': True,\n 'resource_type': OclCsvToJsonConverter.DEF_TYPE_AUTO_RESOURCE,\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT,\n OclCsvToJsonConverter.DEF_AUTO_RESOURCE_TEMPLATE: {\n 'definition_name': 'Generic Concept External Mapping',\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_MAPPING,\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n 'skip_if_empty_column_prefix': ['extmap_to_concept_id', 'extmap_to_concept_url'],\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'retired', 'column': 'retired', 'required': False, 'datatype': 'bool'},\n {'resource_field': 'map_target', 'column_prefix': 'extmap_target',\n 'default': oclconstants.OclConstants.MAPPING_TARGET_EXTERNAL},\n {'resource_field': 'map_type', 'column_prefix': 'extmap_type',\n 'default': 'Same As'},\n {'resource_field': 'sort_weight', 'column': 'sort_weight', 'datatype': 'float'},\n {'resource_field': 'update_comment', 'column': 'update_comment'},\n {'resource_field': 'external_id', 'column_prefix': 'extmap_external_id',\n 'required': False},\n {'resource_field': 'from_concept_url', 'required': False,\n 'column_prefix': 'extmap_from_concept_url'},\n {'resource_field': oclconstants.OclConstants.MAPPING_FROM_CONCEPT_ID,\n 'column_prefix': 'extmap_from_concept_id',\n 'column': 'id', 'required': False},\n {'resource_field': 'from_concept_owner_id', 'column': 'owner_id',\n 'column_prefix': 'extmap_from_concept_owner_id', 'required': False},\n {'resource_field': 'from_concept_owner_type', 'column': 'owner_type',\n 'column_prefix': 'extmap_from_concept_owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'from_concept_source', 'column': 'source',\n 'column_prefix': 'extmap_from_concept_source', 'required': False},\n {'resource_field': 'to_concept_url', 'column_prefix': 'extmap_to_concept_url',\n 'required': False},\n {'resource_field': 'to_concept_code', 'column_prefix': 'extmap_to_concept_id',\n 'required': False},\n {'resource_field': 'to_concept_name', 'column_prefix': 'extmap_to_concept_name',\n 'required': False},\n {'resource_field': 'to_concept_owner_id', 'column': 'owner_id',\n 'column_prefix': 'extmap_to_concept_owner_id', 'required': False},\n {'resource_field': 'to_concept_owner_type', 'column': 'owner_type',\n 'column_prefix': 'extmap_to_concept_owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'to_concept_source', 'column': 'source',\n 'column_prefix': 'extmap_to_concept_source', 'required': False},\n {'resource_field': 'to_source_url', 'column_prefix': 'extmap_to_source_url',\n 'required': False},\n {'resource_field': 'owner', 'column_prefix': 'extmap_owner_id',\n 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column_prefix': 'extmap_owner_type',\n 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'source', 'column_prefix': 'extmap_source',\n 'column': 'source'},\n ],\n },\n },\n {\n 'definition_name': 'Generic Auto Concept Reference',\n 'is_active': False,\n 'resource_type': OclCsvToJsonConverter.DEF_TYPE_AUTO_RESOURCE,\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT,\n 
OclCsvToJsonConverter.DEF_AUTO_RESOURCE_TEMPLATE: {\n 'definition_name': 'Generic Concept Reference',\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_REFERENCE,\n 'index_prefix': AUTO_INDEX_STANDARD_PREFIX,\n 'index_postfix': AUTO_INDEX_STANDARD_POSTFIX,\n 'index_regex': AUTO_INDEX_STANDARD_REGEX,\n 'skip_if_empty_column_prefix': ['ref_collection'],\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'owner', 'column_prefix': 'ref_owner_id',\n 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column_prefix': 'ref_owner_type',\n 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'collection', 'column_prefix': 'ref_collection'},\n {'resource_field': 'ref_type', 'column_name': 'ref_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_CONCEPT},\n {'resource_field': 'ref_target_owner_id',\n 'column_prefix': 'ref_target_owner_id', 'column': 'owner_id'},\n {'resource_field': 'ref_target_owner_type',\n 'column_prefix': 'ref_target_owner_type', 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'ref_target_source', 'column_prefix': 'ref_target_source',\n 'column': 'source'},\n {'resource_field': 'ref_target_concept_id', 'column': 'id'},\n {'resource_field': 'data',\n 'csv_to_json_processor': 'process_auto_concept_reference'},\n ]\n }\n },\n {\n 'definition_name': 'Generic Standalone Internal Mapping',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_MAPPING,\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_MAPPING,\n 'skip_if_empty_column': ['map_to_concept_id', 'to_concept_id',\n 'map_to_concept_url', 'to_concept_url'],\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'retired', 'column': 'retired', 'required': False, 'datatype': 'bool'},\n {'resource_field': 'map_target', 'column': 'map_target',\n 'default': oclconstants.OclConstants.MAPPING_TARGET_INTERNAL},\n {'resource_field': 'map_type', 'column': 'map_type', 'default': 'Same As'},\n {'resource_field': 'sort_weight', 'column': 'sort_weight', 'datatype': 'float'},\n {'resource_field': 'update_comment', 'column': 'update_comment'},\n {'resource_field': 'external_id', 'column': 'external_id',\n 'required': False},\n {'resource_field': 'from_concept_url', 'required': False,\n 'column': ['map_from_concept_url', 'from_concept_url']},\n {'resource_field': oclconstants.OclConstants.MAPPING_FROM_CONCEPT_ID,\n 'column': ['map_from_concept_id', 'from_concept_id', 'from_concept_code'],\n 'required': False},\n {'resource_field': 'from_concept_owner_id', 'required': False,\n 'column': ['map_from_concept_owner_id', 'from_concept_owner_id', 'owner_id']},\n {'resource_field': 'from_concept_owner_type',\n 'column': ['map_from_concept_owner_type', 'from_concept_owner_type', 'owner_type'],\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'from_concept_source', 'required': False,\n 'column': ['map_from_concept_source', 'from_concept_source', 'source']},\n {'resource_field': 'to_concept_url', 'required': False,\n 'column': ['map_to_concept_url', 'to_concept_url']},\n {'resource_field': 'to_concept_code', 'required': False,\n 'column': ['map_to_concept_id', 'to_concept_id', 'to_concept_code']},\n {'resource_field': 'to_concept_name', 'required': False,\n 'column': ['map_to_concept_name', 'to_concept_name']},\n {'resource_field': 'to_concept_owner_id', 'required': 
False,\n 'column': ['map_to_concept_owner_id', 'to_concept_owner_id', 'owner_id']},\n {'resource_field': 'to_concept_owner_type',\n 'column': ['map_to_concept_owner_type', 'to_concept_owner_type', 'owner_type'],\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'to_concept_source', 'required': False,\n 'column': ['map_to_concept_source', 'to_concept_source', 'source']},\n {'resource_field': 'owner', 'column': ['map_owner_id', 'owner_id']},\n {'resource_field': 'owner_type', 'column': ['map_owner_type', 'owner_type'],\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'source', 'column': ['map_source', 'source']},\n ]\n },\n {\n 'definition_name': 'Generic Standalone External Mapping',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_MAPPING,\n '__trigger_column': 'resource_type',\n '__trigger_value': 'External Mapping', # Note deviation from RESOURCE_TYPE constants\n 'skip_if_empty_column': ['map_to_concept_id', 'to_concept_id',\n 'map_to_concept_url', 'to_concept_url'],\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'retired', 'column': 'retired', 'required': False, 'datatype': 'bool'},\n {'resource_field': 'map_target', 'column': 'map_target',\n 'default': oclconstants.OclConstants.MAPPING_TARGET_EXTERNAL},\n {'resource_field': 'map_type', 'column': 'map_type', 'default': 'Same As'},\n {'resource_field': 'sort_weight', 'column': 'sort_weight', 'datatype': 'float'},\n {'resource_field': 'update_comment', 'column': 'update_comment'},\n {'resource_field': 'external_id', 'column': 'external_id',\n 'required': False},\n {'resource_field': 'from_concept_url', 'required': False,\n 'column': ['map_from_concept_url', 'from_concept_url']},\n {'resource_field': oclconstants.OclConstants.MAPPING_FROM_CONCEPT_ID,\n 'column': ['map_from_concept_id', 'from_concept_id', 'from_concept_code'],\n 'required': False},\n {'resource_field': 'from_concept_owner_id', 'required': False,\n 'column': ['map_from_concept_owner_id', 'from_concept_owner_id', 'owner_id']},\n {'resource_field': 'from_concept_owner_type',\n 'column': ['map_from_concept_owner_type', 'from_concept_owner_type', 'owner_type'],\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'from_concept_source', 'required': False,\n 'column': ['map_from_concept_source', 'from_concept_source', 'source']},\n {'resource_field': 'to_concept_code', 'required': False,\n 'column': ['map_to_concept_id', 'to_concept_id', 'to_concept_code']},\n {'resource_field': 'to_concept_name', 'required': False,\n 'column': ['map_to_concept_name', 'to_concept_name']},\n {'resource_field': 'to_concept_owner_id', 'required': False,\n 'column': ['map_to_concept_owner_id', 'to_concept_owner_id', 'owner_id']},\n {'resource_field': 'to_concept_owner_type',\n 'column': ['map_to_concept_owner_type', 'to_concept_owner_type', 'owner_type'],\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'to_concept_source', 'required': False,\n 'column': ['map_to_concept_source', 'to_concept_source', 'source']},\n {'resource_field': 'owner', 'column': ['map_owner_id', 'owner_id']},\n {'resource_field': 'owner_type', 'column': ['map_owner_type', 'owner_type'],\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'source', 'column': ['map_source', 'source']},\n ]\n },\n {\n 'definition_name': 'Generic Collection Reference',\n 'is_active': False,\n 'resource_type': 
oclconstants.OclConstants.RESOURCE_TYPE_REFERENCE,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_REFERENCE,\n 'skip_if_empty_column': 'id',\n\n },\n {\n 'definition_name': 'Generic Source Version',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_SOURCE_VERSION,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_SOURCE_VERSION,\n 'skip_if_empty_column': 'id',\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'description', 'column': 'description'},\n {'resource_field': 'released', 'column': 'released', 'required': False,\n 'datatype': 'bool'},\n {'resource_field': 'retired', 'column': 'retired', 'required': False,\n 'datatype': 'bool'},\n {'resource_field': 'owner', 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'source', 'column': 'source'},\n ],\n },\n {\n 'definition_name': 'Generic Collection Version',\n 'is_active': True,\n 'resource_type': oclconstants.OclConstants.RESOURCE_TYPE_COLLECTION_VERSION,\n 'id_column': 'id',\n '__trigger_column': 'resource_type',\n '__trigger_value': oclconstants.OclConstants.RESOURCE_TYPE_COLLECTION_VERSION,\n 'skip_if_empty_column': 'id',\n OclCsvToJsonConverter.DEF_CORE_FIELDS: [\n {'resource_field': 'description', 'column': 'description'},\n {'resource_field': 'released', 'column': 'released', 'required': False,\n 'datatype': 'bool'},\n {'resource_field': 'retired', 'column': 'retired', 'required': False,\n 'datatype': 'bool'},\n {'resource_field': 'owner', 'column': 'owner_id'},\n {'resource_field': 'owner_type', 'column': 'owner_type',\n 'default': oclconstants.OclConstants.RESOURCE_TYPE_ORGANIZATION},\n {'resource_field': 'collection', 'column': 'collection'},\n ],\n },\n ]\n\n def __init__(self, csv_filename='', input_list=None, verbose=False, allow_special_characters=False):\n \"\"\" Initialize the object with the standard CSV resource definition \"\"\"\n OclCsvToJsonConverter.__init__(\n self, csv_filename=csv_filename,\n input_list=input_list,\n csv_resource_definitions=self.default_csv_resource_definitions,\n verbose=verbose,\n allow_special_characters=allow_special_characters\n )\n","repo_name":"OpenConceptLab/ocldev","sub_path":"ocldev/oclcsvtojsonconverter.py","file_name":"oclcsvtojsonconverter.py","file_ext":"py","file_size_in_byte":73802,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"15"} +{"seq_id":"26090697377","text":"import re, math\n\nf = open('in/2', 'r')\nprog = f.read().split(\",\")\nprog = [int(j) for j in prog]\nprog_og = prog.copy()\n\ndef process(i, ins):\n #print(i, ins)\n if(ins[0] == 1):\n prog[ins[3]] = prog[ins[1]] + prog[ins[2]]\n elif(ins[0] == 2):\n prog[ins[3]] = prog[ins[1]] * prog[ins[2]]\n elif(ins[0] == 99):\n return True\n return False\n\nprint(prog)\nprog[1] = 12\nprog[2] = 2\nfor i in range(0, len(prog), 4):\n if process(i, prog[i:i+4]):\n break\n\nprint(\"part1\", prog[0])\n\nfor j in range(0, 100):\n for k in range(0, 100):\n prog = prog_og.copy()\n prog[1] = j\n prog[2] = k\n for i in range(0, len(prog), 4):\n if process(i, prog[i:i+4]):\n break\n if prog[0] == 19690720:\n print(\"part2\", (100*j)+k)\n 
\n","repo_name":"domgoodwin/advent","sub_path":"2019/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"18946485267","text":"import sys\nsys.path.insert(0, \"../../TheanoXLA\")\n\nimport numpy as np\nimport theanoxla.tensor as T\nfrom theanoxla import layers, losses, optimizers, function, gradients\nfrom theanoxla.utils import batchify, vq_to_boundary\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nimport argparse\n\nparse = argparse.ArgumentParser()\nparse.add_argument('--dataset', type=int, default=0)\nparse.add_argument('--WG', type=int, default=64)\nargs = parse.parse_args()\n\n\ndef generator(Z, out_dim, D=64):\n layer = [layers.Dense(Z, D)]\n layer.append(layers.Activation(layer[-1], T.leaky_relu))\n layer.append(layers.Dense(layer[-1], D))\n layer.append(layers.Activation(layer[-1], T.leaky_relu))\n layer.append(layers.Dense(layer[-1], out_dim))\n return layer\n\ndef discriminator(X):\n layer = [layers.Dense(X, 64)]\n layer.append(layers.Activation(layer[-1], T.leaky_relu))\n layer.append(layers.Dense(layer[-1], 64))\n layer.append(layers.Activation(layer[-1], T.leaky_relu))\n layer.append(layers.Dense(layer[-1], 2))\n return layer\n\n\nBS = 100\nlr = 0.00005\n\n# create dataset\nif args.dataset == 0:\n DATA = np.random.randn(1000, 2)\n DATA *= 0.06\n mx, my = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))\n DATA += np.stack([mx.flatten(), my.flatten()], 1).repeat(1000 // 25, 0)\nelif args.dataset == 1:\n DATA = np.random.rand(1000, 2) * 2 - 1\n gauss = np.random.randn(500, 2) * 0.1 + np.array([0.5, 0.5])\n DATA = np.concatenate([DATA, gauss])\nelif args.dataset == 2:\n DATA = np.random.rand(1000, 2) * 2 - 1\n\n\n# create placeholders and predictions\nX = T.Placeholder([BS, 2], 'float32')\nZ = T.Placeholder([BS, 2], 'float32')\nG_sample = generator(Z, 2, args.WG)\nlogits = discriminator(T.concatenate([G_sample[-1], X]))\nlabels = T.concatenate([T.zeros(BS, dtype='int32'), T.ones(BS, dtype='int32')])\n\n# compute the losses\ndisc_loss = losses.sparse_crossentropy_logits(labels, logits[-1]).mean()\ngen_loss = losses.sparse_crossentropy_logits(1 - labels[:BS],\n logits[-1][:BS]).mean()\n\n# create the vq mask\nmasks = T.concatenate([G_sample[1] > 0, G_sample[3] > 0], 1)\n\n# compute the slope matrix for the poitns and its determinant\nA = T.stack([gradients(G_sample[-1][:,0].sum(), [Z])[0],\n gradients(G_sample[-1][:,1].sum(), [Z])[0]], 1)\ndet = T.abs(T.det(A))\n\n# variables\nd_variables = sum([l.variables() for l in logits], [])\ng_variables = sum([l.variables() for l in G_sample], [])\n\n# updates\nupdates_d, _ = optimizers.Adam(disc_loss, d_variables, lr)\nupdates_g, _ = optimizers.Adam(gen_loss, g_variables, lr)\nupdates = {**updates_d, **updates_g}\n\n# functions\nf = function(Z, X, outputs = [disc_loss, gen_loss],\n updates = updates)\ng = function(Z, outputs=[G_sample[-1]])\nh = function(Z, outputs=[masks, det])\n\n##### TRAINING\n\nfor epoch in range(12000):\n for x in batchify(DATA, batch_size=BS, option='random_see_all'):\n z = np.random.rand(BS, 2) * 2 -1\n f(z, x)\n\n##### SAMPLE POINTS\nG = list()\nfor i in range(10):\n z = np.random.rand(BS, 2) * 2 -1\n G.append(g(z)[0])\nG = np.concatenate(G)\n\n#### SAMPLE DETS\nNN = 400\nMIN, MAX = -1, 1\nxx, yy = np.meshgrid(np.linspace(MIN, MAX, NN), np.linspace(MIN, MAX, NN))\nXX = np.stack([xx.flatten(), yy.flatten()], 1)\nO2 = list()\nfor x in batchify(XX, batch_size=BS, 
option='continuous'):\n a, b = h(x)\n O2.append(b)\nO2 = np.log(np.concatenate(O2))\n\n##### SAMPLE REGIONS\n\n# high proba case\nproba = np.exp(O2)\nhigh_samples = np.random.choice(range(len(XX)), size=1000, p=proba/ proba.sum())\nhigh_samples = XX[high_samples]\nhigh_samples_out = list()\nfor x in batchify(high_samples, batch_size=BS, option='continuous'):\n high_samples_out.append(g(x)[0])\nhigh_samples_out = np.concatenate(high_samples_out)\n\n# low proba case\nlow_samples = np.random.choice(range(len(XX)), size=1000, p=(proba.max()-proba)/(proba.max()-proba).sum())\nlow_samples = XX[low_samples]\nlow_samples_out = list()\nfor x in batchify(low_samples, batch_size=BS, option='continuous'):\n low_samples_out.append(g(x)[0])\nlow_samples_out = np.concatenate(low_samples_out)\n\n###### PLOTS\n\n\nplt.figure(figsize=(4, 4))\nplt.imshow(O2.reshape((NN, NN)), aspect='auto', origin='lower',\n extent=(MIN, MAX, MIN, MAX))\nplt.colorbar()\nplt.savefig('zspace_logdet_{}_{}.jpg'.format(args.dataset, args.WG))\nplt.close()\n\nplt.figure(figsize=(4, 4))\nplt.imshow(np.exp(O2).reshape((NN, NN)), aspect='auto', origin='lower',\n extent=(MIN, MAX, MIN, MAX))\nplt.colorbar()\nplt.savefig('zspace_det_{}_{}.jpg'.format(args.dataset, args.WG))\nplt.close()\n\nplt.figure(figsize=(4, 4))\nplt.plot(high_samples_out[:, 0], high_samples_out[:, 1], 'rx')\nplt.plot(low_samples_out[:, 0], low_samples_out[:, 1], 'gx')\nplt.savefig('bisamples_{}_{}.jpg'.format(args.dataset, args.WG))\nplt.close()\n\nplt.figure(figsize=(4, 4))\nplt.plot(DATA[:, 0], DATA[:, 1], 'x')\nplt.plot(G[:, 0], G[:, 1], 'x')\nplt.savefig('samples_{}_{}.jpg'.format(args.dataset, args.WG))\nplt.close()\n\n","repo_name":"RandallBalestriero/GAN","sub_path":"DETERMINANT/multigauss_determinant.py","file_name":"multigauss_determinant.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"8922722128","text":"# -*- coding: utf-8 -*-\n# @Time : 2021-04-06 15:50\n# @Author : Lodge\nname = \"lite-tools\"\n__ALL__ = [\n \"get_lan\", # get the LAN (internal) ip\n \"get_wan\", # get the WAN (public) ip\n \"check_proxy\", # check whether a proxy is usable\n \"try_catch\", # @ exception capture + retry configuration + callback\n \"get_time\", # time conversion + time fetching\n \"time_range\", # get the start/end range of a time period\n \"time_count\", # @ measure a function's running time\n \"get_b64e\", # encode to base64, other options available\n \"get_b64d\", # decode base64\n \"get_md5\", # hash to md5\n \"get_sha\", # hash with sha, default sha 256, other options available\n \"get_sha3\", # hash with sha3, default sha3 256, other options available\n \"get_ua\", # get one random ua; parameters can pin the browser or the OS\n \"try_get\", # fetch dict values jsonPath-style\n \"try_key\", # look up a value by key or a key by value\n \"MySql\", # MySql connection-pool object\n \"MySqlConfig\", # mysql -- dedicated configuration\n \"FlattenJson\", # flatten a json\n # \"JsJson\", # extract json from js; not generally usable yet\n \"WrapJson\", # fold a json according to a given template\n \"match_case\", # similar to match case\n \"clean_html\", # extract the content inside html tags\n \"CleanString\", # clean special characters out of a string; fine when the text is not too large\n \"color_string\", # add color to text\n \"SqlString\", # build mysql statements; does not handle very complex ones\n \"math_string\", # not much use, just prints some math characters\n # \"x_timeout\", # not finished yet; limits a function's maximum running time\n \"Singleton\", # @ singleton\n \"Buffer\", # @ buffer queue + statistics\n \"count_lines\", # get the number of lines in a file\n \"LiteLogFile\", # log records kept in a buffer --> for occasional one-off records (checkpoints); 10000-entry buffer that wraps back to 1 when full; use loguru for high-frequency logging\n # below are js-to-python operations; not finished yet; >>> and base-36 conversion etc. are still to come\n \"atob\", # ascii to bytes\n \"btoa\", # bytes to ascii\n \"to_string_2\", # js number to binary, (123456789).toString(2)\n \"to_string_16\", # js number to hex, (123456789).toString(16)\n \"to_string_36\", # js number to base 36, (123456789).toString(36)\n \"xor\", # xor, same as python ^; mainly here to solve precision issues\n \"unsigned_right_shift\", # >>> unsigned right shift, solves precision issues\n \"left_shift\", # << left shift, 
solves precision issues\n \"dec_to_bin\", # decimal float to binary data (note: returns a string), same as toString(2)\n # below are some redis operations that can be used externally; they will be released once the first version of the http package is usable\n \"LiteRedis\", # limited use on its own, but the object can be initialized from a local config file, e.g. rd = LiteRedis(\"/root/config.json\")\n \"LiteProxy\", # more convenient proxy fetching based on redis set/list modes; defaults to random pop from a set, switch with mode='list' for rolling fetch; see LiteRedis.help() for usage\n \"BloomFilter\", # redis-based Bloom filter\n]\n\n# one more thing is not listed here; it can be imported like this\n# from lite_tools.tools.core.lite_ja3 import sync_ja3, async_ja3\n\nfrom lite_tools.tools.core.ip_info import get_lan, get_wan, check_proxy\nfrom lite_tools.tools.core.lib_base64 import get_b64d, get_b64e\nfrom lite_tools.tools.core.lite_parser import try_get, try_key, FlattenJson, JsJson, WrapJson\nfrom lite_tools.tools.core.lib_hashlib import get_md5, get_sha, get_sha3, get_5dm # 5dm is for my own use and not exposed publicly\nfrom lite_tools.tools.time.lite_time import get_time, time_count, time_range\nfrom lite_tools.tools.core.lite_try import try_catch\nfrom lite_tools.tools.core.lite_ua import get_ua\nfrom lite_tools.tools.sql.lite_mysql import MySql\nfrom lite_tools.tools.sql.lite_redis import LiteRedis, LiteProxy, BloomFilter\nfrom lite_tools.tools.sql.config import MySqlConfig\nfrom lite_tools.tools.sql.lib_mysql_string import SqlString\nfrom lite_tools.tools.core.lite_match import match_case\nfrom lite_tools.tools.core.lite_string import clean_html, CleanString, color_string, math_string\nfrom lite_tools.tools.time.httpx_timeout import x_timeout\nfrom lite_tools.tools.core.lite_cache import Singleton, Buffer\nfrom lite_tools.tools.core.lite_file import count_lines, LiteLogFile\n\nfrom lite_tools.tools.js import (\n atob, btoa,\n to_string_2, to_string_16, to_string_36,\n xor, unsigned_right_shift, left_shift, dec_to_bin\n)\n\nfrom lite_tools.version import VERSION\n\n\nversion = VERSION\n\n__ALL__ += [\"5dm\"] # this one is for my own use\n","repo_name":"Heartfilia/lite_tools","sub_path":"lite_tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"zh","doc_type":"code","stars":7,"dataset":"github-code","pt":"15"} +{"seq_id":"4036928836","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport time\nimport sys\nimport threading\n\nfrom pymachinetalk.dns_sd import ServiceDiscovery\nimport pymachinetalk.halremote as halremote\n\n\nclass BasicClass(object):\n def __init__(self):\n self.sd = ServiceDiscovery()\n\n rcomp = halremote.RemoteComponent('anddemo', debug=False)\n rcomp.no_create = True\n rcomp.newpin('button0', halremote.HAL_BIT, halremote.HAL_OUT)\n rcomp.newpin('button1', halremote.HAL_BIT, halremote.HAL_OUT)\n led_pin = rcomp.newpin('led', halremote.HAL_BIT, halremote.HAL_IN)\n led_pin.on_value_changed.append(self.led_pin_changed)\n led_pin.on_synced_changed.append(self.led_pin_synced)\n rcomp.on_connected_changed.append(self._connected)\n\n self.halrcomp = rcomp\n self.sd.register(rcomp)\n\n def led_pin_synced(self, synced):\n if synced:\n print(\"LED pin synced\")\n\n def led_pin_changed(self, value):\n print('LED pin value changed: %s' % str(value))\n\n def _connected(self, connected):\n print('Remote component connected: %s' % str(connected))\n\n def start(self):\n self.sd.start()\n\n def stop(self):\n self.sd.stop()\n\n\ndef main():\n basic = BasicClass()\n\n print('starting')\n basic.start()\n\n try:\n while True:\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n print('stopping threads')\n basic.stop()\n\n # wait for all threads to terminate\n while threading.active_count() > 1:\n time.sleep(0.1)\n\n print('threads stopped')\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"machinekit/pymachinetalk","sub_path":"examples/hal_basic/anddemo.py","file_name":"anddemo.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"15"} +{"seq_id":"70771512972","text":"from collections import deque\n\ndef main():\n with open(\"input_6.txt\", \"r\") as infile:\n buffer = infile.read().strip()\n unq = deque()\n\n # Part 1\n for i in range(len(buffer)):\n unq.append(buffer[i])\n if len(unq) == 4 and len(set(unq)) == 4:\n print(i + 1)\n break\n if len(unq) == 4:\n unq.popleft()\n\n # Part 2\n unq = deque()\n for i in range(len(buffer)):\n unq.append(buffer[i])\n if len(unq) == 14 and len(set(unq)) == 14:\n print(i + 1)\n break\n if len(unq) == 14:\n unq.popleft()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"adamp01/advent_of_code","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"26423143897","text":"import utility_functions\nfrom collections import OrderedDict\nimport table_creation\nimport plotly\nfrom plotly.graph_objs import Bar, Layout\nfrom pathlib import Path\n\nCOLORS = ['rgb(114,90,193)', 'rgb(128,206,215)', 'rgb(0,121,214)', 'rgb(173,10,101)']\n\ndef playerstats(team_id, conn, curr, player_position):\n find_players = \"\"\"\n SELECT user_id FROM Teams\n WHERE team_id=?\n \"\"\"\n curr.execute(find_players, (team_id,))\n teammates = curr.fetchall()\n\n user_id = teammates[player_position][0]\n\n cmd = \"\"\"\n SELECT event_id, kills, headshots, damage, distance FROM Playerstats\n WHERE user_id=?\n \"\"\"\n curr.execute(cmd, (user_id,))\n stats = curr.fetchall()\n\n player_dict = {}\n data = []\n partitions = ['Kills', 'Headshots', 'Damage', 'Distance']\n title = 'Point Breakdown for Player {}'.format(user_id)\n for event in stats:\n event_id = event[0]\n kills = event[1]\n headshots = event[2]\n damage = event[3]\n distance = event[4]\n player_dict[event_id] = [kills * 1000, headshots * 1000, damage, distance]\n \n color_index = 0\n for event, stats in player_dict.items():\n data.append(\n Bar(\n x=partitions,\n y=stats,\n name='Event {}'.format(event),\n marker=dict(\n color=COLORS[color_index]\n )\n )\n )\n color_index += 1\n\n layout = Layout(\n barmode='group',\n title=title,\n yaxis=dict(\n title='Points'\n ),\n xaxis=dict(\n title='Point Breakdown'\n )\n )\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n return fig\n\n\ndef teamstats(team_id, conn, curr):\n \"\"\"\n Use this function by adding the team_id as the argument. 
It can be done in\n a number of ways:\n\n teamstats(3) #can take one user id\n teamstats([2,3]) #or can run multiple user ids\n\n \"\"\"\n \n title = \"Team Points Breakdown for Team {}\".format(team_id)\n cmd = \"\"\"\n SELECT user_id, event_id, team_id, score FROM PlayerStats\n WHERE team_id={}\n \"\"\".format(team_id)\n curr.execute(cmd)\n teamscores = curr.fetchall()\n player_dict = {}\n event_set = set()\n data = []\n for player in teamscores:\n user_id = player[0]\n event_id = player[1]\n score = player[3]\n if user_id in player_dict:\n player_dict[user_id][event_id] = score\n else:\n player_dict[user_id] = {event_id: score}\n event_set.add(event_id)\n\n color_index = 0\n for player, score in player_dict.items():\n scores = []\n for event_id in event_set:\n scores.append(player_dict[player][event_id])\n width = 0\n if len(event_set) == 1:\n data.append(\n Bar(\n x=['event {}'.format(str(event_id)) for event_id in list(event_set)],\n y=scores,\n name='Player ID {}'.format(player),\n width=.4,\n marker=dict(\n color=COLORS[color_index]\n )\n )\n )\n else:\n data.append(\n Bar(\n x=['event {}'.format(str(event_id)) for event_id in list(event_set)],\n y=scores,\n name='Player ID {}'.format(player),\n marker=dict(\n color=COLORS[color_index]\n )\n )\n )\n color_index += 1\n\n layout = Layout(\n barmode='stack',\n title=title,\n yaxis=dict(\n title='Points'\n ),\n xaxis=dict(\n title='Events'\n )\n )\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n return fig\n\n\ndef basic(title, fieldnames, values):\n \"\"\"\n Example:\n basic(\"The Title\", [\"Bar_1\", \"Bar_2\"],[1,2])\n \"\"\"\n data = [plotly.graph_objs.Bar(\n x=fieldnames,\n y=values\n )]\n layout = plotly.graph_objs.Layout(title=title)\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n #plotly.offline.plot(fig, filename=str(PATH)+'/{}.html'.format(title))\n return fig\n\ndef stacked(title, dictList):\n \"\"\"\n Example:\n a = [\n graph.my_dict(['Team1','Team2','team3'], [1,2,3], 'kills'),\n graph.my_dict(['Team1','Team2','team3'], [1,1,1], 'deaths'),\n graph.my_dict(['Team1','Team2','team3'], [7,8,9], 'distance')]\n\n grouped(a)\n \"\"\"\n data = list()\n for my_dict in dictList:\n data.append(plotly.graph_objs.Bar(\n x=my_dict['groupName'],\n y=my_dict['values'],\n name=my_dict['barName']\n ))\n layout = plotly.graph_objs.Layout(\n barmode='stack', \n title=title)\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n #plotly.offline.plot(fig, filename=str(PATH)+'/{}.html'.format(title))\n return fig\n\ndef grouped(title, dictList):\n \"\"\"\n Example:\n a = [\n graph.my_dict(['Team1','Team2','team3'], [1,2,3], 'kills'),\n graph.my_dict(['Team1','Team2','team3'], [1,1,1], 'deaths'),\n graph.my_dict(['Team1','Team2','team3'], [7,8,9], 'distance')]\n\n grouped(a)\n \"\"\"\n data = list()\n for my_dict in dictList:\n data.append(Bar(\n x=my_dict['groupName'],\n y=my_dict['values'],\n name=my_dict['barName']\n ))\n\n layout = plotly.graph_objs.Layout(barmode='group', title=title)\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n #plotly.offline.plot(fig, filename=str(PATH)+'/{}.html'.format(title))\n return fig\n\ndef my_dict(x,y,z):\n return OrderedDict([('groupName', x), ('values', y), ('barName', z)])","repo_name":"brotaku13/PUBG_Sqlite_database","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"199310373","text":"\r\n\r\nclass Agent:\r\n def 
__init__(self, name, x, y, hs, objective, food, food_objective, speed, size, sense, x_or_y, neg_pos, food_x, food_y, energy):\r\n \"\"\"\r\n Agent class\r\n\r\n :param name: Agent identity\r\n :param x: Agent x position\r\n :param y: Agent y position\r\n :param hx: Home x position\r\n\r\n :param objective: Agent objective\r\n :param food: Agent food count\r\n :param speed: Agent speed\r\n :param size: Agent size\r\n :param sense: Agent sense\r\n\r\n :param x_or_y: which axes has the random move been\r\n :param neg_pos: did agent move positively or negativiley\r\n :param energy: Agent energy\r\n \"\"\"\r\n # Agent identity\r\n self.name = name\r\n\r\n # Agent positions\r\n self.x = x\r\n self.y = y\r\n\r\n # Closest home border\r\n self.hs = hs\r\n\r\n # Agent food info\r\n self.objective = objective\r\n self.food = food\r\n self.food_objective = food_objective\r\n\r\n # Agent mutation factors\r\n self.speed = speed\r\n self.size = size\r\n self.sense = sense\r\n\r\n # Agent random move\r\n self.x_or_y = x_or_y\r\n self.neg_pos = neg_pos\r\n\r\n # Food target position\r\n self.food_x = food_x\r\n self.food_y = food_y\r\n\r\n # Agent Energy count\r\n self.energy = energy\r\n\r\n def __str__(self):\r\n return \"Theoph's agent, located at \" + str(self.x) + \",\" + str(self.y)\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n","repo_name":"Mofmof2004/Natural_selection_Simulation","sub_path":"Agent/Agent_class.py","file_name":"Agent_class.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"7682196255","text":"#!/usr/bin/env python3\n# -*- coding: utf8 -*-\nfrom PyQt5.QtWidgets import QAbstractItemDelegate, QStyledItemDelegate , QItemDelegate\nfrom PyQt5.QtWidgets import QComboBox, QMessageBox\nfrom PyQt5.QtGui import QPalette\nfrom mysqlconnector import getAdminConnection, getConnection\nfrom PyQt5.QtCore import Qt, QSize, QRect, QRectF\nfrom numberDialog import numberDialog\nimport re\n\nclass defaultDelegate(QStyledItemDelegate):\n def paint(self, painter, option, index):\n if index.model().data(index, Qt.EditRole) == 'None':\n pass\n else:\n super().paint(painter, option, index)\n\n def setEditorData(self, editor, index):\n if index.model().data(index, Qt.EditRole) == 'None':\n editor.setText(\"\") \n else:\n super().setEditorData(editor, index)\n\n \nclass comboDelegate(QStyledItemDelegate):\n def __init__(self,parent):\n self.content={}\n print(\"comboDelegate is created\")\n super().__init__(parent)\n\n def fillContent(self, query):\n connection = getConnection()\n cursor = connection.cursor()\n cursor.execute(query)\n row = cursor.fetchone()\n while row is not None:\n self.content[row['id']]=row['value']\n row = cursor.fetchone()\n connection.close()\n\n def paint(self, painter, option, index):\n #print(\"Ячейка {0}.{1}:{2}\".format(index.row(), index.column(), type(index.data())))\n #print(\"comboDelegate paint()\")\n painter.save()\n #model = index.model()\n painter.drawText(option.rect, Qt.AlignCenter, index.data())\n painter.restore()\n\n def createEditor(self, parent, option, index):\n widgetEditor = QComboBox(parent)\n widgetEditor.addItems(self.content.values())\n print(\"Editor for delegate is created\")\n return widgetEditor\n\n def setModelData(self, editor, model, index):\n pass\n #model.setData(index, \n\n def setEditorData(self, editor, index):\n print(\"setEditorData run\")\n #text=index.model().data(index, Qt.EditRole)#.toString()\n 
#index.model().setChangedData(True)\n if index.model().data(index, Qt.DisplayRole):\n editor.setCurrentIndex(list(self.content.keys()).index(index.model().data(index.sibling(index.row(),index.column()+1), Qt.DisplayRole)))\n #editor.setCurrentIndex(list(self.content.keys()).index(index.sibling(index.row(),index.column()+1), Qt.DisplayRole))\n\n def updateEditorGeometry(self, editor, option, index):\n editor.setGeometry(option.rect)\n\nclass comboBdDelegate(QAbstractItemDelegate):\n \"\"\"Делегат будет использоваться для работы с внешними ключами таблицы. \n В конструктор передается название таблицы и название поля значения\"\"\"\n def __init__(self, parent, name_table, name_field):\n self.content = {} # Словарь имеет ключом первичный ключ подчиненной таблицы, а значением передаваемое в конструкторе поле таблицы\n super().__init__(parent)\n #print(\"relationDelegate is created\")\n self.tb_name = name_table\n self.fd_name = name_field\n self.fill_content()\n #print(self.content)\n\n def fill_content(self):\n self.content.clear()\n self.content[0]='-----'\n con = getConnection()\n strSQL = \"SELECT * FROM {}\".format(self.tb_name)\n cursor = con.cursor()\n result = 0\n try:\n cursor.execute(strSQL)\n row = cursor.fetchone()\n while row is not None:\n result += 1\n self.content[row[list(row.keys())[0]]] = row[self.fd_name]\n row = cursor.fetchone()\n except:\n QMessageBox.critical(None, \"Сообщение об ошибке\", \"Ошибка при выполнении запроса\\n{}\".format(strSQL))\n con.close()\n #print(self.content)\n return result\n\n def paint(self, painter, option, index):\n #print(\"relationDelegate paint()\\nrow={}, column={}, data = {}\".format(index.row(), index.column(), str(index.data())))\n #import ipdb; ipdb.set_trace()\n painter.save()\n #print(\"Ячейка {0}.{1}-Тип:{2},Данные:{3}\".format(index.row(), index.column(), type(index.data()), index.data()))\n if index.data() and (not index.data() == 'None'):\n pass\n #model = index.model()\n try:\n painter.drawText(option.rect, Qt.AlignCenter, self.content[int(index.data())])\n except:\n print(\"Error from relationDelegate.paint.\",index.data())\n painter.restore()\n\n def createEditor(self, parent, option, index):\n widgetEditor = QComboBox(parent)\n widgetEditor.addItems(self.content.values())\n #print(\"Editor for delegate is created\")\n return widgetEditor\n\n def setEditorData(self, editor, index):\n #print(\"setEditorData for relationDelegate\")\n if index.model().data(index, Qt.DisplayRole) and (not index.model().data(index, Qt.DisplayRole) == 'None'):\n editor.setCurrentIndex(list(self.content.keys()).index(int(index.model().data(index, Qt.DisplayRole))))\n #editor.setCurrentIndex(list(self.content.keys()).index(index.model().data(index, Qt.DisplayRole)))\n\n def setModelData(self, editor, model, index):\n data = list(self.content.keys())[editor.currentIndex()]\n #if data:\n #print(f\"from Delegate setModelData data={data}\")\n model.setData(index, data, Qt.EditRole)\n #strSQL = \"UPDATE {'table'} SET \n\n def updateEditorGeometry(self, editor, option, index):\n editor.setGeometry(option.rect)\n \nclass tableDelegate(QAbstractItemDelegate):\n \"\"\"Делегат для редактирования и отображения данных из подчиненной таблицы. 
При этом \n данные выбираются с помощью таблицы и элементов управления, с помощью которых \n осуществляется фильтрация данных в таблице.\n В качестве данных (data) из модели получаем кортеж, содержащий всю строку подчиенной таблицы.\n для отображения данных используется функция от этой строки-function_draw.\"\"\"\n def __init__(self, parent):\n self.result = -1, \"None\", \"None\"\n self.listFilters = []\n super().__init__(parent)\n\n def setSlaveTable(self, name_table):\n \"\"\" Установка названия подчиненной таблицы. Кроме того определяется названия поля, \n являющегося первичным ключом подчиненной таблицы\"\"\"\n indexSQL = 'SHOW INDEX FROM {} WHERE Key_name=\"PRIMARY\"'.format(name_table)\n connect = getConnection()\n cursor = connect.cursor()\n cursor.execute(indexSQL)\n row=cursor.fetchone()\n self.slave_index = row['Column_name']\n #selectSQL = \"SELECT * from {} WHERE {} = {}\".format(name_table, name_key, key)\n #print(selectSQL)\n\n def createEditor(self, parent, option, index):\n #super().createEditor(parent, option, index)\n formEditor = numberDialog(self.parent(), self.listFilters)\n formEditor.setNameTable(\"numbers\")\n formEditor.signalEndEdit.connect(self.closeAndCommitEditor)\n return formEditor\n\n def setModelData(self, editor, model, index):\n \"\"\"Если строка в таблице не выбрана, то в модель возвращается -1,\n если строка выбрана, то возвращается строка содержащая номер строки\"\"\"\n #print(\"run setModelData(); currentIndex = {}\".format(editor.mainTable.currentRow()))\n #print(\"from setModelData. Index = {}\".format(editor.mainTable.currentRow()))\n indexOfEditor = editor.tableQuery.currentRow()\n if indexOfEditor == -1:\n self.result = 'None'\n else:\n self.result = editor.tableQuery.item(indexOfEditor,0).text()\n #self.result = editor.mainTable.item(indexOfEditor,0).text(),editor.mainTable.item(indexOfEditor,1).text(),editor.mainTable.item(indexOfEditor,2).text()\n model.setData(index, self.result, Qt.EditRole)\n \n def closeAndCommitEditor(self):\n formEditor = self.sender()\n print(\"closeAndCommitEditor\")\n self.commitData.emit(formEditor)\n self.closeEditor.emit(formEditor)\n\n def setEditorData(self, editor, index):\n pass \n\n def paint(self, painter, option, index):\n painter.save()\n if index.data():\n painter.drawText(option.rect, Qt.AlignCenter, str(index.data()))\n painter.restore()\n\n def updateEditorGeometry(self, editor, option, index):\n \"\"\"Размер прямоугольника должен быть чуть меньше, чем размер отображения\"\"\"\n #print(\"type of self of delegate:{}\".format(type(self)))\n editor.setGeometry(300, 300, 500, 300)#option.rect)\n\nclass functionViewTableEditDelegate(tableDelegate):\n \"\"\"Делегат для отображения и редактирования столбца таблицы, который содержит\n поле для связи с подчиненной таблицей. Отображение производится с помощью функции, \n передаваемой в конструктор делегата. Редактирование производится с помощью \n диалового окна, в котором отображаетс подчиненная таблица. 
\"\"\"\n def __init__(self, parent, name_table, function_view):\n \"\"\" В конструктор передаются имя подчиенной таблицы и функция над \n связанной записью в подчиненной таблице\"\"\"\n self._function_view = function_view\n self._name_slave_table = name_table\n indexSQL = 'SHOW INDEX FROM {} WHERE Key_name=\"PRIMARY\"'.format(name_table)\n connect = getConnection()\n cursor = connect.cursor()\n cursor.execute(indexSQL)\n row=cursor.fetchone()\n slave_index = row['Column_name']\n connect.close()\n self._SqlSelectSlaveRecord = \"SELECT * FROM {} WHERE {} = {{}}\".format(self._name_slave_table, slave_index) \n super().__init__(parent)\n\n def createEditor(self, parent, option, index):\n #super().createEditor(parent, option, index)\n formEditor = numberDialog(self.parent(), self.listFilters)\n formEditor.setNameTable(self._name_slave_table)\n formEditor.signalEndEdit.connect(self.closeAndCommitEditor)\n return formEditor\n\n def paint(self, painter, option, index):\n painter.save()\n if (index.data()) and (not (index.data() == 'None')):\n SqlSelect = self._SqlSelectSlaveRecord.format(index.data())\n #print(SqlSelect)\n connect = getConnection()\n cursor = connect.cursor()\n cursor.execute(SqlSelect)\n row = cursor.fetchone()\n connect.close()\n painter.drawText(option.rect, Qt.AlignCenter, str(self._function_view(row)))\n painter.restore()\n\nclass functionViewQueryEditDelegate(tableDelegate):\n \"\"\"Делегат который для отображения и редактирования данных в ячейке таблицы использует\n форму, содержащую результат выполнения запроса к БД. Остальное все так же, как и\n в functionViewTableEditDelegate\"\"\"\n def __init__(self, parent, query, function_view):\n \"\"\"В таблицу передаются строка запроса и функция к строке \n результатов, использующаяся для вывода значения\"\"\"\n self._function_view = function_view\n self._query = query\n self._primary_key = self.getFieldPrimaryKey()\n super().__init__(parent)\n\n def getFieldPrimaryKey(self):\n \"\"\"Метод возвращает название поля первичного ключа главной таблицы. Определяется по имени первого \n поля в запросе `query`\"\"\"\n if not self._query:\n return \"\"\n else:\n return re.search(\"(?<=SELECT\\s)\\w\\..+?(?=as)\", self._query, flags=re.I).group().rstrip().lstrip().split('.')\n #return re.search(\"(?<=SELECT.+)\\w\\..+?(?=as)\", self._query, flags=re.I).group().rstrip()\n\n def paint(self, painter, option, index):\n painter.save()\n if (index.data()) and (not (index.data() == 'None')) and (self._primary_key):\n connect = getConnection()\n cursor = connect.cursor()\n selectSQL = self._query+\" WHERE {} = {}\".format('.'.join(self._primary_key), index.data())\n #print(f\"selectSQL={selectSQL}\")\n cursor.execute(selectSQL)\n row = cursor.fetchone()\n connect.close()\n painter.drawText(option.rect, Qt.AlignCenter, str(self._function_view(row)))\n painter.restore()\n\n def createEditor(self, parent, option, index):\n #super().createEditor(parent, option, index)\n formEditor = numberDialog(self.parent(), self.listFilters)\n formEditor.setQuery(self._query)#\"SELECT r.id_room, r.num_room as `№ пом`, r.cod_parent as parent, (SELECT address FROM rooms WHERE id_room = parent) as `Объект`, r.floor as `Этаж` FROM rooms r WHERE r.cod_parent > -1 ORDER BY level\")\n formEditor.signalEndEdit.connect(self.closeAndCommitEditor)\n return formEditor\n\nclass multiStringsDelegate(QItemDelegate):\n \"\"\"Делегат для отбражение в ячейке таблицы содержимого в несколько строк. 
Делегат служит для\n отображения данных из связанной таблицы при отношении многие к одному.\"\"\"\n def __init__(self, parent):\n super().__init__(parent)\n\n #def sizeHint(self, option, index):\n # return QSize(100, 200)\n\n #def paint(self, painter, option, index):\n # painter.save()\n # if index.data():\n # #print(\"paint from multiStringsDelegate\")\n # listStrings = str(index.data()).split(',')\n # #painter.drawText(option.rect, Qt.AlignCenter, f\"<<{str(index.data())}>>\")\n # painter.drawText(option.rect, Qt.AlignCenter, '\\n'.join(listStrings))\n # #painter.drawText(option.rect.adjusted(0,0,0,300), Qt.AlignCenter, '\\n'.join(listStrings))\n # painter.restore()\n\n def drawDisplay(self, painter, option, rect, text):\n #print(\"drawDisplay()\")\n if text:\n cg=option.state\n painter.save()\n #painter.setPen(option.palette.color(cg, QPalette::Text));\n #listStrings = str(index.data()).split(',')\n listStrings = text.split(',')\n #painter.setPen(option.palette.color(cg, QPalette.Text));\n painter.drawText(QRectF(rect.adjusted(0, 0, 0, 500)), '\\n'.join(listStrings))#, option)\n painter.restore()\n \n","repo_name":"svyatoslav68/abonent","sub_path":"Delegats.py","file_name":"Delegats.py","file_ext":"py","file_size_in_byte":15748,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"36424499336","text":"import os\nimport PIL.Image as Image\nimport numpy as np\n\ntrain_list = '/home/iceicehyhy/Dataset/CASIA/CASIA_FIRST_10/pairs_train.txt'\nroot_dir = '/home/iceicehyhy/Dataset/CASIA/CASIA_FIRST_10'\n\ndef PIL_loader(path):\n try:\n img = Image.open(path).convert('RGB')\n except IOError:\n print('Cannot load image ' + path)\n else:\n return img\n\n\ndef default_reader(fileList):\n imgList = []\n with open(fileList, 'r') as file:\n for line in file.readlines():\n imgPath, label = line.strip().split(' ')\n imgList.append((imgPath, int(label)))\n return imgList\n\n\ndef read_data_from_list():\n imgList = default_reader(train_list)\n for c_b in range (len(imgList)):\n img_p, label_ = imgList[c_b]\n img_ = np.asarray(PIL_loader(os.path.join(root_dir, img_p)))\n img_ = np.expand_dims(img_, axis= 0)\n label_ = np.expand_dims(label_, axis=0)\n if c_b == 0:\n batch_x = img_\n batch_y = label_\n else:\n batch_x = np.concatenate((batch_x, img_), axis= 0)\n batch_y = np.concatenate((batch_y, label_), axis= 0)\n \n index = np.arange(len(imgList))\n np.random.shuffle(index)\n cutoff_index = int(0.1 * len(imgList))\n train_index = index[cutoff_index:]\n val_index = index[:cutoff_index]\n return (batch_x[train_index], batch_y[train_index]), (batch_x[val_index], batch_y[val_index])","repo_name":"iceicehyhy/cosface_keras","sub_path":"read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"6656596001","text":"from apikey import get_key # replace with your own api key file\nimport json \nimport googlemaps \n\nbmaps = googlemaps.Client(key = get_key()) #reference your own api key file\n\n\n\ndef ToGeoCode(address): \n geocoded = bmaps.geocode(address) \n lat = geocoded[0][\"geometry\"][\"location\"][\"lat\"] \n long = geocoded[0][\"geometry\"][\"location\"][\"lng\"] \n coordinates = str(lat) + ', ' + str(long)\n return coordinates \n\n\n\ndef ToAdress(geocode): #converts list of coordinates to a formatted address\n latitude = geocode[0] \n longitude = geocode[1] \n address = bmaps.reverse_geocode((latitude,longitude)) \n 
fixed_address = address[0]['formatted_address'] \n return str(fixed_address) \n\n#make sure to pass the longitude and latitude to this as a tuple (lat, long) or just use the plain-text address name of choice \ndef time_n_distance(origin,end,output): #calls on distance matrix, pulls relevant info (distance + travel time) and makes new json \n distance = bmaps.distance_matrix(origin, end, mode='walking')[\"rows\"][0][\"elements\"][0][\"distance\"][\"value\"] #distance is a float\n time_ = bmaps.distance_matrix(origin, end, mode='walking')[\"rows\"][0][\"elements\"][0][\"duration\"][\"text\"] #time is a string\n if output == \"distance\": \n return distance \n elif output == \"time\": \n return time_ \n else: \n print(\" \")\n\n\n\n\n","repo_name":"goldrix/airbnb","sub_path":"backend/geocode_funcs.py","file_name":"geocode_funcs.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"29309860818","text":"\n\ndef read(address, length):\n if length < 16:\n print('Please read >= 16 bytes at once')\n return None\n import esp, gc\n gc.collect() # to explicitly clean up any previously made buffers, so we don't starve memory\n buffer = bytearray(length)\n esp.flash_raw_read(address, buffer)\n del esp\n return buffer\n\n\ndef _write(address, data):\n import esp, gc\n esp.flash_raw_write(address, data)\n del esp\n\n","repo_name":"tjclement/ecsc21-fw","sub_path":"firmware/python_modules/ecsc2021/flash.py","file_name":"flash.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"7367240204","text":"#! /usr/bin/env python\n# encoding: utf-8\n# DC 2008\n# Thomas Nagy 2010 (ita)\n\nimport re\n\nfrom waflib import Utils, Task, TaskGen, Logs\nfrom waflib.TaskGen import feature, before, after, extension\nfrom waflib.Configure import conf\n\nINC_REGEX = \"\"\"(?:^|['\">]\\s*;)\\s*INCLUDE\\s+(?:\\w+_)?[<\"'](.+?)(?=[\"'>])\"\"\"\nUSE_REGEX = \"\"\"(?:^|;)\\s*USE(?:\\s+|(?:(?:\\s*,\\s*(?:NON_)?INTRINSIC)?\\s*::))\\s*(\\w+)\"\"\"\nMOD_REGEX = \"\"\"(?:^|;)\\s*MODULE(?!\\s*PROCEDURE)(?:\\s+|(?:(?:\\s*,\\s*(?:NON_)?INTRINSIC)?\\s*::))\\s*(\\w+)\"\"\"\n\nEXT_MOD = \".mod\"\n\n# TODO (DC)\n# - handle pre-processed files (FORTRANPPCOM in scons)\n# - handle modules\n# - handle multiple dialects\n# - windows...\n# TODO (ita) understand what does all that mean ^^\n\nre_inc = re.compile(INC_REGEX, re.I)\nre_use = re.compile(USE_REGEX, re.I)\nre_mod = re.compile(MOD_REGEX, re.I)\n\nclass fortran_parser(object):\n\t\"\"\"\n\twe cannot do it at once from a scanner function, so the idea is to let the method\n\trunnable_status from the fortran task do a global resolution on the names found\n\n\tthe scanning will then return:\n\t* the nodes of the module names that will be produced\n\t* the nodes of the include files that will be used\n\t* the names of the modules to use\n\t\"\"\"\n\n\tdef __init__(self, incpaths):\n\t\tself.seen = []\n\n\t\tself.nodes = []\n\t\tself.names = []\n\n\t\tself.incpaths = incpaths\n\n\tdef find_deps(self, node):\n\t\t\"\"\"read a file and output what the regexps say about it\"\"\"\n\t\ttxt = node.read()\n\t\tincs = []\n\t\tuses = []\n\t\tmods = []\n\t\tfor line in txt.splitlines():\n\t\t\t# line by line regexp search? 
optimize?\n\t\t\tm = re_inc.search(line)\n\t\t\tif m:\n\t\t\t\tincs.append(m.group(1))\n\t\t\tm = re_use.search(line)\n\t\t\tif m:\n\t\t\t\tuses.append(m.group(1))\n\t\t\tm = re_mod.search(line)\n\t\t\tif m:\n\t\t\t\tmods.append(m.group(1))\n\t\treturn (incs, uses, mods)\n\n\tdef start(self, node):\n\t\t\"\"\"use the stack self.waiting to hold the nodes to iterate on\"\"\"\n\t\tself.waiting = [node]\n\t\twhile self.waiting:\n\t\t\tnd = self.waiting.pop(0)\n\t\t\tself.iter(nd)\n\n\tdef iter(self, node):\n\t\tpath = node.abspath()\n\t\tincs, uses, mods = self.find_deps(node)\n\t\tfor x in incs:\n\t\t\tif x in self.seen:\n\t\t\t\tcontinue\n\t\t\tself.seen.append(x)\n\t\t\tself.tryfind_header(x)\n\n\t\tfor x in uses:\n\t\t\tname = \"USE@%s\" % x\n\t\t\tif not name in self.names:\n\t\t\t\tself.names.append(name)\n\n\t\tfor x in mods:\n\t\t\tname = \"MOD@%s\" % x\n\t\t\tif not name in self.names:\n\t\t\t\tself.names.append(name)\n\n\t\t#for x in mods:\n\t\t#\tnode = self.task.generator.bld.bldnode.find_or_declare(x + EXT_MOD)\n\t\t#\tassert(node)\n\t\t#\tif node.abspath() in self.seen:\n\t\t#\t\tcontinue\n\t\t#\tself.task.set_inputs(node)\n\n\tdef tryfind_header(self, filename):\n\t\tfound = None\n\t\tfor n in self.incpaths:\n\t\t\tfound = n.find_resource(filename)\n\t\t\tif found:\n\t\t\t\tself.nodes.append(found)\n\t\t\t\tself.waiting.append(found)\n\t\t\t\tbreak\n\t\tif not found:\n\t\t\tif not filename in self.names:\n\t\t\t\tself.names.append(filename)\n\ndef scan(self):\n\ttmp = fortran_parser(self.generator.includes_nodes)\n\ttmp.task = self\n\ttmp.start(self.inputs[0])\n\tif Logs.verbose:\n\t\tLogs.debug('deps: deps for %r: %r; unresolved %r' % (self.inputs, tmp.nodes, tmp.names))\n\treturn (tmp.nodes, tmp.names)\n\n","repo_name":"RunarFreyr/waz","sub_path":"waf/waflib/extras/fc_scan.py","file_name":"fc_scan.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"}
+{"seq_id":"9172103870","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.python.keras.models import Sequential\r\nfrom tensorflow.python.keras.layers import Dense, Dropout\r\nfrom tensorflow.python.keras.optimizers import Adam, SGD, Adadelta\r\n\r\n\r\ndef preprocess():\r\n    \r\n    data_source_train = \"F:\\Gautam\\Tech Stuff\\Python Projects\\Datasets\\poker_training.csv\"\r\n    data_source_test = \"F:\\Gautam\\Tech Stuff\\Python Projects\\Datasets\\poker_testing.csv\"\r\n\r\n    data_train = pd.read_csv(data_source_train, index_col = False, header=None)\r\n    data_test = pd.read_csv(data_source_test, index_col = False, header=None)\r\n\r\n    data_train.columns=['S1', 'R1', 'S2', 'R2', 'S3', 'R3',\r\n       'S4', 'R4', 'S5', 'R5', 'CLASS']\r\n\r\n    data_train = pd.get_dummies(data_train, columns=['S1', 'R1', 'S2', 'R2', 'S3', 'R3',\r\n       'S4', 'R4', 'S5', 'R5', 'CLASS'])\r\n\r\n    data_test.columns=['S1', 'R1', 'S2', 'R2', 'S3', 'R3',\r\n       'S4', 'R4', 'S5', 'R5', 'CLASS']\r\n\r\n    data_test = pd.get_dummies(data_test, columns=['S1', 'R1', 'S2', 'R2', 'S3', 'R3',\r\n       'S4', 'R4', 'S5', 'R5', 'CLASS'])\r\n\r\n    data = pd.concat([data_train, data_test])\r\n    data = data.dropna(inplace=False)\r\n\r\n    x = data[data.columns[0:85]].values\r\n    y = data[data.columns[85:]].values\r\n\r\n    return x, y\r\n\r\n\r\ntrain_x, train_y = preprocess()\r\n\r\nmodel = Sequential()\r\n\r\nmodel.add(Dense(50, input_dim=85, activation='relu'))\r\nmodel.add(Dense(50, activation='relu'))\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nmodel.fit(train_x, train_y, verbose=2, batch_size=128, validation_split=0.1, epochs=100)\r\n","repo_name":"SingaFx/Neural-Networks","sub_path":"Poker/Poker_Training.py","file_name":"Poker_Training.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"12998327586","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug  5 12:47:57 2015\n\n@author: Rick\n Using Python2.7\n\"\"\"\n\nimport random as ran\nimport math as m\n\nclass blockchain:\n    def __init__(self):\n        self.greeting()\n        self.userCount = 25\n        # (0,4,4,10) is reserved for the main user0 in middle with 10 BitCoin\n        self.userInfo = [(0,4,4,10)]\n        self.records = [ [ ] for i in range(self.userCount+1)]\n        self.network = [ [' ' for i in range(0,9)] for j in range(0,9)]\n        self.generateRandomUsers()\n    \n    def greeting(self):\n        print(\"There are already 25 random BitCoin users in the network!\")\n        print(\"You are at the middle of the network with 10 BitCoins!\")\n    \n    def generateRandomUsers(self):\n        # put user0 into network\n        self.network[4][4] = \" 0\"\n        self.records[0].append(\"TRADE --- Initial coin %d\" % 10)\n        # put userRandom into network\n        for i in range(1,self.userCount+1):\n            # generate user id\n            if i<10:\n                uid = \" \" + str(i)\n            else:\n                uid = str(i)\n            pos = (4,4)\n            while self.network[pos[0]][pos[1]] != ' ':\n                pos = (ran.randint(0,8),ran.randint(0,8))\n            self.network[pos[0]][pos[1]] = uid\n            coin = ran.randint(1,10)\n            self.userInfo.append((i,pos[0],pos[1],coin))\n            self.records[i].append(\"TRADE --- Initial coin %d\" % coin)\n\n    def showNetwork(self):\n        print(\"User distribution in current network\")\n        for i in range(0,9):\n            print(self.network[i])    \n    \n    def showUserInfo(self):\n        print(\"Detailed user info\")\n        for i in range(len(self.userInfo)):\n            tup = self.userInfo[i]\n            print(\"User %d at (%d,%d) has coin %d\" \\\n                  %(tup[0],tup[1],tup[2],tup[3]))\n    \n    def checkUserRecord(self,no):\n        print(\"Record of user-%d is listed as follow:\" % no)\n        for rec in self.records[no]:\n            print(rec)\n        print(\"Current coin balance : %d\" % self.userInfo[no][3])\n    \n    def findNearby(self,no1,no2):\n        uno1 = self.userInfo[no1]\n        uno2 = self.userInfo[no2]\n        affectRegion = []\n        for user in self.userInfo:\n            if uno1 == user or uno2 == user:\n                continue\n            if (m.pow((user[1]-uno1[1]),2)+m.pow((user[2]-uno1[2]),2)<=9):\n                affectRegion.append(user[0])    \n        return affectRegion\n\n    def updateUser(self,no,amount):\n        old = self.userInfo[no]\n        new = (no,old[1],old[2],amount)\n        self.userInfo[no] = new\n    \n    def trade(self,u,no):\n        tradeType = raw_input(\"Trade type [S-Sell,B-Buy]: \")\n        if tradeType == \"S\":\n            limit = self.userInfo[u][3]\n        elif tradeType == \"B\":\n            limit = self.userInfo[no][3]\n        else:\n            print(\"Invalid option!\")\n            self.trade(u, no)\n            return\n        amount = int(raw_input(\"Valid trading amount [1~%d]: \" % limit))\n        source = self.userInfo[u]\n        target = self.userInfo[no]    \n        # Find the nearby users\n        print(\"Transaction completed!\")\n        vUsers = self.findNearby(u,no)\n        print(\"Verifications are sent to users:\")\n        print(vUsers)\n        # Update the balance and record\n        if tradeType == \"S\":\n            self.updateUser(0,source[3]-amount)\n            self.updateUser(no,target[3]+amount)\n            self.records[0].append(\"TRADE --- Sell %d to No.%d\" \\\n                                % (amount,no))\n            self.records[no].append(\"TRADE --- Buy %d from No.%d\" \\\n                                % (amount,u))\n            for vu in vUsers:\n                
self.records[vu].append(\"VERIFY --- C %d from No.%d to No.%d\" \\\n % (amount,u,no))\n else: # Buy\n self.updateUser(0,source[3]+amount)\n self.updateUser(no,target[3]-amount)\n self.records[0].append(\"TRADE --- Buy %d from No.%d\" \\\n % (amount,no))\n self.records[no].append(\"TRADE --- Sell %d to No.%d\" \\\n % (amount,u))\n for vu in vUsers:\n self.records[vu].append(\"VERIFY --- %d from No.%d to No.%d)\" \\\n % (amount,no,u)) \n \n def execute(self):\n while True:\n print(\"\\nAvaliable options:\")\n print(\" N - Show the current network\")\n print(\" A - Check info of all users\")\n print(\" C - Check the record of a single user\")\n print(\" T - Trade with another user\")\n print(\" E - Exit the system\")\n choice = raw_input(\"Select your choice (): \")\n if choice == \"N\":\n self.showNetwork()\n elif choice == \"A\":\n self.showUserInfo()\n elif choice == \"C\":\n no = int(raw_input(\"Select no. of target user [0~25]: \"))\n self.checkUserRecord(no)\n elif choice == \"T\":\n no = int(raw_input(\"Select no. of target user [1~25]: \"))\n self.trade(0,no)\n elif choice == \"E\":\n break\n else:\n print(\"Invalid option!\")\n \n \ndef illustrateBlockchain():\n bc = blockchain()\n bc.execute()\n \n \nif __name__ == \"__main__\":\n illustrateBlockchain()\n \n ","repo_name":"info-rick/proj-bitCoin","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"9761375265","text":"from ade import CLASSES\nfrom PIL import Image\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nclass DeepLabModel(object):\n \"\"\"Class to load deeplab model and run inference.\"\"\"\n\n INPUT_TENSOR_NAME = \"ImageTensor:0\"\n OUTPUT_TENSOR_NAME = \"SemanticPredictions:0\"\n INPUT_SIZE = 513\n FROZEN_GRAPH_NAME = \"frozen_inference_graph\"\n\n def __init__(self, frozen_graph_path):\n \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n self.graph = tf.Graph()\n\n graph_def = tf.compat.v1.GraphDef.FromString(\n open(frozen_graph_path, \"rb\").read()\n )\n # graph_def = None\n # # Extract frozen graph from tar archive.\n # tar_file = tarfile.open(tarball_path)\n # for tar_info in tar_file.getmembers():\n # if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):\n # file_handle = tar_file.extractfile(tar_info)\n # graph_def = tf.GraphDef.FromString(file_handle.read())\n # break\n\n # tar_file.close()\n\n if graph_def is None:\n raise RuntimeError(\"Cannot find inference graph in tar archive.\")\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name=\"\")\n\n self.sess = tf.compat.v1.Session(graph=self.graph)\n\n def run(self, image):\n \"\"\"Runs inference on a single image.\n\n Args:\n image: A PIL.Image object, raw input image.\n\n Returns:\n resized_image: RGB image resized from original input image.\n seg_map: Segmentation map of `resized_image`.\n \"\"\"\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert(\"RGB\").resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]},\n )\n seg_map = batch_seg_map[0]\n return resized_image, seg_map\n \n\nORI_CLASS2IDX = {k: i for i, k in enumerate(CLASSES)}\n\nCONSIDER_CLASSES = {\n \"building, edifice\": 1,\n \"house\": 1,\n \"skyscraper\": 1,\n 
\"car, auto, automobile, machine, motorcar\": 2,\n \"truck, motortruck\": 2,\n \"airplane, aeroplane, plane\": 3\n} # class to our new label indices\n\nIDX2CONSIDER_CLASS = {1: \"building\", 2: \"car+truck\", 3: \"plane\"}\nMODEL = DeepLabModel(\n \"deeplabv3_xception_ade20k_train/frozen_inference_graph.pb\"\n # \"deeplabv3_mnv2_ade20k_train_2018_12_03/frozen_inference_graph.pb\"\n)\n\ndef detect_object(img, x1, y1, x2, y2, max_n_objects=1):\n print(\"Image:\", img)\n x1 = float(x1)\n y1 = float(y1)\n x2 = float(x2)\n y2 = float(y2)\n img = img.crop((x1, y1, x2, y2))\n resized_im, seg_map = MODEL.run(img)\n\n filter_seg_map = np.zeros_like(seg_map, dtype=np.int32)\n for label in CONSIDER_CLASSES.keys():\n filter_seg_map[seg_map == ORI_CLASS2IDX[label]] = CONSIDER_CLASSES[label]\n boxes = get_largest_object_polygon(filter_seg_map, x1, y1, img.width, img.height, IDX2CONSIDER_CLASS, max_n_objects)\n \n # print(\"Type of box:\", type(box))\n return boxes\n\ndef create_pascal_label_colormap():\n \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n\n Returns:\n A Colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap\n\n\ndef label_to_color_image(label):\n \"\"\"Adds color defined by the dataset colormap to the label.\n\n Args:\n label: A 2D array with integer type, storing the segmentation label.\n\n Returns:\n result: A 2D array with floating type. The element of the array\n is the color indexed by the corresponding element in the input label\n to the PASCAL color map.\n\n Raises:\n ValueError: If label is not of rank 2 or its value is larger than color\n map maximum entry.\n \"\"\"\n if label.ndim != 2:\n raise ValueError(\"Expect 2-D input label\")\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError(\"label value too large.\")\n\n return colormap[label]\n\n\ndef get_largest_object_polygon(segment_img, origin_x, origin_y, im_width, im_height, IDX2CLASS, max_n_objects=2):\n vals = list(set(segment_img.flatten()))\n vals = [x for x in vals if x != 0]\n if len(vals) == 0:\n return {\n \"label\": \"\",\n \"shape_type\": \"\",\n \"points\": [],\n }\n\n # find largest object\n\n all_labels = []\n all_contours = []\n all_contours_areas = []\n for val in vals:\n mask = np.zeros_like(segment_img, dtype=np.uint8)\n # smooth image\n mask[segment_img == val] = 255\n kernel = np.ones((7, 7), np.float32) / 49\n mask = cv2.filter2D(mask, -1, kernel)\n mask[mask >= 127] = 255\n mask[mask < 127] = 0\n mask[mask == 255] = 1\n kernel = np.ones((3, 3), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n # end smooth image\n\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # contours = [x for x in contours if cv2.contourArea(x) > 20]\n contours_areas = [cv2.contourArea(x) for x in contours]\n all_contours.extend(contours)\n all_contours_areas.extend(contours_areas)\n all_labels.extend([val] * len(contours))\n\n boxes = []\n # all_contours_areas.sort(reverse=True)\n for i in range(max_n_objects):\n largest_ind = np.argmax(all_contours_areas)\n contour = all_contours[largest_ind]\n\n # smooth contour\n contour = contour.reshape(-1, 2)\n new_contour = [contour[0]]\n for i in range(1, len(contour)):\n prev_pt = new_contour[-1]\n dist = 
np.sqrt(np.sum((contour[i] - prev_pt) ** 2))\n if dist > 10:\n new_contour.append(contour[i])\n if len(new_contour) >= 3:\n contour = new_contour\n box = {\n \"label\": IDX2CLASS[all_labels[largest_ind]],\n \"shape_type\": \"polygon\",\n \"points\": [\n [\n int(origin_x + int(x * im_width / mask.shape[1])),\n int(origin_y + int(y * im_height / mask.shape[0])),\n ]\n for x, y in contour\n ],\n }\n\n boxes.append(box)\n\n # remove the previous largest contour_areas\n all_contours_areas[largest_ind] = -1\n\n return boxes","repo_name":"phkhanhtrinh23/qgis_object_detection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"15"} +{"seq_id":"33910172477","text":"str_input1 = input(\"Enter your favorite plant: \")\nstr2 = (\"Spathiphyllum\")\nstr3 = (\"spathiphyllum\")\nstr4 = (\"SPATHIPHYLLUM\")\n\n#comparing by ==\nif str_input1 == str2:\n print (\"Yes - Spathiphyllum is the best plant ever\")\n\nelif str_input1 == str3:\n print (\"No, I want a big Spathiphyllum!\")\n\nelif str_input1 == str4:\n print (\"No, I want a big Spathiphyllum!\")\n\n\nelse:\n print (\"No, I want a big Spathiphyllum!\")\n","repo_name":"jmockbee/Pythonpart7","sub_path":"july20.py","file_name":"july20.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"23927372570","text":"from enum import IntEnum, unique\n\nfrom libs.database import PivotReportRecord\n\n\n@unique\nclass PivotReportColumn(IntEnum):\n \"\"\"行情记录表栏目枚举\"\"\"\n # 次级回升\n SECONDARY_RALLY = 1\n # 自然回升\n NATURAL_RALLY = 2\n # 上升趋势\n UPWARD_TREND = 3\n # 下降趋势\n DOWNWARD_TREND = 4\n # 自然回撤\n NATURAL_REACTION = 5\n # 次级回撤\n SECONDARY_REACTION = 6\n\n\ndef update_historical_pivot_report(combination, default_column=PivotReportColumn.DOWNWARD_TREND):\n \"\"\"\n 为指定的Combination生成从特定时间点开始(默认为该Combination最早的组合价格数据的时间戳的时间),生成到最新的时间戳为止的日线行情记录表。\n - 本程序只能在休市日运行\n - 本程序需要在日线行情记录表的前至少运行一次\n :param combination: 待生成行情记录表的Combination对象\n :param default_column: 初始记录栏目,默认为下降趋势栏\n :return:\n \"\"\"\n pass\n\n\ndef price_need_record(price, pivot_report, default_column=PivotReportColumn.DOWNWARD_TREND):\n \"\"\"\n 判断价格是否需要记录,对应需求为:https://trello.com/c/zVFRorLF\n :param price: 待判断是否要记录的价格\n :param pivot_report: pivot_report编号,用于获取当前最新记录栏及最新的有记录价格\n :param default_column: 初始记录栏目,默认为下降趋势栏\n :return: True/False\n \"\"\"\n # 先判断是否有数据,有则取ts最大的\n # 没有则以默认栏计算\n latest_column = default_column\n latest_price = price\n # 取最新的且有记录的数据\n records = PivotReportRecord.select().where(PivotReportRecord.pivot_report == pivot_report,\n PivotReportRecord.is_recorded == True).order_by(\n PivotReportRecord.date.desc()).limit(1).execute()\n if len(records) > 0:\n record = records[0]\n try:\n latest_column = PivotReportColumn(record.recorded_column)\n except ValueError: # 值非法,使用默认值\n pass\n latest_price = record.price\n if latest_column == PivotReportColumn.NATURAL_RALLY:\n return True if price >= latest_price else False\n elif latest_column == PivotReportColumn.UPWARD_TREND:\n return True if price >= latest_price else False\n elif latest_column == PivotReportColumn.DOWNWARD_TREND:\n return False if price > latest_price else True\n elif latest_column == PivotReportColumn.NATURAL_REACTION:\n return False if price > latest_price else True\n elif latest_column == PivotReportColumn.SECONDARY_RALLY:\n return True if price >= latest_price else False\n elif latest_column == 
PivotReportColumn.SECONDARY_REACTION:\n return False if price > latest_price else True\n else:\n raise Exception(\"当前不支持的栏目逻辑:%s\" % str(latest_column))\n\ndef get_combination_symbol_timezone(combination_id):\n '''\n 根据combination id,获取组成该combination的symbol的所有时区列表,用于生成属于该时区对应的日线行情记录表。\n :param combination_id: 待获取时区的组合ID\n :return:{'combination_timezone':该组合ID去重后的时区列表}\n '''\n try:\n combination = Combination.get(Combination.id == combination_id)\n except:\n logger.error(\"缺少对应combination的行情数据:combo_id=%s\", combination_id)\n return None\n # 根据symbol ID到Tbl_symbol_method中获取对应的时区\n else:\n tz = []\n for symbolid in combination.symbol_list.split(\",\"):\n ts = Symbol.get(Symbol.id == int(symbolid))\n tz.append(ts.timezone)\n # 去除重复元素\n tz = list(set(tz))\n result = {'combination_timezone': tz}\n return result\n","repo_name":"jinwu12/l2","sub_path":"libs/gen_pivot_report.py","file_name":"gen_pivot_report.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"4918380061","text":"from Classes.command import Command\nfrom db import connection\nfrom psycopg2 import Error\nfrom telebot.types import Message\n\n\nclass TelegramUser:\n \"\"\"\n RU:\n Базовый класс, описывает пользователя\n\n Атрибуты:\n self.user_id: уникальный телеграмм-айди пользователя\n self.commands_list: список всех команд пользователя (объекты класса Command)\n self.cities: объект класса City с информацией о городе\n self.hotel: объект класса Hotel с информацией об отелях\n self.check_in: дата заезда\n self.check_out: дата выезда\n self.keyboard_control: статус текущего запроса\n EN:\n Base class, describes the user\n\n Attributes:\n self.user_id: unique telegram ID of the user\n self.commands_list: a list of all user commands (objects of the Command class)\n self.cities: an object of the City class with information about the city\n self.hotel: a Hotel class object with information about hotels\n self.check_in: check-in date\n self.check_out: departure date\n self.keyboard_control: status of the current request\n \"\"\"\n\n def __init__(self, message: Message):\n self.user_id = message\n self.cities = None\n self.hotel = None\n self.check_in = None\n self.check_out = None\n self.commands_list = list()\n self.keyboard_control = True\n\n @property\n def user_id(self):\n return self._user_id\n\n @user_id.setter\n def user_id(self, message: Message):\n \"\"\"\n RU:\n Добавление пользователя через его телеграмм-id, добавление пользователя в базу данных\n\n EN:\n Adding a user via his telegram-id, adding a user to the database\n\n :param message: object Message\n :return:\n \"\"\"\n with connection.cursor() as cursor:\n try:\n cursor.execute(\n \"INSERT INTO tg_user(user_id, user_name) VALUES ({tg_user_id}, '{tg_user_name}')\".format(\n tg_user_id=message.from_user.id,\n tg_user_name=message.from_user.first_name)\n )\n except Error:\n print(f'{message.from_user.first_name} зашёл на огонёк')\n\n self._user_id = message.from_user.id\n\n def log_command(self, command: Command):\n \"\"\"\n RU:\n Логирование команд пользователя, добавление команд в базу данных\n EN:\n Logging user commands, adding commands to the database\n\n :param command: object Command\n :return:\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(\n \"\"\"INSERT INTO command(command_name, command_date, command_time, fk_user_id) VALUES\n ('{command_name}', '{command_date}', '{command_time}', {user_id})\"\"\".format(\n command_name=command.name, 
command_date=command.command_date,\n command_time=command.command_time, user_id=self.user_id)\n )\n self.commands_list.append(command)\n\n\nclass TelegramUsers:\n \"\"\"\n RU:\n Базовый класс, описывающий список юзеров\n\n Атрибуты:\n self.__user_list - список id пользователей.\n EN:\n Base class describing the list of users\n\n Attributes:\n self.__user_list: list of user ids\n \"\"\"\n\n def __init__(self):\n self.__users_list = []\n\n def append_user(self, tg_user: TelegramUser) -> None:\n \"\"\"\n RU:\n Добавление пользователя в список пользователей\n EN:\n Adding a user to the list of users\n\n :param tg_user: object of TelegramUser\n :return:\n \"\"\"\n if tg_user not in self.__users_list:\n self.__users_list.append(tg_user)\n\n def get_user_from_id(self, user_id: int) -> False or TelegramUser:\n \"\"\"\n RU:\n Поиск пользователя по его id в списке пользователей\n EN:\n Search for a user by his id in the list of users\n\n :param user_id: user telegram-id\n :return: object of TelegramUser or False if TelegramUser is not\n \"\"\"\n for i_user in self.__users_list:\n if i_user.user_id == user_id:\n return i_user\n else:\n return False\n","repo_name":"777boeing777/hotel-og-bot","sub_path":"Classes/telegram_user.py","file_name":"telegram_user.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"16631382310","text":"__author__ = 'szeitlin'\n\nimport sys\n\ndef read_input():\n ''' Takes stdin, converts to a generator which will yield one line at a time.\n\n >>> read_input()\n 4\n >>> read_input()\n 7 4 5 2 3 -4 -3 -5\n\n :return:\n '''\n line = \"\"\n line_count = 0\n while True:\n try:\n line = sys.stdin.readline()\n line_count +=1\n except KeyboardInterrupt:\n break\n if not line:\n break\n\n yield line, line_count\n\n\n\ndef read_more():\n '''\n Fancy way to call the generator.\n :return:\n '''\n gen = read_input()\n return next(gen)\n\nread_input()\nread_more()\n","repo_name":"szeitlin/python-practice","sub_path":"hackerrank/read_input.py","file_name":"read_input.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"11494372797","text":"# 导入库\nimport numpy as np\nimport argparse\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\nfrom statistics import stdev\nimport math\nimport cv2\nimport os\nfrom xlwt import Workbook\nwb = Workbook()\nsheet1 = wb.add_sheet('Sheet 1')\npath = \"test/image/\"\ndir = \"test/mask/\"\nfiles = os.listdir(dir) #获得掩膜图片白色\nfiles.sort()\nflag =0\nfor i in (files):\n # if flag<3:\n if i==\".DS_Store\":\n continue\n image = cv2.imread(path + i)\n mask = cv2.imread(dir+i)\n mask = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)\n for j in range(512):\n for k in range(512):\n if mask[j][k]!=0:\n mask[j][k]=255\n print(path+i)\n masked = cv2.bitwise_and(image, image, mask=mask)\n hist_full = cv2.calcHist([image], [0], None, [256], [0, 256])\n hist_mask = cv2.calcHist([image], [0], mask, [256], [0, 256])\n # plt.subplot(221), plt.imshow(image, 'gray')\n # plt.subplot(222), plt.imshow(mask, 'gray')\n # plt.subplot(223), plt.imshow(masked, 'gray')\n # plt.subplot(224), plt.plot(hist_full, color='r'), plt.plot(hist_mask, color='b')\n # plt.xlim([0, 256])\n # plt.tight_layout()\n # plt.show()\n '''shb'''\n print(flag,hist_mask.mean(),hist_mask.var(),stats.skew(hist_mask)[0],stats.kurtosis(hist_mask)[0]\n ,stats.entropy(hist_mask)[0],np.std(hist_mask))\n\n 
sheet1.write(flag + 1, 0, i)\n sheet1.write(flag+1,1, str(hist_mask.mean()))#均值\n sheet1.write(flag + 1, 2, str(hist_mask.var()))#方差\n sheet1.write(flag + 1, 3, str(stats.skew(hist_mask)[0]))#偏度\n sheet1.write(flag + 1, 4, str(stats.kurtosis(hist_mask)[0]))#峰度\n sheet1.write(flag + 1, 5, str(stats.entropy(hist_mask)[0]))#熵\n sheet1.write(flag + 1, 6, str(np.std(hist_mask)))#标准差\n flag+=1\n # cv2.imwrite(\"test/mask/\"+images[int(img_index)],masked)\n\n #计算GLCM特征\n\nwb.save(\"data/mean.xls\")\n","repo_name":"Breeze1in1drizzle/MedicalImaging-Master","sub_path":"Unet-liverCT-master/Mask.py","file_name":"Mask.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"32171332087","text":"#!/usr/bin/python3\n\n# Configure a raw stream and capture an image from it.\nimport time\n\nfrom picamera2 import Picamera2, Preview\n\npicam2 = Picamera2()\npicam2.start_preview(Preview.QTGL)\n\npreview_config = picam2.create_preview_configuration(raw={\"size\": picam2.sensor_resolution})\nprint(preview_config)\npicam2.configure(preview_config)\n\npicam2.start()\ntime.sleep(2)\n\nraw = picam2.capture_array(\"raw\")\nprint(raw.shape)\nprint(picam2.stream_configuration(\"raw\"))\n","repo_name":"raspberrypi/picamera2","sub_path":"examples/raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"15"} +{"seq_id":"2938432271","text":"import tempfile\nimport subprocess\nimport time\nimport threading\nimport logging\nfrom .baseplatform import BasePlatform\n\nlogger = logging.getLogger(__name__)\n\nconst_setup = '''\\\necho 63 >/sys/class/gpio/export\necho out >/sys/class/gpio/gpio63/direction\necho 64 >/sys/class/gpio/export\necho out >/sys/class/gpio/gpio64/direction\necho 77 >/sys/class/gpio/export\necho in >/sys/class/gpio/gpio77/direction\n'''\nconst_cleanup = '''\\\necho 63 >/sys/class/gpio/unexport\necho 64 >/sys/class/gpio/unexport\necho 77 >/sys/class/gpio/unexport\n'''\n\ndef run_script(script):\n with tempfile.NamedTemporaryFile() as scriptfile:\n scriptfile.write(script)\n scriptfile.flush()\n return subprocess.check_output(['/bin/bash', scriptfile.name])\n\nclass ZedboardPlatform(BasePlatform):\n\n\tdef __init__(self, config, silent):\n\t\tsuper(ZedboardPlatform, self).__init__(config, silent, 'zedboard')\n\t\tself.trigger_thread = None\n\t\tself.started = 0\n\n\tdef setup(self):\n\t\tlogging.debug(\"setup\")\n\t\trun_script(const_setup)\n\t\tif self._silent:\n\t\t\trun_script('echo 78 >/sys/class/gpio/export')\n\t\t\trun_script('echo in >/sys/class/gpio/gpio78/direction')\n\t\n\tdef cleanup(self):\n\t\tlogging.debug(\"cleanup\")\n\t\trun_script(const_cleanup)\n\t\tif self._silent:\n\t\t\trun_script('echo 78 >/sys/class/gpio/unexport')\n\n\tdef indicate_failure(self):\n\t\tlogger.info(\"setup_failure\")\n\t\tfor _ in range(0, 5):\n\t\t\ttime.sleep(.1)\n\t\t\trun_script('echo 0 >/sys/class/gpio/gpio63/value')\n\t\t\ttime.sleep(.1)\n\t\t\trun_script('echo 1 >/sys/class/gpio/gpio63/value')\n\n\tdef indicate_success(self):\n\t\tlogger.info(\"setup_complete\")\n\t\tfor _ in range(0, 5):\n\t\t\ttime.sleep(.1)\n\t\t\trun_script('echo 0 >/sys/class/gpio/gpio64/value')\n\t\t\ttime.sleep(.1)\n\t\t\trun_script('echo 1 >/sys/class/gpio/gpio64/value')\n\n\tdef indicate_recording(self, state=True):\n\t\tlogger.info(\"indicate_recording_on %s\", state)\n\t\tif state:\n\t\t\trun_script('echo 1 
>/sys/class/gpio/gpio63/value')\n\t\telse:\n\t\t\trun_script('echo 0 >/sys/class/gpio/gpio63/value')\n\n\tdef indicate_playback(self, state=True):\n\t\tlogger.info(\"indicate_playback %s\", state)\n\t\tif state:\n\t\t\trun_script('echo 1 >/sys/class/gpio/gpio64/value')\n\t\telse:\n\t\t\trun_script('echo 0 >/sys/class/gpio/gpio64/value')\n\n\tdef indicate_processing(self, state=True):\n\t\tlogger.info(\"indicate_processing %s\", state)\n\t\tif state:\n\t\t\trun_script('echo 1 >/sys/class/gpio/gpio63/value')\n\t\t\trun_script('echo 1 >/sys/class/gpio/gpio64/value')\n\t\telse:\n\t\t\trun_script('echo 0 >/sys/class/gpio/gpio63/value')\n\t\t\trun_script('echo 0 >/sys/class/gpio/gpio64/value')\n\n\tdef after_setup(self, trigger_callback=None): \n\t\tlogger.info(\"after_setup\")\n\t\tself._trigger_callback = trigger_callback\n\t\tif self._trigger_callback:\n\t\t\t# threaded detection of button press\n\t\t\tself.trigger_thread = DesktopPlatformTriggerThread(self, trigger_callback)\n\t\t\tself.trigger_thread.setDaemon(True)\n\t\t\tself.trigger_thread.start()\n\n\tdef force_recording(self):\n\t\treturn time.time() - self.started < self._pconfig['min_seconds_to_record']\n\nclass DesktopPlatformTriggerThread(threading.Thread):\n\tdef __init__(self, platform, trigger_callback):\n\t\tthreading.Thread.__init__(self)\n\t\tself.platform = platform\n\t\tself._trigger_callback = trigger_callback\n\t\tself.should_run = True\n\n\tdef stop(self):\n\t\tself.should_run = False\n\n\tdef run(self):\n\t\twhile self.should_run:\n\t\t\tif self.platform._silent:\n\t\t\t\tpressed = run_script('cat /sys/class/gpio/gpio78/value')\n\t\t\t\twhile pressed[0] == '0':\n\t\t\t\t\tpressed = run_script('cat /sys/class/gpio/gpio78/value')\t\n\t\t\telse:\n\t\t\t\tpressed = run_script('cat /sys/class/gpio/gpio77/value')\n\t\t\t\twhile pressed[0] == '0':\n\t\t\t\t\tpressed = run_script('cat /sys/class/gpio/gpio77/value')\n\t\t\t# \t\t\t\t\t\n\t\t\tself.platform.started = time.time()\n\t\t\tif self._trigger_callback:\n\t\t\t\tself._trigger_callback(self.platform.force_recording)\n\n","repo_name":"ubriquejazz/AlexaPi","sub_path":"Alexa/src/alexapi/device_platforms/zedboardplatform.py","file_name":"zedboardplatform.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"39551212771","text":"import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\n\ndataset = pd.read_csv('zoo.csv', names=['animal_name', 'hair', 'feathers', 'eggs', 'milk', 'airbone', 'aquatic',\n                      'predator', 'toothed', 'backbone', 'breathes', 'venomous', 'fins', 'legs', 'tail', 'domestic', 'catsize', 'class', ])\ndataset = dataset.drop(columns=['animal_name'], axis=1)\n\ntrain_features = dataset.iloc[:80, :-1]\ntest_features = dataset.iloc[80:, :-1]\ntrain_targets = dataset.iloc[:80, -1]\ntest_targets = dataset.iloc[80:, -1]\ntree = DecisionTreeClassifier(criterion='entropy').fit(\n    train_features, train_targets)\nprediction = tree.predict(test_features)\nprint(\"The prediction accuracy is: \", tree.score(\n    test_features, test_targets)*100, \"%\")\n","repo_name":"VirangParekh/Sem6Pracs","sub_path":"DWM-Lab/ID3DecisionTree/ID3usingLib.py","file_name":"ID3usingLib.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"33112512158","text":"import json\nimport paramiko\nimport os\nimport pandas as pd\n\n\ndef create_temp_file(self):\n    filestr = '''\nfrom talos import RemoteScan\nimport numpy as np\nimport json\nimport pickle\n\nx=np.load('tmp/x_data_remote.npy')\ny=np.load('tmp/y_data_remote.npy')\n    \n{}\n\nwith open('tmp/arguments_remote.json','r') as f:\n    arguments_dict=json.load(f)\n    \nt=RemoteScan(x=x,\n             y=y,\n             params=arguments_dict['params'],\n             model={},\n             experiment_name=arguments_dict['experiment_name'],\n             x_val=arguments_dict['x_val'],\n             y_val=arguments_dict['y_val'],\n             val_split=arguments_dict['val_split'],\n             random_method=arguments_dict['random_method'],\n             seed=arguments_dict['seed'],\n             performance_target=arguments_dict['performance_target'],\n             fraction_limit=arguments_dict['fraction_limit'],\n             round_limit=arguments_dict['round_limit'],\n             time_limit=arguments_dict['time_limit'],\n             boolean_limit=arguments_dict['boolean_limit'],\n             reduction_method=arguments_dict['reduction_method'],\n             reduction_interval=arguments_dict['reduction_interval'],\n             reduction_window=arguments_dict['reduction_window'],\n             reduction_threshold=arguments_dict['reduction_threshold'],\n             reduction_metric=arguments_dict['reduction_metric'],\n             minimize_loss=arguments_dict['minimize_loss'],\n             disable_progress_bar=arguments_dict['disable_progress_bar'],\n             print_params=arguments_dict['print_params'],\n             clear_session=arguments_dict['clear_session'],\n             save_weights=arguments_dict['save_weights'],\n             config='tmp/remote_config.json'\n             )\n    '''.format(self.model_func, self.model_name)\n\n    with open(\"tmp/scanfile_remote.py\", \"w\") as f:\n        f.write(filestr)\n\n\ndef return_current_machine_id(self,):\n    ''' return machine id after checking the ip from config'''\n\n    current_machine_id = 0\n    if 'current_machine_id' in self.config_data.keys():\n        current_machine_id = int(self.config_data['current_machine_id'])\n\n    return current_machine_id\n\n\ndef return_central_machine_id(self):\n    ''' return central machine id as mentioned in config'''\n    central_id = 0\n    config_data = self.config_data\n    if 'database' in config_data.keys():\n        central_id = int(config_data['database']['DB_HOST_MACHINE_ID'])\n    return central_id\n\n\ndef read_config(self):\n    '''read config from file'''\n    with open('config.json', 'r') as f:\n        config_data = json.load(f)\n    return config_data\n\n\ndef write_config(self, new_config):\n    ''' write config to file'''\n    with open('config.json', 'w') as outfile:\n        json.dump(new_config, outfile, indent=2)\n\n\ndef ssh_connect(self):\n    '''\n    Returns\n    -------\n    clients | `dict` | Dict of connected client objects, keyed by machine_id.\n\n    '''\n    configs = self.config_data['machines']\n    clients = {}\n    for config in configs:\n        client = paramiko.SSHClient()\n        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        host = config['TALOS_IP_ADDRESS']\n        port = config['TALOS_PORT']\n        username = config['TALOS_USER']\n        if 'TALOS_PASSWORD' in config.keys():\n            password = config['TALOS_PASSWORD']\n            client.connect(host, port, username, password)\n        elif 'TALOS_KEY_FILENAME' in config.keys():\n            client.connect(\n                host, port, username, key_filename=config['TALOS_KEY_FILENAME']\n            )\n\n        clients[config['machine_id']] = client\n    return clients\n\n\ndef ssh_file_transfer(self, client, machine_id):\n    '''transfer the current talos script to the remote machines'''\n    create_temp_file(self)\n\n    sftp = client.open_sftp()\n\n    try:\n        sftp.chdir(self.dest_dir)  # Test if dest dir exists\n    except IOError:\n        sftp.mkdir(self.dest_dir)  # Create dest dir\n        sftp.chdir(self.dest_dir)\n\n    for file in os.listdir(\"tmp\"):\n        sftp.put(\"tmp/\"+file, file)\n\n    sftp.put('tmp/remote_config.json', 
'remote_config.json')\n sftp.close()\n\n\ndef ssh_run(self, client, machine_id):\n '''\n\n Parameters\n ----------\n client | `Object` | paramiko ssh client object\n params | `dict`| hyperparameter options\n machine_id | `int`| Machine id for each of the distribution machines\n\n Returns\n -------\n None.\n\n '''\n # Run the transmitted script remotely without args and show its output.\n # SSHClient.exec_command() returns the tuple (stdin,stdout,stderr)'''\n\n stdin, stdout, stderr = client.exec_command(\n 'python3 tmp/scanfile_remote.py')\n if stderr:\n for line in stderr:\n try:\n # Process each error line in the remote output\n print(line)\n except:\n print('Cannot Output error')\n\n for line in stdout:\n try:\n # Process each line in the remote output\n print(line)\n except:\n print('Cannot Output error')\n\n\ndef fetch_latest_file(self):\n\n # fetch the latest csv for an experiment'''\n\n experiment_name = self.experiment_name\n save_timestamp = self.save_timestamp\n\n if not os.path.exists(experiment_name):\n return []\n\n filelist = [\n os.path.join(experiment_name, i)\n for i in os.listdir(experiment_name)\n if i.endswith('.csv') and int(i.replace('.csv', '')) >= int(save_timestamp)\n ]\n\n if filelist:\n\n latest_filepath = max(filelist, key=os.path.getmtime)\n\n try:\n results_data = pd.read_csv(latest_filepath)\n except Exception as e:\n\n return []\n\n return results_data\n\n else:\n return []\n\n\ndef add_experiment_id(self, results_data, machine_id):\n\n # create hashmap for a dataframe and use it for experiment id'''\n\n results_data = results_data.drop(['experiment_id'], axis=1, errors='ignore')\n results_data['experiment_id'] = pd.util.hash_pandas_object(results_data)\n results_data['experiment_id'] = [\n str(i) + '_machine_id_' + str(machine_id)\n for i in results_data['experiment_id']\n ]\n return results_data\n","repo_name":"abhijithneilabraham/talos","sub_path":"talos/distribute/distribute_utils.py","file_name":"distribute_utils.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"15"} +{"seq_id":"30362751116","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 9 10:30:33 2022\n\n@author: fedib\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# import dataset\ndataset=pd.read_csv(\"C:/STUDY/CIII/Deep learning/tp/tp2/Dataset_spine.csv\")\n\ndataset=dataset.drop(['Unnamed: 13'], axis=1) # drop column unnamed 13 because its unnamed with random info\n\ndataset.describe()\nlabel=dataset['Class_att']\ndata =dataset.drop(['Class_att'], axis=1)\n\n\ntrain_data,test_data,train_label,test_label=train_test_split(data,label,test_size = 0.33,random_state = 0)\n\nfrom sklearn.neural_network import MLPClassifier\n\nclf=MLPClassifier(activation='logistic',hidden_layer_sizes=(50,50,50),max_iter=500,solver='adam',random_state=0)\nclf.fit(train_data,train_label)\npred=clf.predict(test_data)\nACC=accuracy_score(test_label, pred)*100\nACC\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"EX2\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nfrom sklearn import datasets\niris = datasets.load_iris()\ndata = iris.data\nlabel = iris.target\n\ntrain_data,test_data,train_label,test_label=train_test_split(data,label,test_size = 0.33,random_state = 0)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers 
import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras import losses\n\nmodel= Sequential()\nmodel.add(Dense(1000,input_dim=4,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(300,activation='relu'))\nmodel.add(Dense(3,activation='softmax'))\nmodel.compile('adam',loss=losses.sparse_categorical_crossentropy,metrics=['accuracy'])\nmodel.fit(train_data,train_label,batch_size=16,epochs=10,validation_data=(test_data,test_label))\n\n\n","repo_name":"dramajohns/Multi-layer-perceptron","sub_path":"Spine.py","file_name":"Spine.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"24939458263","text":"#!/usr/bin/env python2.7\nimport sys\nimport re\nimport lib\nenv = lib.init()\nenv.check_assets_version()\n\nbranch = lib.output([\"git\", \"branch\"]).rstrip()\nall_args = sys.argv[1:]\n\nif branch != \"* master\":\n print(\"Not on master, aborting.\")\n lib.call([\"git\", \"commit\", \"-a\", \"--amend\"] + all_args)\n exit()\n\nlib.call([env.script(\"presubmit.py\")])\n\ntime = lib.output([\"date\", \"+%Y-%m-%d %H:%M\"])\nkey = lib.output([\"md5\", \"-q\", \"-s\", time]).rstrip()\nprevious_message = lib.output([\"git\", \"log\", \"-1\", \"--pretty=%B\"]).rstrip()\nwithout_key = re.sub(r\"KEY:.*\", \"\", previous_message).rstrip()\n\nprint(\"Creating commit...\")\nlib.call(\n [\"git\", \"commit\"] +\n all_args +\n [\"-a\", \"--amend\", \"-m\", without_key, \"-m\", \"KEY: \" + key]\n)\n","repo_name":"thurn/dungeonstrike","sub_path":"scripts/git_amend_head.py","file_name":"git_amend_head.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"8513538161","text":"import matplotlib\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\nimport math\nimport corridor\n\n\nmatplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'font.size': '10',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n 'figure.autolayout': True,\n # 'figure.figsize': [7, 4],\n 'axes.titlesize': 'medium',\n 'xtick.labelsize': 'small',\n 'ytick.labelsize': 'small',\n 'legend.fontsize': 'x-small',\n 'legend.title_fontsize': 'small',\n # 'axes.labelsize': 'small',\n})\n\n# 3D plot\nfig = plt.figure(figsize=plt.figaspect(0.5))\n# set up the axes for the first plot\nax_sigma_l = fig.add_subplot(1, 2, 1, projection='3d')\nax_length = fig.add_subplot(1, 2, 2, projection='3d')\n\n\n# Define plain features of the object\nfeatures = corridor.CorridorAssignmentFeature()\nfeatures.corridor_length = 10\nfeatures.l = 0 # <-- X\nfeatures.sigma_l = 0.5 # <-- variable\nfeatures.obj_length_ratio = 0.2 # <-- variable\n\n# Details\nl_obj = features.corridor_length * features.obj_length_ratio\nx_min = -2*l_obj\nl_c = features.corridor_length\nx_max = l_c - x_min\n\n# problem parameters\nn_l = 1000\nn_sd = 1000\nn_w = 1000\n\nl = np.linspace(x_min, x_max, n_l,)\nsigma_l = np.linspace(0.01, 10, n_sd,)\nw = np.linspace(0.0, 2, n_w,)\n\n# variable sigma d\nxx_sd = np.zeros((n_l, n_sd), dtype='d')\nyy_sd = np.zeros((n_l, n_sd), dtype='d')\nzz_sd = np.zeros((n_l, n_sd), dtype='d')\n\n# variable relative object width\nxx_w = np.zeros((n_l, n_w), dtype='d')\nyy_w = np.zeros((n_l, n_w), dtype='d')\nzz_w = np.zeros((n_l, n_w), dtype='d')\n\n# Details\n# populate x,y,z arrays\n
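# Editor's note: the nested loops below fill the grids point by point; a vectorized\n# equivalent (numpy only, illustrative) would be xx_sd, yy_sd = np.meshgrid(l, sigma_l, indexing='ij'),\n# but the loop form is kept because the confidence call takes one feature struct at a time.\n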
for i in range(n_l):\n # variable sigma_l\n features.obj_length_ratio = 0.2\n for j in range(n_sd):\n xx_sd[i, j] = l[i]\n yy_sd[i, j] = sigma_l[j]\n features.l = l[i]\n features.sigma_l = sigma_l[j]\n zz_sd[i, j] = corridor.LongitudinalAssignmentConfidence(features)\n # variable width\n features.sigma_l = 0.5\n for j in range(n_w):\n xx_w[i, j] = l[i]\n yy_w[i, j] = w[j]\n features.l = l[i]\n features.obj_length_ratio = w[j]\n zz_w[i, j] = corridor.LongitudinalAssignmentConfidence(features)\n\nsurf_sd = ax_sigma_l.plot_surface(xx_sd, yy_sd, zz_sd, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\nsurf_w = ax_length.plot_surface(xx_w, yy_w, zz_w, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\nax_sigma_l.set_xlabel('longitudinal position $l_{r}$ [m]')\nax_sigma_l.set_ylabel('standard deviation $\\sigma_{l}$ [m]')\nax_sigma_l.set_zlabel('assignment confidence')\n\nax_length.set_xlabel('longitudinal position $l_{r}$ [m]')\nax_length.set_ylabel('object length ratio $\\hat{l}_{obj}/l_{corr}$')\nax_length.set_zlabel('assignment confidence')\n\n# cset = ax.contour(xx_sd, yy_sd, zz_sd, zdir='x', cmap=cm.coolwarm)\n# cset = ax.contour(xx_sd, yy_sd, zz_sd, zdir='y', cmap=cm.coolwarm)\n\n# c = ax.pcolormesh(xx_sd, yy_sd, zz_sd, cmap='RdBu')\n# plt.savefig(\n# '/home/dsp/Pictures/Matplotlib_PGFs/CorridorAssignment.pdf', bbox_inches='tight')\nplt.savefig(\n '/tmp/LongitudinalAssignmentConfidence.pdf')\nplt.show()\n","repo_name":"dspetrich/corridor","sub_path":"python_api/scripts/main_longitudinal_assignment_confidence.py","file_name":"main_longitudinal_assignment_confidence.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"15"} +{"seq_id":"30586144677","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Ang Ming Liang\n\nPlease run the following command before running the script\n\nwget -q https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py\nor curl https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py > data.py\n\nThen, make sure to get your kaggle.json from kaggle.com then run \n\nmkdir /root/.kaggle \ncp kaggle.json /root/.kaggle/kaggle.json\nchmod 600 /root/.kaggle/kaggle.json\nrm kaggle.json\n\nto copy kaggle.json into a folder first \n\"\"\"\n\nimport superimport\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom pytorch_lightning import LightningModule, Trainer\nfrom data import CelebADataModule\n\n\nIMAGE_SIZE = 64\nBATCH_SIZE = 256\nCROP = 128\nDATA_PATH = \"kaggle\"\n\ntrans = []\ntrans.append(transforms.RandomHorizontalFlip())\nif CROP > 0:\n trans.append(transforms.CenterCrop(CROP))\ntrans.append(transforms.Resize(IMAGE_SIZE))\ntrans.append(transforms.ToTensor())\ntransform = transforms.Compose(trans)\n \n
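# Editor's note: with these settings the pipeline above center-crops each CelebA frame\n# to 128 px and downscales it, so transform(img) yields a (3, 64, 64) float tensor in\n# [0, 1]; e.g. x = transform(PIL.Image.open(path)) (illustrative, PIL import assumed).\nclass AE(LightningModule):\n \"\"\"\n Standard VAE with Gaussian Prior and approx posterior.\n \"\"\"\n\n def __init__(\n self,\n input_height: int,\n enc_type: str = 'resnet18',\n first_conv: bool = False,\n maxpool1: bool = False,\n hidden_dims = None,\n in_channels = 3,\n enc_out_dim: int = 512,\n kl_coeff: float = 0.1,\n latent_dim: int = 256,\n lr: float = 1e-4,\n **kwargs\n ):\n \"\"\"\n Args:\n input_height: height of the images\n enc_type: option between resnet18 or resnet50\n first_conv: use standard kernel_size 7, stride 2 at start or\n replace it with kernel_size 3, stride 1 conv\n maxpool1: use standard maxpool to reduce spatial 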
dim of feat by a factor of 2\n enc_out_dim: set according to the out_channel count of\n encoder used (512 for resnet18, 2048 for resnet50)\n kl_coeff: coefficient for kl term of the loss\n latent_dim: dim of latent space\n lr: learning rate for Adam\n \"\"\"\n\n super(AE, self).__init__()\n\n self.save_hyperparameters()\n\n self.lr = lr\n self.kl_coeff = kl_coeff\n self.enc_out_dim = enc_out_dim\n self.latent_dim = latent_dim\n self.input_height = input_height\n\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Conv2d(in_channels, out_channels=h_dim,\n kernel_size= 3, stride= 2, padding = 1),\n nn.BatchNorm2d(h_dim),\n nn.LeakyReLU())\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)\n self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)\n\n # Build Decoder\n modules = []\n\n self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)\n\n hidden_dims.reverse()\n\n for i in range(len(hidden_dims) - 1):\n modules.append(\n nn.Sequential(\n nn.ConvTranspose2d(hidden_dims[i],\n hidden_dims[i + 1],\n kernel_size=3,\n stride = 2,\n padding=1,\n output_padding=1),\n nn.BatchNorm2d(hidden_dims[i + 1]),\n nn.LeakyReLU())\n )\n\n self.decoder = nn.Sequential(*modules)\n\n self.final_layer = nn.Sequential(\n nn.ConvTranspose2d(hidden_dims[-1],\n hidden_dims[-1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1),\n nn.BatchNorm2d(hidden_dims[-1]),\n nn.LeakyReLU(),\n nn.Conv2d(hidden_dims[-1], out_channels= 3,\n kernel_size= 3, padding= 1),\n nn.Sigmoid())\n\n @staticmethod\n def pretrained_weights_available():\n return list(AE.pretrained_urls.keys())\n\n def from_pretrained(self, checkpoint_name):\n if checkpoint_name not in AE.pretrained_urls:\n raise KeyError(str(checkpoint_name) + ' not present in pretrained weights.')\n\n return self.load_from_checkpoint(AE.pretrained_urls[checkpoint_name], strict=False)\n\n def encode(self, x):\n x = self.encoder(x)\n x = torch.flatten(x, start_dim=1)\n mu = self.fc_mu(x)\n return mu\n\n def decode(self, z):\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result)\n return result\n\n def forward(self, x):\n z = self.encode(x)\n return self.decode(z)\n\n def step(self, batch, batch_idx):\n x, y = batch\n x_hat= self(x)\n\n loss = F.mse_loss(x_hat, x, reduction='mean')\n\n logs = {\n \"loss\": loss,\n }\n return loss, logs\n\n def training_step(self, batch, batch_idx):\n loss, logs = self.step(batch, batch_idx)\n self.log_dict({f\"train_{k}\": v for k, v in logs.items()}, on_step=True, on_epoch=False)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss, logs = self.step(batch, batch_idx)\n self.log_dict({f\"val_{k}\": v for k, v in logs.items()})\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.lr)\n\nif __name__ == \"__main__\":\n m = AE(input_height=IMAGE_SIZE)\n runner = Trainer(gpus = 2,gradient_clip_val=0.5,\n max_epochs = 15)\n dm = CelebADataModule(data_dir=DATA_PATH,\n target_type='attr',\n train_transform=transform,\n val_transform=transform,\n download=True,\n batch_size=BATCH_SIZE,\n num_workers=3)\n runner.fit(m, datamodule=dm)\n torch.save(m.state_dict(), 
\"ae-celeba-latent-dim-256.ckpt\")\n","repo_name":"probml/pyprobml","sub_path":"deprecated/scripts/ae_celeba_lightning.py","file_name":"ae_celeba_lightning.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","stars":6015,"dataset":"github-code","pt":"15"} +{"seq_id":"20125805294","text":"import random\n\nclass Deck:\n# Создаем колоду\n def __init__(self):\n self._suits = [\"\\u2660\", \"\\u2665\", \"\\u2663\", \"\\u2666\"]\n self._cards = [\"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\n self.deck={self._cards[i]: i for i in range(len(self._cards))}\n _mixed_deck=[]\n self.all_deck = {}\n for i in range(len(self._suits)):\n for key, values in self.deck.items():\n key = self._suits[i] + key\n self.all_deck[key] = values\n\n# Перемешиваем колоду\n def mix_deck(self):\n self._mixed_deck=list(self.all_deck)\n random.shuffle(self._mixed_deck)\n\n# Раздаем карты\n def hand_cards(self):\n self.player_hand = self._mixed_deck[-6:]\n self._comp_hand = self._mixed_deck[-12:-6]\n del self._mixed_deck[-12:]\n\n# Кто ходит первым\n def first_turn(self):\n if random.randint(1, 2) == 1:\n print('Ваш ход')\n return 'Ваш ход'\n else:\n print('Ход компьютера')\n return 'Ход компьютера'\n\n# Ход игрока\n def pl_turn(self):\n print('У вас на руках такие карты:\\n{}'.format(self.player_hand))\n self.player_card = []\n self.player_hand_len = []\n for i in range(len(self.player_hand)):\n self.player_hand_len.append(i)\n self.player_choice = int(input('Какой картой пойдете? Введите номер: {}'.format(self.player_hand_len)))\n self.player_card.append(self.player_hand[self.player_choice])\n self.player_hand.remove(self.player_hand[self.player_choice])\n\n# Ответ компьютера\n def computer_answer(self):\n self.answer_cards = []\n for card in self._comp_hand:\n if (card[0:1] == self.player_card[0][0:1]):\n if self.all_deck[card] > self.all_deck[self.player_card[0]]:\n self.answer_cards.append(card)\n\n if len(self.answer_cards)>0:\n self.comp_move = self.answer_cards[0]\n self._comp_hand.remove(self.comp_move)\n print('Ваша карта бита {}.'.format(self.comp_move))\n else:\n print('Компьютер взял.')\n self._comp_hand.append(self.player_card)\n\n# Добор карт\n def new_cards(self):\n if (len(self._comp_hand) < 6) and (len(self._mixed_deck) > 0):\n self._comp_hand.append(self._mixed_deck[-1])\n del self._mixed_deck[-1]\n\n print('Карту компьютеру. Осталось карт в колоде: {}'.format(len(self._mixed_deck)))\n print('Всего карт у компьютера: {}'.format(len(self._comp_hand)))\n\n if (len(self.player_hand) < 6) and (len(self._mixed_deck) > 0):\n self.player_hand.append(self._mixed_deck[-1])\n del self._mixed_deck[-1]\n print('Карту вам. 
# Drawing cards\n def new_cards(self):\n if (len(self._comp_hand) < 6) and (len(self._mixed_deck) > 0):\n self._comp_hand.append(self._mixed_deck[-1])\n del self._mixed_deck[-1]\n\n print('A card for the computer. Cards left in the deck: {}'.format(len(self._mixed_deck)))\n print('The computer now holds {} cards'.format(len(self._comp_hand)))\n\n if (len(self.player_hand) < 6) and (len(self._mixed_deck) > 0):\n self.player_hand.append(self._mixed_deck[-1])\n del self._mixed_deck[-1]\n print('A card for you. Cards left in the deck: {}'.format(len(self._mixed_deck)))\n print('Your cards: {}'.format(self.player_hand))\n\n# Next move\n def computer_move(self):\n\n if len(self._comp_hand) > 0:\n self.min_comp_card = 20\n self.comp_move = ''\n for card in range(len(self._comp_hand)):\n self.comp_move = str(self._comp_hand[card])\n self._comp_hand.remove(self.comp_move)\n print('Computer plays: {}'.format(self.comp_move))\n\n# Answer\n def player_answer(self):\n while True:\n print('Your cards:\\n{}'.format(self.player_hand))\n player__answer_hand_len = []\n for i in range(len(self.player_hand)):\n player__answer_hand_len.append(i + 1)\n player_answer_cards = int(input('Which card will you use to beat the computer?\\nEnter 0 to take the card.'.format(player__answer_hand_len)))\n if player_answer_cards == 0:\n self.player_hand.append(self.comp_move)\n print('You took it. \\nYour cards: {}'.format(len(self.player_hand), self.player_choice))\n return 'Computer moves'\n\n def is_winner(self):\n if len(self.player_hand) == 0:\n return 'The player won'\n if len(self._comp_hand) == 0:\n return 'The computer won'\n\ndeck = Deck()\n\nif __name__ == '__main__':\n deck = Deck()\n deck.mix_deck()\n deck.hand_cards()\n deck.first_turn()\n deck.pl_turn()\n deck.computer_answer()\n deck.new_cards()\n deck.computer_move()\n deck.player_answer()\n deck.is_winner()\n","repo_name":"TatyanaKuleshova/lesson9","sub_path":"game_fool.py","file_name":"game_fool.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"9371102090","text":"while True: \n user_string=input('''Please Type anything:''')\n while True:\n try:\n num1 = int(input(\"\\nEnter number(s) for copies:\"))\n except ValueError:\n print('''\"\\nPlease enter only numbers\"''')\n # can also use \"num1=abs(num1)\"\n else:\n if num1 < 0 :\n print('''\\n\"Please enter positive number(s) only\"''')\n else:\n break\n new_string=user_string * num1\n print(new_string)\n while True:\n Repeat=input(\"\\nDo you want to repeat?\\n\\nYes or No:\")\n Repeat=Repeat.lower()\n if Repeat not in [\"yes\",\"y\",\"no\",\"n\"]:\n print(\"\\nPlease select a correct option\")\n else:\n break\n \n \n if Repeat in [\"yes\",\"y\"]:\n continue\n else:\n if Repeat in [\"no\",\"n\"]:\n print(\"\\n-----Thank you for using-----\")\n input()\n break","repo_name":"Waseem6409/PIAIC","sub_path":"Copies of String.py","file_name":"Copies of String.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"37410801193","text":"from django.contrib.gis.db import models\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Given unique look-up attributes, and extra data attributes,\n# either updates the entry referred to if it exists, or\n# creates it if it doesn't.\n# Returns string describing what has happened.\ndef update_or_create(self, filter_attrs, attrs):\n try:\n obj = self.get(**filter_attrs)\n changed = False\n for k, v in attrs.items():\n if obj.__dict__[k] != v:\n changed = True\n obj.__dict__[k] = v\n if changed:\n obj.save()\n return 'updated'\n return 'unchanged'\n except ObjectDoesNotExist:\n attrs.update(filter_attrs)\n self.create(**attrs)\n return 'created'\n\nclass GeoManager(models.GeoManager):\n def update_or_create(self, filter_attrs, attrs):\n return update_or_create(self, filter_attrs, attrs)\n\n
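# Editor's sketch (model name illustrative): on a model whose manager subclasses one of\n# these, status = Area.objects.update_or_create({'code': 'X1'}, {'name': 'Example'})\n# returns 'created', 'updated' or 'unchanged'.\nclass Manager(models.Manager):\n def update_or_create(self, filter_attrs, attrs):\n return 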
update_or_create(self, filter_attrs, attrs)\n\n","repo_name":"michaelmcandrew/mapit","sub_path":"pylib/mapit/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"15"} +{"seq_id":"38117265041","text":"from logger import logger\nfrom annotation_utils.ndds.structs import NDDS_Dataset\nfrom annotation_utils.coco.structs import COCO_Category_Handler\nfrom annotation_utils.coco.dataset_specific import Measure_COCO_Dataset\nfrom common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir\n\n# Load NDDS Dataset\nndds_dataset = NDDS_Dataset.load_from_dir(\n json_dir='/home/clayton/workspace/prj/data_keep/data/ndds/measure_kume_map3_1_200',\n show_pbar=True\n)\n\n# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)\ntarget_obj_type = 'seg'\n\nfor frame in ndds_dataset.frames:\n # Fix Naming Convention\n for ann_obj in frame.ndds_ann.objects:\n # Note: Part numbers should be specified in the obj_type string.\n if ann_obj.class_name == 'measure':\n obj_type, obj_name = target_obj_type, 'measure'\n ann_obj.class_name = f'{obj_type}_{obj_name}'\n elif ann_obj.class_name.startswith('num_'):\n temp = ann_obj.class_name.replace('num_', '')\n temp_parts = temp.split('_')\n if len(temp_parts) == 2:\n obj_type, obj_name, instance_name = target_obj_type, temp_parts[1], temp_parts[0]\n ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'\n elif len(temp_parts) == 3:\n obj_type, obj_name, instance_name = target_obj_type, f'{temp_parts[1]}part{temp_parts[2]}', '0'\n ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'\n else:\n raise Exception\n\n# Convert To COCO Dataset\ndataset = Measure_COCO_Dataset.from_ndds(\n ndds_dataset=ndds_dataset,\n categories=COCO_Category_Handler.load_from_path('/home/clayton/workspace/prj/data_keep/data/ndds/categories/measure_all.json'),\n naming_rule='type_object_instance_contained', delimiter='_',\n ignore_unspecified_categories=True,\n show_pbar=True,\n bbox_area_threshold=1,\n default_visibility_threshold=0.10,\n visibility_threshold_dict={'measure': 0.01},\n allow_unfound_seg=True,\n class_merge_map={\n 'mark_10th_place': 'seg_measure',\n 'marking_bottom': 'seg_measure',\n 'marking_top': 'seg_measure',\n 'hook': 'seg_measure'\n }\n)\n\n# Output Directories\nmake_dir_if_not_exists('measure_coco')\ndelete_all_files_in_dir('measure_coco')\n\nmeasure_dir = 'measure_coco/measure'\nwhole_number_dir = 'measure_coco/whole_number'\ndigit_dir = 'measure_coco/digit'\njson_output_filename = 'output.json'\n\nmeasure_dataset, whole_number_dataset, digit_dataset = dataset.split_measure_dataset(\n measure_dir=measure_dir,\n whole_number_dir=whole_number_dir,\n digit_dir=digit_dir,\n allow_no_measures=True,\n allow_missing_parts=True\n)\n\nif False: # Change to True if you want to remove all segmentation from the measure dataset.\n from common_utils.common_types.segmentation import Segmentation\n for coco_ann in measure_dataset.annotations:\n coco_ann.seg = Segmentation()\n\nmeasure_dataset.display_preview(show_details=True, window_name='Measure Dataset Preview')\nlogger.info(f'Saving Measure Dataset')\nmeasure_dataset.save_to_path(f'{measure_dir}/{json_output_filename}', overwrite=True)\n\nwhole_number_dataset.display_preview(show_details=True, window_name='Whole Number Dataset Preview')\nlogger.info(f'Saving Whole Number 
Dataset')\nwhole_number_dataset.save_to_path(f'{whole_number_dir}/{json_output_filename}', overwrite=True)\n\nif False: # For debugging 2-digit digit annotations\n del_ann_id_list = []\n for coco_image in digit_dataset.images:\n anns = digit_dataset.annotations.get_annotations_from_imgIds([coco_image.id])\n if len(anns) == 1:\n del_ann_id_list.append(anns[0].id)\n digit_dataset.annotations.remove(del_ann_id_list)\n digit_dataset.images.remove_if_no_anns(\n ann_handler=digit_dataset.annotations,\n license_handler=digit_dataset.licenses,\n verbose=True\n )\n\ndigit_dataset.display_preview(show_details=True, window_name='Digit Dataset Preview')\nlogger.info(f'Saving Digit Dataset')\ndigit_dataset.save_to_path(f'{digit_dir}/{json_output_filename}', overwrite=True)","repo_name":"cm107/annotation_utils","sub_path":"test/ndds2coco/measure_ndds2coco_with_merged_is.py","file_name":"measure_ndds2coco_with_merged_is.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"15"} +{"seq_id":"17416738670","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass TextComponent:\n string: str\n\n changed = False\n original = \"\"\n\n def style(self, style):\n styles = {\n \"bold\": \"**\",\n \"italic\": \"*\",\n \"mono\": \"`\",\n \"strike\": \"~~\",\n \"underline\": \"_\"\n }\n self.changed = True\n self.original = self.string\n self.string = styles[style] + str(self.string) + styles[style]\n return self\n\n\nclass BuildComponent:\n\n def __init__(self, *args: [str or TextComponent]):\n result = \"\"\n for arg in args:\n if type(arg) == TextComponent:\n result += arg.string\n else:\n result += arg\n self.result = result\n\n def get(self):\n return self.result\n","repo_name":"nik-1x/pytools","sub_path":"socials/Telegram.py","file_name":"Telegram.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"17754269203","text":"from rest_framework.response import Response\nfrom rest_framework.renderers import JSONRenderer\nimport jwt\nfrom api.models.user import User\nimport datetime\n\n\nclass RequestMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if (request.path.find('forgotpassword') == -1 and request.path.find('signin') == -1 and request.path.find('signup') == -1):\n authHeader = request.META.get('HTTP_AUTHORIZATION')\n if (authHeader == None):\n return self.unauthorized_response(request, 'Authorization token is not provided.')\n\n if authHeader.find('Bearer') > -1:\n token = authHeader.split(\" \")[1]\n else:\n return self.unauthorized_response(request, 'Invalid Token.')\n\n payload = jwt.decode(token, 'speakSuperJWTSecret')\n userId = payload.get('userId')\n\n try:\n user = User.objects.get(id=userId)\n except User.DoesNotExist:\n return Response(data=\"A user with this email could not be found.\", status=400)\n\n request.userId = userId\n\n return self.get_response(request)\n\n def unauthorized_response(self, request, message):\n response = Response(\n {\"data\": message},\n content_type=\"application/json\",\n status=401,\n )\n response.accepted_renderer = JSONRenderer()\n response.accepted_media_type = \"application/json\"\n response.renderer_context = {}\n\n return 
response\n","repo_name":"bhavesh1974/RestAPI","sub_path":"Pythonserver/api/requestmiddleware.py","file_name":"requestmiddleware.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"11344983738","text":"import cv2 as cv\n\n#-----------------------------Abri camara desde python----------------------------------#\n#Se pone 0 si es una camara de pc, 1 si es una camara externa como de vigilancia\ncapturaVideo=cv.VideoCapture(0)\nif not capturaVideo.isOpened():\n print(\"No se encontro camara\")\n exit()\n#Si el valor es verdaderon entonces...\nwhile True:\n #encuentra la camara mientras esta activada\n tipocamara,camara = capturaVideo.read()\n #muestra la visualizacion de la camara\n cv.imshow(\"camara on\", camara)\n\n gris = cv.cvtColor(camara, cv.COLOR_BGR2GRAY)\n cv.imshow(\"camara on\", gris)\n\n #Ya como es un video entonces se pone 1, y se cierra con la tecla Q\n #no se cierra ni dandole a X\n if cv.waitKey(1) == ord(\"q\"):\n break\n#------------------------------------------------------------------------------#\n\n\n\n\n\ncapturaVideo.release()\ncv.destroyAllWindows()","repo_name":"Jhonierk/Programacion","sub_path":"Udemy/Python/2. Python para no matematicos/3. Mostrar camara desde python/MostrarCamara.py","file_name":"MostrarCamara.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"10435021211","text":"########################### James Hong and Alec Plante | 3.20.21 ###########################\r\n\r\n################################# Import the needed packages #################################\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport numpy\r\n\r\n\r\n################################# use requests and bs4 to get soup #################################\r\nURL = \"https://www.pro-football-reference.com/years/2020/passing.htm\" ## This is the website that we will be extracting data from\r\npage = requests.get(URL) ## Load the page\r\nsoup = BeautifulSoup(page.content, \"html.parser\") ## Extract the page into the soup, which is one large list of 5 strings \r\n\r\n\r\n\r\n\r\n################################# Take the data from the soup and extract the needed info #################################\r\n\r\n# rows = soup.find_all('tr') #Gives all of the rows in the data set\r\n ## Not used because we do not want the thead rows\r\n\r\nrows = [] ## This list is to store the data from each row, with all of the given row data in 1 single index\r\nfor z in range(len(soup.find_all('tr'))): ## For all of the rows\r\n if (z != 30 and z != 92 and z != 61): ## Get rid of the rows that we do not want [normally I wouldn't hardcode this, but there is a time crunch, and this is only being used once]\r\n rows.append(soup.select(\"tr\")[z])\r\n\r\n\r\n\r\n\r\n################################# Set up the array #################################\r\n\r\nncol = 30 ## number of columns not including the index\r\ndata = [[0 for i in range((int(ncol)+1))] for j in range((len(rows)))]\r\n\r\n#How to check column and row length\r\n # print (\"number of rows \" + str(len(data)))\r\n # print (\"number of col \" + str(len(data[0])))\r\n\r\n# data = [['']*(int(ncol)+1)]*(len(rows)-4+1) \r\n ## THIS FORM CAUSED A PROBLEM BECAUSE IT CREATES A POINTER TO 1 ARRAY. 
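# A quick illustration of that pitfall (editor's sketch):\r\n# bad = [[0]*3]*2; bad[0][0] = 9 # -> [[9, 0, 0], [9, 0, 0]], every row is the same list\r\n# good = [[0 for _ in range(3)] for _ in range(2)] # independent rows, as used above\r\n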
\r\n\r\n\r\n\r\n################################# Find all of the column names #################################\r\n\r\nrawLabels = soup.find(\"thead\") ## to retrieve column title data, which is under thead\r\n\r\ncategories = rawLabels.find_all(\"th\", scope = 'col') # find all of the individual elements that we want. They are listed as 'th' with scope = col\r\n\r\n################################# We are starting to fill the data into the list #################################\r\n\r\n## This loop fills our list with all of the data from the website\r\n\r\nfor i in range(len(data)): ## For all of the rows in data [112 rows]\r\n data[i][0] = i ## Creates the index in column 0\r\n colval = rows[i].find_all('td') ## Column values are being taken from each element of rows [112 rows] before being cleaned\r\n for j in range(len(colval)): ## For all of the elements in colval, which contain the data [30 entries] for a given row\r\n data[i][j+1]=colval[j].getText() ## Get the text/value for each cell and copy it to our list (data)\r\n print(\"data[\"+str(i)+\"][\"+str(j)+\"] = \" + str(data[i][j])) ## Print to check that the right information goes into the right box, not necessary\r\n\r\n## This loop takes the title for each categories entry and puts it in row 0 of data\r\n\r\nfor i in range(len(categories)): # goes from 0 -> 30 to create the column titles\r\n data[0][i] = categories[i].string\r\n\r\n## This loop is to remove the stars and the pluses from the player names \"Patrick Mahomes *\" -> \"Patrick Mahomes\"\r\nfor i in range(1,len(data)): ## for all the elements in the name column\r\n star =str(data[i][1]).find(\"*\") ## find the index of the first *\r\n if(star!=-1): ## If there is a star in the element\r\n data[i][1] = str(data[i][1])[0:star-1] ## take from the first part of the string to the space before the star\r\n\r\n\r\nprint(data)\r\nprint(len(data))\r\n\r\n\r\n\r\n\r\n################################# Export into a csv file #################################\r\n\r\nfilename = \"stats.csv\" ## This is the CSV file that we will be saving to\r\nnumpy.savetxt(filename, data, delimiter = \",\", fmt ='% s')\r\n #Uncomment this line if you want to save it again\r\n\r\n\r\n\r\n################################################################## Notes ##################################################################\r\n\r\n## To access the Player in:\r\n# Player\r\n## ____.string\r\n\r\n#How to check column and row length\r\n # print (\"number of rows \" + str(len(data)))\r\n # print (\"number of col \" + str(len(data[0])))","repo_name":"AlecPlante/NFLStats2020","sub_path":"statWebScrape.py","file_name":"statWebScrape.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"34193953839","text":"import os\nimport sys\n\nfrom site import addsitedir\nimport json\nfrom os.path import dirname, realpath, join\n\nBASE_DIR = dirname(realpath(__file__))\n\ndef get_env(base_dir):\n \"\"\" Get the 'environment variables' from a json file.\"\"\"\n try:\n with open(join(base_dir, 'settings/settings.json')) as f:\n return json.load(f)\n except IOError:\n return {}\n
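\n# Editor's note: the keys read below suggest a settings.json shaped roughly like\n# {\"WORKON_HOME\": \"~/.virtualenvs\", \"ENV\": \"django_1_5\", \"SETTINGS_PROD\": \"settings.prod\"}\n# -- the values here are illustrative, not taken from the source.\n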
\nENV = get_env(BASE_DIR)\nWORKON_HOME = ENV.get('WORKON_HOME', '')\nSETTINGS_PROD = ENV.get('SETTINGS_PROD')\nif not WORKON_HOME:\n WORKON_HOME = os.environ['WORKON_HOME']\nVENV = ENV.get('ENV', 'django_1_5')\n\naddsitedir('{0}/{1}/lib/python2.7/site-packages'.format(WORKON_HOME, VENV))\n\nsys.path = [BASE_DIR] + sys.path\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', SETTINGS_PROD)\n\n# activate the virtual environment\nif not sys.platform == 'win32':\n activate_this = os.path.expanduser(\n '{0}/{1}/bin/activate_this.py'.format(WORKON_HOME, VENV))\n execfile(activate_this, dict(__file__=activate_this))\n\nfrom django.core.wsgi import get_wsgi_application\n\napplication = get_wsgi_application()\n","repo_name":"diegodqu/gaia_","sub_path":"django_1_5/staff-gaia/src/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"40700749299","text":"from bootstrap_modal_forms.forms import BSModalModelForm\n\nfrom .models import OrderItem\n\n\nclass OrderItemModelForm(BSModalModelForm):\n class Meta:\n model = OrderItem\n fields = [\n \"order\",\n \"product\",\n \"quantity\",\n \"package_count\",\n \"list_price\",\n \"total_amount\",\n \"total_amount_with_vat\",\n ]\n","repo_name":"barisortac/mini-erp-docker","sub_path":"app/apps/order_item/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"5011529377","text":"print(\"\\n--------------Sender side-------------------------\")\nnumbers=input(\"Enter the dataword (16bit) : \\n\")\ndef binaryToDecimal(a):\n n=len(a)\n sums=0\n for i in range(0,n):\n sums=sums+(int(a[i])*pow(2,n-i-1))\n return sums\ndef decimalToBinary(val):\n rem=0\n sums1=0\n i=1\n val=int(val)\n while val>0:\n rem=val%2\n sums1=sums1+int(rem)*int(i)\n i=i*10\n val=val//2\n return sums1\ndef receiver():\n print(\"\\n--------------Receiver side-------------------------\")\n inputs=input(\"Enter the receiver side dataword 24 bit (16+8 bit checksum) \\n\")\n string1=inputs[0:8]\n string2=inputs[8:16]\n string3=inputs[16:24]\n num1=binaryToDecimal(string1)\n num2=binaryToDecimal(string2)\n num3=binaryToDecimal(string3)\n ans=decimalToBinary(num1+num2+num3)\n ans1=str(ans)\n if len(ans1)>8:\n chars=int(ans1[0])\n fn=binaryToDecimal(ans1[1:])\n finalAnswer=decimalToBinary(fn+chars)\n else:\n finalAnswer=int(ans1)\n finalAnswer=11111111-finalAnswer\n if finalAnswer==0 :\n print(\"The checksum at receiver side\",\"0\"*8)\n print(\"There is no error in transmission\",)\n else:\n print(\"The checksum at receiver side is : \",finalAnswer)\n print(\"There is an error in transmission \")\nstring1=numbers[0:8]\nstring2=numbers[8:16]\nnum1=binaryToDecimal(string1)\nnum2=binaryToDecimal(string2)\nans=decimalToBinary(num1+num2)\nans1=str(ans)\nif len(ans1)>8: \n chars=int(ans1[0])\n fn=binaryToDecimal(ans1[1:])\n finalAnswer=decimalToBinary(fn+chars)\nelse: \n finalAnswer=int(ans1)\nfinalAnswer=11111111-finalAnswer\nprint(\"The checksum is \",finalAnswer)\nreceiver()\n
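# Editor's worked example: for dataword '1010101010101010' each byte equals 170;\n# 170+170 = 340 -> 101010100 (nine digits), wrapping the leading carry gives 1010101,\n# and the complement 11111111 - 1010101 yields the checksum 10101010.\n","repo_name":"Deepakmukka1/Checksum","sub_path":"checksum.py","file_name":"checksum.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"1698708208","text":"import os\nimport random\nimport sys\nimport time\n\nimport numpy as np\nimport importlib\nimport itertools\nfrom zlib import crc32\n\nfrom ..models.baseline_constants import MODEL_PARAMS, ACCURACY_KEY\nfrom ..core import Tangle, Transaction, Node, MaliciousNode, PoisonType\nfrom ..core.tip_selection import TipSelector\nfrom 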
.lab_transaction_store import LabTransactionStore\n\n\nclass Lab:\n def __init__(self, tip_selector_factory, config, model_config, node_config, poisoning_config, tx_store=None):\n self.tip_selector_factory = tip_selector_factory\n self.config = config\n self.model_config = model_config\n self.node_config = node_config\n self.poisoning_config = poisoning_config\n self.tx_store = tx_store if tx_store is not None else LabTransactionStore(self.config.tangle_dir, self.config.src_tangle_dir)\n\n # Set the random seed if provided (affects client sampling, and batching)\n random.seed(1 + config.seed)\n np.random.seed(12 + config.seed)\n\n @staticmethod\n def create_client_model(seed, model_config):\n model_path = '.%s.%s' % (model_config.dataset, model_config.model)\n mod = importlib.import_module(model_path, package='tangle.models')\n ClientModel = getattr(mod, 'ClientModel')\n\n # Create 2 models\n model_params = MODEL_PARAMS['%s.%s' % (model_config.dataset, model_config.model)]\n if model_config.lr != -1:\n model_params_list = list(model_params)\n model_params_list[0] = model_config.lr\n model_params = tuple(model_params_list)\n\n model = ClientModel(seed, *model_params)\n model.num_epochs = model_config.num_epochs\n model.batch_size = model_config.batch_size\n model.num_batches = model_config.num_batches\n return model\n\n def create_genesis(self):\n import tensorflow as tf\n\n # Suppress tf warnings\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n client_model = self.create_client_model(self.config.seed, self.model_config)\n\n genesis = Transaction([])\n genesis.add_metadata('time', 0)\n self.tx_store.save(genesis, client_model.get_params())\n\n return genesis\n\n def create_node_transaction(self, tangle, round, client_id, cluster_id, train_data, eval_data, seed, model_config, tip_selector, tx_store):\n\n client_model = Lab.create_client_model(seed, model_config)\n\n # Choose which nodes are malicious based on a hash, not based on a random variable\n # to have it consistent over the entire experiment run\n # https://stackoverflow.com/questions/40351791/how-to-hash-strings-into-a-float-in-01\n use_poisoning_node = \\\n self.poisoning_config.poison_type != PoisonType.Disabled and \\\n self.poisoning_config.poison_from <= round and \\\n (float(crc32(client_id.encode('utf-8')) & 0xffffffff) / 2**32) < self.poisoning_config.poison_fraction\n\n if use_poisoning_node:\n ts = TipSelector(tangle, particle_settings=self.tip_selector_factory.particle_settings) \\\n if self.poisoning_config.use_random_ts else tip_selector\n print(f'client {client_id} is is poisoned {\"and uses random ts\" if self.poisoning_config.use_random_ts else \"\"}')\n node = MaliciousNode(tangle, tx_store, ts, client_id, cluster_id, train_data, eval_data, client_model, self.poisoning_config.poison_type, config=self.node_config)\n else:\n node = Node(tangle, tx_store, tip_selector, client_id, cluster_id, train_data, eval_data, client_model, config=self.node_config)\n\n tx, tx_weights = node.create_transaction()\n\n if tx is not None:\n tx.add_metadata('time', round)\n\n return tx, tx_weights\n\n def create_node_transactions(self, tangle, round, clients, dataset):\n tip_selectors = [self.tip_selector_factory.create(tangle) for _ in range(len(clients))]\n\n result = [self.create_node_transaction(tangle, round, client_id, cluster_id, dataset.train_data[client_id], dataset.test_data[client_id], self.config.seed, self.model_config, tip_selector, self.tx_store)\n for ((client_id, cluster_id), tip_selector) in 
zip(clients, tip_selectors)]\n\n for tx, tx_weights in result:\n if tx is not None:\n self.tx_store.save(tx, tx_weights)\n\n return [tx for tx, _ in result]\n\n def create_malicious_transaction(self):\n pass\n\n def train(self, num_nodes, start_from_round, num_rounds, eval_every, eval_on_fraction, dataset):\n if num_rounds == -1:\n rounds_iter = itertools.count(start_from_round)\n else:\n rounds_iter = range(start_from_round, num_rounds)\n\n if start_from_round > 0:\n tangle_name = int(start_from_round)-1\n print('Loading previous tangle from round %s' % tangle_name)\n tangle = self.tx_store.load_tangle(tangle_name)\n\n for round in rounds_iter:\n begin = time.time()\n print('Started training for round %s' % round)\n sys.stdout.flush()\n\n if round == 0:\n genesis = self.create_genesis()\n tangle = Tangle({genesis.id: genesis}, genesis.id)\n else:\n clients = dataset.select_clients(round, num_nodes)\n print(f\"Clients this round: {clients}\")\n for tx in self.create_node_transactions(tangle, round, clients, dataset):\n if tx is not None:\n tangle.add_transaction(tx)\n\n print(f'This round took: {time.time() - begin}s')\n sys.stdout.flush()\n\n self.tx_store.save_tangle(tangle, round)\n\n if eval_every != -1 and round % eval_every == 0:\n self.print_validation_results(self.validate(round, dataset, eval_on_fraction), round)\n\n def test_single(self, tangle, client_id, cluster_id, train_data, eval_data, seed, set_to_use, tip_selector):\n import tensorflow as tf\n\n random.seed(1 + seed)\n np.random.seed(12 + seed)\n tf.compat.v1.set_random_seed(123 + seed)\n\n client_model = self.create_client_model(seed, self.model_config)\n node = Node(tangle, self.tx_store, tip_selector, client_id, cluster_id, train_data, eval_data, client_model, config=self.node_config)\n\n reference_txs, reference = node.obtain_reference_params()\n metrics = node.test(reference, set_to_use)\n #if 'clusterId' in tangle.transactions[reference_txs[0]].metadata.keys():\n # tx_cluster = tangle.transactions[reference_txs[0]].metadata['clusterId']\n #else:\n # tx_cluster = 'None'\n #if cluster_id != tx_cluster:\n # with open(os.path.join(os.path.dirname(self.config.tangle_dir), 'validation_nodes.txt'), 'a') as f:\n # f.write(f'{client_id}({cluster_id}): {reference_txs}({tx_cluster}) (acc: {metrics[\"accuracy\"]:.3f}, loss: {metrics[\"loss\"]:.3f})\\n')\n\n # How many unique poisoned transactions have found their way into the consensus\n # through direct or indirect approvals?\n\n approved_poisoned_transactions_cache = {}\n\n def compute_approved_poisoned_transactions(transaction):\n if transaction not in approved_poisoned_transactions_cache:\n tx = tangle.transactions[transaction]\n result = set([transaction]) if 'poisoned' in tx.metadata and tx.metadata['poisoned'] else set([])\n result = result.union(*[compute_approved_poisoned_transactions(parent) for parent in tangle.transactions[transaction].parents])\n approved_poisoned_transactions_cache[transaction] = result\n\n return approved_poisoned_transactions_cache[transaction]\n\n approved_poisoned_transactions = set(*[compute_approved_poisoned_transactions(tx) for tx in reference_txs])\n metrics['num_approved_poisoned_transactions'] = len(approved_poisoned_transactions)\n\n return metrics\n\n def validate_nodes(self, tangle, clients, dataset):\n tip_selector = self.tip_selector_factory.create(tangle)\n return [self.test_single(tangle, client_id, cluster_id, dataset.train_data[client_id], dataset.test_data[client_id], random.randint(0, 4294967295), 'test', tip_selector) for 
client_id, cluster_id in clients]\n\n def validate(self, round, dataset, client_fraction=0.1):\n print('Validate for round %s' % round)\n #import os\n #with open(os.path.join(os.path.dirname(self.config.tangle_dir), 'validation_nodes.txt'), 'a') as f:\n # f.write('\\nValidate for round %s\\n' % round)\n tangle = self.tx_store.load_tangle(round)\n if dataset.clients[0][1] is None:\n # No clusters used\n client_indices = np.random.choice(range(len(dataset.clients)),\n min(int(len(dataset.clients) * client_fraction), len(dataset.clients)),\n replace=False)\n else:\n # validate fairly across all clusters\n client_indices = []\n clusters = np.array(list(map(lambda x: x[1], dataset.clients)))\n unique_clusters = set(clusters)\n num = max(min(int(len(dataset.clients) * client_fraction), len(dataset.clients)), 1)\n div = len(unique_clusters)\n clients_per_cluster = [num // div + (1 if x < num % div else 0) for x in range(div)]\n for cluster_id in unique_clusters:\n cluster_client_ids = np.where(clusters == cluster_id)[0]\n client_indices.extend(np.random.choice(cluster_client_ids, clients_per_cluster[cluster_id], replace=False))\n validation_clients = [dataset.clients[i] for i in client_indices]\n return self.validate_nodes(tangle, validation_clients, dataset)\n\n def print_validation_results(self, results, rnd):\n avg_acc = np.average([r[ACCURACY_KEY] for r in results])\n avg_loss = np.average([r['loss'] for r in results])\n\n avg_message = 'Average %s: %s\\nAverage loss: %s' % (ACCURACY_KEY, avg_acc, avg_loss)\n print(avg_message)\n\n import csv\n import os\n with open(os.path.join(os.path.dirname(self.config.tangle_dir), 'acc_and_loss.csv'), 'a', newline='') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow([rnd, avg_acc, avg_loss])\n\n write_header = False\n if not os.path.exists(os.path.join(os.path.dirname(self.config.tangle_dir), 'acc_and_loss_all.csv')):\n write_header = True\n\n with open(os.path.join(os.path.dirname(self.config.tangle_dir), 'acc_and_loss_all.csv'), 'a', newline='') as f:\n for r in results:\n r['round'] = rnd\n\n r['conf_matrix'] = r['conf_matrix'].tolist()\n\n w = csv.DictWriter(f, r.keys())\n if write_header:\n w.writeheader()\n write_header = False\n\n w.writerow(r)\n","repo_name":"osmhpi/federated-learning-dag","sub_path":"tangle/lab/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":11023,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"15"} +{"seq_id":"9913317319","text":"__author__ = \"Tomasz Kornuta & Vincent Marois\"\n\nimport os\nimport shutil\nimport subprocess\nfrom functools import partial\nfrom multiprocessing.pool import ThreadPool\n\nfrom miprometheus.grid_workers.grid_worker import GridWorker\n\n\nclass GridTesterCPU(GridWorker):\n \"\"\"\n Implementation of the Grid Tester running on CPUs.\n\n Reuses the :py:class:`miprometheus.workers.Tester` to start one test experiment.\n\n \"\"\"\n\n def __init__(self, name=\"GridTesterCPU\", use_gpu=False):\n \"\"\"\n Constructor for the :py:class:`miprometheus.grid_workers.GridTesterCPU`:\n\n - Calls the base constructor to set the worker's name and add default command lines arguments,\n - Adds some ``GridTester`` specific command line arguments.\n\n :param name: Name of the worker (DEFAULT: \"GridTesterCPU\").\n :type name: str\n\n :param use_gpu: Indicates whether the worker should use GPU or not.\n :type use_gpu: bool\n\n \"\"\"\n # call base constructor\n super(GridTesterCPU, self).__init__(name=name,use_gpu=use_gpu)\n\n # Get 
number_of_repetitions\n self.parser.add_argument('--repeat',\n dest='experiment_repetitions',\n type=int,\n default=1,\n help='Number of experiment repetitions to run for each model (DEFAULT=1).')\n\n # Get number_of_repetitions\n self.parser.add_argument('--max_concur_runs',\n dest='max_concurrent_runs',\n type=int,\n default=-1,\n help='Value limiting the number of concurrently running experiments.'\n 'The set limit will be truncated by number of available CPUs/GPUs.'\n ' (DEFAULT=-1, meaning that it will be set to the number of CPUs/GPUs)')\n\n def setup_grid_experiment(self):\n \"\"\"\n Setups the overall grid of experiments:\n\n - Calls :py:func:`GridWorker.setup_grid_experiment()` to parse arguments,\n - Recursively creates the paths to the experiments folders, verifying that they are valid (e.g. \\\n they contain a saved model, `model_best.pt`).\n\n \"\"\"\n super(GridTesterCPU, self).setup_grid_experiment()\n\n # Check the presence of mip-tester script.\n if shutil.which('mip-tester') is None:\n self.logger.error(\"Cannot localize the 'mip-tester' script! (hint: please use setup.py to install it)\")\n exit(-1)\n\n self.experiment_rootdir = self.flags.expdir\n\n # Get grid settings.\n experiment_repetitions = self.flags.experiment_repetitions\n self.max_concurrent_runs = self.flags.max_concurrent_runs\n\n # get all sub-directories paths in expdir, repeating according to flags.experiment_repetitions\n self.experiments_list = []\n\n for _ in range(experiment_repetitions):\n for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True):\n for name in dirs:\n self.experiments_list.append(os.path.join(root, name))\n\n # Keep only the folders that contain best_model.pt in model subdirectory.\n # We assume that training configuration is there as well.\n self.experiments_list = [elem for elem in self.experiments_list\n if os.path.isfile(elem + '/model_best.pt')]\n\n # Check if these are 'valid' folders, e.g. they contain a saved model\n if len(self.experiments_list) == 0:\n self.logger.error(\"There are no models in {} directory!\".format(self.experiment_rootdir))\n exit(-2)\n\n # List folders.\n exp_str = \"Found the following models in {} directory:\\n\".format(self.experiment_rootdir)\n exp_str += '='*80 + '\\n'\n for exp in self.experiments_list:\n exp_str += \" - {}/model_best.pt\\n\".format(exp)\n exp_str += '='*80 + '\\n'\n self.logger.info(exp_str)\n\n self.logger.info('Number of experiments to run: {}'.format(len(self.experiments_list)))\n self.experiments_done = 0\n\n # Ask for confirmation - optional.\n if self.flags.user_confirm:\n try:\n input('Press to confirm and start the grid of experiments\\n')\n except KeyboardInterrupt:\n exit(0)\n\n\n def run_grid_experiment(self):\n \"\"\"\n Main function of the :py:class:`miprometheus.grid_workers.GridTesterCPU`.\n\n Maps the grid experiments to CPU cores in the limit of the maximum concurrent runs allowed or maximum\\\n available cores.\n\n \"\"\"\n try:\n\n # Check max number of child processes. 
\n if self.max_concurrent_runs <= 0: # We need at least one process!\n max_processes = self.get_available_cpus()\n else: \n # Take into account the minimum value.\n max_processes = min(self.get_available_cpus(), self.max_concurrent_runs)\n self.logger.info('Spanning experiments using {} CPU(s) concurrently'.format(max_processes))\n\n # Run in as many threads as there are CPUs available to the script.\n with ThreadPool(processes=max_processes) as pool:\n func = partial(GridTesterCPU.run_experiment, self, prefix=\"\")\n pool.map(func, self.experiments_list)\n\n self.logger.info('Grid testing finished')\n\n except KeyboardInterrupt:\n self.logger.info('Grid testing interrupted!')\n\n def run_experiment(self, experiment_path: str, prefix=\"\"):\n \"\"\"\n Runs a test on the specified model (experiment_path) using the :py:class:`miprometheus.workers.Tester`.\n\n :param experiment_path: Path to an experiment folder containing a trained model.\n :type experiment_path: str\n\n :param prefix: Prefix to position before the command string (e.g. 'cuda-gpupick -n 1'). Optional.\n :type prefix: str\n\n ..note::\n\n - Visualization is deactivated to avoid any user interaction.\n - Command-line arguments such as the logging interval (``--li``) and log level (``--ll``) are passed \\\n to the :py:class:`miprometheus.workers.Tester`.\n\n \"\"\"\n try:\n\n path_to_model = os.path.join(experiment_path, 'model_best.pt')\n self.logger.warning(path_to_model)\n\n # Run the test\n command_str = \"{}mip-tester --model {} --li {} --ll {}\".format(\n prefix, path_to_model,\n self.flags.logging_interval,\n self.flags.log_level)\n\n # Add gpu flag if required.\n if self.app_state.use_CUDA:\n command_str += \" --gpu \"\n\n self.logger.info(\"Starting: {}\".format(command_str))\n with open(os.devnull, 'w') as devnull:\n result = subprocess.run(command_str.split(\" \"), stdout=devnull)\n self.experiments_done += 1\n self.logger.info(\"Finished: {}\".format(command_str))\n\n self.logger.info(\n 'Number of experiments done: {}/{}.'.format(self.experiments_done, len(self.experiments_list)))\n\n if result.returncode != 0:\n self.logger.info(\"Testing exited with code: {}\".format(result.returncode))\n\n except KeyboardInterrupt:\n self.logger.info('Grid testing interrupted!')\n\n\ndef main():\n \"\"\"\n Entry point function for the :py:class:`miprometheus.grid_workers.GridTesterCPU`.\n\n \"\"\"\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"IBM/mi-prometheus","sub_path":"miprometheus/grid_workers/grid_tester_cpu.py","file_name":"grid_tester_cpu.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"15"} +{"seq_id":"71722271691","text":"from ..models import Flight, Profile\nfrom django.http import JsonResponse\nfrom ..serializers import FlightSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import APIException\n\n# Create your views here.\n\n@api_view(['GET'])\ndef flight(req, id=-1):\n if int(id)>-1:\n try:\n return JsonResponse(FlightSerializer.FlightInfo(Flight.objects.get(_id = id)))\n except:\n return JsonResponse({\"id does not exist\":id})\n res = []\n for fly in Flight.objects.all():\n if req.user.is_superuser or 
fly.is_active:\n res.append(FlightSerializer.FlightInfo(fly))\n return JsonResponse(res,safe=False)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getFlightByAirline(req):\n airline = Profile.objects.get(user = req.user)\n res = []\n try:\n for fly in airline.flight_set.all():\n if fly.is_active:\n res.append(FlightSerializer.FlightInfo(fly))\n except:\n return JsonResponse({\"id does not exist\":req.user.id})\n return JsonResponse(res, safe=False)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef addFlight(req):\n if req.user.is_staff:\n user = req.user\n data = req.data\n print(data[\"origin\"][\"_id\"])\n print(user.id)\n try:\n thisflight = Flight.objects.create(airline_company_id=user.id, origin_country_id=data[\"origin\"][\"_id\"],destination_country_id=data[\"destination\"][\"_id\"],\n departure_time=data[\"departure\"],landing_time=data[\"landing\"],remaining_tickets=data[\"tickets\"])\n except Exception as e:\n raise APIException(e)\n return JsonResponse(FlightSerializer.FlightInfo(thisflight), safe=False)\n return JsonResponse({\"ERROR\":\"NOT AN AIRLINE COMPANY\"})\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delFlight(req, id=-1):\n if int(id) > 0:\n try:\n flight2del = Flight.objects.get(_id = id)\n flight2del.is_active = False\n flight2del.save()\n except Exception as e:\n raise APIException(e)\n return JsonResponse(FlightSerializer.FlightInfo(flight2del), safe=False)\n return JsonResponse({\"FAILED\":f\"no such id: {id}\"})\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef recoverFlight(req, id=-1):\n if int(id) > 0:\n try:\n flight2del = Flight.objects.get(_id = id)\n flight2del.is_active = True\n flight2del.save()\n except Exception as e:\n raise APIException(e)\n return JsonResponse(FlightSerializer.FlightInfo(flight2del), safe=False)\n return JsonResponse({\"FAILED\":f\"no such id: {id}\"})\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef updateFlight(req, id=-1):\n print(\"got to update\")\n if int(id) > 0:\n data = req.data\n try:\n flight2update = Flight.objects.get(_id = id)\n flight2update.origin_country_id = data[\"origin\"][\"_id\"]\n flight2update.destination_country_id = data[\"destination\"][\"_id\"]\n flight2update.departure_time = data[\"departure\"]\n flight2update.landing_time = data[\"landing\"]\n flight2update.remaining_tickets = data[\"tickets\"]\n flight2update.save()\n except Exception as e:\n print(e)\n raise APIException(e)\n return JsonResponse(FlightSerializer.FlightInfo(flight2update), safe=False)\n return JsonResponse({\"FAILED\":f\"no such id: {id}\"})\n","repo_name":"ItsEyt/FlightManager","sub_path":"back/base/views/flightViews.py","file_name":"flightViews.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"1373828927","text":"from collections import namedtuple\nfrom email.header import Header\n\nfrom heyurl.views import short_url\n\nFSL_HOME = \"http://fullstacklabs.co\"\nFSL_PYTHON = \"https://www.fullstacklabs.co/projects/python\"\nINEXISTENT_URLS = ['https://bitcoinmagazine.com', \"https://cryptonews.net\", \"https://news.bitcoin.com\" ]\nFSL_SHORT = 'a'\nINVALID_URL = 'inv4lid#url'\n\nBrowserMock = namedtuple(\n \"ChromeMock\",\n \"family\",\n defaults=['chrome']\n)\n\nUserAgentMock = namedtuple(\n \"UserAgentMock\",\n [\"browser\", \"is_mobile\", \"is_tablet\", \"is_pc\"],\n defaults = [BrowserMock, None, None, 
None]\n)\n\n###########################################################\n\nPOSTMock = namedtuple(\n \"POSTMock\",\n \"get\",\n defaults=[lambda orignal_url: FSL_HOME]\n)\n\nGETMock = namedtuple(\n \"GETMock\",\n \"get\",\n # defaults=[lambda *args: FSL_SHORT if args[0]=='short_url' else None]\n defaults=[lambda *args: FSL_SHORT if args[0]=='short_url' else None]\n)\n\nRequestMock = namedtuple(\n 'RequestMock',\n ['POST', 'GET','user_agent', 'path', 'method'],\n defaults=[\n POSTMock(),\n dict(short_url=FSL_SHORT),\n UserAgentMock(),\n FSL_SHORT, \n 'POST'\n ]\n)\n\n\nHelperTests = namedtuple(\n \"HelperTests\",[\n \"original_url_fsl_home\",\n \"original_url_fsl_python\",\n \"browser\",\n \"user_agent\",\n \"http_post\",\n \"http_get\",\n \"http_request\",\n \"inexistent_urls\",\n \"invalid_url\",\n \"fsl_short\",\n ],\n defaults=[\n FSL_HOME,\n FSL_PYTHON,\n BrowserMock,\n UserAgentMock,\n POSTMock,\n GETMock,\n RequestMock,\n INEXISTENT_URLS,\n INVALID_URL,\n FSL_SHORT\n ] \n)\n\nhelper_tests = HelperTests()\n","repo_name":"Sidon/django-hey-url","sub_path":"tests/data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"18096870291","text":"##################################\n# load taskonomy dataset\n# JY Gao, 20210422\n##################################\n\nimport os\nfrom os.path import join as pjoin\nimport collections\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom loader.loader_utils import png_reader_32bit, png_reader_uint8\nfrom torch.utils import data\nfrom PIL import Image\n\n\nclass TaskLoader(data.Dataset):\n \"\"\"Data loader for the taskonomy dataset.\n\n \"\"\"\n\n def __init__(self, root, split, img_size=(256, 256), img_norm=True, mode=None):\n self.root = os.path.expanduser(root)\n self.split = split\n self.img_norm = img_norm\n # self.files = collections.defaultdict(list)\n self.files = []\n self.img_size = img_size if isinstance(img_size, tuple) \\\n else (img_size, img_size)\n self.mode = mode\n\n # for split in ['train', 'test']:\n # path = pjoin(self.root, 'taskonomy_' + split + '_list.txt')\n # file_list = tuple(open(path, 'r'))\n # file_list = [id_.rstrip() for id_ in file_list]\n # self.files[split] = file_list\n\n path = pjoin(self.root, 'taskonomy_' + split + '_list.txt')\n file_list = tuple(open(path, 'r'))\n if split == 'train':\n file_list = [id_.rstrip() for id_ in file_list]\n if split == 'test':\n file_list = [id_.rstrip() for id_ in file_list]\n # self.files[split] = file_list\n self.files = file_list\n\n def __len__(self):\n # return len(self.files[self.split])\n return len(self.files)\n\n def __getitem__(self, index):\n # im_name_base = self.files[self.split][index]\n im_name_base = self.files[index]\n\n # raw_depth\n raw_depth_path = pjoin(self.root, str(im_name_base))\n raw_depth = png_reader_32bit(raw_depth_path, self.img_size)\n raw_depth[raw_depth == 65535] = 0.001\n raw_depth = raw_depth.astype(float)\n raw_depth = raw_depth / 10000\n raw_depth = raw_depth[np.newaxis, :, :]\n raw_depth = torch.from_numpy(raw_depth).float()\n\n # raw_depth_mask\n # raw_depth_mask = (raw_depth > 0.0001).astype(float)\n # raw_depth_mask = torch.from_numpy(raw_depth_mask).float()\n\n # image\n rgb_path = raw_depth_path.replace('depth_zbuffer', 'rgb')\n img = png_reader_uint8(rgb_path, self.img_size)\n img = img.astype(float)\n # img = img / 255\n img = (img - 128) / 255\n img = img.transpose(2, 0, 1)\n img = 
torch.from_numpy(img).float()\n\n # normal\n normal_path = raw_depth_path.replace('depth_zbuffer', 'normal')\n normal = png_reader_uint8(normal_path, self.img_size)\n normal = normal.astype(float)\n normal = normal / 255\n normal = normal.transpose(2, 0, 1)\n normal = 2 * normal - 1\n normal = torch.from_numpy(normal).float()\n\n return img, raw_depth, normal\n\n\n\n\n\nif __name__ == '__main__':\n # Config your local data path\n\n # depth_path = '/home/gao/depth.png'\n #\n # pred_depth = Image.open(depth_path)\n # depth = np.array(pred_depth)\n # depth = depth[:, :, 0]\n # depth = (depth - 128) / 255\n # depth = depth.astype(np.float32)\n # depth = depth.reshape(1, 1, depth.shape[0], depth.shape[1])\n\n\n local_path = '/media/gao/Gao106/taskonomy/'\n bs = 2\n dst = TaskLoader(root=local_path, split='train')\n trainloader = data.DataLoader(dst, batch_size=bs)\n for i, data in enumerate(trainloader):\n image, depths, normal = data\n\n imgs = image.numpy()\n imgs = np.transpose(imgs, [0, 2, 3, 1])\n imgs = imgs + 0.5\n\n normal = normal.numpy()\n normal = 0.5 * (normal + 1)\n normal = np.transpose(normal, [0, 2, 3, 1])\n\n # normal_mask = normal_mask.numpy()\n # normal_mask = np.repeat(normal_mask[:, :, :, np.newaxis], 3, axis=3)\n\n depths = depths.numpy()\n depths = np.transpose(depths, [0, 2, 3, 1])\n depths = np.repeat(depths, 3, axis=3)\n\n # raw_depth_mask = raw_depth_mask.numpy()\n # raw_depth_mask = np.repeat(raw_depth_mask[:, :, :, np.newaxis], 3, axis=3)\n\n\n f, axarr = plt.subplots(bs, 3)\n for j in range(bs):\n # print(im_name[j])\n axarr[j][0].imshow(imgs[j])\n # axarr[j][2].imshow(normal[j])\n # axarr[j][2].imshow(normal_mask[j])\n axarr[j][1].imshow(depths[j])\n axarr[j][2].imshow(normal[j])\n\n plt.show()\n plt.close()\n","repo_name":"JennyGao00/Depth-and-Surface-Normal-Estimation","sub_path":"loader/Taskonomy_loader.py","file_name":"Taskonomy_loader.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"35166262416","text":"__author__ = 'pmacharl'\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndriver = webdriver.Firefox()\ndriver.get(\"http://www.seleniumframework.com\")\nprint(driver.title)\nassert \"Selenium Framework | Selenium, Cucumber, Ruby, Java et al.\" in driver.title\nelem = driver.find_element_by_link_text(\"ABOUT\")\nelem.click()\nassert \"Pradeep K. 
Macharla\" in driver.page_source\ndriver.close()","repo_name":"machzqcq/PythonExperiments","sub_path":"RandomScripts/first_selenium.py","file_name":"first_selenium.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"10311103763","text":"import sys\nimport traceback\nfrom os import remove\n\n\ndef delete_duplicate_line(file_path):\n \"\"\" \"\"\"\n lines_seen = set() # holds lines already seen\n outfile = open(file_path + \"_tmp_\", \"w\")\n for line in open(file_path, \"r\"):\n if line not in lines_seen: # not a duplicate\n outfile.write(line)\n lines_seen.add(line)\n outfile.close()\n\n with open(file_path + \"_tmp_\", \"r+\") as filet:\n with open(file_path, \"w\") as filet2:\n filet2.write(filet.read())\n\n # We delete the tempory file\n remove(file_path + \"_tmp_\")\n\n\ndef record_or_not(record_mode, line, start_block, end_block):\n \"\"\" \"\"\"\n if not record_mode:\n if start_block in line:\n record_mode = True\n elif end_block in line:\n record_mode = False\n return record_mode\n\n\ndef returnStateMentIfMessageIsEmpty(msg, statement):\n \"\"\" \"\"\"\n if msg is None:\n return statement + \":\"\n else:\n return msg + \":\\n| >> \" + statement\n\n\ndef selfOnParams(check, self_on_function_params, add_semicolon=False):\n \"\"\" \"\"\"\n if self_on_function_params:\n comma = \"\"\n if add_semicolon:\n comma = \",\"\n if len(check) > 2:\n return check + comma\n else:\n return \"\"\n\n\ndef get_trace():\n print(\"Exception in code:\")\n print(\"-\" * 60)\n traceback.print_exc(file=sys.stdout)\n print(\"-\" * 60)\n","repo_name":"Sanix-Darker/testa","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"15"} +{"seq_id":"72617573131","text":"\"\"\" MultiQC submodule to parse output from deepTools plotProfile \"\"\"\n\nimport logging\n\nfrom multiqc.plots import linegraph\n\n# Initialise the logger\nlog = logging.getLogger(__name__)\n\n\nclass plotProfileMixin:\n def parse_plotProfile(self):\n \"\"\"Find plotProfile output\"\"\"\n self.deeptools_plotProfile = dict()\n for f in self.find_log_files(\"deeptools/plotProfile\", filehandles=False):\n parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f)\n for k, v in parsed_data.items():\n if k in self.deeptools_plotProfile:\n log.warning(\"Replacing duplicate sample {}.\".format(k))\n self.deeptools_plotProfile[k] = v\n if len(parsed_data) > 0:\n self.add_data_source(f, section=\"plotProfile\")\n\n # Superfluous function call to confirm that it is used in this module\n # Replace None with actual version if it is available\n self.add_software_version(None, f[\"s_name\"])\n\n self.deeptools_plotProfile = self.ignore_samples(self.deeptools_plotProfile)\n\n if len(self.deeptools_plotProfile) > 0:\n # Write data to file\n self.write_data_file(self.deeptools_plotProfile, \"deeptools_plot_profile\")\n\n # Try to do plot bands but don't crash if the labels aren't as we expect\n xPlotBands = []\n xPlotLines = []\n plotBandHelp = \"\"\n try:\n xPlotBands.append(\n {\n \"from\": converted_bin_labels[bin_labels.index(\"TES\")],\n \"to\": converted_bin_labels[-1],\n \"color\": \"#f7cfcf\",\n }\n )\n xPlotBands.append(\n {\n \"from\": converted_bin_labels[bin_labels.index(\"TSS\")],\n \"to\": converted_bin_labels[bin_labels.index(\"TES\")],\n \"color\": \"#ffffe2\",\n }\n )\n xPlotBands.append(\n {\n 
\"from\": converted_bin_labels[0],\n \"to\": converted_bin_labels[bin_labels.index(\"TSS\")],\n \"color\": \"#e5fce0\",\n }\n )\n xPlotLines.append(\n {\n \"width\": 1,\n \"value\": converted_bin_labels[bin_labels.index(\"TES\")],\n \"dashStyle\": \"Dash\",\n \"color\": \"#000000\",\n }\n )\n xPlotLines.append(\n {\n \"width\": 1,\n \"value\": converted_bin_labels[bin_labels.index(\"TSS\")],\n \"dashStyle\": \"Dash\",\n \"color\": \"#000000\",\n }\n )\n plotBandHelp = \"\"\"\n * Green: {} upstream of gene to {}\n * Yellow: {} to {}\n * Pink: {} to {} downstream of gene\n \"\"\".format(\n list(filter(None, bin_labels))[0],\n list(filter(None, bin_labels))[1],\n list(filter(None, bin_labels))[1],\n list(filter(None, bin_labels))[2],\n list(filter(None, bin_labels))[2],\n list(filter(None, bin_labels))[3],\n )\n except (ValueError, IndexError):\n pass\n\n config = {\n \"id\": \"read_distribution_profile\",\n \"title\": \"deeptools: Read Distribution Profile after Annotation\",\n \"ylab\": \"Occurrence\",\n \"xlab\": None,\n \"smooth_points\": 100,\n \"xPlotBands\": xPlotBands,\n \"xPlotLines\": xPlotLines,\n }\n\n self.add_section(\n name=\"Read Distribution Profile after Annotation\",\n anchor=\"read_distribution_profile_plot\",\n description=\"\"\"\n Accumulated view of the distribution of sequence reads related to the closest annotated gene.\n All annotated genes have been normalized to the same size.\n\n {}\"\"\".format(\n plotBandHelp\n ),\n plot=linegraph.plot(self.deeptools_plotProfile, config),\n )\n\n return len(self.deeptools_plotProfile)\n\n def parsePlotProfileData(self, f):\n d = dict()\n bin_labels = []\n bins = []\n for line in f[\"f\"].splitlines():\n cols = line.rstrip().split(\"\\t\")\n if cols[0] == \"bin labels\":\n for col in cols[2 : len(cols)]:\n if col not in list(filter(None, bin_labels)):\n bin_labels.append(col)\n else:\n break\n elif cols[0] == \"bins\":\n for col in cols[2 : len(cols)]:\n if len(bins) != len(bin_labels):\n bins.append(self._int(col))\n else:\n break\n else:\n s_name = self.clean_s_name(cols[0], f)\n d[s_name] = dict()\n\n # Convert the bins into genomic coordinates if we can\n try:\n factors = {\"Kb\": 1e3, \"Mb\": 1e6, \"Gb\": 1e9}\n convert_factor = 1\n for k, v in factors.items():\n if k in bin_labels[0]:\n convert_factor *= v\n start = float(bin_labels[0].strip(k)) * convert_factor\n step = self._int(abs(start / bin_labels.index(\"TSS\")))\n end = step * (len(bin_labels) - bin_labels.index(\"TSS\"))\n converted_bin_labels = range((self._int(start) + step), (self._int(end) + step), step)\n except (UnboundLocalError, ValueError):\n converted_bin_labels = bins\n\n for i in bins:\n d[s_name].update({converted_bin_labels[i - 1]: float(cols[i + 1])})\n\n return d, bin_labels, converted_bin_labels\n","repo_name":"ewels/MultiQC","sub_path":"multiqc/modules/deeptools/plotProfile.py","file_name":"plotProfile.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"en","doc_type":"code","stars":1073,"dataset":"github-code","pt":"15"} +{"seq_id":"19450698868","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2021-11-02\n\n@author: ivespe\n\nModule for handling scenarios for the long-term development of load demand in distribution system.\n\"\"\"\n\nimport pandas as pd\nimport pandapower as pp\nimport os\nimport math\nfrom pandas.core.algorithms import isin\n\n\ndef apply_scenario_to_net(net,scenario_data,year, load_scale=1.0, power_factor=0.95):\n \"\"\" Modify network to be consistent with long-term load scenario for some future 
year\n\n Inputs:\n net: pandapower network DataFrame\n scenario_data: Dictionary with entries 'point_load' for new and increased point loads\n and 'base_load' for general increase (or decrease) in the 'base load' at existing \n load points. The value for key 'point_load' is a DataFrame with one row (scenario entry) \n for each addition of load demand at a given bus at a given year. Column 'year' is year\n relative to the present year (0), column 'bus' refers to the bus number of the network, \n column 'load added (MW)' is the real power in MW\n year: which year in the scenario that the operating state should be consistent with. \n (Linear interpolation is applied if the load demand is not specified for this year in \n the scenario)\n load_scale: Scaling factor to apply to the load demand value in the scenario data \n (optional; default: 1.0, i.e., no scaling)\n power_factor: Power factor (lagging) to use for all new loads if no power factor is specified \n for individual loads in the scenario input data (optional; default: 0.95)\n\n Return:\n net: pandapower network DataFrame modified with new load points (if necessary)\n\n \n NB: Only scenarios for point loads are currently implemented. \n \"\"\" \n \n years = scenario_data['point_loads']['year_rel']\n buses = scenario_data['point_loads']['bus_i']\n load_add = scenario_data['point_loads']['load_added_MW']\n\n if 'power_factor' in scenario_data['point_loads'].columns:\n # Use provided power factors for each new load\n power_factor_vec = scenario_data['point_loads']['power_factor']\n else:\n # If power factors are not provided for new loads, use either default value or \n # a single custom value provided as input argument\n power_factor_vec = [power_factor] * len(load_add)\n\n for it in years.index:\n if years[it] <= year:\n # Loop through all years up to the year we want to consider\n bus_ID = buses[it]\n load_name = int(bus_ID)\n if any(net.load['bus'] == bus_ID) == False:\n # Add load to bus if none exist; set reactive power for load using a fixed power factor\n Pd = load_add[it]*load_scale\n Qd = Pd * math.tan(math.acos(power_factor_vec[it]))\n pp.create_load(net,bus=bus_ID,name=load_name,p_mw=Pd,q_mvar=Qd)\n \n # Reindex so that DataFrame row index is bus name for all loads\n net.load.set_index('name',drop=False,inplace=True)\n \n else:\n # Increase power consumption if there already is a load at the bus\n net.load.loc[load_name,'p_mw'] += load_add[it]\n\n return net\n\n\ndef read_scenario_from_csv(folder, filename_point_load):\n \"\"\" Generate scenarios for long-term load development from .csv input file\n\n Inputs:\n folder: Folder with files specifying scenarios\n filename_point_load: File name (in folder) for data file specifying new point loads\n that are added \n\n Return:\n scenario_data: Dictionary with entries 'point_load' for new and increased point loads\n and 'base_load' for general increase (or decrease) in the 'base load' at existing \n load points.\n The value for key 'point_load' is a DataFrame with one row (scenario entry) for each \n addition of load demand at a given bus at a given year. 
Column 'year' is year relative to \n the present year (0), column 'bus' refers to the bus number of the network, \n column 'load added (MW)' is the real power in MW \n (NB: Functionality for 'base load' is not reimplemented)\n\n \"\"\" \n # File names in specified folder\n filename_point_loads_fullpath = os.path.join(folder, filename_point_load)\n\n # Read files from .csv files\n scenario_base_load = None\n scenario_point_loads = pd.read_csv(filename_point_loads_fullpath,sep=';')\n\n # Put together scenario data output\n scenario_data = {'base_load': scenario_base_load, 'point_loads': scenario_point_loads}\n\n return scenario_data\n\n\ndef interp_for_scenario(df,years_interp):\n \"\"\" Interpolate data evaluated for specific years in a scenario. The only type of interpolation\n that is currently supported is to let values for missing years be the previous explicitly\n evaluated year.\n\n Inputs:\n df: pandas DataFrame of Series with index being the years of the scenario that \n has been explicitly evaluated\n years_interp: Years that the values are to be interpolated for.\n \n\n Output:\n df_interp: DataFrame with index equals years_interp and interpolated values for all \n these years.\n \"\"\" \n\n # Index needs to be years \n years = df.index\n\n # Hack to be able to support Series (that don't have columns) as well as DataFrames\n if len(df.shape) == 1:\n df_interp = pd.DataFrame(index = years_interp, columns = ['value'])\n else:\n df_interp = pd.DataFrame(index = years_interp, columns = df.columns)\n \n if years_interp[0] != years[0]:\n print('First year of new list of year for interpolation needs to equal first year in the original list of years') \n raise\n\n # Loop over all years that values are to be returned for\n for year in years_interp:\n I = list(years == year)\n if any(I):\n values = list(df.loc[I].values[0])\n df_interp.loc[year] = values\n else:\n df_interp.loc[year] = values_prev\n\n # Store values for the previous years that are explicitly evaluated\n values_prev = values\n\n return df_interp","repo_name":"SINTEF-Power-system-asset-management/CINELDI_MV_reference_system","sub_path":"load_scenarios.py","file_name":"load_scenarios.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"33179135200","text":"import sys\n\ninput = sys.stdin.readline\n\ndef solution():\n n = int(input())\n s_card = list(map(int, input().split()))\n s_card_set = set()\n for num in s_card:\n s_card_set.add(num)\n\n m = int(input())\n inputs = list(map(int, input().split()))\n\n for num in inputs:\n if num not in s_card_set:\n print(0)\n else:\n print(1)\n\n\nif __name__ == \"__main__\":\n solution()\n","repo_name":"SNURFER/PAI","sub_path":"python/boj/10815.py","file_name":"10815.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"8849242088","text":"from __future__ import absolute_import, division, print_function, unicode_literals\r\n\r\nimport numpy as np\r\nimport time\r\nimport params\r\nimport gui\r\nimport msa.utils\r\nfrom msa.capturer import Capturer\r\nfrom msa.framestats import FrameStats\r\n\r\ncapture = None # msa.capturer.Capturer, video capture wrapper\r\ngenerator = None\r\n\r\nimg_cap = np.empty([]) # captured image before processing\r\nimg_in = np.empty([]) # processed capture image\r\nimg_out = np.empty([]) # output from prediction model\r\n\r\ngui.init_app()\r\n\r\npyqt_params = 
gui.init_params(params.params_list, target_obj=params, w=320)\r\n\r\n# reading & writing to pyqtgraph.parametertree seems to be slow,\r\n# so going to cache in an object for direct access\r\ngui.params_to_obj(pyqt_params, target_obj=params, create_missing=True, verbose=True)\r\n\r\n# create main window\r\ngui.init_window(x=320, w=(gui.screen_size().width()-320), h=(gui.screen_size().width()-320)*0.4)\r\n\r\nimport datetime\r\nfrom glob import glob\r\nimport os\r\nfrom pathlib import Path\r\nimport tarfile\r\nimport time\r\nimport tensorflow as tf\r\nfrom matplotlib import pyplot as plt\r\n\r\nDATASET_NAME = 'flickr_flower_photos'\r\nIMG_SIZE = 256 # images must be square\r\n\r\nROOT_DIR = '/Users/pje/recog'\r\nUNIQUE_SESSION_NAME = DATASET_NAME + '_' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\r\n\r\ntry:\r\n from google.colab import drive\r\nexcept ImportError: # we're NOT running on colab. use local filesystem\r\n CHECKPOINTS_DIR = os.path.join(ROOT_DIR, 'checkpoints')\r\n DATASETS_DIR = os.path.join(ROOT_DIR, 'datasets')\r\n DATASET_DIR = os.path.join(DATASETS_DIR, DATASET_NAME)\r\nelse: # we ARE running on colab. Use Drive for file reads/writes\r\n drive.mount('/content/gdrive')\r\n DRIVE_ROOT = os.path.join(ROOT_DIR, 'gdrive', 'My Drive')\r\n CHECKPOINTS_DIR = os.path.join(DRIVE_ROOT, 'checkpoints')\r\n DATASETS_DIR = os.path.join(DRIVE_ROOT, 'datasets')\r\n DATASET_DIR = os.path.join(DATASETS_DIR, DATASET_NAME)\r\n if not os.path.isdir(os.path.join(DATASET_DIR)):\r\n tar = tarfile.open(os.path.join(DATASETS_DIR, DATASET_NAME+'.tar.gz'))\r\n tar.extractall(path=DATASETS_DIR)\r\n tar.close()\r\n\r\nCHECKPOINT_DIR = os.path.join(CHECKPOINTS_DIR, DATASET_NAME)\r\nCHECKPOINT_PREFIX = 'ckpt'\r\nLOGS_DIR = os.path.join(ROOT_DIR, 'logs')\r\nLOG_DIR = os.path.join(LOGS_DIR, UNIQUE_SESSION_NAME)\r\nOUTPUT_CHANNELS = 3\r\nLAMBDA = 100\r\n\r\nSUMMARY_WRITER = tf.summary.create_file_writer(LOG_DIR)\r\n\r\ndef console():\r\n from code import InteractiveConsole\r\n InteractiveConsole(locals={**globals(), **locals(), **vars()}).interact()\r\n\r\n\r\ndef load(image_file):\r\n image = tf.io.read_file(image_file)\r\n image = tf.image.decode_jpeg(image)\r\n w = tf.shape(image)[1]\r\n w = w // 2\r\n input_image = image[:, :w, :]\r\n real_image = image[:, w:, :]\r\n input_image = tf.cast(input_image, tf.float32)\r\n real_image = tf.cast(real_image, tf.float32)\r\n return input_image, real_image\r\n\r\n\r\ndef resize(input_image, real_image, height, width):\r\n input_image = tf.image.resize(\r\n input_image,\r\n [height, width],\r\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR\r\n )\r\n real_image = tf.image.resize(\r\n real_image,\r\n [height, width],\r\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR\r\n )\r\n return input_image, real_image\r\n\r\n\r\ndef random_crop(input_image, real_image):\r\n stacked_image = tf.stack([input_image, real_image], axis=0)\r\n cropped_image = tf.image.random_crop(\r\n stacked_image,\r\n size=[2, IMG_SIZE, IMG_SIZE, 3]\r\n )\r\n return cropped_image[0], cropped_image[1]\r\n\r\n\r\n# normalizing the images to [-1, 1]\r\ndef normalize(input_image, real_image):\r\n input_image = (input_image / 127.5) - 1\r\n real_image = (real_image / 127.5) - 1\r\n return input_image, real_image\r\n\r\n\r\n@tf.function()\r\ndef random_jitter(input_image, real_image):\r\n scaling_factor = 1.1171875 # turns 256 into 286 (as the original paper used)\r\n scaled_size = round(scaling_factor * IMG_SIZE)\r\n # resize to 286 x 286 x 3\r\n input_image, real_image = resize(input_image, 
real_image, scaled_size, scaled_size)\r\n # ...then randomly crop it back down to 256 x 256 x 3\r\n input_image, real_image = random_crop(input_image, real_image)\r\n if tf.random.uniform(()) > 0.5:\r\n # random mirroring\r\n input_image = tf.image.flip_left_right(input_image)\r\n real_image = tf.image.flip_left_right(real_image)\r\n return input_image, real_image\r\n\r\ndef downsample(filters, size, apply_batchnorm=True):\r\n initializer = tf.random_normal_initializer(0., 0.02)\r\n result = tf.keras.Sequential()\r\n result.add(\r\n tf.keras.layers.Conv2D(\r\n filters,\r\n size,\r\n strides=2,\r\n padding='same',\r\n kernel_initializer=initializer,\r\n use_bias=False\r\n )\r\n )\r\n if apply_batchnorm:\r\n result.add(tf.keras.layers.BatchNormalization())\r\n result.add(tf.keras.layers.LeakyReLU())\r\n return result\r\n\r\n\r\ndef upsample(filters, size, apply_dropout=False):\r\n initializer = tf.random_normal_initializer(0., 0.02)\r\n result = tf.keras.Sequential()\r\n result.add(\r\n tf.keras.layers.Conv2DTranspose(\r\n filters,\r\n size,\r\n strides=2,\r\n padding='same',\r\n kernel_initializer=initializer,\r\n use_bias=False\r\n )\r\n )\r\n result.add(tf.keras.layers.BatchNormalization())\r\n if apply_dropout:\r\n result.add(tf.keras.layers.Dropout(0.5))\r\n result.add(tf.keras.layers.ReLU())\r\n return result\r\n\r\n\r\n\r\n\r\n\r\n\r\ngenerator = tf.keras.models.load_model(os.path.join(ROOT_DIR, 'models', DATASET_NAME + '_generator.h5'))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef init_capture(capture, output_shape):\r\n if capture:\r\n capture.close()\r\n\r\n capture_shape = (params.Capture.Init.height, params.Capture.Init.width)\r\n capture = Capturer(sleep_s = params.Capture.sleep_s,\r\n device_id = params.Capture.Init.device_id,\r\n capture_shape = capture_shape,\r\n capture_fps = params.Capture.Init.fps,\r\n output_shape = output_shape\r\n )\r\n\r\n capture.update()\r\n\r\n if params.Capture.Init.use_thread:\r\n capture.start()\r\n\r\n return capture\r\n\r\n\r\ncapture = init_capture(capture, output_shape=[IMG_SIZE, IMG_SIZE, 3])\r\nframe_stats = FrameStats('Main')\r\n\r\nwhile not params.Main.quit:\r\n\r\n # reinit capture device if parameters have changed\r\n if params.Capture.Init.reinitialise:\r\n params.child('Capture').child('Init').child('reinitialise').setValue(False)\r\n capture = init_capture(capture, output_shape=[IMG_SIZE, IMG_SIZE, 3])\r\n\r\n\r\n capture.enabled = params.Capture.enabled\r\n if params.Capture.enabled:\r\n # update capture parameters from GUI\r\n capture.output_shape = [IMG_SIZE, IMG_SIZE, 3]\r\n capture.verbose = params.Main.verbose\r\n capture.freeze = params.Capture.freeze\r\n capture.sleep_s = params.Capture.sleep_s\r\n for p in msa.utils.get_members(params.Capture.Processing):\r\n setattr(capture, p, getattr(params.Capture.Processing, p))\r\n\r\n # run capture if multithreading is disabled\r\n if params.Capture.Init.use_thread == False:\r\n capture.update()\r\n\r\n while capture.img is None:\r\n time.sleep(0.001)\r\n\r\n img_cap = np.copy(capture.img) # create copy to avoid thread issues\r\n\r\n\r\n # interpolate (temporal blur) on input image\r\n img_in = msa.utils.np_lerp( img_in, img_cap, 1 - params.Prediction.pre_time_lerp)\r\n\r\n # run prediction\r\n if params.Prediction.enabled and generator:\r\n generator_input = tf.expand_dims(img_in, 0) if len(img_in.shape) < 4 else img_in\r\n\r\n # print('img_in (after expand):\\n')\r\n # tf.print(generator_input)\r\n # print(generator_input)\r\n # print(\"...........\\n\\n\\n\\n 
\")\r\n\r\n generator_input = (generator_input * 2 - 1) # transform values: (0..1.0) -> (-1.0..1.0)\r\n # print('generator_input:\\n')\r\n # print(generator_input)\r\n # tf.print(generator_input)\r\n # print(\"...........\\n\\n\\n\\n \")\r\n\r\n # raise Exception\r\n\r\n img_predicted = generator(generator_input, training=True)[0]\r\n img_predicted = np.interp(img_predicted, [-1.0, 1.0], (0.0, 1.0))\r\n else:\r\n img_predicted = capture.img0\r\n\r\n # interpolate (temporal blur) on output image\r\n img_out = msa.utils.np_lerp(img_out, img_predicted, 1 - params.Prediction.post_time_lerp)\r\n\r\n # update frame states\r\n frame_stats.verbose = params.Main.verbose\r\n frame_stats.update()\r\n\r\n # update gui\r\n gui.update_image(0, capture.img0)\r\n gui.update_image(1, img_in)\r\n gui.update_image(2, img_out)\r\n gui.update_stats(frame_stats.str + \" | \" + capture.frame_stats.str)\r\n gui.process_events()\r\n\r\n time.sleep(params.Main.sleep_s)\r\n\r\ncapture.close()\r\ngui.close()\r\n\r\ncapture = None\r\ngenerator = None\r\n\r\nprint('Finished')\r\n","repo_name":"pje/recog","sub_path":"webcam-pix2pix-tensorflow/webcam-pix2pix.py","file_name":"webcam-pix2pix.py","file_ext":"py","file_size_in_byte":8995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"4664944304","text":"import boto3\n\ndef profile():\n print(\"Please enter the profile set up for the environment you need to run this script for!\")\n inputProfile = input()\n return inputProfile\n\ndef server():\n print(\"Please enter the Server Name!(Not FQDN)\")\n inputServer = input()\n return inputServer\n\ndef region():\n print(\"Please enter the region!(example: us-east-1)\")\n inputRegion = input()\n return inputRegion\n\ndef snapshotDescription():\n print(\"Please enter a Snapshot name based on the above List!\")\n inputDescription = input()\n return inputDescription\n\ndef ec2InstanceCall(inputServer,ec2client):\n # Gets Instance ID and Az from Instance input\n responseDescribeInstances = ec2client.describe_instances(Filters=[{'Name': 'tag:Name','Values': [str(inputServer)]}])\n for ec2 in responseDescribeInstances['Reservations']:\n for ec2instance in ec2['Instances']:\n instanceId = ec2instance['InstanceId']\n availabilityZone = ec2instance['Placement']['AvailabilityZone']\n return instanceId, availabilityZone\n\ndef snapshotCheck(ec2client):\n # Gets List of Snapshots ISO files\n responseDescribeSnapshots = ec2client.describe_snapshots(OwnerIds=['711940113766'])\n for snapshot in responseDescribeSnapshots['Snapshots']:\n isoSnapshot = snapshot.get('Description')\n if \"English\" in isoSnapshot:\n print(isoSnapshot)\n\ndef snapshotCall(inputDescription,ec2client,instanceId):\n #Ges Snapshot information from aws for the iso input\n responseDescribeSnapshots = ec2client.describe_snapshots(OwnerIds=['711940113766'])\n for snapshot in responseDescribeSnapshots['Snapshots']:\n isoSnapshot = snapshot.get('Description')\n if inputDescription in isoSnapshot:\n snapshotId = snapshot.get('SnapshotId')\n #create the Ec2 voulume\n responseCreateVolume = ec2client.create_volume(AvailabilityZone=availabilityZone,SnapshotId=snapshotId)\n\n #waits for ec2 volume to be ready\n while (ec2resource.Volume(responseCreateVolume.get('VolumeId')).state) != 'available':\n pass\n #once ec2 is ready attaches the volume to the insanceid above\n response = ec2client.attach_volume(Device='xvdp',InstanceId=instanceId,VolumeId=responseCreateVolume.get('VolumeId'))\n if response['State'] == 
'attaching':\n print(\"ISO Volume has been Attached!\")\n else:\n print(\"Error in the Attachments!\")\n\n\ninputProfile = profile()\nprint(\"Entered Profile \" + inputProfile)\ninputRegion = region()\nprint(\"Entered Region \" + inputRegion)\ninputServer = server()\nprint(\"Entered Server \" + inputServer)\n\nsession = boto3.Session(profile_name=inputProfile)\nec2client = session.client('ec2',region_name=inputRegion)\nec2resource = session.resource('ec2',region_name=inputRegion)\n\ninstanceId, availabilityZone = ec2InstanceCall(inputServer,ec2client)\nprint(\"Available SnapShots\")\nsnapshotList = snapshotCheck(ec2client)\n\n\ninputDescription = snapshotDescription()\nsnapshotCall(inputDescription,ec2client,instanceId)\n","repo_name":"Grasume/AWSPython","sub_path":"Aws_Attach_Windows_ISO.py","file_name":"Aws_Attach_Windows_ISO.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"2247527480","text":"# import tensorflow\r\nimport pickle\r\nfrom urllib import response\r\nimport pandas as pd\r\nfrom pprint import pprint\r\nfrom sklearn.utils import resample\r\nfrom tensorflow.keras.models import load_model\r\nimport numpy as np\r\nimport json\r\nimport random\r\nfrom flask import Flask, request, Response, abort, send_file, jsonify, url_for\r\nfrom flask_cors import CORS\r\n\r\ndataset = pd.read_csv('tweet_emotions.csv')\r\n\r\ndataset.sentiment.value_counts()\r\ntarget_class = 9\r\n\r\n# classes_ids = {name:ids for name, ids in zip(set(dataset.sentiment.to_list()),range(len(set(dataset.sentiment.to_list()))))}\r\nclasses_ids = {name: idx for idx, name in enumerate(dataset.sentiment.unique())}\r\ninv_classes_ids = {value:key for key, value in zip(list(classes_ids.keys()), list(classes_ids.values()))}\r\n\r\npprint(classes_ids)\r\n\r\ntarget_majority = dataset[dataset.sentiment==inv_classes_ids[target_class]]\r\n\r\nfor cl in range(len(classes_ids)):\r\n train_minority = dataset[dataset.sentiment==inv_classes_ids[cl]]\r\n train_minority_upsampled = resample(train_minority, replace=True, n_samples=len(target_majority), random_state=123)\r\n if cl == 0:\r\n dataset_upsampled = pd.concat([train_minority_upsampled, target_majority])\r\n #train_upsampled = pd.concat([train_upsampled, ])\r\n if cl>0 and cl!=target_class:\r\n dataset_upsampled = pd.concat([train_minority_upsampled, dataset_upsampled])\r\ndataset_upsampled = dataset_upsampled.sample(frac=1).reset_index(drop=True)\r\n\r\n\r\nwith open('tokenizer.pickle', 'rb') as handle:\r\n tokenizer = pickle.load(handle)\r\n\r\nmodel = load_model('chatbot6.h5')\r\n\r\ntest_sentence = ['i am anger']\r\n\r\nsequence = tokenizer.texts_to_sequences(test_sentence)\r\n# {'anger': 12,\r\n# 'boredom': 10,\r\n# 'empty': 0,\r\n# 'enthusiasm': 2,\r\n# 'fun': 7,\r\n# 'happiness': 9,\r\n# 'hate': 8,\r\n# 'love': 6,\r\n# 'neutral': 3,\r\n# 'relief': 11,\r\n# 'sadness': 1,\r\n# 'surprise': 5,\r\n# 'worry': 4}\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route('/text', methods=['POST'])\r\ndef get_text():\r\n text = request.json['text']\r\n sentence = [text]\r\n sequence = tokenizer.texts_to_sequences(sentence)\r\n predictions = model.predict(sequence)\r\n print(predictions)\r\n print(np.argmax(predictions))\r\n s = np.argmax(predictions)\r\n p = inv_classes_ids.get(s)\r\n print(p)\r\n\r\n if (p == 'hate' or p == 'anger'):\r\n data = json.load(open('anger.json'))\r\n r =random.randint(0, len(data['texts']) - 1)\r\n print(data['texts'][r]['text'])\r\n return 
jsonify({\"msg\":data['texts'][r]['text'], \"emotion\": p})\r\n \r\n elif(p == 'happiness' or p=='fun' or p == 'enthusiasm' or p == 'love'):\r\n data = json.load(open('happy.json'))\r\n r =random.randint(0, len(data['texts']) - 1)\r\n print(data['texts'][r]['text'])\r\n return jsonify({\"msg\":data['texts'][r]['text'], \"emotion\": p})\r\n elif(p == 'boredom'):\r\n data = json.load(open('boredom.json'))\r\n r =random.randint(0, len(data['texts']) - 1)\r\n print(data['texts'][r]['text'])\r\n return jsonify({\"msg\":data['texts'][r]['text'], \"emotion\": p})\r\n elif(p == 'worry'):\r\n data = json.load(open('worry.json'))\r\n r =random.randint(0, len(data['texts']) - 1)\r\n print(data['texts'][r]['text'])\r\n return jsonify({\"msg\":data['texts'][r]['text'], \"emotion\": p})\r\n else:\r\n data = json.load(open('notUnderstood.json'))\r\n r =random.randint(0, len(data['texts']) - 1)\r\n print(data['texts'][r]['text'])\r\n return jsonify({\"msg\":data['texts'][r]['text'], \"emotion\": p})\r\n \r\n \r\n \r\n\r\n \r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"rayat927/ai-therapy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"69866098892","text":"try:\n from Tkinter import *\nexcept:\n from tkinter import *\n\nfrom layout import layout\n\nblock = 5\n\nclass Keyboard(Frame):\n def __init__(self, parent, callback):\n Frame.__init__(self, parent, height=5*block, width=11*block)\n self.shift = False\n self.caps = False\n self.__initwidgets__(callback)\n\n def __initwidgets__(self, callback):\n for i,row in enumerate(layout):\n j = 0\n for column in row:\n b = KeyButton(self, column[0], callback)\n b.config(height=block, width=column[1]*block)\n b.grid(row=i, column=j, columnspan=column[1], sticky=N+E+W+S)\n j += column[1]\n\n def changeCase(self):\n for child in self.winfo_children():\n child.checkText(self)\n\n\nclass KeyButton(Button):\n def __init__(self, parent, t, callback):\n self.__call = callback\n Button.__init__(self, parent, command= lambda:self.__determineOutput__(parent))\n self.__type = \"special\"\n\n if \"\\n\" in t:\n val = t.split(\"\\n\")\n self.__val1 = val[0]\n self.__val2 = val[1]\n self.__type = \"dual\"\n\n else:\n self.__val1 = t\n self.__val2 = t\n\n if self.__val1.isalpha() and len(self.__val1) is 1:\n self.__val2 = self.__val1.lower()\n self.__type = \"letter\"\n\n if self.__val1 == \"Shift\":\n self.__type = \"Shift\"\n\n if self.__val1 == \"CapsLock\":\n self.__type = \"Caps\"\n\n self.config(text=self.__val2)\n\n #check type, parent.shift and parent.caps for the output\n #set shift to false after using the shifted key\n def __determineOutput__(self, parent):\n #for letters\n if self.__type is \"letter\":\n if bool(parent.shift) ^ bool(parent.caps):\n self.__call(str(self.__val1))\n else:\n self.__call(str(self.__val2))\n\n parent.shift = False\n parent.changeCase()\n\n elif self.__type is \"dual\":\n if parent.shift:\n self.__call(str(self.__val1))\n else:\n self.__call(str(self.__val2))\n\n parent.shift = False\n parent.changeCase()\n\n elif self.__type is \"special\":\n self.__call(str(self.__val1))\n parent.changeCase()\n\n elif self.__type is \"Shift\":\n parent.shift = not parent.shift\n parent.changeCase()\n\n elif self.__type is \"Caps\":\n parent.caps = not parent.caps\n parent.changeCase()\n\n def checkText(self, parent):\n if self.__type is \"letter\":\n if bool(parent.shift) ^ 
bool(parent.caps):\n self.config(text=self.__val1)\n else:\n self.config(text=self.__val2)\n\n elif self.__type is \"dual\":\n if parent.shift:\n self.config(text=self.__val1)\n else:\n self.config(text=self.__val2)\n","repo_name":"Drew-Miller/CS481","sub_path":"HW5/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"39585800699","text":"class CreatePartyMapping:\n import Levenshtein as lv\n import json\n from difflib import SequenceMatcher\n\n def __init__(self):\n data = self.import_file('final_mapping.json')\n self.create_party_mapping(data)\n # self.merge_mapping_files('final_party_mapping.json', 'manual_mapped_parties.json', 'merges_parties.json')\n print('DONE')\n\n def import_file(self, file_name):\n with open(file_name) as f:\n data = self.json.load(f)\n return data.get('mapping')\n\n def create_file(self, file_name, mapping):\n f = open(file_name, 'w')\n f.write('{ \"mapping\": [')\n for item in mapping:\n f.write(self.json.dumps(item) + \",\\n\")\n f.write(\"]}\")\n print('done')\n\n def get_abbreviation(self, name):\n abbrv = \"\".join(e[0] for e in str.split(name))\n if len(abbrv) < 3:\n abbrv = \"ABBREVIATION TOO SHORT - THIS STRING WONT MATCH\"\n return abbrv\n\n def create_party_mapping(self, final_data):\n big_nine_names = ['vvd', 'd66', 'pvda', 'cda', 'christenunie', 'pvv', 'sgp', 'sp', 'groenlinks', 'cusgp']\n final_party_mapping = []\n unmappable_parties = []\n all_party_mapping = []\n unmappable_count = 0\n\n for gmnt in final_data:\n party_mapping = []\n party_unmap = []\n for ar in gmnt.get('mapping'):\n maps_with = ar.get('maps_with')\n for cndt in maps_with:\n cndt_name_data = cndt.get('name')\n cndt_name = cndt_name_data.get('initials') + \" \" + cndt_name_data.get('prefix') + cndt_name_data.get('last_name')\n if ar.get('name_allmanak') == cndt_name:\n abbrv_pa = self.get_abbreviation(ar.get('party_allmanak').lower())\n abbrv_pc = self.get_abbreviation(cndt.get('party').lower())\n\n tknz_pa = \"\".join(e for e in ar.get('party_allmanak').lower() if e.isalnum())\n tknz_pc = \"\".join(e for e in cndt.get('party').lower() if e.isalnum())\n\n # longest common substring problem\n sqm = self.SequenceMatcher(None, tknz_pa, tknz_pc)\n matching_blocks = sqm.get_matching_blocks()\n total_matching_size = 0\n for mb in matching_blocks:\n total_matching_size += mb.size\n\n if tknz_pa in tknz_pc or tknz_pc in tknz_pa or abbrv_pa in tknz_pc or abbrv_pc in tknz_pa:\n party_mapping.append({'al': ar.get('party_allmanak'), 'kr': cndt.get('party')})\n elif total_matching_size > 8:\n party_mapping.append({'al': ar.get('party_allmanak'), 'kr': cndt.get('party'), 'mb_size': total_matching_size})\n elif tknz_pa not in big_nine_names:\n party_unmap.append({'al': ar.get('party_allmanak'), 'kr': cndt.get('party')})\n\n party_mapping = [dict(t) for t in {tuple(d.items()) for d in party_mapping}]\n party_unmap = [dict(t) for t in {tuple(d.items()) for d in party_unmap}]\n unmappable_count += len(party_unmap)\n if all_party_mapping:\n # all_party_mapping.extend(party_mapping)\n counter = 0\n else:\n all_party_mapping = party_mapping\n\n final_party_mapping.append({'municipality_name': gmnt.get('municipality_name'), 'party_mapping': party_mapping})\n if len(party_unmap) > 0:\n unmappable_parties.append({'municipality_name': gmnt.get('municipality_name'), 'party_mapping': party_unmap})\n\n print(\"unmap count \" + str(unmappable_count))\n 
self.create_file('final_party_mapping.json', final_party_mapping)\n self.create_file('all_party_mapping.json', [dict(t) for t in {tuple(d.items()) for d in all_party_mapping}])\n self.create_file('unmappable_parties.json', unmappable_parties)\n\n return [dict(t) for t in {tuple(d.items()) for d in all_party_mapping}]\n\n def merge_mapping_files(self, fn1, fn2, output_name):\n mapping1 = self.import_file(fn1)\n mapping2 = self.import_file(fn2)\n for munip2 in mapping2:\n for munip1 in mapping1:\n if munip1.get('municipality_name') == munip2.get('municipality_name'):\n pm1 = munip1.get('party_mapping')\n pm2 = munip2.get('party_mapping')\n pm1.extend(pm2)\n\n self.create_file(output_name, mapping1)\n\nif __name__ == \"__main__\":\n CreatePartyMapping()\n","repo_name":"openstate/almanak-kiesraad-matcher","sub_path":"import_data/CreatePartyMapping.py","file_name":"CreatePartyMapping.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"34038687044","text":"import pathlib\nimport numpy as np\nimport tensorflow as tf\nimport config.configure as cfg\nimport time\nfrom PIL import Image\n\n# dynamic range quantization 动态范围量化\n# 激活值以浮点存储,在计算时动态量化为8位,处理后去量化为浮点精度\n# 权重在训练后量化,激活在推理时动态量化\nconverter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir=cfg.pb_model_path)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT] # 指定optimizations 属性可以转变tflite量化的方式\ntflite_dynamic_model = converter.convert()\ntflite_model_dir = pathlib.Path('.\\\\tflite_model')\ntflite_model_dir.mkdir(exist_ok=True,parents=True)\ntflite_dynamic_model_file = tflite_model_dir/\"dynamic_model.tflite\"\ntflite_dynamic_model_file.write_bytes(tflite_dynamic_model)\n\n# 测试dynamic_tflite量化推理的时间\ntest_image = np.array(Image.open(r'..\\VOCdevkit\\VOC2012\\JPEGImages\\2007_000032.jpg').resize(cfg.img_szie),dtype='float32')\ntest_image = np.expand_dims(test_image,axis=0)\ninterpreter_dynamic = tf.lite.Interpreter(model_path=str(tflite_dynamic_model_file))\ninterpreter_dynamic.allocate_tensors()\ninput_index = interpreter_dynamic.get_input_details()[0]['index']\noutput_index = interpreter_dynamic.get_output_details()[0]['index']\n\nstart_time = time.process_time()\ninterpreter_dynamic.set_tensor(input_index,test_image)\ninterpreter_dynamic.invoke()\nprediction = interpreter_dynamic.get_tensor(output_index)\nend_time = time.process_time()\n# intel cpu没有对量化推理计算进行优化,arm cpu进行优化后可以加快推理\nprint(f'dynamic_tflite推理时间{end_time - start_time}')","repo_name":"aikangjun/Unet","sub_path":"quantization/dynamic_range_quantization.py","file_name":"dynamic_range_quantization.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"72870692170","text":"# coding=utf-8\nimport emoji\nimport re\n\n\nclass clean_f(object):\n\n def __init__(self):\n self.URL_REGEX = re.compile(\n r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:\\'\".,<>?«»“”‘’]))',\n re.IGNORECASE)\n self.T1 = re.compile('#\\w+')\n self.T2 = re.compile('@\\w+')\n self.T3 = re.compile(r\"\\:\\S+\\:\")\n self.T4 = re.compile(r\"\\&\\S+\\;\")\n self.T5 = re.compile(r\"\\(.*\\)\")\n self.T6 = re.compile(r\"^\\s*\")\n self.T7 = re.compile(r\"([=,-,*])\\1+\")\n self.T8 = 
re.compile(r\"(\\d{4}[-/]\\d{1,2}[-/]\\d{1,2}\\s\\d{1,2}:\\d{1,2}\\d{1,2}:\\d{1,2}|\\d{4}[-/]\\d{1,2}[-/]\\d{1,2}\\s\\d{1,2}:\\d{1,2}|\\d{4}[-/]\\d{1,2}[-/]\\d{1,2})\")\n\n # 多余的字符串添加在这里\n self.RE = ['Read more here:', '复制', 'RT', 'Read More - - -', '■', 'T', '内容']\n\n def clean(self, t):\n print(t)\n if self.T8.fullmatch(t):\n return ''\n if self.URL_REGEX.fullmatch(t):\n return ''\n out = self.T1.sub(r'', t)\n out = self.T2.sub(r'', out)\n out = emoji.demojize(out)\n # 可以保留表情信息 作为str标签\n if not self.T8.sub(r'', out):\n out = self.T3.sub(r'', out)\n #out = self.T3.sub(r'', out)\n out = self.T4.sub(r'', out)\n out = self.T5.sub(r'', out)\n out = self.T6.sub(r'', out)\n out = self.T7.sub(r'', out)\n #out = self.URL_REGEX.sub(r'', out)\n #if re.search(self.URL_REGEX, out):\n #match_url = re.search(self.URL_REGEX, out)\n #pos = -1\n #while '//' in out:\n #if pos == out.index('//'):\n #break\n #pos = out.index('//')\n #if out.index('//') < match_url.start() or out.index('//') > match_url.end():\n #out = out[:out.index('//')] + out[out.index('//')+2:]\n #match_url = re.search(self.URL_REGEX, out)\n\n for s in self.RE:\n out = out.replace(s, '')\n return out\n","repo_name":"fortunatekiss/DocTranslation","sub_path":"clean_f.py","file_name":"clean_f.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"9647599030","text":"import turtle\n\nwn = turtle.Screen()\nwn.screensize(800, 600)\nalex = turtle.Turtle()\nalex.pensize(3)\n\ncolors = ['coral', 'cyan', 'gold', 'lavender', 'LightGrey']\nshape_numbers = 24\n\nfor k in range(shape_numbers):\n for i in colors:\n alex.color(i)\n alex.forward(100)\n alex.left(72)\n alex.left(360 / shape_numbers)\n\nwn.exitonclick()","repo_name":"yan-ren/programming-class","sub_path":"python_demo_programs/turtle_graphic/turtle_drawing_2.py","file_name":"turtle_drawing_2.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"70586275213","text":"import arrow\nimport logging\nimport datetime\n\nlogging.basicConfig(\n format=\"[%(asctime)s] %(levelname)s: %(message)s\", level=logging.INFO\n)\n\nfrom comms.models import (\n Email,\n InstructorNotificationSettings,\n ParentNotificationSettings,\n)\nfrom comms.templates import SESSION_REMINDER_TEMPLATE\nfrom scheduler.models import Session\n\n\nMISSED_TEMPLATE = \"d-128e4bccf2ee49f0b6190d0cb76eb045\"\n\n\ndef run():\n \"\"\"Checks for upcoming sessions and send reminder emails to parents and instructors.\"\"\"\n sessions = Session.objects.filter(\n start_datetime__gte=arrow.now().shift(hours=-8).datetime,\n start_datetime__lt=arrow.now().datetime,\n sent_missed_reminder=False,\n )\n logging.info(sessions)\n for session in sessions:\n email_data = {\n \"first_name\": \"Gina\",\n \"sessions\": [\n {\n \"title\": session.course.title,\n \"date\": (\n session.start_datetime - datetime.timedelta(hours=7)\n ).strftime(\"%m/%d/%Y\"),\n \"start_time\": (\n session.start_datetime - datetime.timedelta(hours=7)\n ).strftime(\"%I:%M %p\"),\n \"end_time\": (\n session.end_datetime - datetime.timedelta(hours=7)\n ).strftime(\"%I:%M %p\"),\n },\n ],\n }\n\n # parent reminders\n for enrollment in session.course.enrollment_set.all():\n primary_parent = enrollment.student.primary_parent\n email_data[\"first_name\"] = primary_parent.user.first_name\n parent_settings = ParentNotificationSettings.objects.get(\n parent=primary_parent\n )\n if 
parent_settings.session_reminder_email:\n Email.objects.create(\n template_id=MISSED_TEMPLATE,\n recipient=primary_parent.user.email,\n data=email_data,\n )\n\n session.sent_upcoming_reminder = True\n session.save()\n","repo_name":"omou-org/mainframe","sub_path":"comms/cronjobs/missed_session_reminder.py","file_name":"missed_session_reminder.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"28606501231","text":"import time # mengimport library time\nimport zmq # mengimport module zero mq\n\ncontext = zmq.Context() # membuat variabel baru dari zmq Context\nsocket = context.socket(zmq.REP) # membuat socket baru dengan tipe REPLY\nsocket.bind(\"tcp://10.20.0.252:5555\") # menginput protokol, IP address, dan port\n\nwhile True: # Berjalan selama kondisi True\n message = socket.recv() # socket/server menerima pesan yang dikirim\n print(\"Received request: %s\" % message) # memunculkan pesan yang diterima\n\n # do some work\n time.sleep(1) # membuat delay 1 detik\n\n socket.send(b\"World\") # mengirim pesan kepada client\n","repo_name":"adityaalifn/CSH3J3-Distributed-and-Parallel-System","sub_path":"Indirect Communiaction/hwserver.py","file_name":"hwserver.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"13596034737","text":"import numpy as np\nimport cv2\n\ndef fill_frame(frame):\n h, w = frame.shape[0], frame.shape[1]\n if h > w:\n _pad = np.zeros([h, int((h - w) / 2), 3])\n frame = np.concatenate([_pad, frame, _pad], axis=1)\n elif h < w:\n _pad = np.zeros([int((w - h) / 2), w, 3])\n frame = np.concatenate([_pad, frame, _pad], axis=0)\n frame = np.transpose(frame, [1, 0, 2])\n frame = cv2.resize(frame, (1024, 1024))\n return frame","repo_name":"shinnthant-lib/frames_split","sub_path":"fill_frame.py","file_name":"fill_frame.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"30014425615","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Python API using the REST interface of MISP \"\"\"\n\nimport json\nimport datetime\nimport requests\n\n\nclass PyMISP(object):\n \"\"\"\n Python API for MISP\n\n :param url: URL of the MISP instance you want to connect to\n :param key: API key of the user you want to use\n :param ssl: can be True or False (to check ot not the validity\n of the certificate. 
Or a CA_BUNDLE in case of self\n signed certiifcate (the concatenation of all the\n *.crt of the chain)\n :param out_type: Type of object (json or xml)\n \"\"\"\n\n def __init__(self, url, key, ssl=True, out_type='json'):\n self.url = url + '/events'\n self.key = key\n self.ssl = ssl\n self.out_type = out_type\n self.rest = self.url + '/{}'\n\n def __prepare_session(self, force_out=None):\n \"\"\"\n Prepare the headers of the session\n\n :param force_out: force the type of the expect output\n (overwrite the constructor)\n\n \"\"\"\n if force_out is not None:\n out = force_out\n else:\n out = self.out_type\n session = requests.Session()\n session.verify = self.ssl\n session.headers.update(\n {'Authorization': self.key,\n 'Accept': 'application/' + out,\n 'content-type': 'text/' + out})\n return session\n\n def __query(self, session, path, query):\n if query.get('error') is not None:\n return query\n url = self.rest.format(path)\n query = {'request': query}\n print(json.dumps(query))\n r = session.post(url, data=json.dumps(query))\n return r.json()\n\n # ############### REST API ################\n\n def get_index(self):\n \"\"\"\n Return the index.\n\n Warning, there's a limit on the number of results\n \"\"\"\n session = self.__prepare_session()\n return session.get(self.rest)\n\n def get_event(self, event_id):\n \"\"\"\n Get an event\n\n :param event_id: Event id to get\n \"\"\"\n session = self.__prepare_session()\n return session.get(self.rest.format(event_id))\n\n def add_event(self, event):\n \"\"\"\n Add a new event\n\n :param event: Event object to add\n \"\"\"\n session = self.__prepare_session()\n return session.post(self.url, data=event)\n\n def update_event(self, event_id, event):\n \"\"\"\n Update an event\n\n :param event_id: Event id to update\n :param event: Elements to add\n \"\"\"\n session = self.__prepare_session()\n return session.post(self.rest.format(event_id), data=event)\n\n def delete_event(self, event_id):\n \"\"\"\n Delete an event\n\n :param event_id: Event id to delete\n \"\"\"\n session = self.__prepare_session()\n return session.delete(self.rest.format(event_id))\n\n # ######## REST Search #########\n\n def __prepare_rest_search(self, values, not_values):\n \"\"\"\n Prepare a search, generate the chain processed by the server\n\n :param values: Values to search\n :param not_values: Values that should not be in the response\n \"\"\"\n to_return = ''\n if values is not None:\n if not isinstance(values, list):\n to_return += values\n else:\n to_return += '&&'.join(values)\n if not_values is not None:\n if len(to_return) > 0:\n to_return += '&&!'\n else:\n to_return += '!'\n if not isinstance(values, list):\n to_return += not_values\n else:\n to_return += '&&!'.join(not_values)\n return to_return\n\n def search(self, values=None, not_values=None, type_attribute=None,\n category=None, org=None, tags=None, not_tags=None, date_from=None,\n date_to=None):\n \"\"\"\n Search via the Rest API\n\n :param values: values to search for\n :param not_values: values *not* to search for\n :param type_attribute: Type of attribute\n :param category: Category to search\n :param org: Org reporting the event\n :param tags: Tags to search for\n :param not_tags: Tags *not* to search for\n :param date_from: First date\n :param date_to: Last date\n\n \"\"\"\n val = self.__prepare_rest_search(values, not_values).replace('/', '|')\n tag = self.__prepare_rest_search(tags, not_tags).replace(':', ';')\n query = {}\n if len(val) != 0:\n query['value'] = val\n if len(tag) != 0:\n query['tags'] 
= tag\n if type_attribute is not None:\n query['type'] = type_attribute\n if category is not None:\n query['category'] = category\n if org is not None:\n query['org'] = org\n if date_from is not None:\n if isinstance(date_from, datetime.date) or isinstance(date_to, datetime.datetime):\n query['from'] = date_from.strftime('%Y-%m-%d')\n else:\n query['from'] = date_from\n if date_to is not None:\n if isinstance(date_to, datetime.date) or isinstance(date_to, datetime.datetime):\n query['to'] = date_to.strftime('%Y-%m-%d')\n else:\n query['to'] = date_to\n\n session = self.__prepare_session()\n return self.__query(session, 'restSearch/download', query)\n\n def get_attachement(self, event_id):\n \"\"\"\n Get attachement of an event (not sample)\n\n :param event_id: Event id from where the attachements will\n be fetched\n \"\"\"\n attach = self.url + '/attributes/downloadAttachment/download/{}'\n session = self.__prepare_session()\n return session.get(attach.format(event_id))\n\n # ############## Export ###############\n\n def download_all(self):\n \"\"\"\n Download all event from the instance\n \"\"\"\n xml = self.url + '/xml/download'\n session = self.__prepare_session('xml')\n return session.get(xml)\n\n def download(self, event_id, with_attachement=False):\n \"\"\"\n Download one event in XML\n\n :param event_id: Event id of the event to download (same as get)\n \"\"\"\n template = self.url + '/events/xml/download/{}/{}'\n if with_attachement:\n attach = 'true'\n else:\n attach = 'false'\n session = self.__prepare_session('xml')\n return session.get(template.format(event_id, attach))\n\n ##########################################\n","repo_name":"Solgrid/PyMISP","sub_path":"pymisp/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"15"} +{"seq_id":"14707271233","text":"import stanfordnlp\nimport pandas as pd\n\nnlp = stanfordnlp.Pipeline(processors=\"tokenize,mwt,lemma,pos\")\nwith open('/Users/aprzybycien/projects/inzynierka/BEng-Thesis---Semantic-Analysis/data/articles/txt-format'\n '/article12.txt', encoding=\"utf-16\") as file:\n data = file.read()\ndoc = nlp(data)\ndoc.sentences[0].print_tokens()\n# dictionary that contains pos tags and their explanations\npos_dict = {\n 'CC': 'coordinating conjunction', 'CD': 'cardinal digit', 'DT': 'determiner',\n 'EX': 'existential there (like: \\\"there is\\\" ... think of it like \\\"there exists\\\")',\n 'FW': 'foreign word', 'IN': 'preposition/subordinating conjunction', 'JJ': 'adjective \\'big\\'',\n 'JJR': 'adjective, comparative \\'bigger\\'', 'JJS': 'adjective, superlative \\'biggest\\'',\n 'LS': 'list marker 1)', 'MD': 'modal could, will', 'NN': 'noun, singular \\'desk\\'',\n 'NNS': 'noun plural \\'desks\\'', 'NNP': 'proper noun, singular \\'Harrison\\'',\n 'NNPS': 'proper noun, plural \\'Americans\\'', 'PDT': 'predeterminer \\'all the kids\\'',\n 'POS': 'possessive ending parent\\'s', 'PRP': 'personal pronoun I, he, she',\n 'PRP$': 'possessive pronoun my, his, hers', 'RB': 'adverb very, silently,',\n 'RBR': 'adverb, comparative better', 'RBS': 'adverb, superlative best',\n 'RP': 'particle give up', 'TO': 'to go \\'to\\' the store.', 'UH': 'interjection errrrrrrrm',\n 'VB': 'verb, base form take', 'VBD': 'verb, past tense took',\n 'VBG': 'verb, gerund/present participle taking', 'VBN': 'verb, past participle taken',\n 'VBP': 'verb, sing. present, non-3d take', 'VBZ': 'verb, 3rd person sing. 
present takes',\n 'WDT': 'wh-determiner which', 'WP': 'wh-pronoun who, what', 'WP$': 'possessive wh-pronoun whose',\n 'WRB': 'wh-abverb where, when', 'QF': 'quantifier, bahut, thoda, kam (Hindi)', 'VM': 'main verb',\n 'PSP': 'postposition, common in indian langs', 'DEM': 'demonstrative, common in indian langs'\n}\n\n\n# extract parts of speech\ndef extract_pos(document):\n parsed_text = {'word': [], 'pos': [], 'exp': []}\n for sent in document.sentences:\n for wrd in sent.words:\n if wrd.pos in pos_dict.keys():\n pos_exp = pos_dict[wrd.pos]\n else:\n pos_exp = 'NA'\n parsed_text['word'].append(wrd.text)\n parsed_text['pos'].append(wrd.pos)\n parsed_text['exp'].append(pos_exp)\n # return a dataframe of pos and text\n return pd.DataFrame(parsed_text)\n\n\n# extract pos\ndf = extract_pos(doc)\nprint(df.head(10))\n","repo_name":"szymeklimek/BEng-Thesis---Semantic-Analysis","sub_path":"src/stanford/pos_extraction/pos_extract.py","file_name":"pos_extract.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"74298286735","text":"#display a data\nimport gradio as gr\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\ndef create_heatmap(matrix):\n heatmap = px.imshow(matrix.round(2), color_continuous_scale='RdYlGn', text_auto=True)\n heatmap.update_layout(width=500, height=500)\n heatmap.update_layout(xaxis_title=\"Row\", yaxis_title=\"Column\")\n return heatmap\ndef mat_mult(A, B):\n\n #convert dataframes A and B to numpy arrays, and multiply them\n A = A.to_numpy().astype(np.float32)\n B = B.to_numpy().astype(np.float32)\n #Matrix multiplication\n C = np.dot(A, B)\n #convert matrix C to a heatmap image\n return create_heatmap(C)\n\n#Some Interesting examples\n\nexample0 = [pd.DataFrame([[1, 1, -1]]), pd.DataFrame([[-1], [1], [0]])]\nexample1 = [pd.DataFrame([[1, 2], [3, 4]]), pd.DataFrame([[5, 6], [7, 8]])]\nexample2 = [pd.DataFrame([[-1, 2], [3, 0]]), pd.DataFrame([[0, 2], [1, -2]])]\nexample3 = [pd.DataFrame([[1, 2, 3], [4, 5, 6]]), pd.DataFrame([[7, 8], [9, 10], [11, 12]])]\nexample4 = [pd.DataFrame([[1, 0, -1], [0, 1, 0], [1, 0, 1]]), pd.DataFrame([[1, 1, 1], [-1, -1, -1], [0, 0, 0]])]\nexample5 = [pd.DataFrame(np.random.randn(10, 10)).round(2), pd.DataFrame(np.random.randn(10, 10)).round(2)]\nexamples = [example0, example1, example2, example3, example4, example5]\n\ntitle = \"Matrix Multiplication\"\ndescription = \"Multiply two matrices and display the resulting heatmap.\"\n\napp = gr.Interface(mat_mult, \n inputs=[gr.DataFrame(row_count=(1, 'dynamic'), col_count=(1, 'dynamic'), headers=None), \n gr.DataFrame(row_count=(1, 'dynamic'), col_count=(1, 'dynamic'), headers=None)], \n outputs=gr.Plot(),\n examples=examples, title=title, description=description)\napp.launch()\n","repo_name":"ku5h/gradiolly_ml","sub_path":"mat_mult.py","file_name":"mat_mult.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"39962406087","text":"#!/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom scapy.all import sniff, ARP\nfrom signal import signal, SIGINT\nimport sys\n\n\ndef start(db_file, iface):\n\n ip_mac = {}\n\n def watch_arp(pkt):\n \"\"\"executes when got arp packet from sniffer\"\"\"\n\n if pkt[ARP].op == 2:\n print(\"{} at {}\".format(pkt[ARP].hwsrc, pkt[ARP].psrc))\n\n if not ip_mac.get(pkt[ARP].psrc):\n print(\"->new\")\n ip_mac[pkt[ARP].psrc] = pkt[ARP].hwsrc\n else:\n if 
ip_mac[pkt[ARP].psrc] == pkt[ARP].hwsrc:\n                print(\"->old\")\n            else:\n                print(\"->changed {} -> {}, probably mitm or reconnect\"\n                      .format(ip_mac[pkt[ARP].psrc], pkt[ARP].hwsrc))\n\n    def sig_int_handler(signum, frame):\n        \"\"\"manages SIGINT\"\"\"\n\n        print(\"received SIGINT. saving ARP db to {}\".format(db_file))\n        try:\n            f = open(db_file, \"w\")\n            for (ip, mac) in ip_mac.items():\n                f.write(ip + \" \" + mac + \"\\n\")\n\n            f.close()\n            print(\"done.\")\n            sys.exit(0)\n        except IOError:\n            print(\"Cannot write file \" + db_file)\n            sys.exit(1)\n\n    signal(SIGINT, sig_int_handler)\n\n    try:\n        fh = open(db_file, \"r\")\n    except IOError:\n        print(\"Cannot read file {}\".format(db_file))\n        sys.exit(1)\n\n    for line in fh:\n        line = line.rstrip()\n        (ip, mac) = line.split(\" \")\n        ip_mac[ip] = mac\n\n    sniff(prn=watch_arp,\n          filter=\"arp\",\n          iface=iface,\n          store=0)\n\n\nif __name__ == \"__main__\":\n\n    if len(sys.argv) != 3:\n        print(\"Usage: python scriptname.py ifname db_file\")\n        sys.exit(1)\n\n    start(sys.argv[2], sys.argv[1])\n","repo_name":"amine177/python_attack_and_defense","sub_path":"ch4/arp_watcher.py","file_name":"arp_watcher.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"73458910735","text":"class Player:\r\n\t\"\"\"Player-class: stores data on team colors and points.\"\"\"\r\n\t\t\r\n\tdef __init__( self, teamcolor):\r\n\t\tself.teamcolor = teamcolor\r\n\t\tself.points = 0\r\n\t\r\n\tdef tellscore( self ):\r\n\t\tprint( \"I am\", self.teamcolor, \", we have\", self.points, \"points!\" )\r\n\t\t\r\n\tdef goal( self ):\r\n\t\tself.points += 1\r\n\t\t\r\ncolor1 = input( \"What color do I get?: \" )\r\nplayer1 = Player( color1 )\r\n\r\ncolor2 = input( \"What color do I get?: \" )\r\nplayer2 = Player( color2 )\r\n\r\nfor i in range(2):\r\n\tplayer1.goal()\r\n\t\r\nplayer2.goal()\r\n\r\nplayer1.tellscore()\r\nplayer2.tellscore()","repo_name":"laivii/PY-perusteet","sub_path":"Module 10/initializing_a_class.py","file_name":"initializing_a_class.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"34022660130","text":"# D8 Emulator\nimport os\nimport queue\nimport re\nimport socket\nimport threading\nfrom textwrap import wrap\n\nfrom d8 import instruction\nfrom d8 import register as map_reg_num\n\nmap_num_reg = {\n    value: key.upper() for key, value in map_reg_num.items()\n}  # invert the dictionary\n\ninstruction_map = {value: key for key, value in instruction.items()}\n\n# Memory locations of the peripherals\nperiph_map = {\"SPPS\": 2, \"TERM\": 3, \"KBD\": 4}\n\nstop_threads = False\n\n\ndef terminal(screen_q, keyboard_q):\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.bind((\"127.0.0.1\", 6543))\n        s.listen()\n        s.settimeout(0.1)\n        while not stop_threads:\n            try:\n                conn, addr = s.accept()\n            except socket.timeout:\n                continue\n            with conn:\n                # print(\"Connection from:\", addr)\n                conn.settimeout(0.1)\n                while not stop_threads:\n                    try:\n                        keystrokes = conn.recv(100)\n                        for key in keystrokes:  # TODO may need .decode() ?\n                            if isinstance(key, str):\n                                key = ord(key)\n                            keyboard_q.put(key)\n                    except socket.timeout:\n                        pass\n\n                    try:\n                        screen_char = screen_q.get(block=False)\n                        if screen_char:\n                            conn.sendall(chr(screen_char).encode())\n                    except queue.Empty:\n                        pass\n            conn.close()\n\n\nclass Screen:\n    \"\"\"Screen peripheral handler.\"\"\"\n\n    def __init__(self, screen_q):\n        self.screen_q = screen_q\n        
self.value = 0\n\n def read(self):\n return self.value\n\n def write(self, value):\n self.value = value\n self.screen_q.put(value)\n\n\nclass Keyboard:\n \"\"\"Keyboard peripheral handler.\"\"\"\n\n def __init__(self, keyboard_q):\n self.keyboard_q = keyboard_q\n self.key = 0\n\n def read(self):\n try:\n self.key = self.keyboard_q.get(block=False)\n except queue.Empty:\n self.key = 0\n return self.key\n\n def write(self, value):\n # Writes to keyboad are ignored\n pass\n\n\nclass Memory(dict):\n \"\"\"Read and write to memory, calling out to memory mapped peripherals as needed.\"\"\"\n\n def __init__(self, screen_q, keyboard_q):\n \"\"\"Initialise an empty memory and the peripherals.\"\"\"\n dict.__init__(self, {})\n screen = Screen(screen_q)\n keyboard = Keyboard(keyboard_q)\n self._peripherals = {periph_map[\"TERM\"]: screen, periph_map[\"KBD\"]: keyboard}\n\n def __getitem__(self, address):\n if address in self._peripherals:\n return self._peripherals[address].read()\n else:\n return dict.__getitem__(self, address)\n\n def __setitem__(self, address, data):\n if not isinstance(address, int) or address < 0 or address > 65535:\n raise KeyError(\n f\"Memory location {address} must be an int in range [0, 65535]\"\n )\n if address in self._peripherals:\n self._peripherals[address].write(data)\n else:\n dict.__setitem__(self, address, data)\n\n\nclass Emulator:\n def __init__(self, filename):\n screen_q = queue.Queue()\n keyboard_q = queue.Queue()\n self._threads = []\n t = threading.Thread(\n name=\"terminal\", target=terminal, args=(screen_q, keyboard_q)\n )\n t.start()\n self._threads.append(t)\n\n self.memory = Memory(screen_q, keyboard_q)\n self.line_map = {}\n self.variables = {}\n self._load_d8_file(filename, self.memory, self.line_map, self.variables)\n self.reset()\n self.breakpoints = []\n\n def shutdown(self):\n global stop_threads\n stop_threads = True\n for t in self._threads:\n t.join() # wait for termination\n\n def reset(self):\n self.pc = 0\n self.status = {\"zero\": False, \"carry\": False, \"stop\": False}\n self.registers = [0] * 8\n self.ir = 0\n\n def step(self):\n \"\"\"Step the CPU by 1 instruction.\"\"\"\n if not self.status[\"stop\"]:\n self._fetch()\n opcode, operands = self._decode()\n self._execute(opcode, operands)\n\n def run(self):\n \"\"\"\n Run the CPU until we hit a breakpoint or Stop flag is true.\n Break out of the loop with CTRL+C in case it hangs\n \"\"\"\n self.step() # First step to move away from any breakpoints\n while not self.status[\"stop\"] and self.pc not in self.breakpoints:\n self.step()\n\n def add_breakpoint(self, address):\n \"\"\"Add a breakpoint.\"\"\"\n if address not in self.breakpoints:\n self.breakpoints.append(address)\n\n def delete_breakpoint(self, address):\n \"\"\"Delete an existing breakpoint.\"\"\"\n if address in self.breakpoints:\n self.breakpoints.remove(address)\n\n def display_source(self, source):\n line_number = self.line_map[self.pc]\n source_line = source[line_number - 1]\n print(f\"{line_number} : {source_line}\")\n\n def display_variables(self):\n \"\"\"Display all the variables and their content.\"\"\"\n print(f\"Status: {self.status}\")\n print(f\"Registers: {self.registers}\\tPC: 0x{self.pc:04x}\")\n for name, v in self.variables.items():\n content = [\n self.memory[adr]\n for adr in range(v[\"address\"], v[\"address\"] + v[\"length\"])\n ]\n print(f'{name}[{v[\"length\"]}]: {content}')\n\n def _load_d8_file(self, filename, memory, line_map, variables):\n \"\"\"Load the .d8 file in to memory.\"\"\"\n with 
open(filename, \"r\") as f:\n for line in f.readlines():\n mem, line_number, variable = self._parseline(line)\n if mem:\n memory.update(mem)\n if line_number:\n line_map.update(line_number)\n if variable:\n variables.update(variable)\n\n def load_source(self, filename):\n \"\"\"\n Load the original source .asm file\n Returns a list of lines that you can index in to\n \"\"\"\n filename = (\n os.path.splitext(filename)[0] + \".asm\"\n ) # TODO should get this from the header in .d8 file\n with open(filename, \"r\") as f:\n lines = f.readlines()\n lines = [line.rstrip() for line in lines]\n return lines\n\n def _parseline(self, line):\n \"\"\"\n Given a line in d8 format, return a dictionary with the key as the address\n and the value as a tuple of (memory contents, line number, variable)\n \"\"\"\n if line.startswith(\";\"):\n # Skip comment lines\n return None, None, None\n else:\n line = line.split(\"|\")\n address = int(line[0].strip(), 16)\n value = line[1].strip()\n line_number = int(line[2].strip(), 10)\n debug = line[3].strip()\n if debug.startswith(\"var:\"):\n # Handle variables\n memory = Memory(None, None) # TODO fix this hack\n result = re.search(r\"var\\:(\\w+)\\[(\\d+)\\]\", debug)\n name = result.groups()[0]\n length = int(result.groups()[1], 10)\n for adr, val in zip(range(address, address + length), wrap(value, 2)):\n memory[adr] = int(val, 16)\n return (\n memory,\n {address: line_number},\n {name: {\"length\": length, \"address\": address}},\n )\n else:\n if len(value) == 4:\n # Handle machine instruction\n high_byte = int(value[0:2], 16)\n low_byte = int(value[2:], 16)\n return (\n {address: high_byte, address + 1: low_byte},\n {address: line_number},\n None,\n )\n else:\n raise Exception(f\"Error parsing line {line_number}\")\n\n def _fetch(self):\n \"\"\"\n Fetch an instruction from memory, load it in to the instruction register (ir)\n Increment the program counter (pc)\n \"\"\"\n self.ir = self.memory[self.pc] << 8 # Load high byte of instruction register\n self.pc += 1\n self.ir |= self.memory[self.pc] # Load the low byte\n self.pc += 1\n\n def _decode(self):\n \"\"\"\n Decode the instruction register (ir) in to the opcode and operations.\n Map the integer opcode to the text instruction.\n \"\"\"\n opcode = (self.ir & 0b1111100000000000) >> 11\n operands = self.ir & 0b0000011111111111\n opcode = instruction_map[opcode]\n return opcode, operands\n\n def _get_reg(self, operands):\n \"\"\"Get 1 register.\"\"\"\n R = operands >> 8\n return R\n\n def _get_reg_reg(self, operands):\n \"\"\"Get 2 registers.\"\"\"\n Rd = operands >> 8\n Rs = (operands & 0b01110000) >> 4\n return Rd, Rs\n\n def _get_reg_reg_reg(self, operands):\n \"\"\"Get 3 registers.\"\"\"\n Rd = operands >> 8\n Rs1 = (operands & 0b01110000) >> 4\n Rs2 = operands & 0b00000111\n return Rd, Rs1, Rs2\n\n def _get_reg_opr8u(self, operands):\n \"\"\"Get 1 register and an 8-bit unsigned value.\"\"\"\n Rd = operands >> 8\n opr8u = operands & 0xFF\n return Rd, opr8u\n\n def _get_reg_opr8s(self, operands):\n \"\"\"Get 1 register and an 8-bit signed value.\"\"\"\n Rd = operands >> 8\n opr8s = operands & 0xFF\n # Two's complement\n if opr8s > 127:\n offset = opr8s - 2 ** 8\n else:\n offset = opr8s\n return Rd, offset\n\n def _get_opr11s(self, operands):\n \"\"\"Get an 11-bit value.\"\"\"\n opr11s = operands\n # Two's complement\n if opr11s > 1023:\n offset = opr11s - 2 ** 11\n else:\n offset = opr11s\n return offset\n\n def _execute(self, opc, opr):\n \"\"\"Execute the current opcode.\"\"\"\n if opc == \"stop\":\n 
self.status[\"stop\"] = True\n elif opc == \"ldi\":\n Rd, data = self._get_reg_opr8u(opr)\n self.registers[Rd] = data\n # print(f'{map_num_reg[Rd]}<-{data}')\n elif opc == \"ldd\":\n Rd, lsb = self._get_reg_opr8u(opr)\n address = self.registers[map_reg_num[\"PAGE\"]] << 8 | lsb\n data = self.memory[address]\n self.registers[Rd] = data\n # print(f'{map_num_reg[Rd]}<-{data}<-memory[{address}]')\n elif opc == \"ldx\":\n Rd, offset = self._get_reg_opr8s(opr)\n address = (\n self.registers[map_reg_num[\"PAGE\"]] << 8\n | self.registers[map_reg_num[\"X\"]] + offset\n )\n data = self.memory[address]\n self.registers[Rd] = data\n # print(f'{map_num_reg[Rd]}<-{data}<-memory[X={address}]')\n elif opc == \"ldsp\":\n Rd, offset = self._get_reg_opr8s(opr)\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n ) + offset\n data = self.memory[address]\n self.registers[Rd] = data\n # print(f'{map_num_reg[Rd]}<-{data}<-memory[X={address}]')\n elif opc == \"std\":\n Rs, lsb = self._get_reg_opr8u(opr)\n address = self.registers[map_reg_num[\"PAGE\"]] << 8 | lsb\n data = self.registers[Rs]\n self.memory[address] = data\n # print(f'memory[{address}]<-{data}<-{map_num_reg[Rs]}')\n elif opc == \"stx\":\n Rs, offset = self._get_reg_opr8s(opr)\n data = self.registers[Rs]\n address = (\n self.registers[map_reg_num[\"PAGE\"]] << 8\n | self.registers[map_reg_num[\"X\"]] + offset\n )\n self.memory[address] = data\n # print(f'memory[{address}]<-{data}<-{map_num_reg[Rs]}')\n elif opc == \"stsp\":\n Rs, offset = self._get_reg_opr8s(opr)\n data = self.registers[Rs]\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n ) + offset\n self.memory[address] = data\n # print(f'memory[{address}]<-{data}<-{map_num_reg[Rs]}')\n elif opc in [\"mov\", \"nop\"]:\n Rd, Rs = self._get_reg_reg(opr)\n data = self.registers[Rs]\n self.registers[Rd] = data\n # print(f'{map_num_reg[Rd]}<-{data}<-{map_num_reg[Rs]}')\n elif opc == \"bra\":\n self.pc = self.pc + self._get_opr11s(opr)\n elif opc == \"beq\":\n if self.status[\"zero\"]:\n self.pc = self.pc + self._get_opr11s(opr)\n elif opc == \"bne\":\n if not self.status[\"zero\"]:\n self.pc = self.pc + self._get_opr11s(opr)\n elif opc == \"bcs\":\n if self.status[\"carry\"]:\n self.pc = self.pc + self._get_opr11s(opr)\n elif opc == \"bcc\":\n if not self.status[\"carry\"]:\n self.pc = self.pc + self._get_opr11s(opr)\n elif opc == \"bsr\":\n data = self.pc & 0xFF # Low byte first\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n )\n self.memory[address] = data\n self.registers[map_reg_num[\"SP\"]] += -1 # Always post decrement\n data = self.pc >> 8 # High byte\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n )\n self.memory[address] = data\n self.registers[map_reg_num[\"SP\"]] += -1 # Always post decrement\n self.pc = self.pc + self._get_opr11s(opr) # Then branch\n elif opc == \"rts\":\n # Because of post increment use the operand to store an offset of 1 so get correct byte from stack\n _, offset = self._get_reg_opr8s(opr)\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n ) + offset\n SPH = self.memory[address]\n self.registers[map_reg_num[\"SP\"]] += 1 # Always post increment\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n ) + offset\n SPL = self.memory[address]\n self.registers[map_reg_num[\"SP\"]] += 1 # Always post increment\n 
self.pc = SPH << 8 | SPL\n elif opc in [\n \"add\",\n \"adc\",\n \"inc\",\n \"sbb\",\n \"dec\",\n \"and\",\n \"or\",\n \"xor\",\n \"not\",\n \"rolc\",\n \"rorc\",\n ]:\n self._alu(opc, opr)\n elif opc in [\"clc\", \"sec\"]:\n self.status[\"carry\"] = opc == \"sec\"\n elif opc == \"psh\":\n Rs, offset = self._get_reg_opr8s(opr)\n data = self.registers[Rs]\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n ) + offset\n self.memory[address] = data\n self.registers[map_reg_num[\"SP\"]] += -1 # Always post decrement\n # print(f'memory[{address}]<-{data}<-{map_num_reg[Rs]}')\n elif opc == \"pul\":\n Rd, offset = self._get_reg_opr8s(opr)\n address = (\n self.memory[periph_map[\"SPPS\"]] << 8 | self.registers[map_reg_num[\"SP\"]]\n ) + offset\n data = self.memory[address]\n self.registers[Rd] = data\n self.registers[map_reg_num[\"SP\"]] += 1 # Always post increment\n # print(f'{map_num_reg[Rd]}<-{data}<-memory[X={address}]')\n else:\n # print(self.status)\n # print(self.registers)\n raise Exception(f\"Unrecognised opcode: {opc} opr: 0b{opr:011b} ({opr})\")\n\n def _alu(self, opcode, operands):\n \"\"\"Emulate the ALU execution cycles.\"\"\"\n\n def _full_add(Rs1, Rs2, carry):\n \"\"\"Implement the full adder.\"\"\"\n Rd = Rs1 + Rs2 + carry\n carry = Rd > 0xFF\n Rd &= 0xFF # Ensure result in range 0 to 0xFF\n return Rd, carry\n\n def _sub_with_borrow(Rs1, Rs2, carry):\n \"\"\"Implement subtract.\"\"\"\n Rd = Rs1 - Rs2 - carry\n carry = Rd < 0\n Rd = int(\n bin(Rd & 0b11111111), 2\n ) # Convert negative number to 2's complement\n return Rd, carry\n\n Rd, Rs1, Rs2 = self._get_reg_reg_reg(operands)\n\n if opcode == \"add\":\n data, self.status[\"carry\"] = _full_add(\n self.registers[Rs1], self.registers[Rs2], 0\n )\n elif opcode in [\"adc\", \"rolc\"]:\n data, self.status[\"carry\"] = _full_add(\n self.registers[Rs1], self.registers[Rs2], self.status[\"carry\"]\n )\n elif opcode == \"inc\":\n data, self.status[\"carry\"] = _full_add(self.registers[Rs1], 0, 1)\n elif opcode == \"sbb\":\n # If bit 7 in IR (CMP flag) is set then force carry to 0\n if operands & 0b10000000:\n carry = 0\n else:\n carry = self.status[\"carry\"]\n data, self.status[\"carry\"] = _sub_with_borrow(\n self.registers[Rs1], self.registers[Rs2], carry\n )\n elif opcode == \"dec\":\n data, self.status[\"carry\"] = _sub_with_borrow(self.registers[Rs1], 0, 1)\n elif opcode == \"and\":\n data = self.registers[Rs1] & self.registers[Rs2]\n self.status[\"carry\"] = 0\n elif opcode == \"or\":\n data = self.registers[Rs1] | self.registers[Rs2]\n self.status[\"carry\"] = 0\n elif opcode == \"xor\":\n data = self.registers[Rs1] ^ self.registers[Rs2]\n self.status[\"carry\"] = 0\n elif opcode == \"not\":\n data = ~self.registers[Rs1]\n self.status[\"carry\"] = 0\n elif opcode == \"rorc\":\n # Rotate right through carry\n data = int(self.status[\"carry\"]) << 8\n data |= self.registers[Rs1]\n self.status[\"carry\"] = bool(\n data % 2\n ) # New carry value is the least sig bit\n data = data >> 1\n\n # Set the status bits\n self.status[\"zero\"] = data == 0\n\n # If bit 7 in IR (CMP flag) is clear then save the result\n if operands & 0b10000000 == 0:\n self.registers[Rd] = data\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\", help=\".d8 file to load in to emulator\")\n args = parser.parse_args()\n\n d8 = Emulator(args.filename)\n source = d8.load_source(args.filename)\n\n while not d8.status[\"stop\"]:\n 
d8.display_source(source)\n d8.step()\n d8.display_variables()\n input() # Press Enter to execute next instruction\n","repo_name":"dalehumby/d8","sub_path":"src/emulate.py","file_name":"emulate.py","file_ext":"py","file_size_in_byte":19247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"28977263757","text":"from ..models import *\nfrom rest_framework import serializers\nfrom myaccounts.models import MyUser\n\n\nclass ChoiceSerializer(serializers.ModelSerializer):\n class Meta:\n model = Choice\n fields = ['choice_title']\n\nclass AnswerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Answer\n fields = ['answer']\n\nclass QuestionSerializer(serializers.ModelSerializer):\n\n choices_of_question = ChoiceSerializer(many=True)\n answer_of_question = AnswerSerializer() #only show for create or updeat\n\n class Meta:\n model = Question\n fields = '__all__'\n\nclass AssignmentListSerializer(serializers.ModelSerializer): #for list all of assignment without questions\n class Meta:\n model = Assignment\n fields = '__all__'\n\nclass AssignmentSerializer(serializers.ModelSerializer):\n\n questions_of_assignment = QuestionSerializer(many=True)\n \n class Meta:\n model = Assignment\n fields = ['questions_of_assignment']\n\n def create(self, validated_data):\n \"\"\"Creating assignment in DB\"\"\"\n # choices = question_data.pop('choices') # it's wrong cause question_data haven't define yet\n questions_data = validated_data.pop('questions_of_assignment')\n assignment = Assignment.objects.create(**validated_data)\n\n \"\"\"Creating questions and choices in DB\"\"\"\n order = 0\n for question_data in questions_data:\n choices_data = question_data.pop('choices_of_question')\n answer_data = question_data.pop('answer_of_question')\n created_question = Question.objects.create(assignment=assignment, **question_data)\n created_answer = Answer.objects.create(question = created_question, **answer_data)\n order += 1\n\n for choice_data in choices_data:\n created_choice = created_question.choices_of_question.create(**choice_data)\n return assignment\n \nclass GradedAssignmentListSerializer(serializers.ModelSerializer):\n class Meta:\n model = GradedAssignment\n fields = '__all__'\n\nclass StudentAnswerSerializer(serializers.ModelSerializer):\n class Meta:\n model = StudentAnswer\n fields = ['answer_text']\n\nclass TakeQuestionSerializer(serializers.ModelSerializer):\n # answer_of_question = AnswerSerializer() #only show for create or updeat\n answer_of_student = StudentAnswerSerializer()\n id = serializers.IntegerField()\n class Meta:\n model = Question\n fields = ['id', 'question_title', 'answer_of_student']\n\nclass GradedAssignmentSerializer(serializers.ModelSerializer):\n class Meta:\n model = GradedAssignment\n fields = ['id', 'grade', 'progress', 'completed']\n # extra_kwargs = {\n # 'grade': {'read_only': True},\n # 'completed': {'read_only': True},\n # 'progress': {'read_only': True}\n # }\n\n\nclass TakeAssignmentSerializer(serializers.ModelSerializer):\n \n questions_of_assignment = TakeQuestionSerializer(many=True)\n # graded_assignment = GradedAssignmentSerializer()\n # assignment_id = serializers.IntegerField();\n\n class Meta:\n model = Assignment\n fields = ['teacher', 'title', 'questions_of_assignment']\n\n def create(self, validated_data): #validated_data == request and use .pop to get list[] \n # data = request\n # print ('VALIDATED DATA', validated_data)\n valid_student = self.context.get('student')\n taken_assignment = 
Assignment.objects.get(title=validated_data['title'])\n        \n        \"\"\"Counting grade\"\"\"\n        counting = GradedAssignment.graded_objects.compute_grade(\n            student=valid_student,\n            assignment=taken_assignment,\n            validated_data = validated_data\n        )\n\n\n        \"\"\"Creating or Updating GradedAssignment and StudentAnswer\"\"\"\n        taken_assignment = GradedAssignment.graded_objects.create(\n            student=valid_student,\n            assignment=taken_assignment,\n            validated_data = validated_data\n        )\n\n        return taken_assignment\n\n\n\nclass PendingAssignmentSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = GradedAssignment\n        fields = ['assignment', 'grade', 'progress']\n\n    \n","repo_name":"dangthien2704/assignment_project","sub_path":"assignments/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"37044783757","text":"\"\"\"\nImage processing tools that use the lsst stack. \n\"\"\"\nfrom __future__ import division, print_function\n\nimport lsst.afw.math as afwMath\n\n__all__ = ['smooth_gauss']\n\ndef smooth_gauss(masked_image, sigma, nsigma=7.0):\n    \"\"\"\n    Smooth image with a Gaussian kernel. \n\n    Parameters\n    ----------\n    masked_image : lsst.afw.image.imageLib.MaskedImageF\n        Masked image object to be smoothed\n    sigma : float\n        Standard deviation of Gaussian\n    nsigma : float, optional\n        Number of sigma for kernel width\n\n    Returns\n    -------\n    convolved_image : lsst.afw.image.imageLib.MaskedImageF\n        The convolved masked image\n    \"\"\"\n    width = (int(sigma*nsigma + 0.5) // 2)*2 + 1 # make sure it is odd\n    gauss_func = afwMath.GaussianFunction1D(sigma)\n    gauss_kern = afwMath.SeparableKernel(width, width, gauss_func, gauss_func)\n    convolved_image = masked_image.Factory(masked_image.getBBox())\n    afwMath.convolve(convolved_image, masked_image, gauss_kern, \n                     afwMath.ConvolutionControl())\n    return convolved_image\n","repo_name":"johnnygreco/lsstutils","sub_path":"lsstutils/imgproc/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"37922837329","text":"# 1. Function that computes and returns the sum of two numbers\n\ndef suma():\n    a = [2, 5]\n    x = sum(a)\n    return x\n\n\nsuma()\n\n\n# 2. Function that returns TRUE if a number is even, FALSE if it is odd\n\ndef odd_even_number(x):\n    if x % 2 == 0:\n        return True\n    else:\n        return False\n\n\nprint(odd_even_number(4))\n\n\n# 3. Function that returns the total number of characters in your full name (last name, first name, middle name)\n\ndef numar_caracter(nume, prenume, nume_mijlociu):\n    nume_complet = nume + prenume + nume_mijlociu\n    return len(nume_complet)\n\n\nprint(numar_caracter(\"klonoczki\", \"florin\", \"silviu\"))\n\n\n# 4. Function that returns the area of a rectangle\n\ndef aria_dreptunghiului(latime, lungime):\n    aria = latime * lungime\n    return aria\n\n\nprint(aria_dreptunghiului(10, 40))\n\n# 5. Function that returns the area of a circle\nimport math\n\n\ndef area_of_the_circle(Radius):\n    area = Radius ** 2 * math.pi\n    return area\n\n\nRadius = float(input(\"Please enter the radius of the given circle: \"))\nprint(\" The area of the given circle is: \", area_of_the_circle(Radius))\n\n\n# 6. 
Function that returns True if a character x is found in a given string and False if it is not.\n\ndef get_char(x, text):\n    if x in text:\n        return True\n    else:\n        return False\n\n\nprint(get_char('B', 'Buna, ce faci?'))\n\n\n# 7. Function without a return, receives a string and prints to the screen:\n# ● The number of lower case characters is x\n# ● The number of upper case characters is y\n\ndef char_lower_upper(text):\n    calc_upper = 0\n    calc_lower = 0\n    for cuvinte in text:\n        if cuvinte.islower():\n            calc_lower += 1\n        if cuvinte.isupper():\n            calc_upper += 1\n    print(f\"The number of lower case characters is {calc_lower}\")\n    print(f\"The number of upper case characters is {calc_upper}\")\n\n\nchar_lower_upper(\"Buna, ce faci Geo?\")\n\n\n# 8. Function that receives a LIST of numbers and returns a LIST with only the positive numbers\n\ndef positive_list(my_list):\n    my_list_positive = []\n    for x in my_list:\n        if x >= 0:\n            my_list_positive.append(x)\n    return my_list_positive\n\n\nmy_list = [2, 3, -1, 5, 0, -6]\nprint(positive_list(my_list))\n\n\n# 9. Function that returns nothing. Receives two numbers and PRINTS\n# ● The first number x is greater than the second number y\n# ● The second number y is greater than the first number x\n# ● The numbers are equal.\n\ndef numere(x, y):\n    if x > y:\n        print(f\"The first number {x} is greater than the second number {y}\")\n    elif y > x:\n        print(f\"The second number {y} is greater than the first number {x}\")\n    else:\n        print(\"The numbers are equal.\")\n\n\nnumere(2, 2)\n\n\n# 10. Function that receives a number and a set of numbers.\n# ● Prints 'I added the new number to the set' + returns True\n# ● Prints 'I did not add the number to the set. It already exists' + returns False\n\ndef adauga(x, my_set):\n    if x not in my_set:\n        my_set.add(x)\n        print(f'I added the new number {x} in the set!')\n        return True\n    else:\n        print(f\"I didn't add the number {x}, it is already in the set!\")\n        return False\n\n\nprint(adauga(5, {2, 3, 6}))\n\n# 11. Function that receives a month of the year and returns how many days that month has.\n\nyear = {'ianuarie': '31', 'februarie': '28/29', 'martie': '31', 'aprilie': '30', 'mai': '31', 'iunie': '30',\n        'iulie': '31', 'august': '31', 'septembrie': '30', 'octombrie': '31', 'noiembrie': '30',\n        'decembrie': '31'}\n\n\ndef get_days_of_month(get_month, year):\n    for month, days in year.items():\n        if month == get_month:\n            return days\n\n\nprint(get_days_of_month(get_month=input('enter a month: '),\n                        year={'ianuarie': '31', 'februarie': '28/29', 'martie': '31', 'aprilie': '30', 'mai': '31',\n                              'iunie': '30', 'iulie': '31', 'august': '31', 'septembrie': '30', 'octombrie': '31',\n                              'noiembrie': '30', 'decembrie': '31'}))\n\n\n# 12. Calculator function that returns 4 values: the sum, difference, product and\n# quotient of two numbers.\n# In the end you will be able to do:\n# a, b, c, d = calculator(10, 2)\n# ● print(\"Sum: \", a)\n# ● print(\"Difference: \", b)\n# ● print(\"Product: \", c)\n# ● print(\"Quotient: \", d)\n\ndef calculator(x, y):\n    a = x + y\n    b = x - y\n    c = x * y\n    d = x / y\n    return a, b, c, d\n\n\na, b, c, d = calculator(10, 2)\nprint(\"Sum: \", a)\nprint(\"Difference: \", b)\nprint(\"Product: \", c)\nprint(\"Quotient: \", d)\n\n\n# 13. 
Function that receives a list of digits (i.e. only 0-9)\n# Example: [1, 3, 1, 5, 9, 7, 7, 5, 5]\n# Returns a DICT that tells us how many times each digit appears\n# => dict {\n#     0: 0\n#     1: 2\n#     2: 0\n#     3: 1\n#     4: 0\n#     5: 3\n#     6: 0\n#     7: 2\n#     8: 0\n#     9: 1\n# }\ndef count_list(my_list):\n    contor = {}\n    for i in my_list:\n        if i in contor:\n            contor[i] += 1\n        else:\n            contor[i] = 1\n    return contor\n\n\nprint(count_list(my_list=[1, 3, 1, 5, 9, 7, 7, 5, 5]))\n\n\n# 14. Function that receives 3 numbers. Returns the maximum value among them\n\ndef max_number(x, y, z):\n    if x > y and x > z:\n        return x\n    elif y > x and y > z:\n        return y\n    else:\n        return z\n\n\nprint(f\"the largest number is {max_number(2, 3, 1)}\")\n\n\n# 15. Function that receives a number and returns the sum of all numbers\n# from 0 to that number\n# Example: for the number 3, the sum is 6 (0+1+2+3)\n\ndef sum_numbers(number):\n    \"\"\"iterate over the numbers and return their sum\"\"\"\n\n    total = sum(range(0, number + 1))\n\n    return total\n\n\nprint(f\"the sum of the numbers is {sum_numbers(5)}\")\n\n\n# Optional Exercises - Bonus\n#\n# 1. Function that receives 2 lists of numbers (numbers may be duplicated). Return the common numbers.\n#\n# Example:\n# list1 = [1, 1, 2, 3]\n# list2 = [2, 2, 3, 4]\n# Answer: {2, 3}\ndef afisare_numere_duble(list1, list2):\n    numere_duble = []\n    for i in list1:\n        if i in list2:\n            numere_duble.append(i)\n    print(numere_duble)\n\n\nlist1 = [1, 1, 2, 3]\nlist2 = [2, 2, 3, 4]\n\nafisare_numere_duble(list1, list2)\n\n# or, more simply\nmatches = {x for x in list1 if x in list2}\nprint(matches)\n\n\n# 2. Function that applies a price discount.\n# If the product costs 100 lei and we apply a 10% discount, the price will be 90 lei.\n# Handle the cases where the discount is invalid. For example, a 110% discount is invalid.\n\n\ndef discount_calculator(user_price, user_discount):\n    if 0 < user_discount < 100:\n        final_price = user_price - user_price * user_discount / 100\n        return final_price\n    else:\n        print(\"\\\nThe entered discount is invalid!\")\n\n\nprice = float(input(\"\\\nEnter the product price to calculate the discount price:\\\t\"))\ndiscount = float(input(\"\\\nEnter the discount (%):\\\t\"))\nprint(f\"\\\nThe final price is: {round(discount_calculator(price, discount), 2)}\")\n\n# 3. Function that displays the current date and time in Romania.\n# (bonus: also display the current date and time in China)\nfrom datetime import datetime\nimport pytz\n\nnow = datetime.now()\nprint(now)\nchina = pytz.timezone('Asia/Shanghai')\nchina_time = datetime.now(china)\nprint(china_time.strftime(\"Date is %d-%m-%y and time is %H:%M:%S\"))\n\n# 4. 
Function that displays how many days are left until your birthday / or until Christmas if you don't want to tell us when your birthday is :)\n\n\nfrom datetime import date\n\n\ndef zile_pana_la_craciun(christmas_date, today):\n    zile_ramase = (christmas_date - today).days\n    print(f\"Days left until Christmas: {zile_ramase}\")\n\n\nnow = date.today()\nchristmas = date(2023, 12, 25)\n\nzile_pana_la_craciun(christmas, now)\n","repo_name":"FlorinKlonoczki/Python-exercise","sub_path":"Functii_python_exercitii.py","file_name":"Functii_python_exercitii.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72050205135","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport errno\nfrom glob import glob\nimport shutil\nimport time\n\nOUTPUT_DIR = 'html'\nif len(sys.argv) > 1:\n    OUTPUT_DIR = sys.argv[1]\nprint(\"OUTPUTTING TO\",OUTPUT_DIR)\ntime.sleep(1)\n\nprogram = \"jupyter nbconvert --CodeFoldingPreprocessor.remove_folded_code=True --TagRemovePreprocessor.remove_cell_tags=\\\"{'remove_cell_html'}\\\"\"\ntarget_default = 'html_toc'\ntargets = {'Book.ipynb':'html'}\n\ndef mkdir_p(path):\n    try:\n        os.makedirs(path)\n    except OSError as exc: # Python >2.5\n        if exc.errno == errno.EEXIST and os.path.isdir(path):\n            pass\n        else:\n            raise\n\ndef replace_in_file(fn,src,dest):\n    f = open(fn,'r')\n    lines = ''.join(f.readlines())\n    f.close()\n    f = None\n    \n    f = open(fn+'.tmp','w')\n    f.write(lines.replace(src,dest))\n    f.close()\n    \n    os.replace(fn+'.tmp',fn)\n\nfor fn in glob(\"*.ipynb\"):\n    print(\"Running on \",fn)\n    os.system('%s --to %s --output-dir=%s \"%s\"'%(program,targets.get(fn,target_default),OUTPUT_DIR,fn))\n\nfor fn in glob(OUTPUT_DIR+\"/*.html\"):\n    replace_in_file(fn,\".ipynb\\\"\",\".html\\\"\")\n    replace_in_file(fn,\".ipynb#\",\".html#\")\n\neqn_numbering_location = \"\"\"\"\"\"\n\ninject_eqn_numbering = \"\"\"\n\n\"\"\"\n\nfor fn in glob(OUTPUT_DIR+\"/*.html\"):\n    replace_in_file(fn,eqn_numbering_location,inject_eqn_numbering+eqn_numbering_location)","repo_name":"krishauser/RoboticSystemsBook","sub_path":"to_html.py","file_name":"to_html.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"14"} +{"seq_id":"35241714328","text":"\nimport dimod\nimport itertools\nfrom dwave.system import DWaveSampler, EmbeddingComposite\n\n\n\n\npeople = ['A', 'B', 'C']\nroles = ['PM', 'SW', 'HW']\ngamma = 1\n\n# people_roles = {'A PM': 3, 'A SW': 2, 'A HW': 1,\n#                 'B PM': 1, 'B SW': 3, 'B HW': 3,\n#                 'C PM': 2, 'C SW': 1, 'C HW': 3}\n\npeople_roles = {'A PM': 3, 'A SW': 2, 'A HW': 1, 'A Des': 1, 'A Rep': 3,\n                'B PM': 1, 'B SW': 3, 'B HW': 3,'B Des': 1, 'B Rep': 2,\n                'C PM': 2, 'C SW': 1, 'C HW': 3, 'C Des': 2, 'C Rep': 2,\n                'D PM': 3, 'D SW': 1, 'D HW': 1, 'D Des': 2, 'D Rep': 3,\n                'E PM': 1, 'E SW': 1, 'E HW': 3, 'E Des': 1, 'E Rep': 3}\n\n# change the linear biases\nfor x in people_roles.keys():\n    people_roles[x] = (people_roles[x] - 2)*gamma\n\nbqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)\nbqm.linear = people_roles\nbqm.offset = 5 # this helps us choose 5 people from the set\n\n\n\n# Evaluate every potential edge in the graph\nfor i, j in itertools.combinations(range(len(people_roles)), 2):\n    k = list(people_roles.keys())\n\n    [person1, role1] = k[i].split()\n    [person2, role2] = k[j].split()\n\n    if person1 == person2 or role1 == role2:\n        bqm.add_interaction(k[i], k[j], 4, dimod.BINARY) # enforce strong pos quad 
bias if nodes have same person or role\n else:\n bqm.add_interaction(k[i], k[j], -1, dimod.BINARY)\n\n# print(bqm)\n\nsampler = EmbeddingComposite(DWaveSampler())\nresponse = sampler.sample(bqm, chain_strength=8, num_reads=100)\n\n# print('\\n')\nprint(response)\n\n# Print the variables chosen in the lowest energy sample\n# sample = response.samples()[0]\n# result = ''\nfor x in range(20):\n result = ''\n sample = response.samples()[x]\n\n for i in sample.keys():\n if sample[i] == 1:\n result = result + i + ', '\n\n result += ' cbf = ' + str(response.record['chain_break_fraction'][x])\n\n print(result)\n\n# print(result)\n\n\n\n\n","repo_name":"dtquantumc-zz/quantumKit","sub_path":"Workshops/Grades_9_12/Quantum_Fun/dwaveDemos/demoK12_choosing_roles.py","file_name":"demoK12_choosing_roles.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"71280229456","text":"import csv\nfrom typing import List\n\nfrom japaneseverbconjugator.JapaneseVerbFormGenerator import JapaneseVerbFormGenerator\nfrom japaneseverbconjugator.constants.EnumeratedTypes import (\n Formality,\n Polarity,\n Tense,\n VerbClass,\n)\n\nfrom slt import settings, japanese\n\n\nIRREGULAR_POS = set([\"30\", \"34\", \"38\", \"42\", \"45\", \"47\", \"48\", \"52\", \"58\"])\n\nSUFFIXES = [\"べきだ\", \"が\", \"べしだ\", \"べき\", \"べし\"]\n\n\nclass VerbTense:\n NonPast = 1\n Past = 2 # (~ta)\n Conjunctive = 3 # (~te)\n Provisional = 4 # (~eba)\n Potential = 5\n Passive = 6\n Causative = 7\n CausativePassive = 8\n Volitional = 9\n Imperative = 10\n Conditional = 11 # (~tara)\n Alternative = 12 # (~tari)\n Continuative = 13 # (~i)\n\n\nclass Conjugator:\n def __init__(self):\n self.conjugator = JapaneseVerbFormGenerator()\n with open(settings.CONJUGATOR_DATA, newline=\"\") as f:\n reader = csv.DictReader(f, delimiter=\"\\t\")\n self.conjugator_data = sorted(reader, key=lambda x: -len(x[\"okuri\"]))\n\n self.verbs = {}\n with open(settings.VERBS_PATH) as f:\n for verb in csv.DictReader(f):\n self.verbs.setdefault(verb[\"normalized\"], {})\n if verb[\"form\"] == \"基本形\":\n self.verbs[verb[\"normalized\"]][\"type\"] = verb[\"subpos2\"]\n elif verb[\"form\"] == \"未然形\":\n self.verbs[verb[\"normalized\"]][\"root\"] = verb[\"verb\"]\n\n def detect_class(self, verb: str, lemma: str):\n verb_info = self.verbs.get(lemma)\n if verb_info and verb_info[\"type\"] == \"一段\":\n verb = verb[len(verb_info[\"root\"]) :]\n for datum in self.conjugator_data:\n if verb.endswith(datum[\"okuri\"]):\n conj = datum\n break\n else:\n return False\n return {\n \"formality\": Formality.POLITE if conj[\"fml\"] == \"t\" else Formality.PLAIN,\n \"polarity\": Polarity.NEGATIVE if conj[\"neg\"] == \"t\" else Polarity.POSITIVE,\n \"tense\": int(conj[\"conj\"]),\n }\n\n def adjust_conjugation(\n self, source_verb: str, source_lemmas: List[str], target_verb: str\n ):\n print(source_verb, source_lemmas, target_verb)\n if not japanese.has_hiragana(target_verb) and \"する\" in source_lemmas:\n # both verbs are noun+する, simply: append the same ending\n return target_verb + source_verb.replace(source_lemmas[0], \"\")\n\n suffix = \"\"\n\n for potential_suffix in SUFFIXES:\n if source_verb.endswith(potential_suffix):\n suffix = source_verb[-len(potential_suffix) :] + suffix\n source_verb = source_verb[: len(potential_suffix)]\n\n if source_verb.endswith(\"ら\") or source_verb.endswith(\"り\"):\n suffix = source_verb[-1] + suffix\n source_verb = source_verb[:-1]\n conjugated = 
source_verb\n conj = self.detect_class(source_verb, source_lemmas[0])\n if not conj:\n return target_verb\n tense, formality, polarity = (\n conj[\"tense\"],\n conj[\"formality\"],\n conj[\"polarity\"],\n )\n\n verb_info = self.verbs.get(target_verb)\n if not verb_info:\n return target_verb\n\n if verb_info[\"type\"] == \"一段\":\n verb_class = VerbClass.ICHIDAN\n elif verb_info[\"type\"].startswith(\"五段\"):\n verb_class = VerbClass.GODAN\n else:\n verb_class = VerbClass.IRREGULAR\n\n args = [target_verb, verb_class, formality, polarity]\n if tense in [VerbTense.NonPast, VerbTense.Past]:\n if formality == Formality.PLAIN:\n func = self.conjugator.generate_plain_form\n else:\n func = self.conjugator.generate_polite_form\n conjugated = func(\n target_verb,\n verb_class,\n Tense.PAST if tense == VerbTense.Past else Tense.NONPAST,\n polarity,\n )\n elif tense == VerbTense.Conjunctive:\n conjugated = self.conjugator.generate_te_form(target_verb, verb_class)\n elif tense == VerbTense.Provisional:\n conjugated = self.conjugator.generate_provisional_form(*args)\n elif tense == VerbTense.Potential:\n conjugated = self.conjugator.generate_potential_form(*args)\n elif tense == VerbTense.Passive:\n conjugated = self.conjugator.generate_passive_form(*args)\n elif tense in [VerbTense.Causative, VerbTense.CausativePassive]:\n conjugated = self.conjugator.generate_causative_form(*args)\n elif tense == VerbTense.Volitional:\n conjugated = self.conjugator.generate_volitional_form(*args)\n elif tense == VerbTense.Imperative:\n conjugated = self.conjugator.generate_imperative_form(*args)\n elif tense == VerbTense.Conditional:\n conjugated = self.conjugator.generate_conditional_form(*args)\n return conjugated + suffix\n","repo_name":"tmicltw/slt-prototype","sub_path":"slt/conjugation.py","file_name":"conjugation.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"20871486938","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom .models import User, Advertiser\n\n# defineing custom widget for rendering html form with that widget\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\nclass UserModelForm(UserCreationForm):\n \n class Meta:\n model = User\n fields = \"__all__\" #[\n \n # 'username',\n # 'dob',\n # 'location',\n # 'country',\n # 'mobile',\n # 'email',\n # 'gender',\n # 'password',\n # ]\n widgets = {\n 'dob': DateInput()\n }\n\nclass UserProfileModelForm(UserChangeForm):\n# class UserProfileModelForm(forms.Form):\n class Meta:\n model = User\n fields = [\n # 'username',\n # 'dob',\n # 'location',\n # 'country',\n # 'mobile',\n # 'email',\n # 'gender',\n 'language',\n 'relationship_status',\n 'hobbies',\n 'movies',\n 'tv_shows',\n 'books',\n 'profile_pic',\n 'cover_pic',\n 'friends',\n 'user_community',\n ]\n widgets = {\n 'dob': DateInput()\n }\n\nclass AdvertiserModelForm(forms.ModelForm):\n \n class Meta:\n model = Advertiser\n fields = [\n # 'username',\n # 'dob',\n # 'location',\n # 'country',\n # 'mobile',\n # 'email',\n # 'gender',\n # 'password',\n 'balance',\n ]\n widgets = {\n 'dob': DateInput()\n }","repo_name":"vis7/connection","sub_path":"user/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"6832312292","text":"\n#distribute elements over circle\nfor i,r in enumerate(['C5', 'C7', 'C9', 
'C11', 'C13', 'C3']):\n    M[r].align(M['C5'])\n    M[r].rotate(-60*i)\n\n#outline edges\ndef gen_inner_edge(r, b):\n    edge = PolyLine(pcb, [], layer=L['Edge.Cuts'], width=0.2)\n\n    #add first segment\n    startp = [(to_cart([r, 6])[0] - b), to_cart([r, 6])[1]]\n    p = PolyLine(pcb, [startp, [startp[0], -startp[1]]])\n    p.append_arc(r, -6, -60+6, segments=24)\n    edge.merge(p)\n\n    #add the rest using -60 deg rotation\n    for i in range(5):\n        p.rotate(-60)\n        edge.merge(p)\n\n    #close the outline\n    edge.merge(startp)\n\n    return edge\n\ninner_edge = gen_inner_edge(46.5, 7)\n\nmods = [M['C2'], M['C12'], M['C10'], M['C8'], M['C6']]\ncircle_pattern(mods, M['C4'])\n\nmods = [M['R1'], M['R6'], M['R5'], M['R4'], M['R3']]\ncircle_pattern(mods, M['R2'])","repo_name":"wojtekbe/rgbir-lights","sub_path":"ctrl_board/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72246543055","text":"# This program asks for a number as input and says whether it is positive or negative\n\nprint(\"\"\"HELLO ~Simone Giacomini\nThis program asks for a number as input and says whether it is positive or negative\"\"\")\nnumero = int(input(\"Enter a number: \"))\nif numero == 0 :\n    print(\"the number is 0\")\nelif numero > 0 :\n    print(\"the number is positive\")\nelse :\n    print(\"the number is negative\")\n    \ninput(\"\")\n","repo_name":"SimoneGiacomini/Python_UFS09_ITSAR","sub_path":"2021-10-8 Consegna 4/1_positivoNegativo.py","file_name":"1_positivoNegativo.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"13039567896","text":"import datetime\nimport stripe\nfrom django.core.mail import send_mail\nfrom django.db.models import Sum\nfrom django.conf import settings\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import Phones, Bascet_products, AirPods\n\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\n\n@csrf_exempt\ndef CreateCheckoutSessionView(request, *args, **kwargs):\n    if request.method == 'POST':\n        product_id = kwargs['pk']\n        product = Bascet_products.objects.filter(accounts_id=product_id) & Bascet_products.objects.filter(\n            product_buy=False)\n        product = product.aggregate(total1=Sum('price'))\n        checkout_session = stripe.checkout.Session.create(\n            line_items=[\n                {\n                    'price_data': {\n                        'currency': 'usd',\n                        'unit_amount': round(product['total1'] / 11386) * 100,\n                        'product_data': {\n                            'name': 'total price of all items'\n                        }\n\n                    },\n                    'quantity': 1,\n                },\n            ],\n            metadata={'id': product_id},\n            mode='payment',\n            success_url='http://smartshopcenter.org:3000/customer/message/',#'http://smartshopcenter.org:3000',\n            cancel_url='http://smartshopcenter.org:3000/customer/message/'#'http://smartshopcenter.org:3000',\n        )\n        return JsonResponse({'id': checkout_session.url})\n\n\ndef get_time():\n    delta = datetime.timedelta(hours=5, minutes=0)\n    return datetime.datetime.now(datetime.timezone.utc) + delta\n\n\n@csrf_exempt\ndef stripe_webhook(request):\n    payload = request.body\n    sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n    event = None\n    try:\n        event = stripe.Webhook.construct_event(\n            payload, sig_header, settings.STRIPE_WEBHOOK_SECRET\n        )\n    except ValueError as e:\n        return HttpResponse(status=400)\n    except stripe.error.SignatureVerificationError as e:\n        return HttpResponse(status=400)\n    if event['type'] == 'checkout.session.completed':\n        session = 
event['data']['object']\n        customer_email = session['customer_details']['email']\n        products = Bascet_products.objects.filter(\n            accounts_id=session['metadata']['id']) & Bascet_products.objects.filter(product_buy=False)\n        check_in = 0\n        check_in_inf = False\n        time = get_time()\n        print(time)\n        for i in products:\n            a = 0\n            if i.group_product == 1:\n                change = Phones.objects.get(id=i.product_id)\n                change.count -= i.count\n                if change.count < 0:\n                    print(change.count)\n                    change.count += i.count\n                    a = 1\n                    send_mail(\n                        subject=f\"{change.name}\",\n                        message=f\"Unfortunately, this product is currently out of stock. The payment for this item will be refunded in full. Technical support will contact you shortly\",\n                        recipient_list=[customer_email],\n                        from_email=\"matt@test.com\"\n                    )\n                else:\n                    print(change.count)\n                    change.save()\n            elif i.group_product == 2:\n                change = AirPods.objects.get(id=i.product_id)\n                change.count -= i.count\n                if change.count < 0:\n                    change.count += i.count\n                    a = 1\n                    send_mail(\n                        subject=f\"{change.name}\",\n                        message=f\"Unfortunately, this product is currently out of stock. The payment for this item will be refunded in full. Technical support will contact you shortly\",\n                        recipient_list=[customer_email],\n                        from_email=\"matt@test.com\"\n                    )\n                else:\n                    change.save()\n            if a == 0:\n                check_in_inf = True\n                i.product_buy = True\n                print(i.time)\n                i.time = time\n                i.save()\n                print(i.time)\n            else:\n                check_in = 1\n        if check_in == 0:\n            send_mail(\n                subject=f\"Here are your items\",\n                message=f\"Thank you for your payment. Your items have been ordered and are on their way. The delivery service will contact you shortly\",\n                recipient_list=[customer_email],\n                from_email=\"matt@test.com\"\n            )\n        elif check_in == 1 and check_in_inf == True:\n            send_mail(\n                subject=f\"Here are your items\",\n                message=f\"Thank you for your payment. Your items have been ordered and are on their way. Some items were cancelled during payment and the administration will refund you for them. 
The delivery service will contact you shortly\",\n                recipient_list=[customer_email],\n                from_email=\"matt@test.com\"\n            )\n        else:\n            send_mail(\n                subject=f\"Purchase cancelled\",\n                message=f\"The items you ordered have been cancelled because they are out of stock; your payment will be fully refunded shortly\",\n                recipient_list=[customer_email],\n                from_email=\"matt@test.com\"\n            )\n    return HttpResponse(status=200)\n","repo_name":"DmitroPodolsky/smartphone_proj","sub_path":"app/phones/views2.py","file_name":"views2.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"41919009954","text":"from hivemind_etl_helpers.src.utils.mongo import MongoSingleton\n\n\ndef get_all_discord_communities() -> list[str]:\n    \"\"\"\n    Getting all communities having discord from database\n    \"\"\"\n    mongo = MongoSingleton.get_instance()\n    communities = (\n        mongo.client[\"Core\"][\"platforms\"]\n        .find({\"name\": \"discord\"})\n        .distinct(\"community\")\n    )\n    # getting the str instead of ObjectId\n    communities = [str(comm) for comm in communities]\n    return communities\n","repo_name":"TogetherCrew/airflow-dags","sub_path":"dags/hivemind_etl_helpers/src/utils/get_mongo_discord_communities.py","file_name":"get_mongo_discord_communities.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"23428242981","text":"# # ABC is a right triangle, 90° at B.\n# Therefore, ')\n    connection.send(bytes(getRegisterByAddress(dev, i)))\n    connection.send(b'\\n')\n    i +=1\n\n    connection.close()\n\n\ndef getRegisterByAddress(dev, addr):\n    v = dev.GetCoreReg(addr)\n    return v\n\n\nif __name__ == \"__main__\":\n\n    proc = sys.argv[1]\n    elffile = sys.argv[2]\n\n    sim = SimulavrAdaptor.SimulavrAdapter()\n    dev = sim.loadDevice(proc, elffile)\n\n    a0 = XPin(dev, \"A0\")\n    a1 = XPin(dev, \"A1\", \"H\")\n    a7 = XPin(dev, \"A7\", \"H\")\n\n    while True:\n\n        sim.doStep()\n","repo_name":"minallaad/SER517-Capstone---Project-10","sub_path":"simulavr/examples/Example.py","file_name":"Example.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"44358839958","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport plotly.express as px\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.cluster import KMeans\nclass ClusterProcess:\n    def __init__(self,n_pca_components=2):\n        self.n_pca_comp=n_pca_components\n        self.pca=PCA(n_components=n_pca_components)\n        self.normalize_std_scaler=StandardScaler()\n    def readData(self,name):\n        return pd.read_csv(name+\".csv\")\n    def findPCA(self,data):\n        pc=self.pca.fit_transform(data)\n        pc_df=pd.DataFrame(data=pc,columns=['comp_'+str(i) for i in range(1,self.n_pca_comp+1)])\n        return pc_df\n    def do_clustering(self,data,n_cluster,method):\n        if method.__name__==AgglomerativeClustering.__name__:\n            cluster_algo=method(n_clusters=n_cluster)\n            return cluster_algo.fit_predict(data),cluster_algo.labels_,silhouette_score(data,cluster_algo.labels_)\n        elif method.__name__==KMeans.__name__:\n            cluster_algo=method(n_clusters=n_cluster)\n            cluster_algo.fit(data)\n            return cluster_algo.predict(data),cluster_algo.cluster_centers_,cluster_algo.labels_,silhouette_score(data,cluster_algo.labels_),cluster_algo.inertia_\n        
else:\n            cluster_algo=method(n_clusters=n_cluster)\n            cluster_algo.fit(data)\n            return cluster_algo.predict(data),cluster_algo.cluster_centers_,cluster_algo.labels_,silhouette_score(data,cluster_algo.labels_),cluster_algo.inertia_\n    def fitScale(self,data):\n        self.normalize_std_scaler.fit(data)\n    def tranScale(self,data):\n        return self.normalize_std_scaler.transform(data)\n    def invScale(self,data):\n        return self.normalize_std_scaler.inverse_transform(data)\n    def plot3dScatter(self,data,color='None'):\n        if color=='None':\n            fig=px.scatter_3d(data,x='comp_1',y='comp_2',z='comp_3',size_max=10)\n        else:\n            fig=px.scatter_3d(data,x='comp_1',y='comp_2',z='comp_3',color=color,size_max=10)\n        fig.update_traces(marker_size = 3)\n        fig.show()\n    def plotBar(self,data,column_names,color='None'):\n        if color=='None':\n            fig = px.bar(pd.DataFrame(data=data,columns=column_names), x=column_names[0],y=column_names[1])\n        else:\n            fig = px.bar(pd.DataFrame(data=data,columns=column_names), x=column_names[0],y=column_names[1],color=color)\n        fig.show()","repo_name":"aayushiganatra/ClusterWise-Wine-Data-Exploration","sub_path":"DataProcess.py","file_name":"DataProcess.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72806771213","text":"\"\"\" add phone number to users table\n\nRevision ID: 88e4596adbda\nRevises: d386d3919523\nCreate Date: 2017-04-10 02:46:54.129717\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '88e4596adbda'\ndown_revision = 'd386d3919523'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.add_column('users', sa.Column('phone_number', sa.Text(),\n                                     nullable=False))\n\n\ndef downgrade():\n    op.drop_column('users', 'phone_number')\n","repo_name":"crumbtech/crumb-api","sub_path":"migrations/versions/88e4596adbda_.py","file_name":"88e4596adbda_.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"31995521765","text":"import os\r\nimport json\r\nimport requests\r\nimport urllib.request\r\n\r\nclass main:\r\n    jarname = \"\"\r\n    foldername = \"\"\r\n\r\ndef makeserverfolder(jar, version, type):\r\n    name = input(\"Name The Server Folder: \")\r\n    mem = input(\"Amount Of Allocated Memory (MB): \")\r\n\r\n    foldername = (str(name) + \" [\" + type + \"] (\" + version + \")\")\r\n    main.foldername = foldername\r\n    if str(jar).endswith(\".jar\"):\r\n        jarname = str(jar)\r\n    else:\r\n        jarname = str(jar) + '.jar'\r\n    os.mkdir(str(foldername))\r\n    os.mkdir((str(foldername) + \"/Batches\"))\r\n    os.mkdir((str(foldername) + \"/Server\"))\r\n    os.mkdir(str(foldername) + \"/Ngrok\")\r\n\r\n    with open( str(foldername) + '/Batches/startserver.bat', 'w') as f:\r\n        f.write('cd \"./' + foldername + \"/Batches\" + '\"\\n')\r\n        f.write('start ngrok.bat\\n')\r\n        f.write('cd \"../Server\"\\n')\r\n        f.write('java -Xms' + str(mem) + 'M -Xmx' + str(mem) + 'M -jar ' + jarname + '\\n')\r\n        f.write('exit')\r\n        f.close()\r\n    with open( str(foldername) + '/Batches/ngrok.bat', 'w') as f:\r\n        f.write('cd \"../Ngrok\"\\n')\r\n        f.write('ngrok --region au tcp 25565\\n')\r\n        f.write('cd \"./Batches\"\\n')\r\n        f.write('exit')\r\n        f.close()\r\n    with open( str(foldername) + '/Batches/stopserver.bat', 'w') as f:\r\n        f.write('taskkill /f /im cmd.exe /t\\n')\r\n        f.write('exit')\r\n        f.close()\r\n\r\n\r\n\r\nversions = 
\"https://api.papermc.io/v2/projects/paper\"\r\n\r\nservertype = \"paper\"\r\n\r\n\r\nresponse = requests.get(versions)\r\nversionsraw = response.text\r\nversionsdict = json.loads(versionsraw)\r\nversionslist = versionsdict[\"versions\"]\r\nprint(versionsdict)\r\ncurrent = 0\r\nfor version in versionsdict[\"versions\"]:\r\n current = current + 1\r\n print(\"[\" + str(current) + \"] \" + str(version))\r\n\r\n\r\nprint(\"select a minecraft verion. [type number in brackets]\")\r\nversion = input()\r\n\r\ncurrent = 0\r\nfor ver in versionslist:\r\n launchname = str(versions[current])\r\n if str(version) == (str(current + 1)):\r\n selectedversion = ver\r\n print(\"Found Server\")\r\n\r\n current = current + 1\r\n\r\n##builds\r\n\r\n\r\nbuilds = \"https://api.papermc.io/v2/projects/paper/versions/\" + str(selectedversion)\r\n\r\nresponse = requests.get(builds)\r\nbuildsraw = response.text\r\nbuildsdict = json.loads(buildsraw)\r\nbuildslist = buildsdict[\"builds\"]\r\nprint(buildsdict)\r\ncurrent = 0\r\nfor build in buildslist:\r\n current = current + 1\r\n if current == len(buildslist):\r\n print(\"[\" + str(current) + \"] \" + str(build) + \" (Latest)\")\r\n else:\r\n print(\"[\" + str(current) + \"] \" + str(build))\r\n\r\n\r\nprint(\"select a server build. [type number in brackets]\")\r\nbuild = input()\r\n\r\ncurrent = 0\r\nfor bui in buildslist:\r\n launchname = str(buildslist[current])\r\n if str(build) == (str(current + 1)):\r\n selectedbuild = bui\r\n print(\"Found Server\")\r\n\r\n current = current + 1\r\n\r\ndownload = \"https://api.papermc.io/v2/projects/paper/versions/\" + str(selectedversion) + \"/builds/\" + str(selectedbuild) + \"/downloads/paper-\" + str(selectedversion) + \"-\" + str(selectedbuild) + \".jar\"\r\nfile = \"paper-\" + str(selectedversion) + \"-\" + str(selectedbuild) + \".jar\"\r\n\r\nmakeserverfolder(file, selectedversion, servertype)\r\n\r\nr = requests.get(download, allow_redirects=True)\r\n\r\nopen(file, 'wb').write(r.content)\r\nos.rename('./' + file, './' + str(main.foldername) + \"/Server/\" + file)","repo_name":"Baconzilla123/BaconLauncher","sub_path":"Utils/App/newserver.py","file_name":"newserver.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"43137550280","text":"import re\nimport requests\nimport time\nimport cip\nipl=''\nregkdl=r'(\\d+\\.\\d+\\.\\d+\\.\\d+)\\s*(\\d+)'\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'\n}\nreg66ip=r'(\\d+\\.\\d+\\.\\d+\\.\\d+)(\\d+)'\ndef getip(url,reg):\n global ipl\n try:\n \n r=requests.get(url,headers=headers,proxies=cip.getrandomip(),timeout=2)\n if r.status_code==200:\n\n print(r.status_code)\n ip_list=re.findall(reg,r.text)\n ip_list=set(ip_list)\n print(ip_list)\n \n for ip in ip_list:\n \n if ip[0] in ipl:\n continue\n else: \n ipl=ipl+ip[0]+':'+ip[1]+'\\n'\n else:\n print(r.status_code)\n getip(url,reg)\n except:\n print('ip封禁,获取页面失败!.....')\n getip(url,reg)\n \n \ndef kdl(p):\n for i in range(1,p):\n global regkdl \n url='https://www.kuaidaili.com/free/inha/'+str(i)+'/'\n print(\"正在爬取第:\"+str(i)+'页')\n getip(url,regkdl)\n time.sleep(2)\n\n f=open('ip.txt','a')\n f.write(ipl)\n f.close()\n\ndef g66ip(p):\n for i in range(1,p):\n print(\"正在爬取第:\"+str(i)+'页')\n url='http://www.66ip.cn/'+str(i)+'.html'\n getip(url,reg66ip)\n f=open('66iplist.txt','a')\n f.write(ipl)\n 
f.close()\n","repo_name":"netcat11/proxies","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"73329229455","text":"import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n url(r'^enrollment/', views.enrollment),\n url(r'^enrollment_claim/', views.enrollment_claim),\n url(r'^claims/', views.claims),\n url(r'^claims_new/', views.claims_new),\n url(r'^claims_prov/', views.claims_prov),\n url(r'^inpatient/', views.inpatient_claim),\n url(r'^outpatient/', views.outpatient_claim),\n url(r'^financial_costanalysis/', views.financial_claimcostanalysis),\n url(r'^financial_onsite/', views.financial_onsite),\n url(r'^health_initiative/', views.health_initiative),\n url(r'^dental_vision/', views.dental_vision),\n url(r'^claims_cost_analysis/', views.claims_cost_analysis),\n url(r'^covidentifier/', views.covidentifier),\n url(r'^cov_filters/', views.cov_filters),\n url(r'^inpatient_claim/', views.inpatient_claim),\n url(r'^inpatient_filters/', views.inpatient_filters),\n url(r'^outpatient_filters/', views.outpatient_filters),\n url(r'^financial_filters/', views.financial_filters),\n]\n","repo_name":"headrun/Tracktion","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"34520560300","text":"\n\nimport os\nimport sys\n\n\n\nSP = sys.argv[-1]\n\n\nos.system(\"python pipeline/rm.py \" + SP)\n\nos.system(\"python pipeline/gather.py \" + SP)\n\nos.system(\"Rscript --vanilla pipeline/self.R \" + SP)\n\n# Grab real values\n\nf=open(\"\" + SP + \"/merged/real/all.txt\",\"r\")\nl=f.readline()\na=l.strip(\"\\n\").split(\"\\t\")\nhm = a[2]\nf.close()\n\n\nf=open(\"\" + SP + \"/merged/real/metrics.txt\",\"r\")\nl=f.readline()\nl=f.readline()\na=l.strip(\"\\n\").split(\"\\t\")\npi = a[2]\nf.close()\n\ntry:\n f=open(\"\" + SP + \"/merged/real/self.txt\",\"r\")\n l=f.readline()\n a=l.strip(\"\\n\").split(\"\\t\")\n itself = a[0]\n f.close()\nexcept:\n os.system(\"Rscript --vanilla pipeline/linear_self.R \" + SP)\n f=open(\"\" + SP + \"/merged/real/self.txt\",\"r\")\n l=f.readline()\n a=l.strip(\"\\n\").split(\"\\t\")\n itself = a[0]\n f.close()\n\n\n\nos.system(\"Rscript --vanilla pipeline/abc.R \" + SP + \" \" + pi + \" \" + itself + \" \" + hm)\n\n\n\n","repo_name":"lbobay/recABC","sub_path":"pipeline/terminate.py","file_name":"terminate.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"28166559714","text":"\nfrom SDK.listExtension import ListExtension\nimport re\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium import webdriver\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom SDK.stringExtension import StringExtension\nfrom SDK.thread import Thread\nfrom SDK import (database, jsonExtension, user, imports, cmd)\n\nconfig = jsonExtension.load(\"config.json\")\n\n\nclass LongPoll(VkLongPoll):\n def __init__(self, instance, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.instance = instance\n\n def listen(self):\n while True:\n try:\n self.instance.check_tasks()\n updates = self.check()\n for event in updates:\n yield event\n except:\n # we shall participate in large amount of 
tomfoolery\n                pass\n\n\nclass FunPay(Thread):\n    def __init__(self, bot_class, **kwargs) -> None:\n        self.bot_class = bot_class\n        self.driver = webdriver.Firefox()\n        self.replies = jsonExtension.load(\"data/replies.json\", indent=4)\n        self.responses = jsonExtension.load(\"data/responses.json\")\n        self.purchases = jsonExtension.load(\"data/purchases.json\")\n        self.user_login = None\n        self.auth()\n        super().__init__(**kwargs)\n\n    def auth(self):\n        self.driver.get(\"https://funpay.ru\")\n        self.driver.find_element(By.CSS_SELECTOR, \"a.vk\").click()\n        self.driver.find_element(By.NAME, \"email\").send_keys(\n            self.bot_class.config[\"vk_login\"])\n        self.driver.find_element(By.NAME, \"pass\").send_keys(\n            self.bot_class.config[\"vk_password\"])\n        self.driver.find_element(\n            By.CSS_SELECTOR, \"button.flat_button.oauth_button.button_wide\").click()\n\n    def attempt_find_element(self, fr, by, j):\n        try:\n            return fr.find_element(by, j)\n        except NoSuchElementException:\n            return\n\n    def attempt_find_elements(self, fr, by, j):\n        try:\n            return fr.find_elements(by, j)\n        except NoSuchElementException:\n            return\n\n    def reply(self, message):\n        reply_field = self.driver.find_element(By.CSS_SELECTOR, \"textarea.form-control\")\n        reply_field.send_keys(message)\n        self.driver.find_element(By.CSS_SELECTOR, \"button.btn.btn-gray.btn-round\").click()\n\n\n    def run(self):\n        while True:\n            self.driver.get(\"https://funpay.ru/chat/\")\n            if self.user_login is None:\n                self.user_login = self.driver.find_element(\n                    By.CSS_SELECTOR, \"div.user-link-name\").get_attribute(\"textContent\")\n\n\n            # fetch all chats\n            nodes = [node.get_attribute('data-id') for node in self.driver.find_element(\n                By.CSS_SELECTOR, \"div.contact-list.custom-scroll\").find_elements(By.CSS_SELECTOR, \"a\")]\n            for node in nodes:\n                self.driver.get(f\"https://funpay.ru/chat/?node={node}\")\n                looking_for = self.driver.find_element(\n                    By.CSS_SELECTOR, \"div.param-item.chat-panel\")\n                if \"hidden\" not in looking_for.get_attribute(\"className\"):\n                    element = looking_for.find_element(By.CSS_SELECTOR, \"a\")\n                    string = f\"{node}:\"\n                    string += re.findall(r'\\d+',\n                                         element.get_attribute('href'))[0]\n                    if element.text in self.replies and string not in self.responses:\n                        if not self.replies[element.text][\"active\"]:\n                            continue\n                        self.reply(self.replies[element.text]['first_reply'])\n                        self.responses.append(string)\n\n\n                # save the message history\n                user_login = self.driver.find_element(By.CSS_SELECTOR, \"div.media.media-user\").find_element(By.CSS_SELECTOR, \"div.media-user-name\").find_element(By.TAG_NAME, \"a\").text\n                messages_file = jsonExtension.loadAdvanced(f\"data/messages/{user_login}_{node}.json\", content=\"[]\", ident=4)\n                messages = self.driver.find_elements(By.CSS_SELECTOR, \"div.chat-message\")\n                last_user_name = \"\"\n                last_date = \"\"\n                for message in messages:\n                    user_name = getattr(self.attempt_find_element(message, By.TAG_NAME, \"a\"), \"text\", None) or last_user_name\n                    last_user_name = user_name\n                    date = getattr(self.attempt_find_element(message, By.CSS_SELECTOR, \"div.chat-message-date\"), \"text\", None) or last_date\n                    last_date = date\n                    message_text = message.find_element(By.CSS_SELECTOR, \"div.message-text\").text\n                    d = {\n                        \"text\": message_text,\n                        \"author\": user_name,\n                        \"date\": date\n                    }\n                    if d not in messages_file:\n                        messages_file.append(d)\n\n\n                # hand out the purchased product\n                alerts = self.attempt_find_elements(\n                    self.driver, By.CSS_SELECTOR, \"div.alert.alert-with-icon.alert-info\")\n                for alert in alerts:\n                    if f\"{self.user_login}, не забудьте 
потом нажать кнопку «Подтвердить выполнение заказа».\" not in alert.text and \"оплатил\" in alert.text:\n                        # extract uppercase letters from order url\n                        order_id = ''.join(re.findall('[A-Z]+', alert.find_elements(By.TAG_NAME, \"a\")[1].get_attribute(\"href\")))\n                        # unique string (don't give product to person two times)\n                        string = f\"{node}:{order_id}\"\n                        if string not in self.purchases:\n                            for product in list(self.replies):\n                                if not self.replies[product][\"active\"]:\n                                    continue\n                                if product in alert.text:\n                                    self.reply(self.replies[product]['product'])\n                                    self.replies[product][\"active\"] = False\n                                    self.purchases.append(string)\n                                    break\n\n            sleep(30)\n\n\nclass MainThread(Thread):\n    def run(self):\n        self.config = config\n        imports.ImportTools([\"packages\", \"Structs\"])\n        self.database = database.Database(\n            config[\"db_file\"], config[\"db_backups_folder\"], self)\n        self.db = self.database\n        database.db = self.database\n        self.vk_session = vk_api.VkApi(token=self.config[\"vk_api_key\"])\n        self.longpoll = LongPoll(self, self.vk_session)\n        self.vk = self.vk_session.get_api()\n        self.group_id = \"-\" + re.findall(r'\\d+', self.longpoll.server)[0]\n        FunPay(self).start()\n        print(\"Bot started!\")\n        super().__init__(name=\"Main\")\n        self.poll()\n\n    def parse_attachments(self):\n        for attachmentList in self.attachments_last_message:\n            attachment_type = attachmentList['type']\n            attachment = attachmentList[attachment_type]\n            access_key = attachment.get(\"access_key\")\n            if attachment_type != \"sticker\":\n                self.attachments.append(\n                    f\"{attachment_type}{attachment['owner_id']}_{attachment['id']}\") if access_key is None \\\n                    else self.attachments.append(\n                    f\"{attachment_type}{attachment['owner_id']}_{attachment['id']}_{access_key}\")\n            else:\n                self.sticker_id = attachment[\"sticker_id\"]\n\n    def reply(self, *args, **kwargs):\n        return self.user.write(*args, **kwargs)\n\n    def wait(self, x, y):\n        return cmd.set_after(x, self.user.id, y)\n\n    def write(self, user_id, *args, **kwargs):\n        user.User(self.vk, user_id).write(*args, **kwargs)\n\n    def set_after(self, x, y=None):\n        if y is None:\n            y = []\n        cmd.set_after(x, self.user.id, y)\n\n    def poll(self):\n        for event in self.longpoll.listen():\n            if event.type == VkEventType.MESSAGE_NEW and event.to_me:\n                self.attachments = ListExtension()\n                self.sticker_id = None\n                self.user = user.User(self.vk, event.user_id)\n                self.raw_text = StringExtension(event.message.strip())\n                self.event = event\n                self.text = StringExtension(self.raw_text.lower().strip())\n                self.txtSplit = self.text.split()\n                self.command = self.txtSplit[0] if len(\n                    self.txtSplit) > 0 else \"\"\n                self.args = self.txtSplit[1:]\n                self.messages = self.user.messages.getHistory(count=3)[\"items\"]\n                self.last_message = self.messages[0]\n                self.attachments_last_message = self.last_message[\"attachments\"]\n                self.parse_attachments()\n                cmd.execute_command(self)\n\n\nif __name__ == \"__main__\":\n    _thread = MainThread()\n    _thread.start()\n    _thread.join()\n","repo_name":"BydloCoding/FunPayBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"11431058280","text":"import torch\nimport numpy as np\nfrom torch import nn\nimport math\nfrom typing import Union, Tuple\n\n\n\n\n\nclass SystemEnergyLoss(nn.Module):\n    def __init__(self, H: Union[np.ndarray, torch.Tensor], device: torch.device = torch.device(\"cpu\")):\n        super(SystemEnergyLoss, self).__init__()\n\n        if isinstance(H, np.ndarray):\n
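            # editor note: H may arrive as a NumPy array; convert it to a tensor on the target device.\n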
            self.H = torch.from_numpy(H).to(device)\n        elif isinstance(H, torch.Tensor):\n            self.H = H.to(device)\n        else:\n            raise TypeError(\"H should be of type np.ndarray or torch.Tensor.\")\n        E, V = torch.linalg.eigh(self.H)\n        self.offset = E[-1]\n        self.eye_offset = self.offset * torch.eye(self.H.shape[0], device=device)\n        self.X = V[:,0].to(device)\n\n    def forward(self, U: torch.Tensor) -> torch.Tensor:\n        A = torch.matmul(U, torch.matmul(self.H, U.T))\n        result_abs = self.stoquastic(A)\n        E = torch.linalg.eigvalsh(result_abs)\n        z = torch.exp(-E * 1).sum()\n        return torch.log(z)\n\n    def stoquastic(self, A: torch.Tensor):\n        return -torch.abs(A - self.eye_offset) + self.eye_offset\n\nclass SystemQuasiEnergyLoss(SystemEnergyLoss):\n    def __init__(\n        self,\n        H: Union[np.ndarray, torch.Tensor],\n        N: int = 10,\n        r: float = 0, #* regularization\n        device: torch.device = torch.device(\"cpu\")):\n        super(SystemQuasiEnergyLoss, self).__init__(H, device)\n        self.H = self.H - self.eye_offset\n        self.N = int(N)\n\n\n    def forward(self, U: torch.Tensor, r: float = 0) -> torch.Tensor:\n        SUx = torch.abs(U @ self.X)\n        A = U @ self.H @ U.T\n        SUH = self.stoquastic(A)\n        y = SUx\n        for _ in range(self.N):\n            y = SUH @ y\n            if _ % 4 == 0:\n                y = y / torch.norm(y)\n        quasi_Sgs = torch.abs(y / torch.norm(y))\n\n        # gap = (SUx - quasi_Sgs) @ SUH @ (quasi_Sgs + SUx)\n        # return gap - SUx @ y - self.offset\n        z = SUH @ quasi_Sgs\n        #* if H is real and symmetric\n        return - (quasi_Sgs @ z + self.offset) + r * (1 - torch.abs(quasi_Sgs.dot(z) / torch.norm(z)))","repo_name":"MKrbm/worms","sub_path":"python/rmsKit/rms_torch/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"7021454348","text":"import glob\nimport cv2\nimport os\n\nIMGWIDTH = 680\nIMGHEIGHT = 480\n# Get all the images in the directory with tif format\nimg_list = glob.glob(\"*.tif\")\n\ncount = 0\n\n# Get each image\nfor img in img_list:\n    # Open image in opencv\n    file = cv2.imread(img, cv2.IMREAD_COLOR)\n\n    # Resize image; cv2.resize expects (width, height)\n    final_file = cv2.resize(file, (IMGWIDTH, IMGHEIGHT))\n\n    # Check Image height and width and channels of new images\n    dimensions = final_file.shape\n    height = dimensions[0]\n    width = dimensions[1]\n    channels = dimensions[2]\n\n    # Write file to image in jpeg\n    count += 1\n    cv2.imwrite(str(count) + \".jpeg\", final_file)\n    # Print out data\n    print('Image Height : ', height)\n    print('Image Width : ', width)\n    print('Number of Channels : ', channels)\n\n# Remove all images with tif format\nfor img in img_list:\n    os.remove(img)\n","repo_name":"Raynard-O/Scanner","sub_path":"DB4_B/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"11483203928","text":"from forecaster.utils import load_stock_prices, plot_predictions\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential as KerasSequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\nimport matplotlib.pyplot as pyplot\nimport os as python_os\nimport pandas\nimport numpy\n\n\nclass LSTMForecaster:\n    class TrainingSet:\n        def __init__(self, feature_set, labels):\n            self.feature_set = feature_set\n            self.labels = labels\n\n    def __init__(self):\n        self.scaler = MinMaxScaler(feature_range=(0, 1))\n        self.model_memo = {}\n\n    def generate_training_set(self, training_data, cluster_size):\n
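        # editor note: min-max scale the series, then slide a window of cluster_size prices over it, using the next price as the label.\n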
scaled_training_data = self.scaler.fit_transform(training_data.reshape(-1, 1))\n feature_set = []\n labels = []\n for i in range(cluster_size, len(scaled_training_data)):\n feature_set.append(scaled_training_data[i - cluster_size:i])\n labels.append(scaled_training_data[i][0])\n feature_set = numpy.array(feature_set)\n feature_set = numpy.reshape(feature_set, (feature_set.shape[0], feature_set.shape[1], 1))\n labels = numpy.array(labels)\n return LSTMForecaster.TrainingSet(feature_set, labels)\n\n def compile_model(self, model_builder, feature_set):\n model = model_builder()\n model.add(LSTM(units=50, return_sequences=True, input_shape=(feature_set.shape[1], 1)))\n model.add(Dropout(0.2))\n model.add(LSTM(units=50, return_sequences=True))\n model.add(Dropout(0.2))\n model.add(LSTM(units=50, return_sequences=True))\n model.add(Dropout(0.2))\n model.add(LSTM(units=50))\n model.add(Dropout(0.2))\n model.add(Dense(units=1))\n model.compile(optimizer='adam', loss='mean_squared_error')\n return model\n\n def fit_model(self, model, training_set):\n model.fit(training_set.feature_set, training_set.labels, epochs=100, batch_size=32)\n return model\n\n def generate_test_set(self, training_data, test_data, cluster_size):\n total = numpy.concatenate((training_data, test_data))\n test_inputs = total[len(total) - len(test_data) - cluster_size:]\n test_inputs = test_inputs.reshape(-1, 1)\n test_inputs = self.scaler.transform(test_inputs)\n test_set = []\n if len(test_data) == 0:\n test_set.append(test_inputs)\n else:\n for i in range(cluster_size, (len(test_data) + cluster_size)):\n test_set.append(test_inputs[i - cluster_size:i, 0])\n test_set = numpy.array(test_set)\n test_set = numpy.reshape(test_set, (test_set.shape[0], test_set.shape[1], 1))\n return test_set\n\n def test_predictions(self, model, test_set, test_data, plotter, os, graph_dir, ticker):\n predictions = model.predict(test_set)\n predictions = self.scaler.inverse_transform(predictions)\n\n plot_predictions(test_data, predictions, ticker)\n\n def prod_prediction(self, model, test_set):\n prediction = model.predict(test_set)\n prediction = self.scaler.inverse_transform(prediction)\n return prediction\n\n def evaluate_model(self, ticker, training_timestamp, training_size, test_timestamp, test_size, cluster_size):\n base_data_dir = 'model_eval/one-minute'\n training_data = load_stock_prices(f'{base_data_dir}/{ticker}.csv', training_timestamp, training_size)\n training_set = self.generate_training_set(training_data, cluster_size)\n stock_model = self.compile_model(KerasSequential, training_set.feature_set)\n stock_model = self.fit_model(stock_model, training_set)\n\n test_data = load_stock_prices(f'{base_data_dir}/{ticker}.csv', test_timestamp, test_size)\n test_set = self.generate_test_set(training_data, test_data, cluster_size)\n self.test_predictions(stock_model, test_set, test_data, pyplot, python_os, f'../forecaster_data/graphs', ticker)\n\n def generate_forecast(self, ticker, training_timestamp, training_size, cluster_size):\n base_data_dir = '../forecaster_data/prices'\n training_data = load_stock_prices(f'{base_data_dir}/{ticker}.csv', training_timestamp, training_size)\n training_set = self.generate_training_set(training_data, cluster_size)\n\n stock_model = self.model_memo.get(ticker)\n if stock_model is None:\n stock_model = self.compile_model(KerasSequential, training_set.feature_set)\n stock_model = self.fit_model(stock_model, training_set)\n self.model_memo[ticker] = stock_model\n\n test_set = 
self.generate_test_set(training_data, numpy.array([]), cluster_size)\n return (self.prod_prediction(stock_model, test_set) - training_data[-1]) / training_data[-1]\n","repo_name":"joshua-ivan/thesis_stock-forecaster","sub_path":"forecaster/lstm_forecaster.py","file_name":"lstm_forecaster.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"38726544669","text":"\"\"\"\nTests for `kolibri.utils.cli` module.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport tempfile\n\nimport pytest\nfrom django.db.utils import OperationalError\nfrom mock import patch\n\nimport kolibri\nfrom kolibri.plugins.utils import autoremove_unavailable_plugins\nfrom kolibri.utils import cli\nfrom kolibri.utils import options\n\nlogger = logging.getLogger(__name__)\n\n\nLOG_LOGGER = []\n\n\ndef log_logger(logger_instance, LEVEL, msg, args, **kwargs):\n \"\"\"\n Monkeypatching for logging.Logger._log to scoop up log messages if we wanna\n test something specific was logged.\n \"\"\"\n LOG_LOGGER.append((LEVEL, msg))\n # Call the original function\n logger_instance.__log(LEVEL, msg, args, **kwargs)\n\n\ndef activate_log_logger(monkeypatch):\n \"\"\"\n Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern\n of py.test (test accepts a ``monkeypatch`` argument)\n \"\"\"\n monkeypatch.setattr(logging.Logger, \"__log\", logging.Logger._log, raising=False)\n monkeypatch.setattr(logging.Logger, \"_log\", log_logger)\n\n\n@pytest.fixture\ndef plugins():\n from kolibri import plugins\n\n _, config_file = tempfile.mkstemp(suffix=\"json\")\n old_config_file = plugins.conf_file\n plugins.conf_file = config_file\n plugins.config.set_defaults()\n yield plugins\n plugins.conf_file = old_config_file\n\n\ndef test_bogus_plugin_autoremove(plugins):\n \"\"\"\n Checks that a plugin is auto-removed when it cannot be imported\n \"\"\"\n plugin_name = \"giraffe.horse\"\n plugins.config[\"INSTALLED_PLUGINS\"].add(plugin_name)\n plugins.config.save()\n autoremove_unavailable_plugins()\n assert plugin_name not in plugins.config[\"INSTALLED_PLUGINS\"]\n\n\ndef test_bogus_plugin_autoremove_no_path(plugins):\n \"\"\"\n Checks that a plugin without a dotted path is also auto-removed\n \"\"\"\n plugin_name = \"giraffehorse\"\n plugins.config[\"INSTALLED_PLUGINS\"].add(plugin_name)\n plugins.config.save()\n autoremove_unavailable_plugins()\n assert plugin_name not in plugins.config[\"INSTALLED_PLUGINS\"]\n\n\ndef test_bogus_plugin_disable(plugins):\n installed_apps_before = plugins.config[\"INSTALLED_PLUGINS\"].copy()\n disabled_apps_before = plugins.config[\"DISABLED_PLUGINS\"].copy()\n try:\n cli.disable.callback((\"i_do_not_exist\",), False)\n except Exception:\n pass\n assert installed_apps_before == plugins.config[\"INSTALLED_PLUGINS\"]\n assert disabled_apps_before == plugins.config[\"DISABLED_PLUGINS\"]\n\n\ndef test_plugin_cannot_be_imported_disable(plugins):\n \"\"\"\n A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or uninstalled\n \"\"\"\n plugin_name = \"giraffe.horse\"\n plugins.config[\"INSTALLED_PLUGINS\"].add(plugin_name)\n plugins.config.save()\n try:\n cli.disable.callback((plugin_name,), False)\n except Exception:\n pass\n assert plugin_name not in plugins.config[\"INSTALLED_PLUGINS\"]\n # We also don't want to endlessly add cruft to the disabled apps\n assert plugin_name not in 
plugins.config[\"DISABLED_PLUGINS\"]\n\n\ndef test_real_plugin_disable(plugins):\n installed_apps_before = plugins.config[\"INSTALLED_PLUGINS\"].copy()\n test_plugin = \"kolibri.plugins.media_player\"\n assert test_plugin in installed_apps_before\n # Because RIP example plugin\n cli.disable.callback((test_plugin,), False)\n assert test_plugin not in plugins.config[\"INSTALLED_PLUGINS\"]\n assert test_plugin in plugins.config[\"DISABLED_PLUGINS\"]\n\n\ndef test_real_plugin_disable_twice(plugins):\n installed_apps_before = plugins.config[\"INSTALLED_PLUGINS\"].copy()\n test_plugin = \"kolibri.plugins.media_player\"\n assert test_plugin in installed_apps_before\n cli.disable.callback((test_plugin,), False)\n assert test_plugin not in plugins.config.ACTIVE_PLUGINS\n assert test_plugin not in plugins.config[\"INSTALLED_PLUGINS\"]\n assert test_plugin in plugins.config[\"DISABLED_PLUGINS\"]\n installed_apps_before = plugins.config[\"INSTALLED_PLUGINS\"].copy()\n cli.disable.callback((test_plugin,), False)\n assert test_plugin not in plugins.config.ACTIVE_PLUGINS\n assert test_plugin not in plugins.config[\"INSTALLED_PLUGINS\"]\n assert test_plugin in plugins.config[\"DISABLED_PLUGINS\"]\n\n\ndef test_plugin_with_no_plugin_class(plugins):\n \"\"\"\n Expected behavior is that nothing blows up with exceptions, user just gets\n a warning and nothing is enabled or changed in the configuration.\n \"\"\"\n # For fun, we pass in a system library\n installed_apps_before = plugins.config[\"INSTALLED_PLUGINS\"].copy()\n try:\n cli.enable.callback((\"os.path\",), False)\n except Exception:\n pass\n assert installed_apps_before == plugins.config[\"INSTALLED_PLUGINS\"]\n\n\n@pytest.mark.django_db\ndef test_kolibri_listen_port_env(monkeypatch):\n \"\"\"\n Starts and stops the server, mocking the actual server.start()\n Checks that the correct fallback port is used from the environment.\n \"\"\"\n\n with patch(\"django.core.management.call_command\"), patch(\n \"kolibri.utils.server.start\"\n ) as start:\n from kolibri.utils import server\n\n def start_mock(port, *args, **kwargs):\n assert port == test_port\n try:\n os.remove(server.STARTUP_LOCK)\n except OSError:\n pass\n\n activate_log_logger(monkeypatch)\n start.side_effect = start_mock\n\n test_port = 1234\n\n os.environ[\"KOLIBRI_HTTP_PORT\"] = str(test_port)\n\n # force a reload of plugins.OPTIONS so the environment variable will be read in\n from kolibri.utils import conf\n\n conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))\n\n cli.start.callback(test_port, False)\n with pytest.raises(SystemExit) as excinfo:\n cli.stop.callback()\n assert excinfo.code == 0\n\n # Stop the server AGAIN, asserting that we can call the stop command\n # on an already stopped server and will be gracefully informed about\n # it.\n with pytest.raises(SystemExit) as excinfo:\n cli.stop.callback()\n assert excinfo.code == 0\n assert \"Already stopped\" in LOG_LOGGER[-1][1]\n\n def status_starting_up():\n raise server.NotRunning(server.STATUS_STARTING_UP)\n\n # Ensure that if a server is reported to be 'starting up', it doesn't\n # get killed while doing that.\n monkeypatch.setattr(server, \"get_status\", status_starting_up)\n with pytest.raises(SystemExit) as excinfo:\n cli.stop.callback()\n assert excinfo.code == server.STATUS_STARTING_UP\n assert \"Not stopped\" in LOG_LOGGER[-1][1]\n\n\n@pytest.mark.django_db\n@patch(\"kolibri.utils.cli.get_version\", 
return_value=\"\")\n@patch(\"kolibri.utils.cli.update\")\n@patch(\"kolibri.utils.cli.plugin.callback\")\n@patch(\"kolibri.core.deviceadmin.utils.dbbackup\")\ndef test_first_run(dbbackup, plugin, update, get_version):\n \"\"\"\n Tests that the first_run() function performs as expected\n \"\"\"\n\n cli.initialize()\n update.assert_called_once()\n dbbackup.assert_not_called()\n\n # Check that it got called for each default plugin\n from kolibri import plugins\n\n assert set(plugins.config[\"INSTALLED_PLUGINS\"]) == set(plugins.DEFAULT_PLUGINS)\n\n\n@pytest.mark.django_db\n@patch(\"kolibri.utils.cli.get_version\", return_value=\"0.0.1\")\n@patch(\"kolibri.utils.cli.update\")\ndef test_update(update, get_version):\n \"\"\"\n Tests that update() function performs as expected\n \"\"\"\n cli.initialize()\n update.assert_called_once()\n\n\n@pytest.mark.django_db\n@patch(\"kolibri.utils.cli.get_version\", return_value=\"0.0.1\")\ndef test_update_exits_if_running(get_version):\n \"\"\"\n Tests that update() function performs as expected\n \"\"\"\n with patch(\"kolibri.utils.cli.server.get_status\"):\n try:\n cli.initialize()\n pytest.fail(\"Update did not exit when Kolibri was already running\")\n except SystemExit:\n pass\n\n\n@pytest.mark.django_db\ndef test_version_updated():\n \"\"\"\n Tests our db backup logic: version_updated gets any change, backup gets only non-dev changes\n \"\"\"\n assert cli.version_updated(\"0.10.0\", \"0.10.1\")\n assert not cli.version_updated(\"0.10.0\", \"0.10.0\")\n assert not cli.should_back_up(\"0.10.0-dev0\", \"\")\n assert not cli.should_back_up(\"0.10.0-dev0\", \"0.10.0\")\n assert not cli.should_back_up(\"0.10.0\", \"0.10.0-dev0\")\n assert not cli.should_back_up(\"0.10.0-dev0\", \"0.10.0-dev0\")\n\n\n@pytest.mark.django_db\n@patch(\"kolibri.utils.cli.get_version\", return_value=kolibri.__version__)\n@patch(\"kolibri.utils.cli.update\")\n@patch(\"kolibri.core.deviceadmin.utils.dbbackup\")\ndef test_update_no_version_change(dbbackup, update, get_version):\n \"\"\"\n Tests that when the version doesn't change, we are not doing things we\n shouldn't\n \"\"\"\n cli.initialize()\n update.assert_not_called()\n dbbackup.assert_not_called()\n\n\ndef test_cli_usage():\n # Test the -h\n with pytest.raises(SystemExit) as excinfo:\n cli.main(\"-h\")\n assert excinfo.code == 0\n with pytest.raises(SystemExit) as excinfo:\n cli.main(\"--version\")\n assert excinfo.code == 0\n\n\n@patch(\"kolibri.utils.cli.click.echo\")\ndef test_list_plugins(echo_mock, plugins):\n cli.list.callback()\n test_plugin = \"kolibri.plugins.media_player\"\n any(\n map(\n lambda x: test_plugin in x[0] and \"ENABLED\" in x[0],\n echo_mock.call_args_list,\n )\n )\n\n\n@patch(\"kolibri.utils.cli.click.echo\")\ndef test_list_plugins_disabled(echo_mock, plugins):\n cli.list.callback()\n test_plugin = \"kolibri.plugins.media_player\"\n cli.disable.callback((test_plugin,), False)\n any(\n map(\n lambda x: test_plugin in x[0] and \"DISABLED\" in x[0],\n echo_mock.call_args_list,\n )\n )\n\n\n@patch(\"kolibri.utils.cli._migrate_databases\")\n@patch(\"kolibri.utils.cli.version_updated\")\ndef test_migrate_if_unmigrated(version_updated, _migrate_databases):\n # No matter what, ensure that version_updated returns False\n version_updated.return_value = False\n from morango.models import InstanceIDModel\n\n with patch.object(\n InstanceIDModel, \"get_or_create_current_instance\"\n ) as get_or_create_current_instance:\n get_or_create_current_instance.side_effect = OperationalError(\"Test\")\n cli.initialize()\n 
        _migrate_databases.assert_called_once()\n","repo_name":"sanmoy/kolibri-azure","sub_path":"docker/alpine/kolibri/utils/tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":10459,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"17175935159","text":"import csv\n\nimport h5py\nimport numpy as np\n\n\ndef get_query_patch_and_coordinates(\n    patches_hdf5_file, coordinates_hdf5_file, patch_key\n):\n    \"\"\"Get query patch data and its coordinates.\"\"\"\n    query_patch_data = patches_hdf5_file[patch_key][\n        len(patches_hdf5_file[patch_key]) // 2\n    ]\n    patch_coordinates = coordinates_hdf5_file[patch_key][\n        len(coordinates_hdf5_file[patch_key]) // 2\n    ]\n    return query_patch_data, patch_coordinates\n\n\ndef cosine_similarity(vec1, vec2):\n    \"\"\"Calculate cosine similarity between two vectors.\"\"\"\n    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))\n\n\ndef find_most_similar_patch(\n    query_patch_data, patches_hdf5_file, coordinates_hdf5_file, key\n):\n    \"\"\"Find the patch that is most similar to the query_patch_data.\"\"\"\n    patches = np.array(patches_hdf5_file[key])\n    query_patch_data_flatten = query_patch_data.flatten()\n    patches_flatten = patches.reshape(patches.shape[0], -1)\n\n    similarities = np.apply_along_axis(\n        cosine_similarity, 1, patches_flatten, query_patch_data_flatten\n    )\n    max_similarity_index = np.argmax(similarities)\n\n    return (\n        similarities[max_similarity_index],\n        coordinates_hdf5_file[key][max_similarity_index],\n        max_similarity_index,\n    )\n\n\ndef write_to_csv(data, file_name):\n    \"\"\"Write data to a CSV file.\"\"\"\n    with open(file_name, \"w\", newline=\"\") as csvfile:\n        csv_writer = csv.writer(csvfile)\n        csv_writer.writerow(\n            [\"Image\", \"Max Similarity\", \"Coordinates\", \"Index\", \"Max Patches\"]\n        )\n        for row in data:\n            csv_writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n    # Paths (ideally these should be command-line arguments or read from a config file)\n    patches_hdf5_path = \"patches.hdf5\"\n    coordinates_hdf5_path = \"coordinates.hdf5\"\n    csv_file_name = \"queried_coordinates.csv\"\n\n    output_data = []\n\n    with h5py.File(patches_hdf5_path, \"r\") as patches_hdf5_file, h5py.File(\n        coordinates_hdf5_path, \"r\"\n    ) as coordinates_hdf5_file:\n        patch_key = list(patches_hdf5_file.keys())[0]\n        query_patch_data, patch_coordinates = get_query_patch_and_coordinates(\n            patches_hdf5_file, coordinates_hdf5_file, patch_key\n        )\n\n        for key in coordinates_hdf5_file.keys():\n            (\n                max_similarity,\n                most_similar_coordinates,\n                most_similar_index,\n            ) = find_most_similar_patch(\n                query_patch_data, patches_hdf5_file, coordinates_hdf5_file, key\n            )\n\n            output_data.append(\n                [\n                    key,\n                    max_similarity,\n                    most_similar_coordinates,\n                    most_similar_index,\n                    len(coordinates_hdf5_file[key]),\n                ]\n            )\n\n    write_to_csv(output_data, csv_file_name)\n","repo_name":"Geeks-Sid/histo-cbir","sub_path":"find_cbir_cosine.py","file_name":"find_cbir_cosine.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"10675168367","text":"from typing import List\nfrom collections import deque\n\n\nclass Solution:\n    def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n        visited = set()\n        q = deque()\n        for i in range(len(mat)):\n            for j in range(len(mat[0])):\n                if mat[i][j] == 0:\n                    visited.add((i, j))\n                    q.append((i, j))\n        while q:\n            x, y = q.popleft()\n            for dirr in [(1,0), (-1, 0), (0, 1), (0, -1)]:\n                newX, newY = x + dirr[0], y+dirr[1]\n                if 
newX >= 0 and newX <= len(mat) - 1 and newY >= 0 and newY <= len(mat[0]) - 1 and (newX, newY) not in visited:\n mat[newX][newY] = mat[x][y]+1\n visited.add((newX, newY))\n q.append((newX, newY))\n return mat","repo_name":"Dongfang1021/Leetcode","sub_path":"01-matrix/01-matrix.py","file_name":"01-matrix.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"16117296424","text":"import base64\nimport importlib.util\nimport io\nimport os\nimport re\nimport sys\nimport functools\nfrom hashlib import md5\nfrom mimetypes import guess_type\nfrom os.path import dirname, abspath, split\nfrom urllib.parse import parse_qs\n\n\ndef import_python_module_by_filename(name, module_filename):\n \"\"\"\n Import's a file as a python module, with specified name.\n\n Don't ask about the `name` argument, it's required.\n\n :param name: The name of the module to override upon imported filename.\n :param module_filename: The filename to import as a python module.\n :return: The newly imported python module.\n \"\"\"\n\n sys.path.append(abspath(dirname(module_filename)))\n spec = importlib.util.spec_from_file_location(\n name,\n location=module_filename)\n imported_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(imported_module)\n return imported_module\n\n\ndef construct_class_by_name(name, *args, **kwargs):\n \"\"\"\n Construct a class by module path name using *args and **kwargs\n\n Don't ask about the `name` argument, it's required.\n\n :param name: class name\n :return: The newly imported python module.\n \"\"\"\n parts = name.split('.')\n module_name, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_name)\n return getattr(module, class_name)(*args, **kwargs)\n\n\ndef to_camel_case(text):\n return re.sub(r'(_\\w)', lambda x: x.group(1)[1:].upper(), text)\n\n\ndef copy_stream(source, target, *, chunk_size: int= 16 * 1024) -> int:\n length = 0\n while 1:\n buf = source.read(chunk_size)\n if not buf:\n break\n length += len(buf)\n target.write(buf)\n return length\n\n\ndef md5sum(f):\n if isinstance(f, str):\n file_obj = open(f, 'rb')\n else:\n file_obj = f\n\n try:\n checksum = md5()\n while True:\n d = file_obj.read(1024)\n if not d:\n break\n checksum.update(d)\n return checksum.digest()\n finally:\n if file_obj is not f:\n file_obj.close()\n\n\ndef split_url(url):\n if '?' 
in url:\n        path, query = url.split('?', 1)\n    else:\n        path, query = url, ''\n\n    return path, {k: v[0] if len(v) == 1 else v for k, v in parse_qs(\n        query,\n        keep_blank_values=True,\n        strict_parsing=False\n    ).items()}\n\n\ndef encode_multipart_data(fields, files, boundary=None):\n    boundary = boundary or ''.join([\n        '-----',\n        base64.urlsafe_b64encode(os.urandom(27)).decode()\n    ])\n    crlf = b'\\r\\n'\n    lines = []\n\n    if fields:\n        for key, value in fields.items():\n            lines.append('--' + boundary)\n            lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\n            lines.append('')\n            lines.append(value)\n\n    if files:\n        for key, file_path in files.items():\n            filename = split(file_path)[1]\n            lines.append('--' + boundary)\n            lines.append(\n                'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n                (key, filename))\n            lines.append(\n                'Content-Type: %s' %\n                (guess_type(filename)[0] or 'application/octet-stream'))\n            lines.append('')\n            lines.append(open(file_path, 'rb').read())\n\n    lines.append('--' + boundary + '--')\n    lines.append('')\n\n    body = io.BytesIO()\n    length = 0\n    for l in lines:\n        line = (l if isinstance(l, bytes) else l.encode()) + crlf\n        length += len(line)\n        body.write(line)\n    body.seek(0)\n    content_type = 'multipart/form-data; boundary=%s' % boundary\n    return content_type, body, length\n\n\ndef noneifnone(func):\n\n    @functools.wraps(func)\n    def wrapper(value):\n        return func(value) if value is not None else None\n\n    return wrapper\n\n","repo_name":"pylover/restfulpy","sub_path":"restfulpy/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"14"} +{"seq_id":"34671264947","text":"from importlib import import_module\nfrom flask import request, abort\nfrom dynaconf import FlaskDynaconf, settings\n\n# This file is part of the Nanobrok Open Source Project.\n# nanobrok is licensed under the Apache 2.0.\n\n# Copyright 2021 p0cL4bs Team - Marcos Bomfim (mh4x0f)\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef load_extensions(app):\n    print(\"\\nRegisters: Extensions\\n\")\n    for extension in app.config.EXTENSIONS:\n        module_name, factory = extension.split(\":\")\n        print(f\"ext: {module_name}\")\n        ext = import_module(module_name)\n        getattr(ext, factory)(app)\n\n    print(\"\\nRegisters: Routers\\n\")\n    for route in app.config.ROUTERS:\n        module_name, factory = route.split(\":\")\n        ext = import_module(module_name)\n        getattr(ext, factory)(app)\n\n    print(\"\\n[*] Nanobrok is running\")\n\n\ndef setHeaderResponse(app):\n    @app.after_request\n    def apply_caching(response):\n        response.headers[\"X-Frame-Options\"] = \"SAMEORIGIN\"\n        return response\n\n    # block http connection rule\n    if app.config.BLOCK_HTTP_CONNECTION:\n\n        @app.before_request\n        def before_request():\n            if not request.is_secure:\n                return abort(403)\n\n\ndef init_app(app, **config):\n    FlaskDynaconf(app, **config)\n
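    # editor note: once Dynaconf has loaded the settings, attach the security headers (and the optional HTTPS-only guard).\n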
setHeaderResponse(app)\n","repo_name":"P0cL4bs/Nanobrok","sub_path":"nanobrok/ext/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"14"} +{"seq_id":"13511402147","text":"import cv2\nimport numpy as np\n\nfrom fdfat.utils.utils import LMK_PARTS, LMK_PART_NAMES, get_color\n\nfrom fdfat.utils.pose_estimation import PoseEstimator\nfrom fdfat.utils import box_utils\nfrom fdfat.tracking import karman_filter\n\nclass Face:\n\n counter = 0\n\n def __init__(self, bbox, frame_size, landmark=None, score=-1):\n self.frame_width, self.frame_height = frame_size\n\n self.time_since_update = 0\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n self.face_score = score\n\n self.id = Face.counter\n Face.counter += 1\n\n self.bbox = bbox.copy()\n self._bbox_stable = bbox.copy()\n self.bbox_filter = karman_filter.create_bbox_filter(self.bbox)\n\n if landmark is not None:\n self._init_landmark(landmark)\n else:\n self.landmarked_initiated = False\n\n @property\n def stable_landmark(self):\n stabled = []\n for kal in self.landmark_filters:\n stabled.append([kal.x[0, 0], kal.x[2, 0]])\n return np.array(stabled)\n \n @property\n def stable_bbox(self):\n return karman_filter.convert_x_to_bbox(self.bbox_filter.x).reshape(-1)\n\n def _init_landmark(self, landmark):\n self.num_landmark = len(landmark)\n self.landmark = landmark.copy()\n self._landmark_stable = landmark.copy()\n\n self.landmark_filters = [\n karman_filter.create_point_filter(point) for _, point in zip(range(self.num_landmark), landmark)\n ]\n\n self.pose_estimator = PoseEstimator(self.frame_width, self.frame_height)\n self.estimate_pose()\n\n self.landmarked_initiated = True\n\n def predict(self):\n self.bbox_filter.predict()\n\n self.age += 1\n if (self.time_since_update > 0):\n self.hit_streak = 0\n\n self.time_since_update += 1\n\n self.history.append(self.stable_bbox)\n\n def update_bbox(self, bbox):\n\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n\n self.bbox_filter.update(karman_filter.convert_bbox_to_z(bbox))\n self.bbox = bbox\n\n def update_ladnmark(self, landmark):\n\n if not self.landmarked_initiated:\n self._init_landmark(landmark)\n return\n\n for kal, (lmx, lmy) in zip(self.landmark_filters, landmark):\n kal.predict()\n kal.update((lmx, lmy))\n \n self.landmark = landmark\n self.estimate_pose()\n\n def estimate_pose(self, stable=True):\n lmk = self.stable_landmark if stable else self.landmark\n self._pose = self.pose_estimator.solve(np.float32([(a[0], a[1]) for a in lmk[:68,:]]))\n\n def render(self, frame):\n\n # tbox = self.bbox.astype(np.int32)\n # cv2.rectangle(frame, (tbox[0], tbox[1]), (tbox[2], tbox[3]), (0,0,0), 2)\n\n sbox = self.stable_bbox.astype(np.int32)\n cv2.rectangle(frame, (sbox[0], sbox[1]), (sbox[2], sbox[3]), get_color(self.id), 2)\n\n if not self.landmarked_initiated:\n return\n\n lmk = self.stable_landmark.astype(np.int32)\n for begin, end in LMK_PARTS[:-1]:\n lx, ly = lmk[begin]\n for idx in range(begin+1, end):\n x, y = lmk[idx]\n cv2.line(frame, (lx, ly), (x, y), (255, 0, 0), 1)\n lx, ly = x, y\n\n for x, y in lmk:\n cv2.circle(frame, (x, y), 2, (255, 255, 255), 1)\n \n cv2.putText(frame, f\"id: {self.id}\", (sbox[0]+5, sbox[1]+30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n\n # cv2.putText(frame, f\"{self.face_score*100:0.1f}\", (sbox[0]+5, sbox[1]+60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n\n 
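        # editor note: finally overlay the head-pose axes estimated from the stabilized landmarks.\n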
        self.pose_estimator.visualize(frame, self._pose)","repo_name":"RyanDam/Fast-6DoF-Face-Alignment-and-Tracking","sub_path":"fdfat/tracking/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"15547791234","text":"import speech_recognition as sr  # imports the speech recognition packages for recorded audio and more\n\n\n\ndef Entrada (E_Audio):\n    r = sr.Recognizer()\n    with sr.Microphone() as E_Audio:\n        audio = r.listen(E_Audio)\n    try:\n        text = r.recognize_google(audio)\n        return print(\"Word: {}\".format(text))\n    except:\n        return print(\"The word was not recognized\")","repo_name":"Jhonnyarias712/bot_telegram","sub_path":"Lectura.py","file_name":"Lectura.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"31762775080","text":"from django import forms\nfrom .models import Contact\n\n\nclass ContactForm(forms.ModelForm):\n    \"\"\"\n    Form for user to be able to contact the admin\n    \"\"\"\n    class Meta:\n        model = Contact\n        fields = ('name', 'email', 'subject', 'body',)\n        labels = {\n            'body': 'Your message...'\n        }\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        placeholders = {\n            'name': 'Full Name',\n            'email': 'Email Address',\n            'subject': 'Describe your query',\n            'body': 'Your enquiry...',\n        }\n\n        for field in self.fields:\n            placeholder = placeholders[field]\n            self.fields[field].widget.attrs['placeholder'] = placeholder\n","repo_name":"stephenbeese/FizzyBeese","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"25071413346","text":"from selenium import webdriver\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver import ActionChains\r\nfrom pyquery import PyQuery as pq\r\nfrom config import *\r\nimport pymongo\r\nimport pymysql\r\nimport re\r\nimport time\r\n\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\n# search keyword: create the matching MySQL table (renaming it as needed) and set the Mongo table in the config file config.py\r\nclient = pymongo.MongoClient(MONGO_URL)\r\ndb = client[MONGO_DB]\r\n\r\nchrome_options = Options()\r\n# chrome_options.add_argument('--headless')\r\n# chrome_options.add_argument('--disable-gpu')\r\nchrome_options.add_argument('user-agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36\"')\r\n# chrome_options.add_argument('--proxy-server=http://27.17.45.90:43411')\r\nprefs = {\"profile.managed_default_content_settings.images\":2}\r\nchrome_options.add_experimental_option(\"prefs\",prefs)\r\nbrowser = webdriver.Chrome(chrome_options=chrome_options)\r\n# browser = webdriver.Chrome(r'C:\\Users\\Administrator\\AppData\\Local\\Google\\Chrome\\Application\\chromedriver.exe')\r\nwait = WebDriverWait(browser, 10)\r\nbrowser.maximize_window()\r\ndef search():\r\n    try:\r\n        browser.get('http://www.taobao.com')\r\n        input = wait.until(\r\n            EC.presence_of_element_located((By.CSS_SELECTOR, '#q'))\r\n        )\r\n        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')))\r\n        time.sleep(2)\r\n        input.send_keys(KEYWORD)\r\n
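        # editor note: short pauses between typing and clicking reduce the chance of tripping Taobao's bot detection.\r\n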
        time.sleep(2)\r\n        submit.click()\r\n        time.sleep(2)\r\n        validate()\r\n        print('returned')\r\n        total = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.total')))\r\n        get_products()\r\n        print('failed to fetch the page')\r\n        return total.text\r\n    except TimeoutException:\r\n        print('redirect error')\r\n        return search()\r\n\r\ndef validate():\r\n    try:\r\n        print('redirect succeeded')\r\n        time.sleep(2)\r\n        button = wait.until(\r\n            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_QRCodeLogin > div.login-links > a.forget-pwd.J_Quick2Static'))\r\n        )\r\n        button.click()\r\n        time.sleep(2)\r\n        name = wait.until(\r\n            EC.presence_of_element_located((By.CSS_SELECTOR, '#TPL_username_1'))\r\n        )\r\n        name.clear()\r\n        name.send_keys(USERNAME)\r\n        time.sleep(2)\r\n        password = wait.until(\r\n            EC.presence_of_element_located((By.CSS_SELECTOR, '#TPL_password_1'))\r\n        )\r\n        password.send_keys(PWD)\r\n        print('got input box 2')\r\n        time.sleep(2)\r\n        # grab the slider\r\n        rolling = wait.until(\r\n            EC.presence_of_element_located((By.CSS_SELECTOR, '#nc_1_n1z'))\r\n        )\r\n        action = ActionChains(browser)\r\n        action.click_and_hold(rolling)\r\n        action.reset_actions()\r\n        # time.sleep(5)\r\n        action.move_by_offset(400,0)\r\n        # action.drag_and_drop_by_offset(rolling,400,0)\r\n        print('slider')\r\n        time.sleep(2)\r\n        submit = wait.until(\r\n            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_SubmitStatic'))\r\n        )\r\n        print('got the submit button')\r\n        # button.click()\r\n        # name.clear()\r\n        # name.send_keys(USERNAME)\r\n        # password.send_keys(PWD)\r\n        submit.click()\r\n        print('clicked submit')\r\n    except TimeoutException:\r\n        print('operation failed')\r\n        return search()\r\n\r\n\r\ndef next_page(page_number):\r\n    try:\r\n        input = wait.until(\r\n            EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input'))\r\n        )\r\n        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))\r\n        input.clear()\r\n        input.send_keys(page_number)\r\n        submit.click()\r\n        wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'), str(page_number)))\r\n        get_products()\r\n    except TimeoutException:\r\n        next_page(page_number)\r\n\r\ndef get_products():\r\n    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))\r\n    html = browser.page_source\r\n    doc = pq(html)\r\n    items = doc('#mainsrp-itemlist .items .item').items()\r\n    for item in items:\r\n        product = {\r\n            'image': item.find('.pic .img').attr('src'),\r\n            'price': item.find('.price').text().replace('\\n',''),\r\n            'deal': item.find('.deal-cnt').text()[:-3],\r\n            'title': item.find('.title').text().replace('\\n',''),\r\n            'shop': item.find('.shop').text(),\r\n            'location': item.find('.location').text()\r\n        }\r\n        print(product)\r\n        save_to_mongo(product)\r\n        # save_to_mysql(product)\r\n\r\ndef save_to_mongo(result):\r\n    try:\r\n        if db[MONGO_TABLE].insert(result):\r\n            print('saved to MongoDB', result)\r\n    except Exception:\r\n        print('failed to save to MongoDB', result)\r\n\r\n# def save_to_mysql(item):\r\n#     params = {\r\n#         \"host\": \"localhost\",\r\n#         \"port\": 3306,\r\n#         \"user\": \"root\",\r\n#         \"password\": \"123\",\r\n#         \"db\": \"taobao\",\r\n#         \"charset\": \"utf8\"\r\n#     }\r\n#     try:\r\n#         connection = pymysql.connect(**params)\r\n#         cur = connection.cursor()\r\n#     except Exception:\r\n#         print(\"database connection failed or cursor creation failed\")\r\n#     sql = 'insert into pc(title,price,deal,shop,location,image) values(%s,%s,%s,%s,%s,%s)'\r\n#     param = (item['title'], item['price'], item['deal'], 
item['shop'], item['location'], item['image'])\r\n#     rowscount = cur.execute(sql, param)\r\n#     if rowscount:\r\n#         print('data written successfully!')\r\n#     connection.commit()\r\n#     cur.close()\r\n#     connection.close()\r\n\r\ndef main():\r\n    try:\r\n        total = search()\r\n        total = int(re.compile('(\\d+)').search(total).group(1))\r\n        for i in range(2,total+1):\r\n            next_page(i)\r\n    finally:\r\n        browser.close()\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"shw2048/web-crawler","sub_path":"TaoBao/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":6331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"8635308904","text":"#!/usr/bin/python3\n\"\"\"BaseModel module\"\"\"\nimport models\nimport uuid\nfrom datetime import datetime\n\n\nclass BaseModel:\n    \"\"\"BaseModel class; defines attributes\n    common to the other models\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"Initialize an instance of BaseModel;\n        *args is not used\"\"\"\n\n        iso_format = \"%Y-%m-%dT%H:%M:%S.%f\"\n        if len(kwargs) != 0:\n            for key, value in kwargs.items():\n                if key == \"created_at\" or key == \"updated_at\":\n                    self.__dict__[key] = datetime.strptime(value, iso_format)\n                else:\n                    self.__dict__[key] = value\n        else:\n            self.id = str(uuid.uuid4())\n            self.created_at = datetime.now()\n            self.updated_at = datetime.now()\n            models.storage.new(self)\n\n    def __str__(self):\n        \"\"\"Return a string of the class BaseModel\"\"\"\n        return \"[{}] ({}) {}\".\\\n            format(self.__class__.__name__, self.id, self.__dict__)\n\n    def save(self):\n        \"\"\"save updated_at with the current time.\"\"\"\n        self.updated_at = datetime.today()\n        models.storage.save()\n\n    def to_dict(self):\n        \"\"\"returns a dictionary containing all keys/values\"\"\"\n        new = self.__dict__.copy()\n        new['__class__'] = self.__class__.__name__\n        new['created_at'] = datetime.isoformat(new['created_at'])\n        new['updated_at'] = datetime.isoformat(new['updated_at'])\n        return new\n","repo_name":"andresdiaz10/AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"25585222357","text":"from typing import List\n\n\nclass Solution:\n    def reorderLogFiles(self, logs: List[str]) -> List[str]:\n        l_l = []\n        d_l = []\n        for log in logs:\n            if log.split()[1].isdigit():\n                d_l.append(log)\n            else:\n                l_l.append(log)\n\n        l_l.sort(key= lambda x: (x.split()[1:], x.split()[0]))\n        return l_l + d_l\n\n\nprint(Solution.reorderLogFiles(' ', [\"dig1 8 1 5 1\", \"let1 art can\", \"dig2 8 1 5 1\", \"let2 own kit dig\", \"let3 art zero\"]))","repo_name":"rotomoo/PyStudy","sub_path":"algorithm/leetcode/937.py","file_name":"937.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"74593461135","text":"import random\nimport math\nfrom path_map import Path_Map\n\ncourse_map = Path_Map()\n\n\nclass Individual:\n    def __init__(self, num_possible_moves, moves=None, robot_height=4):\n        # use None instead of a mutable default so individuals don't share one moves list\n        self.moves = list(moves) if moves is not None else []\n        self.possible_moves = ['L', 'U', 'R', 'D']\n        self.num_moves = len(self.moves)\n        self.robot_height = robot_height\n\n        if (self.num_moves == 0):\n            # generate random moves\n\n            for _ in range(num_possible_moves):\n                self.moves.append(random.choice(self.possible_moves))\n\n            self.num_moves = len(self.moves)\n\n        self.fitness = self.fitness_function()\n\n\n\n    def fitness_function(self):\n        f = 0\n        out_of_bounds_penalty 
= 300\n target_reached_reward = len(self.moves)*100\n x, y = course_map.start\n tx, ty = course_map.end\n height = course_map.map[x][y]\n for m in range(self.num_moves):\n # print(m, [x, y], end=\" \")\n if (self.moves[m] == 'L'):\n if y > 0 and height + self.robot_height >= course_map.map[x][y-1]:\n f = f + max(course_map.map[x][y-1] - height + 1, 1)**2\n y = y - 1\n \n else:\n f = f - out_of_bounds_penalty \n elif (self.moves[m] == 'U'):\n if x > 0 and height + self.robot_height >= course_map.map[x-1][y]:\n f = f + max(course_map.map[x-1][y] - height + 1, 1)**2\n x = x - 1\n else:\n f = f - out_of_bounds_penalty\n elif (self.moves[m] == 'R'):\n if y < course_map.columns-1 and height + self.robot_height >= course_map.map[x][y+1]:\n f = f + max(course_map.map[x][y+1] - height + 1, 1)**2\n y = y + 1\n else:\n f = f - out_of_bounds_penalty\n elif (self.moves[m] == 'D'):\n if x < course_map.rows-1 and height + self.robot_height >= course_map.map[x+1][y]:\n f = f + max(course_map.map[x+1][y] - height + 1, 1)**2\n x = x + 1\n else:\n f = f - out_of_bounds_penalty\n if x == tx and y == ty: # award the individual that gets to the final point\n f = f + target_reached_reward\n\n height = course_map.map[x][y]\n # print(f)\n self.fitness = f\n return f\n\n def append_move(self, move):\n self.moves.append(move)\n self.num_moves += 1\n # print(self.moves, \"append\")\n\n def delete_move(self, index):\n removed_ele = self.moves.pop(index)\n self.num_moves -= 1\n # print(self.moves, \"delete\")\n\nclass Population:\n def __init__(self, population_size, num_possible_moves):\n self.population = []\n self.population_size = population_size\n for _ in range(population_size):\n self.population.append(Individual(num_possible_moves))\n\n\n def average_fitness(self):\n total_fitness = 0\n for individual in self.population:\n total_fitness+=individual.fitness\n avg_fitness = total_fitness / len(self.population)\n\n return avg_fitness \n\n def get_adjusted_fitnesses(self):\n fitnesses = []\n\n for i in self.population:\n fitnesses.append(i.fitness)\n\n min_fitness = min(fitnesses)\n\n for i in range(len(fitnesses)):\n fitnesses[i] -= min_fitness\n return fitnesses\n\n def get_fitnesses(self):\n fitnesses = []\n\n for i in self.population:\n fitnesses.append(i.fitness)\n\n return fitnesses\n\n def get_population(self):\n return self.population\n\n \n def get_best_individual(self):\n best_individual = self.population[0]\n for individual in self.population:\n if individual.fitness > best_individual.fitness:\n best_individual = individual\n return best_individual\n","repo_name":"lolzone13/Evolutionary-Path-Planning","sub_path":"population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"17608183196","text":"import os\n\nimport setuptools\n\nfrom eoreader.__meta__ import (\n __author__,\n __author_email__,\n __description__,\n __documentation__,\n __title__,\n __url__,\n __version__,\n)\n\nBASEDIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))\nwith open(os.path.join(BASEDIR, \"README.md\"), \"r\", encoding=\"utf8\") as f:\n readme = f.read()\n\nsetuptools.setup(\n name=__title__,\n version=__version__,\n author=__author__,\n author_email=__author_email__,\n description=__description__,\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n install_requires=[\n \"lxml\",\n \"h5netcdf\",\n \"scipy\",\n 
\"rasterio>=1.3.0\",\n \"xarray>=0.18.0\",\n \"rioxarray>=0.10.0\",\n \"geopandas>=0.11.0\",\n \"sertit[full]>=1.30.0\",\n \"spyndex>=0.3.0\",\n \"pyresample\",\n \"zarr\",\n \"rtree\",\n \"validators\",\n \"methodtools\",\n \"dicttoxml\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n package_data={\"\": [\"LICENSE\", \"NOTICE\"], \"eoreader.data\": [\"*.xml\"]},\n include_package_data=True,\n python_requires=\">=3.9\",\n project_urls={\n \"Bug Tracker\": f\"{__url__}/issues/\",\n \"Documentation\": __documentation__,\n \"Source Code\": __url__,\n },\n)\n","repo_name":"sertit/eoreader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"14"} +{"seq_id":"8511321609","text":"INPUT = 303\n\n\ndef spinlock(step):\n \"\"\"\n >>> spinlock(3)\n 638\n \"\"\"\n\n max_value = 2017\n cycle_buffer = [0]\n position = 0\n\n for value in range(1, max_value + 1):\n position = (position + step) % len(cycle_buffer) + 1\n cycle_buffer.insert(position, value)\n\n position = (position + 1) % len(cycle_buffer)\n return cycle_buffer[position]\n\n\ndef part1():\n \"\"\"\n >>> part1()\n 1971\n \"\"\"\n\n return spinlock(INPUT)\n\n\ndef part2():\n \"\"\"\n >>> part2()\n 17202899\n \"\"\"\n\n max_value = 50000000\n buffer_size = 1\n position = 0\n current = None\n step = INPUT\n\n for value in range(1, max_value + 1):\n position = (position + step) % buffer_size + 1\n buffer_size += 1\n if position == 1:\n current = value\n\n return current\n\n\ndef main():\n print(part1())\n print(part2())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"davweb/advent-of-code","sub_path":"advent/year2017/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"30353715326","text":"\n\n\n\nimport numpy as np\nfrom math import log\n\nLESSTHAN = 0\nGREATERTHAN = 1\n\n#http://fn.hgin.com/&_=1405041091\nclass DecisionStump:\n def __init__(self):\n self.threshold = 0\n def predict(self,data,threshold,side):\n result = np.ones(data.shape)\n if side == GREATERTHAN:\n thres = data <= threshold #array of booleans\n else:\n thres = data > threshold\n result[thres] = -1 # changes the indexes where thres is True to -1, false stay the same\n return result\n def train(self,data,labels,weights):\n minError = 100000\n numstep = 10 #number of threshold tries\n numfeat = data.shape[0]\n numexamples = data.shape[1]\n for feature in xrange(numfeat):\n maxi = data[feature,:].max()\n mini = data[feature,:].min()\n for step in xrange(-1,numstep+1):\n threshold = step*(maxi-mini)/numstep +mini\n for side in [LESSTHAN,GREATERTHAN]: \n prediction = self.predict(data[feature,:],threshold,side)\n error = np.ones(numexamples)\n gotRight = prediction == labels\n error[gotRight] = 0\n weightedError = np.dot(weights,error)\n if weightedError < minError:\n minError = 
weightedError\n self.error = minError\n self.feature = feature\n self.prediction = prediction.copy()\n self.threshold = threshold\n self.side = side\n# \n\n\n \n \n \nclass FinalClassifier:\n def __init__(self,classifiers, alphas=None):\n thresholds = []\n sides = []\n features = []\n alphaFlag = True\n if alphas == None:\n alphas = []\n alphaFlag = False\n for c in classifiers:\n if not alphaFlag:\n alpha = float( 0.5 * log( (1.0-c.error) / max(c.error, 1e-16) ) )\n alphas.append(alpha)\n features.append(c.feature)\n thresholds.append(c.threshold)\n sides.append(c.side)\n \n self.featIndexes = np.array(features)\n# print self.featIndexes\n self.thresholds = np.array(thresholds)\n self.sides = np.array(sides)\n self.sides[self.sides==0] = -1\n self.alphas = np.array(alphas)\n def predict(self,features):\n selFeat = features[self.featIndexes]\n# print selFeat\n# print selFeat\n predictions = np.ones(selFeat.shape[0])\n \n# print self.sides\n thresholded = self.sides*selFeat <= self.sides*self.thresholds #Problem with the equal, but not that big a deal\n\n# print thresholded\n predictions[thresholded] = 0\n# print predictions\n result = (predictions*self.alphas).sum()\n# print result\n# print \"RESULT:\",result\n return result < 0.5*self.alphas.sum()\n def test(self, features, labels):\n \n truePosRate = 0.0\n falsePosRate = 0.0\n error = 0.0\n for i in xrange(features.shape[1]): #transposed to iterate over columns\n imgFeatures = features.T[i]\n face = labels[i]\n \n prediction = self.predict(imgFeatures)\n \n #Update True positive rate\n if labels[i] == 0 and prediction == 0:\n truePosRate += 1\n \n #Update false positive rate\n if labels[i] != 0 and prediction == 0:\n falsePosRate += 1\n if labels[i]!=prediction:\n error += 1\n trueNegRate = truePosRate*100/(features.shape[1]/2)\n falsePosRate = falsePosRate*100/(features.shape[1]/2)\n error = error*100/features.shape[1]\n# print \"True negative:\", trueNegRate,\"%\"\n# print \"False positive:\",falsePosRate,\"%\"\n# print \"Error:\",error,\"%\"\n# print \"--------------------\"\n return (trueNegRate,falsePosRate)\n \n \ndef loadClassifiers(path):\n classifierData = np.load(path)\n alphas = classifierData[:,3]\n classifiers = [] \n for i in xrange(classifierData.shape[0]):\n classifier = DecisionStump()\n classifier.threshold = classifierData[i,0]\n classifier.feature = int(classifierData[i,1])\n classifier.side = classifierData[i,2]\n classifiers.append(classifier)\n return classifiers,alphas\n \n \ndef saveClassifiers(classifiers,path): \n saveData = []\n for DS in classifiers:\n alpha = float( 0.5 * log( (1.0-DS.error) / max(DS.error, 1e-16) ) )\n saveData.append((DS.threshold, DS.feature, DS.side, alpha))\n np.save(path,saveData)\n ","repo_name":"StephenJacobJr/Face-Recognition","sub_path":"DecisionStump.py","file_name":"DecisionStump.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72967310733","text":"# intends to split a string from a given .txt file by the integers a, b, c, and d on the following line separated\r\n# by spaces from index a up to b (inclusive) and index c up to d (inclusive)\r\n\r\nfilename = input(\"Input file name... 
\")\r\n\r\ndata = open(filename, \"r\")\r\n\r\nword = data.readlines(1)[0]\r\n\r\nnumbers = data.readlines(2)[0].split(\" \")\r\n\r\na = int(numbers[0])\r\nb = int(numbers[1]) + 1\r\nc = int(numbers[2])\r\nd = int(numbers[3]) + 1\r\n\r\nprint(word[a:b] + \" \" + word[c:d])\r\n","repo_name":"zacharyjarnagin/rosalind","sub_path":"Python Village/Strings_And_Lists.py","file_name":"Strings_And_Lists.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72189442574","text":"import pandas as pd\r\nimport os\r\nfrom PIL import Image\r\nfrom paddleocr import PaddleOCR, draw_ocr\r\n\r\nmymap=[]\r\nmate=[\"张三\",\"李四\",\"王五\",\"赵六\"]#需要统计的成员列表\r\n\r\nfor j in range(len(mate)):\r\n mymap.append(False)\r\n\r\n#开启OCR\r\nocr = PaddleOCR(enable_mkldnn=True,use_angle_cls=False, use_gpu=True,use_tensorrt=True,\r\n lang=\"ch\") # need to run only once to download and load model into memory\r\n\r\nimg_path = '***Location***'\r\n\r\nfile_dir = r'***Location***'\r\nfor root,dirs,files in os.walk(file_dir):\r\n for file in files:\r\n img_path=file_dir+file\r\n if file==\"dic.py\" or file==\"ocr.py\" or file==\"result.csv\":\r\n continue\r\n result = ocr.ocr(img_path, cls=True)\r\n txts = [line[1][0] for line in result]\r\n for i in range(len(txts)):\r\n #print(txts[i])\r\n for j in range(len(mate)):\r\n if txts[i].find(mate[j])>=0:\r\n mymap[j]=True\r\n\r\nprint(\"未到者名单如下:\")\r\nfor i in range(len(mate)):\r\n if mymap[i]==False:\r\n print(mate[i])\r\n","repo_name":"yuzh0816/tools","sub_path":"meetingcounting.py","file_name":"meetingcounting.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"17122490425","text":"from django.utils.translation import ugettext_lazy as _\n\n\nMEDIA_OBJECT_MUSIC = 1\nMEDIA_OBJECT_FILM = 2\nMEDIA_OBJECT_TELEVISION = 3\nMEDIA_OBJECT_TORRENT = 4\nMEDIA_OBJECT_OTHER = 255\nMEDIA_OBJECT_TYPE_CHOICES = (\n (MEDIA_OBJECT_MUSIC, _(u'music')),\n (MEDIA_OBJECT_FILM, _(u'film')),\n (MEDIA_OBJECT_TELEVISION, _(u'television')),\n (MEDIA_OBJECT_TORRENT, _(u'torrent')),\n\n (MEDIA_OBJECT_OTHER, _(u'other')),\n)\n\nARTWORK_TYPE_OTHER = 255\nARTWORK_TYPE_CHOICES = (\n (ARTWORK_TYPE_OTHER, _(u'other')),\n)\n\nDEVICE_TYPE_INTEGRATED_AMPLIFIER = 1\nDEVICE_TYPE_POWER_AMPLIFIER = 2\nDEVICE_TYPE_OPERATIONAL_AMPLIFIER = 3\nDEVICE_TYPE_TUBE_AMPLIFIER = 4\nDEVICE_TYPE_PREAMPLIFIER = 5\nDEVICE_TYPE_CARTRIDGE = 7\nDEVICE_TYPE_TURNTABLE = 8\nDEVICE_TYPE_OPTICAL_DISC_PLAYER = 9\nDEVICE_TYPE_AUDIO_INTERFACE = 10\nDEVICE_TYPE_CHOICES = (\n (DEVICE_TYPE_INTEGRATED_AMPLIFIER,_(u'Integrated amplifier')),\n (DEVICE_TYPE_POWER_AMPLIFIER,_(u'Power amplifier')),\n (DEVICE_TYPE_OPERATIONAL_AMPLIFIER,_(u'Operational amplifier')),\n (DEVICE_TYPE_TUBE_AMPLIFIER,_(u'Vacuum tube amplifier')),\n (DEVICE_TYPE_PREAMPLIFIER,_(u'Preamplifier')),\n (DEVICE_TYPE_CARTRIDGE,_(u'Cartridge (turntable)')),\n (DEVICE_TYPE_TURNTABLE,_(u'Turntable')),\n (DEVICE_TYPE_OPTICAL_DISC_PLAYER,_(u'Optical disc player')),\n (DEVICE_TYPE_AUDIO_INTERFACE,_(u'Audio interface')),\n)\n\nENCODING_TYPE_MP3 = 'mp3'\nENCODING_TYPE_MP4 = 'mp4'\nENCODING_TYPE_OGG = 'ogg'\nENCODING_TYPE_FLAC = 'flac'\nENCODING_TYPE_DTS = 'dts'\nENCODING_TYPE_APE = 'ape'\nENCODING_TYPE_WV = 'wv'\nENCODING_TYPE_MPC = 'mpc'\nENCODING_TYPE_CHOICES = (\n (ENCODING_TYPE_MP3, _(u'MP3')),\n (ENCODING_TYPE_MP4, _(u'AAC')),\n (ENCODING_TYPE_OGG, _(u'OGG')),\n (ENCODING_TYPE_FLAC, 
+{"seq_id":"17122490425","text":"from django.utils.translation import ugettext_lazy as _\n\n\nMEDIA_OBJECT_MUSIC = 1\nMEDIA_OBJECT_FILM = 2\nMEDIA_OBJECT_TELEVISION = 3\nMEDIA_OBJECT_TORRENT = 4\nMEDIA_OBJECT_OTHER = 255\nMEDIA_OBJECT_TYPE_CHOICES = (\n (MEDIA_OBJECT_MUSIC, _(u'music')),\n (MEDIA_OBJECT_FILM, _(u'film')),\n (MEDIA_OBJECT_TELEVISION, _(u'television')),\n (MEDIA_OBJECT_TORRENT, _(u'torrent')),\n\n (MEDIA_OBJECT_OTHER, _(u'other')),\n)\n\nARTWORK_TYPE_OTHER = 255\nARTWORK_TYPE_CHOICES = (\n (ARTWORK_TYPE_OTHER, _(u'other')),\n)\n\nDEVICE_TYPE_INTEGRATED_AMPLIFIER = 1\nDEVICE_TYPE_POWER_AMPLIFIER = 2\nDEVICE_TYPE_OPERATIONAL_AMPLIFIER = 3\nDEVICE_TYPE_TUBE_AMPLIFIER = 4\nDEVICE_TYPE_PREAMPLIFIER = 5\nDEVICE_TYPE_CARTRIDGE = 7\nDEVICE_TYPE_TURNTABLE = 8\nDEVICE_TYPE_OPTICAL_DISC_PLAYER = 9\nDEVICE_TYPE_AUDIO_INTERFACE = 10\nDEVICE_TYPE_CHOICES = (\n (DEVICE_TYPE_INTEGRATED_AMPLIFIER,_(u'Integrated amplifier')),\n (DEVICE_TYPE_POWER_AMPLIFIER,_(u'Power amplifier')),\n (DEVICE_TYPE_OPERATIONAL_AMPLIFIER,_(u'Operational amplifier')),\n (DEVICE_TYPE_TUBE_AMPLIFIER,_(u'Vacuum tube amplifier')),\n (DEVICE_TYPE_PREAMPLIFIER,_(u'Preamplifier')),\n (DEVICE_TYPE_CARTRIDGE,_(u'Cartridge (turntable)')),\n (DEVICE_TYPE_TURNTABLE,_(u'Turntable')),\n (DEVICE_TYPE_OPTICAL_DISC_PLAYER,_(u'Optical disc player')),\n (DEVICE_TYPE_AUDIO_INTERFACE,_(u'Audio interface')),\n)\n\nENCODING_TYPE_MP3 = 'mp3'\nENCODING_TYPE_MP4 = 'mp4'\nENCODING_TYPE_OGG = 'ogg'\nENCODING_TYPE_FLAC = 'flac'\nENCODING_TYPE_DTS = 'dts'\nENCODING_TYPE_APE = 'ape'\nENCODING_TYPE_WV = 'wv'\nENCODING_TYPE_MPC = 'mpc'\nENCODING_TYPE_CHOICES = (\n (ENCODING_TYPE_MP3, _(u'MP3')),\n (ENCODING_TYPE_MP4, _(u'AAC')),\n (ENCODING_TYPE_OGG, _(u'OGG')),\n (ENCODING_TYPE_FLAC, _(u'FLAC')),\n (ENCODING_TYPE_DTS, _(u'DTS')),\n (ENCODING_TYPE_APE, _(u'APE')),\n (ENCODING_TYPE_WV, _(u'WavPack')),\n (ENCODING_TYPE_MPC, _(u'Musepack')),\n)\n\nBITRATE_192 = 1\nBITRATE_APS = 2\nBITRATE_V2 = 3\nBITRATE_V1 = 4\nBITRATE_256 = 5\nBITRATE_APX = 6\nBITRATE_V0 = 7\nBITRATE_Q8_X = 8\nBITRATE_320 = 9\nBITRATE_LOSSLESS = 10\nBITRATE_CHOICES = (\n (BITRATE_192,_(u'192bps')),\n (BITRATE_APS,_(u'APS (VBR)')),\n (BITRATE_V2,_(u'V2 (VBR)')),\n (BITRATE_V1,_(u'V1 (VBR)')),\n (BITRATE_256,_(u'256bps')),\n (BITRATE_APX,_(u'APX (VBR)')),\n (BITRATE_V0,_(u'V0 (VBR)')),\n (BITRATE_Q8_X,_(u'q8.x (VBR)')),\n (BITRATE_320,_(u'320bps')),\n (BITRATE_LOSSLESS,_(u'Lossless')),\n)\n\nBIT_DEPTH_16_BIT = 1\nBIT_DEPTH_24_BIT = 2\nBIT_DEPTH_32_BIT = 3\nBIT_DEPTH_CHOICES = (\n (BIT_DEPTH_16_BIT, _(u'16bit')),\n (BIT_DEPTH_24_BIT, _(u'24bit')),\n (BIT_DEPTH_32_BIT, _(u'32bit')),\n)\n\nSAMPLING_FREQUENCY_44100 = 1\nSAMPLING_FREQUENCY_48000 = 2\nSAMPLING_FREQUENCY_88200 = 3\nSAMPLING_FREQUENCY_96000 = 4\nSAMPLING_FREQUENCY_176400 = 5\nSAMPLING_FREQUENCY_192000 = 6\nSAMPLING_FREQUENCY_CHOICES = (\n (SAMPLING_FREQUENCY_44100,_(u'44.1kHz')),\n (SAMPLING_FREQUENCY_48000,_(u'48kHz')),\n (SAMPLING_FREQUENCY_88200,_(u'88.2kHz')),\n (SAMPLING_FREQUENCY_96000,_(u'96kHz')),\n (SAMPLING_FREQUENCY_176400,_(u'176.4kHz')),\n (SAMPLING_FREQUENCY_192000,_(u'192kHz')),\n)\n","repo_name":"thomlinton/Media-Library","sub_path":"media_library/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"}
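The choice tuples in the enums record above are meant to be handed to Django model fields; a minimal sketch of how they would typically be wired up (the Device model is hypothetical, and the constants are assumed to be imported from enums.py):
from django.db import models

class Device(models.Model):
    # integer-backed choice field built from the DEVICE_TYPE_CHOICES tuples above
    device_type = models.PositiveSmallIntegerField(
        choices=DEVICE_TYPE_CHOICES,
        default=DEVICE_TYPE_TURNTABLE,
    )
    # instance.get_device_type_display() then returns the translated label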
+{"seq_id":"31081876803","text":"\n\nname_str = \"Madhu\" # renamed from 'str' so the built-in type is not shadowed\n\n'''\nStudent\n r_no name marks percentage section fee school_name school_address student_count attendance student_address\n\nbankname ifsccode acc_no name bank_address \n\nmobile_series mobile_model mobile_no iemi \n\n\nInstance variable : Individual to each object\nClass variable : Sharable and modifiable by all objects\n\n\nIV ----> IM\nCV ----> CM\n\n\nAssignment :\n--------------\nIV ----> CM ??\nCV ----> IM ??\n\n'''\n'''\nBehavior -- Action -- Fund Transfer\n\nSTATE -- BeneAccNo Name Branch IFSC Code Amount \n\n\nClass Object\nEncapsulation : Binding the data members and \n member methods into a single entity (class, object)\nAbstraction : Hiding implementation details and exposing only the method signature \n \n \n \n Logical Physical \n Class variables methods\n object methods variables\n \nAbstraction : Class - 0% abstraction\n Abstract class - 0% to 100% abstraction\n Interface - 100% abstraction \n'''\nclass Employee1:\n # FIELDS \n # parameterized constructor\n\n def __init__(self, eid , name = \"Madhu\", sal = 10000):\n self.eid = eid\n self.name = name\n self.sal = sal\n\n # METHODS\n # INSTANCE METHOD\n def get_emp_hike(self,sal = 1000):\n print(\"----In get employee hike----------\")\n \n # INSTANCE METHOD\n def get_emp_desn(self, hike=0):\n if(self.sal <= 100000):\n print(self.name, \" is a software trainee\")\n else:\n print(self.name, \" is a software engineer\")\n \n'''\n 1. Class defined with the special __init__ (constructor) method to initialize instance variables,\n and with the methods that give the BEHAVIOR\n2. Create an object of the class.\n Internally\n Python creates an empty object for us and passes its reference to the self parameter\n Remaining parameters receive the arguments we pass\n In the empty object, instance variables are initialized with the given data\n3. Finally the object reference is assigned to the LHS\n4. We can call methods using the created object\n \n\n'''\nmadhu = Employee1(100,\"MadhuN\",15000) # Object creation; madhu is an object of type Employee1\n\n\n\nprint(madhu.get_emp_hike())\nmadhu.get_emp_desn() # Employee1.get_emp_desn(madhu)\n \n\nmohan = Employee1(200,\"MohanG\",25000)\nmohan.get_emp_desn()\n \n\n\n\n\n\nclass Student:\n # FIELDS\n # class variables\n school_name = \"ABC\"\n school_address = \"BANGALORE\"\n student_count = 0\n\n def __init__(self, sid, name, marks):\n self.sid = sid \n self.name = name\n self.marks = marks\n Student.student_count += 1\n\n # METHODS\n @classmethod\n def get_student_count(cls):\n print(\"---The student count is :: \",cls.student_count)\n \n\nStudent.get_student_count()\n\nmadhu = Student(1, \"MadhuN\", 80)\n\nStudent.get_student_count()\n'''\nclass list:\n def __init__(self,*args,**kwargs):\n .....\n \n def append(self):\n \n def pop(self):\n \n ....\n''' \nlist1 = [1,2,3,4]\nlist1 = list( [1,2,3,4] )\nprint(list1.append([2,4]))\n\n\n\n\n# class method calling\nStudent.get_student_count()\n\n\nmadhu = Employee1(10,\"MadhuN\",1000)\nprint(madhu)\nlist1 = [1,2,3]\nprint(list1)\n# instance method calling\nmadhu.get_emp_desn() \nEmployee1.get_emp_desn(madhu)\n#print(\"---------Normal Instance calling-------------\")\n\n#raja = Employee1(21,\"Rajasekhar\",15000)\n#raja.get_emp_desn()\n\n'''\n\nclass var instance var\n-------------------------- ----------------------------\nwhile loading class at the time of object creation\n\nclass var instance var\nclass methods instance methods\n\nNo instance var inside class method**\n\n++ Within instance methods we can use class variables*****\nvice versa is not true ==> within class methods we can't use instance variables\n'''\n# Built-in class attributes\nprint(\"Employee1.__dict__:\", Employee1.__dict__)\nprint(\"Employee1.__doc__:\", Employee1.__doc__)\nprint(\"Employee1.__name__:\", Employee1.__name__)\nprint(\"Employee1.__module__:\", Employee1.__module__)\nprint(\"Employee1.__bases__:\", Employee1.__bases__)\n\n# Functions\nprint(hasattr(madhu, \"sal\"))\nprint(setattr(madhu, \"name\", \"MAD\"))\nprint(getattr(madhu, \"name\"))\n#print(delattr(madhu, \"sal\"))\n#print(getattr(madhu, \"sal\"))\n\n\nclass Employee1:\n def __init__(self,x):\n print(x)\n #return x \nc = Employee1(5)\n","repo_name":"agiri801/python_key_notes","sub_path":"_12_Oops/_04_ClassAnatomy.py","file_name":"_04_ClassAnatomy.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"14743543017","text":"import math\r\n\r\ndef area(r):\r\n return math.pi*(r**2)\r\n\r\n\r\nfor i in range(1, 5, 1):\r\n \r\n for j in range(2, 6, 1):\r\n try:\r\n p = 91.12 / ((1/i**2)-(1/j**2))\r\n print(p, \" \", i, \" \", j)\r\n except ZeroDivisionError:\r\n continue","repo_name":"khandum/stuff","sub_path":"FAP/fortran/physics/physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"26574848962","text":"\nimport os\nimport time\nimport datetime\nimport json\n\nimport torch\nfrom utils.seed import set_random_seed\nfrom 
utils.output_manager import OutputManager\nfrom utils.pd_logger import PDLogger\nfrom torch.nn import DataParallel\n\nfrom models.maml import MAML\nfrom models.meta_ticket import MetaTicket\nfrom models.sinusoid_maml import SinusoidMAML\n\ndef meta_test(exp_name, cfg, gpu_id, prefix=\"\", iteration=None, use_best=False):\n set_random_seed(cfg['seed'])\n device = torch.device(f'cuda:{gpu_id}' if cfg['use_cuda'] and torch.cuda.is_available()\n else 'cpu')\n\n outman = OutputManager(cfg['output_dir'], exp_name)\n outman.print('Number of available gpus: ', torch.cuda.device_count(), prefix=prefix)\n\n if cfg['learning_framework'] == 'MAML':\n learner = MAML(outman, cfg, device, cfg['data_parallel'])\n elif cfg['learning_framework'] == 'MetaTicket':\n learner = MetaTicket(outman, cfg, device, cfg['data_parallel'])\n elif cfg['learning_framework'] == 'SinusoidMAML':\n learner = SinusoidMAML(outman, cfg, device, cfg['data_parallel'])\n else:\n raise NotImplementedError\n\n\n if use_best:\n dump_path = outman.get_abspath(prefix=f\"best.{prefix}\", ext=\"pth\")\n elif iteration is not None:\n dump_path = outman.get_abspath(prefix=f'it{iteration}.{prefix}', ext=\"pth\")\n else:\n dump_path = outman.get_abspath(prefix=f\"dump.{prefix}\", ext=\"pth\")\n\n outman.print(dump_path, prefix=prefix)\n if os.path.exists(dump_path):\n dump_dict = torch.load(dump_path)\n it = dump_dict['it']\n if isinstance(learner.meta_model, DataParallel):\n learner.meta_model.module.load_state_dict(dump_dict['model_state_dict'])\n else:\n learner.meta_model.load_state_dict(dump_dict['model_state_dict'])\n else:\n raise Exception\n\n outman.print('[', str(datetime.datetime.now()) , '] Evaluate on Test Dataset...' , prefix=prefix)\n\n # Test\n result = learner.evaluate(dataset_type='test')\n if use_best:\n outman.print('Test Accuracy (Best):', str(result['accuracy']), prefix=prefix)\n else:\n outman.print('Test Accuracy:', str(result['accuracy']), prefix=prefix)\n\n test_info_dict = {\n 'accuracy': result['accuracy'],\n 'iteration': iteration,\n 'loss': result['loss'],\n 'prefix': prefix,\n }\n\n if use_best:\n output_path = outman.get_abspath(prefix=f\"test_best.{prefix}\", ext=\"json\")\n elif iteration is not None:\n output_path = outman.get_abspath(prefix=f\"test_it{iteration}.{prefix}\", ext=\"json\")\n else:\n output_path = outman.get_abspath(prefix=f\"test_dump.{prefix}\", ext=\"json\")\n\n with open(output_path, 'w') as f:\n json.dump(test_info_dict, f, indent=2)\n\n\n","repo_name":"dchiji-ntt/meta-ticket","sub_path":"commands/meta_test.py","file_name":"meta_test.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"14"} +{"seq_id":"31148053402","text":"# AES implementation in raw Python\r\n# Copyright (c) 2013 Oliver Lau , Heise Zeitschriften Verlag\r\n# All rights reserved.\r\n\r\nimport hashlib\r\nfrom math import ceil\r\n\r\nRcon = [\r\n [ 0x00, 0x00, 0x00, 0x00 ],\r\n [ 0x01, 0x00, 0x00, 0x00 ],\r\n [ 0x02, 0x00, 0x00, 0x00 ],\r\n [ 0x04, 0x00, 0x00, 0x00 ],\r\n [ 0x08, 0x00, 0x00, 0x00 ],\r\n [ 0x10, 0x00, 0x00, 0x00 ],\r\n [ 0x20, 0x00, 0x00, 0x00 ],\r\n [ 0x40, 0x00, 0x00, 0x00 ],\r\n [ 0x80, 0x00, 0x00, 0x00 ],\r\n [ 0x1b, 0x00, 0x00, 0x00 ],\r\n [ 0x36, 0x00, 0x00, 0x00 ]\r\n] \r\n\r\nS = [\r\n 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,\r\n 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,\r\n 0xb7, 0xfd, 0x93, 0x26, 0x36, 
0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,\r\n 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,\r\n 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,\r\n 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,\r\n 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,\r\n 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,\r\n 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,\r\n 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,\r\n 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,\r\n 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,\r\n 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,\r\n 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,\r\n 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,\r\n 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,\r\n]\r\n\r\n\r\ndef RotWord(w):\r\n return w[1:] + w[:1]\r\n\r\n\r\ndef SubWord(w):\r\n return list(map(lambda x: S[x], list(w)))\r\n\r\n\r\ndef XorWord(a, b):\r\n assert len(a) == 4 and len(b) == 4\r\n return list(map(lambda x, y: x ^ y, list(a), list(b)))\r\n\r\n\r\ndef SubBytes(state):\r\n assert len(state) == 4\r\n return list(map(lambda x: SubWord(x), state))\r\n\r\n\r\ndef ShiftRows(state):\r\n assert len(state) == 4\r\n return list(map(lambda x: RotWord(x), state))\r\n\r\n\r\ndef AddRoundKey(state, key):\r\n assert len(state) == 4 and len(key) == 4\r\n return list(map(lambda x, y: XorWord(x, y), state, key))\r\n\r\n\r\n\"\"\"\r\n key: bytearray\r\n\"\"\"\r\ndef ToState(data):\r\n assert len(data) % 4 == 0\r\n keyLen = len(data) * 8\r\n n = keyLen // 32\r\n block = [ [ None for i in range(4) ] for j in range(n) ]\r\n k, i = 0, 0\r\n while i < n:\r\n j = 0\r\n while j < 4:\r\n block[i][j] = data[k]\r\n j += 1\r\n k += 1\r\n i += 1\r\n return block\r\n\r\n\r\n\"\"\"\r\n key: bytearray\r\n\"\"\"\r\ndef ExpandKey(key):\r\n block = ToState(key)\r\n keyLen = len(key) * 8\r\n if keyLen == 128: Nr = 10\r\n elif keyLen == 192: Nr = 12\r\n elif keyLen == 256: Nr = 14\r\n else: raise Exception(\"key size must be 128, 192 or 256 bits\")\r\n Nb = 4\r\n Nk = keyLen // 32\r\n i = 0\r\n w = []\r\n while i < Nk:\r\n w.append(block[i])\r\n i += 1\r\n i = Nk\r\n while i < Nb * (Nr+1):\r\n print(str(i) + \"\\t\", end=\"\")\r\n temp = w[i-1]\r\n PrintWord(temp)\r\n print(\" \", end=\"\")\r\n if i % Nk == 0:\r\n temp = RotWord(temp)\r\n PrintWord(temp, \" \")\r\n temp = SubWord(temp)\r\n PrintWord(temp, \" \")\r\n PrintWord(Rcon[i//Nk], \" \")\r\n temp = XorWord(temp, Rcon[i//Nk])\r\n PrintWord(temp, \" \")\r\n elif Nk > 6 and (i % Nk) == 4:\r\n temp = SubWord(temp)\r\n print(9*\" \", end=\"\"); PrintWord(temp); print(19*\" \", end=\"\")\r\n else: print(36*\" \", end=\"\")\r\n PrintWord(w[i-Nk], \" \")\r\n XORed = XorWord(w[i-Nk], temp)\r\n PrintWord(XORed, \" \")\r\n w.append(XORed)\r\n i += 1\r\n print()\r\n return Nr, w\r\n\r\n\r\ndef MixColumns(m):\r\n assert len(m) == 4\r\n\r\n def mix(a, b, c, d):\r\n def Mul123(a, b):\r\n if b == 1: return a & 0xff\r\n elif b 
== 2:\r\n c = a << 1\r\n if a & 0x80: c ^= 0x1b\r\n return c & 0xff\r\n elif b == 3: return Mul123(a, 2) ^ a\r\n else: raise Exception(\"b must be 1, 2 or 3\")\r\n\r\n r0 = Mul123(a, 2) ^ Mul123(b, 3) ^ Mul123(c, 1) ^ Mul123(d, 1)\r\n r1 = Mul123(a, 1) ^ Mul123(b, 2) ^ Mul123(c, 3) ^ Mul123(d, 1)\r\n r2 = Mul123(a, 1) ^ Mul123(b, 1) ^ Mul123(c, 2) ^ Mul123(d, 3)\r\n r3 = Mul123(a, 3) ^ Mul123(b, 1) ^ Mul123(c, 1) ^ Mul123(d, 2)\r\n return r0, r1, r2, r3\r\n\r\n transposed = list(zip(*m))\r\n m = map(lambda x: mix(x[0], x[1], x[2], x[3]), transposed)\r\n return list(zip(*m))\r\n\r\n\r\ndef PrintArray(m, *args):\r\n if len(args) > 0: print(args[0], end=\"\")\r\n print(str(list(map(lambda x: hex(x), m))))\r\n\r\n\r\ndef ByteToHex(x):\r\n assert 0x00 <= x <= 0xff\r\n def ToHex(n): return \"0123456789abcdef\"[n]\r\n return \"\".join([ ToHex(x >> 4), ToHex(x & 0x0f) ])\r\n\r\n\r\ndef AESEncrypt(msg, key):\r\n \"\"\"\r\n :param msg: bytearray\r\n :param key: key\r\n \"\"\"\r\n keyLen = len(key) * 8\r\n if keyLen == 128: Nr = 10\r\n elif keyLen == 192: Nr = 12\r\n elif keyLen == 256: Nr = 14\r\n else: raise Exception(\"key size must be 128, 192 or 256 bits\")\r\n\r\n def AESEncryptBlock(state, w, Nr):\r\n Nb = 4\r\n state = AddRoundKey(state, w[:Nb])\r\n for round in range(Nr):\r\n state = SubBytes(state)\r\n state = ShiftRows(state)\r\n state = MixColumns(state)\r\n state = AddRoundKey(state, w[round*Nb:(round+1)*Nb])\r\n state = SubBytes(state)\r\n state = ShiftRows(state)\r\n state = AddRoundKey(state, w[Nr*Nb:(Nr+1)*Nb])\r\n return state\r\n\r\n Nr, w = ExpandKey(key)\r\n msg += bytearray(16 - len(msg) % 16) # pad if needed\r\n encrypted = bytearray(len(msg))\r\n for p in range(0, len(msg), 16):\r\n state = ToState(msg[p:p+16])\r\n state = AESEncryptBlock(state, w, Nr)\r\n k, i = p, 0\r\n while i < 4:\r\n j = 0\r\n while j < 4:\r\n encrypted[k] = state[i][j]\r\n j += 1\r\n k += 1\r\n i += 1\r\n\r\n return encrypted\r\n\r\n\r\ndef PrintWord(w, *args):\r\n print(\"\".join(map(lambda x: str(ByteToHex(x)), w)) + \" \".join(args), end=\"\")\r\n\r\n\r\ndef PrintMatrix(m):\r\n for row in m:\r\n print(\" \" + str(list(map(lambda x: hex(x), row))))\r\n\r\n\r\ndef PrintKeySchedule(m):\r\n i, j = 0, 1\r\n Nr = len(m) // 4\r\n while i < Nr:\r\n print(\"ROUND KEY \", i);\r\n PrintMatrix(m[i*4:j*4])\r\n i += 1\r\n j += 1\r\n\r\n\r\ndef PBKDF2(P, S, C, kLen):\r\n def XorBytes(dst, src):\r\n assert len(dst) == len(src)\r\n N = len(dst)\r\n for i in range(N):\r\n dst[i] ^= src[i]\r\n S = S.encode()\r\n P = P.encode()\r\n m = hashlib.sha224(P)\r\n hLen = m.digest_size * 8\r\n length = ceil(kLen / hLen)\r\n mk = bytearray()\r\n for i in range(length):\r\n m.update(S + str(i).encode())\r\n T = bytearray(m.digest_size)\r\n for j in range(C):\r\n m.update(P)\r\n XorBytes(T, m.digest())\r\n mk += T\r\n return mk[:kLen // 8]\r\n\r\n\r\ndef PasswordToKey(password):\r\n return PBKDF2(password, \"\", 10, 128)\r\n\r\n\r\ndef demo():\r\n key = PasswordToKey(\"s3cR€7\")\r\n print(\"key = {0:s}\".format(\" \".join(map(lambda x: ByteToHex(x), key))))\r\n\r\n # key = bytearray.fromhex(\"f6 c5 82 03 cc 55 54 ad 34 c5 26 3e cd 41 02 cd\")\r\n # key = bytearray.fromhex(\"2b 7e 15 16 28 ae d2 a6 ab f7 15 88 09 cf 4f 3c\")\r\n # key = bytearray.fromhex(\"8e 73 b0 f7 da 0e 64 52 c8 10 f3 2b 80 90 79 e5 62 f8 ea d2 52 2c 6b 7b\")\r\n # key = bytearray.fromhex(\"60 3d eb 10 15 ca 71 be 2b 73 ae f0 85 7d 77 81 1f 35 2c 07 3b 61 08 d7 2d 98 10 a3 09 14 df f4\")\r\n\r\n print(\"KEY SCHEDULE\")\r\n Nr, w = ExpandKey(key)\r\n 
PrintKeySchedule(w)\r\n\r\n encrypted = AESEncrypt(\"AES ist cool!\".encode(), key)\r\n PrintArray(encrypted)\r\n\r\ndef main():\r\n demo()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"607011/intrinsics","sub_path":"aes/scripts/aes.py","file_name":"aes.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"71496258575","text":"import sys\nimport random\nimport numpy as np\nimport os\nimport cv2\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\n\n\n#sem_scal_loss, geo_scal_loss, CE_ssc_loss\n\ndef geo_scal_loss(pred, ssc_target, epsilon=1e-6):\n # Get softmax probabilities\n pred = F.softmax(pred, dim=1)\n\n # Check if prediction matches target exactly (pseudo-prediction case)\n if torch.all(pred.argmax(dim=1) == ssc_target):\n return torch.tensor(0.0).to(pred.device)\n\n # Compute empty and nonempty probabilities\n empty_probs = pred[:, 0, :, :, :]\n nonempty_probs = 1 - empty_probs\n\n # Remove unknown voxels (if necessary)\n mask = ssc_target != 255\n nonempty_target = ssc_target != 0\n nonempty_target = nonempty_target[mask].float()\n nonempty_probs = nonempty_probs[mask]\n empty_probs = empty_probs[mask]\n\n intersection = (nonempty_target * nonempty_probs).sum()\n precision = intersection / (nonempty_probs.sum() + epsilon)\n recall = intersection / (nonempty_target.sum() + epsilon)\n spec = ((1 - nonempty_target) * (empty_probs)).sum() / ((1 - nonempty_target).sum() + epsilon)\n\n return (\n F.binary_cross_entropy(precision, torch.ones_like(precision))\n + F.binary_cross_entropy(recall, torch.ones_like(recall))\n + F.binary_cross_entropy(spec, torch.ones_like(spec))\n )\n\n\ndef sem_scal_loss(pred, ssc_target, epsilon=1e-6):\n # Check for perfect match\n if torch.all(pred.argmax(dim=1) == ssc_target):\n return torch.tensor(0.0).to(pred.device)\n\n # Get softmax probabilities\n pred = F.softmax(pred, dim=1)\n loss = 0\n count = 0\n mask = ssc_target != 255\n n_classes = pred.shape[1]\n for i in range(n_classes):\n # Get probability of class i\n p = pred[:, i, :, :, :][mask]\n target = ssc_target[mask]\n\n completion_target = (target == i).float()\n\n if completion_target.sum() > 0:\n count += 1.0\n nominator = (p * completion_target).sum()\n\n # Precision\n precision = nominator / (p.sum() + epsilon)\n loss_precision = F.binary_cross_entropy(\n precision, torch.ones_like(precision)\n )\n\n # Recall\n recall = nominator / (completion_target.sum() + epsilon)\n loss_recall = F.binary_cross_entropy(recall, torch.ones_like(recall))\n\n # Specificity\n specificity = ((1 - p) * (1 - completion_target)).sum() / (\n (1 - completion_target).sum() + epsilon\n )\n loss_specificity = F.binary_cross_entropy(\n specificity, torch.ones_like(specificity)\n )\n\n loss_class = loss_precision + loss_recall + loss_specificity\n loss += loss_class\n\n return loss / max(count, epsilon)\n\ndef CE_ssc_loss(pred, target, class_weights):\n # Check for perfect match\n if torch.all(pred.argmax(dim=1) == target):\n return torch.tensor(0.0).to(pred.device)\n\n criterion = nn.CrossEntropyLoss(\n weight=class_weights, ignore_index=255, reduction=\"none\"\n )\n loss = criterion(pred, target.long())\n loss_valid = loss[target != 255]\n return torch.mean(loss_valid)\n\n\n\ndef one_hot_encoding(labels, num_classes):\n # Assuming labels of shape [batch_size, depth, height, width]\n one_hot = F.one_hot(labels, num_classes) # Convert to one-hot\n 
return one_hot.permute(0, 4, 1, 2, 3).float()\n\n\n ","repo_name":"zht043/HKU-OccNet","sub_path":"depthseg_occ/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"7803929526","text":"from dash import html \nfrom dash import dcc\n\nimport dash_bootstrap_components as dbc\n\nclass profileCard:\n def __init__(self, name, profileDescription, profession, id, linkedIn):\n self.id = id\n self.name = name\n self.profileDescription = profileDescription\n self.profession = profession\n self.linkedIn=linkedIn\n\n def display(self):\n layout = html.Div([\n html.Div(children=[\n html.Div(className='foreground'),\n html.Div(className='background'),\n html.P(self.profileDescription),\n ], className=f'profile shadow photo_{self.id}'\n ),\n html.Div(children=[\n html.P(children=[html.A(html.B(self.name),href=f'https://www.linkedin.com/in/{self.linkedIn}',target='_blank'),html.Br(),self.profession],className=\"profileText\"),\n ], className='name'\n ),\n ]\n )\n return layout\n\n\n\t","repo_name":"luisca1985/DS4A_bogota_accidents","sub_path":"components/profileCard/profileCard.py","file_name":"profileCard.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"22221659626","text":"#\n# DeepLabCut Toolbox (deeplabcut.org)\n# © A. & M.W. Mathis Labs\n# https://github.com/DeepLabCut/DeepLabCut\n#\n# Please see AUTHORS for contributors.\n# https://github.com/DeepLabCut/DeepLabCut/blob/master/AUTHORS\n#\n# Licensed under GNU Lesser General Public License v3.0\n#\n\n\nimport json\nimport os\nfrom typing import Container\nfrom typing import Literal\n\nfrom deeplabcut.benchmark.base import Benchmark, Result, ResultCollection\n\nDATA_ROOT = os.path.join(os.getcwd(), \"data\")\nCACHE = os.path.join(os.getcwd(), \".results\")\n\n__registry = []\n\n\ndef register(cls):\n \"\"\"Add a benchmark to the list of evaluations to run.\n\n Apply this function as a decorator to a class. Note that the\n class needs to be a subclass of the ``benchmark.base.Benchmark``\n base class.\n\n In most situations, it will be a subclass of one of the pre-defined\n benchmarks in ``benchmark.benchmarks``.\n\n Throws:\n ``ValueError`` if the decorator is applied to a class that is\n not a subclass of ``benchmark.base.Benchmark``.\n \"\"\"\n if not issubclass(cls, Benchmark):\n raise ValueError(\n f\"Can only register subclasses of {type(Benchmark)}, \" f\"but got {cls}.\"\n )\n __registry.append(cls)\n\n\ndef evaluate(\n include_benchmarks: Container[str] = None,\n results: ResultCollection = None,\n on_error=\"return\",\n) -> ResultCollection:\n \"\"\"Run evaluation for all benchmarks and methods.\n\n Note that in order for your custom benchmark to be included during\n evaluation, the following conditions need to be met:\n\n - The benchmark subclassed one of the benchmark definitions in\n in ``benchmark.benchmarks``\n - The benchmark is registered by applying the ``@benchmark.register``\n decorator to the class\n - The benchmark was imported. This is done automatically for all\n benchmarks that are defined in submodules or subpackages of the\n ``benchmark.submissions`` module. For all other locations, make\n sure to manually import the packages **before** calling the\n ``evaluate()`` function.\n\n Args:\n include_benchmarks:\n If ``None``, run all benchmarks that were discovered. 
If a container\n is passed, only include methods that were defined on benchmarks with\n the specified names. E.g., ``include_benchmarks = [\"trimouse\"]`` would\n only evaluate methods of the trimouse benchmark dataset.\n on_error:\n see documentation in ``benchmark.base.Benchmark.evaluate()``\n\n Returns:\n A collection of all results, which can be printed or exported to\n ``pd.DataFrame`` or ``json`` file formats.\n \"\"\"\n if results is None:\n results = ResultCollection()\n for benchmark_cls in __registry:\n if include_benchmarks is not None:\n if benchmark_cls.name not in include_benchmarks:\n continue\n benchmark = benchmark_cls()\n for name in benchmark.names():\n if Result(method_name=name, benchmark_name=benchmark_cls.name) in results:\n continue\n else:\n result = benchmark.evaluate(name, on_error=on_error)\n results.add(result)\n return results\n\n\ndef get_filepath(basename: str):\n return os.path.join(DATA_ROOT, basename)\n\n\ndef savecache(results: ResultCollection):\n with open(CACHE, \"w\") as fh:\n json.dump(results.todicts(), fh, indent=2)\n\n\ndef loadcache(\n cache=CACHE, on_missing: Literal[\"raise\", \"ignore\"] = \"ignore\"\n) -> ResultCollection:\n if not os.path.exists(cache):\n if on_missing == \"raise\":\n raise FileNotFoundError(cache)\n return ResultCollection()\n with open(cache, \"r\") as fh:\n try:\n data = json.load(fh)\n except json.decoder.JSONDecodeError as e:\n if on_missing == \"raise\":\n raise e\n return ResultCollection()\n return ResultCollection.fromdicts(data)\n","repo_name":"DeepLabCut/DeepLabCut","sub_path":"deeplabcut/benchmark/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":4042,"dataset":"github-code","pt":"14"} +{"seq_id":"19588725471","text":"from django.shortcuts import render\nimport openai\nimport os\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom dotenv import load_dotenv\nfrom . forms import AskGPTForm\nfrom utils.encryption import decrypt_data\nfrom django.core.cache import cache\n\n\n@login_required(login_url=\"/home/login\")\ndef askgpt_view(request):\n \"\"\" This function is used to render the askgpt view\n \"\"\"\n # get user id from request\n user = request.user\n # get the openai api key from user profile\n user_id = user.id\n api_key_encrpted = user.profile.openai_key\n # decrypt the api key if its encrypted\n if api_key_encrpted:\n api_key = decrypt_data(api_key_encrpted)\n cache.set(\"encrypted_key\", api_key, timeout=3600)\n\n data = \"AskGpt is an AI assistant that is an expert in Software \\\n Engineering. You can ask it questions about \\\n Software Engineering and it will \\\n give you an answer. \\n \\\n Example: \\n \\\n What is a variable? 
\\n \\\n \"\n if request.method == \"POST\":\n openai.api_key = api_key\n user_query = request.POST.get('user_query')\n user_prompt = user_query\n chat_response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=\"You are an AI assisstant that is an Expert in Software \\\n engineering\\nYou know about Software Engineering\\nYou can provide \\\n advice on Linux, Programming Languages, Software Engineering \\\n Concepts and Education.\\n If possible make references\\n\\\n to publications for further reading by appending the \\\n name of the publication to this \\\n exact url https://annas-archive.org/search?q= \\\n do not change anything in the url just append the publication\\\n name for example: \\\n https://annas-archive.org/search?q=flask web development by \\\n Grinberg, Miguel. You must critically \\\n percieve the question and make sure\\\n the question is related \\\n to software engineering before you provide an answer,\\\n however, If the question is not related to sofware \\\n engineering at all! please\\\n respond with the phrase \\\"I'm just an expert \\\n in Software engineering, can't help with \\\n that\\n\\n \" + \"\\n\" + user_prompt,\n temperature=0.7,\n max_tokens=500,\n top_p=1,\n frequency_penalty=0.73,\n presence_penalty=0\n )\n data = chat_response['choices'][0]['text'].replace(\"?\", \"\")\n return render(request, 'askgpt/askgpt.html', {'response': data})\n","repo_name":"Ayobami6/Peersonline","sub_path":"askgpt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"30182069113","text":"import pandas as pd\nimport os\nfrom random_ids import randomlist\n\ndf = pd.read_csv('csv/data2023.csv')\nresult_dict = {}\nfor theid in randomlist:\n mask2 = df[df[\"ZP_ID\"] == theid]\n if not mask2.empty:\n filename = f\"filteredbyID_{theid}.csv\"\n mask2.to_csv(filename, index=False)\n dfID = pd.read_csv(filename)\n mask = dfID[\"Round_Month(Datum)\"].str.startswith(\"2023-02\")\n filtered_df = dfID[mask]\n filename = f\"filteredbydate_{theid}.csv\"\n filtered_df.to_csv(filename, index=False)\n df1 = pd.read_csv(filename)\n last_value = df1[\"Stunde_von\"].iloc[0]\n last_date = last_value.split(\"T\")[0]\n first_value = df1[\"Stunde_von\"].iloc[-1]\n first_date = first_value.split(\"T\")[0]\n sum_B = round(df1['Arbeit [kWh]'].sum(),2)\n result_dict[theid] = sum_B\n os.remove(filename)\n os.remove(f\"filteredbyID_{theid}.csv\")\n else:\n print(f\"The ID {theid} does not exist\")\n#apply to see the data\nprint(f\"result dict{result_dict}\")\n","repo_name":"ramielhaj/invoice-generator","sub_path":"reg.py","file_name":"reg.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"70262718096","text":"# Script Name : data.py\n# Author : Howard Zhang\n# Created : 14th June 2018\n# Last Modified\t : 14th June 2018\n# Version : 1.0\n# Modifications\t : \n# Description : The struct of data to sort and display.\n\nclass Data:\n # Total of data to sort\n data_count = 32\n\n def __init__(self, value):\n self.value = value\n self.set_color()\n\n def set_color(self, rgba = None):\n if not rgba:\n rgba = (0,\n 1 - self.value / (self.data_count * 2),\n self.value / (self.data_count * 2) + 0.5,\n 1)\n self.color = rgba\n 
","repo_name":"zamhown/sorting-visualizer","sub_path":"sorting/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":231,"dataset":"github-code","pt":"14"} +{"seq_id":"30706132321","text":"from tkinter import font\nimport dash\nfrom dash import html\nimport dash_bootstrap_components as dbc\nfrom html_structure import get_3d_fig, get_main_layout\nimport pandas as pd\n\nfrom dash.dependencies import Input, Output, State, MATCH, ALL\nfrom dash.exceptions import PreventUpdate\n\nfrom data.mock_data import get_steps\n\napp = dash.Dash(\n external_stylesheets=[\n \"assets/bootstrap.css\",\n dbc.icons.BOOTSTRAP, \n dbc.icons.FONT_AWESOME,\n ]\n)\n# create html\napp.layout = get_main_layout()\n\n\n\n# =======================\n# CALLBACKS\n# =======================\n\n# the callback block always refers to the function below\n@app.callback(\n Output({\"type\": \"minimizable_div\", \"index\": MATCH}, \"style\"),\n Output({\"type\": \"minimized_div\", \"index\": MATCH}, \"style\"),\n Input({\"type\": \"minimize_button\", \"index\": MATCH}, \"n_clicks\"),\n Input({\"type\": \"maximize_button\", \"index\": MATCH}, \"n_clicks\"),\n State({\"type\": \"minimizable_div\", \"index\": MATCH}, \"id\"),\n State({\"type\": \"minimizable_div\", \"index\": MATCH}, \"style\"),\n State({\"type\": \"minimized_div\", \"index\": MATCH}, \"style\"),\n prevent_initial_call=True\n)\ndef on_showhide_divs(n_clicks_minimize, n_clicks_maximize, id_minimizable, style_minimizable, style_minimized):\n \"\"\" triggered by minimize_buttons\n hides the div to minimizes and shows the minimized div\n \"\"\" \n event_button_id = dash.callback_context.triggered_id\n # if event_button_id is None:\n # raise PreventUpdate(\"\")\n \n # Check if the minimize button or the maximize button was clicked\n if event_button_id[\"type\"] == \"minimize_button\":\n # Hide the minimizable div\n if style_minimizable is None:\n style_minimizable = {\"display\": \"none\"}\n else:\n style_minimizable.update({\"display\": \"none\"})\n \n # Show the minimized div\n if style_minimized is None:\n style_minimized = {}\n else:\n del style_minimized[\"display\"]\n \n elif event_button_id[\"type\"] == \"maximize_button\":\n # Show the minimizable div\n if style_minimizable is None:\n style_minimizable = {}\n else:\n del style_minimizable[\"display\"]\n \n # Hide the minimized div\n if style_minimized is None:\n style_minimized = {\"display\": \"none\"}\n else:\n style_minimized.update({\"display\": \"none\"})\n \n return style_minimizable, style_minimized\n\n@app.callback(\n Output(\"col_body_left\", \"width\"),\n Output(\"col_body_center\", \"width\"),\n Output(\"col_body_right\", \"width\"),\n Input({\"type\": \"minimizable_div\", \"index\": ALL}, \"style\"),\n State({\"type\": \"minimizable_div\", \"index\": ALL}, \"id\"),\n prevent_initial_call=True\n)\ndef on_div_minimized(minimizable_divs_styles, minimizable_divs_ids):\n \"\"\" triggered after minimizable_div was opened oder closed\n sets the column size to strech the visualization\n \"\"\"\n \n indices = [id[\"index\"] for id in minimizable_divs_ids]\n index_style_dict = {idx: style for idx, style in zip(indices, minimizable_divs_styles)}\n \n # Now we have a dict of\n # { \n # 'div_1': { 'display': 'none', ...},\n # 'div_2': { 'display': 'none', ...},\n # 'div_3': { 'display': 'block', ...},\n # ...\n # } \n \n if index_style_dict[\"div_productmetadata\"] is not None and \"display\" in index_style_dict[\"div_productmetadata\"] 
and index_style_dict[\"div_productmetadata\"][\"display\"] == \"none\" \\\n and index_style_dict[\"div_materiallist\"] is not None and \"display\" in index_style_dict[\"div_materiallist\"] and index_style_dict[\"div_materiallist\"][\"display\"] == \"none\":\n width_col_left = 1\n else:\n width_col_left = 3\n \n if index_style_dict[\"div_tools\"] is not None and \"display\" in index_style_dict[\"div_tools\"] and index_style_dict[\"div_tools\"][\"display\"] == \"none\" \\\n and index_style_dict[\"div_steplist\"] is not None and \"display\" in index_style_dict[\"div_steplist\"] and index_style_dict[\"div_steplist\"][\"display\"] == \"none\":\n width_col_right = 1\n else:\n width_col_right = 3\n \n width_col_center = 12 - width_col_left - width_col_right\n \n return width_col_left, width_col_center, width_col_right\n\n\n@app.callback(\n Output(\"graph\", \"figure\"),\n Output(\"current_step\", \"data\"),\n Input({\"type\": \"step_button\", \"index\": ALL}, \"n_clicks\"),\n Input({\"type\": \"step_nav_button\", \"index\": ALL}, \"n_clicks\"),\n prevent_initial_call=True\n)\ndef onclick_step_button(n_clicks1, n_clicks2):\n \"\"\" triggered by clicking any step button or the step arrows\n updates graph and current step variable\n \"\"\"\n event_button_id = dash.callback_context.triggered_id\n # if event_button_id is None:\n # raise PreventUpdate(\"\")\n \n step_id = event_button_id[\"index\"]\n fig = get_3d_fig(step_id)\n \n return fig, step_id\n\n\n@app.callback(\n Output(\"div_step_description_stepname\", \"children\"),\n Output(\"div_step_description_steptext\", \"children\"),\n Output(\"div_tools_name\", \"children\"),\n Output(\"div_tools_img\", \"src\"),\n Output(\"div_danger_img\", \"src\"),\n Output(\"p_notifs\", \"children\"),\n Output(\"i_notifs\", \"className\"),\n Output(\"div_notifs\", \"className\"),\n Input(\"current_step\", \"data\")\n)\ndef on_step_changed(current_step_id):\n \"\"\" triggered by the change to a new step\n defines the new updated changes which (can) occur by switching to another step\n e.g. 
show critical hints and update step description\n \"\"\"\n # Display step name and text in box under the graph\n df_steps = get_steps()\n step_name = df_steps.loc[current_step_id][\"name\"]\n step_description = df_steps.loc[current_step_id][\"description\"]\n \n step_tools_name = df_steps.loc[current_step_id][\"tools\"]\n step_tools_img = df_steps.loc[current_step_id][\"tools_img_path\"]\n notifs = df_steps.loc[current_step_id][\"notifications\"]\n \n if not pd.isna(notifs):\n i_notifs_class = \"fa-solid fa-triangle-exclamation\"\n div_notifs_class = \"border_div_notifs\"\n else:\n i_notifs_class = \"\"\n div_notifs_class = \"no_border_div_notifs\"\n \n \n if not pd.isna(step_tools_img):\n step_tools_img = \"assets/img/\" + step_tools_img\n else:\n step_tools_img = \"\"\n \n if current_step_id == \"step14\":\n danger_img = \"assets/img/danger.png\"\n else:\n danger_img = \"\"\n \n \n return step_name, step_description, step_tools_name, step_tools_img, danger_img, notifs, i_notifs_class, div_notifs_class\n\n@app.callback(\n Output({\"type\": \"step_button\", \"index\": ALL}, \"color\"),\n Output({\"type\": \"step_nav_button\", \"index\": ALL}, \"id\"),\n Input(\"current_step\", \"data\"),\n State({\"type\": \"step_button\", \"index\": ALL}, \"id\"),\n State({\"type\": \"step_nav_button\", \"index\": ALL}, \"id\"),\n)\ndef on_step_changed_2(current_step_id, step_button_ids, tmp):\n \"\"\" triggered by the change to a new step\n sets step button color and re-calculates which step occurs after the step-nav-arrows\n \"\"\"\n \n # Extract the index from the id \n button_indices = [id[\"index\"] for id in step_button_ids]\n \n button_colors_new = []\n for button_index in button_indices:\n if button_index == current_step_id:\n button_colors_new.append(\"primary\")\n else:\n button_colors_new.append(\"secondary\")\n \n # Change step navigation button ID.\n # Set the ID of the back navigation button to the previous step\n \n # Extract the index from the id \n button_indices = [id[\"index\"] for id in step_button_ids]\n \n i = button_indices.index(current_step_id)\n previous_button_index = {\"type\": \"step_nav_button\", \"index\": button_indices[i-1]}\n next_button_index = {\"type\": \"step_nav_button\", \"index\": button_indices[i+1]}\n \n return button_colors_new, [previous_button_index, next_button_index]\n \n\n# ===================\n# END OF CALLBACKS\n# ===================\n\nif __name__ == \"__main__\": \n app.run_server(debug=True)","repo_name":"Shibamacchiato/digital_assembly","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8052,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"}
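The Dash app above leans on pattern-matching IDs (MATCH/ALL) for its minimize/step buttons; a stripped-down sketch of that idiom, independent of the app's layout (all IDs below are illustrative):
import dash
from dash import html
from dash.dependencies import Input, Output, MATCH

app = dash.Dash(__name__)
app.layout = html.Div([
    html.Button("minimize", id={"type": "minimize_button", "index": "demo"}),
    html.Div("panel body", id={"type": "minimizable_div", "index": "demo"}),
])

@app.callback(
    Output({"type": "minimizable_div", "index": MATCH}, "style"),
    Input({"type": "minimize_button", "index": MATCH}, "n_clicks"),
    prevent_initial_call=True,
)
def toggle(n_clicks):
    # hide on odd clicks, show again on even clicks;
    # MATCH pairs each button with the div sharing its "index"
    return {"display": "none"} if n_clicks % 2 else {}

# app.run_server(debug=True) would serve this demo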
+{"seq_id":"13722744686","text":"# 4) Print all prime numbers in an arbitrary interval [a, b], using nested for loops and the for-else construct.\r\n\r\na = int(input('Enter \"a\"... '))\r\nb = int(input('Enter \"b\"... '))\r\n\r\nif a >= b:\r\n print('\"a\" must be less than \"b\"')\r\n exit()\r\n\r\nrg = range(a, b+1)\r\n\r\ndef isSimple(num):\r\n if num < 2:\r\n return False # 0 and 1 are not prime; this also prevents an endless loop below\r\n delit = 2\r\n while num % delit != 0:\r\n delit += 1\r\n return delit == num\r\n\r\nfor i in rg:\r\n if isSimple(i):\r\n print(i)\r\n","repo_name":"andreviich/hometasks","sub_path":"циклы/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
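The task statement above asks for nested for loops with for-else, while the solution uses while-based trial division; the for-else variant the comment refers to would look roughly like this (standalone sketch, sample interval values assumed):
a, b = 10, 50  # sample interval; the script reads these from input()
for i in range(max(a, 2), b + 1):
    for divisor in range(2, i):
        if i % divisor == 0:
            break  # found a divisor: i is not prime
    else:
        # the inner loop finished without break: i is prime
        print(i)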
+{"seq_id":"11400742071","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nimport time\r\nimport random\r\nfrom PIL import Image, ImageTk\r\n\r\ndef cargar_imagen(ruta, ancho, alto):\r\n imagen = Image.open(ruta)\r\n imagen = imagen.resize((ancho, alto), Image.LANCZOS)\r\n imagen = ImageTk.PhotoImage(imagen)\r\n return imagen\r\n\r\ndef contar_minas_alrededor(tablero, x, y):\r\n count = 0\r\n for i in range(max(0, x - 1), min(len(tablero), x + 2)):\r\n for j in range(max(0, y - 1), min(len(tablero[i]), y + 2)):\r\n if tablero[i][j] == '*' and (i, j) != (x, y):\r\n count += 1\r\n return count\r\n\r\ndef contadorBanderas(label, simbol, tablero, buttons):\r\n global banderas_puestas\r\n if simbol == \"+\":\r\n banderas_puestas += 1\r\n elif simbol == \"-\":\r\n banderas_puestas -= 1\r\n label.config(text=f\"Flags: {banderas_puestas}\")\r\n hasGanado(tablero, buttons)\r\n\r\ndef salir_accion(root):\r\n global board_window\r\n board_window.destroy()\r\n root.destroy()\r\n\r\ndef mostrarMinas(tablero, buttons):\r\n for i in range(len(tablero)):\r\n for j in range(len(tablero[i])):\r\n if tablero[i][j] == '*':\r\n buttons[i][j].config(text=\"M\")\r\n\r\ndef hasGanado(tablero, buttons):\r\n global num_minas\r\n banderas_correctas = 0\r\n for i in range(len(tablero)):\r\n for j in range(len(tablero[i])):\r\n if tablero[i][j] == '*' and buttons[i][j]['text'] == 'B':\r\n banderas_correctas += 1\r\n\r\n celdas_sin_mina = sum(row.count('') for row in tablero)\r\n if banderas_correctas == num_minas and celdas_sin_mina == 0:\r\n root = tk.Tk()\r\n root.title(\"Congratulations!\")\r\n root.geometry(\"200x100\")\r\n tk.Label(root, text=\"Congratulations, you won!\").pack()\r\n tk.Button(root, text=\"Exit\", command=lambda: salir_accion(root)).pack()\r\n root.mainloop()\r\n\r\n\r\ndef hasPerdido():\r\n # Show a \"you lost\" message\r\n root = tk.Tk()\r\n root.title(\"You lost\")\r\n root.geometry(\"200x100\")\r\n tk.Label(root, text=\"You lost\").pack()\r\n tk.Button(root, text=\"Exit\", command=lambda: salir_accion(root)).pack()\r\n root.mainloop()\r\n\r\ndef desbloquear_celdas(tablero, buttons, x, y):\r\n if x < 0 or y < 0 or x >= len(tablero) or y >= len(tablero[0]):\r\n return\r\n if buttons[x][y]['state'] == tk.DISABLED:\r\n return\r\n minas_cercanas = contar_minas_alrededor(tablero, x, y)\r\n buttons[x][y].config(text=str(minas_cercanas), state=tk.DISABLED)\r\n if minas_cercanas == 0: # Recurse only from cells with no adjacent mines\r\n for i in range(-1, 2):\r\n for j in range(-1, 2):\r\n desbloquear_celdas(tablero, buttons, x + i, y + j)\r\n\r\n\r\ndef revelar_celda(tablero, buttons, x, y, event):\r\n if event == 'left': # Left-click event\r\n if buttons[x][y]['text'] == \"B\": # Check whether the cell is flagged\r\n return # Do nothing if the cell carries a flag\r\n\r\n if tablero[x][y] == '*':\r\n print(\"You hit a mine!\")\r\n #buttons[x][y].config(image=imagenBomba)\r\n mostrarMinas(tablero, buttons)\r\n hasPerdido()\r\n else:\r\n minas_cercanas = contar_minas_alrededor(tablero, x, y)\r\n desbloquear_celdas(tablero, buttons, x, y) \r\n elif event == 'right': # Right-click event\r\n if buttons[x][y]['text'] == '': # If the cell is unrevealed, place a flag\r\n buttons[x][y].config(text='B')\r\n contadorBanderas(contador_banderas, \"+\", tablero, buttons)\r\n elif buttons[x][y]['text'] == 'B': # If the cell has a flag, remove it\r\n buttons[x][y].config(text='')\r\n contadorBanderas(contador_banderas, \"-\", tablero, buttons)\r\n\r\ndef crear_botones_tablero(tablero, ventana):\r\n buttons = []\r\n\r\n # Handlers for button clicks\r\n def left_click_handler(x, y):\r\n revelar_celda(tablero, buttons, x, y, 'left')\r\n \r\n def right_click_handler(x, y):\r\n revelar_celda(tablero, buttons, x, y, 'right')\r\n \r\n for i in range(len(tablero)):\r\n row = []\r\n for j in range(len(tablero[i])):\r\n btn = tk.Button(ventana, width=5, height=2)\r\n btn.grid(row=i+1, column=j, sticky=\"nsew\")\r\n row.append(btn)\r\n btn.bind('<Button-1>', lambda e, x=i, y=j: left_click_handler(x, y), add='+')\r\n btn.bind('<Button-3>', lambda e, x=i, y=j: right_click_handler(x, y), add='+')\r\n buttons.append(row)\r\n\r\n return buttons\r\n\r\ndef crear_tablero(filas, columnas, minas):\r\n tablero = [[' ' for _ in range(columnas)] for _ in range(filas)]\r\n\r\n # Place mines at random positions\r\n minas_colocadas = 0\r\n while minas_colocadas < minas:\r\n x = random.randint(0, filas - 1)\r\n y = random.randint(0, columnas - 1)\r\n if tablero[x][y] != '*':\r\n tablero[x][y] = '*'\r\n minas_colocadas += 1\r\n\r\n return tablero\r\n\r\ndef reiniciar_accion(board_window):\r\n board_window.destroy() # Close the current window\r\n iniciar_juego() # Start the game again\r\n\r\ndef interfaz_tablero(tablero, board_window, alto_ventana, ancho_ventana, num_minas):\r\n # Remaining-mines counter\r\n contador_minas = tk.Label(board_window, text=f\"Mines left: {num_minas}\")\r\n contador_minas.grid(row=0, column=0)\r\n\r\n # Placed-flags counter\r\n global banderas_puestas # No flags placed initially\r\n banderas_puestas = 0\r\n global contador_banderas\r\n contador_banderas = tk.Label(board_window, text=f\"Flags: {banderas_puestas}\")\r\n contador_banderas.grid(row=0, column=1)\r\n\r\n # Timer\r\n cronometro = tk.Label(board_window, text=\"Time: 00:00\")\r\n cronometro.grid(row=0, column=2)\r\n tiempo_inicio = time.time()\r\n actualizar_cronometro(cronometro, tiempo_inicio, board_window)\r\n\r\n # Restart the game and close the current window\r\n reiniciar = tk.Button(board_window, text=\"Restart\", command=lambda: reiniciar_accion(board_window))\r\n reiniciar.grid(row=0, column=3)\r\n\r\n # Add spacing between the board and the controls\r\n tk.Label(board_window, text=\" \").grid(row=1, columnspan=4, pady=5)\r\n\r\n buttons_frame = tk.Frame(board_window)\r\n buttons_frame.grid(row=1, column=0, columnspan=ancho_ventana, pady=5)\r\n\r\n buttons = crear_botones_tablero(tablero, buttons_frame)\r\n\r\ndef actualizar_cronometro(cronometro, tiempo_inicio, board_window):\r\n cronometro.config(text=time.strftime(\"Time: %H:%M:%S\", time.gmtime(time.time() - tiempo_inicio)))\r\n board_window.after(1000, lambda: actualizar_cronometro(cronometro, tiempo_inicio, board_window))\r\n 
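# Sketch: desbloquear_celdas above recurses once per neighbour and can hit
# Python's recursion limit on large boards; an explicit stack performs the
# same flood fill iteratively (hypothetical drop-in, reusing the helpers above).
def desbloquear_celdas_iterativo(tablero, buttons, x, y):
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if not (0 <= cx < len(tablero) and 0 <= cy < len(tablero[0])):
            continue
        if buttons[cx][cy]['state'] == tk.DISABLED:
            continue
        minas = contar_minas_alrededor(tablero, cx, cy)
        buttons[cx][cy].config(text=str(minas), state=tk.DISABLED)
        if minas == 0:
            # push all 8 neighbours instead of recursing
            stack.extend((cx + i, cy + j)
                         for i in range(-1, 2) for j in range(-1, 2))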
def iniciar_juego():\r\n filas = int(entry_filas.get())\r\n columnas = int(entry_columnas.get())\r\n global num_minas\r\n num_minas = int(entry_minas.get())\r\n tablero = crear_tablero(filas, columnas, num_minas)\r\n\r\n # Create the board window\r\n global board_window\r\n board_window = tk.Toplevel(root)\r\n board_window.title(\"Minesweeper\")\r\n tablero_frame = tk.Frame(root)\r\n\r\n # Window size\r\n tamano = 80\r\n ancho_ventana = tamano * columnas\r\n alto_ventana = tamano * filas\r\n board_window.geometry(str(ancho_ventana) + \"x\" + str(alto_ventana))\r\n\r\n # Board interface\r\n interfaz_tablero(tablero, board_window, alto_ventana, ancho_ventana, num_minas)\r\n\r\nif __name__ == \"__main__\":\r\n # Create the window\r\n root = tk.Tk()\r\n root.title(\"Minesweeper\")\r\n\r\n # Labels and inputs for rows, columns and mines\r\n tk.Label(root, text=\"Rows:\").grid(row=0, column=0)\r\n entry_filas = tk.Entry(root)\r\n entry_filas.insert(0, \"5\") # Default value for rows\r\n entry_filas.grid(row=0, column=1)\r\n\r\n tk.Label(root, text=\"Columns:\").grid(row=1, column=0)\r\n entry_columnas = tk.Entry(root)\r\n entry_columnas.insert(0, \"5\") # Default value for columns\r\n entry_columnas.grid(row=1, column=1)\r\n\r\n tk.Label(root, text=\"Mines:\").grid(row=2, column=0)\r\n entry_minas = tk.Entry(root)\r\n entry_minas.insert(0, \"5\") # Default value for mines\r\n entry_minas.grid(row=2, column=1)\r\n\r\n # Button to start the game\r\n start_button = tk.Button(root, text=\"Start Game\", command=iniciar_juego)\r\n start_button.grid(row=3, columnspan=2)\r\n\r\n root.mainloop()\r\n","repo_name":"aramirezf05/Buscaminas","sub_path":"mines.py","file_name":"mines.py","file_ext":"py","file_size_in_byte":8183,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"16007310580","text":"import spacy\n\ntext = [\"Net income was $9.4 million compared to the prior year of $2.7 million.\",\n \"$9.4 million was the net income, compared to the prior year of $2.7 million.\"]\n\nnlp = spacy.load(\"en_core_web_sm\")\nnlp.add_pipe(\"merge_entities\")\nnlp.add_pipe(\"merge_noun_chunks\")\n\nfor doc in nlp.pipe(text):\n print(\"---\")\n print(doc)\n for token in doc:\n if token.ent_type_ == \"MONEY\":\n # Attribute and direct object, check for subject\n if token.dep_ in (\"attr\", \"dobj\"):\n subj = [w for w in token.head.lefts if w.dep_ == \"nsubj\"]\n if subj:\n print(subj[0], \"-->\", token)\n # We have a prepositional object with a preposition\n elif token.dep_ == \"pobj\" and token.head.dep_ == \"prep\":\n print(token.head.head, \"-->\", token)\n","repo_name":"mgruppi/s4-code","sub_path":"dependency_parsing.py","file_name":"dependency_parsing.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"}
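The dependency walk in the record above hard-codes two tree patterns; spaCy's DependencyMatcher can express the subject-to-money relation declaratively. A sketch under the same pipeline assumptions (nlp and text as defined above; pattern keys are standard DependencyMatcher syntax):
from spacy.matcher import DependencyMatcher

matcher = DependencyMatcher(nlp.vocab)
pattern = [
    # anchor: a MONEY entity attached as attribute or direct object
    {"RIGHT_ID": "amount",
     "RIGHT_ATTRS": {"ENT_TYPE": "MONEY", "DEP": {"IN": ["attr", "dobj"]}}},
    # its head: the predicate ("was" parses as AUX, hence the IN list)
    {"LEFT_ID": "amount", "REL_OP": "<", "RIGHT_ID": "pred",
     "RIGHT_ATTRS": {"POS": {"IN": ["VERB", "AUX"]}}},
    # the nominal subject hanging off that predicate
    {"LEFT_ID": "pred", "REL_OP": ">", "RIGHT_ID": "subject",
     "RIGHT_ATTRS": {"DEP": "nsubj"}},
]
matcher.add("SUBJ_MONEY", [pattern])

for doc in nlp.pipe(text):
    for match_id, (amount_i, pred_i, subj_i) in matcher(doc):
        print(doc[subj_i], "-->", doc[amount_i])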
train_labels), (test_data, test_labels) = get_data()\n\n    model = keras.Sequential([\n        keras.layers.Flatten(input_shape = (132300, channels)),\n        keras.layers.Dense(100, activation = \"relu\"),\n        keras.layers.Dense(20, activation = \"relu\"),\n        keras.layers.Dense(2, activation = \"softmax\") # two output classes: safe (0) and shot (1)\n    ])\n\n    model.compile(optimizer = \"adam\", loss = tf.keras.losses.SparseCategoricalCrossentropy(), metrics = [\"accuracy\"])\n\n    test_acc = 0\n    while test_acc < 0.80:\n        model.fit(train_data, train_labels, epochs = 8)\n\n        test_loss, test_acc = model.evaluate(test_data, test_labels)\n\n        print(\"test acc: {}\".format(test_acc))\n        print(\"test loss: {}\".format(test_loss))\n\n    return model","repo_name":"norbusonam/warning-shot","sub_path":"Program/audio_model.py","file_name":"audio_model.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"23408745861","text":"#!/usr/bin/python3.5\n# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep\nimport logging\nfrom retrying import retry\n\n\nURL = 'http://gk.vecc.org.cn/ergs/o3/open/protect'\nSAVE_CODE_PATH = \"/home/dzou/Documents/1.png\"\n# Mailbox used to send the mail; a 163 mailbox can be registered for this\nemail_username = 'xxxxx'\nemail_password = 'xxxxx'\n# Mailbox that receives the verification-code mail; your Gmail or QQ mailbox also works\ntarget_email = 'xxxxxx'\n\n# Define an infos class\nclass infos:\n\n    # Object initialization\n    def __init__(self):\n        # webdriver initialization\n        logging.basicConfig(level=logging.INFO, # log level printed to the console\n                    filename='info.log',\n                    filemode='a', # mode is 'w' or 'a': 'w' rewrites the log on every run, overwriting the old one;\n                    # 'a' appends, and is the default when omitted\n                    format=\n                    '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n                    # log format\n                    )\n        self.url = URL\n        options = webdriver.ChromeOptions()\n        options.add_experimental_option(\"prefs\", {\"profile.managed_default_content_settings.images\": 1}) # load images\n        options.add_experimental_option('excludeSwitches', ['enable-automation']) # important: developer mode keeps major sites from detecting Selenium\n        self.browser = webdriver.Chrome(executable_path=chromedriver_path, options=options)\n        self.wait = WebDriverWait(self.browser, 100) # timeout of 100 s\n\n\n    def retry_if_result_none(result):\n        print(\"retry_if_result_none\")\n        return result is None\n\n    # Fetch the captcha\n    # To enable retries, uncomment the line below\n    # @retry(retry_on_result=retry_if_result_none)\n    def getcode(self):\n\n        # Open the page\n        self.browser.get(self.url)\n\n        self.browser.implicitly_wait(30) # smart wait until the page finishes loading, at most 30 s\n        # Wait for the page body to finish loading\n        self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'body')))\n        sleep(0.5)\n        # Locate the captcha element and take a screenshot of it\n        # code = self.wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"kaptcha\"]/img')))\n        code = self.browser.find_element_by_xpath('//*[@id=\"kaptcha\"]/img')\n        code.screenshot(SAVE_CODE_PATH)\n        sleep(0.5)\n        # Send the email, then wait for input\n        self.send_email()\n        # Check your mailbox and enter the captcha\n        s = input(\"Enter the verification code you received by email to continue the crawl: \")\n        # Type the captcha you entered into the input element;\n        # the business logic can be changed as needed\n        self.browser.find_element_by_xpath('//*[@id=\"qry-form\"]/div/div[2]/div[2]/div/input').send_keys(s)\n        # todo: define the follow-up actions yourself; the captcha has been entered\n        #------------for example-------------\n        #------------search-------------\n        #self.browser.find_element_by_xpath('//*[@id=\"qry-form\"]/div/div[2]/div[4]/div/input').click()\n\n\n        # If the captcha was typed wrong or the request fails, retry until it succeeds (success: a table with the query results appears)\n        # See the retry module if this is unfamiliar\n        # check = self.browser.find_element_by_xpath('/html/body/div[2]/div[2]/table')\n        # if(check!=None):\n        #     return 1\n        # sleep(1)\n        # return None \n\n    # Send the email\n    def send_email(self):\n        import smtplib\n        from email.mime.multipart import MIMEMultipart\n        from email.mime.text import MIMEText\n        from email.mime.image import MIMEImage\n        from email.header import Header\n        from email.utils import parseaddr, formataddr\n        from email import encoders\n        from email.mime.base import MIMEBase\n\n        def _format_addr(s):\n            name, addr = parseaddr(s)\n            return formataddr((Header(name, 'utf-8').encode(), addr))\n\n        sender = email_username\n        receiver = target_email\n        subject = 'happy day'\n        smtpserver = 'smtp.163.com'\n        username = email_username\n        password = email_password\n        body = \"\"\"\n
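<!-- the captcha screenshot is attached below and displayed inline via its Content-ID (cid:image1) -->\n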
<html>\n
<body>\n
<p>email image:</p>\n
<img src=\"cid:image1\">\n
</body>\n
</html>
\n \"\"\"\n\n msgRoot = MIMEMultipart('related')\n msgRoot['Subject'] = Header(subject,'utf-8')\n msgRoot['From'] = _format_addr('Me <%s>' % sender)\n msgRoot['To'] = _format_addr('SE <%s>' % receiver)\n\n msgText = MIMEText(\n '''happy birthday'''+boby, 'html', 'utf-8')\n msgRoot.attach(msgText)\n # 添加图片,以html形式显示\n fp = open(SAVE_CODE_PATH, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n\n msgImage.add_header('Content-ID', '')\n msgRoot.attach(msgImage)\n\n # 添加附件就是加上一个MIMEBase,从本地读取一个图片:\n with open(SAVE_CODE_PATH, 'rb') as f:\n # 设置附件的MIME和文件名,这里是png类型:\n mime = MIMEBase('image', 'png', filename='1.png')\n # 加上必要的头信息:\n mime.add_header('Content-Disposition', 'attachment', filename='1.png')\n mime.add_header('Content-ID', '<0>')\n mime.add_header('X-Attachment-Id', '0')\n # 把附件的内容读进来:\n mime.set_payload(f.read())\n # 用Base64编码:\n encoders.encode_base64(mime)\n # 添加到MIMEMultipart:\n msgRoot.attach(mime)\n\n smtp = smtplib.SMTP()\n smtp.connect(smtpserver)\n smtp.login(username, password)\n smtp.sendmail(sender, receiver, msgRoot.as_string())\n smtp.quit()\n\nif __name__ == \"__main__\":\n chromedriver_path = \"/home/dzou/Documents/chromedriver\" #改成你的chromedriver的完整路径地址,版本和chrome浏览器一致\n a = infos()\n a.getcode()\n\n\n\n\n","repo_name":"ding-zou/code_img_recognize","sub_path":"selenium_email/send_email_method.py","file_name":"send_email_method.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"12301214100","text":"def sentinel_search(lst, key): # not to be confused with linear search\n lst.append(key) # adding sentinel\n index = 0\n while key != lst[index]: # need no length control (better than linear search)\n index += 1\n lst.pop() # remove sentinel\n if index < len(lst):\n return index\n else:\n return -1\n\n\ndef m2f_heuristic(lst, key):\n index = sentinel_search(lst, key)\n if index != -1:\n lst[0], lst[index] = lst[index], lst[0]\n return index\n\n\ndef t_heuristic(lst, key):\n index = sentinel_search(lst, key)\n if index != -1 and index > 0: # index - 1 >= 0\n lst[index-1], lst[index] = lst[index], lst[index-1]\n return index\n\ndef test_heuristic(lst, key_lst, mode=0):\n if mode == 0:\n print(\"Testing Move To Front Heuristic\".center(50, '-'))\n for key in key_lst:\n m2f_heuristic(lst, key)\n print(f\"{key}\\t {lst}\")\n elif mode == 1:\n print(\"Testing Transposition Heuristic\".center(50, '-'))\n for key in key_lst:\n t_heuristic(lst, key)\n print(f\"{key}\\t {lst}\")\n else:\n print(\"Mode not support (0=m2f,1=t)\")\n\n\nif __name__ == '__main__':\n lst = [19, 56, 2, 7, 25, 18, 40]\n key_lst = [1, 56, 56, 88, 9, 1, 0, -1, 2, 56, 2, 3, 5, 25, 25, 25, 4, 40]\n test_heuristic(lst.copy(), key_lst, 0)\n test_heuristic(lst.copy(), key_lst, 1)\n","repo_name":"ApexTone/DataStructAlgo-Code-KMITL","sub_path":"Searching/UnorderedSearch.py","file_name":"UnorderedSearch.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"73084085775","text":"import contextlib\nfrom typing import Optional\n\nfrom nelli.mlir.passes import Pipeline\n\nfrom nelli.mlir._mlir.ir import Module, InsertionPoint\n\n\nfrom textwrap import dedent\n\nfrom nelli.mlir.arith import constant\nfrom nelli.mlir.utils import run_pipeline\nfrom nelli.utils import find_ops, mlir_mod_ctx\n\nsrc = dedent(\n \"\"\"\\\nmodule {\n func.func @parallel_loop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: 
index, %arg5: index, %arg6: memref, %arg7: memref, %arg8: memref, %arg9: memref) {\n %c0 = arith.constant 0 : index\n %c4 = arith.constant 4 : index\n %0 = arith.muli %arg5, %c4 : index\n %1 = llvm.mlir.constant(1 : i64) : i64\n omp.parallel {\n omp.wsloop for (%arg10, %arg11) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %0) {\n memref.alloca_scope {\n %c-1 = arith.constant -1 : index\n %2 = arith.muli %arg10, %c-1 : index\n %3 = arith.addi %arg2, %2 : index\n %4 = arith.cmpi slt, %arg4, %3 : index\n %5 = arith.select %4, %arg4, %3 : index\n %c-1_0 = arith.constant -1 : index\n %6 = arith.muli %arg11, %c-1_0 : index\n %7 = arith.addi %arg3, %6 : index\n %8 = arith.cmpi slt, %0, %7 : index\n %9 = arith.select %8, %0, %7 : index\n %10 = llvm.mlir.constant(1 : i64) : i64\n omp.parallel {\n omp.wsloop for (%arg12, %arg13) : index = (%c0, %c0) to (%5, %9) step (%arg4, %arg5) {\n memref.alloca_scope {\n %11 = arith.addi %arg12, %arg10 : index\n %12 = arith.addi %arg13, %arg11 : index\n %13 = memref.load %arg7[%11, %12] : memref\n %14 = memref.load %arg8[%11, %12] : memref\n %15 = arith.addf %13, %14 : f32\n memref.store %15, %arg9[%11, %12] : memref\n }\n omp.yield\n }\n omp.terminator\n }\n }\n omp.yield\n }\n omp.terminator\n }\n return\n }\n}\n\"\"\"\n)\n\n\nwith mlir_mod_ctx(src) as module:\n func = find_ops(module, lambda op: op.name in {\"func.func\"})\n assert len(func) == 1\n func = func[0].opview\n with InsertionPoint.at_block_begin(func.entry_block):\n constants = find_ops(module, lambda op: op.name in {\"arith.constant\"})\n new_constant_1 = constant(333, index=True)\n new_constant_2 = constant(666, index=True)\n constants[0].result.replace_all_uses_with(new_constant_1)\n constants[1].result.replace_all_uses_with(new_constant_2)\n\nrun_pipeline(\n module,\n Pipeline().cse().materialize(),\n)\nprint(module)\n\n\nclass Visitor:\n def visit(self, op):\n print(op.opview.__class__.__name__, op.location)\n for r in op.regions:\n for b in r.blocks:\n for o in b.operations:\n self.visit(o.operation)\n\n\nVisitor().visit(module.operation)\n","repo_name":"makslevental/nelli","sub_path":"examples/visitors_demo.py","file_name":"visitors_demo.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"14"} +{"seq_id":"30244100611","text":"\"\"\"\nHello command worker\n\"\"\"\n\nimport logging\nfrom ..utilities.tag import Tag\nfrom ..utilities import srp\nfrom ..utilities.constants import Status\n\n\nclass CommandWorkerHelloException(Exception):\n \"\"\"\n Some exception of CommandWorkerHello class\n \"\"\"\n\n pass\n\n\nclass CommandWorkerHelloUtimMethodException(Exception):\n \"\"\"\n No Utim method exception of CommandWorkerHello class\n \"\"\"\n\n pass\n\n\nclass CommandWorkerHello(object):\n \"\"\"\n Hello command worker class\n \"\"\"\n\n def __init__(self, uhost):\n \"\"\"\n Initialization\n \"\"\"\n\n # Check all necessary methods\n methods = [\n 'get_srp_session',\n 'set_srp_session',\n 'remove_srp_session',\n 'save_dev_status'\n ]\n for method in methods:\n if not (hasattr(uhost, method) and callable(getattr(uhost, method))):\n raise CommandWorkerHelloUtimMethodException\n\n self.__uhost = uhost # Uhost instance\n\n def process(self, devid, data, outbound_queue):\n \"\"\"\n Run process\n \"\"\"\n\n packet = None\n packet_backend = None\n\n tag = data[0:1]\n length_bytes = data[1:3]\n length = int.from_bytes(length_bytes, byteorder='big', signed=False)\n value = data[3:3 + length]\n\n # Logging\n 
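# For illustration only (hypothetical values, not from the protocol spec): a packet\n        # carrying value b\"abc\" is data = tag + b\"\\x00\\x03\" + b\"abc\", i.e. a 1-byte tag,\n        # a 2-byte big-endian length, then the value; the fields logged below are these three slices.\n        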
logging.debug('Tag: %s', str(tag))\n logging.debug('Length: %d', length)\n logging.debug('Value: %s', [x for x in value])\n\n if length == len(value) and tag == Tag.UCOMMAND.HELLO:\n # Get session values\n session = self.__uhost.get_srp_session(devid)\n A = session.get('A')\n logging.debug(\"GET session A: %s\", [x for x in A] if A is not None else 'None')\n if A != value:\n logging.debug(\"Remove session\")\n # Remove old sessions of utim_name\n self.__uhost.remove_srp_session(devid)\n # Get session values\n session = self.__uhost.get_srp_session(devid)\n A = session.get('A')\n logging.debug(\"GET session A after remove: %s\",\n [x for x in A] if A is not None else 'None')\n\n # Get salt, vkey from SRP\n salt = session.get('salt')\n vkey = session.get('vkey')\n svr = session.get('svr')\n\n logging.debug(\"Verifier params [username: %s, salt: %s, vkey: %s, A: %s\", str(devid),\n str(salt), str(vkey), str(value))\n if svr is None:\n logging.debug(\"SVR is None\")\n svr = srp.Verifier(bytes.fromhex(devid), salt, vkey, value)\n\n s, B = svr.get_challenge()\n if s is None or B is None:\n logging.error('Challenge is None. s: %s, B: %s', str(s), str(B))\n packet = Tag.UCOMMAND.assemble_error(b\"hello no challenge\")\n\n else:\n logging.debug('Challenge s: %s, B: %s', [x for x in s], [x for x in B])\n\n # Remove old sessions of utim_name\n self.__uhost.remove_srp_session(bytes.fromhex(devid))\n\n logging.debug(\"SAVE A: %s\", [x for x in value])\n # Save session values\n session = {\n 'utimname': devid,\n 'salt': salt,\n 'vkey': vkey,\n 'A': value,\n 'svr': svr,\n 'test_data': session.get('test_data'),\n 'platform_verified': session.get('platform_verified')\n }\n self.__uhost.set_srp_session(devid, session)\n\n logging.debug(\"self.M: %s\", str(svr.M))\n session = self.__uhost.get_srp_session(devid)\n svr1 = session.get('svr')\n logging.debug(\"self1.M: %s\", str(svr1.M) if svr1 else None)\n\n packet = Tag.UCOMMAND.assemble_try(s, B)\n\n self.__uhost.database.set_keep_alive_counter(devid, 0)\n self.__uhost.save_dev_status(devid, Status.STATUS_SRP)\n\n else:\n logging.debug(\"Invalid data %s from %s\", [hex(x) for x in data], devid)\n packet = Tag.UCOMMAND.assemble_error(b\"hello invalid data\")\n\n if packet is not None:\n outbound_queue.put([devid, packet])\n","repo_name":"connax-utim/uhost-python","sub_path":"uhost/workers/command_worker_hello.py","file_name":"command_worker_hello.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"1388917059","text":"from functools import reduce\r\n\r\nlst = [1,3,6,3,4,8]\r\n\r\nevens = list(filter(lambda n: n%2==0,lst))\r\nprint(evens)\r\n\r\n\r\n\r\nsqr = list(map(lambda n: n*n, evens))\r\nprint(sqr)\r\n\r\nresult = reduce(lambda a,b: a+b, sqr)\r\nprint(result)\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''f1 = lambda a,b: a + b\r\nf2 = lambda x, y: x*y\r\n\r\nprint(f1(3,4))\r\nprint(f2(4,5))\r\n'''","repo_name":"sanjeev602/Python_Start","sub_path":"lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"22269017169","text":"def maxheapify(A, i):\n l = 2 * i + 1\n r = 2 * i + 2\n if l <= H-1:\n if A[l] > A[i]:\n largest = l\n else:\n largest = i\n else:\n largest = i\n if r <= H-1:\n if A[r] > A[largest]:\n largest = r\n\n if largest != i:\n A[i], A[largest] = A[largest], A[i]\n maxheapify(A, largest)\n\n\ndef buildMaxHeap(A):\n for i in 
reversed(range(0, int(H / 2))):\n maxheapify(A, i)\n return A\n\n\nH = int(input())\nA = list(map(int, input().split()))\n\nout = buildMaxHeap(A)\nprint('', *out)\n","repo_name":"python-practicing/Aizu_Online_Judge","sub_path":"ALDS1_9_B.py","file_name":"ALDS1_9_B.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"14602221267","text":"'''\nCreated on 04.07.2012\n\n@author: Pavel Borisov\n@summary: Manipulate KDE emoticons\n\n'''\nfrom .kdepath import kdepath\nfrom PyKDE4.kdecore import KConfig\nfrom PyKDE4.kdeui import KGlobalSettings\nimport os, glob\n\ndef listIconThemes():\n '''\n @summary: list all emoticons available'''\n dirs = kdepath('emoticons')\n themes = []\n for d in dirs:\n os.chdir(d)\n dirnames = [os.path.dirname(f) for f in glob.glob('*/emoticons.xml')]\n themes.extend(dirnames)\n \n return themes\n\ndef applyIconTheme(themeName):\n '''\n @summary: apply emoticon theme themeName\n @raise AssertionError: if not themeName is vaild \n '''\n assert themeName in listIconThemes()\n config = KConfig('kdeglobals')\n gr = config.group('Emoticons')\n gr.writeEntry('emoticonsTheme', themeName)\n config.sync()\n KGlobalSettings.emitChange(KGlobalSettings.IconChanged)\n \n \n","repo_name":"pashazz/change-theme","sub_path":"core/engines/kde/emoticons.py","file_name":"emoticons.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"33410149815","text":"# n = int(input())\n# data = input().split()\n# d = []\n# x, y = 0, 0\n# for i in range(n):\n# d.append([])\n# for j in range(n):\n# d[i].append(j + 1)\n\n# for i in range(len(data)):\n\n# if data[i] == 'R': # 오른쪽으로 이동하는 경우\n# if y == 4:\n# continue\n# else:\n# y += 1\n# elif data[i] == 'L': # 왼쪽으로 이동하는 경우\n# if y == 0:\n# continue\n# else:\n# y -= 1\n# elif data[i] == 'U': # 위로 이동하는 경우\n# if x == 0:\n# continue\n# else:\n# x -= 1\n# elif data[i] == 'D': # 아래로 이동하는 경우\n# if x == 4:\n# continue\n# else:\n# x += 1\n\n# print(x + 1, y + 1)\n\n# 답안\nn = int(input())\nx, y = 1, 1\nplans = input().split()\n\n# L, R, U, D에 따른 이동 방향\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\nmove_types = ['L', 'R', 'U', 'D']\n\n# 이동 계획을 하나씩 확인\nfor plan in plans:\n\n # 이동 후 좌표 구하기\n for i in range(len(move_types)):\n if plan == move_types[i]:\n nx = x + dx[i]\n ny = y + dy[i]\n\n # 공간을 벗어나는 경우 무시\n if nx < 1 or ny < 1 or nx > n or ny > n:\n continue\n # 이동 수행\n x, y = nx, ny\n\nprint(x, y)\n","repo_name":"dudwk814/coding-test","sub_path":"알고리즘/구현/상하좌우.py","file_name":"상하좌우.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"31360703376","text":"N = int(input())\ncards = list(map(int, input().split()))\nM = int(input())\nnums = list(map(int, input().split()))\n\ncards.sort()\n\n\ndef binary_search(arr, start, end, target):\n\n while start <= end:\n mid = (start + end) // 2\n if arr[mid] == target:\n return 1\n\n elif arr[mid] > target:\n end = mid - 1\n\n elif arr[mid] < target:\n start = mid + 1\n\n return 0\n\n\nresult = []\nfor num in nums:\n isExist = binary_search(cards, 0, len(cards) - 1, num)\n result.append(isExist)\n\nfor r in result:\n print(r, end=' ')","repo_name":"na0i/Algorithm-study","sub_path":"02) 
BAEKJOON/10815.py","file_name":"10815.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"70331880655","text":"import collections\nimport collections.abc\nimport functools\nimport re\nimport threading\nimport traceback\nfrom typing import Any, Sequence, Union, Callable, Hashable, List, Optional, Set, Tuple, Type\n\nfrom absl import logging\nfrom chex._src import pytypes\nimport jax\nfrom jax.experimental import checkify\nimport jax.numpy as jnp\nimport numpy as np\n\n# Custom pytypes.\nTLeaf = Any\nTLeavesEqCmpFn = Callable[[TLeaf, TLeaf], bool]\nTLeavesEqCmpErrorFn = Callable[[TLeaf, TLeaf], str]\n\n# TODO(iukemaev): define a typing protocol for TChexAssertion.\n# Chex assertion signature:\n# (*args,\n# custom_message: Optional[str] = None,\n# custom_message_format_vars: Sequence[Any] = (),\n# include_default_message: bool = True,\n# exception_type: Type[Exception] = AssertionError,\n# **kwargs)\nTChexAssertion = Callable[..., None]\nTAssertFn = Callable[..., None]\nTJittableAssertFn = Callable[..., pytypes.Array] # a predicate function\n\n# Matchers.\nTDimMatcher = Optional[Union[int, Set[int], type(Ellipsis)]]\nTShapeMatcher = Sequence[TDimMatcher]\n\n\nclass _ChexifyStorage(threading.local):\n \"\"\"Thread-safe storage for internal variables used in @chexify.\"\"\"\n wait_fns = []\n level = 0\n\n\n# Chex namespace variables.\nERR_PREFIX = \"[Chex] \"\nTRACE_COUNTER = collections.Counter()\nDISABLE_ASSERTIONS = False\n\n# This variable is used for _chexify_ transformations, see `asserts_chexify.py`.\nCHEXIFY_STORAGE = _ChexifyStorage()\n\n\ndef assert_collection_of_arrays(inputs: Sequence[pytypes.Array]):\n \"\"\"Checks if ``inputs`` is a collection of arrays.\"\"\"\n if not isinstance(inputs, collections.abc.Collection):\n raise ValueError(f\"`inputs` is not a collection of arrays: {inputs}.\")\n\n\ndef jnp_to_np_array(arr: pytypes.Array) -> np.ndarray:\n \"\"\"Converts `jnp.ndarray` to `np.ndarray`.\"\"\"\n if getattr(arr, \"dtype\", None) == jnp.bfloat16:\n # Numpy does not support `bfloat16`.\n arr = arr.astype(jnp.float32)\n return jax.device_get(arr)\n\n\ndef deprecation_wrapper(new_fn, old_name, new_name):\n \"\"\"Allows deprecated functions to continue running, with a warning logged.\"\"\"\n\n def inner_fn(*args, **kwargs):\n logging.warning(\n \"chex.%s has been renamed to chex.%s, please update your code.\",\n old_name, new_name)\n return new_fn(*args, **kwargs)\n\n return inner_fn\n\n\ndef get_stacktrace_without_chex_internals() -> List[traceback.FrameSummary]:\n \"\"\"Returns the latest non-chex frame from the call stack.\"\"\"\n stacktrace = list(traceback.extract_stack())\n for i in reversed(range(len(stacktrace))):\n fname = stacktrace[i].filename\n if fname.find(\"/chex/\") == -1 or fname.endswith(\"_test.py\"):\n return stacktrace[:i+1]\n\n debug_info = \"\\n-----\\n\".join(traceback.format_stack())\n raise RuntimeError(\n \"get_stacktrace_without_chex_internals() failed. \"\n \"Please file a bug at https://github.com/deepmind/chex/issues and \"\n \"include the following debug info in it. \"\n \"Please make sure it does not include any private information! \"\n f\"Debug: '{debug_info}'.\")\n\n\ndef get_err_regex(message: str) -> str:\n \"\"\"Constructs a regexp for the exception message.\n\n Args:\n message: an exception message.\n\n Returns:\n Regexp that ensures the message follows the standard chex formatting.\n \"\"\"\n # (ERR_PREFIX + any symbols (incl. 
\\n) + message)\n return f\"{re.escape(ERR_PREFIX)}[\\\\s\\\\S]*{message}\"\n\n\ndef get_chexify_err_message(name: str, msg: str = \"\") -> str:\n \"\"\"Constructs an error message for the chexify exception.\"\"\"\n return f\"{ERR_PREFIX}chexify assertion '{name}' failed: {msg}\"\n\n\ndef _make_host_assertion(assert_fn: TAssertFn,\n name: Optional[str] = None) -> TChexAssertion:\n \"\"\"Constructs a host assertion given `assert_fn`.\n\n This wrapper should only be applied to the assertions that are either\n a) never used in jitted code, or\n b) when used in jitted code they do not check/access tensor values (i.e.\n they do not introduce value-dependent python control flow, see\n https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError).\n\n Args:\n assert_fn: A function implementing the check.\n name: A name for assertion.\n\n Returns:\n A chex assertion.\n \"\"\"\n if name is None:\n name = assert_fn.__name__\n\n def _assert_on_host(*args,\n custom_message: Optional[str] = None,\n custom_message_format_vars: Sequence[Any] = (),\n include_default_message: bool = True,\n exception_type: Type[Exception] = AssertionError,\n **kwargs) -> None:\n # Format error's stack trace to remove Chex' internal frames.\n assertion_exc = None\n value_exc = None\n try:\n assert_fn(*args, **kwargs)\n except AssertionError as e:\n assertion_exc = e\n except ValueError as e:\n value_exc = e\n finally:\n if value_exc is not None:\n raise ValueError(str(value_exc))\n\n if assertion_exc is not None:\n # Format the exception message.\n error_msg = str(assertion_exc)\n\n # Include only the name of the outermost chex assertion.\n if error_msg.startswith(ERR_PREFIX):\n error_msg = error_msg[error_msg.find(\"failed:\") + len(\"failed:\"):]\n\n # Whether to include the default error message.\n default_msg = (f\"Assertion {name} failed: \"\n if include_default_message else \"\")\n error_msg = f\"{ERR_PREFIX}{default_msg}{error_msg}\"\n\n # Whether to include a custom error message.\n if custom_message:\n if custom_message_format_vars:\n custom_message = custom_message.format(*custom_message_format_vars)\n error_msg = f\"{error_msg} [{custom_message}]\"\n\n raise exception_type(error_msg)\n\n return _assert_on_host\n\n\ndef chex_assertion(\n assert_fn: TAssertFn,\n jittable_assert_fn: Optional[TJittableAssertFn],\n name: Optional[str] = None) -> TChexAssertion:\n \"\"\"Wraps Chex assert functions to control their common behaviour.\n\n Extends the assertion to support the following optional auxiliary kwargs:\n custom_message: A string to include into the emitted exception messages.\n custom_message_format_vars: A list of variables to pass as arguments to\n `custom_message.format()`.\n include_default_message: Whether to include the default Chex message into\n the emitted exception messages.\n exception_type: An exception type to use. `AssertionError` by default.\n\n Args:\n assert_fn: A function implementing the check.\n jittable_assert_fn: An optional jittable version of `assert_fn` implementing\n a predicate (returning `True` only if assertion passes).\n Required for value assertions.\n name: A name for assertion. 
If not provided, use `assert_fn.__name__`.\n\n Returns:\n A Chex assertion (with auxiliary kwargs).\n \"\"\"\n if name is None:\n name = assert_fn.__name__\n\n host_assertion_fn = _make_host_assertion(assert_fn, name)\n\n @functools.wraps(assert_fn)\n def _chex_assert_fn(*args,\n custom_message: Optional[str] = None,\n custom_message_format_vars: Sequence[Any] = (),\n include_default_message: bool = True,\n exception_type: Type[Exception] = AssertionError,\n **kwargs) -> None:\n if DISABLE_ASSERTIONS:\n return\n if (jittable_assert_fn is not None and has_tracers((args, kwargs))):\n if not CHEXIFY_STORAGE.level:\n raise RuntimeError(\n \"Value assertions can only be called from functions wrapped \"\n \"with `@chex.chexify`. See the docs.\")\n\n # A wrapped to inject auxiliary debug info and `custom_message`.\n original_check = checkify.check\n\n def _check(pred, msg, *fmt_args, **fmt_kwargs):\n # Add chex info.\n msg = get_chexify_err_message(name, msg)\n\n # Add a custom message.\n if custom_message:\n msg += f\" Custom message: {custom_message}.\"\n fmt_args = list(fmt_args) + list(custom_message_format_vars)\n\n # Add a traceback and a pointer to the callsite.\n stacktrace = get_stacktrace_without_chex_internals()\n msg += (\n f\" [failed at: {stacktrace[-1].filename}:{stacktrace[-1].lineno}]\"\n )\n\n # Call original `checkify.check()`.\n original_check(pred, msg, *fmt_args, **fmt_kwargs)\n\n # Mock during the assertion's execution time.\n checkify.check = _check\n pred = jittable_assert_fn(*args, **kwargs) # execute the assertion\n checkify.check = original_check # return the original implementation\n\n # A safeguard to ensure that the results of a check are not ignored.\n # In particular, this check fails when `pred` is False and no\n # `checkify.check` calls took place in `jittable_assert_fn`, which would\n # be a bug in the assertion's implementation.\n checkify.check(pred, \"assertion failed!\")\n else:\n try:\n host_assertion_fn(\n *args,\n custom_message=custom_message,\n custom_message_format_vars=custom_message_format_vars,\n include_default_message=include_default_message,\n exception_type=exception_type,\n **kwargs)\n except jax.errors.ConcretizationTypeError as exc:\n msg = (\"Chex assertion detected `ConcretizationTypeError`: it is very \"\n \"likely that it tried to access tensors' values during tracing. \"\n \"Make sure that you defined a jittable version of this chex \"\n \"assertion; if that does not help, please file a bug.\")\n raise exc from RuntimeError(msg)\n\n # Override name.\n setattr(_chex_assert_fn, \"__name__\", name)\n return _chex_assert_fn\n\n\ndef format_tree_path(path: Sequence[Any]) -> str:\n return \"/\".join(str(p) for p in path)\n\n\ndef format_shape_matcher(shape: TShapeMatcher) -> str:\n return f\"({', '.join('...' 
if d is Ellipsis else str(d) for d in shape)})\"\n\n\ndef num_devices_available(devtype: str, backend: Optional[str] = None) -> int:\n \"\"\"Returns the number of available device of the given type.\"\"\"\n devtype = devtype.lower()\n supported_types = (\"cpu\", \"gpu\", \"tpu\")\n if devtype not in supported_types:\n raise ValueError(\n f\"Unknown device type '{devtype}' (expected one of {supported_types}).\")\n\n return sum(d.platform == devtype for d in jax.devices(backend))\n\n\ndef get_tracers(tree: pytypes.ArrayTree) -> Tuple[jax.core.Tracer]:\n \"\"\"Returns a tuple with tracers from a tree.\"\"\"\n return tuple(\n x for x in jax.tree_util.tree_leaves(tree)\n if isinstance(x, jax.core.Tracer))\n\n\ndef has_tracers(tree: pytypes.ArrayTree) -> bool:\n \"\"\"Checks whether a tree contains any tracers.\"\"\"\n return any(\n isinstance(x, jax.core.Tracer) for x in jax.tree_util.tree_leaves(tree))\n\n\ndef is_traceable(fn) -> bool:\n \"\"\"Checks if function is traceable.\n\n JAX traces a function when it is wrapped with @jit, @pmap, or @vmap.\n In other words, this function checks whether `fn` is wrapped with any of\n the aforementioned JAX transformations.\n\n Args:\n fn: function to assert.\n\n Returns:\n Bool indicating whether fn is traceable.\n \"\"\"\n\n fn_string_tokens = (\n \".reraise_with_filtered_traceback\", # JIT in Python ver. >= 3.7\n \"CompiledFunction\", # C++ JIT in jaxlib 0.1.66 or newer.\n \"pmap.\", # Python pmap\n \"PmapFunction\", # C++ pmap in jaxlib 0.1.72 or newer.\n \"vmap.\", # vmap\n \"_python_pjit\",\n \"_cpp_pjit\",\n )\n\n fn_type_tokens = (\n \"PmapFunction\",\n \"PjitFunction\",\n )\n\n # Un-wrap `fn` and check if any internal fn is jitted by pattern matching.\n fn_ = fn\n while True:\n if any(t in str(fn_) for t in fn_string_tokens):\n return True\n\n if any(t in str(type(fn_)) for t in fn_type_tokens):\n return True\n\n if hasattr(fn_, \"__wrapped__\"):\n # Wrapper.\n fn_globals = getattr(fn_, \"__globals__\", {})\n\n if fn_globals.get(\"__name__\", None) == \"jax.api\":\n # Wrapper from `jax.api`.\n return True\n\n if \"api_boundary\" in fn_globals:\n # api_boundary is a JAX wrapper for traced functions.\n return True\n\n try:\n if isinstance(fn_, jax.lib.xla_extension.PjitFunction):\n return True\n except AttributeError:\n pass\n else:\n break\n\n fn_ = fn_.__wrapped__\n return False\n\n\ndef assert_leaves_all_eq_comparator(\n equality_comparator: TLeavesEqCmpFn,\n error_msg_fn: Callable[[TLeaf, TLeaf, str, int, int],\n str], path: Sequence[Any], *leaves: Sequence[TLeaf]):\n \"\"\"Asserts all leaves are equal using custom comparator. Not jittable.\"\"\"\n path_str = format_tree_path(path)\n for i in range(1, len(leaves)):\n if not equality_comparator(leaves[0], leaves[i]):\n raise AssertionError(error_msg_fn(leaves[0], leaves[i], path_str, 0, i))\n\n\ndef assert_trees_all_eq_comparator_jittable(\n equality_comparator: TLeavesEqCmpFn,\n error_msg_template: str,\n *trees: Sequence[pytypes.ArrayTree]) -> pytypes.Array:\n \"\"\"Asserts all trees are equal using custom comparator. JIT-friendly.\"\"\"\n\n if len(trees) < 2:\n raise ValueError(\n \"Assertions over only one tree does not make sense. 
Maybe you wrote \"\n \"`assert_trees_xxx([a, b])` instead of `assert_trees_xxx(a, b)`, or \"\n \"forgot the `error_msg_fn` arg to `assert_trees_xxx`?\")\n\n def _tree_error_msg_fn(\n path: Tuple[Union[int, str, Hashable]], i_1: int, i_2: int):\n if path:\n return (\n f\"Trees {i_1} and {i_2} differ in leaves '{path}':\"\n f\" {error_msg_template}\"\n )\n else:\n return f\"Trees (arrays) {i_1} and {i_2} differ: {error_msg_template}.\"\n\n def _cmp_leaves(path, *leaves):\n verdict = jnp.array(True)\n for i in range(1, len(leaves)):\n check_res = equality_comparator(leaves[0], leaves[i])\n checkify.check(\n pred=check_res,\n msg=_tree_error_msg_fn(path, 0, i),\n arr_1=leaves[0],\n arr_2=leaves[i],\n )\n verdict = jnp.logical_and(verdict, check_res)\n return verdict\n\n # Trees are guaranteed to have the same structure.\n paths = [\n convert_jax_path_to_dm_path(path)\n for path, _ in jax.tree_util.tree_flatten_with_path(trees[0])[0]]\n trees_leaves = [jax.tree_util.tree_leaves(tree) for tree in trees]\n\n verdict = jnp.array(True)\n for leaf_i, path in enumerate(paths):\n verdict = jnp.logical_and(\n verdict, _cmp_leaves(path, *[leaves[leaf_i] for leaves in trees_leaves])\n )\n\n return verdict\n\n\nJaxKeyType = Union[\n int,\n str,\n Hashable,\n jax.tree_util.SequenceKey,\n jax.tree_util.DictKey,\n jax.tree_util.FlattenedIndexKey,\n jax.tree_util.GetAttrKey,\n]\n\n\ndef convert_jax_path_to_dm_path(\n jax_tree_path: Sequence[JaxKeyType],\n) -> Tuple[Union[int, str, Hashable]]:\n \"\"\"Converts a path from jax.tree_util to one from dm-tree.\"\"\"\n\n # pytype:disable=attribute-error\n def _convert_key_fn(key: JaxKeyType) -> Union[int, str, Hashable]:\n if isinstance(key, (str, int)):\n return key # int | str.\n if isinstance(key, jax.tree_util.SequenceKey):\n return key.idx # int.\n if isinstance(key, jax.tree_util.DictKey):\n return key.key # Hashable\n if isinstance(key, jax.tree_util.FlattenedIndexKey):\n return key.key # int.\n if isinstance(key, jax.tree_util.GetAttrKey):\n return key.name # str.\n raise ValueError(f\"Jax tree key '{key}' of type '{type(key)}' not valid.\")\n # pytype:enable=attribute-error\n\n return tuple(_convert_key_fn(key) for key in jax_tree_path)\n","repo_name":"deepmind/chex","sub_path":"chex/_src/asserts_internal.py","file_name":"asserts_internal.py","file_ext":"py","file_size_in_byte":15513,"program_lang":"python","lang":"en","doc_type":"code","stars":577,"dataset":"github-code","pt":"14"} +{"seq_id":"73120151373","text":"import json\nimport simplekml\nimport time\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join, isdir\nimport csv\nfrom math import radians, cos, sin, asin, sqrt\n\n\ndef haversine(lon1, lat1, lon2, lat2): #Formula para calcular \n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n # Radius of earth in kilometers is 6371\n km = 6371* c\n return km\n\ndef cleanJson(database):\n\tcontent = database.readlines()\n\tif \"\\x00\" in (content[-1]): #Busco byte asociado a informacion NULA, Chequeo solo la ultima linea\n\t\tcontent = content[:-1]\n\t\tprint(\"Limpie el archivo\")\n\treturn content\n\ndef convert_coordinates(string): #Transforma coordenadas con Orientacion al final a numero, probado con Formato 
2- Limpio 2.json\n\tif isinstance(string, float):\n\t\toutput = string\n\telse:\n\t\toutput = string\n\t\tif string[-1].isalpha():\n\t\t\tif string[-1] in [\"S\",\"W\"]:\n\t\t\t\toutput = -float(string[:-1])/100\n\t\t\telse:\n\t\t\t\toutput = float(string[:-1])/100\n\n\treturn output\n\ndef check_valid_coordinate(string,option): #Avisar!, en el codigo matar\n\tif type(string) == str:\n\t\tif string[-1].isalpha():\n\t\t\tstring = string[:-1]\n\t\tx = string.split(\".\")\n\t\tif option == \"lon\":\n\t\t\ty = 5\n\t\telif option == \"lat\":\n\t\t\ty = 4\n\t\telse:\n\t\t\tprint(\"No se declaro option\")\n\n\t\tif len(x[0]) == y:\n\t\t\tif len(x[1]) == 7:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint(\"Coordenada de largo correcto, precisión incorrecta\")\n\t\t\t\treturn False\n\t\telse:\n\t\t\tprint(\"Coordenada de largo correcto incorrecto\") #No importa chequear precisión\n\t\t\treturn False\n\telse:\n\t\tprint(\"Pasa test por estar en formato numerico (parser)\")\n\t\treturn True\n\n\ndef remove_duplicates(x): #Elimina duplicados de informacion (duplicados perfectos)\n\treturn list(set(x))\n\n\nclass JsonData():\n\tdef __init__(self):\n\t\tself.master_database= [] #lista de diccionarios\n\n\t@property\n\tdef gps_database(self):\n\t\toutput=[]\n\t\tfor i in self.master_database:\n\t\t\tif \"sensors\" in i.keys():\n\t\t\t\tfor sensor in i[\"sensors\"]:\n\t\t\t\t\tif sensor[\"sensorType\"] == \"GPS6000\":\n\t\t\t\t\t\tdic = sensor[\"data\"] #Aparentemente no siempre es el primero!\n\t\t\t\t\t\tdic[\"identifier\"] = sensor[\"sensorID\"]\n\t\t\telse:\n\t\t\t\tdic = i[\"data\"] #Su longitud y latitud tienen una letra al final:\n\t\t\t\tdic[\"identifier\"] = i[\"sensorID\"]\n\n\n\t\t\tif dic[\"_RAW\"] != \"\" and dic[\"valid\"] in [\"1\",\"2\",\"3\",\"4\",\"5\"]:\n\t\t\t\tdic[\"timestamp\"]=i[\"timestamp\"]\n\t\t\t\toutput.append(dic)\n\t\treturn output\n\n\tdef export_to_kml(self,output_path):\n\t\tdata_invalida = [] #Solo util para debug\n\t\tlineas_invalidas = 0\n\t\tdata_locaciones = []\n\t\tfor dic in self.gps_database:\n\t\t\ttry:\n\t\t\t\tlon = convert_coordinates(dic[\"longitude\"])\n\t\t\t\tlat = convert_coordinates(dic[\"latitude\"])\n\t\t\t\tdatatime = int(float(dic[\"timestamp\"]))\n\t\t\t\tdata_locaciones.append((lon,lat,datatime))\n\t\t\texcept:\n\t\t\t\tlineas_invalidas+=1\n\t\t\t\tdata_invalida.append(dic)\n\n\t\t#print(data_locaciones)\n\t\tprint(\"Numero de lineas no parseadas: {}\".format(lineas_invalidas))\n\t\t#for i in data_invalida:\n\t\t#\tprint(i)\n\t\tkml = simplekml.Kml()\n\t\tfor i in data_locaciones:\n\t\t\tpnt = kml.newpoint(name= time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(i[2])), coords=[(i[0],i[1])]) # lon, lat, optional height\n\t\t\tpnt.timestamp.when = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\",time.localtime(i[2]))\n\t\tkml.save(output_path+\"output.kml\")\n\n\t\tprint(\"Exporte archivo KML\")\n\n\tdef add_data(self,ruta,filtro=\"\"): #Agregar el filtro de limpieza de data aca fecha: 2018-02-03\n\t\tif (ruta[-5:] == \".json\" or ruta[-4:] == \".log\") and filtro in ruta:\n\t\t\twith open(ruta, 'r') as content:\n\t\t\t\tdatabase = cleanJson(content) #Lista de diccionarios, cada diccionario es un log\n\t\t\t\tself.master_database += [json.loads(i) for i in database] #Lista de objetos JSON\n\t\t\t\tprint(\"Importe el archivo {}\".format(ruta))\n\n\n\t\telif isdir(ruta):\n\t\t\tfiles = [f for f in listdir(ruta) if isfile(join(ruta, f))]\n\t\t\tcontador = 0\n\t\t\tfor i in files:\n\t\t\t\tif i[-5:] == \".json\" and filtro in i: #Reconozco aquellos archivos que 
son .json\n\t\t\t\t\twith open(ruta+\"/\"+i, 'r') as database:\n\t\t\t\t\t\tprint(\"Abri archivo {}\".format(ruta+\"/\"+i))\n\t\t\t\t\t\tcontent = cleanJson(database) #Lista de diccionarios, cada diccionario es un log\n\t\t\t\t\t\tself.master_database += [json.loads(i) for i in content] #Lista de objetos JSON\n\t\t\t\t\t\tcontador+=1\n\t\t\tprint(\"Numero de archivos .json importados: {}\".format(contador))\n\n\t\telse:\n\t\t\tprint(\"Error al importar\")\n\n\n\tdef clean_data(self): #Export_Kml lo tiene incorporado, pero en caso de solo querer limpiar aca esta la funcion\n\t\tcontador=0\n\t\teliminados = []\n\t\tfor i in self.master_database:\n\t\t\tif \"sensors\" in i.keys(): #Existen 2 formatos distintos \n\t\t\t\tdic = i[\"sensors\"][0][\"data\"] \n\t\t\telse:\n\t\t\t\tdic = i[\"data\"] #Su longitud y latitud tienen una letra al final\n\n\t\t\tif dic[\"_RAW\"] == \"\" or dic[\"valid\"] not in [\"1\",\"2\",\"3\",\"4\",\"5\"]: #Codigos asociados a data valida\n\t\t\t\tself.master_database.remove(i)\n\t\t\t\tcontador+=1\n\t\t\t\teliminados.append(i)\n\n\t\t#print(eliminados)\n\t\tprint(\"Numero de entradas eliminadas: {}\".format(contador))\n\n\n\tdef export_to_csv(self,output_path,opcion): #Para que funcione a de modificarse la estructura de datos a solo aquello que nos importa\n\t\tif opcion == \"gps\":\n\t\t\tdata = self.gps_database\n\n\t\tkeys = data[0].keys()\n\t\twith open(output_path, 'w') as output_file:\n\t\t\tdict_writer = csv.DictWriter(output_file, keys)\n\t\t\tdict_writer.writeheader()\n\t\t\tdict_writer.writerows(data)\n\t\tprint(\"Exporte la data a csv\")\n\n\n\tdef filter_by_id(self,identifier):\n\t\tprint(\"Filtre segun identifier: {}\".format(identifier))\n\t\treturn [i for i in self.gps_database if i[\"identifier\"]==identifier] \n\n\tdef review(self,speed_limit, time_limit):\n\t\toutput=[]\n\t\tredudant_data=[]\n\t\tdisposable_data=[]\n\t\tduplicated = False\n\n\t\tfor x, y in zip(self.gps_database[:-1], self.gps_database[1:]):\n\n\t\t\tlatitude_2= convert_coordinates(y[\"latitude\"])\n\t\t\tlatitude_1= convert_coordinates(x[\"latitude\"])\n\t\t\tlongitude_2= convert_coordinates(y[\"longitude\"])\n\t\t\tlongitude_1= convert_coordinates(x[\"longitude\"])\n\n\t\t\tdelta_latitud = latitude_2 - latitude_1\n\t\t\tdelta_longitude = longitude_2 - longitude_1\n\t\t\tdelta_time = y[\"timestamp\"]-x[\"timestamp\"]\n\n\t\t\tdistancia=(haversine(longitude_1,latitude_1,longitude_2,latitude_2)) \n\t\t\tvelocidad= distancia/(delta_time/3600)\n\n\t\t\tif velocidad>speed_limit: #Estoy usando timestamp\n\t\t\t\tprint(\"Se alcanzo una velocidad anormal de {} en {}\".format(velocidad,x[\"timestamp\"]))\n\n\t\t\tif delta_time>time_limit:\n\t\t\t\tprint(\"\"\"Se encontró una laguna sin mediciones de {} segundos\\nEsta se encontró entre ({},{})\n\"\"\".format(delta_time,x[\"timestamp\"],y[\"timestamp\"]))\n\n\t\t\tif x[\"utc\"] == y[\"utc\"]:\n\t\t\t\tprint(\"No se actualizó la data en {}, utc constante\".format(x[\"timestamp\"]))\n\n\n\t\t\tif (x[\"latitude\"],x[\"longitude\"]) == (y[\"latitude\"],y[\"longitude\"]) and x[\"utc\"] != y[\"utc\"]:\n\t\t\t\tduplicated = True\n\t\t\t\tredudant_data.append(y) #Considerar que deben quedar al menos 2\n\t\t\telse:\n\t\t\t\tif duplicated == True:\n\t\t\t\t\tduplicated = False\n\t\t\t\t\tredudant_data.pop() #Teoricamente correcto\n\n\t\t\tif check_valid_coordinate(x[\"latitude\"],\"lat\") and check_valid_coordinate(x[\"longitude\"],\"lon\"):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tprint(\"coordenada no cumple con formato valido: 
{}\".format((x[\"latitude\"],x[\"longitude\"])))\n\t\t\t\tdisposable_data.append(x)\n\n\t\toutput = [i for i in self.gps_database if i not in redudant_data and i not in disposable_data]\n\t\tprint(\"Numero de entradas redundantes: {}\".format(len(redudant_data)))\n\n\t\treturn output\n\n\nif __name__ == '__main__':\n\tpass\n\t#Caso 1 exitoso, el check valid gps omite por que el formato no es verificable, presenta exceso velocidad\n\t#x = JsonData()\n\t#x.add_data(\"/Users/benjamimo1/Documents/AgroBolt/Data-test/Formato1-Limpio.json\")\n\t#x.clean_data()\n\t##for i in x.gps_database:\n\t##\tprint(i)\n\t#x.export_to_csv(\"/Users/benjamimo1/Documents/AgroBolt/Data-test/output.csv\",\"gps\")\n\t#x.export_to_kml(\"/Users/benjamimo1/Documents/AgroBolt/Data-test\")\n\t#x.review(40,10)\n\t#filtrado = x.filter_by_id(\"011\")\n\n\t#Caso 2: Se revisa filtrado operativo a la hora de importar la data, presenta lagunas\n\t#x = JsonData()\n\t#x.add_data(\"/Users/benjamimo1/Documents/AgroBolt/sub_data/\",\"2018-02-03\")\n\t#x.clean_data()\n\t#x.export_to_csv(\"/Users/benjamimo1/Documents/AgroBolt/sub_data/2018-02-03.csv\",\"gps\")\n\t#x.export_to_kml(\"/Users/benjamimo1/Documents/AgroBolt/sub_data/\")\n\t#y = x.review(40,10)\n\t#for i in y:\n\t#\tprint(y)\n\n\t#Caso 3: Archivo del tipo log\n\tx = JsonData()\n\tx.add_data(\"/Users/benjamimo1/Documents/AgroBolt/GARCES_BACKUP_V2/data_2_datalogger-GARCES_01_2018-03-14 20_45_55.056404.log\")\n\tfor i in x.master_database:\n\t\tprint(i)\n\n\t#toCSV = [{'name':'bob','age':25,'weight':200},\n #{'name':'jim','age':31,'weight':180}]\n\t#data_to_csv(toCSV,\"/Users/benjamimo1/Documents/AgroBolt/Data-test/output.csv\")\n\n\n\n","repo_name":"benjamimo1/JsonData","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":8914,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"25329064230","text":"#!/usr/bin/python\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\nimport pdb\n\n\"\"\"\nComparison between two implementation of DGMPM.\nOne aims to reduce numerical diffusion by interpolating variation from nodes two material points after resolution of the discrete equations on the mesh\n\"\"\"\n\nppc=4\nMp=100*ppc\nif ppc==1: CFL = 0.5\nelif ppc==2: CFL=0.4286\nelif ppc==4: CFL=0.2258\nparameters={\"algo\":'test',\"sinusoidal\":False,\"Mp\":Mp,\"ppc\":ppc,\"CFL\":CFL}\nDGMPM = dict(parameters)\nexecfile('dgmpm.py', DGMPM)\n\nparameters={\"algo\":'original',\"sinusoidal\":False,\"Mp\":Mp,\"ppc\":ppc,\"CFL\":CFL}\nDGMPM2 = dict(parameters)\nexecfile('dgmpm.py', DGMPM2)\n\nDGFEM = dict(parameters)\nexecfile('dgfem.py', DGFEM)\n\nfig = plt.figure()\n\nplt.grid()\nplt.xlim(0.,1.)\nplt.ylim(-100.,100.)\nplt.xlabel('x (m)', fontsize=18)\nplt.ylabel(r'$\\mathcal{Q}$ (Pa)', fontsize=18)\nplt.title('Stress wave propagation in a bar', fontsize=16)\nline1,= plt.plot([], [],'r-o', lw=1.5)\nline2,= plt.plot([], [],'b-s', lw=1.5)\nline3,= plt.plot([], [],'k', lw=1.5)\nfig.legend((line1,line2,line3),('DGMPM test','DGMPM original','Analytical'),'upper right',numpoints=1)\n\n# initialization function: plot the background of each frame\ndef init():\n line2.set_data([], [])\n line3.set_data([], [])\n return line2,line3\n\n# animation function. 
This is called sequentially\ndef animate(i):\n line1.set_data(DGMPM[\"xp\"][:,0],DGMPM[\"Stress\"][:,i])\n line2.set_data(DGMPM2[\"xp\"][:,0],DGMPM2[\"Stress\"][:,i])\n line3.set_data(DGMPM2[\"xp\"][:,0],DGMPM[\"analytical\"][:,i])\n return line1,line2,line3\n \n# call the animator. blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=DGMPM2[\"Stress\"].shape[1], interval=50, blit=True)\n\n#Animation of the stress\nplt.show()\n#anim.save('StressBar.mp4', extra_args=['-vcodec', 'libx264'])\n\n","repo_name":"Reclu/research","sub_path":"codes/linearadvection/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"33007631670","text":"#improt your first a library regx:\r\n#And first of all i tell you this is just a console application.\r\n#I create another project how to build GUI Application\r\nimport re \r\nprint(\"Our Magical Calculator\")\r\nprint(\"type 'quit' and exit/n\")\r\nprevious = 0\r\nrun = True\r\n#create a mathmatical function\r\ndef performMath():\r\n global run\r\n global previous\r\n equation = \"\"\r\n if previous == 0:\r\n equation = input(\"Type Equation:\")\r\n else:\r\n equation = input(str(previous))\r\n \r\n if equation == 'quit':\r\n run = False\r\n else:\r\n equation = re.sub('[a-zA-Z.,\"\":()]','',equation)\r\n if previous == 0:\r\n previous = eval(equation)\r\n else:\r\n previous = eval(str(previous) + equation)\r\n print(\"Ans:\", previous)\r\nwhile run:\r\n performMath()\r\n#Finish work\r\n","repo_name":"99Moshiur/python-project","sub_path":"Calculator/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"32544510415","text":"from fastapi import APIRouter, status, Path, Depends\nfrom fastapi.responses import JSONResponse\nfrom fastapi.encoders import jsonable_encoder\nfrom typing import List\n\n# modulos locales \nfrom config.database import SessionLocal\nfrom schemas.role_schema import RoleSchemaRequest, RoleSechemaResponse\nfrom services.role_service import RoleService\nfrom middlewares.jwt_bearer import JWTBearer\n\n\nrole_router = APIRouter(\n prefix='/roles',\n tags=['Roles']\n)\n\n@role_router.get(path='/get-all-roles', response_model=List[RoleSechemaResponse], status_code=status.HTTP_200_OK, dependencies=[Depends(JWTBearer())])\ndef get_all_roles():\n db = SessionLocal()\n registros = RoleService(db=db).get_roles()\n\n return JSONResponse(content=jsonable_encoder(registros), status_code=status.HTTP_200_OK)\n\n\n@role_router.get(path='/get-role/{id}', response_model=RoleSechemaResponse, status_code=status.HTTP_200_OK, dependencies=[Depends(JWTBearer())])\ndef get_role(id: int = Path(ge=1, le=2000)):\n db = SessionLocal()\n registro = RoleService(db).get_role(id=id)\n\n if not registro:\n return JSONResponse(content={\"message\": \"Rol no encontrado\"}, status_code=status.HTTP_404_NOT_FOUND)\n \n return JSONResponse(content=jsonable_encoder(registro), status_code=status.HTTP_200_OK)\n\n\n@role_router.post(path='/create-role', response_model=RoleSechemaResponse, status_code=status.HTTP_201_CREATED, dependencies=[Depends(JWTBearer())])\ndef create_role(role: RoleSchemaRequest):\n\n db = SessionLocal()\n new_role = RoleService(db).create_role(role=role)\n\n return JSONResponse(content=jsonable_encoder(new_role), 
status_code=status.HTTP_201_CREATED)\n\n\n@role_router.put(path='/update-role/{id}', response_model=dict, status_code=status.HTTP_200_OK, dependencies=[Depends(JWTBearer())])\ndef update_role(id: int, role: RoleSchemaRequest):\n db = SessionLocal()\n registro = RoleService(db=db).get_role(id=id)\n\n if not registro:\n return JSONResponse(content={\"message\": \"Registro no encontrado\"}, status_code=status.HTTP_404_NOT_FOUND)\n\n registro_editado = RoleService(db).update_role(id=id, data=role)\n\n if not registro_editado:\n return JSONResponse(content={\"message\": \"Error al editar\"}, status_code=status.HTTP_400_BAD_REQUEST)\n\n return JSONResponse(content={\"message\": \"Editado\", \"registro\": jsonable_encoder(registro_editado)}, status_code=status.HTTP_200_OK)\n\n\n@role_router.delete(path='/delete-role/{id}', response_model=dict, status_code=status.HTTP_200_OK, dependencies=[Depends(JWTBearer())])\ndef delete_role(id: int) -> dict:\n db = SessionLocal()\n registro = RoleService(db=db).get_role(id=id)\n\n if not registro:\n return JSONResponse(content={\"message\": \"Registro No encontrado\"}, status_code=status.HTTP_404_NOT_FOUND)\n\n registro_eliminado = RoleService(db=db).delete_role(id=id)\n \n if not registro_eliminado:\n return JSONResponse(content={\"message\": \"Error al eliminar\"}, status_code=status.HTTP_400_BAD_REQUEST)\n \n return JSONResponse(content={\"message\": \"Eliminado\", \"registro\": jsonable_encoder(registro)}, status_code=status.HTTP_200_OK)\n","repo_name":"DrGarciaDev/api-auth-users-fastapi","sub_path":"routers/role_router.py","file_name":"role_router.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72729750414","text":"import torch\nimport os\nimport sys\nfrom torch.autograd import Variable\nimport numpy as np\nfrom model.load_model import ModelLoader\nfrom skimage import io\nfrom skimage.transform import resize\n\nmodel = ModelLoader()\n\ninput_height = 384\ninput_width = 512\n\ndef test_simple(model):\n model.switch_to_eval()\n\n dirListing = os.listdir(\"./imgs_to_test\")\n editFiles = []\n for item in dirListing:\n if \".jpg\" in item:\n editFiles.append(\"./imgs_to_test/\"+item)\n \n imgs = []\n original_size = []\n results = []\n\n for path in editFiles:\n img = np.float32(io.imread(path))/255.0\n img = img[:,:,:3] #ignore alpha\n original_size.append((img.shape[0], img.shape[1]))\n img = resize(img, (input_height, input_width), order = 1)\n imgs.append(img)\n input_img = torch.from_numpy( np.transpose(img, (2,0,1)) ).contiguous().float()\n input_img = input_img.unsqueeze(0)\n if torch.cuda.is_available():\n input_images = Variable(input_img.cuda())\n else:\n input_images = Variable(input_img)\n pred_log_depth = model.netG.forward(input_images) \n pred_log_depth = torch.squeeze(pred_log_depth)\n\n pred_depth = torch.exp(pred_log_depth)\n\n pred_inv_depth = 1/pred_depth\n pred_inv_depth = pred_inv_depth.data.cpu().numpy()\n pred_inv_depth = pred_inv_depth/np.amax(pred_inv_depth)\n results.append(pred_inv_depth)\n\n for i, im in enumerate(results):\n io.imsave('./results/'+str(i)+'.png', im)\n\n for i, im in enumerate(results):\n im_res = resize(im, original_size[i], order = 1)\n io.imsave('./results/'+str(i)+'_resized_to_original.png', im_res)\n\n res = np.vstack(results)\n im = np.vstack(imgs)\n\n res = np.stack([res,res,res], axis=2)\n io.imsave('./results/all_imgs.png', np.hstack([im, res]))\n\n 
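# Exit once every result image has been written to ./results/.\n    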
sys.exit()\n\ntest_simple(model)\n\n\n\n\n","repo_name":"ArnaudFickinger/mono","sub_path":"test_imgs.py","file_name":"test_imgs.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72677218573","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg1 = cv2.imread('test2.tif', 0)\n\nmax_val = np.max(img1)\nprint(max_val)\n\nimg2 = (img1 / max_val) # Normalization\n\na = 0\nb = 0.2\n(nr, nc) = img1.shape\n\nx = a + (b * np.random.normal(a, b, (nr, nc))) # Gaussian Distribution with mean (a) and variance (b)\ny = a + ((b - a) * np.random.rand(nr, nc)) # Uniform Distribution\n\nimg_guassian = (img2 + x) * max_val\nimg_guassian_uint8 = np.array(img_guassian, dtype=np.uint8)\n\n###### Arithmetic Mean Filter\nfilterSize = 3\nk1 = np.array(np.ones((filterSize, filterSize), np.float32)) / (filterSize * filterSize) # average filter\nprint(k1)\noutput = cv2.filter2D(img_guassian, -1, k1)\n# printing mask\n# here change mask variable\nplt.subplot(1, 3, 1)\nplt.imshow(img1, cmap='gray')\nplt.title('Original Image')\nplt.xticks([])\nplt.yticks([])\nplt.subplot(1, 3, 2)\nplt.imshow(img_guassian, cmap='gray')\nplt.title('Noisy Image')\nplt.xticks([])\nplt.yticks([])\nplt.subplot(1, 3, 3)\nplt.imshow(output, cmap='gray')\nplt.title('Filtered Image')\nplt.xticks([])\nplt.yticks([])\nplt.show()\ncv2.waitKey(0) # Wait until key strike from keyboard\ncv2.destroyAllWindows() # Close all windows\n","repo_name":"divyeshmiyani/Image-Processing","sub_path":"Experiments/Experiment 8/Exp8_4.py","file_name":"Exp8_4.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"26617921333","text":"def customized_range(*args):\n start, step = 0, 1\n numargs = len(args)\n if numargs < 1:\n raise TypeError(f'expected at least one argument. got {numargs}')\n elif numargs == 1:\n stop = args[0]\n elif numargs == 2:\n start, stop = args\n elif numargs == 3:\n start, stop, step = args\n else:\n raise TypeError(f'Expected at most three arguments. Got {numargs}')\n\n while start <= stop:\n yield start\n start += step\n\nfor i in customized_range(25, 100, 5):\n print(i, end=' ')","repo_name":"QuameOphory/python","sub_path":"tweaked_range.py","file_name":"tweaked_range.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"24549736261","text":"\"\"\"\nEste archivo generaría todos los modelos que tiene la aplicación. En programas más complicados\ntendríamos una cosa así:\n\nsrc/models/actor/chansey.py\nsrc/models/actor/egg.py\nsrc/models/factory/eggcreator.py\n\n...\nY este archivo sería algo como\nsrc/models/model.py --> sólo importaría los objetos que usa el resto de la aplicación, sin tocar el detalle mismo\n\nfrom src.models.actor.chansey import Chansey\nfrom src.models.actor.factory import EggCreator\n...\n\nPero aquí, como nuestra app es sencilla, definimos todas las clases aquí mismo.\n1. Chansey\n2. 
Los huevos\n\"\"\"\nfrom asyncio.windows_utils import pipe\nfrom contextlib import ContextDecorator\nfrom re import X\nfrom venv import create\nimport glfw\nimport grafica.transformations as tr\nimport grafica.basic_shapes as bs\nimport grafica.scene_graph as sg\nimport grafica.easy_shaders as es\nfrom grafica.assets_path import getAssetPath\nfrom OpenGL.GL import *\n\nfrom OpenGL.GL import glClearColor, GL_STATIC_DRAW\nimport random\nfrom typing import List\n\nimport numpy as np\nimport grafica.text_renderer as tx\n\ndef create_gpu(shape, texture, map, fill, pipeline):\n gpu = es.GPUShape().initBuffers()\n pipeline.setupVAO(gpu)\n gpu.fillBuffers(shape.vertices, shape.indices, GL_STATIC_DRAW)\n gpu.texture = es.textureSimpleSetup(\n getAssetPath(texture), fill, fill, map, map )\n return gpu\n\n\nclass Score(object): # Clase que se encarga de mostrar el Score en la pantalla\n\n global score\n def __init__(self, textPipeline,gpuText3DTexture):\n\n \n self.textPipeline = textPipeline\n self.shapeScore = tx.textToShape(str(score),0.125,0.25)\n\n self.gpuScore = es.GPUShape().initBuffers()\n self.textPipeline.setupVAO(self.gpuScore)\n self.gpuScore.fillBuffers(self.shapeScore.vertices, self.shapeScore.indices, GL_STATIC_DRAW)\n self.gpuScore.texture = gpuText3DTexture\n \n def draw(self, textPipeline):\n glUniform4f(glGetUniformLocation(textPipeline.shaderProgram, \"fontColor\"), 0,0,0,1)\n glUniform4f(glGetUniformLocation(textPipeline.shaderProgram, \"backColor\"), 0, 0, 0, 0)\n glUniformMatrix4fv(glGetUniformLocation(textPipeline.shaderProgram, \"transform\"), 1, GL_TRUE,\n tr.translate(0, -1, 0))\n self.textPipeline.drawCall(self.gpuScore) \n\n\nclass Background(object):\n def __init__(self,pipeline):\n\n self.pipeline = pipeline\n self.gpuBackground = create_gpu(bs.createTextureQuad(1,1), \"background.jpg\", GL_NEAREST, GL_CLAMP_TO_EDGE, pipeline)\n\n self.pos_x = 0\n\n def draw(self, pipeline):\n\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n tr.translate(self.pos_x, 1/6, 0),\n tr.scale(2, 5/3, 0)\n ))\n pipeline.drawCall(self.gpuBackground)\n\n\n\nclass Floor(object): \n def __init__(self,pipeline,x):\n\n self.pipeline = pipeline\n self.gpuFloor = create_gpu(bs.createTextureQuad(1, 1),\"suelo.jpg\" , GL_NEAREST, GL_CLAMP_TO_EDGE, self.pipeline)\n\n self.pos_x = x\n\n def draw(self,pipeline):\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n tr.translate(self.pos_x, -5/6, 0), \n tr.scale(2, 1/3, 0)\n ))\n self.pipeline.drawCall(self.gpuFloor)\n\n def update(self,dt):\n self.pos_x = self.pos_x - dt\n\nclass Floors(object): # Clase que contiene dos texturas en un lista\n floors: List[\"Floor\"]\n\n def __init__(self):\n self.floors = []\n self.on = True\n\n def create_floor(self, pipeline):\n for i in range(0,2):\n self.floors.append(Floor(pipeline, 0 +i*2))\n\n def draw(self, pipeline):\n for k in self.floors:\n k.draw(pipeline)\n\n def update(self, pipeline):\n if self.on == True:\n for k in self.floors:\n k.update(pipeline)\n\n def swap(self,pipeline): # Funcion que borra el suelo en cierta posicion y lo agrega a la derecha de la pantalla\n \n if self.on == True:\n for k in self.floors: \n if k.pos_x <= -2: \n self.floors.pop(0)\n self.floors.append(Floor(pipeline,2))\n\n\nclass Die_Or_Win(object): # Clase que interpreta crea las textura para luego ser dibujado si gana o pierde\n\n def __init__(self, pipeline): \n\n self.pipeline = pipeline\n self.gpu_w = 
create_gpu(bs.createTextureQuad(1,1),'win.png', GL_NEAREST, GL_CLAMP_TO_EDGE, self.pipeline)\n        self.gpu_l = create_gpu(bs.createTextureQuad(1,1), 'wasted.png', GL_NEAREST, GL_CLAMP_TO_EDGE, self.pipeline)\n\n    def draw_w(self,pipeline, pajarito: 'Pajarito'):\n        glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n            tr.translate(1/2,0,0),\n            tr.scale(1, 1, 0)\n        ))\n        self.pipeline.drawCall(self.gpu_w)\n        pajarito.gravity = 0\n        pajarito.vy = 0\n\n    \n    def draw_l(self,pipeline, pajarito: 'Pajarito'): \n        glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n            tr.translate(0,1/2,0),\n            tr.scale(1, 1, 0)\n        ))\n        self.pipeline.drawCall(self.gpu_l)\n        pajarito.gravity = 0\n        pajarito.vy = 0\n\n\n\nclass Pajarito(object):\n\n    def __init__(self, pipeline,pos_x,pos_y,vy, gravity):\n        # Figuras básicas\n        self.pipeline = pipeline\n        self.gpuPajarito = create_gpu(bs.createTextureQuad(1, 1), \"pajarito.png\",GL_NEAREST, GL_CLAMP_TO_EDGE, self.pipeline)\n\n        self.alive = True\n        self.pos_x = pos_x\n        self.pos_y = pos_y\n        self.vy = vy\n        self.gravity = gravity\n\n    def draw(self, pipeline):\n        glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n            tr.translate(self.pos_x, self.pos_y , 0), \n            tr.scale(1/6, 1/6, 0)\n        ))\n        self.pipeline.drawCall(self.gpuPajarito)\n\n    def updatePosition(self, dt): # Funcion de dinamica\n        self.vy = self.vy + dt * self.gravity\n        self.pos_y = self.pos_y + self.vy * dt\n\n    def move_up(self): # Funcion que hace que se mueva para arriba\n        if not self.alive or (self.gravity == 0 and self.vy == 0):\n            return\n        self.vy = 3/4\n\n    def collide(self, tubos: 'Tubos', floors: \"Floors\"): # Funcion que detecta colisiones\n        if not tubos.on: \n            return\n\n        if self.pos_y <= -15/24: # Detecta si toco el suelo\n            self.pos_y = -15/24\n            self.vy = 0\n            self.gravity = 0\n            tubos.on = False\n            self.alive = False\n            floors.on = False\n\n        for e in tubos.tubos:\n            if e.pos_x - 3/16 <= self.pos_x <= e.pos_x + 3/16 and not e.pos_y - 0.156 <= self.pos_y <= e.pos_y + 0.156: # Detecta si toca algun tubo\n                \n                self.gravity = 0\n                self.vy = 0\n                tubos.on = False\n                self.alive = False\n                floors.on = False\n\n\nclass Tubo(object):\n\n    def __init__(self, x, y, pipeline):\n\n        self.pipeline = pipeline\n        \n        self.gpu_tubo = create_gpu(bs.createTextureQuad(1,3), \"tubo.png\", GL_NEAREST, GL_CLAMP_TO_EDGE, self.pipeline)\n\n        self.pos_y = y\n        self.pos_x = x\n        self.count = 0\n\n    def draw(self,pipeline):\n\n        # Dibujo tubo arriba\n        glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n            tr.translate(self.pos_x, self.pos_y + 1.7, 0), \n            tr.scale(1/2, -3, 0)\n        ))\n        self.pipeline.drawCall(self.gpu_tubo)\n        # Dibujo tubo abajo\n        glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"transform\"), 1, GL_TRUE, np.matmul(\n            tr.translate(self.pos_x, self.pos_y - 1.7 , 0), \n            tr.scale(1/2, 3, 0)\n        ))\n        self.pipeline.drawCall(self.gpu_tubo)\n\n    def counted(self):\n        if self.pos_x <= -3/8 and self.count == 0:\n            global score\n            self.count = 1\n            score = score + 1\n    \n    def update(self, dt):\n        self.pos_x = self.pos_x - dt \n\nclass Tubos(object):\n    tubos: List['Tubo']\n\n    global score\n    score = 0\n    def __init__(self):\n        self.tubos = []\n        self.on = True\n\n    def create_tubo(self, pipeline,N):\n        if len(self.tubos) >= N or not self.on: # No crear más tubos si ya hay N en pantalla o si el juego terminó\n            return\n        else:\n            for i in range(0,N):\n                self.tubos.append(Tubo(1+ 1/4 + 
i,random.uniform(-1/12,5/12),pipeline))\n\n def draw(self, pipeline):\n for k in self.tubos: \n k.draw(pipeline)\n\n def counting(self): # Funcion que cuenta si un tubo k se contara o no\n if self.on:\n for k in self.tubos:\n k.counted()\n\n def update(self, dt, tubos: \"Tubos\"): \n if self.tubos[-1].pos_x <= -3/8:\n self.on = False\n tubos.on = False\n \n if self.on == True:\n for k in self.tubos:\n k.update(dt)","repo_name":"jkauerl/CC3501","sub_path":"tarea1b/modelo.py","file_name":"modelo.py","file_ext":"py","file_size_in_byte":9199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"13368894018","text":"#!/usr/bin/env python\n\nfrom flask import Flask, jsonify, request\n\n# Instantiate the peer\nfrom bal.variant.pow_blockchain import POWBlockchain\nfrom bal.variant.pos_blockchain import POSBlockchain\n\nfrom bal.variant.pow_blockchain_simulation import POWBlockchainSimulation\nfrom bal.variant.pos_blockchain_simulation import POSBlockchainSimulation\n\nfrom bal.wallet import init_wallet, get_public_from_wallet, get_private_from_wallet, create_transaction\n\nimport threading\nimport yaml\nimport json\nimport traceback\nimport time\napp = Flask(__name__)\n\n\n# Instantiate the Blockchain\nblockchain = None\nloop_started = False\n\n@app.route('/transactions/unspenttxouts', methods=['GET'])\ndef do_unspent_tx_outputs():\n return jsonify(blockchain.unspent_tx_outs), 200\n\n@app.route('/transactions/unspenttxouts/my', methods=['GET'])\ndef do_my_unspent_tx_outs():\n return jsonify(blockchain.get_my_unspent_transaction_outputs()), 200\n\n@app.route('/address/my', methods=['GET'])\ndef do_address():\n address = get_public_from_wallet()\n return jsonify({'address': address}), 200\n\n@app.route('/balance/my', methods=['GET'])\ndef do_get_my_balance():\n balance = blockchain.get_my_account_balance()\n return jsonify({'balance': balance}), 200\n\n@app.route('/balance/
<address>
', methods=['GET'])\ndef do_get_balance(address):\n    balance = blockchain.get_account_balance(address)\n    return jsonify({'balance': balance}), 200\n\n@app.route('/transactions/pool', methods=['GET'])\ndef do_get_transaction_pool():\n    return jsonify(blockchain.transaction_pool.get_transaction_pool()), 200\n\n@app.route('/block/generate', methods=['GET'])\ndef do_generate_block():\n    new_block = blockchain.generate_next_block()\n    if new_block:\n        return jsonify(new_block), 200\n    else:\n        return 'Could not generate new block', 400\n\n@app.route('/block/generate/loop/start', methods=['GET'])\ndef do_generate_loop_start():\n    global loop_started\n    try:\n        if loop_started:\n            return \"Loop has already started.\", 200\n        loop_started = True\n        threading.Thread(\n            target = do_generate_loop_helper,\n        ).start()\n        return \"Started Generation Loop (Asynchronous)\", 200\n    except Exception as e:\n        traceback.print_exc()\n        return jsonify(str(e)), 500\n\n@app.route('/block/generate/loop/stop', methods=['GET'])\ndef do_generate_loop_stop():\n    global loop_started\n    try:\n        loop_started = False\n        return \"Stopped Generation Loop\", 200\n    except Exception as e:\n        traceback.print_exc()\n        return jsonify(str(e)), 500\n\ndef do_generate_loop_helper():\n    global loop_started\n    try:\n        while(loop_started):\n            blockchain.generate_next_block()\n            time.sleep(1)\n    except Exception:\n        traceback.print_exc()\n\n@app.route('/block/latest', methods=['GET'])\ndef do_latest_block():\n    return jsonify(blockchain.get_latest_block()), 200\n\n@app.route('/block/<int:index>', methods=['GET'])\ndef do_block_index(index):\n    return jsonify(blockchain.get_blockchain()[index]), 200\n\n@app.route('/transactions/send', methods=['POST'])\ndef do_new_transaction():\n    values = yaml.safe_load(json.dumps(request.get_json()))\n\n    # Check that the required fields are in the POST'ed data\n    required = ['recipient', 'amount']\n    if not all(k in values for k in required):\n        return 'Missing values', 400\n\n    # Create a new new_transaction\n    try:\n        tx = blockchain.send_transaction(values['recipient'], values['amount'])\n    except Exception as e:\n        traceback.print_exc()\n        tx = str(e)\n        return jsonify(tx), 200\n\n    return jsonify(tx), 200\n\n@app.route('/transactions/has_amount/<int:amount>', methods=['GET'])\ndef do_has_amount_for_transaction(amount):\n    try:\n        create_transaction(get_public_from_wallet(), amount, get_private_from_wallet(), blockchain.get_unspent_tx_outs(), blockchain.transaction_pool.get_transaction_pool())\n        return jsonify(True), 200\n    except Exception as e:\n        traceback.print_exc()\n        tx = str(e)\n        return jsonify(False), 200\n\n@app.route('/chain', methods=['GET'])\ndef do_full_chain():\n    return jsonify(blockchain.full_chain()), 200\n\n@app.route('/chain/length', methods=['GET'])\ndef do_chain_length():\n    return jsonify(blockchain.full_chain()['length']), 200\n\n@app.route('/peers', methods=['GET'])\ndef do_get_peers():\n    return jsonify(blockchain.p2p.get_peers()), 200\n\n@app.route('/peers/register', methods=['POST'])\ndef do_register_peers():\n    values = yaml.safe_load(json.dumps(request.get_json()))['peer']\n    if not values:\n        return 'Missing values', 200\n    return jsonify(blockchain.p2p.add_peer(values)), 200\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser()\n    parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')\n    parser.add_argument('-s', '--socket', default=6001, type=int, help='p2p port to listen on')\n    parser.add_argument('-db', '--database', default='', help='db file')\n    
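# illustrative example run using the flags defined here and just below (all values hypothetical): python blockchain.py -p 5001 -s 6002 -db chain.db -v pos\n    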
parser.add_argument('-v', '--variant', default='pow', help='variant of blockchain \"pow\" or \"pos\"')\n parser.add_argument('-d', '--difficulty', default=4, type=int, help='initial difficulty')\n parser.add_argument('-k', '--keystore', default='/tmp/private_key.pem', help='where the keystore located. default: private_key.pem')\n parser.add_argument('-sp', '--simulationpath', default='', help='specifies if it is a simulation run and where simulation logs will be kept.')\n parser.add_argument('-n', '--name', default='bc', help='specifies blockchain node name(mostly for simulations)')\n\n args = parser.parse_args()\n port = args.port\n dbfile = args.database\n p2p_port = args.socket\n initial_difficulty = args.difficulty\n simulation_path = args.simulationpath\n if simulation_path != '':\n if args.variant.find('pos') == 0:\n blockchain = POSBlockchainSimulation(p2p_port, initial_difficulty, simulation_path, args.name)\n else:\n blockchain = POWBlockchainSimulation(p2p_port, initial_difficulty, simulation_path, args.name)\n else:\n if args.variant.find('pos') == 0:\n blockchain = POSBlockchain(p2p_port, initial_difficulty)\n else:\n blockchain = POWBlockchain(p2p_port, initial_difficulty)\n\n if dbfile:\n print(\"DB: \" + dbfile)\n blockchain.init_db(dbfile)\n init_wallet(args.keystore)\n threading.Thread(\n target = blockchain.p2p.start,\n args = ()\n ).start()\n app.run(host='0.0.0.0', port=port, threaded=True)\n","repo_name":"BAlchemyLab/bal","sub_path":"bal/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"14"} +{"seq_id":"74594785934","text":"#Librerie\nimport datetime\n\n#Variabili globali\ndata = None\norario = None\ngiorno = None\nmese = None\nanno = None\ngiorno_sett = None\nistanze_cliente = {}\nistanze_istruttore = {}\nistanze_abbonamento = {}\n\n#Funzioni globali\ndef definisci_momento():\n global data, orario, giorno, mese, anno, giorno_sett\n now = datetime.datetime.now()\n giorno = now.strftime(\"%d\")\n mese = now.strftime(\"%m\")\n anno = now.strftime(\"%Y\")\n data = f\"{giorno}/{mese}/{anno}\"\n giorno_sett = now.strftime(\"%A\")\n orario = now.strftime(\"%X\")\n\n#Classi\nclass Palestra:\n def __init__(self, nome, elenco_iscritti = [], abbonamenti = {}, corsi = [], aperta = False, istruttori = [], cassa = 0, orari_apertura = {}, planning = {}):\n self.nome = nome\n self.nr_iscritti = len(elenco_iscritti)\n self.elenco_iscritti = []\n self.abbonamenti = abbonamenti\n self.corsi = corsi\n self.aperta = aperta\n self.istruttori = istruttori\n self.cassa = cassa\n self.orari_apertura = orari_apertura\n self.planning = planning\n self.diff = 0\n\n def apri_chiudi(self):\n self.aperta = not self.aperta\n if self.aperta:\n print(\"La palestra adesso è aperta\")\n else:\n print(f\"La palestra adesso è chiusa, oggi la palestra ha incassato {self.diff}, il totale disponibile in cassa è {self.cassa}\")\n \n def aggiungi_cliente(self, cliente):\n if self.aperta:\n self.elenco_iscritti.append(cliente.nome)\n self.elenco_iscritti.sort()\n #print(self.elenco_iscritti)\n self.nr_iscritti += 1\n abb = input(f\"Scegli l'abbonamento tra i seguenti: {self.abbonamenti}\\n> \")\n while not abb in self.abbonamenti.keys():\n abb = input(f\"Abbonamento non trovato, riprova: {self.abbonamenti}\\n> \")\n prezzo = self.abbonamenti[abb]\n abb = abb.split(\"_\")\n istanze_abbonamento[cliente.nome] = Abbonamento(abb[0], prezzo, nr_ingressi = abb[1][0])\n abb = \"_\".join(abb)\n 
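# here abb has been rebuilt into the original key of self.abbonamenti, e.g. \"mensile_2v\" (plan name plus a visits suffix such as \"2v\")\n            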
cliente.abbonamento += abb\n self.incasso(self.abbonamenti.get(abb))\n else:\n print(\"La palestra è chiusa!\")\n \n def planner(self):\n if giorno_sett in self.orari_apertura.keys():\n orario_ap = min(self.orari_apertura[giorno_sett])\n orario_chius = max(self.orari_apertura[giorno_sett])\n self.planning[data] = { str(x) : [] for x in range(orario_ap, orario_chius)}\n if giorno_sett == \"Monday\" or giorno_sett == \"Wednesday\" or giorno_sett == \"Friday\":\n self.planning[data][\"18\"].append(\"Pilates\")\n if giorno_sett == \"Monday\" or giorno_sett == \"Wednesday\" or giorno_sett == \"Friday\":\n self.planning[data][\"19\"].append(\"Walking\")\n if giorno_sett == \"Tuesday\" or giorno_sett == \"Thursday\":\n self.planning[data][\"18\"].append(\"Yoga\")\n if giorno_sett == \"Tuesday\" or giorno_sett == \"Thursday\":\n self.planning[data][\"19\"].append(\"Funzionale\")\n for istruttore in istanze_istruttore:\n if giorno_sett in istanze_istruttore[istruttore].orari.keys():\n if min(istanze_istruttore[istruttore].orari[giorno_sett]) <= int(orario[:2]) <= max(istanze_istruttore[istruttore].orari[giorno_sett]):\n istanze_istruttore[istruttore].inizio_fine_turno()\n else:\n return None\n\n def incasso(self, importo):\n if self.aperta:\n self.cassa += importo\n self.diff += importo\n else:\n print(\"La palestra è chiusa!\")\n \n def uscita(self, importo):\n if self.aperta:\n self.cassa -= importo\n self.diff -= importo\n else:\n print(\"La palestra è chiusa!\")\n \n def crea_cliente(self):\n global istanze_cliente\n if self.aperta:\n nome = input(\"Insersci il nome completo del cliente:\\n> \")\n cliente = Cliente(nome)\n istanze_cliente[nome] = cliente\n return cliente\n else:\n print(\"La palestra è chiusa!\")\n \n def crea_istruttore(self):\n global istanze_istruttore\n if self.aperta:\n nome = input(\"Insersci il nome completo dell'istruttore:\\n> \")\n attivita = input(\"Inserisci l'attività dell'istruttore\\n> \")\n if not attivita in self.corsi:\n self.corsi.append(attivita)\n stipendio = input(\"Inserisci stipendio mensile istruttore\\n> \")\n orari = {\"Monday\" : (9,21), \"Tuesday\" : (9,21), \"Wednesday\" :(9,21), \"Thursday\" : (9,21), \"Friday\" : (9,21), \"Saturday\" : (10,18)}\n istruttore = Istruttore(nome, attivita, stipendio, orari)\n istanze_istruttore[nome] = istruttore\n self.istruttori.append(istruttore.nome)\n self.planner()\n return istruttore\n else:\n print(\"La palestra è chiusa!\")\n\n def __repr__(self):\n message = f\"La {self.nome} è una palestra con {self.nr_iscritti} iscritti, i suoi corsi sono: {self.corsi}\\nGli abbonamenti sono: {self.abbonamenti}\\nGli istruttori sono: {self.istruttori}\\nIl planning di oggi è: {self.planning}\\n\" + (f\"La palestra è aperta adesso! 
Gli istruttori di turno sono: {self.di_turno()}\" if self.aperta else \"La palestra al momento è chiusa!\")\n return message\n\n def cerca_cliente(self):\n if self.aperta:\n nome = input(\"Inserisci il nome completo del cliente da cercare\\n> \")\n if nome in self.elenco_iscritti:\n return True, nome\n else: \n print(\"Nome non presente in sistema\")\n return False\n else:\n print(\"La palestra è chiusa!\")\n\n def di_turno(self):\n global istanze_istruttore\n lst = []\n for nome in istanze_istruttore:\n if istanze_istruttore[nome].di_turno == True:\n lst.append(nome)\n return lst\n\nclass Cliente:\n def __init__(self, nome, abbonamento = \"\", in_struttura = False,):\n self.nome = nome\n self.abbonamento = abbonamento\n self.in_struttura = in_struttura\n \n def __repr__(self):\n message = f\"{self.nome} abbonato con {self.abbonamento}\\n\" + (f\"{self.nome} al momento è in struttura\" if self.in_struttura else f\"{self.nome} al momento non è in struttura\")\n return message\n\n def arrivo_uscita_struttura(self):\n self.in_struttura = not self.in_struttura\n \n def prenotazione(self, palestra):\n orario = input(\"Seleziona l'orario per la prenotazione:\\n> \")\n while not len(palestra.planning[data][str(orario)]) <= 6:\n print(\"Impossibile prenotare, orario pieno\")\n orario = input(\"Seleziona l'orario per la prenotazione:\\n> \")\n palestra.planning[data][str(orario)].append(self.nome)\n \n\nclass Istruttore:\n def __init__(self, nome, corso = \"\", stipendio = 0, orari = {}, schede_allenamento = {}, di_turno = False):\n self.nome = nome\n self.corso = corso\n self.stipendio = stipendio\n self.orari = orari\n self.schede_allenamento = schede_allenamento\n self.di_turno = di_turno\n \n def creazione_scheda(self):\n nome = input(\"Inserire il nome della persona al quale allegare la scheda d'allenamento:\\n> \")\n if nome in self.schede_allenamento:\n answer = input(\"Questo atleta ha già una scheda, vuoi aggiornarla?(s o n)\\n> \")\n if answer == \"s\":\n self.schede_allenamento[nome] = input(\"Inserire gli esercizi:\\n\")\n else:\n return self.creazione_scheda()\n else:\n self.schede_allenamento[nome] = input(\"Inserire gli esercizi:\\n\")\n\n def inizio_fine_turno(self):\n self.di_turno = not self.di_turno\n\n def __repr__(self):\n message = f\"{self.nome} è un istruttore di {self.corso}\" + (f\" {self.nome} al momento è in palestra!\" if self.di_turno else f\" {self.nome} al momento non è in palstra\")\n return message\n\n#Classe implementata nel processo di refactoring\nclass Abbonamento:\n def __init__(self, nome, prezzo = 0, inizio = data, scadenza = None, attivo = False, nr_ingressi = 0, cliente = None):\n self.nome = nome\n self.prezzo = prezzo\n self.inizio = inizio\n self.scadenza = scadenza\n self.attivo = attivo\n self.cliente = cliente\n self.nr_ingressi = nr_ingressi\n self.set_durata()\n self.attiva()\n \n def set_durata(self):\n global data\n if self.nome == \"mensile\":\n start_date = datetime.datetime.strptime(data, \"%d/%m/%Y\")\n end_date = start_date + datetime.timedelta(days = 30)\n end_date = end_date.strftime(\"%d/%m/%Y\")\n self.scadenza = end_date\n print(self.nr_ingressi)\n print(f\"La scadenza è: {self.scadenza}\")\n elif self.nome == \"trimestrale\":\n start_date = datetime.datetime.strptime(data, \"%d/%m/%Y\")\n end_date = start_date + datetime.timedelta(days = 90)\n end_date = end_date.strftime(\"%d/%m/%Y\")\n self.scadenza = end_date\n print(self.nr_ingressi)\n print(f\"La scadenza è: {self.scadenza}\")\n elif self.nome == \"open\":\n start_date = 
datetime.datetime.strptime(data, \"%d/%m/%Y\")\n end_date = start_date + datetime.timedelta(days = 180)\n end_date = end_date.strftime(\"%d/%m/%Y\")\n self.scadenza = end_date\n print(self.nr_ingressi)\n print(f\"La scadenza è: {self.scadenza}\")\n\n def attiva(self):\n global data\n today = datetime.datetime.strptime(data, \"%d/%m/%Y\")\n expiring = datetime.datetime.strptime(self.scadenza, \"%d/%m/%Y\")\n if today <= expiring:\n self.attivo = True\n \n def __repr__(self):\n return f\"{self.nome}, {self.prezzo}, {self.attivo}\"\n\n\n#Oggetti\nwellness = Palestra(\"Wellness Club\", elenco_iscritti = [], abbonamenti = {\"mensile_2v\": 35, \"trimestrale_2v\": 90, \"mensile_3v\": 40, \"trimestrale_3v\": 105, \"open_6m\": 200}, corsi = [\"Pilates\", \"Walking\", \"Funzionale\", \"Yoga\"], orari_apertura = {\"Monday\" : (9,11,16,21), \"Tuesday\" : (9,11,16,21), \"Wednesday\" : (9,11,16,21), \"Thursday\" : (9,11,16,21), \"Friday\" : (9,11,16,21), \"Saturday\" : (10,11,16,18)}, cassa = 3000)\n#wellness2 = Palestra(\"Wellness Club\", elenco_iscritti = [\"Prova\"], abbonamenti = {\"mensile_2v\": 35, \"trimestrale_2v\": 90, \"mensile_3v\": 40, \"trimestrale_3v\": 105, \"open_6m\": 200}, corsi = [\"Pilates\", \"Walking\", \"Funzionale\", \"Yoga\"], orari_apertura = {\"Monday\" : (9,21), \"Tuesday\" : (9,21), \"Wednesday\" :(9,21), \"Thursday\" : (9,21), \"Friday\" : (9,21), \"Saturday\" : (10,18)})\n#wellness2 test apertura orario continuato, con soli 2 orari al posto di 4 nella lista all'interno del dizionario degli orari\n\n#Script\ndefinisci_momento()\nif giorno_sett in wellness.orari_apertura.keys() and (wellness.orari_apertura[giorno_sett][0] <= int(orario[:2]) <= wellness.orari_apertura[giorno_sett][1] or wellness.orari_apertura[giorno_sett][2] <= int(orario[:2]) <= wellness.orari_apertura[giorno_sett][3]):\n wellness.aperta = True\nelse:\n wellness.aperta = False\nprint(f\"Benvenuto in Wellness Manager, cosa vorresti fare?\\n1-apertura\\n2-aggiungi cliente\\n3-prenota un allenamento\\n4-cerca cliente\\n5-aggiungi istruttore\\n6-informazioni palestra\\n7-informazioni istruttore\\n8-creazione scheda allenamento\\n9-mostra abbonamenti\\nInserisci \\\"esci\\\" per uscire\")\nwellness.planner()\nmaster_input = input(\"> \")\nwhile not master_input == \"esci\":\n if master_input == \"apertura\" or master_input == \"1\":\n wellness.apri_chiudi()\n elif master_input == \"aggiungi cliente\" or master_input == \"2\": \n try:\n cliente = wellness.crea_cliente()\n wellness.aggiungi_cliente(cliente)\n print(cliente)\n except:\n print(\"Errore!\")\n elif master_input == \"prenota un allenamento\" or master_input == \"3\":\n #wellness.planner() spostato dopo il primo controllo sull'apertura della palestra per avere a disposizione il planning globalmente\n print(wellness.planning) \n if wellness.cerca_cliente():\n cliente.prenotazione(wellness)\n print(wellness.planning)\n for h in wellness.planning[data]:\n for name in wellness.planning[data][h]:\n if name in wellness.elenco_iscritti:\n istanze_cliente[name].arrivo_uscita_struttura()\n elif master_input == \"cerca cliente\" or master_input == \"4\":\n try:\n cliente = wellness.cerca_cliente()\n print(istanze_cliente[cliente[1]])\n except:\n print(\"Non è stato possibile trovare il cliente\")\n elif master_input == \"aggiungi istruttore\" or master_input == \"5\":\n wellness.crea_istruttore()\n elif master_input == \"informazioni palestra\" or master_input == \"6\":\n print(wellness)\n elif master_input == \"informazioni istruttore\" or master_input == 
\"7\":\n nome = input(\"Nome dell'istruttore:\\n> \")\n if nome in istanze_istruttore:\n print(istanze_istruttore[nome])\n elif master_input == \"creazione scheda allenamento\" or master_input == \"8\":\n nome = input(\"Nome dell'istruttore:\\n> \")\n if nome in istanze_istruttore:\n istanze_istruttore[nome].creazione_scheda()\n else:\n print(\"Istruttore non trovato!\")\n elif master_input == \"9\" or master_input == \"mostra abbonamenti\":\n print(istanze_abbonamento)\n else:\n print(\"Input non valido, riprova\")\n master_input = input(\"> \")\n ","repo_name":"ualessioa/basic_gym_manager","sub_path":"well.py","file_name":"well.py","file_ext":"py","file_size_in_byte":13731,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"16870084853","text":"import snntorch as snn\nfrom snntorch import surrogate\nfrom snntorch import backprop\nfrom snntorch import functional as SF\nfrom snntorch import utils\nfrom snntorch import spikeplot as splt\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nimport torch.nn.functional as F\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nfrom IPython.display import HTML\n\nspike_grad = surrogate.fast_sigmoid(slope=25)\nbeta = 0.5\n\nlif1 = snn.Leaky(beta=beta, spike_grad=spike_grad)\n\n# dataloader arguments\nbatch_size = 128\ndata_path = 'propdata/MNIST'\nsubset = 10\n\ndtype = torch.float\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n# Define a transform\ntransform = transforms.Compose([\n transforms.Resize((28, 28)),\n transforms.Grayscale(),\n transforms.ToTensor(),\n transforms.Normalize((0,), (1,))])\n\nmnist_train = datasets.MNIST(data_path, train=True, download=True, transform=transform)\nmnist_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)\n\n# reduce datasets by 10x to speed up training\nutils.data_subset(mnist_train, subset)\nutils.data_subset(mnist_test, subset)\n\n# Create DataLoaders\ntrain_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, drop_last=True)\ntest_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=True, drop_last=True)\n\n# neuron and simulation parameters\nspike_grad = surrogate.fast_sigmoid(slope=25)\nbeta = 0.5\nnum_steps = 50\n\n# Initialize Network\nnet = nn.Sequential(nn.Conv2d(1, 12, 5),\n nn.MaxPool2d(2),\n snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True),\n nn.Conv2d(12, 64, 5),\n nn.MaxPool2d(2),\n snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True),\n nn.Flatten(),\n nn.Linear(64 * 4 * 4, 10),\n snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True, output=True)\n ).to(device)\n\ndata, targets = next(iter(train_loader))\ndata = data.to(device)\ntargets = targets.to(device)\n\nfor step in range(num_steps):\n spk_out, mem_out = net(data)\n\ndef forward_pass(net, num_steps, data):\n mem_rec = []\n spk_rec = []\n utils.reset(net) # resets hidden states for all LIF neurons in net\n\n for step in range(num_steps):\n spk_out, mem_out = net(data)\n spk_rec.append(spk_out)\n mem_rec.append(mem_out)\n\n return torch.stack(spk_rec), torch.stack(mem_rec)\n\nspk_rec, mem_rec = forward_pass(net, num_steps, data)\n\nloss_fn = SF.ce_rate_loss()\nloss_val = loss_fn(spk_rec, targets)\nacc = SF.accuracy_rate(spk_rec, targets)\nprint(f\"The loss from an untrained network is {loss_val.item():.3f}\")\nprint(f\"The accuracy of a single batch using 
an untrained network is {acc * 100:.3f}%\")\n\ndef batch_accuracy(train_loader, net, num_steps):\n with torch.no_grad():\n total = 0\n acc = 0\n net.eval()\n\n train_loader = iter(train_loader)\n\n for data, targets in train_loader:\n data = data.to(device)\n targets = targets.to(device)\n spk_rec, _ = forward_pass(net, num_steps, data)\n\n acc += SF.accuracy_rate(spk_rec, targets) * spk_rec.size(1)\n total += spk_rec.size(1)\n\n return acc/total\n\ntest_acc = batch_accuracy(test_loader, net, num_steps)\nprint(f\"The total accuracy on the test set is: {test_acc * 100:.2f}%\")\n\noptimizer = torch.optim.Adam(net.parameters(), lr=1e-2, betas=(0.9, 0.999))\nnum_epochs = 10\ntest_acc_hist = []\n\n# training loop\nfor epoch in range(num_epochs):\n\n avg_loss = backprop.BPTT(net, train_loader, optimizer=optimizer, criterion=loss_fn,\n num_steps=num_steps, time_var=False, device=device)\n\n print(f\"Epoch {epoch}, Train Loss: {avg_loss.item():.2f}\")\n\n # Test set accuracy\n test_acc = batch_accuracy(test_loader, net, num_steps)\n test_acc_hist.append(test_acc)\n\n print(f\"Epoch {epoch}, Test Acc: {test_acc * 100:.2f}%\\n\")\n\n# Plot Loss\nfig = plt.figure(facecolor=\"w\")\nplt.plot(test_acc_hist)\nplt.title(\"Test Set Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Accuracy\")\nplt.show()\n\nspk_rec, mem_rec = forward_pass(net, num_steps, data)\n\nidx = 0\n\nfig2, ax = plt.subplots(facecolor='w', figsize=(12, 7))\nlabels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nprint(f\"The target label is: {targets[idx]}\")\n\n# Plot spike count histogram\nanim = splt.spike_count(spk_rec[:, idx].detach().cpu(), fig2, ax, labels=labels,\n animate=True, interpolate=4)\n\nHTML(anim.to_html5_video())\n","repo_name":"LeeHyunJongSNN/spikeprop","sub_path":"convprop_mnist.py","file_name":"convprop_mnist.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"952380","text":"'''\n O Índice de Massa Corporal (IMC) é utilizado para mensurar o peso ideal\n de uma pessoa. Escreva um programa que peça o nome, a idade , o peso e \n a altura do usuário. 
Ao final calcule e mostre o resultado do seu IMC \n    e classifique este resultado de acordo com a regra a seguir.\n\n    IMC<17 - Muito abaixo do peso ideal\n\n    17<=IMC<18,5 - Abaixo do peso\n\n    18,5<=IMC<25 - Peso normal\n\n    25<=IMC<30 - Acima do peso\n\n    30<=IMC<35 - Obesidade I\n\n    35<=IMC<40 - Obesidade II (severa)\n\n    IMC>=40 - Obesidade III (mórbida)\n    \n    Lembre que: IMC=massa/(altura*altura)\n    Fonte: https://pt.wikipedia.org/wiki/%C3%8Dndice_de_massa_corporal\n'''\n\nprint('\\nCalculo do IMC\\n')\nnome = str(input(f'Qual seu nome?\\n»»')).strip().title()\nidade = int(input(f'\\nQual a sua idade?\\n»»'))\npeso = float(input(f'\\nQual seu peso?\\n»»'))\nalt = float(input(f'\\nIndique sua altura: '))\nimc = peso / (alt*alt)\n\nif imc < 17:\n    print(f'{nome}, você com {idade} anos de idade está muito abaixo do peso ideal')\nelif 17 <= imc < 18.5:\n    print(f'{nome}, você com {idade} anos de idade está abaixo do peso')\nelif 18.5 <= imc < 25:\n    print(f'{nome}, você com {idade} anos de idade está com peso normal')\nelif 25 <= imc < 30:\n    print(f'{nome}, você com {idade} anos de idade está acima do peso')\nelif 30 <= imc < 35:\n    print(f'{nome}, você com {idade} anos de idade está com obesidade I')\nelif 35 <= imc < 40:\n    print(f'{nome}, você com {idade} anos de idade está com obesidade II (severa)')\nelif imc >= 40:\n    print(f'{nome}, você com {idade} anos de idade está com obesidade III (mórbida)')\n","repo_name":"JhonatanRian/Exercicios-projetosPequenos-Estudos","sub_path":"Exercicios_Curso/Variáveis_tipos_de_dados_estruturas_condicionais/exercicio_24.py","file_name":"exercicio_24.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"5389369481","text":"import tkinter as tk\nfrom tkinter import filedialog\nimport pickle\nimport torch\nfrom Condensed_Word import Condensed_word\n\n\nclass Condenser:\n    \n    def condense(self, loader, model_mode, model=None):\n        \n        #Condenses all words with the model given, if no model is given a filedialog is opened to select one\n        \n        device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n        \n        if model == None:\n            root = tk.Tk()\n            root.withdraw()\n            \n            model_path = filedialog.askopenfilename()\n            \n            with open(model_path, \"rb\") as fp: # Unpickling\n                model = pickle.load(fp)\n        \n        \n        list_of_representations = []\n        \n        print(\"Started condensing...\")\n        \n        eval_iter = iter(loader)\n        \n        with torch.no_grad():\n            \n            model = model.eval()\n            \n            for u in range(0, len(eval_iter)):\n                \n                if model_mode == '3DCNN':\n                    \n                    eval_item_data, eval_item_label, eval_indices, eval_char_ohe, eval_phoneme_ohe = next(eval_iter)\n                    \n                    eval_item_data = torch.stack(eval_item_data)\n                    \n                    eval_item_data = eval_item_data.unsqueeze(1)\n                    \n                    eval_phoneme_ohe_batch = torch.stack(eval_phoneme_ohe)\n                    \n                    representation,_,_,_ = model.encoder(eval_item_data.to(device).to(dtype=torch.float), eval_phoneme_ohe_batch.to(device))\n                \n                if model_mode == 'LSTM':\n                    \n                    eval_item_data, eval_item_label = next(eval_iter)\n                    \n                    eval_item_data = torch.stack(eval_item_data)\n                    \n                    eval_item_data = eval_item_data.unsqueeze(1)\n                    \n                    _, representation = model(eval_item_data.to(device).to(dtype=torch.float))\n                    \n                    representation = representation[0][0].squeeze().flatten()\n                \n                if model_mode == 'GRU':\n                    \n                    eval_item_data, eval_item_label, eval_item_phonemes = next(eval_iter)\n                    \n                    eval_item_data = torch.stack(eval_item_data)\n                    \n                    eval_item_data = eval_item_data.unsqueeze(1)\n                    \n                    eval_item_phonemes = 
torch.stack(eval_item_phonemes)\n                    \n                    _, representation = model(eval_item_data.to(device).to(dtype=torch.float), eval_item_phonemes.to(device))\n                \n                if isinstance(representation, list):\n                    \n                    representation = representation[0]\n                \n                representation = representation.squeeze().flatten()\n                \n                condensed = Condensed_word(eval_item_label[0], representation.cpu().detach().numpy())\n                \n                list_of_representations.append(condensed)\n        \n        print(\"Finished condensing\")\n        \n        self.representations = list_of_representations\n        \n        return list_of_representations\n    \n    \n    def condense_and_evaluate(self, loader, model_mode, model=None):\n        \n        # condense() takes model_mode before model, so it is threaded through here\n        representations = self.condense(loader, model_mode, model)\n        \n        return self.evaluate(representations)\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    ","repo_name":"Master-Thesis-Eli-Stolwijk/Master-Thesis-Eli-Stolwijk-Code","sub_path":"Code/ANALYZER/Condenser.py","file_name":"Condenser.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72340548176","text":"import random\nimport sqlite3\nimport time\n\ndef keygen():\n    chars = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n    code = \"\"\n    length = len(chars)-1\n    codelen = random.randint(6,9) \n    for i in range(0,codelen):\n        idx = random.randint(0, length)\n        code += chars[idx]\n    now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n    return code,now\n\nif __name__ == \"__main__\":\n    code, now = keygen()\n    try:\n        conn = sqlite3.connect('db.sqlite3')\n        cursor = conn.cursor()\n        cursor.execute(\"INSERT INTO index_invitation (CODE, CREATETIME) VALUES ('{}','{}')\".format(code, now))\n        print(\"Create code: \"+code)\n    except:\n        print(\"Fail to create code\")\n    finally:\n        cursor.close()\n        conn.commit()\n        conn.close()\n","repo_name":"Fulululu/Graduation_Project","sub_path":"server/mysite/server_keygen.py","file_name":"server_keygen.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"43283044217","text":"from __future__ import annotations\n\nfrom pyschlage.user import User\n\n\ndef test_from_json(lock_users_json: list[dict]):\n    user = User(\n        name=\"asdf\",\n        email=\"asdf@asdf.com\",\n        user_id=\"user-uuid\",\n    )\n    assert User.from_json(lock_users_json[0]) == user\n\n\ndef test_from_json_no_name(lock_users_json: list[dict]):\n    for user_json in lock_users_json:\n        user_json.pop(\"friendlyName\")\n        assert User.from_json(user_json).name is None\n","repo_name":"dknowles2/pyschlage","sub_path":"tests/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"14"} +{"seq_id":"19618188853","text":"import argparse\nfrom deriva.core import ErmrestCatalog, AttrDict, get_credential\nimport deriva.core.ermrest_model as em\nfrom deriva.core.ermrest_config import tag as chaise_tags\nfrom deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args\n\ngroups = {}\n\ntable_name = 'ihm_model_representation'\n\nschema_name = 'PDB'\n\ncolumn_annotations = {'structure_id': {}, 'details': {}, 'id': {}, 'name': {}, 'Owner': {}}\n\ncolumn_comment = {\n    'structure_id': 'A reference to table entry.id.',\n    'details': 'type:text\\nAdditional details about the model representation.',\n    'id': 'type:int4\\nA unique identifier for the model representation.',\n    'name': 'type:text\\nName/brief 
description for the model representation.',\n 'Owner': 'Group that can update the record.'\n}\n\ncolumn_acls = {}\n\ncolumn_acl_bindings = {}\n\ncolumn_defs = [\n em.Column.define(\n 'structure_id',\n em.builtin_types['text'],\n nullok=False,\n comment=column_comment['structure_id'],\n ),\n em.Column.define('details', em.builtin_types['text'], comment=column_comment['details'],\n ),\n em.Column.define('id', em.builtin_types['int4'], nullok=False, comment=column_comment['id'],\n ),\n em.Column.define('name', em.builtin_types['text'], comment=column_comment['name'],\n ),\n em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],\n ),\n]\n\nvisible_columns = {\n '*': [\n 'RID', {\n 'source': [{\n 'outbound': ['PDB', 'ihm_model_representation_structure_id_fkey']\n }, 'RID'],\n 'comment': 'A reference to table entry.id.',\n 'markdown_name': 'structure id'\n }, 'id', 'name', 'details'\n ],\n 'entry': [\n {\n 'source': [{\n 'outbound': ['PDB', 'ihm_model_representation_structure_id_fkey']\n }, 'RID'],\n 'comment': 'A reference to table entry.id.',\n 'markdown_name': 'structure id'\n }, 'id', 'name', 'details'\n ],\n 'detailed': [\n 'RID', {\n 'source': [{\n 'outbound': ['PDB', 'ihm_model_representation_structure_id_fkey']\n }, 'RID'],\n 'comment': 'A reference to table entry.id.',\n 'markdown_name': 'structure id'\n }, 'id', 'name', 'details', ['PDB', 'ihm_model_representation_RCB_fkey'],\n ['PDB', 'ihm_model_representation_RMB_fkey'], 'RCT', 'RMT',\n ['PDB', 'ihm_model_representation_Owner_fkey']\n ]\n}\n\nvisible_foreign_keys = {\n 'filter': 'detailed',\n 'detailed': [\n ['PDB', 'ihm_model_representation_details_representation_id_fkey'],\n ['PDB', 'ihm_model_list_representation_id_fkey']\n ]\n}\n\ntable_display = {'row_name': {'row_markdown_pattern': '{{{id}}}'}}\n\ntable_annotations = {\n chaise_tags.table_display: table_display,\n chaise_tags.visible_columns: visible_columns,\n chaise_tags.visible_foreign_keys: visible_foreign_keys,\n}\n\ntable_comment = 'List of model representations used'\n\ntable_acls = {}\n\ntable_acl_bindings = {}\n\nkey_defs = [\n em.Key.define(\n ['id', 'structure_id'],\n constraint_names=[['PDB', 'ihm_model_representation_primary_key']],\n ),\n em.Key.define(['RID'], constraint_names=[['PDB', 'ihm_model_representation_RIDkey1']],\n ),\n]\n\nfkey_defs = [\n em.ForeignKey.define(\n ['RMB'],\n 'public',\n 'ERMrest_Client', ['ID'],\n constraint_names=[['PDB', 'ihm_model_representation_RMB_fkey']],\n ),\n em.ForeignKey.define(\n ['RCB'],\n 'public',\n 'ERMrest_Client', ['ID'],\n constraint_names=[['PDB', 'ihm_model_representation_RCB_fkey']],\n ),\n]\n\ntable_def = em.Table.define(\n table_name,\n column_defs=column_defs,\n key_defs=key_defs,\n fkey_defs=fkey_defs,\n annotations=table_annotations,\n acls=table_acls,\n acl_bindings=table_acl_bindings,\n comment=table_comment,\n provide_system=True\n)\n\n\ndef main(catalog, mode, replace=False, really=False):\n updater = CatalogUpdater(catalog)\n table_def['column_annotations'] = column_annotations\n table_def['column_comment'] = column_comment\n updater.update_table(mode, schema_name, table_def, replace=replace, really=really)\n\n\nif __name__ == \"__main__\":\n host = 'pdb.isrd.isi.edu'\n catalog_id = 50\n mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)\n catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))\n main(catalog, mode, 
replace)\n","repo_name":"informatics-isi-edu/protein-database","sub_path":"deriva-annotations/catalog50/catalog-configs/PDB/ihm_model_representation.py","file_name":"ihm_model_representation.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"32012576453","text":"import mysql.connector\nimport matplotlib.pyplot as plt\nimport numpy as np\ncon=mysql.connector.connect(user='root', password='harish12345678',host='Localhost',database='calorietracker')\ncursor=con.cursor()\nprint(\"You are hands on with a basic meal and calorie tracker, stay fit!\")\nweight=float(input(\"Please input youre weight in KG\"))\nheight=float(input(\"Please enter youre height in metres\"))\nbmi=(weight/height**2)\nprint(\"Your BMI is\",round(bmi,1),\"compare it with the chart below\")\ncursor.execute(\"select * from bmi\")\ndata=cursor.fetchall()\nfor row in data:\n print(row)\nibmi=float(input(\"Enter a BMI in which you would like to see yourself in\"))\ndbmi=bmi-ibmi\nprint(\"You are\",round(dbmi,1),\"points away from your ideal BMI\")\ntarget=int(input(\"Enter your daily calorie target\"))\nprint(\"Here is your available food database:\")\ncursor.execute(\"select * from meal\")\ndata=cursor.fetchall()\nfor row in data:\n print (row)\nflag1=1\nwhile(flag1==1):\n print(\"If you wish to add more items, type yes\")\n print(\"If you wish to delete existing item, type delete\")\n print(\"If you wish to make no changes, type no\")\n ch=str(input(\" \"))\n if((ch!='yes') and (ch!='no') and (ch!='delete')):\n print(\"Wrong input, try again\")\n continue\n elif(ch=='no'):\n break\n elif(ch=='yes'):\n meal=str(input(\"Enter food name\"))\n cal=int(input(\"Enter calories\"))\n sql = \"INSERT INTO meal(name,calories) VALUES (%s, %s)\"\n val = (meal,cal)\n cursor.execute(sql, val)\n con.commit()\n print(\"Food added!\")\n ch2=str(input(\"Type yes to make more changes. 
Type no to stop\"))\n if((ch2!='yes') and (ch2!='no')):\n print(\"Wrong input, exiting\")\n break\n elif(ch2=='yes'):\n flag1=1\n else:\n flag1=0\n else:\n delete=str(input(\"Enter name of meal you want to delete\"))\n name=delete\n sql=\"delete from meal where name=%s\"\n cursor.execute(sql,(name,))\n con.commit()\n print(\"Food deleted!\")\nbfcal=0\nluncal=0\nsnacal=0\ndincal=0\ntotalcal=0\nprint(\"Let's start your entries\")\nprint(\"press n if this is a new day\")\nprint(\"Press anything to continue\")\nnew=str(input(\"\"))\nif(new=='n'):\n cursor.execute(\"delete from breakfast\");\n cursor.execute(\"delete from lunch\");\n cursor.execute(\"delete from dinner\");\n cursor.execute(\"delete from snacks\");\nflag2=1\nwhile(flag2==1):\n cursor.execute(\"select * from meal\")\n data=cursor.fetchall()\n for row in data:\n print (row)\n bf=str(input(\"Enter food item\"))\n sql1=\"select name from meal where name=%s\"\n sql2=\"select calories from meal where name=%s\"\n cursor.execute(sql1,(bf,))\n data1=cursor.fetchone()\n cursor.execute(sql2,(bf,))\n data2=cursor.fetchone()\n print(\"press 1 to add to breakfast\")\n print(\"press 2 to add to lunch\")\n print(\"press 3 to add to snacks\")\n print(\"press 4 to add to dinner\")\n ch3=int(input(\" \"))\n if(ch3==1):\n sql = \"INSERT INTO breakfast(food,calories) VALUES (%s, %s)\"\n val = (data1[0],data2[0])\n cursor.execute(sql, val)\n con.commit()\n bfcal=bfcal+data2[0]\n totalcal=totalcal+data2[0]\n print(\"calories left for the day:\",(target-totalcal))\n elif(ch3==2):\n sql = \"INSERT INTO lunch(food,calories) VALUES (%s, %s)\"\n val = (data1[0],data2[0])\n cursor.execute(sql, val)\n con.commit()\n luncal=luncal+data2[0]\n totalcal=totalcal+data2[0]\n print(\"calories left for the day:\",(target-totalcal))\n elif(ch3==3):\n sql = \"INSERT INTO snacks(food,calories) VALUES (%s, %s)\"\n val = (data1[0],data2[0])\n cursor.execute(sql, val)\n con.commit()\n snacal=snacal+data2[0]\n totalcal=totalcal+data2[0]\n print(\"calories left for the day:\",(target-totalcal))\n elif(ch3==4):\n sql = \"INSERT INTO dinner(food,calories) VALUES (%s, %s)\"\n val = (data1[0],data2[0])\n cursor.execute(sql, val)\n con.commit()\n dincal=dincal+data2[0]\n totalcal=totalcal+data2[0]\n print(\"calories left for the day:\",(target-totalcal))\n else:\n print(\"Wrong input\")\n print(\"Type yes to continue adding\")\n print(\"Type anything to stop\")\n ch4=str(input(\" \"))\n if(ch4=='yes'):\n flag2=1\n else:\n flag2=0\nprint(\"Calories consumed for breakfast:\",bfcal)\nprint(\"Calories consumed for lunch:\",luncal)\nprint(\"Calories consumed for snacks:\",snacal)\nprint(\"Calories consumed for dinner:\",dincal)\ny=np.array([bfcal,luncal,snacal,dincal])\nmylabels = [\"Breakfast\", \"Lunch\", \"Snacks\", \"Dinner\"]\nplt.pie(y, labels=mylabels, autopct='%1.1f%%', explode=[0,0,0,0], shadow=True, startangle=90)\nplt.axis('equal')\nplt.show()\n \n \n \n \n\n\n\n \n \n\n \n\n\n","repo_name":"rulezcasa/Calorie-tracker","sub_path":"Calorietrack.py","file_name":"Calorietrack.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"39471674859","text":"__author__ = 'neil'\n\nimport math,pill,random,socket,json,clientsocket,mywindow, threading\n\nimport pyglet,physicalobject,player, constants, time,repeatedtimer\n\n\n\nBOTTOM_RIGHT_OF_SCREEN = [100,0]\nSTART_OF_GRID = [0,0]\nGRID_WIDTH = 30\nLEFT = False\nRIGHT = False\nUP = False\nDOWN = False\n\nvelocity = [20,20]\nmag = 
0.0\ntotal_moved = 0\nSPEED = 30\ncurrent_frame = 0\nwindow = None\n\n\ndef update():\n global window,player\n while True:\n if window != None:\n temp = player.get_pos()\n #print(\"in blob update, player pos is \",temp)\n if temp != None:\n window.update_player_position(temp)\n temp = window.get_velocity() #[velocity[0] ,velocity[1],mag]\n player.add_frame(temp)\n time.sleep(1/constants.FRAME_RATE)\n #player.send_frames()\n\n\ndef start_client():\n global clientsocket, ip_address\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n print(\"trying to connect to \" )\n clientsocket.connect((\"localhost\", 5554))\n except Exception as e:\n # print(e.__class__)\n # print(type(e))\n print(\"failed\", type(e))\n print (\"connected\")\n\nbatch = pyglet.graphics.Batch()\nbatch2 = pyglet.graphics.Batch()\nplayer_img = pyglet.image.load('blue flag.png')\nthings = []\npills = []\nplayer_sprite = pyglet.sprite.Sprite(player_img,200,200,batch = batch)\nplayer_sprite.scale = 0.55\nfor i in range(0,constants.NUM_PILLS):\n my_pill = pill.Pill(random.randrange(1200),random.randrange(1200),batch2)\n pills.append(my_pill)\nplayer = player.Player()\nplayer.start()\n#print(\"player thread started\")\nplayer.join(0.1)\npyglet.gl.glClearColor(0, 1, 1, 1)\nt = threading.Thread(target = update)\nt.start()\nprint(\"now here\")\nwindow =mywindow.myWindow(constants.FRAME_RATE,batch,batch2)\nwindow.run()\n","repo_name":"neeeel/agarioclone","sub_path":"blob.py","file_name":"blob.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"29285857863","text":"\"\"\"\nim2col 함수를 이용한 convolution 구현\n\"\"\"\nimport numpy as np\nfrom common.util import im2col\n\n\nif __name__ == '__main__':\n np.random.seed(115)\n\n # p.238 그림 7-11\n # 가상의 이미지 데이터 1개\n # (number of image, color, height, width)\n x = np.random.randint(10, size=(1, 3, 7, 7))\n print(x, ', x.shape:', x.shape)\n\n # (3, 5, 5) 크기의 필터 1개 생성\n # (fn, c, fh, fw) = number of filter, color-dept, filter height, filter width)\n w = np.random.randint(5, size=(1, 3, 5, 5))\n print(w, ', w.shape:', w.shape)\n\n # 필터를 stride=1, padding=0으로 적용하면서 convolution 연산\n # 필터를 1차원으로 펼침 -> c *fh*fw = 3*5*5 = 75\n # 이���지 데이터 x를 함수 im2col에 전달\n x_col = im2col(x, filter_h=5, filter_w=5, stride=1, pad=0)\n print('x_col:', x_col.shape)\\\n\n # 4차원 배열인 필터 w를 2차원 배열로 변환\n w_col = w.reshape(1, -1) # row의 개수가 1, 모든 원소들은 column인 모양으로 변환\n print('w_col:', w_col.shape)\n\n w_col = w_col.T\n print('w_col.T:', w_col.shape)\n\n # 2차원으로 변환된 이미지와 필터를 행렬 dot product 연산\n out = x_col.dot(w_col)\n print('out:', out.shape)\n\n # dot product의 결과를 (fn, oh, ow, ?) 
형태로 reshape\n out = out.reshape(1, 3, 3, -1)\n print('out:', out.shape) # (1, 3, 3, 1) = (fn, oh, ow, c)\n out = out.transpose(0, 3, 1, 2)\n print('out:', out.shape) # ~> (fn, c, oh, ow)\n\n # w.shape (10, 3, 5, 5) 생성\n x = np.random.randint(10, size=(1, 3, 7, 7))\n x_col = im2col(x, filter_h=5, filter_w=5, stride=1, pad=0)\n\n w = np.random.randint(10, size=(10, 3, 5, 5))\n w_col = w.reshape(10, 75)\n # w를 변형: (fn, c*fh*fw)\n\n # x_col @ w.T 의 shape 확인\n out = x_col.dot(w_col.T)\n print('out.shape1:', out.shape)\n\n # dot 연산의 결과를 reshape:\n out = out.reshape(10, 3, 3, -1)\n print('out.shape2:', out.shape)\n\n out = out.transpose(0, 3, 1, 2)\n print('out.shape3:', out.shape)\n\n print('-----------------------')\n # p.239 그림 7-13, p.244 그림 7-19 참조\n # (3, 7, 7) shape의 이미지 12개를 난수로 생성 -> (n, c, h, w) = (12, 3, 7, 7)\n x = np.random.randint(10, size=(12, 3, 7, 7))\n\n # (3, 5, 5) shape의 필터 10개 난수로 생성 -> (fn, c, fh, ow) = (10, 3, 5, 5)\n w = np.random.randint(10, size=(10, 3, 5, 5))\n\n\n # stride=1, padding=0일 때, output height, output width =?\n # 3, 3\n\n # 이미지 데이터 x를 im2col 함수를 사용해서 x_col로 변환 -> shape?\n x_col = im2col(x, filter_h=5, filter_w=5, stride=1, pad=0)\n print('x_col.shape:', x_col.shape)\n\n # 필터 w를 x_col과 dot 연산을 할 수 있도록 reshape & transpose: w_col -> shape?\n w_col = w.reshape(10, 75)\n print('w_col.shape:', w_col.shape)\n w_col_T = w_col.T\n # x_col @ w_col\n out=x_col.dot(w_col_T)\n print('result.shape:', out.shape)\n # @ 연산의 결과를 reshape & transpose\n out = out.reshape(10, 3, 3, -1)\n out = out.transpose(0, 3, 1, 2)\n print('out.reshape', out.shape)\n\n\n","repo_name":"i-hs/lab_dl","sub_path":"ch07/ex09_convconv.py","file_name":"ex09_convconv.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"23219665760","text":"# Mission to Mars jupyter notebook file\n# Jonathan Surgeon 6/7/21\n\n#import dependencies\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pymongo\nimport pandas as pd\nfrom splinter import Browser\nfrom webdriver_manager.chrome import ChromeDriverManager \nimport time\nfrom pprint import pprint\ndef scrape():\n \"'This function scrapes 4 different sites using splinter and beautiful soup, mainly'\"\n \"'Returns a dictionary'\"\n ###################################\n # # Nasa Mars News\n ###################################\n\n # create flag for use in while loop- necessary because visitting url \n # doesn't always seem to populate information\n flag = 0\n while flag == 0:\n # try to visit url and grab relevant info via BeautifulSoup\n try:\n url = \"https://redplanetscience.com\"\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path,headless=False)\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html,\"html.parser\")\n titles = soup.find_all(\"div\", class_=\"content_title\")\n news_title = titles[0].text\n paragraphs = soup.find_all(\"div\", class_=\"article_teaser_body\")\n news_p = paragraphs[0].text\n\n # if successful, set flag to 1 to leave while loop\n flag = 1\n except:\n \n # if unsuccessful, assign placeholder text to variables and leave flag set to 0\n news_title = \"No title scraped, try again\"\n news_p = \"No paragraph scraped, try again\"\n # quit browswer when successful\n browser.quit()\n ###################################\n # # JPL Mars Space Images - Featured Image\n ###################################\n\n # splinter setup\n executable_path = 
{'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path,headless=False)\n url = \"https://spaceimages-mars.com/\"\n browser.visit(url)\n # click \"FULL IMAGE\" button \n browser.links.find_by_partial_text(\"FULL IMAGE\").click()\n\n # save browser html in variable\n html = browser.html\n\n # create BeautifulSoup object\n soup = BeautifulSoup(html,'html.parser')\n # use soup object to find specific image, save the image's url\n image_result = soup.find_all(\"img\", {\"class\":\"fancybox-image\"})\n \n image = image_result[0][\"src\"]\n # create full url\n surface_url = url + image\n \n # quit the browser\n browser.quit()\n\n ###################################\n # # Mars Facts\n ###################################\n\n # set url appropriately\n url = \"https://galaxyfacts-mars.com/\"\n\n # read tables from url\n tables = pd.read_html(url)\n\n # convert correct table to dataframe\n table_df = pd.DataFrame(tables[1])\n # reassign column names\n table_df.columns = [\"attribute\",\"value\"]\n # set index of dataframe\n table_df = table_df.set_index(\"attribute\")\n\n # convert dataframe to html table\n html_table = table_df.to_html()\n\n ###################################\n # # Mars Hemispheres\n ###################################\n\n # splinter setup\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path,headless=False)\n url = \"https://marshemispheres.com/\"\n browser.visit(url)\n\n hemispheres = [\"Cerberus\", \"Schiaparelli\", \"Syrtis Major\", \"Valles Marineris\"]\n hemi_entries = []\n\n for hemi in hemispheres:\n # click into hemisphere link\n browser.links.find_by_partial_text(hemi + \" Hemisphere Enhanced\").click()\n # pull html, soupify\n html = browser.html\n soup = BeautifulSoup(html,'html.parser')\n # find image url\n li = soup.find_all('li')\n a= li[0].find('a')\n image_url = url+a['href']\n \n # add image url to dictionary 'hemi_entries'\n hemi_entries.append({\"title\": hemi + \" Hemisphere Enhanced\", \"img_url\": image_url})\n\n # go back to original webpage\n browser.links.find_by_partial_text(\"Back\").click()\n \n browser.quit()\n\n ###################################\n # create final dictionary to return\n ###################################\n\n mars_dictionary = {\n \"news_title\": news_title,\n \"news_paragraph\" : news_p,\n \"surface_url\" : surface_url,\n \"fact_table\" : html_table,\n \"cerberus_title\" : hemi_entries[0]['title'],\n \"cerberus_img\" : hemi_entries[0]['img_url'],\n \"schiap_title\" : hemi_entries[1]['title'],\n \"schiap_img\" : hemi_entries[1]['img_url'],\n \"syrtis_title\" : hemi_entries[2]['title'],\n \"syrtis_img\" : hemi_entries[2]['img_url'],\n \"valles_title\" : hemi_entries[3]['title'],\n \"valles_img\" : hemi_entries[3]['img_url'],\n }\n \n # return mars_dictionary\n return(mars_dictionary)\n","repo_name":"JSurgeon/Mars_Webscrape","sub_path":"Missions_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"10315698329","text":"def calculate_mean(data):\n \"\"\"\n Return the mean of a python list\n\n If data is empty raise a ValueError\n\n :param data: a list of numbers\n :return: the mean of the list\n :rtype: float\n :raise ValueError:\n \"\"\"\n if len(data)== 0:\n raise ValueError(\"Data cannot be empty\")\n else:\n\n sum_list = sum(data)\n len_list = len(data)\n mean_list = 
round(float((sum_list/ len_list)),2)\n\n print(mean_list)\n return mean_list\n\n\ncalculate_mean(data=[4,5,34,5])\n ","repo_name":"teefats/mostly_complete","sub_path":"calculate_mean.py","file_name":"calculate_mean.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"9951644486","text":"from opal_common.authentication.deps import JWTAuthenticator\nfrom opal_common.authentication.types import JWTClaims\nfrom opal_common.authentication.verifier import Unauthorized\nfrom opal_common.schemas.data import DataUpdate\nfrom opal_common.schemas.security import PeerType\n\n\ndef require_peer_type(\n authenticator: JWTAuthenticator, claims: JWTClaims, required_type: PeerType\n):\n if not authenticator.enabled:\n return\n\n peer_type = claims.get(\"peer_type\", None)\n if peer_type is None:\n raise Unauthorized(description=\"Missing 'peer_type' claim for OPAL jwt token\")\n try:\n type = PeerType(peer_type)\n except ValueError:\n raise Unauthorized(\n description=f\"Invalid 'peer_type' claim for OPAL jwt token: {peer_type}\"\n )\n\n if type != required_type:\n raise Unauthorized(\n description=f\"Incorrect 'peer_type' claim for OPAL jwt token: {str(type)}, expected: {str(required_type)}\"\n )\n\n\ndef restrict_optional_topics_to_publish(\n authenticator: JWTAuthenticator, claims: JWTClaims, update: DataUpdate\n):\n if not authenticator.enabled:\n return\n\n if \"permitted_topics\" not in claims:\n return\n\n for entry in update.entries:\n unauthorized_topics = set(entry.topics).difference(claims[\"permitted_topics\"])\n if unauthorized_topics:\n raise Unauthorized(\n description=f\"Invalid 'topics' to publish {unauthorized_topics}\"\n )\n","repo_name":"permitio/opal","sub_path":"packages/opal-common/opal_common/authentication/authz.py","file_name":"authz.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1892,"dataset":"github-code","pt":"17"} +{"seq_id":"34004707018","text":"\"\"\"\n Author: Chris (https://github.com/machrisaa), modified by Mohamed K. 
Eid (mohamedkeid@gmail.com)\n Description: tensorflow implemention of VGG 16 and VGG 19 based on tensorflow-vgg16\n\"\"\"\n\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport inspect\nimport urllib.request\n\nVGG_MEAN = [103.939, 116.779, 123.68]\ndata = None\ndir_path = os.path.dirname(os.path.realpath(__file__))\nweights_name = dir_path + \"/../lib/descriptor/vgg16.npy\"\nweights_url = \"https://www.dropbox.com/s/gjtfdngpziph36c/vgg16.npy?dl=1\"\n\n\nclass Vgg16:\n def __init__(self, vgg16_npy_path=None):\n global data\n\n if vgg16_npy_path is None:\n path = inspect.getfile(Vgg16)\n path = os.path.abspath(os.path.join(path, os.pardir))\n path = os.path.join(path, weights_name)\n\n if os.path.exists(path):\n vgg16_npy_path = path\n else:\n print(\"VGG16 weights were not found in the project directory\")\n print(\"Please download the numpy weights file and place it in the 'lib/descriptor' directory\")\n print(\"Download link: https://mega.nz/#!YU1FWJrA!O1ywiCS2IiOlUCtCpI6HTJOMrneN-Qdv3ywQP5poecM\")\n print(\"Exiting the program..\")\n exit(1)\n\n if data is None:\n data = np.load(vgg16_npy_path, encoding='latin1')\n self.data_dict = data.item()\n print(\"VGG net weights loaded\")\n\n else:\n self.data_dict = data.item()\n\n def build(self, rgb, shape):\n rgb_scaled = rgb * 255.0\n num_channels = shape[2]\n channel_shape = shape\n channel_shape[2] = 1\n\n # Convert RGB to BGR\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n\n assert red.get_shape().as_list()[1:] == channel_shape\n assert green.get_shape().as_list()[1:] == channel_shape\n assert blue.get_shape().as_list()[1:] == channel_shape\n\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n\n shape[2] = num_channels\n assert bgr.get_shape().as_list()[1:] == shape\n\n self.conv1_1 = self.__conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.__conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.__avg_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.__conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.__conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.__avg_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.__conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.__conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.__conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.__avg_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.__conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.__conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.__conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.__avg_pool(self.conv4_3, 'pool4')\n\n self.conv5_1 = self.__conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.__conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.__conv_layer(self.conv5_2, \"conv5_3\")\n\n self.data_dict = None\n\n def __avg_pool(self, bottom, name):\n return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def __max_pool(self, bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def __conv_layer(self, bottom, name):\n with tf.variable_scope(name):\n filt = self.__get_conv_filter(name)\n\n conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')\n\n conv_biases = self.__get_bias(name)\n bias = tf.nn.bias_add(conv, conv_biases)\n\n relu = tf.nn.relu(bias)\n return relu\n\n def __get_conv_filter(self, name):\n return 
tf.constant(self.data_dict[name][0], name=\"filter\")\n\n    def __get_bias(self, name):\n        return tf.constant(self.data_dict[name][1], name=\"biases\")\n\n    def __get_fc_weight(self, name):\n        return tf.constant(self.data_dict[name][0], name=\"weights\")\n","repo_name":"mkeid/Feed-Forward-Style-Transfer","sub_path":"src/custom_vgg16.py","file_name":"custom_vgg16.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"17"} +{"seq_id":"2326239614","text":"import json\ndef text_write():\n    with open(r'Wite_text.txt', 'w', encoding='utf-8') as ft:\n        while True:\n            dig=input('Enter a line to write; to quit, enter nothing and press Enter: ')\n            try:\n                if dig != '':\n                    ft.writelines(f'{dig}\\n')\n                elif dig == '':\n                    return\n            except ValueError:\n                print('Invalid input value')\n\nprint('*' * 50)\nprint('Task 1')\ntext_write()\n\nprint('*' * 50)\nprint('Task 2')\nwith open(r'words.txt', 'r', encoding='utf-8') as w:\n    print(f'File: {w.name}')\n    text=w.readlines()\n    cnt=len(text)\n    words=0\n    for i in text:\n        s=i.split()\n        word=len(s)\n        words+=word\n    print(f'Number of lines: {cnt}')\n    print(f'Number of words: {words}')\n\nprint('*' * 50)\nprint('Task 3')\ntry:\n    with open(r'Info_empl.txt', 'r', encoding='utf-8') as w:\n        print(f'File: {w.name}')\n        text = w.readlines()\n        cnt = len(text)\n        summa = 0\n        for i in text:\n            s = i.split()\n            pay = [s[p] for p in range(1, len(s), 2)]\n            empl = [s[e] for e in range(0, len(s), 2)]\n            pay = float(pay[0])\n            summa += pay\n            if pay < 20000:\n                print(f'Employee with a salary below 20000: {empl[0]}')\n        print(f'Average salary: {round(summa / cnt, 2)}')\nexcept ValueError:\n    print(f'Please check that the data in the file is valid')\n\nprint('*' * 50)\nprint('Task 4')\n# maps English number words to their Russian equivalents (the replacement data used by this exercise)\ndi= dict(One='Один', Two='Два', Three='Три', Four='Четыре', Five='Пять')\nkey = list(di.keys())\ntry:\n    with open(r'number.txt', 'r', encoding='utf-8') as num:\n        print(f'File: {num.name}')\n        text = num.readlines()\n        cnt = len(text)\n        with open(r'number1.txt', 'w', encoding='utf-8') as num3:\n            for i in text:\n                s = i.split()\n                s1 = str(s[0])\n                for k in key:\n                    if k.lower()==s1.lower():\n                        s[0]=di[k]\n                with open(r'number1.txt', 'a', encoding='utf-8') as num2:\n                    j=' '.join(s)\n                    num2.writelines(f'{j}\\n')\n        print(f'New file: {num2.name}')\nexcept ValueError:\n    print(f'Please check that the data in the file is valid')\n\nprint('*' * 50)\nprint('Task 5')\ntry:\n    with open(r'sum_num.txt', 'w+', encoding='utf-8') as s:\n        print(f'File: {s.name}')\n        new = [i for i in range(1, 15) if i % 2 == 0]\n        r=map(str,new)\n        j=' '.join(r)\n        print(f'Numbers to be written: {j}')\n        s.writelines(f'{j}\\n')\n        s.seek(0)\n        text = s.readlines()\n        summa=0\n        for i in text:\n            nums = map(int,i.split())\n            for p in nums:\n                summa+=p\n        print(f'File written: {s.name}')\n        print(f'Sum of the numbers in the file: {summa}')\nexcept ValueError:\n    print(f'Please check that the data in the file is valid')\n\nprint('*' * 50)\nprint('Task 6')\ntry:\n    with open(r'Hour_lesson.txt', 'r', encoding='utf-8') as h:\n        di={}\n        print(f'File: {h.name}')\n        text = h.readlines()\n        for i in text:\n            sp = i.split(':')\n            print(f'Lines: {sp}')\n            h1=str(sp[1]).split(')')\n            h2=[''.join(filter(str.isdigit, num)) for num in h1]\n            h2.remove('')\n            summa = 0\n            for n in h2:\n                nums = map(int, n.split())\n                for p in nums:\n                    summa += p\n            di[sp[0]]=summa\n        print(f'Dictionary: {di}')\nexcept ValueError:\n    print(f'Please check that the data in the file is valid')\n\nprint('*' * 50)\nprint('Task 7')\ntry:\n    with open(r'firms.txt', 'r', encoding='utf-8') as firm:\n        profit={}\n        avg_profit = {}\n        loss={}\n        avg=0\n        cnt=0\n        summa = 0\n        print(f'File: {firm.name}')\n        text = firm.readlines()\n        for i in text:\n            pr = i.split()\n            diff=int(pr[2])-int(pr[3])\n            if diff>0:\n                profit[pr[0]]=diff\n                cnt+=1\n                summa+=diff\n                avg=(summa)/cnt\n                avg_profit['average_profit']=round(avg,2)\n            else:\n                loss[pr[0]]=diff\n        new=[profit, avg_profit, loss]\n        print(f'Dictionary: {new}')\n        with open(r'avg_firms.json', 'w', encoding='utf-8') as firm2:\n            json.dump(new,firm2)\nexcept ValueError:\n    print(f'Please check that the data in the file is valid')\n","repo_name":"KhafizArtur/Python","sub_path":"lesson_5.py","file_name":"lesson_5.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"2971503365","text":"\"F Strings\" \"Everything gets converted to text\"\n\nnombre = 'Ramiro'\n\nbienvenida = f'Hello {nombre} how are you?' # concatenation\n\n# del(bienvenida)  with this, bienvenida would no longer be declared, so it would raise an error\n\nprint('Ramiro' not in bienvenida) # MEMBERSHIP OPERATORS (in / not in)\n \n\n\n","repo_name":"ramiandino/Python","sub_path":"Variables/Variables.py","file_name":"Variables.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"37969347377","text":"# -*- coding: UTF-8 -*-\n\nimport os\n\nimport kikimr.public.sdk.python.client as ydb\n\nimport model\nimport queries\n\n\nclass SeriesRepository(object):\n    _table_description = (\n        ydb.TableDescription()\n        .with_column(ydb.Column(\"series_id\", ydb.OptionalType(ydb.PrimitiveType.Uint64)))\n        .with_column(ydb.Column(\"title\", ydb.OptionalType(ydb.PrimitiveType.Utf8)))\n        .with_column(ydb.Column(\"series_info\", ydb.OptionalType(ydb.PrimitiveType.Utf8)))\n        .with_column(ydb.Column(\"release_date\", ydb.OptionalType(ydb.PrimitiveType.Uint32)))\n        .with_column(ydb.Column(\"views\", ydb.OptionalType(ydb.PrimitiveType.Uint64)))\n        .with_primary_keys(\"series_id\")\n    )\n    _table_rev_index_description = (\n        ydb.TableDescription()\n        .with_column(ydb.Column(\"rev_views\", ydb.OptionalType(ydb.PrimitiveType.Uint64)))\n        .with_column(ydb.Column(\"series_id\", ydb.OptionalType(ydb.PrimitiveType.Uint64)))\n        .with_primary_keys(\"rev_views\", \"series_id\")\n    )\n    _table_name = \"series\"\n    _table_rev_views_name = \"series_rev_views\"\n\n    def __init__(self, ydb_session):\n        \"\"\"\n        :type ydb_session: ydb.YDBCachedSession\n        \"\"\"\n        super(SeriesRepository, self).__init__()\n        self._session = ydb_session\n\n    def create_tables(self):\n        self._session.session.create_table(\n            os.path.join(self._session.table_prefix, self._table_name),\n            self._table_description\n        )\n        self._session.session.create_table(\n            os.path.join(self._session.table_prefix, self._table_rev_views_name),\n            self._table_rev_index_description\n        )\n\n    def drop_tables(self):\n        self._session.session.drop_table(os.path.join(self._session.table_prefix, self._table_name))\n        self._session.session.drop_table(os.path.join(self._session.table_prefix, self._table_rev_views_name))\n\n    def insert(self, series):\n        \"\"\"\n        :type series: Series\n        \"\"\"\n        self._session.session.transaction(ydb.SerializableReadWrite()).execute(\n            self._session.queries[queries.INSERT_QUERY], {\n                \"$seriesId\": series.series_id,\n                \"$title\": series.title,\n                \"$seriesInfo\": series.series_info,\n                \"$releaseDate\": model.to_days(series.release_date),\n                \"$views\": 
series.views,\n },\n commit_tx=True\n )\n\n def delete(self, series_id):\n \"\"\"\n :type series_id: int\n \"\"\"\n result_sets = self._session.session.transaction(ydb.SerializableReadWrite()).execute(\n self._session.queries[queries.DELETE_QUERY], {\n \"$seriesId\": series_id,\n },\n commit_tx=True\n )\n if len(result_sets[0].rows) < 1:\n raise RuntimeError(\"Query count was not returned\")\n return result_sets[0].rows[0].cnt\n\n def update_views(self, series_id, new_views):\n \"\"\"\n :type series_id: int\n :type new_views: int\n \"\"\"\n tx = self._session.session.transaction(ydb.SerializableReadWrite()).begin()\n result_sets = tx.execute(\n self._session.queries[queries.UPDATE_VIEWS_QUERY], {\n \"$seriesId\": series_id,\n \"$newViews\": new_views,\n },\n commit_tx=True\n )\n if len(result_sets[0].rows) < 1:\n raise RuntimeError(\"Query count was not returned\")\n return result_sets[0].rows[0].cnt\n\n def find_by_id(self, series_id):\n \"\"\"\n :type series_id: int\n :rtype: Series or None\n \"\"\"\n tx = self._session.session.transaction(ydb.SerializableReadWrite()).begin()\n result_sets = tx.execute(\n self._session.queries[queries.FIND_BY_ID_QUERY], {\n \"$seriesId\": series_id,\n },\n commit_tx=True\n )\n if len(result_sets[0].rows) < 1:\n return None\n return self._extract_series(result_sets[0].rows[0])\n\n def find_all(self, limit, last_series_id=None):\n \"\"\"\n :type limit: int\n :type last_series_id: int or None\n :rtype: list(Series)\n \"\"\"\n tx = self._session.session.transaction(ydb.SerializableReadWrite()).begin()\n if last_series_id is None:\n result_sets = tx.execute(\n self._session.queries[queries.FIND_ALL_QUERY], {\n \"$limit\": limit,\n },\n commit_tx=True\n )\n else:\n result_sets = tx.execute(\n self._session.queries[queries.FIND_ALL_NEXT_QUERY], {\n \"$limit\": limit,\n \"$lastSeriesId\": last_series_id\n\n },\n commit_tx=True\n )\n return [self._extract_series(row) for row in result_sets[0].rows]\n\n def find_most_viewed(self, limit, last_series_id=None, last_views=None):\n \"\"\"\n :type limit: int\n :type last_series_id: int or None\n :type last_views: int or None\n :rtype: list(Series)\n \"\"\"\n tx = self._session.session.transaction(ydb.SerializableReadWrite()).begin()\n if last_series_id is None or last_views is None:\n result_sets = tx.execute(\n self._session.queries[queries.FIND_MOST_VIEWED_QUERY], {\n \"$limit\": limit,\n },\n commit_tx=True\n )\n else:\n result_sets = tx.execute(\n self._session.queries[queries.FIND_MOST_VIEWED_NEXT_QUERY], {\n \"$limit\": limit,\n \"$lastSeriesId\": last_series_id,\n \"$lastViews\": last_views,\n\n },\n commit_tx=True\n )\n return [self._extract_series(row) for row in result_sets[0].rows]\n\n @staticmethod\n def _extract_series(row):\n \"\"\"\n :rtype: Series\n \"\"\"\n series = model.Series()\n series.series_id = row.series_id\n series.title = row.title\n series.series_info = row.series_info\n series.release_date = model.from_days(row.release_date)\n series.views = row.views\n return series\n","repo_name":"IIKovalenko/ydb-python-sdk","sub_path":"kikimr/public/sdk/python/examples/secondary_index/app/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"17"} +{"seq_id":"74627845464","text":"import asyncio\n\ndb = {}\naccepted_commands = [\"GET\", \"SET\", \"DEL\"]\n\n\nclass RedisServerProtocol(asyncio.Protocol):\n def connection_made(self, transport):\n self.transport = transport\n print(f\"Client 
{transport.get_extra_info('peername')} connected\")  # 'peername' is the client's address; 'sockname' is the server's own endpoint\n\n    def data_received(self, data):\n        cmd, key, val = self.parse_command(data.decode())\n\n        if cmd == \"GET\":\n            response = self.handle_get(key)\n            self.transport.write(response)\n            return response\n        elif cmd == \"SET\":\n            response = self.handle_set(key, val)\n            self.transport.write(response)\n            return response\n        elif cmd == \"DEL\":\n            response = self.handle_del(key)\n            self.transport.write(response)\n            return response\n        else:\n            self.transport.write((\"-ERR Unsupported command\\r\\n\").encode())\n            return (\"-ERR Unsupported command\\r\\n\").encode()\n\n    def parse_command(self, message):\n        try:\n            # chained assignment; tuple-unpacking \"nil\" would wrongly set cmd, key, val to 'n', 'i', 'l'\n            cmd = key = val = \"nil\"\n\n            message_data = message.split(\"\\r\\n\")\n            cmd = message_data[2].upper()\n\n            # Ignore the initial handshake command\n            if cmd == \"COMMAND\":\n                return \"nil\", \"nil\", \"nil\"\n\n            num_args = int(message_data[0][1:])\n\n            # We're only handling GET/SET/DEL with a valid amount of args\n            if cmd in accepted_commands and num_args >= 2:\n                key = message_data[4]\n\n                # Special care needed for SET\n                if cmd == \"SET\":\n                    if num_args < 3:\n                        # This command is invalid, not enough args\n                        return \"nil\", \"nil\", \"nil\"\n                    val = message_data[6]\n                    # Deal with different types of values\n                    # I'm aware of the character that denotes the data type, but the cli seems to only send strings, so here we are\n                    if val.isnumeric():\n                        val = int(val)\n                    elif val.upper() == \"TRUE\" or val.upper() == \"FALSE\":\n                        val = True if val.upper() == \"TRUE\" else False\n                    else:\n                        # At this point it must be a float or a string\n                        try:\n                            val = float(val)\n                        except:\n                            pass\n\n            return cmd, key, val\n        except Exception as e:\n            print(f\"Error parsing command: {e}\")\n            return \"nil\", \"nil\", \"nil\"\n\n    def handle_get(self, key):\n        try:\n            result = db.get(key)\n            if result is None:\n                response = (f\"$-1\\r\\n\").encode()\n            elif type(result) == bool:\n                response = (f\"+{result}\\r\\n\").encode()\n            elif isinstance(result, int):\n                response = (f\":{result}\\r\\n\").encode()\n            else:\n                response = (f\"+{result}\\r\\n\").encode()\n\n            return response\n        except:\n            print(\"Error - User supplied invalid key\")\n            return b\"$-1\\r\\n\"\n\n    def handle_set(self, key, val):\n        try:\n            db[key] = val\n            response = \"+OK\\r\\n\".encode()\n\n            return response\n        except Exception as e:\n            print(f\"Error saving kvp: {e}\")\n            return (\"-ERR error saving key value pair\\r\\n\").encode()\n\n    def handle_del(self, key):\n        try:\n            if key in db:\n                del db[key]\n                response = \"+OK\\r\\n\".encode()\n            else:\n                response = \"-ERR Invalid key\\r\\n\".encode()\n\n            return response\n        except Exception as e:\n            print(f\"Error deleting item: {e}\")\n            return (\"-ERR Invalid key\\r\\n\").encode()\n","repo_name":"Deadpoodle/RedisServerTest","sub_path":"src/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"21097237157","text":"import discord\nimport json\n\nTOKEN = \"\"\nPREFIX = '$'\n\nPATH_TO_JSON = \"./reaction_messages.json\"\n\nintents = discord.Intents.default()\nintents.message_content = True\n\nclient = discord.Client(intents=intents)\n\n@client.event\nasync def on_ready(): \n    print(\"Connected\")\n\n@client.event\nasync def on_message(message):\n\n    if message.author == client.user:\n        return\n    \n    if message.content.startswith(PREFIX) and message.author.get_role(884151381416615947) is not None:\n        split_message = message.content.split(\" \")\n        \n        if split_message[0] == PREFIX + 
\"add_role_message\":\n \n try:\n message_id = int(split_message[1])\n except:\n await message.channel.send(\"Please pass a message id.\")\n return\n\n try:\n role_id = int(split_message[2][3:-1])\n except:\n await message.channel.send(\"Please pass a role.\")\n return \n \n def check(reaction, user):\n return user.id == message.author.id\n\n reaction, _ = await client.wait_for(\"reaction_add\", timeout=200, check=check)\n\n if reaction is not None:\n\n with open(PATH_TO_JSON, \"r\") as file:\n message_list = json.loads(file)\n\n for message in message_list:\n if message[\"id\"] == message_id and message[\"reaction\"] == reaction.emoji:\n return\n\n message_list['Messages'].append({ \n 'id': message_id, \n 'role_id': role_id, \n 'reaction': reaction.emoji\n })\n\n with open(PATH_TO_JSON, \"w\") as file:\n file.write(json.dumps(message_list, indent = 4))\n \n else:\n await message.channel.send(\"Reaction Message could not be added.\")\n return\n \n await message.channel.send(f\"Message { message_id } has been added.\")\n return\n\n if split_message[0] == PREFIX + \"remove_role_message\":\n \n try:\n message_id = int(split_message[1])\n except:\n await message.channel.send(\"Please pass a message id.\")\n return\n\n try:\n role_id = split_message[2][3:-1]\n except:\n role_id = None\n\n flag = False\n\n for m in message_list['Messages']:\n if m['id'] == message_id:\n if role_id is None or role_id == m['role_id']:\n message_list['Messages'].remove(m)\n flag = True\n \n if flag:\n with open(PATH_TO_JSON, \"w\") as file:\n file.write(json.dumps(message_list, indent = 4))\n\n if role_id == None:\n await message.channel.send(f\"Message { message_id } has been removed.\")\n else:\n await message.channel.send(f\"Role <@&{ role_id }> has been removed from message { message_id }.\")\n\n \n await message.channel.send(f\"Message could not be removed.\")\n return\n\n if split_message[0] == PREFIX + \"list_role_messages\":\n with open(PATH_TO_JSON, \"r\") as file:\n await message.channel.send(f\"\"\"```json\n{ file.read() }```\"\"\")\n \n if split_message[0] == PREFIX + \"help\":\n await message.channel.send(f\"\"\"The prefix is currently set to: { PREFIX }\n\n**The following commands are available:**\n```{ PREFIX }add_role_message message_id role```\n```{ PREFIX }remove_role_message message_id [optional: role]```\n```{ PREFIX }list_role_messages```\n```{ PREFIX }help```\nIf you have any questions, ask TheSmilingTurtle.\"\"\")\n\n@client.event\nasync def on_raw_reaction_add(payload):\n\n with open(PATH_TO_JSON, \"r\") as file:\n message_list = json.loads(file)\n\n for message in message_list['Messages']:\n if message['id'] == payload.message_id and payload.emoji.name == message['reaction']:\n guild = await client.fetch_guild(payload.guild_id)\n member = await guild.fetch_member(payload.user_id)\n role = guild.get_role(message['role_id'])\n await member.add_roles(role)\n\n@client.event\nasync def on_raw_reaction_remove(payload):\n\n with open(PATH_TO_JSON, \"r\") as file:\n message_list = json.loads(file)\n\n for message in message_list['Messages']:\n if message['id'] == payload.message_id and payload.emoji.name == message['reaction']:\n guild = await client.fetch_guild(payload.guild_id)\n member = await guild.fetch_member(payload.user_id)\n role = guild.get_role(message['role_id'])\n await 
member.remove_roles(role)\n\nclient.run(TOKEN)\n","repo_name":"TheSmilingTurtle/Python","sub_path":"Experiments/MAVT_DiscordBot/mavt_bot.py","file_name":"mavt_bot.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"17"} +{"seq_id":"16830178928","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Date:2017-06-29 23:01:12\n# @Author:Bryan.Lan (bryan.lan@vengasz.com)\n# @Link:https://github.com/Bryan130\n\nimport pygame\nfrom pygame.sprite import Group\nfrom settings import Settings\nfrom ship import Ship\nimport game_functions as gf\n\n\ndef run_game():\n    '''\n    Initialize the game and create a screen object\n    '''\n    pygame.init()\n    ai_settings = Settings()\n    screen = pygame.display.set_mode((ai_settings.screen_width,\n                                      ai_settings.screen_height))\n    pygame.display.set_caption(\"Alien Invasion\")\n\n    # Create a ship, a bullet group and an alien group\n    ship = Ship(ai_settings, screen)\n    bullets = Group()\n    aliens = Group()\n\n    # Create the fleet of aliens\n    gf.create_fleet(ai_settings, screen, ship, aliens)\n\n    # Start the main game loop\n    while True:\n        # Watch for keyboard and mouse events\n        gf.check_events(ai_settings, screen, ship, bullets)\n        ship.update()\n        gf.update_bullets(bullets)\n        gf.update_screen(ai_settings, screen, ship, aliens, bullets)\n\n\nif __name__ == '__main__':\n    run_game()\n","repo_name":"Eric130vv/PythonProject","sub_path":"py3/firstGame/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"73618195223","text":"import numpy as np\nimport tensorflow as tf\n\n# Load the pre-trained models\nloaded_sbp_model = tf.keras.models.load_model(\"model/sbp_model.keras\")\nloaded_dbp_model = tf.keras.models.load_model(\"model/dbp_model.keras\")\n\ndef predict_blood_pressure(age, weight, height, temp, heart_rate, spo2):\n    # Create an input array with the provided parameters\n    input_data = np.array([[age, weight, height, temp, heart_rate, spo2]])\n    \n    # Reshape input data for Conv1D model input with shape (1, 6, 1)\n    input_data_reshaped = input_data.reshape((input_data.shape[0], input_data.shape[1], 1))\n    \n    # Use the loaded models to predict SBP and DBP\n    predicted_sbp = loaded_sbp_model.predict(input_data_reshaped)[0][0]\n    predicted_dbp = loaded_dbp_model.predict(input_data_reshaped)[0][0]\n    \n    return predicted_sbp, predicted_dbp\n\n# Example usage\n# age = 45\n# weight = 75\n# height = 170\n# temp = 36.7\n# heart_rate = 85\n# spo2 = 97\n\n# age= 45\n# weight= 72\n# height= 170\n# temp= 36.8\n# SBP= 130\n# DBP= 82\n# heart_rate= 112\n# spo2= 97\n\n# predicted_sbp, predicted_dbp = predict_blood_pressure(age, weight, height, temp, heart_rate, spo2)\n# print(\"Predicted SBP:\", predicted_sbp)\n# print(\"Predicted DBP:\", predicted_dbp)","repo_name":"pyTimK/smartmedicalkiosk-cnn","sub_path":"getbp.py","file_name":"getbp.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"6832807295","text":"#coding=utf8\n#python alien_invasion.py\nimport sys\nimport pygame\nfrom pygame.sprite import Group\n\nimport game_functions as gf \nfrom settings import Settings \nfrom ship import Ship\nfrom game_stats import GameStats\nfrom button import Button\n#from alien import Alien\n\n\ndef run_game():\n\t# initialization\n\tpygame.init()\n\tai_settings = Settings()\n\tscreen = pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))\n\tpygame.display.set_caption(\"Alien Invasion\")\n\n\t#create the play button\n\tplay_button = Button(ai_settings, screen, \"Play\")\n\n\t# create an instance to store game statistics\n\tstats = GameStats(ai_settings)\n\n\t# create the ship\n\tship = Ship(ai_settings, screen)\n\t# create the bullet group\n\tbullets = Group()\n\t# alien group\n\taliens = Group()\n\n\tgf.creat_fleet(ai_settings, screen, ship, aliens)\n\n\t# start the main game loop\n\twhile True:\n\n\t\t# watch for keyboard and mouse events\n\t\tgf.check_events(ai_settings, screen, stats, play_button, ship, aliens, bullets)\n\n\t\tif stats.game_active:\n\t\t\tship.update()\n\t\t\tgf.update_bullets(ai_settings, screen, ship, aliens, bullets)\n\t\t\tgf.update_aliens(ai_settings, stats, screen, ship, aliens, bullets)\n\t\t#print(len(bullets))\n\n\t\t# update the screen\n\t\tgf.update_screen(ai_settings, screen, stats, ship, aliens, bullets, play_button)\n\n\n\nrun_game()\n ","repo_name":"wkmikw/alien_invasion","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"73774994265","text":"import flask\n\n\ndef savingsaccount_get(accountid=None, memberid=None, status=None, offset=0, limit=None):\n    with flask.current_app.db.db_cursor() as cur:\n        cur.execute(\"\"\" SELECT\n                            savingsaccountID,\n                            Status,\n                            OpenDate,\n                            CloseDate,\n                            currentbalance,\n                            entrydate,\n                            memberid\n                        FROM vw_savingsaccounts\n                        WHERE (%(accountid)s IS NULL or savingsaccountID = %(accountid)s)\n                        AND (%(memberid)s IS NULL or memberid = %(memberid)s)\n                        AND (%(status)s IS NULL or Status = %(status)s)\n                        OFFSET %(offset)s\n                        LIMIT %(limit)s\n                    \"\"\", {'accountid': accountid,\n                          'memberid': memberid,\n                          'status': status,\n                          'offset': offset,\n                          'limit': limit,\n                          })\n        accounts = cur.fetchall()\n    return accounts\n\n\ndef savingsaccount_create(memberid, currency):\n    with flask.current_app.db.db_cursor() as cur:\n        cur.execute(\"\"\"SELECT savingsaccount_create(\n                            %(memberid)s,\n                            %(currency)s\n                        )\"\"\",\n                    {\n                        'memberid': memberid,\n                        'currency': currency\n                    })\n        accountid = cur.fetchone()\n    return accountid['savingsaccount_create']\n\n","repo_name":"Devasta/katsu","sub_path":"katsuserver/blueprints/savings/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"72301390423","text":"# Gunicorn config variables\nloglevel = \"info\"\nerrorlog = \"-\"  # stderr\naccesslog = \"-\"  # stdout\nworker_tmp_dir = \"/dev/shm\"\ngraceful_timeout = 120\ntimeout = 120\nkeepalive = 5\nthreads = 3\nbind = \"0.0.0.0:443\"\nkeyfile = \"privkey.pem\"\ncertfile = \"fullchain.pem\"\nca_certs = \"ca-cert.pem\"\nssl_version = 2\ncert_reqs = 2\npreload = True\nciphers = \"TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384\"\nssl_options = {\n    \"ciphers\": ciphers,\n    \"server_side\": True\n}\n","repo_name":"Medioec/ICT3x03-Team-33","sub_path":"cinema-booking/common/gunicorn_conf.py","file_name":"gunicorn_conf.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"17"} +{"seq_id":"9158963179","text":"def fib(N):\r\n    # list of the number of times 0 is printed\r\n    zeros = [1, 0, 1]\r\n    # list of the number of times 1 is printed\r\n    ones = [0, 1, 1]\r\n    \r\n    # key idea: build the answers bottom-up with append\r\n    if N >= 3:\r\n        for i in range(2, N):\r\n            zeros.append(zeros[i-1] + zeros[i])\r\n            ones.append(ones[i-1] + ones[i])\r\n    \r\n    print(zeros[N], ones[N])\r\n\r\nT = int(input())\r\nfor tc in range(1, T+1):\r\n    N = int(input())\r\n    
fib(N)","repo_name":"seojeong4560/Algorithm","sub_path":"백준/Silver/1003. 피보나치 함수/피보나치 함수.py","file_name":"피보나치 함수.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"14857198341","text":"def score_match(players):\n\n \"\"\"\n >>> score_match({'Sombra': [10,22,24],'Tracer': [13,25,24],'Bastion':[23,38,10],'Widowmaker':[14,39,40]})\n {'Sombra': ['Tracer'], 'Tracer': ['Sombra'], 'Bastion': ['Widowmaker'], 'Widowmaker': ['Bastion']}\n \n >>> score_match({'Sombra': [19,22,22],'Tracer': [20,24,24],'Bastion':[22,23,25]})\n {'Sombra': ['Bastion', 'Tracer'], 'Tracer': ['Bastion', 'Sombra'], 'Bastion': ['Sombra', 'Tracer']}\n\n >>> score_match({'Hanzo': [21],'Mei': [20,24,24], 'Bastion':[10]})\n {'Hanzo': ['Mei'], 'Mei': ['Hanzo'], 'Bastion': []}\n \"\"\"\n matched_player = {k:sorted([k1 for k1,v1 in players.items() if k!=k1 \n and abs(min(v)-min(v1))<=5 \n and abs(max(v)-max(v1))<=5]) for k,v in players.items()}\n return matched_player","repo_name":"avinashtrivedi/Python_Code","sub_path":"match_player.py","file_name":"match_player.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"5197066796","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom typing import Sequence, Tuple, Dict\n\nfrom alphafold.Common import residue_constants\n\nclass ExperimentallyResolvedHead(nn.Module):\n\t\"\"\"\n\thttps://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/modules.py#L1201\n\t\"\"\"\n\tdef __init__(self, config, global_config, num_feat_1d:int) -> None:\n\t\tsuper(ExperimentallyResolvedHead, self).__init__()\n\t\tself.config = config\n\t\tself.global_config = global_config\n\t\tself.logits = nn.Linear(num_feat_1d, 37)\n\n\t\tself.loss_function = nn.BCEWithLogitsLoss(reduction='none')\n\n\tdef load_weights_from_af2(self, data, rel_path: str='experimentally_resolved_head', ind:int=None):\n\t\tmodules=[self.logits]\n\t\tnames=['logits']\n\t\tnums=[1]\n\t\tfor module, name, num in zip(modules, names, nums):\n\t\t\tfor i in range(num):\n\t\t\t\tif i==0:\n\t\t\t\t\tadd_str = ''\n\t\t\t\telse:\n\t\t\t\t\tadd_str = f'_{i}'\n\t\t\t\tif ind is None:\n\t\t\t\t\tw = data[f'{rel_path}/{name}{add_str}']['weights'].transpose(-1,-2)\n\t\t\t\t\tb = data[f'{rel_path}/{name}{add_str}']['bias']\n\t\t\t\telse:\n\t\t\t\t\tw = data[f'{rel_path}/{name}{add_str}']['weights'][ind,...].transpose(-1,-2)\n\t\t\t\t\tb = data[f'{rel_path}/{name}{add_str}']['bias'][ind,...]\n\t\t\t\tif isinstance(module, nn.ModuleList):\n\t\t\t\t\tprint(f'Loading {name}{add_str}.weight: {w.shape} -> {module[i].weight.size()}')\n\t\t\t\t\tprint(f'Loading {name}{add_str}.bias: {b.shape} -> {module[i].bias.size()}')\n\t\t\t\t\tmodule[i].weight.data.copy_(torch.from_numpy(w))\n\t\t\t\t\tmodule[i].bias.data.copy_(torch.from_numpy(b))\n\t\t\t\telse:\n\t\t\t\t\tprint(f'Loading {name}{add_str}.weight: {w.shape} -> {module.weight.size()}')\n\t\t\t\t\tprint(f'Loading {name}{add_str}.bias: {b.shape} -> {module.bias.size()}')\n\t\t\t\t\tmodule.weight.data.copy_(torch.from_numpy(w))\n\t\t\t\t\tmodule.bias.data.copy_(torch.from_numpy(b))\n\n\tdef forward(self, representations:Dict[str,torch.Tensor], batch:Dict[str,torch.Tensor], is_training:bool=False):\n\t\tlogits = self.logits(representations['single'])\n\t\treturn dict(logits=logits)\n\t\n\tdef loss(self, value:Dict[str,torch.Tensor], 
batch:Dict[str,torch.Tensor]) -> Dict[str,torch.Tensor]:\n\t\tlogits = value['logits']\n\t\tassert logits.ndimension() == 2\n\n\t\tatom_exists = batch['atom37_atom_exists']\n\t\tall_atom_mask = batch['all_atom_mask'].to(dtype=torch.float32)\n\t\txent = self.loss_function(logits, all_atom_mask)\n\t\tloss = torch.sum(xent*atom_exists) / (torch.sum(atom_exists) + 1e-8)\n\t\t\n\t\tif self.config.filter_by_resolution:\n\t\t\tloss *= ( (batch['resolution']>= self.config.min_resolution) & \n\t\t\t\t\t(batch['resolution']< self.config.max_resolution)).to(dtype=torch.float32)\n\t\t\n\t\treturn {'loss':loss}","repo_name":"lupoglaz/OpenFold2","sub_path":"alphafold/Model/Heads/resolved.py","file_name":"resolved.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"17"} +{"seq_id":"73216689624","text":"from datetime import datetime\nfrom flask import render_template, g, redirect, current_app, request, url_for, flash\nfrom flask_login import current_user, login_required\nfrom app import db\nfrom app.main.forms import EditProductPriceForm, SearchForm\nfrom app.main import bp\nfrom app.models import Item, Order, Permission, ProductDetailClick, Searches\nfrom app.decorators import permission_required, delivery_required\nfrom app.constants import DELIVERY_FEE\n\n\n@bp.before_app_request\ndef before_request():\n g.search_form = SearchForm()\n if current_user.is_authenticated:\n current_user.ping()\n\n\n@bp.route('/order/')\n@login_required\n@delivery_required\ndef order(order_id):\n order = Order.query.get(order_id)\n if not order:\n flash('Pedido não encontrado')\n return redirect(url_for('main.orders'))\n return render_template('main/order.html', title=\"Pedido \" + str(order_id), order=order)\n\n\n@bp.route('/orders')\n@login_required\n@delivery_required\ndef orders():\n return render_template('main/react.html', title=\"Pedidos\", react_app_name='orders', props={})\n\n\n@login_required\n@bp.route('/pedido/')\ndef pedido(order_id):\n order = current_user.orders.filter_by(id=order_id).first()\n if not order:\n flash('Pedido não encontrado')\n return redirect(url_for('main.meus_pedidos'))\n return render_template('main/order.html', title=\"Pedido \" + str(order_id), order=order)\n\n\n@login_required\n@bp.route('/meus_pedidos/')\ndef meus_pedidos():\n page = request.args.get('page', 1, type=int)\n orders = current_user.orders.order_by(\n Order.placed_timestamp.desc()).paginate(page, 5, False)\n next_url = url_for('main.meus_pedidos',\n page=orders.next_num) if orders.has_next else None\n prev_url = url_for('main.meus_pedidos',\n page=orders.prev_num) if orders.has_prev else None\n return render_template('main/meus_pedidos.html', title=\"Pedidos\",\n orders=orders.items, prev_url=prev_url, next_url=next_url)\n\n\n@login_required\n@bp.route('/completed_order')\ndef completed_order():\n return render_template('main/completed_order.html', title=\"Sucesso\")\n\n\n@bp.route('/cart')\ndef cart():\n # response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n # response.headers['Pragma'] = 'no-cache'\n return render_template('main/react.html', title=\"Carrinho\", react_app_name='cart', props={'deliveryFee': DELIVERY_FEE})\n\n\n@bp.route('/')\ndef index():\n eans = [\n 7896004710891,\n 7896023703010,\n 891142203014,\n 7896422509589,\n 7896116860217,\n 7896658035388,\n 7898569765071,\n 7891106914369,\n 7896422524452,\n 7896422526975,\n 7896714211275,\n 7891045043588,\n 7896015518875,\n ]\n items = 
Item.get_best_prices(eans)\n return render_template('main/index.html', items=items, search=True)\n\n\n@bp.route('/search')\ndef search():\n if not g.search_form.validate():\n return redirect(url_for('main.index'))\n if not g.search_form.q.data:\n return redirect(url_for('main.index'))\n page = request.args.get('page', 1, type=int)\n\n searched_text = g.search_form.q.data.strip()\n Searches.add_search(searched_text)\n\n items, total = Item.search(\n searched_text, page, current_app.config['ITEMS_PER_PAGE'])\n\n next_url = url_for('main.search', q=g.search_form.q.data, page=page + 1) \\\n if total > page * current_app.config['ITEMS_PER_PAGE'] else None\n prev_url = url_for('main.search', q=g.search_form.q.data, page=page - 1) \\\n if page > 1 else None\n return render_template('main/index.html', title='Pesquisa', items=items, next_url=next_url, prev_url=prev_url)\n\n\n@bp.route('/detail/')\ndef detail(id):\n item = Item.get_ordered_prices_by_id(id)\n if not item:\n flash('Produto não encontrado')\n return redirect(url_for('main.index'))\n ProductDetailClick.click(ean=Item.query.get(id).ean)\n can_edit = False\n if current_user.store_id is not None:\n for i in item:\n if i.store_id == current_user.store_id:\n can_edit = True\n return render_template('main/detail.html', title='Produto', item=item, can_edit=can_edit)\n\n\n@bp.route('/product/edit//', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PRICE)\ndef product_edit(store_id, ean):\n item = Item.query.filter_by(store_id=store_id, ean=ean).first()\n if not item:\n flash('Produto não encontrado')\n return redirect(url_for('main.index'))\n\n form = EditProductPriceForm(obj=item)\n\n if request.method == 'POST' and form.validate_on_submit():\n item.name = form.name.data\n item.price = form.price.data\n item.promotion_price = form.promotion_price.data\n item.promotion_qty = form.promotion_qty.data\n db.session.add(item)\n db.session.commit()\n flash('Produto alterado com sucesso')\n return redirect(url_for('main.detail', ean=item.ean))\n\n return render_template('main/product_edit.html', title='Editar', item=item, form=form)\n","repo_name":"lorenzocesconetto/Buscamed","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"17"} +{"seq_id":"74771773785","text":"#!/usr/bin/env python3\n# importing the requests library \nimport sys\nimport time\nimport hashlib\nimport json\nimport logging as log\nimport threading\nimport requests\n\nfrom CovidRetriever import CovidRetriever\nfrom DataStore.CovidDataStore import CovidDataStore\n\nfrom display_helper import LCDScreen\n\n\nOUTPUT_FILE_NAME = \"COVID_19.log\"\nLOG_OUTPUT_FORMAT = \"%(asctime)s: %(message)s\"\n\nHENNEPIN_URL = \"https://www.health.state.mn.us/diseases/coronavirus/situation.html\"\n\nsleep_minutes = 120\nsleep_seconds = sleep_minutes * 60\n\ndisplay_sleep_seconds = 30\n\ndata_retriever = None\ndata_store = None\n\nlcd_screen = None\n\ndef setup():\n log.basicConfig( \n filename=OUTPUT_FILE_NAME,\n format=LOG_OUTPUT_FORMAT, \n level=log.DEBUG,\n datefmt=\"%H:%M:%S\"\n )\n\n global data_retriever\n global data_store\n global lcd_screen\n\n data_retriever = CovidRetriever()\n data_store = CovidDataStore()\n lcd_screen = LCDScreen()\n\ndef display_thread_helper(name):\n\n current_grab_index = 0\n\n while True:\n log.info(\"hello\")\n message = data_store.get_message_for_index(current_grab_index)\n\n if message is not None:\n 
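# a message was available; advance the index so the next cycle shows the next stored message\n            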
current_grab_index += 1\n \n log.info(\"Would have logged -> %s\", message)\n lcd_screen.print(message)\n\n time.sleep(display_sleep_seconds)\n\ndef case_count_display_string(location, count):\n return \"{}\\nCases {:,}\".format(location, count)\n\nif __name__ == \"__main__\":\n setup()\n\n display_thread = threading.Thread(target=display_thread_helper, args=('Display',))\n\n thread_started = False\n\n while True:\n\n try:\n display_messages = []\n\n response = requests.get(url = HENNEPIN_URL)\n\n # Hennepin County\n hennepin_county_count = data_retriever.get_count_for_hennepin(response)\n\n display_messages.append(\n case_count_display_string(\n \"Hennepin County\", \n hennepin_county_count\n )\n )\n\n # Minnesota\n minnesota_state_count = data_retriever.get_count_for_minnesota(response)\n\n display_messages.append(\n case_count_display_string(\n \"Minnesota\", \n minnesota_state_count\n )\n )\n\n # Nation\n nation_count = data_retriever.get_count_for_nation()\n\n display_messages.append(\n case_count_display_string(\n \"U.S.\", \n nation_count\n )\n )\n\n data_store.add_messages(display_messages, True)\n\n if thread_started == False:\n display_thread.start()\n\n except:\n log.exception(\"Failed to retrieve data\")\n\n log.info(\"Will now sleep for %d seconds\", sleep_seconds)\n time.sleep(sleep_seconds)","repo_name":"forsythetony/covid_19_display","sub_path":"covid19.py","file_name":"covid19.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"21324832593","text":"class Graph():\n def __init__(self, nodes, init_graph):\n self.nodes = nodes\n self.graph = self.construct_graph(nodes, init_graph)\n\n def construct_graph(self, nodes, init_graph):\n graph = {}\n for node in nodes:\n graph[node] = {}\n\n graph.update(init_graph)\n\n for node, edges in graph.items():\n for adjacent_node, value in edges.items():\n if graph[adjacent_node].get(node, False) == False:\n graph[adjacent_node][node] = value\n\n return graph\n\n def get_nodes(self):\n return self.nodes\n\n def get_outgoing_edges(self, node):\n connections = []\n for out_node in self.nodes:\n if self.graph[node].get(out_node, False) != False:\n connections.append(out_node)\n return connections\n\n def value(self, node1, node2):\n return self.graph[node1][node2]\n\nnodes = [\"Полтавская ул.\", \"Лиговский проспект\", \"Невский проспект\",\n \"Ул. Жуковского\", \"Ул. Марата\", \"Свечной пер.\", \"Гончарная ул.\", \"Ул. Маяковского\"]\n\ninit_graph = {}\nfor node in nodes:\n init_graph[node] = {}\n\ninit_graph[\"Лиговский проспект\"][\"Невский проспект\"] = 332\ninit_graph[\"Лиговский проспект\"][\"Ул. Жуковского\"] = 1200\ninit_graph[\"Лиговский проспект\"][\"Свечной пер.\"] = 764\ninit_graph[\"Свечной пер.\"][\"Ул. Марата\"] = 528\ninit_graph[\"Ул. Марата\"][\"Невский проспект\"] = 858\ninit_graph[\"Невский проспект\"][\"Гончарная ул.\"] = 284\ninit_graph[\"Невский проспект\"][\"Ул. Маяковского\"] = 638\ninit_graph[\"Невский проспект\"][\"Полтавская ул.\"] = 642\ninit_graph[\"Полтавская ул.\"][\"Гончарная ул.\"] = 359\ninit_graph[\"Ул. Жуковского\"][\"Ул. 
Маяковского\"] = 238\n\nprint(init_graph[\"Лиговский проспект\"])\n\n\ngraph = Graph(nodes, init_graph)\nstart = \"Свечной пер.\"\ndestination = \"Полтавская ул.\"\n\ndef Dijkstra(graph):\n unvisited_nodes = graph.get_nodes()\n min_paths = {}\n for node in unvisited_nodes:\n min_paths[node] = float('inf')\n min_paths[start] = 0\n current_node = start\n while unvisited_nodes:\n trails = graph.get_outgoing_edges(current_node)\n for node in trails:\n if node in unvisited_nodes:\n if min_paths[node] == float('inf'):\n min_paths[node] = min_paths[current_node] + graph.value(current_node, node)\n else:\n min_paths[node] = min(min_paths[node], min_paths[current_node] + graph.value(current_node, node))\n unvisited_nodes.remove(current_node)\n if not unvisited_nodes:\n break\n candidates = {node: min_paths[node] for node in unvisited_nodes}\n current_node = min(candidates, key=candidates.get)\n return min_paths\n\np = Dijkstra(graph)\nanswer = p[destination]\nprint(p)\nprint(answer)\n\n\n\n","repo_name":"GoldenJaden/ITMO-study","sub_path":"Algorythms/Semester 2/Lab 3 Greedy algs + dijkstra/Deixtra.py","file_name":"Deixtra.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"17"} +{"seq_id":"74022868825","text":"import sys\nsys.stdin = open('25304_영수증.txt')\n\n# X: 총 금액\n# N: 구매한 물건의 종류의 수\n# a: 물건의 가격\n# b: 물건의 개수\n\nX = int(input())\nN = int(input())\n\nprice = 0\n\nfor _ in range(N):\n a, b = map(int, input().split())\n\n price += (a * b)\n\nif price == X:\n print('Yes')\nelse:\n print('No')","repo_name":"BuildEnough/TIL2","sub_path":"알고리즘/노션파이썬문제/20220811/25304_영수증.py","file_name":"25304_영수증.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"17"} +{"seq_id":"36759433979","text":"import imp\nfrom .database import *\nfrom .crud import *\nfrom .models import *\nfrom .schemas import *\nfrom sqlmodel import Session, select\nimport hashlib\nimport uuid\nfrom features.dropdown import *\nfrom passlib.context import CryptContext\nfrom uuid import uuid1\nfrom fastapi.security import OAuth2PasswordRequestForm\npwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\n\n\ndef get_db():\n db = Session(engine)\n return db\n\n# def add_ag_login(data:agency_data):\n# pass\n\ndef register_users(data:agency):\n # return uuid.uuid1()\n with get_db() as db:\n ag = agency_data(agency_id=str(uuid.uuid1()),ag_uniq_id = data.ag_uniq_id,agency_Name = data.agency_Name,hashedpass = pwd_context.hash(data.password))\n db.add(ag)\n # add_ag_login(ag) #function to add login details of agency\n db.commit()\n\n#get user data for authentication\ndef get_all_agencies()->dict:\n with get_db() as db:\n res = db.exec(\n \"SELECT * FROM agency_data;\"\n ).fetchall()\n return res\n\ndef getagid(agunid):\n with get_db() as db:\n res = db.exec(\n f\"SELECT agency_id FROM agency_data WHERE ag_uniq_id = '{agunid}';\"\n ).one()\n return res.agency_id\n\ndef syncUp(data:user_req_agency_form):\n with get_db() as db:\n reid = str(uuid.uuid1())\n agenid = getagid(data.ag_uniq_id)\n req = user_req_agency(reqid= reid, agencyid = agenid,adhaar = data.Adhaar,custid = data.custid)\n db.add(req)\n db.commit()\n return db.exec(f\"SELECT * FROM user_req_agency WHERE reqid = '{reid}';\").fetchall()[0]\n\ndef getreq():\n with get_db() as db:\n reqs = db.exec(\n \"SELECT * FROM user_req_agency;\"\n ).fetchall()\n return reqs\n\n\ndef ag_res(data:response_form):\n with 
get_db() as db:\n        res = db.exec(\n            # update user agency set status = * where reqid = *;\n            f\"UPDATE user_req_agency SET status = '{data.status}' WHERE reqid = '{data.request_id}';\"\n        )\n        db.commit()\n        ls = db.exec(f\"SELECT * from user_req_agency Where reqid='{data.request_id}';\").fetchall()\n        if(not len(ls)):\n            return \"request id not found, please check the request id\"\n        else:\n            return ls\n    \ndef get_name(agid):\n    with get_db() as db:\n        res = db.exec(\n            f\"SELECT agency_Name FROM agency_data WHERE agency_id = '{agid}';\"\n        ).one()\n        return res.agency_Name","repo_name":"rahulraikwar00/Address_sync","sub_path":"adlink/db/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"17"} +{"seq_id":"38927875177","text":"\"\"\"\nImplement various feature extraction techniques using tf and sklearn\n\"\"\"\nimport os\nimport functools\nimport pathlib\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\nfrom tensorflow import feature_column\nfrom tensorflow.feature_column import (\n    numeric_column, bucketized_column,\n    categorical_column_with_vocabulary_list, embedding_column,\n    crossed_column, indicator_column)\nfrom tensorflow.keras.layers import DenseFeatures\n\n\nclass PackNumericFeatures(object):\n    def __init__(self, names):\n        self.names = names\n\n    def __call__(self, features, labels):\n        numeric_features = [features.pop(name) for name in self.names]\n        numeric_features = [tf.cast(feat, tf.float32)\n                            for feat in numeric_features]\n        numeric_features = tf.stack(numeric_features, axis=-1)\n        features['numeric'] = numeric_features\n        return features, labels\n\n\ndef categorical2onehot(unique_feats, categorical_feats):\n    categorical_columns = []\n    for feature in categorical_feats:\n        vocab = unique_feats[feature]\n        cat_col = tf.feature_column.categorical_column_with_vocabulary_list(\n            key=feature, vocabulary_list=vocab)\n        categorical_columns.append(tf.feature_column.indicator_column(cat_col))\n    return categorical_columns\n\n\ndef categorical2embedding(unique_feats, categorical_feats, categorical_feats_len):\n    categorical_columns = []\n    for feature, length in zip(categorical_feats, categorical_feats_len):\n        vocab = unique_feats[feature]\n        cat_col = tf.feature_column.categorical_column_with_vocabulary_list(\n            key=feature, vocabulary_list=vocab)\n        categorical_columns.append(\n            tf.feature_column.embedding_column(cat_col, length))\n    return categorical_columns\n\n\ndef normalization(train_data, NUMERIC_FEATURES):\n\n    def normalize_numeric_data(data, mean, std):\n        return (data-mean)/std\n\n    desc = train_data[NUMERIC_FEATURES].describe()\n    MEAN = np.array(desc.T['mean'])\n    STD = np.array(desc.T['std'])\n    normalizer = functools.partial(normalize_numeric_data, mean=MEAN, std=STD)\n    # bind the result to a new name: rebinding 'numeric_column' would make it a local\n    # variable, so the call on the right-hand side would raise UnboundLocalError\n    numeric_col = numeric_column(\n        'numeric', normalizer_fn=normalizer, shape=[len(NUMERIC_FEATURES)])\n    numeric_columns = [numeric_col]\n    return numeric_columns\n","repo_name":"saratbhargava/cat-in-the-dat","sub_path":"src/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"31580083586","text":"def solution(s):\n    answer = ''\n    ansList = []\n    words = list(s.split(\" \"))\n    for w in words :\n        temp = ''\n        for i in range(len(w)) :\n            if i %2 == 0 :\n                temp += w[i].upper()\n            else :\n                temp += w[i].lower()\n        ansList.append(temp)\n    answer = ' '.join(ansList)\n    return 
answer","repo_name":"miseop25/Back_Jun_Code_Study","sub_path":"Programmers/기타문제/이상한_문자_만들기/make_worn_word_ver1.py","file_name":"make_worn_word_ver1.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"17"} +{"seq_id":"25606274333","text":"from socket import socket \r\nfrom threading import Thread \r\nfrom zlib import compress \r\nfrom mss import mss \r\nfrom zlib import decompress \r\nimport pygame \r\nWIDTH = 1366 \r\nHEIGHT = 768 \r\nhost='192.168.1.82' \r\nport=9000 \r\nch=int(input('Do you want to share your screen?\\n1.Yes\\n2.No\\nEnter your choice : ')) \r\nif(ch==1): \r\n def retreive_screenshot(conn): \r\n with mss() as sct: # The region to capture \r\n rect = {'top': 0, 'left': 0, 'width': WIDTH, 'height': HEIGHT} \r\n while 'recording': # Capture the screen \r\n img = sct.grab(rect) # Tweak the compression level here (0-9) \r\n pixels = compress(img.rgb, 6) # Send the the pixels length \r\n size = len(pixels) \r\n size_len = (size.bit_length() + 7) // 8 \r\n conn.send(bytes([size_len])) # Send the actual pixels length \r\n size_bytes = size.to_bytes(size_len, 'big') \r\n conn.send(size_bytes) # Send pixels conn.sendall(pixels) \r\n def server(): \r\n sock = socket() \r\n sock.bind((host, port)) \r\n try: \r\n sock.listen(5) \r\n print('Server started.')\r\n while 'connected': \r\n conn, addr = sock.accept() \r\n print('Client connected IP:', addr) \r\n thread = Thread(target=retreive_screenshot, args=(conn,)) \r\n thread.start() \r\n finally: \r\n sock.close() \r\n server() \r\nelif ch==2: \r\n def recvall(conn, length):\r\n buf=b'' \r\n while len(buf) < length: \r\n data = conn.recv(length - len(buf)) \r\n if not data: return data \r\n buf += data \r\n return buf\r\n\r\n def client(): \r\n pygame.init() \r\n screen = pygame.display.set_mode((WIDTH, HEIGHT)) \r\n clock = pygame.time.Clock() \r\n watching = True \r\n sock = socket() \r\n sock.connect((host, port)) \r\n try: \r\n while watching: \r\n for event in pygame.event.get(): \r\n if event.type == pygame.QUIT: \r\n watching = False \r\n break # Retreive the size of the pixels, the pixels length and pixels \r\n size_len = int.from_bytes(sock.recv(1), byteorder='big') \r\n size = int.from_bytes(sock.recv(size_len), byteorder='big') \r\n pixels = decompress(recvall(sock, size)) # Create the Surface from raw pixels \r\n img = pygame.image.fromstring(pixels, (WIDTH, HEIGHT), 'RGB') # Display the picture \r\n screen.blit(img, (0, 0)) \r\n pygame.display.flip()\r\n clock.tick(60) \r\n finally: sock.close() \r\n client() \r\nelse: \r\n print('Invalid Choice.')","repo_name":"ayeman19/Screensharing-using-Python","sub_path":"screenshare.py","file_name":"screenshare.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"31198907898","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport csv\n# quote_page='http://www.bloomberg.com/quote/SPX:IND'\n# page=urllib.request.urlopen(quote_page)\n#\n# soup=BeautifulSoup(page,'html.parser')\n#\n# name_box=soup.find('h1',attrs={'class': 'name'})\n# name=name_box.text.strip()\n# print(name)\n\nquote_page='https://organicfoodsandcafe.com/product-category/baby/baby-baby/food/'\n# 
quote_page='https://organicfoodsandcafe.com/product-category/baby/baby-baby/food/page/3/'\npage=urllib.request.urlopen(quote_page)\n\nfilename='Baby_Food_Price.csv'\n\nsoup=BeautifulSoup(page,'html.parser')\n\nname=[]\nprice_num=[]\nprice_currency=[]\n\nname_count=1\nprice_count=1\n\n# for headers in soup.find_all('h3'):\nfor ultag in soup.find_all('ul',{'class': 'products'}):\n for headers in ultag.find_all('h3'):\n name.append(headers.text.strip())\n name_count=name_count+1\n\n for price in ultag.find_all('span',attrs={'class': 'woocommerce-Price-amount amount'}):\n price_style=price.text.strip()\n price_style_split=price_style.split()\n price_num.append(price_style_split[1])\n price_currency.append(price_style_split[0])\n price_count=price_count+1\n# print(name)\nprint(price_num)\nprint(price_currency)\n\n\nif name_count==price_count:\n rows_zip = zip(name, price_num, price_currency)\n with open(filename,'w',newline='') as f:\n writer=csv.writer(f)\n for row in rows_zip:\n writer.writerow(row)\n\nf.close()\n\n\n# print(name_count)\n# print(price_count)","repo_name":"lwn625/DataFromWeb","sub_path":"test2_beautifulsoup.py","file_name":"test2_beautifulsoup.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"28782091029","text":"# -*- coding: utf-8 -*-\nimport os\nCONFIG_DIR = os.path.dirname(os.path.abspath(__file__))\nAPP_ROOT = CONFIG_DIR.rsplit('/', 1)[0]\n\nALLOWED_EXTENSIONS = set(['avi', 'mp4'])\nUPLOAD_FOLDER = \"web/static/uploads\"\nconfig = {\n 'DOMAIN': 'upaty.com',\n 'MAX_CONTENT_LENGTH': 50 * 1024 * 1024,\n 'DATETIME_FORMAT': \"%d-%m-%Y %H:%M\",\n 'DATE_FORMAT': '%d-%m-%Y',\n 'TIME_FORMAT': '%H:%M',\n 'SECRET_KEY': 'flask-session-insecure-secret-key',\n 'SQLALCHEMY_DATABASE_URI': '',\n 'SQLALCHEMY_ECHO': False,\n 'CSS_SYNC_PORT': 9264,\n 'debug': True,\n 'email': {\n },\n 'ALLOWED_EXTENSIONS': ALLOWED_EXTENSIONS,\n 'UPLOAD_FOLDER': UPLOAD_FOLDER,\n 'FB_APP_ID': '167101860397819',\n 'FB_APP_SECRET': 'dd6cbcd78edea6bc66aec061609a8d7a',\n 'FB_PAGE_ID': '266115127115395',\n 'YT_API_KEY': 'AIzaSyBq5d32XmOxCU0tvDgO2bCUaiudqgmHLWQ',\n # Fanpage Access Token\n 'FB_ACCESS_TOKEN': 'EAACXZBm8HwvsBAFe9UIr3UJEw1m9m64I7JrTDqv7pAxxsrD7tErjSpqZAg0kbFoUgKo31LadcKSHRn7ZBZBfxXR3XtxKfX65Ms2CnMO3i9dgbfZAMPAlMHEAxQFtUNZBUqUpEXkIAoGb9NddZAlz206Vi4zrs3tMpPg5MojJmYaBNYMxpFecUzQ',\n 'LANGUAGES': (\n ('vi', u'Tiếng Việt'),\n ('en', u'English')\n ),\n 'english_domains': ['upaty.com', 'dev.upaty.com']\n}\n","repo_name":"hupahupa/myyeah1","sub_path":"config/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"17328762176","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom base.models import BaseModel\n\n\nclass MenuHours(BaseModel):\n \"\"\"\n Holds Restaurant menu hours\n like: sunday restaurant serve to menu hour Lunch Menu(12:00 PM - 03:00 PM)\n\n Attributes\n ----------\n restaurant: restaurant foreign key\n name: name of the menu like \"Lunch, Breakfast\"\n week_day: name of the week day like \"Saturday, Sunday\"\n start_time: start time of the menu in 24hr format\n end_time: end time of the menu in 24hr format\n status: active, archived, delete\n \"\"\"\n\n class Status(models.TextChoices):\n ACTIVE = 'active', _('active')\n ARCHIVED = 'archived', _('archived')\n DELETED = 'deleted', _('deleted')\n\n class 
WeekDay(models.TextChoices):\n SUNDAY = 'sunday', _('sunday')\n MONDAY = 'monday', _('monday')\n TUESDAY = 'tuesday', _('tuesday')\n WEDNESDAY = 'wednesday', _('wednesday')\n THURSDAY = 'thursday', _('thursday')\n FRIDAY = 'friday', _('friday')\n SATURDAY = 'saturday', _('saturday')\n\n restaurant = models.ForeignKey('restaurant.Restaurant', on_delete=models.CASCADE)\n name = models.CharField(max_length=50)\n week_day = models.CharField(max_length=50, choices=WeekDay.choices, default=WeekDay.SUNDAY)\n start_time = models.TimeField()\n end_time = models.TimeField()\n status = models.CharField(max_length=30, choices=Status.choices,\n default=Status.ACTIVE)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = _('Menu Hour')\n verbose_name_plural = _('Menu Hours')\n db_table = 'menu_hours'\n unique_together = [['restaurant', 'name', 'week_day']]\n","repo_name":"mirajehossain/restaurant-lunch-decision-maker","sub_path":"src/item/models/menu_hours.py","file_name":"menu_hours.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"17"} +{"seq_id":"20568057584","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings')\n\nimport django\ndjango.setup()\nfrom rango.models import Category, Page\n#When importing django models, make sure you have imported your project's settings by importing django and\n#setting the environment variable DJANGO_SETTINGS_MODULE to be your projects settings file.\n#Then call django.setup to import your Django project's settings.\n#You need to do this stuff before importing your models because we first need to initialise our Django\n#infrastructure\n\n#run the population script everytime this is modified\n\n#what is going on is essentially a series of function calls to two small\n#functions, add_page(), and add_cat() defined towards the end of the module.\n#Reading through the code, we find that execution starts at the bottom of the module\n#above the execution, we define functions; these are not executed unless we call\n#them. 
we call on the defined populate() function in execution at the bottom.\ndef populate():\n #First we will create lists of dictionaries containing the pages\n #we want to add into each category\n #Then we will create a dictionary of dictionaries for our categories.\n #Allows us to iterate through each data structure, and add the data to our models.\n\n python_pages = [\n {\"title\": \"Official Python Tutorial\",\n \"url\":\"http://docs.python.org/2/tutorial/\",\n \"views\": 200},\n {\"title\":\"How to Think like a Computer Scientist\",\n \"url\":\"http://www.greenteapress.com/thinkpython/\",\n \"views\": 300},\n {\"title\":\"Learn Python in 10 Minutes\",\n \"url\":\"http://www.korokithakis.net/tutorials/python/\",\n \"views\":400} ]\n \n django_pages = [\n {\"title\":\"Official Django Tutorial\",\n \"url\":\"https://docs.djangoproject.com/en/1.9/intro/tutorial01/\",\n \"views\": 40},\n {\"title\":\"Django Rocks\",\n \"url\":\"http://www.djangorocks.com/\",\n \"views\": 30},\n {\"title\":\"How to Tango with Django\",\n \"url\":\"http://www.tangowithdjango.com/\",\n \"views\": 1000} ]\n\n other_pages = [\n {\"title\":\"Bottle\",\n \"url\":\"http://bottlepy.org/docs/dev/\",\n \"views\": 50},\n {\"title\":\"Flask\",\n \"url\":\"http://flask.pocoo.org\",\n \"views\": 100} ]\n #each key/value pairing represents the name of the category as the key, and\n #an additional dictionary containing additional information relating to the\n #category as the value.\n cats = {\"Python\": {\"pages\": python_pages, \"views\":128, \"likes\":64},\n \"Django\": {\"pages\": django_pages, \"views\":64, \"likes\":32},\n \"Other Frameworks\": {\"pages\": other_pages, \"views\":32, \"likes\":16} }\n #If you want to add more categories or pages, add them to the dictionaries above.\n\n #The code below goes through the cats dictionary, then adds each category,\n #and then adds all the associated pages for that category.\n #add_cat() and add_page() are responsible for the creation of new categories and pages\n #the function populate() keeps tabs on categories that are created.\n #A reference to a new category is stores in local variable c. This is stored because a Page (in order to be added)\n #requires a Category reference. After add_cat and add_page are called inside this populate function we\n #are in, the function concludes by looping through all new Category and associated Page objects, displaying\n #their names.\n for cat, cat_data in cats.items():\n #cat is the key (python, django, other) and cat_data is the value so\n #to access value you would do cat_data[\"pages\"]\n c = add_cat(cat, cat_data[\"views\"], cat_data[\"likes\"])\n for p in cat_data[\"pages\"]:\n add_page(c, p[\"title\"], p[\"url\"], p[\"views\"])\n\n #Print out the categories we have added.\n for c in Category.objects.all():\n for p in Page.objects.filter(category=c):\n print(\"- {0} - {1}\".format(str(c), str(p)))\n\n\n#in the two functions below, we make use of the get_or_create() method for creating model instances\n#and because we don't want to create duplicates of the same entry. it removes a lot of repetitive code for us.\n#it checks if the entry exists in the database\n#if it doesn't exist, the get_or_create() method creates it. 
if it does exist, then a reference to the specific model\n#instance is returned\ndef add_page(cat, title, url, views):\n p = Page.objects.get_or_create(category=cat, title=title)[0]\n p.url=url\n p.views=views\n p.save()\n return p\n\ndef add_cat(name, views, likes):\n c = Category.objects.get_or_create(name=name)[0]\n c.views=views\n c.likes=likes\n c.save()\n return c\n\n#Start execution here!\nif __name__== '__main__':\n #code within a conditional if __name__=='__main__' statement will only be executed when the module is run as a\n #standalone Python script. Importing the module will not run this code; any classes or functions will\n #however be fully accessible to you. \n print(\"Starting Rango population script...\")\n populate()\n \n","repo_name":"2335870F/tango_with_django_project","sub_path":"populate_rango.py","file_name":"populate_rango.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"}
+{"seq_id":"27561939307","text":"'''\nFunctional programming:\n\nSince a variable can point to a function and a function's parameters can receive variables, a function can accept another function as an argument; such a function is called a higher-order function.\n\nThe simplest possible higher-order function:\n\ndef add(x, y, f):\n return f(x) + f(y)\n\n filter() returns an Iterator, i.e. a lazy sequence, so to force filter() to finish computing you must use list() to collect all of the results into a list.\n filter() sifts out the elements of a sequence that satisfy a condition. Because filter() is lazily evaluated, the filtering only actually happens when the result is consumed, yielding the next matching element each time.\n\nsorted() is also a higher-order function. The key to sorting with sorted() is implementing a mapping (key) function.\n\n\n'''\n\nfrom functools import reduce\nimport util.PrintUtil as pu\nimport time, functools\n\ndef add(x, y, f):\n return f(x) + f(y)\n\npu.print_line_separator(\"Calling the higher-order function add\")\n\nresult = add(-1,4,abs)\nprint(\"Result of calling higher-order add: {}\".format(result))\n\n\n\npu.print_line_separator(\"Higher-order function: sum of the max/min of two lists\")\ndef listPlus(list1,list2,plus1,plus2):\n return plus1(list1) + plus2(list2)\n\nlist1 = [1,4,5]\nlist2 = [100,200,500]\nlistMaxPlus = listPlus(list1,list2,max,min)\nprint(\"Sum of max(list1) and min(list2): {}\".format(listMaxPlus))\n\npu.print_line_separator()\nmax, min = min, max\nprint(max(1, 2, 3, 4, 5))\nprint(min(1, 2, 3, 4, 5))\n\n\npu.print_line_separator(\"Using the Python built-ins map()/reduce()\")\ndef f(x):\n return x * x\n\nlist1 = map(f,[1,2,3,4,5])\nprint(list(list1))\nprint(list(list1)) # prints [] the second time, because map returns an iterator and an iterator can only be traversed once\n\n\nstr1 = map(str,[1,2,3,4,5])\nprint(list(str1))\n\n\n\nDIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}\n\ndef str2int(s):\n def fn(x, y):\n return x * 10 + y\n def char2num(s):\n return DIGITS[s]\n return reduce(fn, map(char2num, s))\n\nprint(str2int(\"123123\"))\n\n# This can be simplified further with a lambda:\n\nDIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}\ndef char2num(s):\n return DIGITS[s]\ndef str2int(s):\n return reduce(lambda x, y: x * 10 + y, map(char2num, s))\n\n\nprint(list(map(char2num, \"123456\")))\n\npu.print_line_separator()\n\n# Capitalize the first letter, lower-case the rest\ndef normalize(name):\n temp_str = name[:1].upper() + name[1:len(name)].lower()\n return temp_str\n\n\nprint(list(map(normalize,['adam', 'LISA', 'barT'])))\n\n\npu.print_line_separator(\"Python's built-in filter() filters a sequence\")\ndef is_odd(x):\n return x % 2 == 1\n\nprint(list(filter(is_odd,[1,2,3,4,5,6,7,8,9])))\n\ndef not_empty(s):\n return s and s.strip() # filter out None/empty first, then strip whitespace; otherwise: AttributeError: 'NoneType' object has no attribute 'strip'\nprint(list(filter(not_empty, ['A', '', 'B', None, 'C', ' '])))\n\n\npu.print_line_separator(\"Computing primes with higher-order functions\")\ndef _odd_iter():\n n = 1\n while True:\n n = n + 2\n yield n\ndef _not_divisible(n):\n return lambda x:x % n > 0\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n),it)\n\nfor n in primes():\n if n < 100:\n print(n)\n else:\n break\n\n\npu.print_line_separator(\"A palindromic number reads the same left-to-right and right-to-left, e.g. 12321, 909\")\ndef is_palindrome(s):\n s = str(s)\n return s == s[::-1]\n\nprint(list(filter(is_palindrome,range(1,100))))\n\npu.print_line_separator(\"Using the higher-order function sorted\")\nprint(sorted(['Acd','Tom','lucy','jerrY','JACK','ROY'],key=str.lower))\nprint(sorted(['Acd','Tom','lucy','jerrY','JACK','ROY'],key=str.lower,reverse=True)) # the sorted result, reversed\n\n\n\npu.print_line_separator(\"Sorting the tuples by name / by score\")\nL1 = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\ndef sorted_by_name(t):\n return t[0]\ndef sorted_by_score(t):\n return t[1]\nprint(sorted(L1,key=sorted_by_name,reverse=True))\nprint(sorted(L1,key=sorted_by_score))\nprint(sorted(L1,key=tuple))\n\n\npu.print_line_separator(\"Use a closure to return a counter function that returns an incremented integer on each call\")\ndef createCounter():\n def cc():\n i = 1\n while True:\n yield i\n i = i + 1\n it = cc()\n def counter():\n return next(it)\n return counter\n\ncounterA = createCounter()\nprint(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5\ncounterB = createCounter()\nif [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:\n print('Test passed!')\nelse:\n print('Test failed!')\n\n\ndef count():\n fs = []\n for i in range(1, 3):\n def f():\n return i*i\n fs.append(f)\n return fs\n\nf1, f2 = count()\nprint(f1())\nprint(f2())\n\npu.print_line_separator(\"Anonymous functions\")\nprint(list(filter(lambda n : n % 2 == 1, range(1, 20))))\nprint(list(filter(lambda n : n % 2, range(1, 20))))\nprint(list(filter(lambda n : n % 3, range(1, 20))))\nprint(list(filter(lambda n : n % 3 == 0, range(1, 20))))\n\n\npu.print_line_separator(\"Decorators\")\nprint(\"A function object has a __name__ attribute; the name of createCounter is: {}\".format(createCounter.__name__))\n\ndef metric(fn):\n @functools.wraps(fn)\n def wrapper(*args,**kw):\n start = time.time()\n r = fn(*args,**kw)\n consumed = (time.time() - start) * 1000\n print('%s executed in %s ms'%(fn.__name__,consumed))\n return r\n return wrapper\n\n# tests\n@metric\ndef fast(x, y):\n time.sleep(0.0012)\n return x + y\n\n@metric\ndef slow(x, y, z):\n time.sleep(0.1234)\n return x * y * z\n\nf = fast(11, 22)\ns = slow(11, 22, 33)\nprint(\"f = {},s = {}\".format(f,s))\nif f != 33:\n print('{} test failed!'.format(fast.__name__)) # f is an int, so report the function's name, not f.__name__\nelif s != 7986:\n print('{} test failed!'.format(slow.__name__))\nelse:\n print('{} test passed!'.format(fast.__name__))\n\n\npu.print_line_separator(\"Logging output implemented with a decorator\")\n\ndef log(*remark):\n def decorator(fn):\n @functools.wraps(fn)\n def wrapper(*args,**kw):\n if remark and len(remark) > 0:\n print('{} is executing function: {}'.format(remark[0], fn.__name__))\n else:\n print('anonymous caller is executing function: {}'.format(fn.__name__))\n return fn(*args,**kw)\n return wrapper\n return decorator\n\n@log()\ndef slow2(x, y, z):\n time.sleep(0.1234)\n return x * y * z\n\nprint(slow2(1,2,3))\n\npu.print_line_separator(\"Partial functions with functools.partial\")\n# In short, functools.partial pins down some of a function's arguments (i.e. sets defaults) and returns a new function that is simpler to call.\n# When a function takes too many arguments and needs simplifying, functools.partial creates a new function with some of the original function's arguments fixed, making calls simpler.\nint2 = functools.partial(int,base = 2)\nprint(int2('1000000'))","repo_name":"huxiulei/learnpython","sub_path":"liaoxuefeng/python_higherorder_function.py","file_name":"python_higherorder_function.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"}
+{"seq_id":"24866623783","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nc = 
pd.read_csv('Comcast_telecom_complaints_data.csv')\nc = pd.DataFrame(c)\nc.head(n=1)\n\nnum_complaints_daily = c.groupby(by=[\"Date_month_year\"], dropna=False).count()\ndaily_complaints = num_complaints_daily['Customer Complaint']\ndaily_complaints\n\ndate_list = c['Date_month_year'].str.split('-', n=-1, expand=False)\n\nmonths = []\nfor date in date_list:\n months.append(date[1])\n\nc['months'] = months\nc = c.reindex(columns=['Ticket #', 'Customer Complaint', 'Date', 'Date_month_year', 'months', 'Time', 'Received Via', 'City', 'State', 'Zip code', 'Status', 'Filing on Behalf of Someone'])\nc.head()\n\nnum_complaints_monthly = c.groupby(by=[\"months\"], dropna=False).count()\nmonthly_complaints = num_complaints_monthly['Customer Complaint']\nmonthly_complaints\n\npd.set_option('display.max_rows', 15)\nfct = pd.DataFrame(c['Customer Complaint'].value_counts().reset_index())\nfreq_complaint_types = fct.rename(columns={'index':'complaint type', 'Customer Complaint':'count'})\nfreq_complaint_types\n\nstatus = []\nfor index, row in c.iterrows():\n if row['Status'] == 'Open' or row['Status'] == 'Pending':\n status.append('Open')\n elif row['Status'] == 'Closed' or row['Status'] == 'Solved':\n status.append('Closed')\n\nc['Status Value'] = status\nc.head()\n\ncomplaints_by_state = c.groupby(by=[\"State\"], dropna=False).count()\ncbs = complaints_by_state.reset_index()\n\ntotal_num_complaints = []\nfor index, row in cbs.iterrows():\n total_num_complaints.append(row['Customer Complaint'])\n\ncbs['Total Complaints'] = total_num_complaints\ncbs.head()\n\n\n\n\n\n\n\n\n# Which state has the maximum complaints - Georgia\n\ndata = {}\nfor index, row in cbs.iterrows():\n data[row['State']] = row['Customer Complaint']\n \nstate = list(data.keys())\ncount = list(data.values())\n\nfig = plt.figure(figsize = (30, 10))\nplt.bar(state, count, color ='maroon',\n width = 0.4)\n \nplt.xlabel(\"State\")\nplt.xticks(rotation=30)\nplt.ylabel(\"No. of Complaints \")\nplt.title(\"No. 
of Complaints by State\")\nplt.show()\n\n\n\n\n\n\n\n# Which state has the highest percentage of unresolved complaints - Kansas\ntotal_complaints = cbs[['State', 'Total Complaints']]\ncomplaints_by_state_status = c.groupby(by=[\"State\", 'Status Value'], dropna=False).count()\ncbss = pd.DataFrame(complaints_by_state_status['Customer Complaint'])\ncbss.head()\ncbss = cbss.reset_index()\n\ncbss = cbss.merge(total_complaints, on='State')\n\npercentage = []\nfor index, row in cbss.iterrows():\n perc = round((row['Customer Complaint']/row['Total Complaints'])*100, 1)\n percentage.append(perc)\n\ncbss['Percentage'] = percentage\nopen_complaints = cbss.loc[cbss['Status Value'] == 'Open']\n\nstate = list(open_complaints['State'])\nperc_open = list(open_complaints['Percentage'])\n\nfig = plt.figure(figsize = (30, 10))\nplt.bar(state, perc_open, color ='maroon',\n width = 0.4)\n \nplt.xlabel(\"State\")\nplt.xticks(rotation=30)\nplt.ylabel(\"Percent of Complaints \")\nplt.title(\"Percentage of Open Complaints by State\")\nplt.show()\n\n\n\n\n\n\n\n# What percentage of complaints are resolved vs. open?\nperc_resolved = c.groupby(by=[\"Status Value\"], dropna=False).count()\nresolved = perc_resolved['Customer Complaint'] # select the count column directly; c has no 'Total Complaints' column to drop\nres = pd.DataFrame(resolved).reset_index()\nres.columns = ['Status', 'Num Complaints']\ntotal = res['Num Complaints'].sum()\nres['Percent Total'] = round(((res['Num Complaints']/total)*100), 1)\nres","repo_name":"mshah016/simplilearn-stuff","sub_path":"Python/Data_science_with_Python_1/Comcast/comcast_sourcecode.py","file_name":"comcast_sourcecode.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"}
+{"seq_id":"74226103704","text":"\"\"\"\nContains logic for health checks for Zookeeper and Solr\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nimport itertools\nimport datetime\nimport json\nfrom threading import Thread\nimport random\nfrom collections import defaultdict\nimport time\nfrom pprint import pformat\nimport logging\nlog = logging.getLogger(__name__)\nimport sys\nimport six\nimport traceback\n\nfrom kazoo.retry import KazooRetry\nfrom kazoo.client import KazooClient\n\nfrom solrzkutil.formatter import format_host\n\nfrom solrzkutil.parser import (parse_admin_dump, \n parse_admin_cons, \n parse_admin_wchp, \n parse_admin_wchc)\nfrom solrzkutil.util import (netcat, \n parse_zk_hosts,\n kazoo_clients_from_client, \n kazoo_clients_connect, \n kazoo_client_cache_enable)\n\nfrom pprint import pprint\n\n# TODO put these in a constants sub-module.\nZNODE_PATH_SEPARATOR = '/'\nOVERSEER_ELECT_LEADER_PATH = '/overseer_elect/leader' # this is a file containing the current leader.\nOVERSEER_ELECT_ELECTION_PATH = '/overseer_elect/election'\nLIVE_NODES_PATH = '/live_nodes'\nZK_QUEUE_PATHS = [\n '/overseer/collection-map-completed',\n '/overseer/collection-map-failure',\n '/overseer/collection-map-running',\n '/overseer/collection-queue-work',\n '/overseer/queue',\n '/overseer/queue-work'\n]\n\n# TODO handle unavailable server errors intelligently.\n# TODO turn the async commands into a class in which operations can be performed on an object.\n# TODO move utility functions into util module.\n# TODO make summary functions to get summary output from (success, data, errors) for specific check functions\n# TODO change the 'check' functions to return a tuple / (success, data, errors)\n# where data is extra information that may be useful to summary functions.\n# TODO 
a check needs to exist for watch functionality around known paths that must exist per collection.\n# TODO ignore early errors like NoNodeError where it will be represented later by comparison of directories.\n# TODO ignore unavailable server errors in the comparisons, this isn't what is being checked... if only 2 of 3 servers are\n# available you should only compare the two.\n# Any problems with connecting will be reported in the connection check.\n# TODO format the host tuple in error messages\n# TODO change all errors to use custom Exception classes with metrics built into them.\n# TODO write \"resolution\" classes that take information from check_* function output and suggest possible resolutions.\n# TODO during N-way comparisons, if only 1 of N is different, customize the log entry to indicate the node that is 'missing' information\n# for this comparison simply get a set object of all responses; if the set contains exactly 1 element there\n# are no differences. If the set contains 2 elements, 1 host is inconsistent.\n# If the set contains 3 or more elements, there is a general consistency issue.\n\ndef connect_to_zookeeper(zookeepers):\n try:\n c = KazooClient(zookeepers)\n return c\n except Exception as e:\n log.error(get_exception_traceback())\n return None # caller must handle a failed connection; the error has already been logged\n\ndef multi_admin_command(zk_client, command):\n \"\"\"\n Executes an administrative command over multiple zookeeper nodes in a session-less manner \n using threading.\n \n Using threading not only speeds up the total time taken to query the remote Zookeeper hosts, \n it also ensures the most similar and real-time results from the servers.\n \n :param zk_client: Zookeeper connection object (KazooClient instance or subclass of)\n The connection instance should be configured with the hosts that are\n members of the ensemble.\n :param command: the administrative command to execute on each host within ``zk_client.hosts``\n \"\"\"\n # TODO handle exceptions from the client, exceptions should \n # return None or the Exception object. 
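# --- Editor's note: illustrative sketch, not part of the original module. ---
# multi_admin_command() above relies on the imported `netcat` helper to speak the
# Zookeeper "four letter word" admin protocol ('ruok', 'dump', 'cons', 'wchc', ...).
# The assumed behaviour is roughly the socket exchange below: send the command
# bytes over TCP and read the plain-text reply until the server closes the socket.
import socket

def fourletter_sketch(host, port, cmd=b'ruok', timeout=5.0):
    """Send a Zookeeper four-letter-word command and return the raw reply."""
    with socket.create_connection((host, port), timeout=timeout) as sock:
        sock.sendall(cmd)
        chunks = []
        while True:
            data = sock.recv(4096)
            if not data:  # server closes the socket when the reply is complete
                break
            chunks.append(data)
    return b''.join(chunks)
# ----------------------------------------------------------------------------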
\n \n if not isinstance(command, six.binary_type):\n raise ValueError('command must be a byte string, got: %s' % type(command))\n \n admin_results = []\n \n def get_admin(zk_host, zk_port, cmd):\n result = netcat(zk_host, zk_port, cmd)\n admin_results.append(result)\n\n wait = []\n for host, port in zk_client.hosts:\n t = Thread(target=get_admin, args=(host, port, command))\n t.start()\n wait.append(t)\n \n # wait for all threads to finish\n for wait_thread in wait:\n wait_thread.join()\n \n return admin_results\n\n\ndef znode_path_join(parts):\n \"\"\"\n # TODO move to utils\n Given a sequence of node segments construct a fully qualified path.\n \n Can join paths from sequences like::\n \n ('/', '/path1/path2', 'path')\n ('path1', 'path2', 'path')\n ('path1', '/path2', '/path')\n \n The output for all 3 examples above will be the path::\n \n /path1/path2/path\n \"\"\"\n if not len(parts):\n raise ValueError('empty path %s' % parts)\n \n parts = [p.strip(ZNODE_PATH_SEPARATOR) for p in parts if p.strip(ZNODE_PATH_SEPARATOR).strip()]\n \n # add leading slash\n parts[0] = ZNODE_PATH_SEPARATOR + parts[0]\n \n return ZNODE_PATH_SEPARATOR.join(parts)\n \ndef znode_path_split(path):\n \"\"\"\n # TODO move to utils\n Given an absolute znode path returns a tuple (directory, filename)\n \n Note that you should-not/cannot use path functions in Python to parse znode paths as they will not\n work cross-platform (Windows).\n \"\"\"\n if not path.startswith(ZNODE_PATH_SEPARATOR):\n raise ValueError('A znode path must be fully qualified and start with: \"%s\", got: %s' % (ZNODE_PATH_SEPARATOR, path))\n \n parts = path.split(ZNODE_PATH_SEPARATOR)[1:]\n return znode_path_join(parts[:-1]), parts[-1]\n\ndef check_ephemeral_dump_consistency(zk_client):\n \"\"\"\n Check the consistency of 'dump' output across Zookeeper hosts\n \n :param zk_client: Zookeeper connection object (KazooClient instance or subclass of);\n should be configured with all ensemble members.\n \"\"\"\n zk_hosts = zk_client.hosts\n dump_results = multi_admin_command(zk_client, b'dump')\n ephemerals_by_host = [parse_admin_dump(item)['ephemerals'] for item in dump_results]\n \n # Flatten the data structure returned by parsing the 'dump' command so that we have\n # a sequence (list) of sets that can be compared using set operations.\n ephemerals_compare = []\n for ephemerals in ephemerals_by_host: # distinct names, so the list is not shadowed by its items\n ephemeral_set = set()\n for session, paths in six.viewitems(ephemerals):\n for path in paths:\n ephemeral_set.add((session, path))\n \n ephemerals_compare.append(ephemeral_set)\n \n # Find all unique sets of indexes to use for comparisons.\n errors = []\n comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(ephemerals_compare)), repeat=2) if pair[0] != pair[1]}\n for idx1, idx2 in comparisons:\n # Set comparison to determine differences between the two hosts\n differences = ephemerals_compare[idx1] ^ ephemerals_compare[idx2]\n if differences:\n errors.append(\n 'ephemeral nodes do not match for host:{host1} and host:{host2}... 
differences: {diff}'.format(\n host1=format_host(zk_hosts[idx1]),\n host2=format_host(zk_hosts[idx2]),\n diff='\\n\\t' + '\\n\\t'.join([six.text_type(entry) for entry in differences])\n )\n )\n \n if not errors:\n log.debug('%s.%s encountered no errors' % (__name__, check_ephemeral_dump_consistency.__name__))\n \n return errors\n\ndef check_watch_sessions_clients(zk_client):\n \"\"\"\n Check watch consistency for client-related files, exclude solr hosts from these tests.\n\n This verifies the watches that clients (solrj) should be watching they in-fact are watching.\n if any client is watching some, but not all client-related files this will result in an exception.\n \"\"\"\n CLIENT_WATCHES = ('/clusterprops.json', '/clusterstate.json', '/aliases.json')\n solr_sessions = get_solr_session_ids(zk_client)\n return check_watch_session_consistency(zk_client, CLIENT_WATCHES, exclude=solr_sessions)\n \ndef check_watch_sessions_solr(zk_client):\n \"\"\"\n Check watch consistency for solr sessions only.\n Each solr server watches a very specific set of paths based on its replicas, online collections etc... \n This function ensures the watches that should exist do.\n \"\"\"\n # TODO, in order to verify this you need to know quite a bit of information about collections,\n # active config sets, etc.\n # - get collections\n # - get state.json for replica, etc info\n # - query solr for collection information\n # - \n \ndef check_watch_sessions_valid(zk_client):\n \"\"\"\n Ensure all watch sessions have a valid connection associated with them.\n\n This works by getting the output of 'wchc' and comparing it to 'cons'.\n \"\"\"\n # TODO finish me\n errors = []\n zk_hosts = zk_client.hosts\n wchc_results = multi_admin_command(zk_client, b'wchc')\n wchc_result_parsed = [parse_admin_wchc(result) for result in wchc_results]\n \n # Get connection/session information\n conn_results = multi_admin_command(zk_client, b'cons')\n conn_data = map(parse_admin_cons, conn_results)\n conn_data = list(itertools.chain.from_iterable(conn_data))\n # Get a set() of all valid zookeeper sessions as integers\n valid_sessions = {con.get('sid') for con in conn_data if 'sid' in con}\n\n errors = []\n\n for host_idx, watch_result in enumerate(wchc_result_parsed):\n for session_id in watch_result.keys():\n if session_id not in valid_sessions:\n zk_host = format_host(zk_hosts[host_idx])\n errors.append(\"zookeeper [%s] watch session [%s] connection missing (stale session?)\" % (zk_host, session_id))\n\n return errors\n\n\ndef check_watch_sessions_duplicate(zk_client):\n \"\"\"\n Ensure no watch session id is represented on more than one server\n This shouldn't happen, and if it does it indicates a hung or invalid watch, which means a session was\n reconnected / reused it chose a different Zookeeper host and the old watch never was cleared.\n Restart the Zookeeper host with the invalid watch / session to resolve this issue.\n\n We will use the ``wchc`` administrative command to get watches by session-id for this check.\n \"\"\"\n errors = []\n zk_hosts = zk_client.hosts\n wchc_results = multi_admin_command(zk_client, b'wchc')\n wchc_result_parsed = [parse_admin_wchc(result) for result in wchc_results]\n\n # First thing to check, do any of the hosts share the same session, this would be a problem.\n comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(zk_hosts)), repeat=2) if pair[0] != pair[1]}\n for idx1, idx2 in comparisons:\n # Set comparison to determine differences between the two hosts\n duplicates = 
set(wchc_result_parsed[idx1].keys()) & set(wchc_result_parsed[idx2].keys())\n if not duplicates:\n log.debug('host:{host1} and host:{host2} have {watch1ct} and {watch2ct} watches, and no duplicates.'.format(\n host1=format_host(zk_hosts[idx1]),\n host2=format_host(zk_hosts[idx2]),\n watch1ct=len(wchc_result_parsed[idx1].keys()),\n watch2ct=len(wchc_result_parsed[idx2].keys())\n ))\n continue\n \n errors.append(\n 'duplicate sessions are present on watches for host:{host1} and host:{host2}... duplicates: {diff}'.format(\n host1=format_host(zk_hosts[idx1]),\n host2=format_host(zk_hosts[idx2]),\n diff='\\n\\t' + '\\n\\t'.join([six.text_type(entry) for entry in duplicates])\n )\n )\n\n return errors\n \ndef check_watch_sessions_present(zk_client, session_watches):\n \"\"\"\n Verify that watches exist on all the paths defined for the sessions defined.\n This function will assume you expect exactly 1 watch to exist per session/path defined.\n \n Note that watches can exist on any Zookeeper server, so we have to check all of the servers\n for watches.\n \n We will use the ``wchc`` administrative command to get watches by session-id for this check.\n \"\"\"\n # TODO finish me\n errors = []\n zk_hosts = zk_client.hosts\n wchc_results = multi_admin_command(zk_client, b'wchc')\n wchc_result_parsed = [parse_admin_wchc(result) for result in wchc_results]\n \n # combine / merge all of the sessions\n session_watches = {}\n for wchc in wchc_result_parsed:\n session_watches.update(wchc)\n \n raise NotImplementedError()\n \ndef check_watch_session_consistency(zk_client, watch_paths, exclude=None, include=None):\n \"\"\"\n Verify watches on the given files are consistent.\n \n This function finds situations where a client is watching perhaps 2 files, when it should be\n watching 3. 
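# --- Editor's note: illustrative aside, stdlib only. ---
# The pairwise-comparison idiom used throughout these checks,
#   {tuple(sorted(p)) for p in itertools.product(range(n), repeat=2) if p[0] != p[1]},
# enumerates every unordered pair of host indexes exactly once and is equivalent
# to itertools.combinations(range(n), 2):
import itertools

n = 3  # e.g. a three-member ensemble
via_product = {tuple(sorted(p)) for p in itertools.product(range(n), repeat=2) if p[0] != p[1]}
assert via_product == set(itertools.combinations(range(n), 2))  # {(0, 1), (0, 2), (1, 2)}
# --------------------------------------------------------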
One of its watches has died or timed out.\n \n :param include: include ONLY these session ids in the check.\n :param exclude: exclude these session ids from the check, useful to exclude yourself, or client\n other client sessions.\n :param watch_paths: A list of fully qualified znode paths that should have consistent \n watch sessions across them.\n \"\"\"\n \n '''\n wchp_result_data will be a dictionary, where each top-level key corresponds to\n a znode, and each value is a dictionary, where the indexes are the Zookeeper hosts indexes\n as defined in zk_client.hosts, and the value is the list of session ids for that host, for that \n znode.\n \n Example::\n \n {\n '/clusterprops.json': {\n host-idx-1: [session-1, session-2, session-3],\n host-idx-2: [session-1, session-2, session-3]\n },\n '/aliases.json': {\n host-idx-1: [session-4, session-5, session-6],\n host-idx-2: [session-4, session-5, session-6]\n }\n }\n '''\n errors = []\n zk_hosts = zk_client.hosts\n wchp_results = multi_admin_command(zk_client, b'wchp')\n wchp_result_parsed = [parse_admin_wchp(result) for result in wchp_results]\n wchp_result_data = defaultdict(dict)\n for path in watch_paths:\n for host_idx in range(len(zk_hosts)):\n if not wchp_result_parsed[host_idx]:\n wchp_result_data[path][host_idx] = []\n else:\n sessions = set(wchp_result_parsed[host_idx].get(path, []))\n if exclude:\n sessions = sessions - set(exclude)\n if include:\n sessions = sessions & set(include)\n wchp_result_data[path][host_idx] = sessions\n\n \n # Check to see if any of the znodes has NO watches across all servers.\n for path, host_sessions in six.viewitems(wchp_result_data):\n # combine all the individual zookeeper hosts results, if this list is empty \n # then no sessions were returned for this path.\n sessions = itertools.chain.from_iterable(host_sessions.values())\n if not sessions:\n errors.append(\n \"no watches exist for znode: %s on %d zookeeper hosts checked\" % (path, len(zk_hosts))\n )\n \n # Find comparison sets, the indexes will be indexes of `watch_paths`\n comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(watch_paths)), repeat=2) if pair[0] != pair[1]}\n \n # Check to ensure each znode contains the same set of session watches.\n for host_idx in range(len(zk_hosts)):\n for idx1, idx2 in comparisons:\n path1 = watch_paths[idx1]\n path2 = watch_paths[idx2]\n differences = set(wchp_result_data[path1][host_idx]) ^ set(wchp_result_data[path2][host_idx])\n if differences:\n errors.append(\n '{zk_host} sessions watches do not match across files:{file1} and {file2}... 
differences: {diff}'.format(\n zk_host = format_host(zk_hosts[host_idx]),\n file1=path1,\n file2=path2,\n diff='\\n\\t' + '\\n\\t'.join([six.text_type(entry) for entry in differences])\n )\n )\n \n if not errors:\n log.debug('%s.%s encountered no errors' % (__name__, check_watch_session_consistency.__name__))\n\n return errors\n\ndef check_zookeeper_connectivity(zk_client, min_timeout=2):\n \"\"\"\n Check zookeeper connectivity responsiveness\n \"\"\"\n kazoo_client_cache_enable(False)\n clients = kazoo_clients_from_client(zk_client)\n # ensure all the clients are connected\n errors = []\n for timeout in range(min_timeout, 10):\n connected = kazoo_clients_connect(clients, continue_on_error=True)\n if connected < len(clients):\n errors.append('%d clients unable to connect within %d secs' % (\n len(clients) - connected, timeout))\n else:\n break\n \n kazoo_client_cache_enable(True)\n return errors\n \ndef get_async_ready(asyncs):\n \"\"\"\n Given a dictionary containing async objects wait for them all to become ready before returning.\n \n :param asyscs: asynchronous requests that have been started.\n \n Asyncs structure is::\n \n {\n arg_0: {\n 0: kazoo.interfaces.IAsyncResult\n 1: kazoo.interfaces.IAsyncResult\n 2: kazoo.interfaces.IAsyncResult\n },\n arg_1: {\n 0: kazoo.interfaces.IAsyncResult\n 1: kazoo.interfaces.IAsyncResult\n 2: kazoo.interfaces.IAsyncResult\n },\n }\n \"\"\"\n while True:\n ready = []\n for asyncs_per_host in asyncs.values():\n async_results = asyncs_per_host.values()\n ready.extend(map(lambda a: a.ready(), async_results))\n \n if all(ready):\n break\n \n return True\n \ndef get_async_call_per_host(zk_client, args, call):\n \"\"\"\n :param args: arguments to pass into ``call``, this should be a list of znode paths for example.\n :param call: a callable that accepts two arguments (KazooClient, arg)\n where arg is an entry from args\n \n ``call`` should usually be a lambda such as::\n \n lambda c, arg: c.get(arg)\n \n returns a dictionary like::\n \n {\n arg_0: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n arg_1: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n }\n \"\"\"\n clients = kazoo_clients_from_client(zk_client)\n kazoo_clients_connect(clients)\n \n asyncs = defaultdict(dict)\n for arg in args:\n for client_idx, client in enumerate(clients):\n asyncs[arg][client_idx] = call(client, arg)\n \n # block until the calls complete\n get_async_ready(asyncs)\n \n results = defaultdict(dict)\n for arg, host_async in six.viewitems(asyncs):\n for host_idx, async_result in six.viewitems(host_async):\n results[arg][host_idx] = async_result.exception or async_result.get()\n \n return results\n\ndef get_async_result_tuples(results):\n \"\"\"\n Given a dictionary like::\n\n {\n arg_0: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n arg_1: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n }\n\n Return a list, composed of tuples of (host, arg, result)\n where arg is the input argument, host is the host index and\n result is the response/result object from the zookeeper api call\n\n Any results that contain exception objects / errors are ignored.\n\n :param result: A result set dictionary as returned from ``get_async_call_per_host``\n :returns: ``list``\n \"\"\"\n if not isinstance(results, dict):\n raise ValueError('\"result\" must be dict, got: %s' % type(dict))\n\n items = []\n\n 
for arg, host_result in six.viewitems(results):\n items.extend([(host, arg, result) for host, result in six.viewitems(host_result) if not isinstance(result, Exception)])\n\n return items\n\ndef get_async_input_result_set(result):\n \"\"\"\n Given an async result dictionary... gets input and result tuples as a set.\n Use this function when you want a flat set of results and don't care about \n inspecting the result per-host. You just want a unique set of responses.\n\n Given a dictionary like::\n\n {\n arg_0: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n arg_1: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n }\n\n Return a set, composed of all the result objects combined.\n\n Any results that contain exception objects / errors are ignored.\n\n :param result: A result set dictionary as returned from ``get_async_call_per_host``\n :returns: a ``set`` of tuples (arg, result), automatically excludes Exceptions/Errors from results.\n \"\"\"\n if not isinstance(result, dict):\n raise ValueError('\"result\" must be dict, got: %s' % type(result))\n \n result_items = get_async_result_tuples(result)\n return {(arg, response) for host, arg, response in result_items}\n \ndef get_async_result_set(result):\n \"\"\"\n Similar to ``get_async_input_result_set()``, but only returns the result object.\n\n Use this function when you only care about the unique result values, and don't need to \n know about their inputs, or errors/exceptions.\n\n :param result: dictionary, async input result\n :returns: a set of results. \n \"\"\"\n result_tuple_set = get_async_input_result_set(result)\n return {response for arg, response in result_tuple_set}\n\ndef filter_chroot(chroot, paths):\n \"\"\"\n Takes a sequence of paths and returns only those that fall under the given chroot,\n with the chroot prefix removed from each returned path.\n\n :param chroot: Your zk connection's chroot\n \"\"\"\n if not chroot:\n return paths\n\n chroot = '/' + chroot.lstrip('/') # normalize to exactly one leading slash\n filtered = []\n for path in paths:\n if path.startswith(chroot):\n # use a slice, not str.lstrip(): lstrip() strips a character *set*, not a prefix\n filtered.append(path[len(chroot):])\n\n return filtered\n \ndef get_ephemeral_paths_children_per_host(zk_client):\n \"\"\"\n Returns a dictionary mapping znode_directory to a list of lists containing children for each node\n queried.\n\n Note that if your zk_client is configured with a chroot, ONLY ephemerals matching that chroot will be returned.\n\n This function works by querying all zookeeper servers for their 'dump' output. 
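# --- Editor's note: why filter_chroot() above slices instead of lstrip(). ---
# str.lstrip() strips a *set of characters*, not a prefix, so it silently
# mangles paths that begin with any character of the chroot:
chroot = '/solr'
path = '/solr/live_nodes'
assert path.lstrip(chroot) == 'ive_nodes'   # character-set stripping: wrong
assert path[len(chroot):] == '/live_nodes'  # prefix slice: the intended result
# ---------------------------------------------------------------------------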
This output is combined, and a unique\n set of directories are determined from the output of each servers dump.\n From here the children of each directory are queried.\n \n returns a dictionary like::\n \n {\n arg_0: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n arg_1: {\n 0: result or exception obj\n 1: result or exception obj \n 2: result or exception obj \n },\n }\n \"\"\"\n chroot = zk_client.chroot\n # get 1 KazooClient per Zookeeper host.\n clients = kazoo_clients_from_client(zk_client)\n # ensure all the clients are connected\n kazoo_clients_connect(clients)\n \n ephemeral_directories = set()\n dump_results = multi_admin_command(zk_client, b'dump')\n \n \n # We'll combine all of the results from each available servers dump result.\n # the results from each server should be identical, but just in case we'll combine all results.\n ephemeral_znodes = []\n for host_result in dump_results:\n # parse the output from dump, and get the znodes list from each output\n ephemeral_znodes.extend([znodes for session, znodes in six.viewitems(parse_admin_dump(host_result)['ephemerals'])])\n # flatten the list of lists\n ephemeral_znodes = sorted(set(itertools.chain.from_iterable(ephemeral_znodes)))\n # filter the znodes paths for paths that contain only our chroot. If there are many applications using this ZK cluster\n # only apply our checks to paths that are associated with the current connection string / chroot.\n ephemeral_matching = filter_chroot(chroot, ephemeral_znodes)\n log.debug(\"ephemeral paths resolved from 'dump': %d, ...\\n%s\" % (len(ephemeral_znodes), pformat(ephemeral_znodes)))\n log.debug(\"ephemeral paths matching chroot: %d, ...\\n%s\" % (len(ephemeral_matching), pformat(ephemeral_matching)))\n # We assume that all the znodes that are ephemeral from the 'dump' command are files.\n # We then calculate a set of all directories to examine children in.\n ephemeral_directories = []\n for znode in ephemeral_matching:\n if not znode or znode.strip() == ZNODE_PATH_SEPARATOR:\n log.warn('a znode returned from `dump` is unexpectedly empty: \"%s\", the output of dump is: %s' % (znode, dump_results))\n try:\n ephemeral_directories.append(znode_path_split(znode)[0])\n except Exception as e:\n log.error('exception while getting znode path: \"%s\", the output of dump is: %s' % (znode, dump_results))\n continue\n \n ephemeral_directories = sorted(set(ephemeral_directories))\n log.debug(\"ephemeral directories matching chroot: %d, ...\\n%s\" % (len(ephemeral_directories), pformat(ephemeral_directories)))\n \n def call(client, znode):\n return client.get_children_async(znode)\n \n results = get_async_call_per_host(zk_client, ephemeral_directories, call)\n \n return results\n \n \ndef get_async_call_per_host_errors(zk_client, async_result, ignore=None):\n \"\"\"\n Return a list of errors contained within the response of ephemeral_children\n \n :param zk_client: Zookeeper connection object (KazooClient instance or subclass of)\n start() will be called internally when the connection is used.\n The connection instance should be configured with the hosts that are\n members of the ensemble.\n \n :param async_result: The response from ``get_async_call_per_host()``\n :param ignore: Ignore these exception objects.\n :returns: ``list``\n \"\"\"\n hosts = zk_client.hosts\n # note that 'cb' is a kazoo.interfaces.IAsyncResult\n errors = []\n for arg, host_result in six.viewitems(async_result):\n for client_idx, result in six.viewitems(host_result):\n if 
isinstance(result, Exception):\n exception = result\n if ignore and any([isinstance(exception, exc) for exc in ignore]):\n log.debug('ignore error class: [%s] %s' % (exception.__class__.__name__, exception))\n continue\n\n # see if this one is an error.\n errors.append(\n \"error from host: %s for input: [%s], error: (%s) %s\" % (\n format_host(hosts[client_idx]),\n arg,\n exception.__class__.__name__,\n str(exception)\n )\n )\n continue\n \n return errors\n \ndef get_async_result_paths_chilren_per_host(children):\n \"\"\"\n Combines arguments, with returned lists to get a unique sequence of \n full qualified Zookeeper paths, for all the children of the directories\n queried.\n\n Returns a simple sorted list of unique paths returned by ``get_async_call_per_host``\n when the call performed is ``get_children()``\n \n :param children: A structure as returned from ``get_async_call_per_host()``\n \"\"\"\n paths = set()\n\n for parent_path, host_children in six.viewitems(children):\n for client_idx, child_paths in six.viewitems(host_children):\n if isinstance(child_paths, Exception):\n continue\n for child_path in child_paths:\n paths.add(znode_path_join([parent_path, child_path]))\n \n return sorted(paths)\n \ndef check_ephemeral_znode_consistency(zk_client):\n \"\"\"\n For all ephemeral znodes check to ensure their directories are consistent across hosts,\n as well as the content of each node, and their ephemral session / owner.\n \n :param zk_client: Zookeeper connection object (KazooClient instance or subclass of)\n start() will be called internally when the connection is used.\n The connection instance should be configured with the hosts that are\n members of the ensemble.\n \"\"\"\n # Connect to each Zookeeper Host\n zk_hosts = zk_client.hosts\n clients = kazoo_clients_from_client(zk_client)\n kazoo_clients_connect(clients)\n \n children_results = get_ephemeral_paths_children_per_host(zk_client)\n errors = get_async_call_per_host_errors(zk_client, children_results)\n child_paths = get_async_result_paths_chilren_per_host(children_results)\n \n # Check the children are consistent across hosts for each node queried.\n log.debug('checking %d paths that contain ephemerals for consistent children' % len(set(children_results.keys())))\n for parent_path, host_children in six.viewitems(children_results):\n children_sets = [frozenset(host_children[idx]) for idx in range(len(host_children)) if not isinstance(host_children[idx], Exception)]\n comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(children_sets)), repeat=2) if pair[0] != pair[1]}\n for idx1, idx2 in comparisons:\n # Set comparison to determine differences between the two hosts\n differences = children_sets[idx1] ^ children_sets[idx2]\n if not differences:\n continue\n \n errors.append(\n 'ephemeral path [{parent}] contains inconsistent child nodes for host:{host1} and host:{host2}... 
differences: {diff}'.format(\n parent=parent_path,\n host1=format_host(zk_hosts[idx1]),\n host2=format_host(zk_hosts[idx2]),\n diff='\\n\\t' + '\\n\\t'.join([six.text_type(entry) for entry in differences])\n )\n )\n \n def call(client, znode):\n return client.get_async(znode)\n \n log.debug('checking %d paths for consistent content / stats' % len(child_paths))\n znode_results = get_async_call_per_host(zk_client, child_paths, call)\n errors.extend(get_async_call_per_host_errors(zk_client, znode_results))\n \n for path, host_results in six.viewitems(znode_results):\n znode_content_sets = [ frozenset(['line %d: %s' % (lidx, line) for lidx, line in enumerate((host_results[idx][0] or '').splitlines())])\n for idx in range(len(host_results))\n if not isinstance(host_results[idx], Exception)]\n comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(znode_content_sets)), repeat=2) if pair[0] != pair[1]}\n for idx1, idx2 in comparisons:\n # Set comparison to determine differences between the two hosts\n differences = znode_content_sets[idx1] ^ znode_content_sets[idx2]\n if not differences:\n continue\n \n errors.append(\n 'ephemeral path [{path}] contains inconsistent content for host:{host1} and host:{host2}... differences: {diff}'.format(\n path=path,\n host1=format_host(zk_hosts[idx1]),\n host2=format_host(zk_hosts[idx2]),\n diff='\\n\\t' + '\\n\\t'.join([six.text_type(entry) for entry in sorted(differences)])\n )\n )\n \n # Check that the znodes have a consistent ephemeral owner session id.\n znode_ephemeral_sets = [frozenset([getattr(host_results[idx][1], 'ephemeralOwner', None)]) for idx in range(len(host_results)) if not isinstance(host_results[idx], Exception)]\n comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(znode_ephemeral_sets)), repeat=2) if pair[0] != pair[1]}\n for idx1, idx2 in comparisons:\n # Set comparison to determine differences between the two hosts\n differences = znode_ephemeral_sets[idx1] ^ znode_ephemeral_sets[idx2]\n if not differences:\n continue\n \n errors.append(\n 'ephemeral path [{path}] contains inconsistent ephemeral owner for host:{host1} and host:{host2}... differences: {diff}'.format(\n path=path,\n host1=format_host(zk_hosts[idx1]),\n host2=format_host(zk_hosts[idx2]),\n diff='\\n\\t' + '\\n\\t'.join([six.text_type(entry) for entry in sorted(differences)])\n )\n )\n \n if not errors:\n log.debug('%s.%s encountered no errors' % (__name__, check_ephemeral_znode_consistency.__name__))\n \n return errors\n \ndef check_ephemeral_sessions_fast(zk_client):\n \"\"\"\n Fast ephemeral session check, ensure all ephemeral paths contain valid ephemeral \n znodes with valid sessions. 
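# --- Editor's note: illustrative sketch; assumes a reachable test ensemble. ---
# The ephemeral checks in this module hinge on one Zookeeper fact: the stat of an
# ephemeral znode records the owning session id in `ephemeralOwner`, and that id
# must correspond to a live session somewhere on the ensemble. A minimal kazoo
# demonstration (the host string and znode path are placeholders):
from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
zk.create('/healthcheck-demo', b'', ephemeral=True, makepath=True)
_value, stat = zk.get('/healthcheck-demo')
session_id, _password = zk.client_id
assert stat.ephemeralOwner == session_id  # the owner is our own session
zk.stop()
# -----------------------------------------------------------------------------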
The check is performed for each Zookeeper host\n \n This is a fast version, because instead of exhaustively walking all paths to discover all\n ephemerals, it uses 'dump' output to make assumptions about znode paths that contain ephemerals.\n \n :param zk_client: Zookeeper connection object (KazooClient instance or subclass of)\n start() will be called internally when the connection is used.\n The connection instance should be configured with the hosts that are\n members of the ensemble.\n \"\"\"\n # Connect to each Zookeeper Host\n clients = kazoo_clients_from_client(zk_client)\n kazoo_clients_connect(clients)\n\n children_results = get_ephemeral_paths_children_per_host(zk_client)\n errors = get_async_call_per_host_errors(zk_client, children_results)\n child_paths = get_async_result_paths_chilren_per_host(children_results)\n \n \n # Get connection/session information\n conn_results = multi_admin_command(zk_client, b'cons')\n conn_data = map(parse_admin_cons, conn_results)\n conn_data = list(itertools.chain.from_iterable(conn_data))\n # Get a set() of all valid zookeeper sessions as integers\n valid_sessions = {con.get('sid') for con in conn_data if 'sid' in con}\n log.debug('found %d active sessions across %d ensemble members' % (len(valid_sessions), len(clients)))\n\n def call(client, znode):\n return client.get_async(znode)\n \n znode_results = get_async_call_per_host(zk_client, child_paths, call)\n\n for path, host_results in six.viewitems(znode_results):\n for host_idx, result in six.viewitems(host_results):\n if isinstance(result, Exception):\n exception = result\n # see if this one is an error.\n errors.append(\n \"error from host: %s, path: %s, error: (%s) %s\" % (\n format_host(zk_client.hosts[host_idx]),\n path,\n exception.__class__.__name__,\n str(exception)\n )\n )\n else:\n content, stats = result\n ephemeral_session = getattr(stats, 'ephemeralOwner', None)\n if not ephemeral_session:\n continue\n \n if ephemeral_session not in valid_sessions:\n errors.append(\n \"error from host: %s, ephemeral path: %s, session-id: [%s] does not exist on any Zookeeper server\" % (\n format_host(zk_client.hosts[host_idx]), \n path,\n ephemeral_session\n )\n )\n else:\n log.debug('host %s path %s has valid session: %d' % (format_host(zk_client.hosts[host_idx]), path, ephemeral_session))\n \n if not errors:\n log.debug('%s.%s encountered no errors' % (__name__, check_ephemeral_sessions_fast.__name__))\n \n return errors\n \n\ndef get_solr_session_ids(zk_client):\n \"\"\"\n Find zookeeper-client sessions across ensemble nodes that are solr servers\n \"\"\"\n # query live-nodes, to get sessions that belong to Solr hosts.\n def call(client, znode):\n return client.get_children_async(znode)\n \n children_results = get_async_call_per_host(zk_client, ['/live_nodes'], call)\n \n errors = []\n if not children_results:\n raise RuntimeError('No live nodes exist on the %d zookeeper hosts checked' % len(zk_client.hosts))\n \n live_nodes = sorted(set(itertools.chain.from_iterable(children_results[LIVE_NODES_PATH].values())))\n live_nodes = [znode_path_join([LIVE_NODES_PATH, node]) for node in live_nodes if not isinstance(node, Exception)]\n \n def call(client, znode):\n return client.get_async(znode)\n \n live_results = get_async_call_per_host(zk_client, live_nodes, call)\n \n if not live_results:\n raise ValueError('znode get() for live_nodes failed to return any results, input: %s' % pformat(live_nodes))\n \n errors.extend(get_async_call_per_host_errors(zk_client, live_results))\n \n live_node_sessions = 
[[getattr(content_stats[1], 'ephemeralOwner', None) for content_stats in live_node.values() if not isinstance(content_stats, Exception)] \n for live_node in live_results.values()]\n live_node_sessions = sorted(set(itertools.chain.from_iterable(live_node_sessions)))\n live_node_sessions = [session_id for session_id in live_node_sessions if session_id is not None]\n \n if errors and not live_node_sessions:\n raise RuntimeError(errors)\n elif errors:\n log.warn(errors)\n \n return live_node_sessions\n\ndef get_solrj_session_ids(zk_client):\n \"\"\"\n Find zookeeper-client sessions across ensemble nodes that are solrj clients. \n\n Clients are identified based on their watches.\n \"\"\"\n\ndef get_zookeeper_collections(zk_client):\n \"\"\"\n Discover the collections that exist by name in Zookeeper.\n \"\"\"\n pass\n\ndef get_zookeeper_collections_state(zk_client):\n \"\"\"\n Get a json object for state.json for each Zookeeper collection.\n \"\"\"\n \ndef get_solr_connection_affinitiy(zk_client):\n \"\"\"\n Get information about which zookeeper nodes solr is connected to. \n Solr randomly picks a Zookeeper node to connect to, which means this is \n where its watches and ephemeral sessions will come from.\n\n If this host ever experiences an issue, the solr host will have an issue.\n In order to understand the severity and impact of a Zookeeper host degredation \n you must understand Solr host affinitiy.\n \"\"\"\n\n\ndef get_solrj_connection_affinity(zk_client):\n \"\"\"\n Get information about where SolrJ clients are connected\n\n This shows information about SolrJ clients, the ip-addresses they are comming from, and the Zookeeper\n hosts they are connected to, as well as an overview of their session ids. \n Because multiple sessions can be associated with a single ip address, we will group clients by remote IP address.\n \"\"\"\n \ndef check_solr_live_nodes(zk_client):\n \"\"\"\n Check that live nodes are all present and consistent\n \n If a collection/replica refers to a node not in the live-nodes list, then thats a problem.\n \"\"\"\n # TODO finish me\n def call(client, znode):\n return client.get_children_async(znode)\n \n children_results = get_async_call_per_host(zk_client, [LIVE_NODES_PATH], call)\n \n errors = []\n if not children_results:\n errors.append('No live nodes exist on the %d zookeeper hosts checked' % len(zk_client.hosts))\n \n raise NotImplementedError()\n \ndef check_solr_administration(zk_client):\n \"\"\"\n Ensure the solr administrative page is reachable / responsive.\n \"\"\"\n raise NotImplementedError()\n\ndef check_solr_query_handler(zk_client):\n \"\"\"\n Ensure the solr query handlers are responsive\n \"\"\"\n raise NotImplementedError()\n \ndef check_solr_cluster_status(zk_client):\n \"\"\"\n Ensure the solr cluster status reports no downed replicas, etc.\n \"\"\"\n raise NotImplementedError()\n \n\n\ndef check_zxid_consistency(zk_client):\n \"\"\"\n \"\"\"\n raise NotImplementedError()\n\ndef check_myid_sequentiality(zk_client):\n \"\"\"\n \"\"\"\n raise NotImplementedError()\n\ndef check_mode_output(zk_client):\n \"\"\"\n \"\"\"\n raise NotImplementedError()\n \ndef check_collection_state(zk_client):\n \"\"\"\n Check to ensure all state.json files are present, and contain no down states.\n \"\"\"\n raise NotImplementedError()\n\ndef check_collections_state_consistency(zk_client):\n \"\"\"\n Check that collections, shards, replicas, and core states are consistent between solr and zookeeper\n Check the core API to ensure the core is healthy in addition to the 
Replica.\n \"\"\"\n raise NotImplementedError()\n\n\n\ndef get_znode_paths_age(zk_client, znodes, coalesce=max):\n \"\"\"\n Given a sequence of znode paths return a map of all znode paths in the directory and the objects'\n modified or creation date.\n\n :param zk_client: A KazooClient object\n :param znodes: a sequence of paths to get stats for. The stats will be gathered\n using async operations as quickly as possible.\n :param coalesce: a function that accepts a sequence and coalesces between them;\n the default behavior is to find the (max) of the values.\n If you provide ``None`` for coalesce, individual results from each\n Zookeeper host will be returned.\n \"\"\"\n raise NotImplementedError()\n \ndef get_znode_children_counts(zk_client, znodes, coalesce=max):\n \"\"\"\n Get zookeeper child counts. All zookeeper servers are queried, and the function given as ``coalesce`` \n is used to coalesce the results into a scalar.\n\n :param zk_client: A KazooClient object\n :param znodes: a sequence of paths to get stats for. The stats will be gathered\n using async operations as quickly as possible.\n :param coalesce: a function that accepts a sequence and coalesces between them;\n the default behavior is to find the (max) of the values.\n If you provide ``None`` for coalesce, individual results from each\n Zookeeper host will be returned.\n \n \"\"\"\n # None is a documented sentinel meaning \"return the raw per-host lists\", so only\n # reject values that are neither None nor callable.\n if coalesce is not None and not callable(coalesce):\n raise ValueError('\"coalesce\" must be a callable got: %s' % type(coalesce))\n\n child_paths = sorted(set(znodes))\n # zstats.numChildren\n def call(client, znode):\n return client.get_async(znode)\n \n log.debug('retrieving child node counts for %d paths' % len(child_paths))\n znode_results = get_async_call_per_host(zk_client, child_paths, call)\n errors = get_async_call_per_host_errors(zk_client, znode_results)\n if errors:\n log.warn('errors while retrieving child counts: %s' % errors)\n results = get_async_result_tuples(znode_results)\n\n paths_stats = defaultdict(list)\n for host, path, response in results:\n contents, stats = response\n paths_stats[path].append(getattr(stats, 'numChildren', 0))\n\n if coalesce is None:\n return paths_stats\n\n stats = {}\n stats.update({k: coalesce(v) for k, v in six.viewitems(paths_stats)})\n\n return stats\n\n\ndef get_zookeeper_time(zk_client):\n \"\"\"\n Get the current time on the Zookeeper servers.\n Returns a list of responses for each zk host defined in zk_client.\n \"\"\"\n # zk.set(\"/temp/timetest\", b\"some data\")\n # zk.get()\n\ndef check_server_time_consistency(zk_client):\n \"\"\"\n Check the drift between local time on Zookeeper ensemble members. \n If the time is off, bad things will happen.\n \"\"\"\n times = get_zookeeper_time(zk_client)\n raise NotImplementedError()\n\n\n\ndef check_queue_age(zk_client, threshold=datetime.timedelta(minutes=5)):\n \"\"\"\n Check to ensure timestamps of items in QUEUE paths are not too old.\n\n :param zk_client: KazooClient\n :param threshold: a datetime.timedelta() object controlling the maximum age of a file.\n \"\"\"\n # TODO find a way to get a consistent NOW from the server. \n # create a file and delete it? \n # get all the child paths of the queue directories\n # then combine the child paths and query for their stats\n raise NotImplementedError()\n\n\ndef check_queue_sizes(zk_client, threshold=5):\n \"\"\"\n For the most part queues should be empty. 
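# --- Editor's note: usage sketch; the connection string is a placeholder. ---
# How the queue checks compose: get_znode_children_counts() above asks every
# ensemble member for the child count of each queue path and coalesces the
# per-host answers with the supplied callable (max by default, None to keep the
# raw per-host lists); check_queue_sizes() below flags any queue whose coalesced
# count exceeds the threshold.
from kazoo.client import KazooClient

zk = KazooClient(hosts='zk1:2181,zk2:2181,zk3:2181/solr')
counts = get_znode_children_counts(zk, ZK_QUEUE_PATHS)          # {path: max count}
per_host = get_znode_children_counts(zk, ZK_QUEUE_PATHS, None)  # {path: [c1, c2, ...]}
for error in check_queue_sizes(zk, threshold=5):
    print(error)
# ----------------------------------------------------------------------------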
If they contain more than a given number of \n entries, return information.\n\n :param threshold: ``int`` the max number of children a queue can contain before an error is raised.\n \"\"\"\n\n errors = []\n stats = get_znode_children_counts(zk_client, ZK_QUEUE_PATHS)\n if stats is None:\n # defensive check; validate the result before using it below\n raise ValueError(\"get_znode_children_counts() returned None\")\n missing = set(stats.keys()) ^ set(ZK_QUEUE_PATHS)\n for path in missing:\n errors.append(\"queue path [%s] is missing\" % path)\n\n for path, max_children in six.viewitems(stats):\n if max_children > threshold:\n errors.append(\n \"queue [%s] is backed up with: %d children, error threshold: %d\" % (path, max_children, threshold)\n )\n\n return errors\n\n\ndef check_overseer_election(zk_client):\n \"\"\"\n Overseer election contains znodes to help with the election of the overseer within Zookeeper.\n\n This check works by comparing Solr connected clients, and their session id to the \n overseer elect entries. There should be exactly 1 entry per zookeeper host. \n The name of the overseer elect entries and their contents are also verified.\n\n /overseer_elect/leader is a file with the contents like::\n\n {\"id\":\"98720987344797722-10.51.65.147:8983_solr-n_0000000000\"}\n\n /overseer_elect/election contains entries like::\n\n 242836175563980834-10.51.64.64:8983_solr-n_0000000001\n 98720987344797722-10.51.65.147:8983_solr-n_0000000000\n \"\"\"\n errors = []\n\n # TODO its a common pattern to get all the children of a directory using get_children() and then retrieve the \n # node data and stats for each child, so maybe move this to a utility method\n # its also common to compare the per-host result for differences, that can be abstracted as well.\n def call(client, znode):\n return client.get_children_async(znode)\n \n children_results = get_async_call_per_host(zk_client, [LIVE_NODES_PATH, OVERSEER_ELECT_ELECTION_PATH], call)\n\n child_paths = get_async_result_paths_chilren_per_host(children_results)\n # we also need to get the contents of the leader file.\n child_paths.append(OVERSEER_ELECT_LEADER_PATH)\n \n\n def call(client, znode):\n return client.get_async(znode)\n \n node_result = get_async_call_per_host(zk_client, child_paths, call)\n # get tuples of (path, result)\n # result is a tuple of (contents, stats)\n node_data = get_async_input_result_set(node_result)\n live_nodes = {znode_path_split(path)[1]:result for path, result in node_data if path.startswith(LIVE_NODES_PATH)}\n election_nodes = {znode_path_split(path)[1]:result for path, result in node_data if path.startswith(OVERSEER_ELECT_ELECTION_PATH)}\n overseer_leader = [result for path, result in node_data if path == OVERSEER_ELECT_LEADER_PATH]\n overseer_data_unique = {result[0] for result in overseer_leader}\n live_node_sessions = {getattr(lnode[1], 'ephemeralOwner', None) for lnode in live_nodes.values()}\n\n # Test that the overseer leader is logical.\n if not overseer_leader:\n errors.append(\"no overseer leader path exists at: %s\" % OVERSEER_ELECT_LEADER_PATH)\n elif len(overseer_data_unique) > 1:\n errors.append(\"overseer leader [%s] contents vary: [%s]\" % (OVERSEER_ELECT_LEADER_PATH, ', '.join(six.text_type(c) for c in overseer_data_unique)))\n else:\n overseer_contents, overseer_stats = overseer_leader[0]\n overseer_data = {}\n try:\n overseer_data = json.loads(overseer_contents)\n except ValueError as e:\n errors.append(\"overseer leader %s contains invalid json [%s] - %s\" % (OVERSEER_ELECT_LEADER_PATH, overseer_contents, e))\n\n if 'id' not in overseer_data:\n errors.append(\"overseer leader %s contains invalid json [%s] - missing 'id' field\" % (OVERSEER_ELECT_LEADER_PATH, overseer_contents))\n else:\n overseer_id = overseer_data['id'].strip()\n if overseer_id not in election_nodes:\n errors.append(\"overseer leader %s election node %s not present in overseers: [%s]\" % (OVERSEER_ELECT_LEADER_PATH, overseer_id, ', '.join(election_nodes.keys())))\n else:\n leader_owner = getattr(overseer_stats, 'ephemeralOwner', None)\n election_node_owner = getattr(election_nodes[overseer_id][1], 'ephemeralOwner', None)\n if leader_owner != election_node_owner:\n errors.append(\n \"overseer leader %s session: [%s] does not match the corresponding election node session: [%s]\"\n % (OVERSEER_ELECT_LEADER_PATH, leader_owner, election_node_owner))\n \n # Test that overseer election members are logical\n if len(live_nodes) != len(election_nodes):\n errors.append(\"election members, and live nodes, are different: LIVE:[%s] ELECTION:[%s]\" % (', '.join(live_nodes.keys()),', '.join(election_nodes.keys())))\n\n election_node_sessions = set()\n election_node_hosts = set()\n election_node_debug = [] # maps node name to session for debug.\n for nodename, data in six.viewitems(election_nodes):\n session, solrnode, queueid = nodename.split('-')\n session = int(session)\n election_node_sessions.add(session)\n election_node_hosts.add(solrnode)\n\n election_node_debug.append('%s=>%d' % (nodename, session))\n \n\n node_stats = data[1]\n owner_session = getattr(node_stats, 'ephemeralOwner', None)\n\n if solrnode not in live_nodes:\n errors.append(\"election member [%s] references a solr-host [%s] not associated with any valid live_node\" % (nodename, solrnode))\n\n if owner_session is None:\n errors.append(\"election member [%s] is not ephemeral, this node is invalid\" % nodename)\n else:\n if session not in live_node_sessions:\n errors.append(\"election member [%s] references a session [%s] not associated with any valid live_node\" % (nodename, session))\n\n if owner_session != session:\n errors.append(\"election member [%s] node-name session is not the same as owning session: %s != %s\" % (nodename, session, owner_session))\n\n # Check that live-nodes are accounted for in election members.\n for livenode, data in six.viewitems(live_nodes):\n\n node_stats = data[1]\n owner_session = getattr(node_stats, 'ephemeralOwner', None)\n\n if livenode not in election_node_hosts:\n errors.append(\"live_node %s does not exist within any election member: %s\" % (livenode, ', '.join(election_node_hosts)))\n\n if owner_session not in election_node_sessions:\n errors.append(\"live_node %s session owner: [%s] is not referenced within any election member: [%s]\" % (livenode, owner_session, ', '.join(election_node_debug)))\n\n return errors\n\ndef get_exception_traceback():\n ex_type, ex, tb = sys.exc_info()\n exception_info = \" ** (%s) %s - %s \" % (ex_type, ex, \";\\n\".join(traceback.format_tb(tb, 10)))\n del tb\n return exception_info\n\ndef check_ensemble_for_complex_errors(zk_client):\n \"\"\"\n This function does several complex checks: \n * Checks zookeeper connectivity.\n * Checks ephemeral nodes.\n * Checks watches.\n\n :returns: a sequence of errors, empty list if there were no errors.\n \"\"\"\n errors = []\n\n try:\n errors.extend(check_zookeeper_connectivity(zk_client))\n except Exception as e:\n errors.extend([get_exception_traceback()])\n\n try:\n errors.extend(check_ephemeral_sessions_fast(zk_client))\n except Exception as e:\n errors.extend([get_exception_traceback()])\n\n try:\n 
    checks = (\n        check_zookeeper_connectivity,\n        check_ephemeral_sessions_fast,\n        check_ephemeral_znode_consistency,\n        check_ephemeral_dump_consistency,\n        check_watch_sessions_clients,\n        check_watch_sessions_duplicate,\n        check_queue_sizes,\n        check_watch_sessions_valid,\n        check_overseer_election,\n    )\n    for check in checks:\n        try:\n            errors.extend(check(zk_client))\n        except Exception:\n            errors.append(get_exception_traceback())\n\n    return errors","repo_name":"bendemott/solr-zkutil","sub_path":"solrzkutil/healthy.py","file_name":"healthy.py","file_ext":"py","file_size_in_byte":54372,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"17"}
+{"seq_id":"24634678443","text":"from stack_impl import Stack\n\ndef next_greater(arr):\n    # Prints, for each element, its next greater element to the right (-1 if none).\n    # Results are emitted in the order elements are resolved, not in input order.\n    stack = Stack()\n    stack.push(arr[0])\n    res = []\n    for i in range(1, len(arr)):\n        next_val = arr[i]\n        if not stack.is_empty():\n            elem = stack.pop()\n            while elem < next_val:\n                res.append(next_val)\n                if stack.is_empty():\n                    break\n                elem = stack.pop()\n\n            if elem >= next_val:  # push back the unresolved element (>= keeps duplicates)\n                stack.push(elem)\n\n        stack.push(next_val)\n\n    # everything still on the stack has no greater element to its right\n    while not stack.is_empty():\n        elem = stack.pop()\n        res.append(-1)\n\n    print(res)\n\na = [16, 17, 4, 3, 5, 2]\nnext_greater(a)\nnext_greater([6, 12, 4, 1, 2, 111, 2, 2, 10])\nnext_greater([11, 13, 21, 3, 4, 2])\nnext_greater([4, 5, 2, 25])\n","repo_name":"Jon-J/code_practice","sub_path":"algo/stack/replace_every_element_next_greatest.py","file_name":"replace_every_element_next_greatest.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"}
+{"seq_id":"16653047720","text":"import pandas as pd\n\n\ndef getLikedMovies():\n    return user_ratings.loc[user_ratings['rating'] > 3]\n\n# 1) Create a user profile based on the genres of the movies.\n# Count how often each movie genre appeared in the set of the movies\n# that the user has liked (i.e., when the rating is greater than 3).\ndef createUserProfile():\n    # ratings of 3 or worse were already dropped by getLikedMovies()\n
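    # For illustration (hypothetical row): a liked movie with genres \"Action|Comedy\"\n    # explodes into two rows, one with genres=\"Action\" and one with genres=\"Comedy\",\n    # so every liked genre is counted once per liked movie.\n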
    # split the \"genre1|genre2|...\" strings into separate rows (the other column values are duplicated)\n    user_ratings_filtered_split = user_ratings_filtered\n    user_ratings_filtered_split['genres'] = user_ratings_filtered['genres'].str.split(\"|\")\n    #print(user_ratings_filtered_split.to_string())\n    user_ratings_filtered_split = user_ratings_filtered_split.explode('genres')\n    #print(user_ratings_filtered_split.to_string())\n\n    # count ratings per genre\n    counted_ratings_per_genre = user_ratings_filtered_split.groupby('genres')['genres'].count()\n    #print(counted_ratings_per_genre)\n    return counted_ratings_per_genre\n\n# 2) Determine the similarity of each recommendable movie to this user profile.\n# Implement a simple strategy that simply determines the overlap in genres, ignoring how many movies of a certain genre the user has liked.\n# Inspect the outcomes of this recommendation strategy for a few users.\ndef getSimilarities():\n    similarities_ids_dict = {}  # such that no movie is recommended more than once (key is the movie id)\n\n    for index, movie in data.iterrows():\n        if movie['user_id'] == user_id:\n            continue\n        genres = movie['genres'].split(\"|\")\n        intersected_set = set(genres).intersection(liked_genres)\n        similarities_ids_dict[movie['movie_id']] = len(intersected_set)\n\n    sorted_dict_similarities = dict(sorted(similarities_ids_dict.items(), key=lambda x: x[1], reverse=True)[:10])\n    keys = sorted_dict_similarities.keys()\n    return list(keys)\n\n# 3) Extend the algorithm as follows. When recommending, remove all movies that have no overlap with the given user profile.\n# Rank the remaining items based on their popularity1. Again, test your method with a few users.\n# 1 You can determine the popularity of any item by counting the numbers of ratings for it.\ndef getSimilaritiesExtended():\n    similarities_ids = set()  # such that no movie is recommended more than once\n    for index, movie in data.iterrows():\n        if movie['user_id'] == user_id:\n            continue\n        genres = movie['genres'].split(\"|\")\n        intersected_set = set(genres).intersection(liked_genres)\n        # skip ones with no overlap\n        if intersected_set:\n            similarities_ids.add(movie['movie_id'])\n\n    possible_movies = data[data['movie_id'].isin(list(similarities_ids))]\n\n    # popularity = number of ratings per movie (see footnote 1 above)\n    num_ratings_per_movie = possible_movies.groupby('movie_id')['rating'].count()\n\n    sorted_dict_ratings = dict(sorted(num_ratings_per_movie.items(), key=lambda x: x[1], reverse=True)[:10])\n    # tested with user 123; none of the recommended movies had already been rated by the user\n    return sorted_dict_ratings\n\n# 4) Implement a method that also considers the “genre‐count” in the user profile in some form.\ndef getSimilaritiesExtendedGenreCount():\n    genre_count = len(liked_genres)\n    similarities_ids = set()  # such that no movie is recommended more than once\n    similarities = {}  # movie_id -> genre-overlap weight\n\n    for index, movie in data.iterrows():\n        if movie['user_id'] == user_id:\n            continue\n        genres = movie['genres'].split(\"|\")\n\n        intersected_set = set(genres).intersection(liked_genres)\n        # skip ones with no overlap\n        if intersected_set:\n            # the weight factor grows with the genre overlap\n            weight = len(intersected_set) / genre_count\n
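            # e.g. (hypothetical numbers): with 6 liked genres and an overlap of 3,\n            # weight = 3 / 6 = 0.5; a full overlap gives 1.0, a single genre 1/6.\n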
            similarities_ids.add(movie['movie_id'])\n            similarities[movie['movie_id']] = weight\n\n    possible_movies = data[data['movie_id'].isin(list(similarities_ids))]\n\n    rating_stats = possible_movies.groupby('movie_id')['rating'].mean().reset_index()\n    # scale each movie's mean rating by its genre-overlap weight\n    rating_stats['rating'] = rating_stats['rating'] * rating_stats['movie_id'].map(similarities)\n    #print(rating_stats)\n\n    weighted_ratings = rating_stats.set_index('movie_id')['rating']\n    sorted_dict_ratings = dict(sorted(weighted_ratings.items(), key=lambda x: x[1], reverse=True)[:10])\n    return sorted_dict_ratings\n\nif __name__ == '__main__':\n    print('Running the popularity‐aware content‐based recommender')\n\n    # A) Accepts the user ID as input (on the console)\n    user_id = input(\"Enter user ID: \")\n    try:\n        user_id = int(user_id)\n    except ValueError:\n        print(\"Error: user ID must be an integer\")\n        exit()\n\n    # Load the MovieLens dataset\n    ratings = pd.read_csv('./ratings.dat', sep='::', engine='python', names=[\n        'user_id', 'movie_id', 'rating', 'timestamp'])\n    movies = pd.read_csv('./movies.dat', sep='::', engine='python', names=[\n        'movie_id', 'title', 'genres'], encoding='ISO-8859-1')\n    users = pd.read_csv('./users.dat', sep='::', engine='python', names=[\n        'user_id', 'gender', 'age', 'occupation', 'zip'], encoding='ISO-8859-1')\n\n\n    # B) Displays the user profile in terms of the rated items\n    # Merge the data\n    data = pd.merge(ratings, users, on='user_id')\n    data = pd.merge(data, movies, on='movie_id')\n    #print(data)\n\n    print('User profile in terms of rated items of the user:')\n    user_ratings = data[data['user_id'] == user_id]\n    user_ratings_filtered = getLikedMovies()\n    print(user_ratings_filtered[:10].to_string(\n        index=False))\n\n    # C) Prints the top‐10 recommendations on the console. To implement the algorithm:\n\n    # 1) Create a user profile based on the genres of the movies.\n    # Count how often each movie genre appeared in the set of the movies\n    # that the user has liked (i.e., when the rating is greater than 3).\n    user_profile = createUserProfile()\n\n    # for further use in the similarity methods the genres liked by the user are kept as a set\n    liked_genres = set(user_profile.to_dict().keys())\n    print('Genres liked by user ', user_id, liked_genres)\n\n    # 2) Determine the similarity of each recommendable movie to this user profile.\n    # Implement a simple strategy that simply determines the overlap in genres, ignoring how many movies of a certain genre the user has liked.\n    # Inspect the outcomes of this recommendation strategy for a few users.\n    sim_movie_ids = getSimilarities()\n    print('Recommended movies according to C2) (overlap in genres)')\n    print(movies[movies['movie_id'].isin(sim_movie_ids)].to_string())\n\n    # 3) Extend the algorithm as follows. When recommending, remove all movies that have no overlap with the given user profile.\n    # Rank the remaining items based on their popularity1. 
Again, test your method with a few users.\n # 1 You can determine the popularity of any item by counting the numbers of ratings for it.\n sim_movie_ids_extended = getSimilaritiesExtended()\n print('Recommended movies according to C3) (overlap in genres extended)')\n print (movies[movies['movie_id'].isin(sim_movie_ids_extended)].to_string())\n\n # 4) Implement a method that also considers the “genre‐count” in the user profile in some form.\n sim_movie_ids_extended_genre_count = getSimilaritiesExtendedGenreCount()\n print('Recommended movies according to C4) (overlap in genres extended)')\n print (movies[movies['movie_id'].isin(sim_movie_ids_extended_genre_count)].to_string())\n\n # Test your method interactively with a few users to check the plausibility of the recommendations.\n # Use the MovieLens1M dataset for testing your program. Structure your program code in functions and/or classes.\n # Implement appropriate error handling procedures.","repo_name":"stenglx/recommender_systems_exc4","sub_path":"HW04_Stengg.py","file_name":"HW04_Stengg.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"26967209816","text":"from github import Github\n\ndef delete_repositories(username, access_token, repos_to_keep):\n # Create a GitHub instance using the access token\n g = Github(access_token)\n user = g.get_user(username)\n \n # Get all repositories of the user\n repos = user.get_repos()\n\n # Loop through each repository and check if it should be deleted\n for repo in repos:\n if repo.name not in repos_to_keep:\n # Delete the repository\n repo.delete()\n print(f\"Deleted repository: {repo.name}\")\n\nif __name__ == \"__main__\":\n # Replace \"your_github_username\" with your actual GitHub username\n username = \"your_github_username\"\n \n # Replace \"YOUR_ACCESS_TOKEN\" with the access token you generated\n access_token = \"YOUR_ACCESS_TOKEN\"\n \n # List the names of repositories you want to keep\n repos_to_keep = [\"Repo1\", \"Repo2\", \"Repo3\"]\n \n # Call the function to delete the unwanted repositories\n delete_repositories(username, access_token, repos_to_keep)\n","repo_name":"MahmudRafi/Delete-Multiple-github-Repo-with-exception","sub_path":"delete_repos.py","file_name":"delete_repos.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"41195085188","text":"#!/usr/bin/env python3\n\"\"\"\n1. 
FIFO caching\n\"\"\"\nfrom base_caching import BaseCaching\n\n\nclass FIFOCache(BaseCaching):\n \"\"\"\n A class FIFOCache that inherits from\n BaseCaching and is a caching system:\n \"\"\"\n\n def __init__(self):\n \"\"\"initialize\"\"\"\n super().__init__()\n self.queue = []\n\n def put(self, key, item):\n \"\"\"\n Store a key-value pair\n \"\"\"\n if key is None or item is None:\n return\n if key in self.cache_data:\n self.queue.remove(key)\n else:\n length = len(self.cache_data)\n if length >= BaseCaching.MAX_ITEMS:\n print(\"DISCARD: {}\".format(self.queue[0]))\n del self.cache_data[self.queue[0]]\n del self.queue[0]\n self.queue.append(key)\n self.cache_data[key] = item\n\n def get(self, key):\n \"\"\"Returns the value in self.cache_data associated with key\"\"\"\n if key is not None and key in self.cache_data:\n return self.cache_data[key]\n return None\n","repo_name":"nnodim/alx-backend","sub_path":"0x01-caching/1-fifo_cache.py","file_name":"1-fifo_cache.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"} +{"seq_id":"22050217991","text":"import logging\nimport traceback as tb\nimport json\nimport azure.functions as func\nfrom classes import STATError\nfrom shared import coordinator\n\ndef main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:\n logging.debug('STAT Function started processing a request.')\n module_name = req.route_params.get('modulename')\n\n try:\n req_body = req.get_json()\n except ValueError:\n logging.error(msg={'Error': 'Invalid Request Body', 'InvocationId': context.invocation_id})\n return func.HttpResponse(json.dumps({'Error': 'Invalid Request Body', 'InvocationId': context.invocation_id}), status_code=400, mimetype='application/json')\n\n try:\n return_data = coordinator.initiate_module(module_name=module_name, req_body=req_body)\n except STATError as e:\n trace = tb.format_exception(None, e, e.__traceback__)\n logging.error(msg={'Error': e.error, 'SourceError': e.source_error, 'InvocationId': context.invocation_id}, exc_info=True)\n return func.HttpResponse(json.dumps({'Error': e.error, 'InvocationId': context.invocation_id, 'SourceError': e.source_error, 'Traceback': trace}), status_code=e.status_code, mimetype='application/json')\n except Exception as e:\n trace = tb.format_exception(None, e, e.__traceback__)\n logging.error(e, exc_info=True)\n return func.HttpResponse(json.dumps({'Error': 'Module processing failed, an unknown exception has occurred.', 'InvocationId': context.invocation_id, 'Traceback': trace}), status_code=400, mimetype='application/json')\n except:\n logging.error(msg={'Error': 'Module processing failed, an unknown exception has occurred.', 'InvocationId': context.invocation_id}, exc_info=True)\n return func.HttpResponse(json.dumps({'Error': 'Module processing failed, an unknown exception has occurred.', 'InvocationId': context.invocation_id}), status_code=400, mimetype='application/json')\n \n return func.HttpResponse(body=json.dumps(return_data.body.__dict__), status_code=return_data.statuscode, mimetype=return_data.contenttype)\n","repo_name":"briandelmsft/STAT-Function","sub_path":"modules/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"17"} +{"seq_id":"35851072006","text":"# -*- coding: utf-8 -*-\n#\n# 2020-02-15 Jean-Pierre Höhmann \n# Add webAuthn token\n# 2018-06-15 Cornelius Kölbel \n# Add 
translation for authentication failure - since\n# this is a message that is displayed in the UI.\n# 2016-04-08 Cornelius Kölbel \n# Avoid \"None\" as redundant 2nd argument\n# 2015-11-04 Cornelius Kölbel \n# Add REMOTE_USER check\n# 2015-04-03 Cornelius Kölbel \n# Add logout time to response\n# 2014-12-15 Cornelius Kölbel, info@privacyidea.org\n# Initial creation\n#\n# (c) Cornelius Kölbel\n# Info: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see .\n#\n__doc__ = \"\"\"This REST API is used to authenticate the users. A user needs to\nauthenticate when he wants to use the API for administrative tasks like\nenrolling a token.\n\nThis API must not be confused with the validate API, which is used to check,\nif a OTP value is valid. See :ref:`rest_validate`.\n\nAuthentication of users and admins is tested in tests/test_api_roles.py\n\nYou need to authenticate for all administrative tasks. If you are not\nauthenticated, the API returns a 401 response.\n\nTo authenticate you need to send a POST request to /auth containing username\nand password.\n\"\"\"\nfrom flask import (Blueprint,\n request,\n current_app,\n g)\nimport jwt\nfrom functools import wraps\nfrom datetime import (datetime,\n timedelta)\nfrom privacyidea.lib.error import AuthError, ERROR\nfrom privacyidea.lib.crypto import geturandom, init_hsm\nfrom privacyidea.lib.audit import getAudit\nfrom privacyidea.lib.auth import (check_webui_user, ROLE, verify_db_admin,\n db_admin_exist)\nfrom privacyidea.lib.framework import get_app_config_value\nfrom privacyidea.lib.user import User, split_user, log_used_user\nfrom privacyidea.lib.policy import PolicyClass, REMOTE_USER\nfrom privacyidea.lib.realm import get_default_realm, realm_is_defined\nfrom privacyidea.api.lib.postpolicy import (postpolicy, get_webui_settings, add_user_detail_to_response, check_tokentype,\n check_tokeninfo, check_serial, no_detail_on_fail, no_detail_on_success,\n get_webui_settings)\nfrom privacyidea.api.lib.prepolicy import (is_remote_user_allowed, prepolicy,\n pushtoken_disable_wait, webauthntoken_authz, webauthntoken_request,\n webauthntoken_auth, increase_failcounter_on_challenge)\nfrom privacyidea.api.lib.utils import (send_result, get_all_params,\n verify_auth_token, getParam)\nfrom privacyidea.lib.utils import get_client_ip, hexlify_and_unicode, to_unicode\nfrom privacyidea.lib.config import get_from_config, SYSCONF, ensure_no_config_object, get_privacyidea_node\nfrom privacyidea.lib.event import event, EventConfiguration\nfrom privacyidea.lib import _\nimport logging\nimport traceback\nimport threading\n\nlog = logging.getLogger(__name__)\n\n\njwtauth = Blueprint('jwtauth', __name__)\n\n\n@jwtauth.before_request\ndef before_request():\n \"\"\"\n This is executed before the request\n \"\"\"\n ensure_no_config_object()\n request.all_data = get_all_params(request)\n privacyidea_server = get_app_config_value(\"PI_AUDIT_SERVERNAME\", get_privacyidea_node(request.host))\n 
g.policy_object = PolicyClass()\n g.audit_object = getAudit(current_app.config)\n g.event_config = EventConfiguration()\n # access_route contains the ip addresses of all clients, hops and proxies.\n g.client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n # Save the HTTP header in the localproxy object\n g.request_headers = request.headers\n g.serial = getParam(request.all_data, \"serial\", default=None)\n g.audit_object.log({\"success\": False,\n \"client\": g.client_ip,\n \"client_user_agent\": request.user_agent.browser,\n \"privacyidea_server\": privacyidea_server,\n \"action\": \"{0!s} {1!s}\".format(request.method, request.url_rule),\n \"action_detail\": \"\",\n \"thread_id\": \"{0!s}\".format(threading.current_thread().ident),\n \"info\": \"\"})\n\n username = getParam(request.all_data, \"username\")\n if username:\n # We only fill request.User, if we really have a username.\n # On endpoints like /auth/rights, this is not available\n loginname, realm = split_user(username)\n # overwrite the split realm if we have a realm parameter. Default back to default_realm\n realm = getParam(request.all_data, \"realm\") or realm or get_default_realm()\n # Prefill the request.User. This is used by some pre-event handlers\n try:\n request.User = User(loginname, realm)\n except Exception as e:\n request.User = None\n log.warning(\"Problem resolving user {0!s} in realm {1!s}: {2!s}.\".format(loginname, realm, e))\n log.debug(\"{0!s}\".format(traceback.format_exc()))\n\n\n@jwtauth.route('', methods=['POST'])\n@prepolicy(increase_failcounter_on_challenge, request=request)\n@prepolicy(pushtoken_disable_wait, request)\n@prepolicy(webauthntoken_request, request=request)\n@prepolicy(webauthntoken_authz, request=request)\n@prepolicy(webauthntoken_auth, request=request)\n@postpolicy(get_webui_settings)\n@postpolicy(no_detail_on_success, request=request)\n@postpolicy(add_user_detail_to_response, request=request)\n@postpolicy(check_tokentype, request=request)\n@postpolicy(check_tokeninfo, request=request)\n@postpolicy(check_serial, request=request)\n@event(\"auth\", request, g)\ndef get_auth_token():\n \"\"\"\n This call verifies the credentials of the user and issues an\n authentication token, that is used for the later API calls. The\n authentication token has a validity, that is usually 1 hour.\n\n :jsonparam username: The username of the user who wants to authenticate to\n the API.\n :jsonparam password: The password/credentials of the user who wants to\n authenticate to the API.\n :jsonparam realm: The realm where the user will be searched.\n\n :return: A json response with an authentication token, that needs to be\n used in any further request.\n\n :status 200: in case of success\n :status 401: if authentication fails\n\n **Example Authentication Request**:\n\n .. sourcecode:: http\n\n POST /auth HTTP/1.1\n Host: example.com\n Accept: application/json\n\n username=admin\n password=topsecret\n\n **Example Authentication Response**:\n\n .. sourcecode:: http\n\n HTTP/1.0 200 OK\n Content-Length: 354\n Content-Type: application/json\n\n {\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"status\": true,\n \"value\": {\n \"token\": \"eyJhbGciOiJIUz....jdpn9kIjuGRnGejmbFbM\"\n }\n },\n \"version\": \"privacyIDEA unknown\"\n }\n\n **Response for failed authentication**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 401 UNAUTHORIZED\n Content-Type: application/json\n Content-Length: 203\n\n {\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"error\": {\n \"code\": -401,\n \"message\": \"missing Authorization header\"\n },\n \"status\": false\n },\n \"version\": \"privacyIDEA unknown\",\n \"config\": {\n \"logout_time\": 30\n }\n }\n\n \"\"\"\n validity = timedelta(hours=1)\n username = getParam(request.all_data, \"username\")\n password = getParam(request.all_data, \"password\")\n realm_param = getParam(request.all_data, \"realm\")\n details = {}\n realm = ''\n\n # the realm parameter has precedence! Check if it exists\n if realm_param and not realm_is_defined(realm_param):\n raise AuthError(_(\"Authentication failure. Unknown realm: {0!s}.\".format(realm_param)),\n id=ERROR.AUTHENTICATE_WRONG_CREDENTIALS)\n\n if username is None:\n raise AuthError(_(\"Authentication failure. Missing Username\"),\n id=ERROR.AUTHENTICATE_MISSING_USERNAME)\n\n user_obj = request.User\n if not user_obj:\n # The user could not be resolved, but it could still be a local administrator\n loginname, realm = split_user(username)\n realm = (realm_param or realm or get_default_realm()).lower()\n user_obj = User()\n else:\n realm = user_obj.realm\n loginname = user_obj.login\n\n # Failsafe to have the user attempt in the log, whatever happens\n # This can be overwritten later\n g.audit_object.log({\"user\": username,\n \"realm\": realm})\n\n secret = current_app.secret_key\n superuser_realms = [x.lower() for x in current_app.config.get(\"SUPERUSER_REALM\", [])]\n # This is the default role for the logged-in user.\n # The role privileges may be risen to \"admin\"\n role = ROLE.USER\n # The way the user authenticated. This could be\n # \"password\" = The admin user DB or the user store\n # \"pi\" = The admin or the user is authenticated against privacyIDEA\n # \"remote_user\" = authenticated by webserver\n authtype = \"password\"\n # Verify the password\n admin_auth = False\n user_auth = False\n\n # Check if the remote user is allowed\n if (request.remote_user == username) and is_remote_user_allowed(request) != REMOTE_USER.DISABLE:\n # Authenticated by the Web Server\n # Check if the username exists\n # 1. in local admins\n # 2. in a realm\n # 2a. 
is an admin realm\n authtype = \"remote_user \"\n if db_admin_exist(username):\n role = ROLE.ADMIN\n admin_auth = True\n g.audit_object.log({\"success\": True,\n \"user\": \"\",\n \"administrator\": username,\n \"info\": \"internal admin\"})\n user_obj = User()\n else:\n # check, if the user exists\n g.audit_object.log({\"user\": user_obj.login,\n \"realm\": user_obj.realm,\n \"info\": log_used_user(user_obj)})\n if user_obj.exist():\n user_auth = True\n if user_obj.realm in superuser_realms:\n role = ROLE.ADMIN\n admin_auth = True\n\n elif verify_db_admin(username, password):\n role = ROLE.ADMIN\n admin_auth = True\n log.info(\"Local admin '{0!s}' successfully logged in.\".format(username))\n # This admin is not in the default realm!\n realm = \"\"\n user_obj = User()\n g.audit_object.log({\"success\": True,\n \"user\": \"\",\n \"realm\": \"\",\n \"administrator\": username,\n \"info\": \"internal admin\"})\n\n else:\n # The user could not be identified against the admin database,\n # so we do the rest of the check\n if password is None:\n g.audit_object.add_to_log({\"info\": 'Missing parameter \"password\"'}, add_with_comma=True)\n else:\n options = {\"g\": g,\n \"clientip\": g.client_ip}\n for key, value in request.all_data.items():\n if value and key not in [\"g\", \"clientip\"]:\n options[key] = value\n user_auth, role, details = check_webui_user(user_obj,\n password,\n options=options,\n superuser_realms=\n superuser_realms)\n details = details or {}\n serials = \",\".join([challenge_info[\"serial\"] for challenge_info in details[\"multi_challenge\"]]) \\\n if 'multi_challenge' in details else details.get('serial')\n if db_admin_exist(user_obj.login) and user_auth and realm == get_default_realm():\n # If there is a local admin with the same login name as the user\n # in the default realm, we inform about this in the log file.\n # This condition can only be checked if the user was authenticated as it\n # is the only way to verify if such a user exists.\n log.warning(\"A user '{0!s}' exists as local admin and as user in \"\n \"your default realm!\".format(user_obj.login))\n if role == ROLE.ADMIN:\n g.audit_object.log({\"user\": \"\",\n \"administrator\": user_obj.login,\n \"realm\": user_obj.realm,\n \"resolver\": user_obj.resolver,\n \"serial\": serials,\n \"info\": \"{0!s}|loginmode={1!s}\".format(log_used_user(user_obj),\n details.get(\"loginmode\"))})\n else:\n g.audit_object.log({\"user\": user_obj.login,\n \"realm\": user_obj.realm,\n \"resolver\": user_obj.resolver,\n \"serial\": serials,\n \"info\": \"{0!s}|loginmode={1!s}\".format(log_used_user(user_obj),\n details.get(\"loginmode\"))})\n\n if not user_auth and \"multi_challenge\" in details and len(details[\"multi_challenge\"]) > 0:\n return send_result({\"role\": role,\n \"username\": loginname,\n \"realm\": realm},\n details=details)\n\n if not admin_auth and not user_auth:\n raise AuthError(_(\"Authentication failure. 
Wrong credentials\"),\n id=ERROR.AUTHENTICATE_WRONG_CREDENTIALS,\n details=details or {})\n else:\n g.audit_object.log({\"success\": True})\n request.User = user_obj\n\n # If the HSM is not ready, we need to create the nonce in another way!\n hsm = init_hsm()\n if hsm.is_ready:\n nonce = geturandom(hex=True)\n # Add the role to the JWT, so that we can verify it internally\n # Add the authtype to the JWT, so that we could use it for access\n # definitions\n rights = g.policy_object.ui_get_rights(role, realm, loginname,\n g.client_ip)\n menus = g.policy_object.ui_get_main_menus({\"username\": loginname,\n \"role\": role,\n \"realm\": realm},\n g.client_ip)\n else:\n import os\n nonce = hexlify_and_unicode(os.urandom(20))\n rights = []\n menus = []\n\n # What is the log level?\n log_level = current_app.config.get(\"PI_LOGLEVEL\", 30)\n\n token = jwt.encode({\"username\": loginname,\n \"realm\": realm,\n \"nonce\": nonce,\n \"role\": role,\n \"authtype\": authtype,\n \"exp\": datetime.utcnow() + validity,\n \"rights\": rights},\n secret, algorithm='HS256')\n\n # set the logged-in user for post-policies and post-events\n g.logged_in_user = {\"username\": loginname,\n \"realm\": realm,\n \"role\": role}\n\n # Add the role to the response, so that the WebUI can make decisions\n # based on this (only show selfservice, not the admin part)\n return send_result({\"token\": to_unicode(token),\n \"role\": role,\n \"username\": loginname,\n \"realm\": realm,\n \"log_level\": log_level,\n \"rights\": rights,\n \"menus\": menus},\n details=details)\n\n\ndef admin_required(f):\n \"\"\"\n This is a decorator for routes, that require to be authenticated.\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n check_auth_token(required_role=[ROLE.ADMIN])\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef user_required(f):\n \"\"\"\n This is a decorator for routes, that require to be authenticated.\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n check_auth_token(required_role=[\"user\", \"admin\"])\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef check_auth_token(required_role=None):\n \"\"\"\n This checks the authentication token\n\n You need to pass an authentication header:\n\n PI-Authorization: \n\n You can do this using httpie like this:\n\n http -j POST http://localhost:5000/system/getConfig Authorization:ewrt\n \"\"\"\n auth_token = request.headers.get('PI-Authorization')\n if not auth_token:\n auth_token = request.headers.get('Authorization')\n r = verify_auth_token(auth_token, required_role)\n g.logged_in_user = {\"username\": r.get(\"username\"),\n \"realm\": r.get(\"realm\"),\n \"role\": r.get(\"role\")}\n\n\n@jwtauth.route('/rights', methods=['GET'])\n@user_required\ndef get_rights():\n \"\"\"\n This returns the rights of the logged in user.\n\n :reqheader Authorization: The authorization token acquired by /auth request\n \"\"\"\n enroll_types = g.policy_object.ui_get_enroll_tokentypes(g.client_ip,\n g.logged_in_user)\n\n g.audit_object.log({\"success\": True})\n return send_result(enroll_types)\n","repo_name":"privacyidea/privacyidea","sub_path":"privacyidea/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":18776,"program_lang":"python","lang":"en","doc_type":"code","stars":1388,"dataset":"github-code","pt":"17"} +{"seq_id":"43711946109","text":"#!/usr/bin/env python\n\nimport sys, time\n\nfrom PyQt4 import QtCore, QtGui\n\nimport numpy as np\n\nimport pyqtgraph as pg\n\nfrom vis_helpers import *\nfrom 
zmq_protobuf_helpers import *\n\nimport DynamicMheTelemetry_pb2 as mheProto\n\nfrom vis_helpers import OcpWorker\n\napp = QtGui.QApplication([])\n\nclass CustomGraphicsView( pg.GraphicsView ):\n\tdef __init__(self, timer, parent = None):\n\t\tpg.GraphicsView.__init__(self, parent)\n\n\t\tassert isinstance(timer, QtCore.QTimer)\n\t\tself._timer = timer\n\t\n\tdef keyPressEvent(self, event):\n\t\tif event.key() == QtCore.Qt.Key_P:\n\t\t\t# A simple start/stop of a timer if the key \"P\" is pressed\n\t\t\tif self._timer.isActive() is True:\n\t\t\t\tself._timer.stop()\n\t\t\telse:\n\t\t\t\tself._timer.start()\n\nlayout = pg.GraphicsLayout(border = (100, 100, 100))\n\n#\n# Generic fields, that all protobufs _must_ have\n#\ngenNames = [\"ts_trigger\", \"ts_elapsed\"]\n\n#\n# Test fields\n#\n\n#\n# Window organization\n#\nmhePlots = dict()\n\n# x, y, z\nposNames = [(\"x\", \"m\"), (\"y\", \"m\"), (\"z\", \"m\")]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), posNames, posNames) )\n# RPY, or e11_...e33\nrpyNames = [(\"roll\", \"deg\"), (\"pitch\", \"deg\"), (\"yaw\", \"deg\")]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), rpyNames, rpyNames) )\n# dx, dy, dz\nvelNames = [(\"dx\", \"m/s\"), (\"dy\", \"m/s\"), (\"dz\", \"m/s\")]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), velNames, velNames) )\n# w_... x, y, z\ngyroNames = [(\"w_bn_b_x\", \"rad/s\"), \"w_bn_b_y\", \"w_bn_b_z\"]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), gyroNames, gyroNames) )\n\nlayout.nextRow()\n\n# aileron, elevator; daileron, delevator\nctrlNames = [(\"aileron\", \"rad\"), (\"daileron\", \"rad/s\"), \"elevator\", \"delevator\"]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), ctrlNames, ctrlNames) )\n# ddelta, motor_torque, dmotor_torque, [cos, sin delta]\ncarNames = [(\"ddelta\", \"rpm\"), (\"motor_torque\", \"Nm\"), (\"dmotor_torque\", \"Nm/s\")]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), carNames, carNames) )\n# r, dr, ddr, dddr\ncableNames = [(\"r\", \"m\"), (\"dr\", \"m/s\"), (\"ddr\", \"m/s^2\"), (\"dddr\", \"m/s^3\")]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), cableNames, cableNames) )\n# obj_value, kkt_value, exec_prep, exec_fdb\nperfNames = [\"obj_value\", \"kkt_value\", (\"exec_prep\", \"s\"), (\"exec_fdb\", \"s\")]\nmhePlots.update( addPlotsToLayout(layout.addLayout( ), perfNames, perfNames,\n\t\t\t\toptions = {\"obj_value\": [\"semilogy\"],\n\t\t\t\t\t\t \"kkt_value\": [\"semilogy\"]}) )\n\nhorizonNamesAlt = flatten(posNames + rpyNames + velNames + gyroNames + \\\n\t\t\t\t ctrlNames + carNames + cableNames)\nhistoryNamesAlt = flatten( perfNames )\n\nhorizonNames = []\nfor v in horizonNamesAlt:\n\tif isinstance(v, tuple):\n\t\thorizonNames.extend( [ v[ 0 ] ] )\n\telse:\n\t\tassert isinstance(v, str)\n\t\thorizonNames.extend( [ v ] )\nhistoryNames = []\nfor v in historyNamesAlt:\n\tif isinstance(v, tuple):\n\t\thistoryNames.extend( [ v[ 0 ] ] )\n\telse:\n\t\tassert isinstance(v, str)\n\t\thistoryNames.extend( [ v ] )\n\n#\n# Setup update of the plotter\n#\n\n# Queue for data exchange between the worker and the main thread\nimport Queue\nq1 = Queue.Queue(maxsize = 10)\n\ndef updatePlots():\n\tglobal q1\n\tglobal mhePlots\n\tglobal horizonNames, historyNames\n\n\tdef updateGroup(q, plots):\n\t\ttry:\n\t\t\tdata = q.get_nowait()\n\t\t\ttimeStamps = data[ \"ts_trigger\" ]\n\t\t\t# Update \n\t\t\tmap(lambda name: plots[ name ].setData( data[ name ] ), horizonNames)\n\t\t\tmap(lambda name: plots[ name ].setData(timeStamps, data[ name ]), 
historyNames)\n\t\t\t\n\t\texcept Queue.Empty:\n\t\t\tpass\n\t\t\n\t# Update all plots\n\tupdateGroup(q1, mhePlots)\n\ntimer = QtCore.QTimer()\ntimer.timeout.connect( updatePlots )\ntimer.start( 100 )\n\n#\n# ZMQ part:\n#\n\nhost = \"192.168.1.110\"\n#host = \"localhost\"\n\nDynamicMhePort = \"5570\"\n\n# Create workers\nworkers = []\n\nworkers.append(OcpWorker(mheProto, host + \":\" + DynamicMhePort, q1, bufferSize = 20 * 25))\n\n# Start Qt event loop unless running in interactive mode.\n#\nif __name__ == '__main__':\n\n\tview = CustomGraphicsView( timer )\n\tview.setCentralItem( layout )\n\tview.show()\n\tview.setWindowTitle(\n\t\t\"Telemetry for MHE; Horizons of states and controls and history of performance indicators\" )\n\tview.resize(1024, 768)\n\n\tfor worker in workers:\n\t\tworker.start()\n\t\n\timport sys\n\tif (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n\t\tQtGui.QApplication.instance().exec_()\n\t\n\tfor worker in workers:\n\t\tworker.stop()\n","repo_name":"drewm1980/planepower","sub_path":"visualiser/vis_dynamic_mhe.py","file_name":"vis_dynamic_mhe.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"17"}
+{"seq_id":"70722664984","text":"\nclass CATNode:\n    def __init__(self, _etiqueta, _hijos, _padre):\n        self.etiqueta = _etiqueta  # the node's label\n        self.hijos = _hijos  # the expected child labels\n        self.padre = _padre  # the parent node\n        self.siguiente = 0  # index of the next expected child\n        self.n_hijos = []  # the child nodes actually inserted\n\n    def add_child(self, new_child):\n        self.n_hijos.append(new_child)\n        self.siguiente += 1\n\n    def set_parent(self, new_parent):\n        self.padre = new_parent\n\n    def get_parent(self):\n        return self.padre\n\nclass CATree:\n    def __init__(self):\n        self.root = None\n        self.actual_node = None\n        self.is_usable = True\n\n        self.dictio_visualizer = {}\n\n    def insert(self, _etiqueta, _hijos, debug = False):\n        if self.is_usable:\n            if debug: print(_etiqueta, \": \", end=\"\")\n            if not self.root:\n                temp_node = CATNode(_etiqueta, _hijos, None)\n                self.root = temp_node\n                self.actual_node = temp_node\n                if debug: print(\"\t Creating root\")\n\n            else:\n                if self.down_to_first_child() == _etiqueta:  # check whether the first expected child matches the incoming label\n                    temp_node = CATNode(_etiqueta, _hijos, self.actual_node)  # this is usually the case\n                    self.actual_node.add_child(temp_node)\n                    self.actual_node = temp_node\n                    if debug: print(\"\t Normal insertion\")\n\n                else:  # otherwise there is no match, or there is no child left to descend into\n                    self.actual_node = self.go_next(self.actual_node)\n                    if self.actual_node != None:\n                        #print(\"-->\", self.actual_node.hijos, \"-->\", self.actual_node.siguiente)\n                        if self.actual_node.hijos[self.actual_node.siguiente] == _etiqueta:\n                            temp_node = CATNode(_etiqueta, _hijos, self.actual_node)\n                            self.actual_node.add_child(temp_node)\n                            self.actual_node = temp_node\n                            if debug: print(\"\t Insertion at next expected child\")\n                        else:\n                            self.is_usable = False\n                            print(\"\t Insertion mismatch\")\n                    else:\n                        self.is_usable = False\n                        print(\"\t Root reached!!\")\n\n            if self.is_usable and self.down_to_first_child() == \"lambda\":\n                if debug: print(\"\t Lambda: lambda child expected, inserting it\")\n                self.insert_lambda()\n\n    def down_to_first_child(self):\n        if len(self.actual_node.hijos) == 0:\n            return \"\"\n        else:\n            return self.actual_node.hijos[0]\n\n
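    # Small illustration (hypothetical grammar): after inserting \"E\" with expected\n    # children [\"T\", \"Ep\"] and fully expanding the \"T\" subtree, go_next() climbs\n    # back up until it finds an ancestor whose next expected child (\"Ep\" here)\n    # has not been consumed yet, and insertion continues there.\n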
    def go_next(self, current_node):  # climbs from current_node toward the root until a parent still has an unconsumed expected child\n        if current_node.get_parent() != None:\n            temp_parent = current_node.get_parent()\n            if( temp_parent.siguiente < len(temp_parent.hijos) ):\n                return temp_parent\n            else:\n                return self.go_next(temp_parent)\n        else:\n            return None  # we reached the root (it has no parent)\n\n    def insert_lambda(self):\n        temp_node = CATNode(\"lambda\", [], self.actual_node)\n        self.actual_node.add_child(temp_node)\n\n\n    ############# Tree tools #################\n    def draw_tree(self):\n        self.set_tree_traversal(self.root, 0, debug = False)\n        for key in self.dictio_visualizer:\n            print(key, self.dictio_visualizer.get(key))\n\n    def add_to_dictio(self, _level, _etiqueta):\n        if not self.dictio_visualizer.get(_level):\n            self.dictio_visualizer[_level] = []\n        self.dictio_visualizer[_level].append(_etiqueta)\n\n    def set_tree_traversal(self, start_from, order, debug = False):\n        if debug:\n            print(order, start_from.etiqueta, start_from.hijos)\n\n        self.add_to_dictio(order, \"ROOT\" + \"->\" + start_from.etiqueta if not start_from.padre else start_from.padre.etiqueta + \"->\" + start_from.etiqueta)\n        for node in start_from.n_hijos:\n            self.set_tree_traversal(node, order+1, debug)\n        return\n\nmy_tree = CATree()\nmy_tree.insert(\"E\", [\"T\", \"Ep\"])\nmy_tree.insert(\"T\", [\"F\", \"Tp\"])\nmy_tree.insert(\"F\", [\"num\"])\nmy_tree.insert(\"num\", [])\nmy_tree.insert(\"Tp\", [\"lambda\"])\nmy_tree.insert(\"Ep\", [\"+\", \"T\", \"Ep\"])\nmy_tree.insert(\"+\", [])\nmy_tree.insert(\"T\", [\"F\", \"Tp\"])\nmy_tree.insert(\"F\", [\"(\", \"E\", \")\"])\nmy_tree.insert(\"(\", [])\nmy_tree.insert(\"E\", [\"T\", \"Ep\"])\nmy_tree.insert(\"T\", [\"F\", \"Tp\"])\nmy_tree.insert(\"F\", [\"num\"])\nmy_tree.insert(\"num\", [])\nmy_tree.insert(\"Tp\", [\"*\", \"F\", \"Tp\"])\nmy_tree.insert(\"*\", [])\nmy_tree.insert(\"F\", [\"num\"])\nmy_tree.insert(\"num\", [])\nmy_tree.insert(\"Tp\", [\"lambda\"])\nmy_tree.insert(\"Ep\", [\"lambda\"])\nmy_tree.insert(\")\", [])\nmy_tree.insert(\"Tp\", [\"lambda\"])\nmy_tree.insert(\"Ep\", [\"lambda\"])\nmy_tree.insert(\"$\", [])\n\nmy_tree.draw_tree()\n\n#print(my_tree.down_to_first_child())\n#print(my_tree.actual_node.get_parent().hijos)\n","repo_name":"dave98/2020_01","sub_path":"Compiladores/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"17"}
+{"seq_id":"17872839027","text":"from __future__ import absolute_import, division, unicode_literals\nfrom six import text_type\n\nfrom ..constants import scopingElements, tableInsertModeElements, namespaces\n\n\n\n\nMarker = None\n\nlistElementsMap = {\n    None: (frozenset(scopingElements), False),\n    \"button\": (frozenset(scopingElements | set([(namespaces[\"html\"], \"button\")])), False),\n    \"list\": (frozenset(scopingElements | set([(namespaces[\"html\"], \"ol\"),\n                                              (namespaces[\"html\"], \"ul\")])), False),\n    \"table\": (frozenset([(namespaces[\"html\"], \"html\"),\n                         (namespaces[\"html\"], \"table\")]), False),\n    \"select\": (frozenset([(namespaces[\"html\"], \"optgroup\"),\n                          (namespaces[\"html\"], \"option\")]), True)\n}\n\n\nclass Node(object):\n    def __init__(self, name):\n        \"\"\"Node representing an item in the tree.\n        name - The tag name associated with the node\n        parent - The parent of the current node (or None for the document node)\n        value - The value of the current node (applies to text nodes and\n        comments)\n        attributes - a dict holding 
name, value pairs for attributes of the node\n childNodes - a list of child nodes of the current node. This must\n include all elements but not necessarily other node types\n _flags - A list of miscellaneous flags that can be set on the node\n \"\"\"\n self.name = name\n self.parent = None\n self.value = None\n self.attributes = {}\n self.childNodes = []\n self._flags = []\n\n def __str__(self):\n attributesStr = \" \".join([\"%s=\\\"%s\\\"\" % (name, value)\n for name, value in\n self.attributes.items()])\n if attributesStr:\n return \"<%s %s>\" % (self.name, attributesStr)\n else:\n return \"<%s>\" % (self.name)\n\n def __repr__(self):\n return \"<%s>\" % (self.name)\n\n def appendChild(self, node):\n \"\"\"Insert node as a child of the current node\n \"\"\"\n raise NotImplementedError\n\n def insertText(self, data, insertBefore=None):\n \"\"\"Insert data as text in the current node, positioned before the\n start of node insertBefore or to the end of the node's text.\n \"\"\"\n raise NotImplementedError\n\n def insertBefore(self, node, refNode):\n \"\"\"Insert node as a child of the current node, before refNode in the\n list of child nodes. Raises ValueError if refNode is not a child of\n the current node\"\"\"\n raise NotImplementedError\n\n def removeChild(self, node):\n \"\"\"Remove node from the children of the current node\n \"\"\"\n raise NotImplementedError\n\n def reparentChildren(self, newParent):\n \"\"\"Move all the children of the current node to newParent.\n This is needed so that trees that don't store text as nodes move the\n text in the correct way\n \"\"\"\n \n for child in self.childNodes:\n newParent.appendChild(child)\n self.childNodes = []\n\n def cloneNode(self):\n \"\"\"Return a shallow copy of the current node i.e. a node with the same\n name and attributes but with no parent or child nodes\n \"\"\"\n raise NotImplementedError\n\n def hasContent(self):\n \"\"\"Return true if the node has children or text, false otherwise\n \"\"\"\n raise NotImplementedError\n\n\nclass ActiveFormattingElements(list):\n def append(self, node):\n equalCount = 0\n if node != Marker:\n for element in self[::-1]:\n if element == Marker:\n break\n if self.nodesEqual(element, node):\n equalCount += 1\n if equalCount == 3:\n self.remove(element)\n break\n list.append(self, node)\n\n def nodesEqual(self, node1, node2):\n if not node1.nameTuple == node2.nameTuple:\n return False\n\n if not node1.attributes == node2.attributes:\n return False\n\n return True\n\n\nclass TreeBuilder(object):\n \"\"\"Base treebuilder implementation\n documentClass - the class to use for the bottommost node of a document\n elementClass - the class to use for HTML Elements\n commentClass - the class to use for comments\n doctypeClass - the class to use for doctypes\n \"\"\"\n\n \n documentClass = None\n\n \n elementClass = None\n\n \n commentClass = None\n\n \n doctypeClass = None\n\n \n fragmentClass = None\n\n def __init__(self, namespaceHTMLElements):\n if namespaceHTMLElements:\n self.defaultNamespace = \"http://www.w3.org/1999/xhtml\"\n else:\n self.defaultNamespace = None\n self.reset()\n\n def reset(self):\n self.openElements = []\n self.activeFormattingElements = ActiveFormattingElements()\n\n \n self.headPointer = None\n self.formPointer = None\n\n self.insertFromTable = False\n\n self.document = self.documentClass()\n\n def elementInScope(self, target, variant=None):\n\n \n \n exactNode = hasattr(target, \"nameTuple\")\n\n listElements, invert = listElementsMap[variant]\n\n for node in 
reversed(self.openElements):\n if (node.name == target and not exactNode or\n node == target and exactNode):\n return True\n elif (invert ^ (node.nameTuple in listElements)):\n return False\n\n assert False \n\n def reconstructActiveFormattingElements(self):\n \n \n \n\n \n if not self.activeFormattingElements:\n return\n\n \n i = len(self.activeFormattingElements) - 1\n entry = self.activeFormattingElements[i]\n if entry == Marker or entry in self.openElements:\n return\n\n \n while entry != Marker and entry not in self.openElements:\n if i == 0:\n \n i = -1\n break\n i -= 1\n \n entry = self.activeFormattingElements[i]\n\n while True:\n \n i += 1\n\n \n entry = self.activeFormattingElements[i]\n clone = entry.cloneNode() \n\n \n element = self.insertElement({\"type\": \"StartTag\",\n \"name\": clone.name,\n \"namespace\": clone.namespace,\n \"data\": clone.attributes})\n\n \n self.activeFormattingElements[i] = element\n\n \n if element == self.activeFormattingElements[-1]:\n break\n\n def clearActiveFormattingElements(self):\n entry = self.activeFormattingElements.pop()\n while self.activeFormattingElements and entry != Marker:\n entry = self.activeFormattingElements.pop()\n\n def elementInActiveFormattingElements(self, name):\n \"\"\"Check if an element exists between the end of the active\n formatting elements and the last marker. If it does, return it, else\n return false\"\"\"\n\n for item in self.activeFormattingElements[::-1]:\n \n \n if item == Marker:\n break\n elif item.name == name:\n return item\n return False\n\n def insertRoot(self, token):\n element = self.createElement(token)\n self.openElements.append(element)\n self.document.appendChild(element)\n\n def insertDoctype(self, token):\n name = token[\"name\"]\n publicId = token[\"publicId\"]\n systemId = token[\"systemId\"]\n\n doctype = self.doctypeClass(name, publicId, systemId)\n self.document.appendChild(doctype)\n\n def insertComment(self, token, parent=None):\n if parent is None:\n parent = self.openElements[-1]\n parent.appendChild(self.commentClass(token[\"data\"]))\n\n def createElement(self, token):\n \"\"\"Create an element but don't insert it anywhere\"\"\"\n name = token[\"name\"]\n namespace = token.get(\"namespace\", self.defaultNamespace)\n element = self.elementClass(name, namespace)\n element.attributes = token[\"data\"]\n return element\n\n def _getInsertFromTable(self):\n return self._insertFromTable\n\n def _setInsertFromTable(self, value):\n \"\"\"Switch the function used to insert an element from the\n normal one to the misnested table one and back again\"\"\"\n self._insertFromTable = value\n if value:\n self.insertElement = self.insertElementTable\n else:\n self.insertElement = self.insertElementNormal\n\n insertFromTable = property(_getInsertFromTable, _setInsertFromTable)\n\n def insertElementNormal(self, token):\n name = token[\"name\"]\n assert isinstance(name, text_type), \"Element %s not unicode\" % name\n namespace = token.get(\"namespace\", self.defaultNamespace)\n element = self.elementClass(name, namespace)\n element.attributes = token[\"data\"]\n self.openElements[-1].appendChild(element)\n self.openElements.append(element)\n return element\n\n def insertElementTable(self, token):\n \"\"\"Create an element and insert it into the tree\"\"\"\n element = self.createElement(token)\n if self.openElements[-1].name not in tableInsertModeElements:\n return self.insertElementNormal(token)\n else:\n \n \n parent, insertBefore = self.getTableMisnestedNodePosition()\n if insertBefore is None:\n 
parent.appendChild(element)\n else:\n parent.insertBefore(element, insertBefore)\n self.openElements.append(element)\n return element\n\n def insertText(self, data, parent=None):\n \"\"\"Insert text data.\"\"\"\n if parent is None:\n parent = self.openElements[-1]\n\n if (not self.insertFromTable or (self.insertFromTable and\n self.openElements[-1].name\n not in tableInsertModeElements)):\n parent.insertText(data)\n else:\n \n \n parent, insertBefore = self.getTableMisnestedNodePosition()\n parent.insertText(data, insertBefore)\n\n def getTableMisnestedNodePosition(self):\n \"\"\"Get the foster parent element, and sibling to insert before\n (or None) when inserting a misnested table node\"\"\"\n \n \n \n lastTable = None\n fosterParent = None\n insertBefore = None\n for elm in self.openElements[::-1]:\n if elm.name == \"table\":\n lastTable = elm\n break\n if lastTable:\n \n \n if lastTable.parent:\n fosterParent = lastTable.parent\n insertBefore = lastTable\n else:\n fosterParent = self.openElements[\n self.openElements.index(lastTable) - 1]\n else:\n fosterParent = self.openElements[0]\n return fosterParent, insertBefore\n\n def generateImpliedEndTags(self, exclude=None):\n name = self.openElements[-1].name\n \n if (name in frozenset((\"dd\", \"dt\", \"li\", \"option\", \"optgroup\", \"p\", \"rp\", \"rt\"))\n and name != exclude):\n self.openElements.pop()\n \n \n self.generateImpliedEndTags(exclude)\n\n def getDocument(self):\n \"Return the final tree\"\n return self.document\n\n def getFragment(self):\n \"Return the final fragment\"\n \n fragment = self.fragmentClass()\n self.openElements[0].reparentChildren(fragment)\n return fragment\n\n def testSerializer(self, node):\n \"\"\"Serialize the subtree of node in the format required by unit tests\n node - the node from which to start serializing\"\"\"\n raise NotImplementedError\n","repo_name":"P79N6A/gecko-dev-comments-removed","sub_path":"testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":12033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"36185495448","text":"import unittest, numpy as np, itertools as itt\n\nfrom cdmft.schemes.bethe import GLocal, SelfEnergy, WeissField, GLocalAFM, WeissFieldAIAO, GLocalWithOffdiagonals, GLocalAIAO\n\n\nclass TestSchemesBethe(unittest.TestCase):\n\n def test_SchemesBethe_init(self):\n h = np.array([[0,0],[0,0]])\n g = GLocal(1, {'up': h, 'dn': h}, None, None, 3, ['up', 'dn'], [2, 2], 10, 1001)\n\n def test_SchemesBethe_calculate(self):\n h = np.array([[0]])\n g = GLocal(1, {'up': h, 'dn': h}, None, None, 3, ['up', 'dn'], [1, 1], 10, 1001)\n se = SelfEnergy(['up', 'dn'], [1, 1], 10, 1001)\n se.zero()\n g.set(se, 0)\n\n def test_SchemesBetheAFM_calculate(self):\n h = np.array([[0]])\n g = GLocalAFM(1, {'up': h, 'dn': h}, None, None, 3, ['up', 'dn'], [1, 1], 10, 1001)\n se = SelfEnergy(['up', 'dn'], [1, 1], 10, 1001)\n se.zero()\n g.set(se, 0)\n\n def test_SchemesBetheAIAO(self):\n g = GLocalAIAO(1, {'spin-site': np.identity(6)}, ['spin-site'], [6], 10, 1001)\n se = SelfEnergy(['spin-site'], [6], 10, 1001)\n se.zero()\n testmat = np.array([[i*j for j in range(1,7)] for i in range(1,7)])\n g['spin-site'].data[1001,:,:] = testmat\n g0 = WeissFieldAIAO(['spin-site'], [6], 10, 1001)\n g0.calc_selfconsistency(g, se, 3)\n g.find_and_set_mu(3., se, 0, 1000)\n self.assertTrue(g._last_g_loc_convergence[-1] < 0.001)\n\n def 
test_SchemesBethe_find_and_set_mu_single(self):\n        h = np.array([[0]])\n        g = GLocal(1, {'up': h, 'dn': h}, None, None, 3, ['up', 'dn'], [1, 1], 10, 1001)\n        se = SelfEnergy(['up', 'dn'], [1, 1], 10, 1001)\n        se.zero()\n        g.set(se, 0)\n        self.assertTrue(abs(g.total_density()-1) < 1e-3)\n        g.find_and_set_mu(1, se, 0.23, 100)\n        self.assertTrue(abs(g.total_density()-1) < 1e-3)\n\n    def test_SchemesBethe_find_and_set_mu_double(self):\n        h = np.array([[-1,0],[0,1]])\n        g = GLocal(1, {'up': h, 'dn': h}, None, None, 3, ['up', 'dn'], [2, 2], 10, 1001)\n        se = SelfEnergy(['up', 'dn'], [2, 2], 10, 1001)\n        se.zero()\n        g.find_and_set_mu(2, se, .12, 3)\n        self.assertTrue(abs(g.total_density()-2) < 1e-2)\n","repo_name":"MHarland/cdmft","sub_path":"test/test_schemesbethe.py","file_name":"test_schemesbethe.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"14"}
+{"seq_id":"19713581731","text":"from datetime import datetime\nfrom ftplib import FTP\nfrom io import BytesIO\n\nimport numpy as np\nimport pandas as pd\nfrom dateutil.relativedelta import relativedelta\nfrom pysftp import CnOpts, Connection\n\nfrom config import host, port, pwd, user\n\ncnOpts = CnOpts()\ncnOpts.hostkeys = None\n\n\n### GET FILE THROUGH SFTP\ndef get_df(dfname=None, usecols=None):\n    \"\"\"\n    This function creates a pandas dataframe from CSVs pulled from the SFTP server. The dataframe name must be specified:\n\n    aircraft utilization: ac_util\n    fleet utilization: fleet_util\n    engine utilization: eng_util\n    apu utilization: apu_util\n    component utilization: comp_util\n    mel usage: mel_use\n    mel workorders: mel_wo\n    part removals: part_rel\n    PIREP/MAREP: sys_rel\n\n    \"\"\"\n    with Connection(host, user, None, pwd, int(port), cnopts=cnOpts) as sftp:\n        sftp.cwd(\"amos_aims_intfc/emr_dash_test\")\n        with sftp.open(dfname) as f:\n            df = pd.read_csv(f, usecols=usecols)\n\n    return df\n\n\n### AIRCRAFT REGISTRATIONS IN THE FLEET\ndef fleet_regs():\n    ac_list = get_df(\"ac_util.csv\")[\"A/C\"].sort_values(ascending=True).tolist()\n\n    return ac_list\n\n\n### NUMBER OF DEPARTURES FROM EACH CITY\ndef get_deps(period=3, regs=[]):\n    coord = pd.read_csv(\"db/idn_cities.csv\")\n    flights = (\n        pd.read_csv(\n            \"db/ac_legs.csv\", usecols=[\"A/C\", \"Serv.Type\", \"Dep.\", \"Arr.\", \"Dep. Date\"]\n        )\n        .replace({\"KNO\": \"DPS\", \" \": np.nan})\n        .dropna(subset=[\"Serv.Type\", \"Dep.\"])\n    )\n\n    # Filter\n    flights[\"Dep. Date\"] = pd.to_datetime(flights[\"Dep. Date\"], format=\"%d.%b.%Y\")\n    queryPeriod = (flights[\"Dep. Date\"].dt.month > datetime.now().month - period) & (\n        flights[\"Dep. Date\"].dt.year == datetime.now().year\n    )\n    queryRegs = flights[\"A/C\"].isin(regs)\n\n    flights = flights[queryPeriod & queryRegs]\n\n    flights = (\n        flights[flights[\"Serv.Type\"] == \"J\"]\n        .groupby(by=\"Dep.\")\n        .count()\n        .reset_index()\n        .replace(\n            {\n                \"Dep.\": {\n                    \"CGK\": \"Jakarta\",\n                    \"DPS\": \"Denpasar\",\n                    \"SUB\": \"Surabaya\",\n                    \"YIA\": \"Yogyakarta\",\n                    \"BDJ\": \"Banjarmasin\",\n                    \"BPN\": \"Balikpapan\",\n                    \"PDG\": \"Padang\",\n                    \"PKU\": \"Pekanbaru\",\n                    \"PLM\": \"Palembang\",\n                    \"PNK\": \"Pontianak\",\n                }\n            }\n        )\n    )\n    flights = flights.rename(columns={\"Dep.\": \"city\"})\n\n    coord = coord.merge(flights, how=\"right\")\n\n    return coord\n\n\n### MONTHLY TECH. 
DR DATAFRAME\ndef get_dr(period=None, ytd=False, technical=True):\n \"\"\"\n Function to calculate the dispatch reliability for a given period.\n \"\"\"\n\n df0 = get_df(\n \"fleet_util.csv\", usecols=[\"Start\", \"TAH acc.\", \"TAC acc.\"]\n ) # Get monthly fleet utilization (FC)\n df0[\"Start\"] = pd.to_datetime(df0[\"Start\"])\n cycs = df0.set_index(\"Start\").to_period(\n \"M\"\n ) # The cycles dataframe is index by monthly period\n\n df1 = get_df(\"delays.csv\", usecols=[\"A/C\", \"Date\", \"Int. Code\"]) # Get delays data\n df1[\"Date\"] = pd.to_datetime(df1[\"Date\"])\n dels = df1.set_index(\"Date\").to_period(\n \"M\"\n ) # Index the delay dataframe by monthly period\n\n if technical:\n dels = dels[dels[\"Int. Code\"] != 93] # Only account for initial 4X delay codes\n\n dels = (\n dels.groupby(level=0)[[\"Int. Code\"]]\n .count()\n .rename(columns={\"Int. Code\": \"Delays\"})\n ) # Group and count the delays in monthly period\n\n dr = cycs.merge(dels, \"left\", left_index=True, right_index=True).fillna(0)\n rel = []\n for row in dr.itertuples(index=False):\n rel.append(\n round((row[1] - row[2]) * 100 / row[1], 2)\n ) # Calculate the DR (rounded to 2 decimal) and append to the list\n\n dr[\"dr\"] = rel # Add the monthly DR list into the dataframe\n\n # Period filters\n now = f\"{datetime.now().year}\" + \"-\" + f\"{datetime.now().month-1}\"\n if ytd:\n last = f\"{datetime.now().year}\" + \"-1\"\n else:\n past = datetime.now() - relativedelta(months=period)\n last = f\"{past.year}\" + \"-\" + f\"{past.month}\"\n\n return dr.loc[last:now, :]\n\n\n### MONTHLY APU UTIL DATAFRAME\ndef get_apu(period):\n get_df(\n \"apu_util.csv\",\n usecols=[\"Start\", \"A/C\", \"S/N\", \"APU Cycle Diff\", \"APU Hours Diff\"],\n )\n","repo_name":"onionbloom/emr_dash","sub_path":"dataframes.py","file_name":"dataframes.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"38597139714","text":"#!/usr/bin/env python\n\nDOC='''\nThis program extracts the footer information from BRASS output files.\nThe output is written as tab-separated fields to the system standard output.\n\nUsage:\n brass_footer.py FILE [FILES...] # output written to screen\n brass_footer.py FILE [FILES...] 
> OUTPUT.TSV # output written to OUTPUT.TSV\n ls PATH/*.BRASSOUT | brass_footer.py # input through pipe\n\n'''\nimport sys\nimport os\nimport logging\nfrom functools import reduce\n\nlogger = logging.getLogger()\nlogger.addHandler(logging.StreamHandler())\n\n\nclass Result(object):\n def __init__(self, filename):\n self.values = {'FileName': filename}\n\n def add(self, item, value):\n self.values[item] = value\n\n def keys(self):\n return set(self.values)\n\n def output(self, keys):\n fields = []\n for k in keys:\n fields.append(str(self.values.get(k, 0)))\n return '\\t'.join(fields) + '\\n'\n\n\nclass SetOfResults(object):\n def __init__(self):\n self.results = []\n\n def __len__(self):\n return len(self.results)\n\n def __str__(self):\n return self.output()\n\n def keys(self):\n return reduce(set.union, [result.keys() for result in self.results], set())\n\n def output(self):\n lines = []\n keys = ['FileName'] + [key for key in sorted(self.keys()) if key != 'FileName']\n if keys == ['FileName']:\n raise ValueError('No files had any footer information')\n lines.append('\\t'.join(keys) + '\\n')\n for result in self.results:\n lines.append(result.output(keys))\n return ''.join(lines)\n\n def append(self, result):\n self.results.append(result)\n\n\ndef skip_to_end(f):\n \"\"\"\n Skip towards the end of the file.\n\n Brass output files can be big, but the footer is always around the\n same size. This makes it faster to skip to the end of the file,\n then rewind to an area just before the footer (about 2000 bytes\n seems to work).\n \"\"\"\n f.seek(0, os.SEEK_END)\n here = f.tell()\n f.seek(max(0, here-2000), os.SEEK_SET)\n\n\ndef process_file(filename):\n import re\n\n # Regex extracts FIELD NAME and VALUE from lines of the form\n # \"# string FIELD NAME: int VALUE\"\n rgx = re.compile('^[#|%]\\s+(.+)\\:\\s+(\\d+)') # comment char can be # or %\n if not os.path.exists(filename):\n logger.error(\"File {} does not exist\".format(filename))\n return\n\n result = Result(filename)\n\n with open(filename) as fl:\n skip_to_end(fl)\n for line in fl:\n search = rgx.search(line)\n if search is not None:\n\n # Remove spaces and put in title case\n itemname = re.sub('[^a-zA-Z<]+', '', search.group(1).title())\n\n # One field starts with non-alphanumeric chars - \" < N read pairs\"\n # so this replaces \"< N\" with TooFew\n itemname = re.sub('<\\d?', 'TooFew', itemname)\n\n # Value is an integer\n value = int(search.group(2))\n result.add(itemname, value)\n return result\n\n\nif __name__ == \"__main__\":\n processed = SetOfResults()\n\n if len(sys.argv) > 1: # Prefer files on command line\n filenames = sys.argv[1:]\n\n else: # No files on the command line, fallback check stdin\n if sys.stdin.isatty(): # Waiting for terminal input, so write some help\n sys.stderr.write(DOC)\n sys.stderr.write(\"Enter files to scan (CTRL-D when done, CTRL-C to quit):\\n\")\n\n try: # Grab filenames from stdin\n filenames = [f.strip() for f in sys.stdin.read().split() if f.strip() > '']\n\n except KeyboardInterrupt: # Exit cleanly from CTRL-C, no backtrace\n sys.exit()\n\n for filename in filenames:\n result = process_file(filename)\n if result is not None:\n processed.append(result)\n\n if len(processed) > 0: # Successfully processed some files, so write to stdout\n try:\n sys.stdout.write(str(processed))\n except ValueError as err:\n logger.error(str(err))\n sys.stderr.write(DOC)\n\n else: # FAIL!\n logger.error(\"No files were found\")\n 
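# Nothing matched, so echo the usage text to remind the caller how to supply files.\n        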
sys.stderr.write(DOC)\n\n","repo_name":"TransmissibleCancerGroup/Mixed","sub_path":"brass_footer.py","file_name":"brass_footer.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"20188595280","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 31 13:59:24 2020\n\n@author: xpanz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.manifold import MDS\nimport math\nimport matplotlib.pyplot as plt\n\nimport matplotlib.image as mpimg\n#%%\nA = np.array([[149.1, 35.3],\n              [144.9, 37.8],\n              [151.2, 33.9],\n              [138.6, 34.9],\n              [130.8, 12.3],\n              [115.8, 31.7],\n              [117.9, 35],\n              [118.6, 20.3],\n              [153, 27.5],\n              [146.88, 19.28],\n              [145.8, 16.9],\n              [147.3, 42.8]])\n\nD = np.zeros((12,12))\nA = (A/180)*math.pi\nR = 6371\nfor i in range (0,12):\n    for j in range (0,12):\n        # haversine great-circle distance between points i and j (lon, lat in radians)\n        C = (math.sin((A[i,1]-A[j,1])/2))**2 + math.cos(A[i,1])*math.cos(A[j,1])*(math.sin((A[i,0]-A[j,0])/2))**2\n        theta = 2*math.asin(math.sqrt(C))\n        D[i][j] = theta*R\n        \ndata = D**2\nindex = ['Canberra','Melbourne','Sydney','Adelaide','Darwin','Perth','Albany','Port Hedland','Brisbane','Townsville','Cairns','Hobart']\ncolumns = ['Canberra','Melbourne','Sydney','Adelaide','Darwin','Perth','Albany','Port Hedland','Brisbane','Townsville','Cairns','Hobart']\nWord = pd.DataFrame(data,index,columns)\nWord\n#%%\nmds = MDS()\nmds.fit(data)\n#%%\n\n\na = mds.embedding_\nprint(a)\nplt.scatter(a[0:,0],a[0:,1],color='red')\nfor i,txt in enumerate(index):\n    plt.annotate(txt,((a[i,0],a[i,1])))\n\nplt.show()","repo_name":"woshipx/Linear_Dimensional_Reduction","sub_path":"MDS_AUS/Distance_Point.py","file_name":"Distance_Point.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"17328216063","text":"'''Methods to calculate various metrics.'''\n\nimport numpy as np\n\n\ndef compute_cvar(data, alpha, lower_range=True):\n    '''CVaR as mean of the lower-alpha-percentile of data.\n    adapted from https://github.com/nuria95/O-RAAC/blob/57347bc682798ff9f5600131c606517832efe864/oraaclib/util/utilities.py\n\n    Args:\n        data (ndarray): the trajectory RMSE collected by the Experiment class\n        alpha (float): the percentile upper bound to use\n        lower_range (bool): average the lower tail (True) or the upper tail (False)\n\n    Returns:\n        cvar (float): the resulting CVaR\n    '''\n\n    data = np.atleast_2d(data)\n    _, N = data.shape\n    sorted_data = np.sort(data)\n\n    # NOTE: what does it do?\n    # if alpha == 1 or alpha <= 0.5:\n    #     cvar = sorted_data[:, :int(alpha * N)].mean(1)\n    # else:\n    #     cvar = sorted_data[:, int(alpha * N)::].mean(1)\n    if lower_range:\n        cvar = sorted_data[:, :int(alpha * N)].mean()\n    else:\n        cvar = sorted_data[:, -int(alpha * N):].mean()\n    if np.all(np.isnan(cvar)):\n        raise ValueError(f'Not enough samples to compute {alpha} '\n                         f'CVaR from {data}')\n    else:\n        return cvar\n","repo_name":"utiasDSL/safe-control-gym","sub_path":"safe_control_gym/math_and_models/metrics/performance_metrics.py","file_name":"performance_metrics.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":445,"dataset":"github-code","pt":"14"} +{"seq_id":"27657972081","text":"# BOJ 2902: Why is KMP called KMP?\n# https://www.acmicpc.net/problem/2902\nimport sys, os\n\nsys.stdin = open(\"{}/BOJ2902.txt\".format(os.path.dirname(os.path.realpath(__file__))))\ntmp = sys.stdin.readline().split(\"-\")\nans = \"\"\nfor i in range(len(tmp)):\n    ans += 
tmp[i][0]\nprint(ans)\n","repo_name":"lzmgl/PS","sub_path":"2023-08/BOJ2902/BOJ2902.py","file_name":"BOJ2902.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"7955352317","text":"import timeit\nimport numpy as orig_np\nimport legate.numpy as np\n\n\ndef azimint_naive(data, radius, npt):\n rmax = radius.max()\n res = np.zeros(npt, dtype=np.float64)\n for i in range(npt):\n r1 = rmax * i / npt\n r2 = rmax * (i+1) / npt\n mask_r12 = np.logical_and((r1 <= radius), (radius < r2))\n values_r12 = data[mask_r12]\n res[i] = values_r12.mean()\n # on_values = 0\n # for j in range(len(data)):\n # if r1 <= radius[j] and radius[j] < r2:\n # res[i] += data[j]\n # on_values += 1\n # res[i] /= on_values\n return res\n\n\ndef initialize(N):\n data = orig_np.random.rand(N).astype(np.float64)\n radius = orig_np.random.rand(N).astype(np.float64)\n return data, radius\n\n\nif __name__ == \"__main__\":\n\n # Initialization\n N = 1000000\n npt = 1000\n orig_data, orig_radius = initialize(N)\n data = np.empty_like(orig_data)\n data[:] = orig_data\n radius = np.empty_like(orig_radius)\n radius[:] = orig_radius\n np_args = (data, radius, npt)\n\n # First execution\n azimint_naive(data, radius, npt)\n\n # Benchmark\n time = timeit.repeat(\"azimint_naive(data, radius, npt)\",\n setup=\"pass\", repeat=10, number=1, globals=globals())\n print(\"Legate Median time: {}\".format(np.median(time)))\n","repo_name":"ZiangTian/hpcTask1","sub_path":"code/npbench/benchmarks/azimint_naive/azimint_naive_legate.py","file_name":"azimint_naive_legate.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72251701455","text":"from flask_sqlalchemy import SQLAlchemy\nfrom dataclasses import dataclass\n\ndb = SQLAlchemy()\n\n@dataclass\nclass RunnedProcess(db.Model):\n id: int = db.Column(db.Integer, primary_key=True)\n frameCount: int = db.Column(db.Integer)\n currentFrame: int = db.Column(db.Integer)\n\n def to_dict(self):\n return dict(id = self.id, frameCount = self.frameCount, currentFrame = self.currentFrame)\n\n@dataclass\nclass EndedProcess(db.Model):\n id: int = db.Column(db.Integer, primary_key=True)\n mp4: str = db.Column(db.String(50), default=None)\n webm: str = db.Column(db.String(50), default=None)\n json: str = db.Column(db.String(50), default=None)\n\n def to_dict(self):\n return dict(id = self.id, mp4 = self.mp4, webm = self.webm, json = self.json)\n\n\nclass StartedProcess(db.Model):\n id:int = db.Column(db.Integer, primary_key=True)\n priority:int = db.Column(db.Integer, default=None)\n\n def to_dict(self):\n return dict(id = self.id, priority = self.priority)\n\n\ndef get_process(process_id):\n process = EndedProcess.query.get(process_id)\n status = 'end'\n if not process:\n status = 'run'\n process = RunnedProcess.query.get(process_id)\n if not process:\n status = 'start'\n process = StartedProcess.query.get(process_id)\n process_dict = process.to_dict()\n process_dict.update({'status': status})\n\n return process_dict\n\ndef set_process(process_id, status, app, **kwargs):\n with app.app_context():\n if status == 'start':\n process = StartedProcess(id=process_id, priority=1)\n db.session.add(process)\n db.session.commit()\n\n\n\n if status == 'run':\n process = StartedProcess.query.get(process_id)\n if process:\n db.session.delete(process)\n\n process = RunnedProcess.query.get(process_id)\n if not process:\n 
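# No cached row yet for this process id, so create one with its initial frame counters.\n                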
process = RunnedProcess(id=process_id, frameCount=kwargs.get('frameCount'), currentFrame=kwargs.get('currentFrame'))\n db.session.add(process)\n db.session.commit()\n else:\n process.frameCount = kwargs.get('frameCount', process.frameCount)\n process.currentFrame = kwargs.get('currentFrame', process.currentFrame)\n db.session.add(process)\n db.session.commit()\n\n if status == 'end':\n process = RunnedProcess.query.get(process_id)\n if process:\n db.session.delete(process)\n\n process = EndedProcess.query.get(process_id)\n if not process:\n process = EndedProcess(id=process_id)\n db.session.commit()\n\n process.mp4 = kwargs.get('mp4', process.mp4)\n process.webm = kwargs.get('webm', process.webm)\n process.json = kwargs.get('json', process.json)\n\n db.session.add(process)\n db.session.commit()\n process_dict = process.to_dict()\n process_dict.update({'status': status})\n return process_dict\n\n","repo_name":"alxgrents/web-speed-determination","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"35682274358","text":"#!/usr/bin/env python\n'''\nCreated on Jan 22, 2010\n\n@author: t0ster\n'''\nfrom PyQt4 import QtCore, QtGui\nfrom widgets import Field\nfrom ffnet import ffnet, mlgraph, savenet, loadnet\nimport numpy\n\n\nclass NeuralNetwork(QtGui.QWidget):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n self.field = Field(20, 20)\n \n self.outputs = []\n \n self.input = []\n self.target = []\n \n b = QtGui.QPushButton(\"Learn!\")\n self.connect(b, QtCore.SIGNAL(\"clicked()\"), self.learn)\n \n self.outcomes_list = QtGui.QComboBox()\n self._add_output(\"Square\")\n self._add_output(\"Triangle\")\n self._add_output(\"Line\")\n \n hpanel = QtGui.QHBoxLayout()\n hpanel.addWidget(self.outcomes_list)\n hpanel.addWidget(b)\n \n btn_classify = QtGui.QPushButton(\"Classify\")\n self.connect(btn_classify, QtCore.SIGNAL(\"clicked()\"), self.classify)\n \n btn_clear = QtGui.QPushButton(\"Clear\")\n self.connect(btn_clear, QtCore.SIGNAL(\"clicked()\"), self.clear)\n \n self.label_output = QtGui.QLabel()\n self.label_output.setMaximumHeight(20)\n \n self.label_epoch = QtGui.QLabel()\n self.label_epoch.setMaximumHeight(20)\n \n vpanel = QtGui.QVBoxLayout()\n vpanel.addWidget(self.field)\n vpanel.addLayout(hpanel)\n vpanel.addWidget(self.label_output)\n vpanel.addWidget(self.label_epoch)\n vpanel.addWidget(btn_classify)\n vpanel.addWidget(btn_clear)\n \n self.setLayout(vpanel)\n \n try:\n self.net, self.epoch = loadnet(\"netdata.dat\")\n except IOError:\n conec = mlgraph((self.field.x*self.field.y, 10, 10, 3))\n self.net = ffnet(conec)\n self.epoch = 0\n \n \n def _add_output(self, output):\n self.outputs.append(output)\n self.outcomes_list.addItem(output)\n \n \n def closeEvent(self, close_event):\n self.save_net()\n \n \n def learn(self):\n self.epoch += 1\n self.input.append(self.field.get_values())\n \n a = [0.0]*3; a[self.outcomes_list.currentIndex()] = 1.0\n self.target.append(a)\n\n self.net.train_tnc(numpy.array(self.input), numpy.array(self.target), maxfun = 2000, messages=1)\n \n \n def save_net(self):\n savenet((self.net, self.epoch), \"netdata.dat\")\n \n \n def classify(self):\n res_array = self.net.call(numpy.array(self.field.get_values()))\n print (res_array)\n res_arg = res_array.argmax()\n res_value = res_array[res_arg]\n self.label_output.setText(\"%s (%s)\" % (self.outputs[res_arg], res_value) )\n 
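# Also surface how many training rounds (epochs) the network has seen so far.\n        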
self.label_epoch.setText(\"Epoch: %s\" % self.epoch)\n        \n        \n    def clear(self):\n        self.field.clear() \n        \n\n\nif __name__ == \"__main__\":\n    import sys\n\n    app = QtGui.QApplication(sys.argv)\n    m = NeuralNetwork()\n    m.show()\n    \n    sys.exit(app.exec_())","repo_name":"t0ster/neuralnet-model","sub_path":"neuralnet.py","file_name":"neuralnet.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"13158229357","text":"from __future__ import absolute_import, division, print_function\nfrom dials.array_family import flex  # import dependency\nfrom dials.algorithms.profile_model import modeller  # import dependency\nfrom dials_algorithms_profile_model_gaussian_rs_ext import *\n\nfrom dials.algorithms.profile_model.gaussian_rs.model import (\n    phil_scope,\n)  # implicit dependency\nfrom dials.algorithms.profile_model.gaussian_rs.model import (\n    Model,\n)  # implicit dependency\n\n\ndef BBoxCalculator(crystal, beam, detector, goniometer, scan, delta_b, delta_m):\n    \"\"\" Return the relevant bbox calculator. \"\"\"\n    if goniometer is None or scan is None or scan.get_oscillation()[1] == 0:\n        algorithm = BBoxCalculator2D(beam, detector, delta_b, delta_m)\n    else:\n        algorithm = BBoxCalculator3D(beam, detector, goniometer, scan, delta_b, delta_m)\n    return algorithm\n\n\ndef PartialityCalculator(crystal, beam, detector, goniometer, scan, sigma_m):\n    \"\"\" Return the relevant partiality calculator. \"\"\"\n    if goniometer is None or scan is None or scan.get_oscillation()[1] == 0:\n        algorithm = PartialityCalculator2D(beam, sigma_m)\n    else:\n        algorithm = PartialityCalculator3D(beam, goniometer, scan, sigma_m)\n    return algorithm\n\n\ndef MaskCalculator(crystal, beam, detector, goniometer, scan, delta_b, delta_m):\n    \"\"\" Return the relevant mask calculator. 
\"\"\"\n    if goniometer is None or scan is None or scan.get_oscillation()[1] == 0:\n        algorithm = MaskCalculator2D(beam, detector, delta_b, delta_m)\n    else:\n        algorithm = MaskCalculator3D(beam, detector, goniometer, scan, delta_b, delta_m)\n    return algorithm\n","repo_name":"BlenderCN-Org/dials-dev20190819","sub_path":"modules/dials/algorithms/profile_model/gaussian_rs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"13416816183","text":"#coding: utf-8\n#Student: Alice Fernandes Silva / Programming 1, 2015\n#Problem: remove the words with the most vowels\n\ndef remove_palavras_com_mais_vogais(lista):\n\tcontadores = []\n\tfor i in range(len(lista)):\n\t\tcont = 0\n\t\tfor j in range(len(lista[i])):\n\t\t\tif lista[i][j] in \"aeiouAEIOU\":\n\t\t\t\tcont += 1\n\t\t\t\t\n\t\tcontadores.append(cont)\n\t\n\tmaior = contadores[0]\n\tfor i in range(len(contadores)-1,-1,-1):\n\t\tif contadores[i] >= maior:\n\t\t\tmaior = contadores[i]\n\t\n\t\n\tfor i in range(len(contadores)-1,-1,-1):\n\t\tif contadores[i] == maior:\n\t\t\tlista.pop(i)\n\n\n\t\n\t\t\n\t\t\n\n","repo_name":"alicesilva/P1-Python-Problemas","sub_path":"remove_vogais.py","file_name":"remove_vogais.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"37430852469","text":"import os\nimport pickle\nfrom functools import partial\nimport numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Polygon, box, Point, LineString\nfrom shapely.ops import unary_union\nfrom shapely.affinity import translate, rotate, scale\nfrom descartes.patch import PolygonPatch\nfrom tqdm.auto import trange\n\n\n# Stanford color palette at\n# https://identity.stanford.edu/color.html#print-color\ncolors = dict(\n    Ls=\"#F4F4F4\",\n    bkgd=\"#8c1515\",\n    chamber=\"#0098DB\",\n    sun=\"#2e2d29\",\n    planet=\"#2e2d29\",\n    wheel1=\"#2e2d29\",\n    wheel2=\"#2e2d29\",\n    piston=\"#2F2424\",\n    pad=\"#2e2d29\",\n    belt=\"#dad7cb\",\n    gmm=\"#F9F6EF\",\n    code=\"#928b81\",\n    kde=\"#928b81\"\n)\n\nimport ssm\nTEXTFILE = \"hmm_nocomments.py\"\n\n\ndef _single_L(x, y, height=5.75, width=5, weight=1, serif=.75):\n    \"\"\"Draw a single block L\"\"\"\n    boxes = []\n    # Add the horizontal and vertical bars\n    boxes.append(box(x+serif, y, x+serif+weight, y+height))\n    boxes.append(box(x, y, x+width, y+weight))\n    # Add the serifs\n    boxes.append(box(x, y+height-weight, x+2*serif+weight, y+height))\n    boxes.append(box(x+width-weight, y, x+width, y+weight+1.5*serif))\n    return unary_union(boxes)\n\ndef make_Ls():\n    \"\"\"Make a pair of block L's.\"\"\"\n    l1 = _single_L(-3.5, -2.25)\n    l2 = _single_L(-1.5, -3.5)\n    return unary_union([l1, l2])\n\ndef sample_spike_train(num_neurons, time_window, rate, seed=0):\n    \"\"\"A random spike train from a Poisson process.\"\"\"\n    npr.seed(seed)\n    rate *= np.ones(num_neurons)\n    num_spikes = npr.poisson(time_window * rate)\n    spikes = [npr.rand(s) * time_window for s in num_spikes]\n    spikes = list(map(np.sort, spikes))\n    return spikes\n\ndef make_spike_train(ax, state, spikes, time_window, bkgd,\n                     bottom=-3.5,\n                     top=3.5,\n                     left=-5.25,\n                     right=-1.75,\n                     sep_frac=0.2):\n    t_start = time_window * (1 - state)\n    t_stop = t_start + time_window\n    num_neurons = len(spikes)\n    spike_height = (top - bottom) / ((1 + sep_frac) * num_neurons)\n    spike_sep = sep_frac * spike_height\n\n    lines = []\n    for neuron, 
nspikes in enumerate(spikes):\n # pad the spike train with itself on the left\n nspikes = np.concatenate((nspikes, nspikes + time_window))\n for spike in nspikes:\n if spike < t_start or spike > t_stop:\n continue\n\n xpos = left + (spike - t_start) * (right - left) / time_window\n ypos = bottom + neuron * (spike_height + spike_sep)\n\n line = LineString([[xpos, ypos], [xpos, ypos+spike_height]]).buffer(0.01)\n if bkgd.contains(line):\n lines.append(line)\n\n return lines\n\n\ndef get_code_buffers(file, num_rows=23, buffer_size=100, window_size=45):\n text = []\n with open(file, \"r\") as f:\n for line in f.readlines():\n line = line.strip()\n if len(line) > 0:\n if line[0] not in ('#', '@', '\"'):\n text.append(line)\n text = ' '.join(text)\n\n # choose random starting points\n stops = npr.choice(np.arange(buffer_size, len(text)), size=num_rows)\n buffers = [text[stop - buffer_size:stop] for stop in stops]\n return buffers\n\ndef make_code_textbox(buffers, state, buffer_size=100, window_size=45):\n start = int((buffer_size - window_size) * (1 - state))\n\n textbox = '\\n'.join([\n buffer[start:start + window_size]\n for buffer in buffers\n ])\n\n return textbox\n\n\ndef sample_mixture_model(num_points, cache=False):\n if cache:\n fname = \"gmm_{}.pkl\".format(num_points)\n if os.path.exists(fname):\n with open(fname, \"rb\") as f:\n results = pickle.load(f)\n\n else:\n results = _sample_mixture_model(num_points)\n with open(fname, \"wb\") as f:\n pickle.dump(results, f)\n else:\n results = _sample_mixture_model(num_points)\n\n return results\n\n\ndef _sample_mixture_model(num_points):\n \"\"\"Sample a Gaussian mixture model and compute its posterior\"\"\"\n\n means = np.array([\n [2, 3],\n [3, 1.5],\n [4, 0],\n [4.5, -1.5]\n ])\n covariances = np.array([\n [[0.1, 0],\n [0, 0.1]],\n [[0.4, 0.05],\n [0.05, 0.2]],\n [[0.3, -0.05],\n [-0.05, 0.2]],\n [[0.05, 0],\n [0, 0.2]]\n ])\n\n npr.seed(0)\n data = np.zeros((num_points, 2))\n for n in range(num_points):\n comp = npr.choice(len(means))\n data[n] = npr.multivariate_normal(means[comp], covariances[comp])\n\n times = npr.rand(num_points)\n perm = np.argsort(times)\n data = data[perm]\n times = times[perm]\n\n valid = (np.abs(data[:, 1]) < 3.5) & (np.linalg.norm(data, axis=1) < 5.25)\n data = data[valid]\n times = times[valid]\n gmms = []\n return data, times, means, covariances, gmms\n\n\ndef plot_gaussian_2D(mu, lmbda, color='b', num_std=2,\n centermarker=True, label='',\n alpha=1., ax=None, artists=None):\n '''\n Plots mean and cov ellipsoid into current axes. Must be 2D. 
lmbda is a covariance matrix.\n '''\n assert len(mu) == 2\n ax = ax if ax else plt.gca()\n\n t = np.hstack([np.arange(0,2*np.pi,0.01),0])\n circle = np.vstack([np.sin(t),np.cos(t)])\n ellipse = np.dot(np.linalg.cholesky(lmbda),circle)\n\n point = ax.scatter([mu[0]],[mu[1]],marker='D',color=color,s=4,alpha=alpha, zorder=3) \\\n if centermarker else None\n\n for scale in range(1, num_std+1):\n ax.plot(scale * ellipse[0,:] + mu[0],\n scale * ellipse[1,:] + mu[1],\n linestyle='-', linewidth=2,\n color=color,\n label=label,\n alpha=alpha,\n zorder=3)\n\n\ndef init_kdeplot(bkgd):\n X, Y = np.meshgrid(np.linspace(0, 5, 100),\n np.linspace(-3.75, 3.75, 100))\n XY = np.column_stack((X.ravel(), Y.ravel()))\n\n valid = np.array([bkgd.contains(Point(*xy)) for xy in XY])\n return X, Y, XY, valid\n\ndef kdeplot(points, X, Y, XY, valid, lengthscale=0.25):\n logpdf = -0.5 * np.sum((XY[:, None, :] - points[None, :, :])**2 / lengthscale**2, axis=-1)\n from scipy.special import logsumexp\n logpdf = logsumexp(logpdf, axis=1)\n logpdf[~valid] = np.nan\n plt.contour(X, Y, np.exp(logpdf).reshape(X.shape), 6, colors=colors[\"kde\"], zorder=3)\n\ndef draw_frame(ax, state, spikes, buffers, data, times, means, covariances, gmms, helpers=None):\n ax.cla()\n ax.patch.set_color((0, 0, 0, 0.))\n\n Ls = make_Ls()\n ax.add_patch(PolygonPatch(Ls, facecolor=colors[\"Ls\"], edgecolor=\"none\", zorder=0))\n\n # Plot the text box\n textbox = make_code_textbox(buffers, state)\n ax.text(-3.5, 3.4, textbox,\n fontsize=10.,\n fontfamily=\"monospace\",\n verticalalignment='top', color=colors[\"code\"], zorder=1)\n\n # Overlay background\n bkgd = Point(0, 0).buffer(5.25) - Ls\n ax.add_patch(PolygonPatch(bkgd, facecolor=colors[\"bkgd\"], edgecolor='none', zorder=2))\n\n # Draw the spike train\n lines = make_spike_train(ax, state, spikes, 10, bkgd)\n for line in lines:\n ax.add_patch(PolygonPatch(line, facecolor='w', edgecolor='w', zorder=3))\n\n # Plot a contour of the KDE\n if helpers is None:\n helpers = init_kdeplot(bkgd)\n\n # Plot data points and kernel density estimate\n these_data = data[times < state]\n if len(these_data > 0):\n kdeplot(these_data, *helpers)\n ax.plot(these_data[:, 0], these_data[:, 1], 'wo', markersize=4, zorder=4)\n\n ax.add_patch(PolygonPatch(bkgd, facecolor='none', edgecolor='k', lw=6, zorder=100))\n\n ax.set_xlim(-5.5, 5.5)\n ax.set_ylim(-5.5, 5.5)\n\n return helpers\n\nif __name__ == \"__main__\":\n\n # Sample random spike train\n spikes = sample_spike_train(20, 10, 0.75)\n\n # Get code buffers\n buffers = get_code_buffers(TEXTFILE, 25)\n\n # Sample gaussian mixture model\n data, times, means, covariances, gmms = sample_mixture_model(150)\n\n # Initialize plot\n fig = plt.figure(figsize=(6, 6))\n # fig.patch.set_alpha(0.0)\n ax = fig.add_axes((0, 0, 1, 1))\n # ax.patch.set_color(\"gray\")\n ax.patch.set_alpha(0.0)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set_xlim(-5.5, 5.5)\n ax.set_ylim(-5.5, 5.5)\n\n # # Draw a single frame\n # draw_frame(ax, 0, spikes, buffers, data, times, means, covariances, gmms)\n # plt.show()\n # plt.savefig(\"logo.png\", dpi=300)\n # plt.savefig(\"logo.pdf\")\n\n # Save still frames and convert to gif with\n # convert -delay 3 -loop 0 _stills/frame_{0..99}.png logo.gif\n # The `convert` util comes with ImageMagick\n thetas = np.linspace(0, 1, 100)\n helpers = None\n for i in trange(len(thetas)):\n helpers = draw_frame(ax, thetas[i], spikes, buffers, data, times, means, covariances, gmms, helpers=helpers)\n 
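# draw_frame hands back its KDE grid helpers so later frames skip the grid setup.\n        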
plt.savefig(\"_stills_code/frame_{}.png\".format(i), dpi=50)\n","repo_name":"lindermanlab/logo","sub_path":"make_logo_code.py","file_name":"make_logo_code.py","file_ext":"py","file_size_in_byte":8980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"33955663074","text":"# Views are where the routes are defined\nimport pprint\nfrom flask import render_template, flash, redirect\nfrom app import app\nfrom .forms import LoginForm\n\n\n# Root for the test site\n@app.route('/')\n@app.route('/index')\ndef index():\n # Returns whatever is returned by the return for index()\n # need to create db connection here\n # return \"Hello, World!\"\n\n users = { # face user\n 'nickname': 'Minhaz',\n 'fullname': 'Ratul Minhaz',\n 'email': 'm@ratul.xyz'\n }\n\n posts = [ # fake array of posts\n {\n 'author': {'nickname': 'John'},\n 'body': 'Beautiful day in Portland!'\n },\n {\n 'author': {'nickname': 'Susan'},\n 'body': 'The Avengers movie was so cool!'\n },\n {\n 'author': {'nickname': 'Mahmud'},\n 'body': 'Je mange riz!'\n }\n ]\n\n pprint.pprint(users, indent=4, depth=1)\n\n return render_template('index.html',\n title='Home',\n users=users,\n posts=posts)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n # after the submit button is pressed, if anything wrong the last return\n # will work.\n if form.validate_on_submit():\n # show a message on next page\n \n flash('Login requested for OpenID=\"%s\", remember_me=%s' %\n (form.openid.data, str(form.remember_me.data)))\n return redirect('/index')\n return render_template('login.html',\n title='Sign In',\n form=form,\n providers=app.config['OPENID_PROVIDERS'])\n","repo_name":"ratulotron/MegaFlask","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"42592187839","text":"def pipeline(handler, **opts):\n def decorator(func):\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n return handler(result, **opts)\n\n return wrapper\n\n return decorator\n\n\ndef grep(content: str, pattern: str):\n import re\n\n filtered = []\n content = content.splitlines()\n for line in content:\n if re.search(pattern, line):\n filtered.append(line)\n\n return \"\\n\".join(filtered)\n\n\ndef tr(content: str, delete: bool, char: str):\n final = []\n\n if delete:\n content = content.splitlines()\n for line in content:\n new_line = line.replace(char, \"\")\n final.append(new_line)\n if final:\n return \"\".join(final)\n\n return content\n\n\n@pipeline(tr, delete=True, char=\"\\n\")\n@pipeline(grep, pattern=\"ed\")\ndef echo():\n poetry = \"\"\"\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\n \"\"\"\n return poetry.strip()\n\n\nif __name__ == '__main__':\n result = echo()\n print(result)\n","repo_name":"100gle/sspai-100-hours-series-python","sub_path":"projects/click-cli/basic/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"14"} +{"seq_id":"10336890015","text":"from rest_framework import serializers\n\nfrom agents.models import Agent\nfrom clients.models import Client, Site\n\n\nclass ListAgentSerializer(serializers.ModelSerializer[Agent]):\n class Meta:\n model 
= Agent\n fields = \"__all__\"\n\n\nclass DetailAgentSerializer(serializers.ModelSerializer[Agent]):\n status = serializers.ReadOnlyField()\n\n class Meta:\n model = Agent\n fields = (\n \"version\",\n \"operating_system\",\n \"plat\",\n \"goarch\",\n \"hostname\",\n \"agent_id\",\n \"last_seen\",\n \"services\",\n \"public_ip\",\n \"total_ram\",\n \"disks\",\n \"boot_time\",\n \"logged_in_username\",\n \"last_logged_in_user\",\n \"monitoring_type\",\n \"description\",\n \"mesh_node_id\",\n \"overdue_email_alert\",\n \"overdue_text_alert\",\n \"overdue_dashboard_alert\",\n \"offline_time\",\n \"overdue_time\",\n \"check_interval\",\n \"needs_reboot\",\n \"choco_installed\",\n \"wmi_detail\",\n \"patches_last_installed\",\n \"time_zone\",\n \"maintenance_mode\",\n \"block_policy_inheritance\",\n \"alert_template\",\n \"site\",\n \"policy\",\n \"status\",\n \"checks\",\n \"pending_actions_count\",\n \"cpu_model\",\n \"graphics\",\n \"local_ips\",\n \"make_model\",\n \"physical_disks\",\n \"serial_number\",\n )\n\n\nclass ClientSerializer(serializers.ModelSerializer[Client]):\n class Meta:\n model = Client\n fields = \"__all__\"\n\n\nclass SiteSerializer(serializers.ModelSerializer[Site]):\n class Meta:\n model = Site\n fields = \"__all__\"\n","repo_name":"infinite8co/tacticalrmm","sub_path":"api/tacticalrmm/beta/v1/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"14"} +{"seq_id":"4299435275","text":"###############################################\n#\n# Author: Aniruddha Gokhale\n# Vanderbilt University\n#\n# Purpose: Skeleton/Starter code for the Broker application\n#\n# Created: Spring 2023\n#\n###############################################\n\n\n# This is left as an exercise for the student. The Broker is involved only when\n# the dissemination strategy is via the broker.\n#\n# A broker is an intermediary; thus it plays both the publisher and subscriber roles\n# but in the form of a proxy. For instance, it serves as the single subscriber to\n# all publishers. On the other hand, it serves as the single publisher to all the subscribers. \n\n\nimport os # for OS functions\nimport sys # for syspath and system exception\nimport time # for sleep\nimport argparse # for argument parsing\nimport configparser # for configuration parsing\nimport logging # for logging. Use it in place of print statements.\n\n# Import our topic selector. 
Feel free to use an alternate way to\n# get your topics of interest\nfrom topic_selector import TopicSelector\n\n# Now import our CS6381 Middleware\nfrom CS6381_MW.BrokerMW import BrokerMW\n# We also need the message formats to handle incoming responses.\nfrom CS6381_MW import discovery_pb2\nfrom enum import Enum\n\nclass BrokerAppln():\n\n    class State(Enum):\n        INITIALIZE = 0,\n        CONFIGURE = 1,\n        TRANSFERDATA = 2,\n        COMPLETED = 3\n\n    def __init__(self, logger):\n        self.state = self.State.INITIALIZE # the state that we are in\n        self.entity_num = 0\n        self.pub_num = 0\n        self.sub_num = 0\n        self.name = None # our name (some unique name)\n        self.mw_obj = None # handle to the underlying Middleware object\n        self.logger = logger\n\n\n    def configure(self, args):\n        ''' Initialize the object '''\n\n        try:\n            # Here we initialize any internal variables\n            self.logger.info(\"BrokerAppln::configure\")\n\n            # set our current state to CONFIGURE state\n            self.state = self.State.CONFIGURE\n\n            # initialize our variables\n            self.name = args.name # our name\n            self.iters = args.iters # num of iterations\n\n\n            # Now, get the configuration object\n            self.logger.debug(\"BrokerAppln::configure - parsing config.ini\")\n            config = configparser.ConfigParser()\n            config.read(args.config)\n\n            # Now set up our underlying middleware object to which we delegate\n            # everything\n            self.logger.debug(\"BrokerAppln::configure - initialize the middleware object\")\n            self.mw_obj = BrokerMW(self.logger)\n            self.mw_obj.configure(args) # pass remainder of the args to the m/w object\n\n            self.logger.info(\"BrokerAppln::configure - configuration complete\")\n\n        except Exception as e:\n            raise e\n\n    def driver(self):\n        ''' Driver program '''\n\n        try:\n            self.logger.info(\"BrokerAppln::driver\")\n\n            # First ask our middleware to keep a handle to us to make upcalls.\n            # This is related to upcalls. By passing a pointer to ourselves, the\n            # middleware will keep track of it and any time something must\n            # be handled by the application level, invoke an upcall.\n            self.logger.debug(\"BrokerAppln::driver - upcall handle\")\n            self.mw_obj.transferData()\n\n            # Because we are simply delegating everything to an event loop\n            # that will call us back, we will need to know when we get called back as to\n            # what should be our next set of actions. Hence, as a hint, we set our state\n            # accordingly so that when we are out of the event loop, we know what\n            # operation is to be performed. In this case the broker's job is to relay\n            # data between publishers and subscribers. So this is our next state.\n            self.state = self.State.TRANSFERDATA\n\n            self.mw_obj.event_loop(timeout=None) # start the event loop\n\n\n            self.logger.info(\"BrokerAppln::driver completed\")\n\n        except Exception as e:\n            raise e\n\n\n\n\ndef parseCmdLineArgs():\n    # instantiate an ArgumentParser object\n    parser = argparse.ArgumentParser(description=\"Broker Application\")\n\n    # Now specify all the optional arguments we support\n    # At a minimum, you will need a way to specify the IP and port of the lookup\n    # service, the role we are playing, what dissemination approach are we\n    # using, what is our endpoint (i.e., port where we are going to bind at the\n    # ZMQ level)\n\n    parser.add_argument(\"-n\", \"--name\", default=\"pub\", help=\"Some name assigned to us. 
Keep it unique per Broker\")\n\n parser.add_argument(\"-a\", \"--addr\", default=\"localhost\",\n help=\"IP addr of this Broker to advertise (default: localhost)\")\n\n parser.add_argument(\"-s\", \"--sub_port\", type=int, default=5590,\n help=\"Port number on which our underlying Broker ZMQ service runs, default=5599\")\n\n parser.add_argument(\"-p\", \"--pub_port\", type=int, default=5589,\n help=\"Port number on which our underlying Broker ZMQ service runs, default=5589\")\n\n parser.add_argument(\"-e\", \"--bro_id\", default=\"0\",\n help=\"broker id number, default 0\")\n\n parser.add_argument(\"-d\", \"--broker_id\", type=int, default=5589,\n help=\"IP Addr:Port combo for the discovery service, default localhost:5555\")\n\n parser.add_argument(\"-T\", \"--num_topics\", type=int, choices=range(1, 10), default=1,\n help=\"Number of topics to publish, currently restricted to max of 9\")\n\n parser.add_argument(\"-c\", \"--config\", default=\"config.ini\", help=\"configuration file (default: config.ini)\")\n\n parser.add_argument(\"-f\", \"--frequency\", type=int, default=1,\n help=\"Rate at which topics disseminated: default once a second - use integers\")\n\n parser.add_argument(\"-i\", \"--iters\", type=int, default=1000,\n help=\"number of publication iterations (default: 1000)\")\n\n parser.add_argument(\"-l\", \"--loglevel\", type=int, default=logging.INFO,\n choices=[logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL],\n help=\"logging level, choices 10,20,30,40,50: default 20=logging.INFO\")\n\n return parser.parse_args()\n\n\n\ndef main():\n try:\n # obtain a system wide logger and initialize it to debug level to begin with\n logging.info(\"Main - acquire a child logger and then log messages in the child\")\n logger = logging.getLogger(\"BrokerAppln\")\n\n # first parse the arguments\n logger.debug(\"Main: parse command line arguments\")\n args = parseCmdLineArgs()\n\n # reset the log level to as specified\n logger.debug(\"Main: resetting log level to {}\".format(args.loglevel))\n logger.setLevel(args.loglevel)\n logger.debug(\"Main: effective log level is {}\".format(logger.getEffectiveLevel()))\n\n # Obtain a Broker application\n logger.debug(\"Main: obtain the Broker appln object\")\n broker_app = BrokerAppln(logger)\n\n # configure the object\n logger.debug(\"Main: configure the Broker appln object\")\n broker_app.configure(args)\n\n broker_app.driver()\n\n except Exception as e:\n logger.error(\"Exception caught in main - {}\".format(e))\n return\n\nif __name__ == \"__main__\":\n\n # set underlying default logging capabilities\n logging.basicConfig (level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\n main ()","repo_name":"YuWVandy/Distributed-System-1","sub_path":"Assignment3/BrokerAppln.py","file_name":"BrokerAppln.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"26884758576","text":"import AcrivationFunctions as af\r\nimport numpy as np\r\nimport handlingFiles as handleFiles\r\nimport csv as csv\r\n\r\n\r\nclass Layer:\r\n\r\n def __init__(self, numOfNeurons, numOfNeuronsInPreviousLayer,num):\r\n self.layerNumber = num\r\n self.numOfNeurons = numOfNeurons\r\n self.numOfNeuronsInPreviousLayer = numOfNeuronsInPreviousLayer\r\n\r\n self.biases = np.zeros(numOfNeurons)\r\n self.activationValues = np.zeros_like(self.biases)\r\n self.nodeSums = np.zeros_like(self.biases)\r\n\r\n self.weights = 
np.random.uniform(low=-0.1, high=0.1, size=(numOfNeurons, numOfNeuronsInPreviousLayer))\r\n\r\n self.weightsGradient = np.zeros_like(self.weights)\r\n self.biasesGradient = np.zeros_like(self.biases)\r\n\r\n def printLayer(self):\r\n print('layer',self.layerNumber,' activation val: ',self.activationValues)\r\n print('layer',self.layerNumber,'biases: ', self.biases)\r\n print('layer',self.layerNumber,'weights: ', self.weights, '\\n')\r\n\r\n\r\nclass Network:\r\n\r\n learningRate = 0.05\r\n costC = 0\r\n\r\n def __init__(self,numOfHiddenLayers,numOfNeuronsInLayers):\r\n\r\n self.numOfLayers = numOfHiddenLayers+2\r\n self.numOfHiddenLayers = numOfHiddenLayers\r\n self.Layers = []\r\n self.Layers.append(Layer(numOfNeuronsInLayers[0], 0,0)) # append input layer without connections\r\n\r\n for i in range(1,self.numOfLayers):\r\n self.Layers.append(Layer(numOfNeuronsInLayers[i],numOfNeuronsInLayers[i-1],i))\r\n\r\n self.desiredOutput = np.zeros(shape=self.Layers[self.numOfLayers-1].numOfNeurons)\r\n\r\n def feedForward(self):\r\n for i in range(1,self.numOfLayers): # iterate through layers, start from 1 and not 0 - 0 is the input layer, no calculations take place here\r\n self.Layers[i].nodeSums = np.matmul(self.Layers[i-1].activationValues, self.Layers[i].weights.T) + self.Layers[i].biases\r\n self.Layers[i].activationValues = af.sigmoid(self.Layers[i].nodeSums)\r\n\r\n def propagateBackwards(self):\r\n outputLayer = self.numOfLayers-1\r\n propagatedCostC = 0\r\n #calculate error\r\n self.costC = np.array(self.Layers[outputLayer].activationValues - self.desiredOutput)\r\n\r\n #adjust weights of outputLayer\r\n for i in range(self.Layers[outputLayer].numOfNeurons):\r\n\r\n self.Layers[outputLayer].biasesGradient[i] = 2 * self.costC[i] * af.sigmoidDerivative(self.Layers[outputLayer].nodeSums[i])\r\n\r\n for j in range(self.Layers[outputLayer].numOfNeuronsInPreviousLayer):\r\n self.Layers[outputLayer].weightsGradient[i,j] = 2 * self.costC[i] * af.sigmoidDerivative(self.Layers[outputLayer].nodeSums[i]) * self.Layers[outputLayer - 1].activationValues[j]\r\n\r\n self.Layers[outputLayer].weights = self.Layers[outputLayer].weights - self.learningRate * self.Layers[outputLayer].weightsGradient\r\n self.Layers[outputLayer].biases = self.Layers[outputLayer].biases - self.learningRate * self.Layers[outputLayer].biasesGradient\r\n\r\n for k in range(outputLayer-1,1): # iterate through layers - start from max layer number -2 and go backwards\r\n propagatedCostC = 0\r\n for i in range(self.Layers[k].numOfNeurons):\r\n self.Layers[k].biasesGradient[i] = 2 * self.costC[i] * af.sigmoidDerivative(self.Layers[k].nodeSums[i])\r\n for j in range(self.Layers[k].numOfNeuronsInPreviousLayer):\r\n self.Layers[k].weightsGradient[i, j] = 2 * self.costC[i] * af.sigmoidDerivative(self.Layers[k].nodeSums[i]) * self.Layers[k - 1].activationValues[j]\r\n self.Layers[k].weights = self.Layers[k].weights - self.learningRate * self.Layers[k].weightsGradient\r\n self.Layers[k].biases = self.Layers[k].biases - self.learningRate * self.Layers[k].biasesGradient\r\n\r\n def writeInputData(self, data):\r\n for i in range(0, len(self.Layers[0].activationValues)):\r\n self.Layers[0].activationValues[i] = data[i]\r\n\r\n def loadTrainingExample(self,inputData, desiredOutput):\r\n self.writeInputData(inputData)\r\n self.desiredOutput = desiredOutput\r\n\r\n def saveNetwork(self,filename):\r\n accumulatedNetworkData = [[self.numOfLayers]]\r\n for layer in self.Layers:\r\n accumulatedNetworkData.append([layer.numOfNeurons])\r\n 
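# Each layer is serialized as four CSV rows: neuron count, previous-layer count, biases, flattened weights.\n            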
accumulatedNetworkData.append([layer.numOfNeuronsInPreviousLayer])\r\n accumulatedNetworkData.append(layer.biases)\r\n accumulatedNetworkData.append(layer.weights.flatten())\r\n handleFiles.writeCSV(filename, accumulatedNetworkData)\r\n\r\n def loadNetwork(self,fileName):\r\n file = open(fileName, 'r')\r\n csvReader = csv.reader(file)\r\n self.numOfLayers = int(next(csvReader)[0])\r\n\r\n for i in range(0,self.numOfLayers):\r\n self.Layers[i].numOfNeurons = int(next(csvReader)[0])\r\n self.Layers[i].numOfNeuronsInPreviousLayer = int(next(csvReader)[0])\r\n self.Layers[i].biases = np.array(next(csvReader))\r\n self.Layers[i].biases = self.Layers[i].biases.astype(np.float)\r\n tempArray = np.array(next(csvReader))\r\n tempArray = tempArray.astype(np.float) #convert to float\r\n self.Layers[i].weights = np.reshape(tempArray, newshape=(self.Layers[i].numOfNeurons, self.Layers[i].numOfNeuronsInPreviousLayer))\r\n file.close()\r\n\r\n def printNetwork(self):\r\n for layer in self.Layers:\r\n layer.printLayer()\r\n\r\n\r\n","repo_name":"JakubWrona255/SieciNeuronowe","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"38571737859","text":"from itertools import combinations_with_replacement\nimport functools\n\nentree = input().split()\nmetin = sorted(entree[0].strip()) #combinations operation is done in lexicographic sorted order. Sorting in advance will give a sorted output\nlongeur = int(entree[1])\n\n\ntapils = list(combinations_with_replacement(metin, longeur))\nfor j in tapils:\n print(functools.reduce(lambda a, b: a + b, j))\n\n# itertools.combinations_with_replacement(iterable, r)\n# This tool returns length subsequences of elements from the input iterable allowing individual elements to be repeated more than once.\n\n# Combinations are emitted in lexicographic sorted order. 
So, if the input iterable is sorted, the combination tuples will be produced in sorted order.\n\n# Sample Code\n\n# >>> from itertools import combinations_with_replacement\n# >>> \n# >>> print list(combinations_with_replacement('12345',2))\n# [('1', '1'), ('1', '2'), ('1', '3'), ('1', '4'), ('1', '5'), ('2', '2'), ('2', '3'), ('2', '4'), ('2', '5'), ('3', '3'), ('3', '4'), ('3', '5'), ('4', '4'), ('4', '5'), ('5', '5')]\n# >>> \n# >>> A = [1,1,3,3,3]\n# >>> print list(combinations(A,2))\n# [(1, 1), (1, 3), (1, 3), (1, 3), (1, 3), (1, 3), (1, 3), (3, 3), (3, 3), (3, 3)]\n# Task\n\n# You are given a string .\n# Your task is to print all possible size replacement combinations of the string in lexicographic sorted order.\n\n# Input Format\n\n# A single line containing the string and integer value separated by a space.\n\n# Constraints\n\n\n# The string contains only UPPERCASE characters.\n\n# Output Format\n\n# Print the combinations with their replacements of string on separate lines.\n\n# Sample Input\n\n# HACK 2\n# Sample Output\n\n# AA\n# AC\n# AH\n# AK\n# CC\n# CH\n# CK\n# HH\n# HK\n# KK","repo_name":"nediyonbe/Exercise","sub_path":"Exercise 24.py","file_name":"Exercise 24.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"34869886474","text":"from yt_common.config import Config\n\nfrom typing import Union\n\nimport os\n\nTITLE: str = \"PriPri\"\nTITLE_LONG: str = \"Princess Principal\"\nRESOLUTION: int = 1080\nDATAPATH: str = os.path.dirname(__file__)\n\n\nclass PriPriConfig(Config):\n def __init__(self, desc: Union[str, int]) -> None:\n super().__init__(\n desc,\n TITLE,\n TITLE_LONG,\n RESOLUTION,\n DATAPATH\n )\n","repo_name":"yametetomete/EncodeScripts","sub_path":"Princess Principal/pripri_common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"14"} +{"seq_id":"27850933943","text":"import numpy as np\nimport OpenGL.GL as gl\nimport pangolin\nfrom multiprocessing import Process, Queue\nimport g2o\nfrom frame import poseRt\nLOCAL_WINDOW = 5\nclass Point(object):\n # A Point is a 3-D point in the world\n # Each Point is observed in multiple Frames\n\n def __init__(self, mapp, loc):\n self.pt = loc\n self.frames = []\n self.idxs = []\n \n self.id = len(mapp.points)\n mapp.points.append(self)\n\n def add_observation(self, frame, idx):\n frame.pts[idx] = self\n self.frames.append(frame)\n self.idxs.append(idx)\n\nclass Map(object):\n def __init__(self):\n self.frames = []\n self.points = []\n self.state = None\n self.q = Queue()\n\n p = Process(target=self.viewer_thread, args=(self.q,))\n p.daemon = True\n p.start()\n \n # *** Map optimizer ***\n def optimize(self):\n # create g2o optimizer\n opt = g2o.SparseOptimizer()\n solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())\n solver = g2o.OptimizationAlgorithmLevenberg(solver)\n opt.set_algorithm(solver)\n\n robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))\n\n if LOCAL_WINDOW is None:\n local_frames = self.frames\n else:\n local_frames = self.frames[-LOCAL_WINDOW:]\n\n # add frames to graph\n for f in self.frames:\n pose = np.linalg.inv(f.pose)\n sbacam = g2o.SBACam(g2o.SE3Quat(pose[0:3, 0:3], pose[0:3, 3]))\n sbacam.set_cam(f.K[0][0], f.K[1][1], f.K[0][2], f.K[1][2], 1.0)\n\n v_se3 = g2o.VertexCam()\n v_se3.set_id(f.id)\n v_se3.set_estimate(sbacam)\n v_se3.set_fixed(f.id <= 1 or f not in local_frames)\n 
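# Keep the first two poses (and any frame outside the local window) fixed to anchor the gauge freedom.\n            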
opt.add_vertex(v_se3)\n \n # add points to frames\n PT_ID_OFFSET = 0x10000\n for p in self.points:\n if not any([f in local_frames for f in p.frames]):\n continue\n\n pt = g2o.VertexSBAPointXYZ()\n pt.set_id(p.id + PT_ID_OFFSET)\n pt.set_estimate(p.pt[0:3])\n pt.set_marginalized(True)\n pt.set_fixed(False)\n opt.add_vertex(pt)\n\n for f in p.frames:\n edge = g2o.EdgeProjectP2MC()\n edge.set_vertex(0, pt)\n edge.set_vertex(1, opt.vertex(f.id))\n uv = f.ukps[f.pts.index(p)]\n edge.set_measurement(uv)\n edge.set_information(np.eye(2))\n edge.set_robust_kernel(robust_kernel)\n opt.add_edge(edge)\n\n opt.set_verbose(True)\n opt.initialize_optimization()\n opt.optimize(50)\n\n # put frames back\n for f in self.frames:\n est = opt.vertex(f.id).estimate()\n R = est.rotation().matrix()\n t = est.translation()\n f.pose = np.linalg.inv(poseRt(R, t))\n\n # put points back (and cull)\n new_points = []\n for p in self.points:\n vert = opt.vertex(p.id + PT_ID_OFFSET)\n if vert is None:\n new_points.append(p)\n continue\n est = vert.estimate()\n\n # 2 match point that's old\n #old_point = len(p.frames) == 2 and p.frames[-1] not in local_frames\n '''\n # compute reprojection error\n errs = []\n for f in p.frames:\n uv = f.ukps[f.pts.index(p)]\n proj = np.dot(np.dot(f.K, f.pose[:3]),\n np.array([est[0], est[1], est[2], 1.0]))\n proj = proj[0:2] / proj[2]\n errs.append(np.linalg.norm(proj-uv))\n '''\n p.pt = np.array(est)\n new_points.append(p)\n\n self.points = new_points\n\n return opt.chi2()\n\n\n # *** Map viewer ***\n def viewer_thread(self, q):\n self.viewer_init(1024, 768)\n while 1:\n self.viewer_refresh(q)\n\n def viewer_init(self, w, h):\n pangolin.CreateWindowAndBind('Main', w, h)\n gl.glEnable(gl.GL_DEPTH_TEST)\n\n self.scam = pangolin.OpenGlRenderState(\n pangolin.ProjectionMatrix(w, h, 420, 420, w//2, h//2, 0.2, 1000),\n pangolin.ModelViewLookAt(0, -10, -8,\n 0, 0, 0,\n 0, -1, 0))\n self.handler = pangolin.Handler3D(self.scam)\n\n # Create Interactive View in window\n self.dcam = pangolin.CreateDisplay()\n self.dcam.SetBounds(0.0, 1.0, 0.0, 1.0, -w/h)\n self.dcam.SetHandler(self.handler)\n\n def viewer_refresh(self, q):\n if self.state is None or not q.empty():\n self.state = q.get()\n\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n gl.glClearColor(0.0, 0.0, 0.0, 1.0)\n self.dcam.Activate(self.scam)\n\n # draw poses\n gl.glColor3f(0.0, 1.0, 0.0)\n pangolin.DrawCameras(self.state[0])\n\n # draw keypoints\n gl.glPointSize(2)\n gl.glColor3f(1.0, 0.0, 0.0)\n pangolin.DrawPoints(self.state[1])\n\n pangolin.FinishFrame()\n\n def display(self):\n poses, pts = [], []\n for f in self.frames:\n poses.append(np.linalg.inv(f.pose))\n for p in self.points:\n pts.append(p.pt)\n self.q.put((np.array(poses), np.array(pts)))","repo_name":"Martin-Hedengran/Masterthesis","sub_path":"2020-07-01 Essential matrix/Clean/pointmap.py","file_name":"pointmap.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"15440799267","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('login', views.loginPage, name='login'),\n path('register', views.registerPage, name='register'),\n\n path('', views.home, name='index'),\n path('dashboard', views.home, name='dashboard'),\n path('customer/', views.customer, name='customer'),\n path('products', views.products, name='products'),\n\n path('create_order/', views.createOrder, name='create_order'),\n path('update_order/', views.updateOrder, name=\"update_order\"),\n path('delete_order/', views.deleteOrder, name=\"delete_order\"),\n path('update_customer/', views.updateCustomer, name=\"update_customer\"),\n\n]\n","repo_name":"AbirRahmanOne/crm","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"32391406471","text":"import numpy as np\n\ndef transformClouds(inclouds, posAng = 90., inc = 0., cent = [0.,0.],sbRad=None):\n \"\"\"\n Transforms skySampler cloudlets for use in KinMS.\n \n Calculate the galaxy co-ordinates of clouds from the sky plane. This MUST be used if any of the following conditions are true:\n inc != 0\n posAng != 90\n cent != [0,0]\n \n This exists as a stand-alone routine since an MCMC fit to the galaxy will likely need to run this every step, \n and the sampleClouds routine is computationally expensive\n ============\n Inputs:\n \n clouds: np.ndarray The output of the sampleClouds array [x,y,I]\n \n posAng: 0= None\n \"\"\"\n This method is to do the below tasks\n 1. get the non raid disk name from sut inventory and update the sut inventory file\n 2. enabling the vmd knobs according to the PCIe slot connected and slot-c also\n\n :return None\n \"\"\"\n super(PcieDataRaidInPchCpuSlot, self).prepare()\n sut_inv_file_path = self._os_installation_lib.get_sut_inventory_file_path()\n with open(sut_inv_file_path, \"r\") as fh:\n for line in fh.readlines():\n if \"non_raid_ssd_name\" in line:\n self.non_raid_disk_name = line\n break\n\n if not self.non_raid_disk_name:\n raise content_exceptions.TestError(\"Unable to find non RAID SSD name, please check the file under \"\n \"{}\".format(sut_inv_file_path))\n\n self.non_raid_disk_name = self.non_raid_disk_name.split(\"=\")[1]\n self._log.info(\"non RAID SSD Name from config file : {}\".format(self.non_raid_disk_name))\n\n # Step logger start for Step 1\n self._test_content_logger.start_step_logger(1)\n self._storage_common.enable_vmd_bios_knobs()\n # Enable Slot-C knob\n self._storage_common.enable_slot_c_knob()\n self._log.info(\"Enabling the VMD BIOS Knobs as per the pcie slots connected ...\")\n\n # Step logger end for Step 1\n self._test_content_logger.end_step_logger(1, return_val=True)\n\n def execute(self):\n \"\"\"\n This function is responsible for below tasks\n 1. Creating RAID on connected PCIe Nvme\n 2. 
delete RAID.\n\n :return: True if RAID creation else False\n \"\"\"\n\n self.raid_levels = [RaidConstants.RAID0]\n\n for raid_level in self.raid_levels:\n\n # Step logger start for Step 2\n self._test_content_logger.start_step_logger(2)\n\n raid_creation_screen = self._raid_util.create_raid(raid_level)\n self._log.debug(\"After RAID creation :\".format(raid_creation_screen))\n self.os.wait_for_os(self.reboot_timeout)\n\n # Step logger end for Step 2\n self._test_content_logger.end_step_logger(2, return_val=True)\n\n # Step logger start for Step 3\n self._test_content_logger.start_step_logger(3)\n\n self._raid_util.delete_raid(raid_level, self.non_raid_disk_name)\n self.os.wait_for_os(self.reboot_timeout)\n\n # Step logger end for Step 3\n self._test_content_logger.end_step_logger(3, return_val=True)\n\n return True\n\n def cleanup(self, return_status):\n \"\"\"Test Cleanup\"\"\"\n self._common_content_lib.store_os_logs(self.log_dir)\n self.bios_util.load_bios_defaults()\n self.perform_graceful_g3()\n super(PcieDataRaidInPchCpuSlot, self).cleanup(return_status)\n\n\nif __name__ == \"__main__\":\n sys.exit(Framework.TEST_RESULT_PASS if PcieDataRaidInPchCpuSlot.main() else Framework.TEST_RESULT_FAIL)\n","repo_name":"IPU-ChenFei/ipu_icx_network_toolkit","sub_path":"src/storage/test/pcie_data_raid_between_pch_and_cpu_slot/pcie_data_raid_between_pch_and_cpu_slot.py","file_name":"pcie_data_raid_between_pch_and_cpu_slot.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"16705182568","text":"# MARLI P\nfrom utils.train_loop.model import MainloopModel\nfrom utils.arg.model import ArgsModel\n\nfrom robot import Robot\nfrom logger import Logger\nfrom trainer import Trainer\nfrom proc import Proc\n\nimport torch\n\nclass MainloopRestart(object):\n def restart(self, m: MainloopModel,a: ArgsModel, r: Robot, l: Logger, t: Trainer, p: Proc):\n # Restart for push_only stage and goal-conditioned case\n if p.m.nonlocal_variables['push_step'] == a.max_push_episode_length + 1 or p.m.nonlocal_variables['new_episode_flag'] == 1 or p.m.nonlocal_variables['restart_scene'] == r.m.num_obj / 2:\n p.m.nonlocal_variables['push_step'] = 0 # reset push step\n p.m.nonlocal_variables['new_episode_flag'] = 0\n # save episode_improved_grasp_reward\n print('episode %d begins' % p.m.nonlocal_variables['episode'])\n if p.m.nonlocal_variables['restart_scene'] == r.m.num_obj / 2: # If at end of test run, re-load original weights (before test run)\n p.m.nonlocal_variables['restart_scene'] = 0\n r.sim.restart_sim()\n r.add_objects()\n if a.is_testing: # If at end of test run, re-load original weights (before test run)\n t.m.model.load_state_dict(torch.load(a.snapshot_file))\n\n t.m.clearance_log.append([t.m.iteration])\n l.write_to_log('clearance', t.m.clearance_log)\n if a.is_testing and len(t.m.clearance_log) >= a.max_test_trials:\n m.exit_called = True # Exit after training thread (backprop and saving labels)\n #continue\n return False # false breaks loop, replaces continue\n return True\n\n \n\n","repo_name":"andreipit/masters_thesis_remote_test","sub_path":"PythonApplication1/utils/train_loop/restart.py","file_name":"restart.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"26624135328","text":"from cryptdomainmgr.modules.common.cdmconfighelper import processConfig\nfrom cryptdomainmgr.modules.common.cdmconfighelper 
import applyDefault\nimport logging\n\nlog = logging.getLogger(__name__)\n\ndef resolveAuto(serviceConfig, dependSections):\n    if 'auto' in serviceConfig:\n        serviceConfig.extend(dependSections)\n    serviceConfig = [e for e in serviceConfig if e != 'auto' and e != 'DEFAULT']\n    serviceConfig = list(set(serviceConfig))\n    return serviceConfig\n\n\ndef interpreteValues(cr, args):\n    depends = args['content']['depends'].replace(' ','').split(',')\n    args['config'][args['secname']]['depends'] = list(set(depends)) #if 0 < len(depends[0]) else []\n    for depend in depends:\n        dependSections = cr.getRawConfigOf(depend).keys()\n        if depend in args['content']:\n            dependVals = args['content'][depend].replace(' ','').split(',')\n            dependsResolved = resolveAuto(dependVals,dependSections)\n            args['config'][args['secname']][depend] = dependsResolved\n            if len(dependsResolved) > len(dependSections):\n                log.warning(\"Dependency in config entry does not exist!\")\n            if 'requires' not in args['config'][args['secname']]:\n                args['config'][args['secname']]['requires'] = {}\n            args['config'][args['secname']]['requires'][depend] = dependsResolved\n\n\n\ndef interpreteConfig(cr, sh):\n    cnf = processConfig(cr, 'cdm', preOp=None, postOp=lambda x: interpreteValues(cr, x),\n                        defaultConfig={'statedir': '/var/cryptdomainmgr',\n                                       'depends': 'cert, domain, dkim, \\\n                                       dhparam, service', 'cert': 'auto',\n                                       'dkim': 'auto', 'dhparam': 'auto',\n                                       'domain': 'auto', 'service': 'auto'})\n    return cnf\n\n\n#def interpreteConfig(cr, sh):\n#    defaultCDMConfig = {'statedir': '/var/cryptdomainmgr'}\n#    cdmConfig = cr.getRawConfigOf('cdm')\n#    # apply general config defaults and the default section\n#    cdmConfig = applyDefault(cdmConfig, defaultCDMConfig) # must be here because following section depends on default values\n#    cr.updateConfig({'cdm': cdmConfig['DEFAULT']})\n#    return cdmConfig\n#\n\n\n","repo_name":"TheTesla/cryptdomainmgr","sub_path":"cryptdomainmgr/modules/cdm/confighandler.py","file_name":"confighandler.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"14"}
+{"seq_id":"2571362949","text":"from flask import Flask, redirect, url_for, render_template, flash\nfrom markupsafe import escape\nimport subprocess\nimport time\nimport os\n\n\napp = Flask(__name__, static_url_path='')\napp.secret_key = b'Thisissecret..out..secret..._5#y2L\"F4Q8z\\n\\xec]/'\n\n@app.route('/all_reports/')\ndef reports():\n    links = []\n    try:\n        for name in os.scandir('static'):\n            if name.is_file():\n                if name.name.endswith('html'):\n                    links.append(url_for('static', filename=name.name))\n    except FileNotFoundError:\n        pass\n    return render_template('reports.html', links=links)\n\n\n@app.route('/tests/')\ndef all_tests():\n    flash(\"Starting All Tests... Please be patient\", \"alert alert-info\")\n    report_or_error, returncode = run_tests(\"\")\n\n    if returncode == 0:\n        flash(\"Successfully completed the test... Please check reports\", \"alert alert-success\")\n        return redirect(url_for('home'))\n    else:\n        flash(f\"Test failed Process exited with status code {returncode}\", \"alert alert-danger\")\n        return redirect(url_for('home'))\n\n@app.route('/tests/<path:subpath>')\ndef some_tests(subpath):\n    flash(f\"Starting {subpath} Please be patient\", \"alert alert-info\")\n    report_or_error, returncode = run_tests(escape(subpath))\n    if returncode == 0:\n        flash(\"Successfully completed the test... 
Please check reports\", \"alert alert-success\")\n return redirect(url_for('home'))\n else:\n flash(f\"Test failed Process exited with status code {returncode}\", \"alert alert-danger\")\n return redirect(url_for('home'))\n\n\n@app.route('/cleanup')\ndef cleanup():\n flash(\"Deleting all generated Reports\", \"alert alert-info\")\n try:\n subprocess.run(\"rm -rf static\", shell=True, check=True)\n flash(\"Deleted all generated Reports\", \"alert alert-success\")\n except subprocess.CalledProcessError as e:\n return_code = e.returncode\n flash(f\"Cleanup did not complete successfully. Return code of process: {return_code}\", \"alert alert-danger\")\n\n return redirect(url_for('home'))\n\n\n@app.route('/')\n@app.route('/index')\ndef home():\n \"\"\"\n Root path controller.\n :return: template index.html\n \"\"\"\n return render_template('/index.html', testlist=get_all_test_module_names())\n\n\ndef run_tests(test_name):\n suffix = str(time.time()).replace('.','')\n filename = f\"static/test_{suffix}.html\"\n if test_name == \"\":\n command = f\"pytest --html {filename} tests/\"\n else:\n command = f\"pytest --html {filename} tests/{test_name}\"\n try:\n cmd_process = subprocess.run(command, shell=True, check=True)\n return_code = cmd_process.returncode\n except subprocess.CalledProcessError as e:\n return_code = e.returncode\n error_str = f\"Tests Run did not complete successfully. Return code of process: {return_code}\"\n if return_code == 0:\n return filename[7:], return_code\n else:\n return error_str, return_code\n\n return None\n\n\ndef get_all_test_module_names():\n tests = []\n for dirpath, dirnames, files in os.walk('tests'):\n for file_name in files:\n if file_name.endswith('.py'):\n if file_name.startswith('test'):\n tests.append(file_name)\n return tests\n\n\n","repo_name":"pppillai/armageddon","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"74130034574","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: area_dialog\n :platform: Linux, Windows\n :synopsis: GUI for area division\n\n.. 
moduleauthor: Zoltan Siki\n\"\"\"\nfrom PyQt4.QtGui import QDialog, QMessageBox\nfrom area_div import Ui_AreaDivDialog\nfrom base_classes import tr\n\nclass AreaDialog(QDialog):\n    \"\"\" Class for area division dialog\n    \"\"\"\n    def __init__(self, total_area, div_area, rotate):\n        \"\"\" Initialize dialog data and event handlers\n\n            :param total_area: total area of the parcel\n            :param div_area: size of the area to divide off\n            :param rotate: True if the one point (rotate) division method is available\n        \"\"\"\n        super(AreaDialog, self).__init__()\n        self.total_area = int(total_area + 0.5)\n        self.div_area = int(div_area + 0.5)\n        self.rotate = rotate\n        self.ui = Ui_AreaDivDialog()\n        self.ui.setupUi(self)\n        self.ui.CancelButton.clicked.connect(self.onCancelButton)\n        self.ui.DivideButton.clicked.connect(self.onDivideButton)\n\n    def showEvent(self, event):\n        \"\"\" Set up initial state of dialog widgets\n\n            :param event: NOT USED\n        \"\"\"\n        self.reset()\n\n    def reset(self):\n        \"\"\" Reset dialog to initial state\n        \"\"\"\n        self.ui.AreaLineEdit.setText(str(self.div_area))\n        self.ui.TotalLineEdit.setText(str(self.total_area))\n        self.ui.TwoPointRadio.setChecked(True)\n        if self.rotate:\n            self.ui.OnePointRadio.setEnabled(True)\n        else:\n            self.ui.OnePointRadio.setEnabled(False)\n\n    def onDivideButton(self):\n        \"\"\" Check input and accept dialog\n        \"\"\"\n        try:\n            a = float(self.ui.AreaLineEdit.text())\n        except ValueError:\n            QMessageBox.warning(self, tr(\"Warning\"), tr(\"Invalid area value\"))\n            return\n        if a <= 0:\n            QMessageBox.warning(self, tr(\"Warning\"), tr(\"Invalid area value\"))\n            return\n        if not self.ui.OnePointRadio.isChecked() and not self.ui.TwoPointRadio.isChecked():\n            QMessageBox.warning(self, tr(\"Warning\"), tr(\"Select division method\"))\n            return\n        self.accept()\n\n    def onCancelButton(self):\n        \"\"\" Reject dialog\n        \"\"\"\n        self.reject()\n","repo_name":"zsiki/ls","sub_path":"area_dialog.py","file_name":"area_dialog.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"14"}
+{"seq_id":"35432031732","text":"from random import randint\n\nnumero_aleatorio = randint(0, 5)\n\nnumero_digitado = int(input(\"Enter a number between 0 and 5: \"))\n\nif(numero_aleatorio == numero_digitado):\n    print(\"You guessed the number!\")\nelse:\n    print(\"You didn't guess it, try again next time!\")","repo_name":"altairxneto/ExerciciosDisciplinaPython","sub_path":"Lista 02/Exercicio01.py","file_name":"Exercicio01.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"11928928945","text":"import sys,argparse\nimport os,glob\nimport numpy as np\nimport pandas as pd\n#from GenomeData import *\n\nimport matplotlib\n# matplotlib.use('Agg')\nfrom matplotlib import gridspec\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.size']=16\nmatplotlib.rcParams[\"font.sans-serif\"] = [\"Arial\", \"Liberation Sans\", \"Bitstream Vera Sans\"]\nmatplotlib.rcParams[\"font.family\"] = \"sans-serif\"\nimport seaborn as sns\nsns.set_style(\"whitegrid\", {'axes.grid' : False,'grid.color': 'grey'})\nsns.set_style(\"ticks\",{'ytick.color': 'k','axes.edgecolor': 'k'})\nfrom matplotlib.colors import LinearSegmentedColormap\n#plus = re.compile('\\+')\n#minus = re.compile('\\-')\nmatplotlib.rcParams[\"font.sans-serif\"] = [\"Arial\"]\n\n\ndef plot_dist(df,infile,label,color):\n    df_tmp = pd.read_csv(infile,sep='\\t',index_col=3,header=None)\n    genes = df_tmp.index.intersection(df.index)\n    kwargs = {'cumulative': True}\n    
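    # Note: routing {'cumulative': True} through both hist_kws and kde_kws makes
    # distplot draw cumulative curves (empirical CDFs) rather than densities, so
    # the gene sets plotted below can be compared by their cumulative RP values.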
sns.distplot(df.loc[genes],label=label,color=color,hist=False,hist_kws=kwargs, kde_kws=kwargs)\n # sns.distplot(df.loc[genes],label=label,color=color,hist=False)\n \n \n\noutdir = 'f2_DEG_RP_distribution_figs'\nos.makedirs(outdir,exist_ok=True)\n\nproject_dir='/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang/'\nproject_dir='/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang/'\nall_gene_file='{}/f7_chipseq/f2_differential_binding_on_202011_UTX_WT_peaks/data_deg_overlap/gene_promoters/hg38_4k_promoter_geneID.bed'.format(project_dir)\ndown_gene_file='{}/f7_chipseq/f2_differential_binding_on_202011_UTX_WT_peaks/data_deg_overlap/gene_promoters/WT_vs_ctrl_Vector_dngenes_promoter.bed'.format(project_dir)\nup_gene_file='{}/f7_chipseq/f2_differential_binding_on_202011_UTX_WT_peaks/data_deg_overlap/gene_promoters/WT_vs_ctrl_Vector_upgenes_promoter.bed'.format(project_dir)\n\n# rank value by UTX signal \nrp_file='gene_rp_from_UTX_WT_peak.txt'\ndf = pd.read_csv(rp_file,sep='\\t',index_col=0,header=None)\ndf = np.log10(df+1)\n# fig\nfig = plt.figure(figsize = (3,3))\nplot_dist(df,all_gene_file,'All genes','k')\nplot_dist(df,up_gene_file,'Up genes','r')\nplot_dist(df,down_gene_file,'Down genes','b')\n# plot_dist(df,all_gene_file,'All genes','k')\n# df_tmp = pd.read_csv(all_gene_file,sep='\\t',index_col=3,header=None)\n# genes = df_tmp.index.intersection(df.index)\n# sns.distplot(df.loc[genes],label=label)\n\n# plt.yscale('log') \nplt.ylabel('PDF')\nplt.xlabel('log$_{{10}}$ RP') \nplt.legend() \nplt.savefig(outdir+os.sep+'RP_distribution.png',bbox_inches='tight',pad_inches=0.1,transparent=True,dpi=600)\nplt.show()\nplt.close()\n\n \n","repo_name":"zanglab/utx_code","sub_path":"f7_chipseq/f2_diff_binding_on_202011_UTX_WT_peaks/f3_UTX_peak_based_RP_on_genes/py2_rp_distribution_compr_DEG.py","file_name":"py2_rp_distribution_compr_DEG.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"12906641","text":"class TrieNode(object):\n def __init__(self):\n self.children = {}\n self.data = None\n self.hotness = 0\n\n\nclass AutocompleteSystem(object):\n\n def __init__(self, sentences, times):\n \"\"\"\n :type sentences: List[str]\n :type times: List[int]\n \"\"\"\n self.root = TrieNode()\n self.typed = ''\n for i, sen in enumerate(sentences):\n self.add_sentence(sen, times[i])\n\n def add_sentence(self, sen, hot):\n trie = self.root\n for c in sen:\n if c not in trie.children: trie.children[c] = TrieNode()\n trie = trie.children[c]\n trie.data = sen\n trie.hotness -= hot\n\n def dfs(self, trie):\n ans = []\n if not trie: return ans\n if trie.data: ans += (trie.hotness, trie.data),\n for sub_trie in trie.children.values():\n ans += self.dfs(sub_trie)\n return ans\n\n def input(self, c):\n \"\"\"\n :type c: str\n :rtype: List[str]\n \"\"\"\n if c == '#':\n self.add_sentence(self.typed, 1)\n self.typed = ''\n return []\n sen = self.typed = self.typed + c\n trie = self.root\n for c in sen:\n if c not in trie.children: return []\n trie = trie.children[c]\n return [s for _, s in sorted(self.dfs(trie))[:3]]\n\n# Your AutocompleteSystem object will be instantiated and called as such:\n# obj = AutocompleteSystem(sentences, times)\n# param_1 = 
obj.input(c)\n","repo_name":"mingweihe/leetcode","sub_path":"_0642_Design_Search_Autocomplete_System.py","file_name":"_0642_Design_Search_Autocomplete_System.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"16882730539","text":"from __future__ import annotations\n\nfrom typing import Iterable, Iterator, Optional, TYPE_CHECKING\n\nimport numpy as np \nfrom tcod.console import Console\n\nfrom entity import Actor, Item, Effect\nimport tile_types\n\nif TYPE_CHECKING:\n from engine import Engine\n from entity import Entity\n\nclass GameMap:\n def __init__(\n self, engine: Engine, width: int, height: int, \n window_width: int, window_height: int, entities: Iterable[Entity] = ()\n ):\n self.engine = engine\n self.width, self.height = width, height\n self.window_width, self.window_height = window_width, window_height\n self.entities = set(entities)\n\n self.tiles = np.full((width, height), fill_value=tile_types.floor, order=\"F\")\n sel = np.random.random(size=self.tiles.shape)\n sel = (sel >= 0.95)\n self.tiles[sel] = np.full(len(self.tiles[sel]), fill_value=tile_types.floor_star)\n\n self.visible = np.full((width, height), fill_value=False, order=\"F\") # Tiles the player can currently see\n self.explored = np.full((width, height), fill_value=True, order=\"F\") # Tiles the player can currently see\n\n self.system_exit_location = np.full((width, height), fill_value=False, order=\"F\") \n\n @property\n def gamemap(self) -> GameMap:\n return self\n\n @property\n def actors(self) -> Iterator[Actor]:\n \"\"\"Iterate over this maps living actors.\"\"\"\n yield from (\n entity\n for entity in self.entities\n if isinstance(entity, Actor) and entity.is_alive\n )\n\n @property\n def corpses(self) -> Iterator[Actor]:\n \"\"\"Iterate over this maps living actors.\"\"\"\n yield from (\n entity\n for entity in self.entities\n if isinstance(entity, Actor) and not entity.is_alive\n )\n\n @property\n def effects(self) -> Iterator[Effect]:\n \"\"\"Iterate over this maps living actors.\"\"\"\n yield from (\n entity\n for entity in self.entities\n if isinstance(entity, Effect) and entity.lifetime_in_turns != 0\n )\n\n @property\n def items(self) -> Iterator[Item]:\n yield from (entity for entity in self.entities if isinstance(entity, Item))\n\n def get_blocking_entity_at_location(self, location_x: int, location_y: int) -> Optional[Entity]:\n for entity in self.entities:\n if entity.blocks_movement and entity.x == location_x and entity.y == location_y:\n return entity\n\n return None\n\n def get_actor_at_location(self, x: int, y: int) -> Optional[Actor]:\n for actor in self.actors:\n if actor.x == x and actor.y == y:\n return actor\n\n return None\n\n def in_bounds(self, x: int, y: int) -> bool:\n \"\"\"Return True if x and y are inside of the bounds of this map.\"\"\"\n return 0 <= x < self.width and 0 <= y < self.height\n\n\n def get_window_coordinates(self, x: int, y: int):\n player = self.engine.player\n x_low = int(player.x-self.window_width/2)\n x_high = int(player.x+self.window_width/2)\n y_low = int(player.y-self.window_height/2)\n y_high = int(player.y+self.window_height/2)\n if x_low <= 0: x_low = 0; x_high = self.window_width\n if y_low <= 0: y_low = 0; y_high = self.window_height\n\n if x_high >= self.width: x_high = self.width; x_low = self.width - self.window_width\n if y_high >= self.height: y_high = self.height; y_low = self.height - self.window_height\n\n return x-x_low, y-y_low\n\n\n def 
render(self, console: Console) -> None:\n        \"\"\"\n        Renders the map.\n\n        If a tile is in the \"visible\" array, then draw it with the \"light\" colors.\n        If it isn't, but it's in the \"explored\" array, then draw it with the \"dark\" colors.\n        Otherwise, the default is \"SHROUD\".\n        \"\"\"\n        player = self.engine.player\n        x_low = int(player.x-self.window_width/2)\n        x_high = int(player.x+self.window_width/2)\n        y_low = int(player.y-self.window_height/2)\n        y_high = int(player.y+self.window_height/2)\n        if x_low <= 0: x_low = 0; x_high = self.window_width\n        if y_low <= 0: y_low = 0; y_high = self.window_height\n\n        if x_high >= self.width: x_high = self.width; x_low = self.width - self.window_width\n        if y_high >= self.height: y_high = self.height; y_low = self.height - self.window_height\n\n        console.tiles_rgb[0:self.window_width, 0:self.window_height] = np.select(\n            condlist=[self.visible[x_low:x_high,y_low:y_high], self.explored[x_low:x_high,y_low:y_high]],\n            choicelist=[self.tiles[x_low:x_high,y_low:y_high][\"light\"], self.tiles[x_low:x_high,y_low:y_high][\"dark\"]],\n            default=tile_types.SHROUD,\n        )\n\n        entities_sorted_for_rendering = sorted(\n            self.entities, key=lambda x: x.render_order.value\n        )\n\n        for entity in entities_sorted_for_rendering:\n            # Only print entities that are in the FOV and on screen\n            if 0 <= entity.x - x_low <= self.window_width and 0 <= entity.y - y_low <= self.window_height:\n                if self.visible[entity.x, entity.y]:\n                    console.print(x=entity.x - x_low, y=entity.y - y_low, string=entity.char, fg=entity.color)\n\n\nclass GameWorld:\n    \"\"\"\n    Holds the settings for the GameMap, and generates new maps when moving down the stairs.\n    \"\"\"\n\n    def __init__(\n        self,\n        *,\n        engine: Engine,\n        map_window_width: int,\n        map_window_height: int,\n        map_width: int,\n        map_height: int,\n        max_monsters: int,\n        min_monsters: int,\n        max_items: int,\n\n        current_map: GameMap = None,\n        main_map: GameMap = None,\n        stellar_systems: Iterable[StellarSystem]=[],\n\n    ):\n        self.engine = engine\n\n        self.map_window_width = map_window_width\n        self.map_window_height = map_window_height\n\n        self.map_width = map_width\n        self.map_height = map_height\n\n        self.max_monsters = max_monsters\n        self.min_monsters = min_monsters\n        self.max_items = max_items\n\n        self.current_map = current_map\n        self.main_map = main_map\n        self.stellar_systems = stellar_systems\n\n\n    def generate_galaxy(self) -> None:\n        from procgen import generate_space\n\n        self.main_map, self.stellar_systems = generate_space(\n            map_width=self.map_width, \n            map_height=self.map_height, \n            max_monsters=self.max_monsters,\n            min_monsters=self.min_monsters,\n            max_items=self.max_items,\n            engine=self.engine,\n            map_window_width=self.map_window_width,\n            map_window_height=self.map_window_height,\n        )\n\n        self.engine.game_map = self.main_map\n        self.current_map = self.main_map\n\n","repo_name":"ArtemBasyrov/cool_space_game","sub_path":"game_map.py","file_name":"game_map.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"1301258031","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# Read file and create a dataframe.\r\ndf = pd.read_csv(\"survey_results_public.csv\", header=0,\r\n                 usecols=['WorkWeekHrs', 'CompTotal', 'YearsCode', 'Age', 'Age1stCode'])\r\n\r\n# Remove missing values.\r\ndf = df.dropna()\r\n\r\ndf.loc[df['YearsCode'] == 'Less than 1 year', 'YearsCode'] = 0\r\ndf.loc[df['YearsCode'] == 'More than 50 years', 'YearsCode'] = 
51\r\ndf.loc[df['Age1stCode'] == 'Younger than 5 years', 'Age1stCode'] = 0\r\ndf.loc[df['Age1stCode'] == 'Older than 85', 'Age1stCode'] = 86\r\ndf['YearsCode'] = df['YearsCode'].astype(\"float64\")\r\ndf['Age1stCode'] = df['Age1stCode'].astype('float64')\r\n\r\n# Check types of objects in the df dataframe.\r\ntype_obj = df.dtypes\r\nprint(type_obj)\r\n\r\n# checking the correlation using the Pearson method\r\n# ( It has a value between +1 and −1. A value of +1 is total positive linear correlation,\r\n# 0 is no linear correlation, and −1 is total negative linear correlation.)\r\nprint(df.corr(method='pearson'))\r\n\r\nfig, ((ax1, ax2, ax3, ax4, ax5), (ax6, ax7, ax8, ax9, ax10)) = plt.subplots(2, 5)\r\n\r\nax1.scatter(df['WorkWeekHrs'], df['CompTotal'])\r\nax1.set_title('WorkWeekHrs - CompTotal')\r\nax2.scatter(df['WorkWeekHrs'], df['YearsCode'])\r\nax2.set_title('WorkWeekHrs - YearsCode')\r\nax3.scatter(df['WorkWeekHrs'], df['Age'])\r\nax3.set_title('WorkWeekHrs - Age')\r\nax4.scatter(df['WorkWeekHrs'], df['Age1stCode'])\r\nax4.set_title('WorkWeekHrs - Age1stCode')\r\nax5.scatter(df['CompTotal'], df['YearsCode'])\r\nax5.set_title('CompTotal - YearsCode')\r\nax6.scatter(df['CompTotal'], df['Age'])\r\nax6.set_title('CompTotal - Age')\r\nax7.scatter(df['CompTotal'], df['Age1stCode'])\r\nax7.set_title('CompTotal - Age1stCode')\r\nax8.scatter(df['YearsCode'], df['Age'])\r\nax8.set_title('YearsCode - Age')\r\nax9.scatter(df['YearsCode'], df['Age1stCode'])\r\nax9.set_title('YearsCode - Age1stCode')\r\nax10.scatter(df['Age'], df['Age1stCode'])\r\nax10.set_title('Age - Age1stCode')\r\n\r\nplt.show()\r\n\r\n# dependent variable: YearsCode\r\n# independent variables: Age1stCode and Age\r\n","repo_name":"izabelafrydrychowicz/izabelafrydrychowicz","sub_path":"Zestaw2/Zestaw2_Zadanie1.py","file_name":"Zestaw2_Zadanie1.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"34563433422","text":"import cv2 as cv\nimport numpy as np\ncap = cv.VideoCapture(0)\nwhile(1):\n    # Take each frame\n    _, frame = cap.read()\n    # Convert BGR to HSV\n    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n    # define range of red color in HSV\n    lower_blue = np.array([0,50,50]) #red\n    upper_blue = np.array([10,255,255]) #red\n    # Threshold the HSV image to get only red colors\n    mask = cv.inRange(hsv, lower_blue, upper_blue)\n    # print(mask)\n\n    # Bitwise-AND mask and original image\n    res = cv.bitwise_and(frame,frame, mask= mask)\n    x = str(res)\n    # display temperature\n    window_name = 'Image'\n    \n    # font\n    font = cv.FONT_HERSHEY_SIMPLEX\n    \n    # org\n    org = (50, 50)\n    \n    # fontScale\n    fontScale = 1\n    \n    # Blue color in BGR\n    color = (255, 0, 0)\n    \n    # Line thickness of 2 px\n    thickness = 2\n    \n    # Using cv.putText() method\n    image = cv.putText(frame, x, org, font, \n                       fontScale, color, thickness, cv.LINE_AA)\n    \n\n\n    cv.imshow('frame',image)\n    # cv.imshow('mask',mask)\n    cv.imshow('res',hsv)\n    k = cv.waitKey(5) & 0xFF\n    if k == 27:\n        break\ncv.destroyAllWindows()","repo_name":"sajeethan19/Hesty","sub_path":"keras/Scripts/code/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"43009936587","text":"'''\n702 Tuple merge and sort\nWrite a program that reads input to build two tuples, each terminated by -9999 (the tuples themselves do not contain -9999).\nMerge the two tuples and sort them in ascending order, then display the tuple before sorting and the list after sorting.\n'''\n\nt1 = []\nt2 = []\n\nprint(\"Create tuple1:\")\nwhile True:\n    num = eval(input())\n    if num == -9999:\n        break\n    else:\n        t1.append(num)\n\nprint(\"Create 
tuple2:\")\nwhile True:\n num = eval(input())\n if num == -9999:\n break\n else:\n t2.append(num)\n\nprint(\"Combined tuple before sorting:\", tuple(t1 + t2))\nprint(\"Combined list after sorting:\", sorted(t1 + t2))\n\n\"\"\"\nCombined tuple before sorting: _\nCombined list after sorting: _\n\"\"\"\n","repo_name":"junyu0130/TQC-python-questions-and-answers","sub_path":"第7類_數組、集合以及詞典/702/PYA702.py","file_name":"PYA702.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"73882766413","text":"#!/usr/bin/env python\n# encoding: utf-8\n\ndef build(bld):\n vehicle = bld.path.name\n bld.ap_stlib(\n name=vehicle + '_libs',\n ap_vehicle=vehicle,\n ap_libraries=bld.ap_common_vehicle_libraries() + [\n 'AC_AttitudeControl',\n 'AC_WPNav',\n 'AP_Camera',\n 'AP_InertialNav',\n 'AP_JSButton',\n 'AP_LeakDetector',\n 'AP_Motors',\n 'AP_RCMapper',\n 'AP_Beacon',\n 'AP_TemperatureSensor',\n 'AP_Arming',\n 'AP_OSD',\n ],\n )\n\n bld.ap_program(\n program_name='ardusub',\n program_groups=['bin', 'sub'],\n use=vehicle + '_libs',\n )\n","repo_name":"ArduPilot/ardupilot","sub_path":"ArduSub/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":9122,"dataset":"github-code","pt":"14"} +{"seq_id":"5624601911","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 1 12:48:08 2020\n\n@author: smith\n\"\"\"\nimport spacy\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec\nfrom gensim.models.phrases import Phrases, Phraser\nimport os\nimport multiprocessing\nimport csv\nimport re\nimport pandas as pd\nfrom time import time\nfrom datetime import datetime\nfrom collections import defaultdict\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\nimport logging\nimport gensim\nlogging.basicConfig(format=\"%(levelname)s - %(asctime)s: %(message)s\", datefmt= '%H:%M:%S', level=logging.INFO)\n\nw2v_dir = '/home/smith/Smith_Scripts/NLP_GeneExpression/w2v_model/model071520/'\nw2v_model = Word2Vec.load(os.path.join(w2v_dir, 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model'))\nmodelName = '_w2v071520_'\n\nresultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/'\nclusters = ['Cluster' + str(x) for x in range(20)]\n\n\ncategory = 'CellTypes'\ncomparison = 'MarkerGenes'\ntermIndex = pd.read_excel(os.path.join(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison + '_Frequency.xlsx'), index_col=0)\ntermIndex = termIndex.sort_values(by='Combined Occurances', ascending=False)\n\nenrichIndex = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx', index_col=0)\nenrIndex = enrichIndex.iloc[:,::4]\n\ndef calcTopSimilarities(cluster, category, min_freq=5, topn=2000, save=False):\n resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'\n clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')\n clusterNum=cluster.replace('Cluster', '')\n genesDf = pd.read_excel('/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx')\n 
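    # Assumption (naming convention inferred from the next line, not stated in
    # the source): the DE spreadsheet keeps each cluster's marker-gene names in
    # a column called '<cluster number>_n', e.g. column '0_n' for Cluster0.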
genesList = genesDf[str(clusterNum) + '_n'].tolist()\n genes = genesList\n genes = []\n for gene in genesList:\n genes.append(gene.lower())\n# words = pd.read_excel(os.path.join(resultDirectory, str(cluster) + '_' + comparison + '_Results/' + category + '_' + cluster + '_Frequency.xlsx'), index_col=0)\n# words = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster0_EnrichedFunctions_onlyTest.xlsx', index_col=0)\n# wordsRedacted = words.loc[words['Occurances'] > min_freq]['word'].tolist()\n words = enrIndex\n wordsRedacted = words[cluster + ' term'].tolist()[:-1]\n if category == 'CellTypes':\n wordsRedacted = termIndex['word'].tolist()[:150]\n newWords = []\n for item in wordsRedacted:\n try:\n item = item.replace(' ', '_')\n newWords.append(item)\n except AttributeError:\n pass\n cat = pd.DataFrame()\n catX = pd.DataFrame()\n for gene in genes:\n gene = gene.lower()\n try:\n df = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity', 'similarity'])\n df['gene'] = gene\n df2 = df.loc[df['entity'].isin(newWords)]\n df2 = df2.reset_index(drop=True)\n dfX = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity ' + gene, 'similarity ' + gene])\n dfX2 = dfX.loc[dfX['entity ' + gene].isin(newWords)]\n dfX2 = dfX2.reset_index(drop=True)\n cat = pd.concat([cat, df2], axis=0)\n cat = cat.reset_index(drop=True)\n catX = pd.concat([catX, dfX2], axis=1)\n catX = catX.reset_index(drop=True)\n except KeyError:\n pass\n if save:\n # cat.to_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))\n # catX.to_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))\n cat.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))\n catX.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))\n\n return(cat, catX)\n\n\ndef averageSimilarities(cluster, category):\n clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'\n # clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')\n if not os.path.exists(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')):\n raise FileNotFoundError(\"Similarities file doesn't exist at \" + os.path.join(clusterDirectory, cluster + modelName + '_Similarities_Enriched_' + category + '.xlsx'))\n else:\n df = pd.read_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))\n itemList = []\n aveList = []\n stdList = []\n weightList = []\n countList = []\n geneList = []\n for item in df['entity'].unique().tolist():\n ave = np.mean(df.loc[df['entity']==item]['similarity'])\n std = np.std(df.loc[df['entity']==item]['similarity'])\n gene = df.loc[df['entity']==item]['gene'].tolist()\n count = len(gene)\n weightedAve = df.loc[df['entity']==item].shape[0]*ave\n itemList.append(item)\n aveList.append(ave)\n stdList.append(std)\n weightList.append(weightedAve)\n countList.append(count)\n geneList.append(gene)\n df = pd.DataFrame(data=[itemList, aveList, stdList, weightList, countList, geneList]).T\n df.columns=['entity', 'ave_similarity', 'stdev', 'weighted_ave', 'count', 'similar_genes']\n df = df.sort_values(by='weighted_ave', ascending=False)\n df = 
df.drop_duplicates(subset='entity', keep='first')\n df.to_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'))\n return(df)\n\ndef combineAverageSims(clusters, category, save=True):\n clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'\n bigDf = pd.DataFrame()\n for cluster in clusters:\n df = pd.read_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'), index_col=0)\n df.columns=[cluster + '_entity', cluster + '_average_sim', cluster + '_stdev', cluster + '_weightedAve', cluster + '_count', cluster + '_similarGenes']\n bigDf = pd.concat([bigDf, df], axis=1)\n if save:\n bigDf.to_excel(os.path.join(clusterDirectory, 'Combined_AverageSimilarities' + modelName + category + '.xlsx'))\n return(bigDf)\n\n\ncat, catX = calcTopSimilarities('Cluster0', 'Functions', save=True)\n\ndf = averageSimilarities('Cluster0', 'Functions')\n\nfor cluster in clusters:\n calcTopSimilarities(cluster, 'CellTypes', min_freq=5, topn=10000, save=True)\n\nfor cluster in clusters:\n averageSimilarities(cluster, 'CellTypes')\n\ndf = combineAverageSims(clusters, 'CellTypes', save=True)\n\ndf = averageSimilarities('Cluster5', 'Functions')\n\n###FREQUENCY DISTRIBUTION:\ncat = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx')\n\n \ndef tsnescatterplot(model, setName, word, list_names,):\n \"\"\" Plot in seaborn the results from the t-SNE dimensionality reduction algorithm of the vectors of a query word,\n its list of most similar words, and a list of words.\n \"\"\"\n arrays = np.empty((0, 300), dtype='f')\n word_labels = [word]\n color_list = ['red']\n\n # adds the vector of the query word\n\n arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)\n \n # gets list of most similar words\n close_words = model.wv.most_similar([word])\n \n # adds the vector for each of the closest words to the array\n try:\n for wrd_score in close_words:\n wrd_vector = model.wv.__getitem__([wrd_score[0]])\n word_labels.append(wrd_score[0])\n color_list.append('blue')\n arrays = np.append(arrays, wrd_vector, axis=0)\n \n # adds the vector for each of the words from list_names to the array\n for wrd in list_names:\n wrd_vector = model.wv.__getitem__([wrd])\n word_labels.append(wrd)\n color_list.append('green')\n arrays = np.append(arrays, wrd_vector, axis=0)\n except KeyError:\n pass\n # Reduces the dimensionality from 300 to 50 dimensions with PCA\n reduc = PCA(n_components=42).fit_transform(arrays) ###### CHANGED FROM 50 DURING TUTORIAL\n \n # Finds t-SNE coordinates for 2 dimensions\n np.set_printoptions(suppress=True)\n \n Y = TSNE(n_components=2, random_state=0, perplexity=10).fit_transform(reduc)\n \n # Sets everything up to plot\n df = pd.DataFrame({'x': [x for x in Y[:, 0]],\n 'y': [y for y in Y[:, 1]],\n 'words': word_labels,\n 'color': color_list})\n \n fig, _ = plt.subplots()\n fig.set_size_inches(9, 9)\n \n # Basic plot\n p1 = sns.regplot(data=df,\n x=\"x\",\n y=\"y\",\n fit_reg=False,\n marker=\"o\",\n scatter_kws={'s': 40,\n 'facecolors': df['color']\n }\n )\n \n # Adds annotations one by one with a loop\n for line in range(0, df.shape[0]):\n p1.text(df[\"x\"][line],\n df['y'][line],\n ' ' + df[\"words\"][line].title(),\n horizontalalignment='left',\n verticalalignment='bottom', size='medium',\n color=df['color'][line],\n weight='normal'\n 
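                 # (each annotation inherits the colour of its scatter marker, so the
                 # query word (red), its nearest neighbours (blue) and the supplied
                 # word list (green) stay distinguishable in the t-SNE plot)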
).set_size(15)\n\n    \n    plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)\n    plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)\n    \n    plt.title('t-SNE visualization for {}'.format(word.title()))\n    plt.savefig(os.path.join(resultDirectory, setName + modelName + word + '_tSNE_42PCs.png'))\n\n# NOTE: setName, word and newWords are not defined anywhere in this script (they\n# appear to be leftovers from an interactive session), so the call below is left\n# commented out to keep the module runnable; define them before calling.\n# tsnescatterplot(w2v_model, setName, word, newWords)\n\n\nw2v_model.wv.most_similar(positive=[\"drug_addiction\"], topn=20)\nw2v_model.wv.most_similar(positive=[\"nucleus_accumbens\"], topn=20)\nw2v_model.wv.most_similar(positive=[\"vta\"], topn=20)\nw2v_model.wv.most_similar(positive=[\"dbi\"], topn=20)\n\n\nw2v_model.wv.most_similar(positive=[\"enkephalin\", \"cacng4\"], negative=[\"opioid\"], topn=20)\n\nw2v_model.wv.most_similar(positive=[\"slc17a7\", \"cacng4\"], negative=[\"glutamatergic_neuron\"], topn=20)\n\n\n\n###RUN PCA:\n# fit a 2d PCA model to the vectors\nX = w2v_model[w2v_model.wv.vocab]\npca = PCA(n_components=50)\nresult = pca.fit_transform(X)\n#Plot the result\nfig, ax = plt.subplots()\nax.plot(result[:, 0], result[:, 1], 'o')\nax.set_title('Entities')\nplt.show()\n\nwords = list(w2v_model.wv.vocab.keys())\n\n\n\n","repo_name":"alexcwsmith/scNLP","sub_path":"scNLP_spaCy_GeneExpression_word2vec.py","file_name":"scNLP_spaCy_GeneExpression_word2vec.py","file_ext":"py","file_size_in_byte":11197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"}
+{"seq_id":"32240798840","text":"import logging\nfrom arguments import args\n\n\n# def _setup_logger():\n#     log_format = logging.Formatter(\"[%(asctime)s %(levelname)s] %(message)s\")\n#     logger = logging.getLogger()\n#     logger.setLevel(logging.INFO)\n#     console_handler = logging.StreamHandler()\n#     console_handler.setFormatter(log_format)\n#     logger.handlers = [console_handler]\n\n#     return logger\n\n\ndef _setup_logger():\n    log_format = logging.Formatter(\"[%(asctime)s %(levelname)s] %(message)s\")\n    logger = logging.getLogger()\n    logger.setLevel(logging.INFO)\n\n    # Create a file handler\n\n    if args.task == 'train':\n        log_file = 'train.log'\n    elif args.task == 'test':\n        log_file = 'test.log'\n    elif args.task == 'eval':\n        log_file = 'eval.log'\n    file_handler = logging.FileHandler(log_file)\n    file_handler.setFormatter(log_format)\n\n    console_handler = logging.StreamHandler()\n    console_handler.setFormatter(log_format)\n\n\n    # Add the file handler to the logger\n    logger.addHandler(file_handler)\n    logger.addHandler(console_handler)\n\n    return logger\n\nlogger = _setup_logger()","repo_name":"ndkhoa0704/KGE","sub_path":"logger_config.py","file_name":"logger_config.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"74127885774","text":"from userbot import * ; from sys import * ; from telethon import TelegramClient, functions, types ; from telethon.tl.types import InputMessagesFilterDocument ; from pathlib import Path ; from userbot.javes_main.commands import * ; import asyncio, os, traceback ; javes = tgbot = bot.tgbot = borg = client\n\nLOGS.info(\"Connecting...\") ; o = o2 =o3 = None \ntry:\n    client.start() ; LOGS.info(\"client connected\") ; o = \"client\"\nexcept:\n\tLOGS.info(\"Telegram String Session Wrong or Expired Please Add new one \") ; quit(1)\n\nif client2:\n    try:\n        client2.start() ; LOGS.info(\"client2 connected\") ; o2 = \"client2\"\n    except:\n        LOGS.info(\"client2 Session string Wrong/Expired Please add new string session or delete var E2\") ; quit(1)\n    \nif client3:\n    try:\n        client3.start() ; LOGS.info(f\"client3 
connected\") ; o3 = \"client3\"\n except:\n LOGS.info(\"client3 Session string Wrong/Expired Please add new string or delete var E3 \") ; quit(1)\n\njaves.loop.run_until_complete(a()); from userbot.javes_main import custom_installer; from userbot.modules import *; os.system(\"rm userbot/modules/*.py\") ; LOGS.info(f\"Sucessfully connected with {o}, {o2}, {o3} check it by typing !javes in any chat, type !help for more info.\")\n\nif len(argv) not in (1, 3, 4):\n javes.disconnect()\nelse:\n javes.run_until_disconnected()\n","repo_name":"Zohre27/rekcah-pavi-javes","sub_path":"userbot/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"12118571072","text":"import dill\nimport json\nimport logging\nimport sqlite3\nimport os\nfrom provider.util import SshUtil\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalStore:\n \"\"\"\n This a database implementation using local file storage\n File based cache implemented using sqlite3\n Only 2 columns - id (str primary key) and value (str)\n The db is stored at the root of the service with suffix .sqlitedb\n\n TODO as this is a key/value pair, it can be distributed to several segments\n based on key hash to avoid I/O conflicts, for multi-node access, not necessary\n at this time\n Usage:\n * mydata = LocalStore(\"NBA_players\")\n * mydata.put(\"best_player\", \"Lebron Bryant\")\n * mydata.put(\"worst_player\", \"Kobe James\")\n * if 'Lebron' in mydata.get(\"best_player\"):\n * print(\"Correct, worst player is %s\" % mydata.get(\"worst_player\"))\n * else:\n * print(\"No way!! Burning the place down.\")\n \"\"\"\n _DEFAULT_DATABASE_NAME = 'ssh_local_db'\n _DB_FILE_SUFFIX = '.sqlitedb'\n\n def __init__(self, db_name=_DEFAULT_DATABASE_NAME):\n \"\"\"\n db_name is used as the prefix of the local db file name as well\n as the name of the table in the .sqlite3 database\n \"\"\"\n db_name = db_name.strip()\n if not db_name:\n raise ValueError(\"db_name must be provided : (%s)\" % db_name)\n if ';' in db_name or '.' in db_name or \"'\" in db_name:\n raise ValueError(\"db_name can not contain [;.']\")\n self.db_table = db_name\n\n conn = None\n try:\n conn = sqlite3.connect(self._file_name())\n conn.execute(\"CREATE TABLE IF NOT EXISTS %s(id primary key, value text)\" % self.db_table)\n conn.commit()\n except Exception as e:\n logger.error('Exception when creating table %s: %s' % (self.db_table, str(e)))\n finally:\n if conn:\n conn.close()\n\n def _file_name(self):\n return self.db_table + self._DB_FILE_SUFFIX\n\n def delete_database(self):\n if os.path.exists(self._file_name()):\n os.remove(self._file_name())\n\n def clear_table(self):\n conn = None\n try:\n conn = sqlite3.connect(self._file_name())\n conn.execute(\"DROP TABLE IF EXISTS %s\" % self.db_table)\n conn.commit()\n conn.execute(\"CREATE TABLE %s(id primary key, value text)\" % self.db_table)\n conn.commit()\n except Exception as e:\n logger.error('Exception when dropping table %s: %s' % (self.db_table, str(e)))\n finally:\n if conn:\n conn.close()\n\n @staticmethod\n def is_valid_key(key):\n # Disallow colon and semicolon in key\n return ';' not in key and ':' not in key\n\n def delete(self, key):\n if not self.is_valid_key(key):\n logger.error(\"Invalid key: %s\" % str(key))\n return\n conn = sqlite3.connect(self._file_name())\n try:\n conn.execute('DELETE from %s where id = ?' 
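            # the table name is interpolated into the SQL string (it was validated
            # against the [;.'] characters in __init__), while the key travels as a
            # bound '?' parameter, so user-supplied keys cannot inject SQL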
% self.db_table, [key])\n conn.commit()\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n logger.error(\"Shouldn't encounter this error, table should be created already in delete_value\")\n except sqlite3.DatabaseError as e:\n logger.error(\"Database error exception: %s\" % str(e))\n finally:\n conn.close()\n\n def put(self, key, value):\n if not self.is_valid_key(key):\n raise ValueError('Key can not contain (;) or (\")')\n conn = sqlite3.connect(self._file_name())\n try:\n conn.execute(\"INSERT OR IGNORE INTO %s(id, value) values (?, ?)\" % self.db_table, [key, value])\n conn.execute('UPDATE %s SET value=? where id=?' % self.db_table, [value, key])\n conn.commit()\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n logger.error(\"Shouldn't encounter this error, table should be created already in put_value\")\n except sqlite3.DatabaseError as e:\n logger.error(\"Database error exception: %s\" % str(e))\n except Exception as e:\n logger.error(\"Unknown put exception: %s\" % str(e))\n finally:\n conn.close()\n\n def get(self, key):\n if not self.is_valid_key(key):\n return None\n conn = sqlite3.connect(self._file_name())\n try:\n # logger.debug(\"getting %s from table %s\" % (key, self.db_table))\n value_cursor = conn.execute('select value from %s where id = ?' % self.db_table, [key])\n result = value_cursor.fetchone()\n if result:\n return result[0]\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n logger.error(\"Shouldn't encounter this error, table should be created already in get_value\")\n except sqlite3.DatabaseError as e:\n logger.error(\"Database error exception: %s\" % str(e))\n finally:\n conn.close()\n return None\n\n\nclass ActionDatabase(LocalStore):\n _DEFAULT_ACTION_NAME = \"globus_actions\"\n\n def __init__(self, request_id=None, table_name=_DEFAULT_ACTION_NAME):\n \"\"\"\n If request_id is provided, it will be the default value\n for action_id for this ActionDatabase instance for methods\n \"\"\"\n LocalStore.__init__(self, db_name=table_name)\n self.action_id = request_id\n\n def get_info_dict(self, action_id=None):\n if action_id is None:\n action_id = self.action_id\n assert action_id, \"action_id must be provided\"\n result = self.get(action_id)\n if result:\n try:\n return json.loads(result)\n except json.JSONDecodeError as e:\n logger.error(\"Invalid entry in info storage: %s\" % str(e))\n return {}\n\n def store_action_request(self, action, request=None, action_id=None):\n assert action.action_id\n action_object = {\n 'status': action,\n 'request': request,\n }\n if not action_id:\n assert request and request.request_id\n action_id = request.request_id\n action_encoded = dill.dumps(action_object)\n self.put(action_id, action_encoded)\n\n def get_action_request(self, action_id=None):\n if action_id is None:\n action_id = self.action_id\n assert action_id, \"action_id must be provided\"\n action_encoded = self.get(action_id)\n if action_encoded:\n action_object = dill.loads(action_encoded)\n assert 'status' in action_object\n assert 'request' in action_object\n return (action_object.get('status', None),\n action_object.get('request', None))\n else:\n return None, None\n\n def update_action_request(self, action_status, action_id=None, request=None):\n if action_id is None:\n action_id = self.action_id\n action_object = self.get(action_id)\n if not action_object:\n logger.info(f\"Action {action_id} was newly created on update\")\n return self.store_action_request(\n action_status,\n request=request,\n 
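            # note: this store runs whether or not the action already existed, so
            # update_action_request effectively behaves as an upsert on action_id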
action_id=action_id\n )\n\n def delete_action_request(self, action_id=None):\n if action_id is None:\n action_id = self.action_id\n assert action_id, \"action_id must be provided\"\n action = self.get(action_id)\n if action:\n self.delete(action_id)\n logger.info(f\"Action {action_id} was deleted from table\")\n else:\n raise KeyError(f\"Action {action_id} was not found\")\n","repo_name":"globus/ssh-action-provider","sub_path":"provider/local_db.py","file_name":"local_db.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72912152334","text":"import numpy\nimport pylab\nimport pickle\nimport glob\nc = 299792458.0 # speed of light\n\ndef getAntennaPositions(myarray):\n\tif myarray == 'askap':\n\t\txx,yy,zz,dishDiameter = pickle.load(open('askap_antennas.p','rb'))\n\t\tnDishes = len(xx)\n\t\tcoordsys = 'local'\n\t\tdishDiameters = numpy.zeros((nDishes,), numpy.float64) + dishDiameter\n\t\tdishDiameters = dishDiameters.tolist()\n\telif myarray == 'meerkat':\n\t\txx,yy,zz,dishDiameter = pickle.load(open('meerkat_antennas.p','rb'))\t\n\t\tnDishes = len(xx)\n\t\tcoordsys = 'local'\n\t\tdishDiameters = numpy.zeros((nDishes,), numpy.float64) + dishDiameter\n\t\tdishDiameters = dishDiameters.tolist()\t\n\telif myarray == 'ska':\n\t\txx,yy,zz,dishDiameters = pickle.load(open('ska_antennas.p','rb'))\t\n\t\tcoordsys = 'global'\t\n\treturn xx,yy,zz,dishDiameters,coordsys\n\ndef sim_ms(prefix,myarray,minFreq,nChan,chanWidth,startTime,scanLength,xx,yy,zz,dishDiameters,coordsys):\n\tprojectName = prefix+'_'+myarray+'_'+minFreq+'_'+str(nChan)+'ch_'+chanWidth+'_'+str(int(scanLength/60.0))+'min'\n\tRA_central = '12h00m00.0s' \n\tDec_central = '-30d00m00.0s'\n\tdishDiameter = 25.0\n\tStokes = 'XX XY YX YY'\n\tintegrationTime = '180s'\n\trefTime = me.epoch('IAT','2018/01/01')\n\televationLimit = '8.0deg'\n\tshadowLimit = 0.001\n\t#xx,yy,zz,dishDiameters,coordsys = getAntennaPositions(myarray)\n\t# nDishes = len(xx)\n\t# dishDiameters = numpy.zeros((nDishes,), numpy.float64) + dishDiameter\n\t# dishDiameters = dishDiameters.tolist()\n\tclName = projectName+'.cl'\n\tmsName = projectName+'.ms'\n\trefRA = qa.unit(RA_central)\n\trefDec = qa.unit(Dec_central)\n\tdirection = me.direction(rf = 'J2000', v0 = refRA, v1 = refDec)\n\tcl.addcomponent(dir = direction, flux = 1.0, freq = minFreq)\n\tcl.rename(filename = clName)\n\tcl.done()\n\tsm.open(msName)\n\tsm.setspwindow(spwname = prefix,\n\t\tfreq = minFreq,\n\t\tdeltafreq = chanWidth,\n\t\tfreqresolution = chanWidth,\n\t\tnchannels = nChan,\n\t\tstokes = Stokes)\n\tif myarray == 'askap':\n\t\tobsPosition = me.observatory('ATCA')\n\telse:\n\t\tobsPosition = me.observatory('MeerKAT')\n\tsm.setconfig(telescopename = myarray,\n\t\tx = xx,\n\t\ty = yy,\n\t\tz = zz,\n\t\tdishdiameter = dishDiameters,\n\t\tmount = 'ALT-AZ',\n\t\tcoordsystem = coordsys,\n\t\treferencelocation = obsPosition)\n\tsm.setfeed(mode = 'perfect X Y')\n\tsm.setfield(sourcename = prefix,\n\t\tsourcedirection = me.direction(rf = 'J2000', v0 = refRA, v1 = refDec))\n\tsm.settimes(integrationtime = integrationTime,\n\t\tusehourangle = True,\n\t\treferencetime = refTime)\n\tsm.setlimits(shadowlimit = shadowLimit,\n\t\televationlimit = elevationLimit)\n\tsm.setauto(autocorrwt = 0.0)\n\tscan = 0\n\tendTime = startTime + scanLength\n\twhile (startTime < endTime):\n\t\tsm.observe(prefix, prefix, starttime = str(startTime)+'s', stoptime = str(startTime + 
scanLength)+'s')\n\t\tme.doframe(refTime)\n\t\tme.doframe(obsPosition)\n\t\thadec = me.direction('hadec', qa.time(str(startTime + scanLength / 2)+'s'), refDec)\n\t\tazel = me.measure(hadec,'azel')\n\t\tsm.setdata(msselect = 'SCAN_NUMBER==' + str(scan))\n\t\tsm.predict(complist = clName)\n\t\tstartTime = startTime + scanLength\n\t\tscan += 1\n\tsm.done()\n\tos.system('rm -rf ' + clName)\n\n\treturn msName\n\ndef vector_avg(msName):\n\ttb.open(msName)\n\tcorrdat = tb.getcol('CORRECTED_DATA')\n\tnchans = corrdat.shape[1]\n\tspectrum = []\n\tfor chan in range(0,nchans):\n\t\tre_x = numpy.mean(numpy.real(corrdat[0,chan,:]))\n\t\tim_x = numpy.mean(numpy.imag(corrdat[0,chan,:]))\n\t\tre_y = numpy.mean(numpy.real(corrdat[3,chan,:]))\n\t\tim_y = numpy.mean(numpy.imag(corrdat[3,chan,:]))\n\t\telement = numpy.mean((re_x,im_x))+numpy.mean((re_y,im_y))\n\t\tspectrum.append(element)\n\ttb.done()\n\ttb.open(msName+'/SPECTRAL_WINDOW')\n\tfreqs = tb.getcol('CHAN_FREQ')/1e6\n\ttb.done()\n\treturn freqs,spectrum\n\nbptabs = glob.glob('cal_*.B')\n\nfor bpTable in bptabs:\n\tparts = bpTable.split('_')\n\tmyarray = parts[2]\n\tminFreq = parts[3]\n\tnChan = int(parts[4].replace('ch',''))\n\tchanWidth = parts[5]\n\tscanLength = 28000\n\tstartTime = -14400\n\tprefix = 'target'\n\txx,yy,zz,dishDiameters,coordsys = getAntennaPositions(myarray)\n\t\n\tprojectName = prefix+'_'+myarray+'_'+minFreq+'_'+str(nChan)+'ch_'+chanWidth+'_'+str(int(scanLength/60.0))+'min'\n\tmsName = projectName+'.ms'\n\t\n\tsim_ms(prefix,myarray,minFreq,nChan,chanWidth,startTime,scanLength,xx,yy,zz,dishDiameters,coordsys)\n\n\tapplycal(vis=msName,\n\t\tgaintable=bpTable,\n\t\tinterp='nearest')\n\n\tfreqs,spectrum = vector_avg(msName)\n\n\tspecpickle = bpTable.replace('cal_','spec_').replace('.B','.p')\n\tpickle.dump((freqs,spectrum),open(specpickle,'wb'))\n\t\n\tos.system('rm -rf '+msName)\n","repo_name":"IanHeywood/bp_sims","sub_path":"target_corruption.py","file_name":"target_corruption.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"38042351239","text":"#! /usr/bin/env python3\nimport logging\nlogging.basicConfig(filename = 'debugLog.txt', level = logging.DEBUG, format = '%(asctime)s - %(levelname)s - %(message)s')\nlogging.disable(logging.CRITICAL)\nlogging.debug('Start of program.')\n\nfrom time import time\ns = time()\n\n'''\nAuthor: Fanchen Bao\nDate: 10/06/2018\n\nProject Euler: Problem 64\n\nSolution: \n\tFor each number n, to get from sqrt(n) + a0 / c to a1 + (sqrt(n) - b) / c, it must satisfy that b < sqrt(n).\nOtherwise, n - b^2 would be negative. Therefore, this provides the anchor case for recursion.\nWe will brute force through all possible a1 >= 1, and check whether the corresponding b < sqrt(n).\nSince as a1 gets bigger, b also gets bigger (because c * a1 - b = a0, in which a0 and c do not change),\nonce b does not work for a certain a1, no more a1 needs to be tested. For those cases where b works, \nkeep going down each calling tree until the desired squared root period is found. \n'''\nfrom math import sqrt\n\ndef gcd(a, b):\n    ''' find the gcd of numbers a and b, both positive integers'''\n    if a < b: # make sure a > b\n        a, b = b, a\n    if a % b == 0:\n        return b\n    else:\n        return gcd(b, a % b)\n\ndef findFraction(n, b, c, maxB, period, coeffDict):\n    ''' n is the target number. b and c are the current coefficients to be dealt with. maxB = int(sqrt(n)).\n    period records all period values. 
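    (Worked example, hand-checked rather than taken from the source: for n = 23 the
    expansion is sqrt(23) = [4; 1, 3, 1, 8], so once the search succeeds, period holds
    the four repeating terms and len(period) == 4, an even period that is not counted.)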
iniB and iniC is the first fraction to be transformed.\n If any subsequent coefficient b and c exists already in coeffDict, then the full period is found.\n '''\n # if gcd(n - b**2, c) != c: \n # coeffDict[(b, c)] = False # current b, c pair fails\n # return False\n\n nextC = (n - b**2) // c # a big assumption that the coefficient in front of sqrt(n) is always 1\n nextA = 1\n while True:\n if nextA * nextC - b <= maxB:# find new a\n if nextA * nextC - b > 0: \n nextB = nextA * nextC - b\n if (nextB, nextC) in coeffDict: # coefficient ecountered before\n if coeffDict[(nextB, nextC)]: # previous encounter is valid, full period found\n period.append(nextA)\n return True\n # else: previous encounter is invalid, increment nextA and try again\n \n else: # coefficient not encountered before\n coeffDict[(b, c)] = True\n if findFraction(n, nextB, nextC, maxB, period, coeffDict):\n period.append(nextA)\n return True\n else: # once 'nextA' cannot produce a positive 'nextB' smaller than maxB, no need to continue\n coeffDict[(b, c)] = False # current b, c pair fails\n return False\n nextA += 1\n\n\n''' suppose each level of fraction can be expressed as a + (sqrt(n) - b) / c\n maxB represents the upper bound for coefficient b\n'''\nperiod = [] # record the repeated period\ncoeffDict = {} # record all coefficients that have occurred, and whether they can produce valid outcome (true) or not (false)\ncount = 0\nfor n in range(2, 10001):\n maxB = int(sqrt(n))\n period.clear()\n coeffDict.clear()\n if maxB**2 != n: # if n is perfect square, skip it\n for a0 in range(1, maxB + 1): # a0 is the first integer for the fraction representation\n coeffDict[(a0, 1)] = True # default to true, if it doesn't work, revert back to false\n if findFraction(n, a0, 1, maxB, period, coeffDict): # fraction period found\n break\n else:\n period.clear()\n # print(\"sqrt({})=[{}; {}], period = {}\".format(n, a0, period, len(period)))\n if len(period) % 2:\n count += 1\nprint(count)\n\n\n# runtime = 2.6 s\n\n\nprint(\"\\nTime: {}\".format(time() - s))\n\nlogging.debug('End of program.\\n\\n\\n')\n","repo_name":"FanchenBao/project_euler","sub_path":"p64/p64_without deep understanding of fraction representation.py","file_name":"p64_without deep understanding of fraction representation.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"5243759774","text":"n = int(input())\nres = 0\nfor i in range(n):\n f = input()\n if f == \"Tetrahedron\":\n res += 4\n elif f == \"Cube\":\n res += 6\n elif f == \"Octahedron\":\n res += 8\n elif f == \"Dodecahedron\":\n res += 12\n else:\n res += 20\nprint(res)","repo_name":"ISPritchin/Olympiad","sub_path":"800/Оформлено/Антон и многогранники.py","file_name":"Антон и многогранники.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"1519750487","text":"import os\nimport os.path\nfrom os import path\nimport nltk, collections\n\nimport sys\nimport json\nimport random \n\nimport numpy as np\nfrom tqdm import tqdm\nfrom itertools import chain\nimport matplotlib.pyplot as plt\n\n# from src.Train import test, train\n# from src.utils. 
import AverageMeter\nfrom Train import test,train\nfrom colorama import init \nfrom termcolor import colored \nfrom utils import (AverageMeter, save_checkpoint,get_text)\nimport torch \nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nimport fileinput\nimport time \nfrom torchvision import transforms\nimport importlib.util\nfrom importlib.machinery import SourceFileLoader\n\n\n\nDIR = ''\nDIR_DATA = '' # crazyh!\n#Run through list in the data folder?\n\nclass Engine(object):\n def __init__ (self):\n global DIR\n global DIR_DATA\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', type=str, help=\"directory of config file\")\n parser.add_argument('--full_diagnostic', type=str, help=\"run full diagnostic (print) [default: false]\", default=False)\n\n\n # parser.add_argument('--dir', type=int, help=\"directory of config file\" default=\"data/data.json\")\n parser.add_argument('--cuda', action='store_true', help='Enable cuda')\n args = parser.parse_args()\n self.dir = args.dir\n self.config = self.dir\n self.parsed = {}\n with open(self.config) as f:\n self.parsed = json.load(f)\n self.gpu = self.parsed['gpu']\n self.dir_data_config = self.parsed['dir']\n from datasets import utils\n DIR = self.dir_data_config\n utils.DIR = DIR\n\n self.time = 0\n args.cuda = args.cuda and torch.cuda.is_available()\n self.fd = args.full_diagnostic\n self.device = torch.device('cuda:'+ self.gpu if args.cuda else 'cpu')\n self.image_transforms = transforms.Resize(32)\n\n # get rid of extra spaces\n\n self.loss = None\n self.accuracy = None\n if (self.fd):\n print(\" \")\n print(colored(\"==begining data (args put in)==\", 'magenta'))\n print(colored(\"Directory: \"+ self.dir, 'cyan'))\n print(colored(\"==ending data (args put in)==\", 'magenta'))\n print(\" \")\n\n\n \n # print(self.parsed)\n\n assert self.parsed\n self.seed = self.parsed['seed']\n \n print(\"Dir: \" + DIR)\n if path.exists(DIR+ 'seeds_save/' + 'seed.txt'):\n seedNew = random.randint(1,1000001)\n self.seed = seedNew \n # val.write(str(val.read()) + \"\\n\" + str(seedNew))\n \n with open(DIR+ 'seeds_save/' + 'seed.txt','a') as f:\n f.write('\\n' + str(self.seed))\n f.flush()\n else:\n completeName = os.path.join(DIR+ \"seeds_save\", 'seed.txt') \n file1 = open(completeName, \"w+\")\n file1.write(str(self.seed))\n file1.close()\n\n\n self.out_dir = self.parsed['out_dir']\n DIR_DATA = self.out_dir\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n\n self.name = self.parsed['name']\n self.modelDir = self.parsed['model'][0]\n self.trainDir = self.parsed['training'][0]\n \n # self.bi = self.modelDir['bidir']\n self.distance = self.modelDir['dis']\n self.class_name = self.modelDir['class_name']\n self.type = self.modelDir['type']\n self.file_path = self.modelDir['file_path']\n\n\n if self.type == \"Color\":\n from datasets.ColorDataset import (ReferenceGame)\n elif self.type == \"Chair\":\n from datasets.ChairDataset import (ReferenceGame)\n elif self.type == \"Creatures\":\n from datasets.CreaturesDataset import (ReferenceGame)\n \n \n\n\n name = os.path.basename(self.file_path)\n f_path = self.file_path.replace(name,\"\")\n name = name.replace('.py','')\n\n\n sys.path.append(os.path.dirname(f_path))\n\n module = __import__(name)\n \n\n while (os.path.dirname(f_path) in sys.path): \n sys.path.remove(os.path.dirname(f_path))\n \n sup = getattr(module, self.class_name)\n\n # get rid of 
commented out things unless important\n\n #module = __import__(module_name)\n #class_ = getattr(module, class_name)\n #instance = class_()\n # breakpoint()\n # split = self.file_path.split('/', 0)\n # name = os.path.basename(self.file_path)\n # f_path = self.file_path.replace(name,\"\")\n\n # sys.path.append(os.path.dirname(f_path))\n # module = __import__(name)\n # sup = getattr(module, self.class_name)\n\n self.lr = self.trainDir['learning_rate']\n #self.num = self.trainDir['number']\n #self.width = self.trainDir['width']\n\n self.bs = self.trainDir['batch_size']\n self.epochs = self.trainDir['epochs']\n self.dim = self.trainDir['dim']\n self.log_interval = self.trainDir['log_interval']\n\n\n # pylint to make nice indents\n self.train_dataset = ReferenceGame(split='Train', context_condition=self.distance, image_transform=self.image_transforms)\n #self.dataTrain = self.train_dataset.data\n self.train_loader = DataLoader(self.train_dataset, shuffle=True, batch_size=self.bs)\n self.N_mini_batches = len(self.train_loader)\n self.vocab_size = self.train_dataset.vocab_size\n self.vocab = self.train_dataset.vocab\n self.ref_dataset = ReferenceGame(vocab=self.vocab, split='Test', context_condition=self.distance,image_transform=self.image_transforms)\n #self.dataRef = self.ref_dataset.data\n self.test_dataset = ReferenceGame(vocab=self.vocab, split='Validation', context_condition=self.distance,image_transform=self.image_transforms)\n self.test_loader = DataLoader(self.test_dataset, shuffle=False, batch_size=self.bs)\n #self.dataTest = self.test_loader.data\n\n self.sup_img = sup(self.vocab_size, device=self.device).to(self.device)\n\n\n self.optimizer = torch.optim.Adam(\n chain(\n self.sup_img.parameters(),\n ), lr=self.lr)\n\n # model\n self.__init_model(self.config)\n if (self.fd):\n self.check_data()\n\n self.train()\n\n if (self.fd):\n self.load_model()\n self.load_best()\n self.final_loss()\n self.final_accuracy()\n self.final_time()\n # self.final_perplexity()\n print(DIR+'plot_data/' + 'plot_' + self.name + \".txt\") \n\n if path.exists(DIR+'plot_data/' + 'plot_' + self.name + \".txt\"): \n with open(DIR+'plot_data/' + 'plot_' + self.name + \".txt\", 'a') as f:\n f.write('\\n' + str(self.accuracy))\n f.flush()\n else:\n completeName = os.path.join(DIR+'plot_data/' + 'plot_' + self.name + \".txt\") \n file1 = open(completeName, \"w\")\n file1.write(str(self.accuracy))\n file1.close()\n \n\n def __init_model(self, config):\n print(\"init model\")\n #Model or Training class? 
Save to a self.var?\n def check_data(self):\n print(colored(\"==begining data (config)==\", 'magenta'))\n print(colored(\"name: \"+ self.name , 'cyan'))\n print(colored(\"bi: \"+ str(self.bi) , 'cyan'))\n print(colored(\"Class Name: \"+ self.class_name , 'cyan'))\n print(colored(\"learning rate: \"+ str(self.lr) , 'cyan'))\n print(colored(\"batch size: \"+ str(self.bs) , 'cyan'))\n print(colored(\"epochs: \"+ str(self.epochs) , 'cyan'))\n print(colored(\"dim: \"+ str(self.dim) , 'cyan'))\n print(colored(\"log interval: \"+ str(self.log_interval) , 'cyan'))\n print(colored(\"==ending data (config)==\", 'magenta'))\n print(\" \")\n\n def train(self):\n best_loss = float('inf')\n track_loss = np.zeros((self.epochs, 2))\n t0 = time.time()\n\n for epoch in range(1, self.epochs + 1):\n t_loss = self.train_one_epoch(epoch)\n v_loss = self.validate_one_epoch(epoch)\n\n is_best = v_loss < best_loss\n best_loss = min(v_loss, best_loss)\n track_loss[epoch - 1, 0] = t_loss\n track_loss[epoch - 1, 1] = v_loss\n\n save_checkpoint({\n 'epoch': epoch,\n 'sup_img': self.sup_img.state_dict(),\n 'track_loss': track_loss,\n 'optimizer': self.optimizer.state_dict(),\n 'vocab': self.vocab,\n 'vocab_size': self.vocab_size,\n }, is_best, folder=DIR_DATA)\n np.save(os.path.join(DIR_DATA, 'loss.npy'), track_loss)\n self.time = time.time() - t0\n \n def train_one_epoch(self, epoch): \n #train a single epoch \n\n train_loss = train(epoch,self.sup_img,self.train_loader,self.device,self.optimizer)\n return train_loss\n\n def validate_one_epoch(self, epoch): \n # validate a single epoch \n test_loss = test(epoch,self.sup_img,self.test_loader,self.device,self.optimizer)\n return test_loss\n\n def load_model(self,folder=DIR_DATA, filename='checkpoint.pth.tar'):\n checkpoint = torch.load(folder + filename)\n epoch = checkpoint['epoch']\n track_loss = checkpoint['track_loss']\n sup_img = checkpoint['sup_img']\n vocab = checkpoint['vocab']\n vocab_size = checkpoint['vocab_size']\n\n print(colored(\"==begining data (loaded model)==\", 'magenta'))\n print(colored(\"epoch: \"+ str(epoch) , 'cyan'))\n print(colored(\"track loss: \"+ str(track_loss) , 'cyan'))\n print(colored(\"sup img: \"+ str(sup_img) , 'cyan'))\n print(colored(\"vocab: \"+ str(vocab) , 'cyan'))\n print(colored(\"vocab size: \"+ str(vocab_size) , 'cyan'))\n print(colored(\"==ending data (loaded model)==\", 'magenta'))\n print(\" \")\n return epoch, track_loss, sup_img, vocab, vocab_size\n\n def load_best(self, folder=DIR_DATA, filename='model_best.pth.tar'):\n checkpoint = torch.load(folder + filename)\n epoch = checkpoint['epoch']\n \n print(colored(\"==begining data (best model)==\", 'magenta'))\n print(colored(\"epoch: \"+ str(epoch) , 'cyan'))\n print(colored(\"==ending data (best model)==\", 'magenta'))\n print(\" \")\n return epoch\n \n def final_accuracy(self):\n ref_loader = DataLoader(self.ref_dataset, shuffle=False, batch_size=self.bs)\n N_mini_batches = len(ref_loader)\n with torch.no_grad():\n\n total_count = 0\n correct_count = 0\n correct = False\n \n for batch_idx, (tgt_rgb, d1_rgb, d2_rgb, x_inp, x_len) in enumerate(ref_loader):\n batch_size = x_inp.size(0)\n tgt_rgb = tgt_rgb.to(self.device).float()\n d1_rgb = d1_rgb.to(self.device).float()\n d2_rgb = d2_rgb.to(self.device).float()\n x_inp = x_inp.to(self.device)\n x_len = x_len.to(self.device)\n\n tgt_score = self.sup_img(tgt_rgb, x_inp, x_len)\n d1_score = self.sup_img(d1_rgb, x_inp, x_len)\n d2_score = self.sup_img(d2_rgb, x_inp, x_len)\n soft = nn.Softmax(dim=1)\n loss = 
soft(torch.cat([tgt_score,d1_score,d2_score],1))\n softList = torch.argmax(loss, dim=1)\n\n correct_count += torch.sum(softList == 0).item()\n total_count += softList.size(0)\n \n\n accuracy = correct_count / float(total_count) * 100\n # print('====> Final Test Loss: {:.4f}'.format(loss_meter.avg))\n print(colored(\"==begining data (final accuracy)==\", 'magenta'))\n print(colored('====> Final Accuracy: {}/{} = {}%'.format(correct_count, total_count, accuracy), 'cyan'))\n print(colored(\"==ending data (final accuracy)==\", 'magenta'))\n print(\"\")\n self.accuracy = accuracy\n\n def final_loss(self):\n print(colored(\"==begining data (final loss)==\", 'magenta'))\n test_loader = DataLoader(self.test_dataset, shuffle=True, batch_size=self.bs)\n N_mini_batches = len(test_loader)\n with torch.no_grad():\n loss_meter = AverageMeter()\n\n for batch_idx, (tgt_rgb, d1_rgb, d2_rgb, x_inp, x_len) in enumerate(test_loader):\n batch_size = x_inp.size(0)\n tgt_rgb = tgt_rgb.to(self.device).float()\n d1_rgb = d1_rgb.to(self.device).float()\n d2_rgb = d2_rgb.to(self.device).float()\n x_inp = x_inp.to(self.device)\n x_len = x_len.to(self.device)\n\n # obtain predicted rgb\n tgt_score = self.sup_img(tgt_rgb, x_inp, x_len)\n d1_score = self.sup_img(d1_rgb, x_inp, x_len)\n d2_score = self.sup_img(d2_rgb, x_inp, x_len)\n\n loss = F.cross_entropy(torch.cat([tgt_score,d1_score,d2_score], 1), torch.LongTensor(np.zeros(batch_size)).to(self.device))\n self.loss = loss\n \n loss_meter.update(loss.item(), batch_size)\n print(colored('====> Final Test Loss: {:.4f}'.format(loss_meter.avg),'cyan'))\n \n print(colored(\"==ending data (final loss)==\", 'magenta'))\n print(\"\")\n \n def final_time(self):\n print(colored(\"==begining data (final time)==\", 'magenta'))\n print(colored('====> Final Time: {:.4f}'.format(self.time),'cyan'))\n print(colored(\"==ending data (final time)==\", 'magenta'))\n print(\"\")\n \n def final_perplexity(self):\n corpus = \"\"\n perp = 0\n counter = 0\n\n # for i in self.train_dataset.get_textColor():\n # corpus = corpus + \" \" + i\n # for i in self.ref_dataset.get_textColor():\n # corpus = corpus + \" \" + i\n # for i in self.test_dataset.get_textColor():\n # corpus = corpus + \" \" + i\n\n # print(corpus)\n\n model = self.unigram(self.train_dataset.get_textColor())\n model1 = self.unigram(self.ref_dataset.get_textColor())\n model2 = self.unigram(self.test_dataset.get_textColor())\n\n for i in self.train_dataset.get_textColor():\n if (self.perplexity(i, model) < 100):\n counter = counter + 1\n perp = perp + self.perplexity(i, model)\n for i in self.ref_dataset.get_textColor():\n if (self.perplexity(i, model1) < 100):\n counter = counter + 1\n perp = perp + self.perplexity(i, model1)\n for i in self.test_dataset.get_textColor():\n if (self.perplexity(i, model2) < 100):\n counter = counter + 1\n perp = perp + self.perplexity(i, model2)\n\n\n print(colored(\"==begining data (final perplexity)==\", 'magenta'))\n print(colored('====> Final Average Perplexity: {:.4f}'.format(perp/counter),'cyan'))\n print(colored(\"==ending data (final perplexity)==\", 'magenta'))\n print(\"\")\n\n def unigram(self, tokens): \n model = collections.defaultdict(lambda: 0.01)\n for f in tokens:\n try:\n model[f] += 1\n except KeyError:\n model [f] = 0\n continue\n N = float(sum(model.values()))\n for word in model:\n model[word] = model[word]/N\n return model\n \n def perplexity(self, testset, model):\n testset = testset.split()\n perplexity = 1\n N = 0\n for word in testset:\n N += 1\n perplexity = perplexity * 
(1/model[word])\n        perplexity = pow(perplexity, 1/float(N)) \n        return perplexity\n\nif __name__ == '__main__':\n    run = Engine()","repo_name":"cocolab-projects/reference-game-exploration","sub_path":"src/rge/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":15910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"20991684009","text":"from bs4 import BeautifulSoup\nimport requests\n#thisClick_2442798175 > div > div.photo_wrap > a > img\n\ndef elevenst(URL):\n    response = requests.get(URL)\n    code = response.status_code\n    html = response.text\n    print(code)\n    soup = BeautifulSoup(html, 'html.parser') # parser to use: Python's built-in html.parser\n    elevenst_list = soup.select(\"#product_listing > div > div > ul > li\") # an id (#) selector saves walking down from the top, because ids are unique\n    laptop_list = []\n\n    for elevenst in elevenst_list:\n        img = elevenst.select_one(\"div > div.photo_wrap > a > img\").get('src') #thisClick_2442798175 > div > div.photo_wrap > a > img\n        name = elevenst.select_one(\"div > div.list_info > p.info_tit > a\").text #thisClick_2442798175 > div > div.list_info > p.info_tit > a\n        price = elevenst.select_one(\"div > div.list_price > div.price_box > span.price_detail > strong\").text #thisClick_2442798175 > div > div.list_price > div.price_box > span.price_detail > strong .text\n        company = elevenst.select_one(\"div > div.list_benefit > p.benefit_tit\").text #thisClick_2442798175 > div > div.list_benefit > p.benefit_tit > a .text\n\n        dic = {}\n        dic[\"img\"] = img\n        dic[\"name\"] = name.strip()\n        dic[\"price\"] = price + \"원\" # append the Korean won suffix\n        dic[\"company\"] = company.strip()\n        laptop_list.append(dic)\n    return laptop_list\n\nif __name__ == \"__main__\": # when this file is run directly\n    URL = 'http://www.11st.co.kr/category/DisplayCategory.tmall?method=getDisplayCategory2Depth&dispCtgrNo=1002944'\n    laptop_list = elevenst(URL)\n    import pprint as p\n    p.pprint(laptop_list)","repo_name":"psm1206/Python","sub_path":"shop/elevenst2.py","file_name":"elevenst2.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"31074296377","text":"#!/usr/bin/env python3\n\nfrom code import code\n\ndef dumpMatrix(M: list): \n    for row in M: \n        print(\"[ \", end=\"\")\n\n        for elem in row: \n            print(\"{:>2} \".format(elem), end=\"\")\n\n        print(\" ]\")\n\ndef main():\n    c = code(16, 4)\n    codeword = c.encode([1,2,3,4]) \n\n    dumpMatrix(c.G)\n    print(\"\\n\" + str(codeword))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"philDaprogrammer/error_correcting_codes","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"40384948586","text":"#!/usr/bin/env python\n\nimport sys\nimport argparse\nimport HTSeq\nfrom collections import defaultdict\nfrom ddb import configuration\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-i', '--input', help=\"Input config file for samples\")\n    parser.add_argument('-c', '--configuration', help=\"Configuration file for various settings\")\n    parser.add_argument('-o', '--output', help=\"Output file name for CSV file\")\n    args = parser.parse_args()\n\n    sys.stdout.write(\"Parsing configuration data\\n\")\n    config = configuration.configure_runtime(args.configuration)\n\n    sys.stdout.write(\"Parsing sample data\\n\")\n    samples = 
configuration.configure_samples(args.input, config)\n\n    transcript_counts = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))\n\n    for sample in samples:\n        sys.stderr.write(\"Processing sample {}\\n\".format(sample))\n        gtf_file = HTSeq.GFF_Reader(samples[sample]['gtf'], end_included=True)\n        for feature in gtf_file:\n            # sys.stderr.write(\"Processing entry: {}\\n\".format(feature))\n            if feature.type == 'transcript':  # compare strings with ==, not 'is'\n                transcript_counts[feature.attr['transcript_id']][sample]['FPKM'] = feature.attr['FPKM']\n                transcript_counts[feature.attr['transcript_id']][sample]['TPM'] = feature.attr['TPM']\n\n    with open(args.output, 'w') as output:\n        output.write(\"Transcript\")\n        for sample in samples:\n            output.write(\"\\t{sample} FPKM\\t{sample} TPM\".format(sample=sample))\n        output.write(\"\\n\")\n        for transcript in transcript_counts:\n            output.write(\"{}\".format(transcript))\n            for sample in samples:\n                output.write(\"\\t{}\\t{}\".format(transcript_counts[transcript][sample]['FPKM'],\n                                               transcript_counts[transcript][sample]['TPM']))\n            output.write(\"\\n\")\n","repo_name":"GastonLab/ddb-tools","sub_path":"RNA/stringtie2countstable.py","file_name":"stringtie2countstable.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}
+{"seq_id":"12872042846","text":"from utilities import *\nimport logging\nimport spur\nimport paramiko\nimport chess.uci\nfrom threading import Timer\nimport configparser\n\n\ndef get_installed_engines(engine_shell, engine_file):\n    return read_engine_ini(engine_shell, (engine_file.rsplit(os.sep, 1))[0])\n\n\ndef read_engine_ini(engine_shell=None, engine_path=None):\n    config = configparser.ConfigParser()\n    try:\n        if engine_shell is None:\n            if not engine_path:\n                program_path = os.path.dirname(os.path.realpath(__file__)) + os.sep\n                engine_path = program_path + 'engines' + os.sep + platform.machine()\n            config.read(engine_path + os.sep + 'engines.ini')\n        else:\n            with engine_shell.open(engine_path + os.sep + 'engines.ini', 'r') as file:\n                config.read_file(file)\n    except FileNotFoundError:\n        pass\n\n    library = []\n    for section in config.sections():\n        parser = configparser.ConfigParser()\n        level_dict = {}\n        if parser.read(engine_path + os.sep + config[section]['file'] + '.uci'):\n            for ps in parser.sections():\n                level_dict[ps] = {}\n                for option in parser.options(ps):\n                    level_dict[ps][option] = parser[ps][option]\n\n        library.append(\n            {\n                'file' : engine_path + os.sep + config[section]['file'],\n                'section' : section,\n                'level_dict' : level_dict\n            }\n        )\n    return library\n\n\ndef write_engine_ini(engine_path=None):\n    def write_level_ini():\n        parser = configparser.ConfigParser()\n        if not parser.read(engine_path + os.sep + engine_file_name + '.uci'):\n            if engine.has_limit_strength():\n                uelevel = engine.get().options['UCI_Elo']\n                elo_1, elo_2 = int(uelevel[2]), int(uelevel[3])\n                minlevel, maxlevel = min(elo_1, elo_2), max(elo_1, elo_2)\n                if maxlevel - minlevel > 1000:\n                    inc = int((maxlevel - minlevel) / 100)\n                else:\n                    inc = int((maxlevel - minlevel) / 10)\n                if 20 * inc + minlevel < maxlevel:\n                    inc = int((maxlevel - minlevel) / 20)\n                set_elo = minlevel\n                while set_elo < maxlevel:\n                    parser['Elo@{:04d}'.format(set_elo)] = {'UCI_LimitStrength' : 'true', 'UCI_Elo' : str(set_elo)}\n                    set_elo += inc\n                parser['Elo@{:04d}'.format(maxlevel)] = {'UCI_LimitStrength': 'false', 'UCI_Elo': str(maxlevel)}\n            if engine.has_skill_level():\n                sklevel = engine.get().options['Skill Level']\n                minlevel, maxlevel = int(sklevel[3]), 
int(sklevel[4])\n for level in range(minlevel, maxlevel+1):\n parser['Level@{:02d}'.format(level)] = {'Skill Level': str(level)}\n with open(engine_path + os.sep + engine_file_name + '.uci', 'w') as configfile:\n parser.write(configfile)\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n if not engine_path:\n program_path = os.path.dirname(os.path.realpath(__file__)) + os.sep\n engine_path = program_path + 'engines' + os.sep + platform.machine()\n engine_list = sorted(os.listdir(engine_path))\n config = configparser.ConfigParser()\n for engine_file_name in engine_list:\n if is_exe(engine_path + os.sep + engine_file_name):\n engine = UciEngine(engine_path + os.sep + engine_file_name)\n if engine:\n print(engine_file_name)\n try:\n if engine.has_levels():\n write_level_ini()\n config[engine_file_name[2:]] = {\n 'file': engine_file_name,\n 'name': engine.get().name,\n }\n except AttributeError:\n pass\n engine.quit()\n with open(engine_path + os.sep + 'engines.ini', 'w') as configfile:\n config.write(configfile)\n\n\nclass Informer(chess.uci.InfoHandler):\n def __init__(self):\n super(Informer, self).__init__()\n self.dep = 0\n self.allow_score = True\n self.allow_pv = True\n\n def on_go(self):\n self.dep = 0\n self.allow_score = True\n self.allow_pv = True\n super().on_go()\n\n def depth(self, dep):\n self.dep = dep\n super().depth(dep)\n\n def _reset_allow_score(self):\n self.allow_score = True\n\n def _reset_allow_pv(self):\n self.allow_pv = True\n\n def _allow_fire_score(self):\n if self.allow_score:\n self.allow_score = False\n Timer(0.5, self._reset_allow_score).start()\n return True\n else:\n return False\n\n def _allow_fire_pv(self):\n if self.allow_pv:\n self.allow_pv = False\n Timer(0.5, self._reset_allow_pv).start()\n return True\n else:\n return False\n\n def score(self, cp, mate, lowerbound, upperbound):\n if self._allow_fire_score():\n Observable.fire(Event.NEW_SCORE(score=cp, mate=mate))\n super().score(cp, mate, lowerbound, upperbound)\n\n def pv(self, moves):\n if self._allow_fire_pv() and moves:\n Observable.fire(Event.NEW_PV(pv=moves))\n super().pv(moves)\n\n\nclass UciEngine(object):\n def __init__(self, file, hostname=None, username=None, key_file=None, password=None):\n super(UciEngine, self).__init__()\n try:\n self.shell = None\n if hostname:\n logging.info(\"connecting to [%s]\", hostname)\n if key_file:\n shell = spur.SshShell(hostname=hostname, username=username, private_key_file=key_file,\n missing_host_key=paramiko.AutoAddPolicy())\n else:\n shell = spur.SshShell(hostname=hostname, username=username, password=password,\n missing_host_key=paramiko.AutoAddPolicy())\n self.shell = shell\n self.engine = chess.uci.spur_spawn_engine(shell, [file])\n else:\n self.engine = chess.uci.popen_engine(file)\n\n self.file = file\n if self.engine:\n handler = Informer()\n self.engine.info_handlers.append(handler)\n self.engine.uci()\n else:\n logging.error(\"engine executable [%s] not found\", file)\n self.options = {}\n self.future = None\n self.show_best = True\n\n self.res = None\n self.status = EngineStatus.WAIT\n self.level_support = False\n\n except OSError:\n logging.exception('OS error in starting engine')\n except TypeError:\n logging.exception('engine executable not found')\n\n def get(self):\n return self.engine\n\n def option(self, name, value):\n self.options[name] = value\n\n def send(self):\n self.engine.setoption(self.options)\n\n def level(self, options):\n self.options = options\n\n def has_levels(self):\n return 
self.level_support or self.has_skill_level() or self.has_limit_strength()\n\n    def has_skill_level(self):\n        return 'Skill Level' in self.engine.options\n\n    def has_limit_strength(self):\n        return 'UCI_LimitStrength' in self.engine.options\n\n    def has_chess960(self):\n        return 'UCI_Chess960' in self.engine.options\n\n    def get_file(self):\n        return self.file\n\n    def get_shell(self):\n        return self.shell # shell is only \"not None\" for remote (ssh) engines - see __init__\n\n    def position(self, game):\n        self.engine.position(game)\n\n    def quit(self):\n        return self.engine.quit()\n\n    def terminate(self):\n        return self.engine.terminate()\n\n    def kill(self):\n        return self.engine.kill()\n\n    def uci(self):\n        self.engine.uci()\n\n    def stop(self, show_best=False):\n        if self.is_waiting():\n            logging.info('engine already stopped')\n            return self.res\n        self.show_best = show_best\n        self.engine.stop()\n        return self.future.result()\n\n    def go(self, time_dict):\n        if not self.is_waiting():\n            logging.warning('engine (still) not waiting - strange!')\n        self.status = EngineStatus.THINK\n        self.show_best = True\n        time_dict['async_callback'] = self.callback\n\n        DisplayMsg.show(Message.SEARCH_STARTED(engine_status=self.status))\n        self.future = self.engine.go(**time_dict)\n        return self.future\n\n    def ponder(self):\n        if not self.is_waiting():\n            logging.warning('engine (still) not waiting - strange!')\n        self.status = EngineStatus.PONDER\n        self.show_best = False\n\n        DisplayMsg.show(Message.SEARCH_STARTED(engine_status=self.status))\n        self.future = self.engine.go(ponder=True, infinite=True, async_callback=self.callback)\n        return self.future\n\n    def callback(self, command):\n        self.res = command.result()\n        DisplayMsg.show(Message.SEARCH_STOPPED(engine_status=self.status))\n        if self.show_best:\n            Observable.fire(Event.BEST_MOVE(result=self.res, inbook=False))\n        else:\n            logging.debug('event best_move not fired')\n        self.status = EngineStatus.WAIT\n\n    def is_thinking(self):\n        return self.status == EngineStatus.THINK\n\n    def is_pondering(self):\n        return self.status == EngineStatus.PONDER\n\n    def is_waiting(self):\n        return self.status == EngineStatus.WAIT\n\n    def startup(self, options, show=True):\n        parser = configparser.ConfigParser()\n        if not options and parser.read(self.get_file() + '.uci'):\n            options = dict(parser[parser.sections().pop()])\n        self.level_support = bool(options)\n        if parser.read('picochess.uci'):\n            pc_opts = dict(parser[parser.sections().pop()])\n            pc_opts.update(options)\n            options = pc_opts\n\n        logging.debug(\"setting engine with options {}\".format(options))\n        self.level(options)\n        self.send()\n        if show:\n            logging.debug('Loaded engine [%s]', self.get().name)\n            logging.debug('Supported options [%s]', self.get().options)","repo_name":"roundtree/Picochess-nonDGT","sub_path":"Picochess-nonDGT/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":10361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"}
+{"seq_id":"28460283500","text":"import speech_recognition\r\nimport speech_recognition as sr\r\n\r\nrecognizer = sr.Recognizer()\r\n\r\nwhile True:\r\n    try:\r\n        with sr.Microphone() as mic:\r\n            recognizer.adjust_for_ambient_noise(mic, duration= 0.2)\r\n            print(\"listening...\")\r\n            audio = recognizer.listen(mic)\r\n            text = recognizer.recognize_google(audio)\r\n            text = text.lower()\r\n\r\n            print(text)\r\n\r\n    except speech_recognition.UnknownValueError:  # except takes the exception class, not a call\r\n        recognizer = sr.Recognizer()\r\n        
continue","repo_name":"Joseph2403/Mini-Project","sub_path":"SpeechRecognition.py","file_name":"SpeechRecognition.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"20857001089","text":"from __future__ import (absolute_import, division, print_function)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap as Basemap\nfrom matplotlib.colors import rgb2hex, Normalize\nfrom matplotlib.patches import Polygon\nfrom matplotlib.colorbar import ColorbarBase\n\nfig, ax = plt.subplots()\n\n# Lambert Conformal map of lower 48 states.\nm = Basemap(llcrnrlon=-119,llcrnrlat=20,urcrnrlon=-64,urcrnrlat=49,\n projection='lcc',lat_1=33,lat_2=45,lon_0=-95)\n\n# Mercator projection, for Alaska and Hawaii\nm_ = Basemap(llcrnrlon=-190,llcrnrlat=20,urcrnrlon=-143,urcrnrlat=46,\n projection='merc',lat_ts=20) # do not change these numbers\n\n#%% --------- draw state boundaries ----------------------------------------\n## data from U.S Census Bureau\n## http://www.census.gov/geo/www/cob/st2000.html\nshp_info = m.readshapefile('st99_d00','states',drawbounds=True,\n linewidth=0.45,color='gray')\nshp_info_ = m_.readshapefile('st99_d00','states',drawbounds=False)\n\n## population density by state from\n## http://en.wikipedia.org/wiki/List_of_U.S._states_by_population_density\npopdensity = {\n'New Jersey': 438.00,\n'Rhode Island': 387.35,\n'Massachusetts': 312.68,\n'Connecticut':\t 271.40,\n'Maryland': 209.23,\n'New York': 155.18,\n'Delaware': 154.87,\n'Florida': 114.43,\n'Ohio':\t 107.05,\n'Pennsylvania':\t 105.80,\n'Illinois': 86.27,\n'California': 83.85,\n'Hawaii': 72.83,\n'Virginia': 69.03,\n'Michigan': 67.55,\n'Indiana': 65.46,\n'North Carolina': 63.80,\n'Georgia': 54.59,\n'Tennessee': 53.29,\n'New Hampshire': 53.20,\n'South Carolina': 51.45,\n'Louisiana': 39.61,\n'Kentucky': 39.28,\n'Wisconsin': 38.13,\n'Washington': 34.20,\n'Alabama': 33.84,\n'Missouri': 31.36,\n'Texas': 30.75,\n'West Virginia': 29.00,\n'Vermont': 25.41,\n'Minnesota': 23.86,\n'Mississippi':\t 23.42,\n'Iowa':\t 20.22,\n'Arkansas': 19.82,\n'Oklahoma': 19.40,\n'Arizona': 17.43,\n'Colorado': 16.01,\n'Maine': 15.95,\n'Oregon': 13.76,\n'Kansas': 12.69,\n'Utah':\t 10.50,\n'Nebraska': 8.60,\n'Nevada': 7.03,\n'Idaho': 6.04,\n'New Mexico': 5.79,\n'South Dakota':\t 3.84,\n'North Dakota':\t 3.59,\n'Montana': 2.39,\n'Wyoming': 1.96,\n'Alaska': 0.42}\n\n#%% -------- choose a color for each state based on population density. -------\ncolors={}\nstatenames=[]\ncmap = plt.cm.hot_r # use 'reversed hot' colormap\nvmin = 0; vmax = 450 # set range.\nnorm = Normalize(vmin=vmin, vmax=vmax)\nfor shapedict in m.states_info:\n statename = shapedict['NAME']\n # skip DC and Puerto Rico.\n if statename not in ['District of Columbia','Puerto Rico']:\n pop = popdensity[statename]\n # calling colormap with value between 0 and 1 returns\n # rgba value. Invert color range (hot colors are high\n # population), take sqrt root to spread out colors more.\n colors[statename] = cmap(np.sqrt((pop-vmin)/(vmax-vmin)))[:3]\n statenames.append(statename)\n\n#%% --------- cycle through state names, color each one. 
--------------------\nfor nshape,seg in enumerate(m.states):\n # skip DC and Puerto Rico.\n if statenames[nshape] not in ['Puerto Rico', 'District of Columbia']:\n color = rgb2hex(colors[statenames[nshape]])\n poly = Polygon(seg,facecolor=color,edgecolor=color)\n ax.add_patch(poly)\n\nAREA_1 = 0.005 # exclude small Hawaiian islands that are smaller than AREA_1\nAREA_2 = AREA_1 * 30.0 # exclude Alaskan islands that are smaller than AREA_2\nAK_SCALE = 0.19 # scale down Alaska to show as a map inset\nHI_OFFSET_X = -1900000 # X coordinate offset amount to move Hawaii \"beneath\" Texas\nHI_OFFSET_Y = 250000 # similar to above: Y offset for Hawaii\nAK_OFFSET_X = -250000 # X offset for Alaska (These four values are obtained\nAK_OFFSET_Y = -750000 # via manual trial and error, thus changing them is not recommended.)\n\nfor nshape, shapedict in enumerate(m_.states_info): # plot Alaska and Hawaii as map insets\n if shapedict['NAME'] in ['Alaska', 'Hawaii']:\n seg = m_.states[int(shapedict['SHAPENUM'] - 1)]\n if shapedict['NAME'] == 'Hawaii' and float(shapedict['AREA']) > AREA_1:\n seg = [(x + HI_OFFSET_X, y + HI_OFFSET_Y) for x, y in seg]\n color = rgb2hex(colors[statenames[nshape]])\n elif shapedict['NAME'] == 'Alaska' and float(shapedict['AREA']) > AREA_2:\n seg = [(x*AK_SCALE + AK_OFFSET_X, y*AK_SCALE + AK_OFFSET_Y)\\\n for x, y in seg]\n color = rgb2hex(colors[statenames[nshape]])\n poly = Polygon(seg, facecolor=color, edgecolor='gray', linewidth=.45)\n ax.add_patch(poly)\n\nax.set_title('United states population density by state')\n\n#%% --------- Plot bounding boxes for Alaska and Hawaii insets --------------\nlight_gray = [0.8]*3 # define light gray color RGB\nx1,y1 = m_([-190,-183,-180,-180,-175,-171,-171],[29,29,26,26,26,22,20])\nx2,y2 = m_([-180,-180,-177],[26,23,20]) # these numbers are fine-tuned manually\nm_.plot(x1,y1,color=light_gray,linewidth=0.8) # do not change them drastically\nm_.plot(x2,y2,color=light_gray,linewidth=0.8)\n\n#%% --------- Show color bar ---------------------------------------\nax_c = fig.add_axes([0.9, 0.1, 0.03, 0.8])\ncb = ColorbarBase(ax_c,cmap=cmap,norm=norm,orientation='vertical',\n label=r'[population per $\\mathregular{km^2}$]')\n\nplt.show()\n\n","repo_name":"matplotlib/basemap","sub_path":"examples/fillstates.py","file_name":"fillstates.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","stars":750,"dataset":"github-code","pt":"15"} +{"seq_id":"30353228334","text":"from optimizer import Optimizer, Operations, Types\n\n\nclass TestBasic(object):\n def test_emit_operation(self):\n opt = Optimizer()\n opt.add_operation(Operations.FINISH, [])\n ops = opt.build_operations()\n assert len(ops) == 1\n assert ops[0].op == Operations.FINISH\n assert ops[0].getarglist() == []\n\n def test_inputs(self):\n opt = Optimizer()\n res = opt.add_input(Types.INT)\n\n opt.add_operation(Operations.FINISH, [res])\n ops = opt.build_operations()\n\n assert len(ops) == 1\n assert ops[0].op == Operations.FINISH\n assert ops[0].getarglist() == [res]\n","repo_name":"alex/optimizer-model","sub_path":"tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"15"} +{"seq_id":"617901083","text":"import math\nimport warnings\nfrom abc import ABC, abstractmethod\n\nimport heapq\nfrom collections import namedtuple\n\nimport geopandas as gpd\nimport h3.api.numpy_int as h3\nimport numpy as np\nfrom shapely.geometry import 
MultiPolygon, Point, Polygon\nfrom shapely.ops import cascaded_union\n\nfrom skmob.utils import constants, utils\n\n\nclass TessellationTilers:\n def __init__(self):\n self._tilers = {}\n\n def register_tiler(self, key, tiler):\n self._tilers[key] = tiler\n\n def create(self, key, **kwargs):\n tiler = self._tilers.get(key)\n\n if not tiler:\n raise ValueError(key)\n\n return tiler(**kwargs)\n\n def get(self, service_id, **kwargs):\n return self.create(service_id, **kwargs)\n\n\ntiler = TessellationTilers()\n\n\nclass TessellationTiler(ABC):\n @abstractmethod\n def __call__(self, **kwargs):\n pass\n\n @abstractmethod\n def _build(self, **kwargs):\n pass\n\n\nclass VoronoiTessellationTiler(TessellationTiler):\n\n def __init__(self):\n\n super().__init__()\n self._instance = None\n\n def __call__(\n self, \n points, \n base_shape, \n meters=None, #not needed, kept for compatibility reasons\n which_osm_result=-1,\n crs=constants.DEFAULT_CRS):\n\n if not self._instance:\n\n if isinstance(points, gpd.GeoDataFrame):\n \n points.set_crs(constants.DEFAULT_CRS, allow_override = True, inplace = True)\n \n if not all(isinstance(x, Point) for x in points.geometry):\n \n raise ValueError(\"Not valid points object. Accepted type is: GeoDataFrame with a valid geometry column.\")\n \n \n elif isinstance(points, (list, np.ndarray)):\n \n is_not_correct_type = not all(isinstance(x, (tuple, list, np.ndarray, Point)) for x in points)\n wrong_pairs = not all(len(x) == 2 for x in points if type(x) != Point)\n \n if is_not_correct_type and wrong_pairs:\n \n raise ValueError(\"Not valid array object. Accepted types are shapely.geometry.Point or (lon, lat) pair of coordinates\")\n \n else:\n \n points = gpd.GeoDataFrame(geometry = [Point(xy) for xy in points], crs = crs)\n \n base_shape_geometry = self._create_geometry_if_does_not_exists(base_shape, which_osm_result)\n base_shape_geometry_merged = self._merge_all_polygons(base_shape_geometry)\n \n return self._build(points, base_shape_geometry_merged, crs)\n \n def _create_geometry_if_does_not_exists(self, base_shape, which_osm_result):\n\n if isinstance(base_shape, str):\n base_shape = self._str_to_geometry(base_shape, which_osm_result)\n\n elif self._isinstance_geodataframe_or_geoseries(base_shape):\n if all(isinstance(x, Point) for x in base_shape.geometry):\n base_shape = utils.bbox_from_points(base_shape, base_shape.crs)\n else:\n raise ValueError(\"Not valid base_shape object.\" \" Accepted types are str, GeoDataFrame or GeoSeries.\")\n \n return base_shape\n\n def _isinstance_geodataframe_or_geoseries(self, base_shape):\n return True if (isinstance(base_shape, gpd.GeoDataFrame) or isinstance(base_shape, gpd.GeoSeries)) else False\n\n def _str_to_geometry(self, base_shape, which_osm_result):\n base_shapes = utils.bbox_from_name(base_shape, which_osm_result=which_osm_result)\n polygon_shape = self._find_first_polygon(base_shapes)\n return polygon_shape\n\n def _find_first_polygon(self, base_shapes):\n return_shape = base_shapes.iloc[[0]]\n for i, current_shape in enumerate(base_shapes[\"geometry\"].values):\n if self._isinstance_poly_or_multipolygon(current_shape):\n return_shape = base_shapes.iloc[[i]]\n break\n return return_shape\n\n def _isinstance_poly_or_multipolygon(self, shape):\n return True if (isinstance(shape, Polygon) or isinstance(shape, MultiPolygon)) else False\n\n def _merge_all_polygons(self, base_shape):\n polygons = base_shape.geometry.values\n base_shape = gpd.GeoSeries(cascaded_union(polygons), crs=base_shape.crs)\n return base_shape\n \n 
@staticmethod\n def convex_hull(verteces): \n \"\"\" \n Graham Scan algorithm: O(n*log(n)).\n Without it the output of the Voronoi Tessellation would be unordered verteces.\n Computing the convex hull allows to produce the correct polygons of the tiles\n since the tiles are conveniently convex and the vertex order doesn't matter.\n \"\"\"\n\n # Find the point with the minimum \"first y\" coordinates\n min_ref_pt = min(verteces, key=lambda p: (p[1], p[0]))\n\n # Compute the polar angle of p2 wrt p1\n angle_between = lambda p0, p1: math.atan2(p1[1] - p0[1], p1[0] - p0[0])\n\n # Sort points by their polar angles with respect to the min ref point\n pts = sorted(verteces, key=lambda p: angle_between(min_ref_pt, p))\n\n # Initialize a stack with the first three points\n stack = [pts[0], pts[1], pts[2]]\n\n # Compute the cross product of the vectors [p0, p1] and [p0, p2]\n cross_product = lambda p0, p1, p2: (((p1[0] - p0[0]) * (p2[1] - p0[1])) -\n ((p1[1] - p0[1]) * (p2[0] - p0[0])))\n\n # Processing the rest of the points\n for i in range(3, len(pts)):\n # Remove points from the stack until the next point is on the convex hull\n while len(stack) > 1 and cross_product(stack[-2], stack[-1], pts[i]) <= 0:\n stack.pop()\n stack.append(pts[i])\n\n return stack\n\n def compute_voronoi(self, points, crs = constants.DEFAULT_CRS):\n \n class Queue():\n \"\"\" \n I implemented this queue with a minheap as I would always need the\n minimum half edge as the first element and it's reasonably fast.\n \"\"\"\n\n def __init__(self):\n self.heap = []\n\n def is_empty(self):\n return not self.heap\n\n def insert(self, he, site, dist):\n # the heap is sorted according to the he.y intersection with the sweep_line\n he.vector_point = site\n he.sweep_line = site.y + dist # it is the site.y distance from intersection\n heapq.heappush(self.heap, (he.sweep_line, he.vector_point.x, he))\n\n def delete(self, he):\n if he.vector_point:\n self.heap.remove((he.sweep_line, he.vector_point.x, he))\n heapq.heapify(self.heap)\n\n def get_min(self):\n sweep_line, x, he = self.heap[0]\n return Point(x, sweep_line)\n\n def pop_min_he(self):\n sweep_line, x, he = heapq.heappop(self.heap)\n return he\n\n def __iter__(self):\n return iter(self.heap)\n\n class HalfEdgesLinkedList():\n \"\"\"\n A linked list to store the position of half-edges wrt each other.\n \n There is also a linear search method to get a half-edge by\n an input point.\n \n It works by moving closer to the point, checking if the point \n is to the right of the half-edge during the iterations, moving \n from either direction to arrive to desired half-edge (left to the point).\n \"\"\"\n\n def __init__(self):\n # creating empty half-edges for the ends of the list\n self.leftend = HalfEdge()\n self.rightend = HalfEdge()\n \n # now linking the 2 ends of the list\n self.leftend.right = self.rightend\n self.rightend.left = self.leftend\n\n def insert(self, first, second):\n # position the second half-edge as the first half-edge to the right of the first\n \n # insert the first half-edge to the left of the second\n second.left = first\n # putting the right of the first half-edge to the right of the second\n second.right = first.right\n \n # go to the right of the first edge and set its left new half-edge\n first.right.left = second\n # then point also the the right of the first to the second\n first.right = second\n\n def delete(self, he):\n # connecting the left and right of the input hedge\n he.left.right = he.right\n he.right.left = he.left\n # deleting the half-edge 
marking it as empty set\n he.edge = {} \n\n def linear_search(self, pt):\n he = self.leftend\n \n # it will return the left half edge near the pt\n # if it doesn't find anything it will just set to the linked list to the right end\n if he is self.leftend or (he is not self.rightend and HalfEdge.check_pt_right(pt, he)):\n he = he.right\n while he is not self.rightend and HalfEdge.check_pt_right(pt, he):\n he = he.right\n he = he.left\n else:\n he = he.left\n while he is not self.leftend and not HalfEdge.check_pt_right(pt, he):\n he = he.left\n return he\n\n class HalfEdge(object):\n \"\"\"\n Main object to manipulate to get to vector events. \n \n The vector_point is the point on which two half edges meet, it is \n initialized as None and it changes when a vecctor event is set in stone.\n If we consider an Edge the parent of two HalfEdges, then we have the \n Left and Right halves (LH, RH) which are distinguishable by their \n \"orientation\", if they are going from left to right it's a \"left half\"\n indicated with 0 otherwise it's a \"right half\" expressed with 1. \n \"\"\"\n \n LH, RH = 0, 1\n\n def __init__(self, edge=None, oriented = LH):\n self.left, self.right = None, None # create a link between halfedges (used in linked list)\n self.oriented = oriented # set the orientation of the half-edge (LH or RH)\n self.vector_point = None # Point(x, y) == intersection between half-edges\n self.sweep_line = np.inf # y-coord of sweepline associated with site event\n self.edge = edge\n\n def get_region(self, region, default):\n # region 0 is the left, 1 is the right\n region = 1 if region == 'left' else 0\n other_region = 1 - region\n \n if not self.edge: \n return default\n elif self.oriented == 1: # if oriented is left\n return self.edge.region[region]\n else:\n return self.edge.region[other_region]\n\n def __lt__(self, other):\n # It may rarely happen that while pushing the he into the queue two he are\n # compared because the sweepline and intersection point x-axis were equal.\n #\n # In this case it tries to tell which is the smaller but it doesn't really\n # matter as they are identical in the end.\n # The purpose is guaranteing a \"first-come-first-served\" policy.\n return True\n \n\n @staticmethod\n def check_pt_right(pt, he):\n # returns True if pt is to right of halfedge\n e = he.edge\n is_pt_right = pt.x > e.region[HalfEdge.RH].x\n\n # if half edge orientation is left and point is to the right \n # of the rightmost he region, then the point is to its right\n if is_pt_right and he.oriented == HalfEdge.LH: \n return True # RIGHT\n\n # if half edge orientation is right and point is to the left \n # of the rightmost he region, then the point is to its left\n if not is_pt_right and he.oriented == HalfEdge.RH:\n return False # LEFT\n \n\n # if dx was greater : a was the coefficient, b the slope\n if e.a == 1.0: \n positive_slope = e.b >= 0.0\n dxp = pt.x - e.region[HalfEdge.RH].x\n dyp = pt.y - e.region[HalfEdge.RH].y\n no_need_check = False\n # either they are both True or both False\n if not (is_pt_right or positive_slope) or (is_pt_right and positive_slope):\n above = dyp >= e.b * dxp\n no_need_check = above\n else:\n above = pt.x + (pt.y * e.b) > e.c\n if not positive_slope:\n above = not above\n if not above:\n no_need_check = True\n if not no_need_check:\n dxs = e.region[HalfEdge.RH].x - e.region[HalfEdge.LH].x\n above = e.b * (dxp*dxp - dyp*dyp) < dxs*dyp * (1.0 + 2.0*dxp/dxs + e.b*e.b)\n if not positive_slope:\n above = not above\n \n # if dy was greater : b was the coefficient, 
a the slope\n else:\n yl = e.c - e.a * pt.x\n t1 = np.square(pt.y - yl)\n t2 = np.square(pt.x - e.region[HalfEdge.RH].x)\n t3 = np.square(yl - e.region[HalfEdge.RH].y)\n above = t1 > t2 + t3\n \n if he.oriented == HalfEdge.LH:\n return above\n else:\n return not above\n \n class EventHandler():\n \"\"\"\n A helper class that manages the events happening during the construction\n of the VoronoiTessellation. It manages to bisect points with an edge and \n to intersect half-edges during the handling of the two main events of the\n Fortune's algorithm.\n \n Indeed the two core functions are:\n 1. The Handling of Site Events when the sweep line crosses a Point while\n going bottom up across coordinates\n 2. The Handling of Circle Events when the sweep line crosses the circumcenter \n of a triangle of three points.\n \n \"\"\"\n def __init__(self, points, linked_list, queue):\n self.points = iter(points)\n self.linked_list = linked_list\n self.queue = queue\n\n # a polygon for each point (original + 4 to limit the tessellation)\n # only the ids will be stored here\n self.polygons = {k : [] for k in range(len(points))}\n self.vertices = list() # a list to store the actual vertices\n\n self.first_point = next(self.points, None)\n self.next_site = next(self.points, None)\n self.minpoint = Point(-np.inf,-np.inf)\n\n self.sitenum = 0\n \n @staticmethod\n def bisect_points(p0, p1):\n endpoints = [None, None] # no endpoints on the bisector it goes to infinity\n regions = [p0, p1] # storing the original points that are going to be bisected\n \n # it is faster to work with arrays\n p0, p1 = np.array([p0.coords]), np.array([p1.coords])\n \n # get the axis-wise distance of the two points (x from x and y from y)\n dist = (p1 - p0).reshape((2, 1)) # get the flattened array\n \n # is x-axis dist greater? (unpacking array)\n dx_greater = np.greater(*np.abs(dist)) \n \n # getting the slope with the largest axis as the denominator\n denom, numer = dist if dx_greater else dist[::-1]\n slope = numer / denom\n \n # set the slope of the line and set the coefficient to 1\n a, b = (1.0, slope) if dx_greater else (slope, 1.0)\n # mid-value to compute the slope later\n c = float(p0.dot(dist) + np.sum(np.square(dist))*0.5) / denom\n \n newedge = Edge(a, b, c, endpoints, regions)\n \n return newedge\n\n @staticmethod\n def intersect_halfedges(he1, he2):\n e1 = he1.edge\n e2 = he2.edge\n \n # Three cases where the intersection cannot be performed:\n # 1. Both edges must exist\n if (e1 is None) or (e2 is None):\n return\n\n # 2. The edges shouldn't intersect on the same region:\n if (e1.region[HalfEdge.RH] is e2.region[HalfEdge.RH]):\n return\n \n # 3. 
The absolute distance between coefficient and slope\n # shouldn't be zero nor too close to it:\n \n d = e1.a * e2.b - e1.b * e2.a\n if (abs(d) < 1e-10) or (abs(d) < (1e-10 * abs(d))):\n return None\n \n # computing coordinates of intersection (equation: ax + by + c = 0)\n intersect_x = (e1.c*e2.b - e2.c*e1.b) / d\n intersect_y = (e2.c*e1.a - e1.c*e2.a) / d\n \n # checking region with bottommost site event\n first_region_btm = _pt_lt_other_(e1.region[HalfEdge.RH], e2.region[HalfEdge.RH])\n \n # selecting the edge and half-edge with a site underneath the other\n he, e = (he1, e1) if first_region_btm else (he2, e2)\n\n # checking the intersection orientation wrt bottommost edge right region\n intersection_is_right = intersect_x >= e.region[HalfEdge.RH].x\n \n if ((intersection_is_right and he.oriented == HalfEdge.LH) or\n (not intersection_is_right and he.oriented == HalfEdge.RH)):\n return None\n\n # create a new site at the point of the region intersection\n return Point(intersect_x, intersect_y) # vector_event\n \n @staticmethod\n def _add_polygon_ids(edge, polygons, vertices, pmap):\n \n # simple function to extract the original indexes\n idx = lambda p: pmap[(p.x, p.y)]\n \n # find the original points idx associated with the edge end_points\n # for both ends of edges if it is not an infinite edge, otherwise add None\n left_vert, right_vert = [vertices[idx(p)] if p else None for p in edge.end_point]\n \n # looking at the index of the original points to set the region where\n # to add the verteces\n lmap_reg, rmap_reg = [idx(p) for p in edge.region]\n \n # adding the vertices to the corresponding list in the original regions\n polygons[lmap_reg].extend((left_vert, right_vert))\n polygons[rmap_reg].extend((left_vert, right_vert))\n\n def is_site_event(self):\n return self.next_site and (self.queue.is_empty() or _pt_lt_other_(self.next_site, self.minpoint))\n\n def handle_site_event(self):\n # finding the left and right HalfEdges corresponding to the next site\n # if it doesn't find any it just create an empty HalfEdge\n left_he = self.linked_list.linear_search(self.next_site)\n right_he = left_he.right\n\n # regions are reppresented by the original points:\n \n # get the right region of the first left HalfEdge\n # if not found it is the first_point of the list\n first = left_he.get_region('right', default = self.first_point)\n \n # create the edge bisecting the point on the right region with the current site\n # during the first site event it creates it bisecting the first and second point\n edge = self.bisect_points(first, self.next_site)\n\n # creating a HalfEdge from the edge created,orienting it LtR\n # it is then added on the right of the left_he found before\n bisector = HalfEdge(edge, HalfEdge.LH)\n self.linked_list.insert(left_he, bisector)\n\n # if the bisected he intersects with the left edge\n ip = self.intersect_halfedges(left_he, bisector)\n if ip is not None:\n # remove the left edge's vertex, if exists, from min-heap-queue\n self.queue.delete(left_he)\n # pushing the new vertex into the queue (waiting to)\n self.queue.insert(left_he, ip, self.next_site.distance(ip))\n\n # now moving to the right of the bisected edge creating a RtL he\n left_he = bisector\n bisector = HalfEdge(edge, HalfEdge.RH)\n \n # inserting the HalfEdge to the right of the previous bisector\n self.linked_list.insert(left_he, bisector)\n\n # if this bisector intersects with the original right HalfEdge\n ip = self.intersect_halfedges(bisector, right_he)\n if ip is not None:\n # push the new intersection 
into the queue\n self.queue.insert(bisector, ip, self.next_site.distance(ip))\n\n # move to the next site event to process\n self.next_site = next(self.points, None)\n\n def handle_circle_event(self, pmap):\n # pop the minimum vector_event from the queue (min-heap)\n left_he = self.queue.pop_min_he()\n \n # extracting its half edges neighbours\n leftleft_he = left_he.left\n right_he = left_he.right\n rightright_he = right_he.right\n\n # get the vector_point that caused this event\n vector_event_point = left_he.vector_point\n \n # reassigning original index to the current site count\n pmap[vector_event_point.coords[0]] = self.sitenum\n self.sitenum += 1\n \n # append the vertices according to the same site count order\n self.vertices.append((vector_event_point.coords[0]))\n \n # triplet of sites through which a circle goes through\n first = left_he.get_region('left', default = self.first_point)\n second = left_he.get_region('right', default = self.first_point)\n third = right_he.get_region('right', default = self.first_point)\n\n # set the endpoint of the left and right HalfEdge to be this vector\n # add also the polygons found to the dictionary \n left_he.edge.end_point[left_he.oriented] = vector_event_point\n if left_he.edge.end_point[1 - left_he.oriented] is not None:\n self._add_polygon_ids(left_he.edge, self.polygons, self.vertices, pmap)\n\n right_he.edge.end_point[right_he.oriented] = vector_event_point\n if right_he.edge.end_point[1 - right_he.oriented] is not None:\n self._add_polygon_ids(right_he.edge, self.polygons, self.vertices, pmap)\n\n # delete the bottommost HE from linked list (already popped from queue)\n self.linked_list.delete(left_he)\n \n # remove also information regarding the right half edge\n self.queue.delete(right_he)\n self.linked_list.delete(right_he)\n\n # if the site to the left of the event is on top to the Site to the right\n # swap them and set the orientation from RtL (Right to Left)\n \n oriented = HalfEdge.LH\n if first.y > third.y:\n first, third = third, first\n oriented = HalfEdge.RH\n\n # create an edge that is between the left and right sites\n edge = self.bisect_points(first, third)\n # set the bisected half edge according to the previously defined orientation\n bisector = HalfEdge(edge, oriented)\n\n # insert the new bisector to the right of the left half edge (to the left of the first)\n self.linked_list.insert(leftleft_he, bisector) \n \n # set only one endpoint to the new edge to be the vector point:\n # if the site to the left of this bisector is on top of the right\n # then this endpoint is set on the left, otherwise on the right\n invert_or = HalfEdge.RH - oriented\n edge.end_point[invert_or] = vector_event_point\n if edge.end_point[1 - invert_or] is not None:\n self._add_polygon_ids(edge, self.polygons, self.vertices, pmap)\n\n # if left HE and the new bisector don't intersect\n ip = self.intersect_halfedges(leftleft_he, bisector)\n if ip is not None:\n self.queue.delete(leftleft_he) # remove from the current queue position\n self.queue.insert(leftleft_he, ip, first.distance(ip)) # reinsert with new\n\n # if right HE and the new bisector don't intersect, then push it to the queue\n ip = self.intersect_halfedges(bisector, rightright_he)\n if ip is not None:\n self.queue.insert(bisector, ip, first.distance(ip))\n\n def add_remaining_edges(self, pmap):\n # setting the linked list to the right of its end\n he = self.linked_list.leftend.right\n \n # until the list goes to its right most element\n while he is not self.linked_list.rightend:\n # 
add polygons to the polygon dictionary\n self._add_polygon_ids(he.edge, self.polygons, self.vertices, pmap)\n # go to the right half edge\n he = he.right\n \n # I also need a simple structure to reference edges \n Edge = namedtuple('Edge', ['a', 'b', 'c', 'end_point', 'region'],\n defaults = [0.0, 0.0, 0.0, [None]*2, [None]*2])\n \n \n def _pt_lt_other_(p0, p1): # needed to compare points by smaller \"y\"\n # if y coords are smaller or they are equal but x is smaller\n return p0.y < p1.y or (p0.y == p1.y and p0.x < p1.x)\n \n \n # putting a external bound to the tessallation by creating 4 fake points\n # just a larger than earth box: 1e4 is out of the map's bounds\n tessellation_limit = [(-1e4, 0),(0, 1e4),(1e4, 0),(0, -1e4)]\n fake_points = [Point(*lim) for lim in tessellation_limit]\n\n # redefining the points as only unique points + fake_points\n points = gpd.GeoSeries(points.geometry.unique().tolist() +\n fake_points, crs = crs)\n\n # mapping index of the unsorted points\n pmap = {pt.coords[0] : i for i, pt in enumerate(points)}\n\n # getting the points sorted by lowest 'y' coordinate\n bottom_up_sorted_pts = sorted(points, key = lambda pt: (pt.y, pt.x))\n \n event_handler = EventHandler(bottom_up_sorted_pts, \n HalfEdgesLinkedList(), \n Queue())\n\n \n # while the queue is not empty and there are still points to process\n while True:\n \n # if the queue is not empty get the first value and set it as min\n if not event_handler.queue.is_empty():\n event_handler.minpoint = event_handler.queue.get_min()\n\n # if the next_site examined is the smallest or the queue is empty\n if event_handler.is_site_event(): # SITE EVENT\n event_handler.handle_site_event()\n\n # if the min value is the smallest we\n elif not event_handler.queue.is_empty(): # CIRCLE EVENT\n event_handler.handle_circle_event(pmap)\n\n else:\n break\n\n # now adding the remaining edges in the linked list\n event_handler.add_remaining_edges(pmap)\n \n # now adding the tiles to the final output\n voronoi_tess = list()\n\n for polygon in event_handler.polygons.values():\n # filtering the placeholder for vertex pointing to infinity\n valid_polygon = list(filter(lambda vertex: vertex is not None, polygon))\n tile_coords = VoronoiTessellationTiler.convex_hull(valid_polygon)\n \n # if the convex_hull has enough vertex to form a polygon\n if len(tile_coords) > 2: \n voronoi_tess.append(Polygon(tile_coords))\n\n return voronoi_tess\n\n\n def _build(self, points, base_shape, crs=constants.DEFAULT_CRS):\n if base_shape.crs != constants.DEFAULT_CRS:\n base_shape = base_shape.to_crs(constants.DEFAULT_CRS)\n \n base_shape_gdf = gpd.GeoDataFrame(geometry=base_shape, crs=crs)\n \n # computing the voronoi tessellation\n voronoi = self.compute_voronoi(points, crs=crs)\n # cutting out the areas out of the base shape (as the voronoi should be \"infinite\")\n gdf = gpd.GeoDataFrame(geometry=voronoi, crs=crs).overlay(base_shape_gdf)\n \n # gdf = gpd.GeoDataFrame(points.copy(), crs=crs)\n gdf.loc[:, constants.TILE_ID] = list(np.arange(0, len(gdf)))\n\n # Convert TILE_ID to have str type\n gdf[constants.TILE_ID] = gdf[constants.TILE_ID].astype(\"str\")\n\n return gdf[[constants.TILE_ID, \"geometry\"]]\n\n\n# Register the builder\ntiler.register_tiler(\"voronoi\", VoronoiTessellationTiler())\n","repo_name":"lwdovico/geospatial-analytics","sub_path":"Project/tilers.py","file_name":"tilers.py","file_ext":"py","file_size_in_byte":30811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} 
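The `convex_hull` helper in the tilers record above orders the unordered vertices of a (convex) Voronoi cell with a Graham scan: pick the bottom-most point as a reference, sort the remaining vertices by polar angle around it, and pop any turn that is not counter-clockwise. A minimal self-contained sketch of that ordering step (the function name and sample points below are illustrative, not part of the record):

import math

def order_cell_vertices(vertices):
    # bottom-most (then left-most) point is the rotation reference
    ref = min(vertices, key=lambda p: (p[1], p[0]))
    # sort points by polar angle around the reference
    pts = sorted(vertices, key=lambda p: math.atan2(p[1] - ref[1], p[0] - ref[0]))
    # cross product > 0 means the turn o -> a -> b is counter-clockwise
    cross = lambda o, a, b: (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
    hull = pts[:2]
    for p in pts[2:]:
        while len(hull) > 1 and cross(hull[-2], hull[-1], p) <= 0:
            hull.pop()  # drop clockwise (or collinear) turns
        hull.append(p)
    return hull

print(order_cell_vertices([(1, 1), (0, 0), (0, 1), (1, 0)]))
# -> [(0, 0), (1, 0), (1, 1), (0, 1)], i.e. the square corners in drawing order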
+{"seq_id":"13935636657","text":"\nimport time\nimport os\nimport sys\nimport json\nimport requests\nimport yfinance as yf\n\npath_file_dict_symbol_info = 'dict_symbol_info.json'\npath_file_dict_symbol_history = 'dict_symbol_history.json'\n\nif os.path.isfile(path_file_dict_symbol_info):\n with open(path_file_dict_symbol_info, 'r') as file:\n dict_symbol_info = json.load(file)\nelse: \n dict_symbol_info = {}\n url=\"https://pkgstore.datahub.io/core/nasdaq-listings/nasdaq-listed_csv/data/7665719fb51081ba0bd834fde71ce822/nasdaq-listed_csv.csv\"\n text = requests.get(url).text\n list_line = text.split('\\r\\n')\n list_symbol = ''\n for line in list_line[1:]:\n list_part = line.split(',')\n if len(list_part) == 1:\n continue\n symbol_info = {}\n symbol_info['symbol'] = 'NDQ:' + list_part[0]\n symbol_info['company_name'] = list_part[1]\n symbol_info['securety_name'] = list_part[2]\n symbol_info['market_category'] = list_part[3]\n symbol_info['test_issue'] = list_part[4]\n symbol_info['financial_status'] = list_part[5]\n symbol_info['round_lot_size'] = list_part[6]\n\n dict_symbol_info[symbol_info['symbol']] = symbol_info\n with open(path_file_dict_symbol_info, 'w') as file:\n json.dump(dict_symbol_info, file)\n\n\nprint(len(dict_symbol_info))\n\n\nif os.path.isfile(path_file_dict_symbol_history):\n with open(path_file_dict_symbol_history, 'r') as file:\n dict_symbol_history = json.load(file)\nelse:\n dict_symbol_history = {}\n for i, symbol in enumerate(dict_symbol_info):\n print(i)\n print(len(dict_symbol_info['nasdaq']))\n ticker_symbol = yf.Ticker(symbol.split(':')[1])\n\n # get stock info\n # msft.info\n # print(msft.info)\n # get historical market data\n hist = ticker_symbol.history(period=\"max\")\n sys.stdout.flush()\n dict_symbol_history[symbol] = json.loads(hist.to_json(orient = \"records\"))\n\n with open(path_file_dict_symbol_history, 'w') as file:\n json.dump(dict_symbol_history, file)\n\n\n#NYSE:ALB,\n\nticker_symbol= yf.Ticker('ALB')\ndict_symbol_history['NYSE:ALB'] = {}\ndict_symbol_history['NYSE:ALB']['symbol'] = 'NYSE:ALB'\ndict_symbol_history['NYSE:ALB']['list_candle'] = json.loads(ticker_symbol.history(period=\"max\").to_json(orient = \"records\"))\n\n\nfrom matplotlib import pyplot as plt\nplt.figure()\nlist_symbol = ['NYSE:ALB', 'NDQ:GOOGL', 'NDQ:AAPL', 'NDQ:FB', 'NDQ:MSFT']\nfor i, symbol in enumerate(list_symbol):\n list_candle = dict_symbol_history[symbol]['list_candle']\n list_close_30 = [float(candle['Close']) for candle in list_candle[-60:-1]]\n plt.subplot(len(list_symbol), 1, i + 1)\n plt.plot(list_close_30, label=symbol)\n plt.legend()\nplt.show()\n","repo_name":"kozzion/breaker_selenium","sub_path":"script/degiro_wallet.py","file_name":"degiro_wallet.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"27993269909","text":"import logging\nimport os\nimport re\nimport signal\nfrom collections import defaultdict, namedtuple\nfrom concurrent.futures import CancelledError, ProcessPoolExecutor, wait\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom multiprocessing import Manager\nfrom typing import Iterable, Mapping, Optional\n\nfrom funcy import cached_property, first\n\nfrom dvc.exceptions import DvcException\nfrom dvc.path_info import PathInfo\nfrom dvc.repo.experiments.base import (\n EXEC_BASELINE,\n EXEC_CHECKPOINT,\n EXEC_HEAD,\n EXEC_MERGE,\n EXEC_NAMESPACE,\n EXPS_NAMESPACE,\n EXPS_STASH,\n CheckpointExistsError,\n 
ExperimentExistsError,\n ExpRefInfo,\n InvalidExpRefError,\n)\nfrom dvc.repo.experiments.executor import BaseExecutor, LocalExecutor\nfrom dvc.stage.run import CheckpointKilledError\nfrom dvc.utils import env2bool, relpath\n\nlogger = logging.getLogger(__name__)\n\n\ndef scm_locked(f):\n # Lock the experiments workspace so that we don't try to perform two\n # different sequences of git operations at once\n @wraps(f)\n def wrapper(exp, *args, **kwargs):\n with exp.scm_lock:\n return f(exp, *args, **kwargs)\n\n return wrapper\n\n\nclass BaselineMismatchError(DvcException):\n def __init__(self, rev, expected):\n if hasattr(rev, \"hexsha\"):\n rev = rev.hexsha\n rev_str = f\"{rev[:7]}\" if rev is not None else \"invalid commit\"\n super().__init__(\n f\"Experiment derived from '{rev_str}', expected '{expected[:7]}'.\"\n )\n self.rev = rev\n self.expected_rev = expected\n\n\nclass MultipleBranchError(DvcException):\n def __init__(self, rev):\n super().__init__(\n f\"Ambiguous commit '{rev[:7]}' belongs to multiple experiment \"\n \"branches.\"\n )\n self.rev = rev\n\n\nclass Experiments:\n \"\"\"Class that manages experiments in a DVC repo.\n\n Args:\n repo (dvc.repo.Repo): repo instance that these experiments belong to.\n \"\"\"\n\n EXPERIMENTS_DIR = \"experiments\"\n STASH_EXPERIMENT_FORMAT = \"dvc-exp:{rev}:{baseline_rev}:{name}\"\n STASH_EXPERIMENT_RE = re.compile(\n r\"(?:commit: )\"\n r\"dvc-exp:(?P[0-9a-f]+):(?P[0-9a-f]+)\"\n r\":(?P[^~^:\\\\?\\[\\]*]*)\"\n r\"(:(?P.+))?$\"\n )\n BRANCH_RE = re.compile(\n r\"^(?P[a-f0-9]{7})-(?P[a-f0-9]+)\"\n r\"(?P-checkpoint)?$\"\n )\n LAST_CHECKPOINT = \":last\"\n\n StashEntry = namedtuple(\n \"StashEntry\", [\"index\", \"rev\", \"baseline_rev\", \"branch\", \"name\"]\n )\n\n def __init__(self, repo):\n from dvc.lock import make_lock\n\n if not (\n env2bool(\"DVC_TEST\")\n or repo.config[\"core\"].get(\"experiments\", False)\n ):\n raise NotImplementedError\n\n self.repo = repo\n self.scm_lock = make_lock(\n os.path.join(self.repo.tmp_dir, \"exp_scm_lock\"),\n tmp_dir=self.repo.tmp_dir,\n )\n\n @cached_property\n def exp_dir(self):\n return os.path.join(self.repo.dvc_dir, self.EXPERIMENTS_DIR)\n\n @property\n def scm(self):\n return self.repo.scm\n\n @cached_property\n def dvc_dir(self):\n return relpath(self.repo.dvc_dir, self.repo.scm.root_dir)\n\n @cached_property\n def exp_dvc_dir(self):\n return os.path.join(self.exp_dir, self.dvc_dir)\n\n @property\n def exp_dvc(self):\n return self.repo\n\n @contextmanager\n def chdir(self):\n yield\n\n @cached_property\n def args_file(self):\n return os.path.join(\n self.exp_dvc.tmp_dir, BaseExecutor.PACKED_ARGS_FILE\n )\n\n @cached_property\n def stash(self):\n from dvc.scm.git import Stash\n\n return Stash(self.scm, EXPS_STASH)\n\n @property\n def stash_revs(self):\n revs = {}\n for i, entry in enumerate(self.stash):\n msg = entry.message.decode(\"utf-8\").strip()\n m = self.STASH_EXPERIMENT_RE.match(msg)\n if m:\n revs[entry.new_sha.decode(\"utf-8\")] = self.StashEntry(\n i,\n m.group(\"rev\"),\n m.group(\"baseline_rev\"),\n m.group(\"branch\"),\n m.group(\"name\"),\n )\n return revs\n\n def _stash_exp(\n self,\n *args,\n params: Optional[dict] = None,\n detach_rev: Optional[str] = None,\n baseline_rev: Optional[str] = None,\n branch: Optional[str] = None,\n name: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Stash changes from the workspace as an experiment.\n\n Args:\n params: Optional dictionary of parameter values to be used.\n Values take priority over any parameters specified in the\n user's 
workspace.\n baseline_rev: Optional baseline rev for this experiment, defaults\n to the current SCM rev.\n branch: Optional experiment branch name. If specified, the\n experiment will be added to `branch` instead of creating\n a new branch.\n name: Optional experiment name. If specified this will be used as\n the human-readable name in the experiment branch ref. Has no\n effect of branch is specified.\n \"\"\"\n with self.scm.stash_workspace(include_untracked=True) as workspace:\n # If we are not extending an existing branch, apply current\n # workspace changes to be made in new branch\n if not branch and workspace:\n self.stash.apply(workspace)\n\n # checkout and detach at branch (or current HEAD)\n if detach_rev:\n head = detach_rev\n elif branch:\n head = branch\n else:\n head = None\n with self.scm.detach_head(head) as rev:\n if baseline_rev is None:\n baseline_rev = rev\n\n # update experiment params from command line\n if params:\n self._update_params(params)\n\n # save additional repro command line arguments\n self._pack_args(*args, **kwargs)\n\n # save experiment as a stash commit\n msg = self._stash_msg(\n rev, baseline_rev=baseline_rev, branch=branch, name=name\n )\n stash_rev = self.stash.push(message=msg)\n\n logger.debug(\n (\n \"Stashed experiment '%s' with baseline '%s' \"\n \"for future execution.\"\n ),\n stash_rev[:7],\n baseline_rev[:7],\n )\n\n # Reset/clean any changes before prior workspace is unstashed\n self.scm.repo.git.reset(hard=True)\n self.scm.repo.git.clean(force=True)\n\n return stash_rev\n\n def _stash_msg(\n self,\n rev: str,\n baseline_rev: str,\n branch: Optional[str] = None,\n name: Optional[str] = None,\n ):\n if not baseline_rev:\n baseline_rev = rev\n msg = self.STASH_EXPERIMENT_FORMAT.format(\n rev=rev, baseline_rev=baseline_rev, name=name if name else \"\"\n )\n if branch:\n return f\"{msg}:{branch}\"\n return msg\n\n def _pack_args(self, *args, **kwargs):\n BaseExecutor.pack_repro_args(self.args_file, *args, **kwargs)\n self.scm.add(self.args_file)\n\n def _update_params(self, params: dict):\n \"\"\"Update experiment params files with the specified values.\"\"\"\n from benedict import benedict\n\n from dvc.utils.serialize import MODIFIERS\n\n logger.debug(\"Using experiment params '%s'\", params)\n\n for params_fname in params:\n path = PathInfo(self.exp_dvc.root_dir) / params_fname\n suffix = path.suffix.lower()\n modify_data = MODIFIERS[suffix]\n with modify_data(path, tree=self.exp_dvc.tree) as data:\n benedict(data).merge(params[params_fname], overwrite=True)\n\n # Force params file changes to be staged in git\n # Otherwise in certain situations the changes to params file may be\n # ignored when we `git stash` them since mtime is used to determine\n # whether the file is dirty\n self.scm.add(list(params.keys()))\n\n def reproduce_one(self, queue=False, **kwargs):\n \"\"\"Reproduce and checkout a single experiment.\"\"\"\n stash_rev = self.new(**kwargs)\n if queue:\n logger.info(\n \"Queued experiment '%s' for future execution.\", stash_rev[:7]\n )\n return [stash_rev]\n results = self.reproduce([stash_rev], keep_stash=False)\n exp_rev = first(results)\n if exp_rev is not None:\n self._log_reproduced(results)\n return results\n\n def reproduce_queued(self, **kwargs):\n results = self.reproduce(**kwargs)\n if results:\n self._log_reproduced(results)\n return results\n\n def _log_reproduced(self, revs: Iterable[str]):\n names = []\n for rev in revs:\n name = self.get_exact_name(rev)\n names.append(name if name else rev[:7])\n fmt = (\n 
\"\\nReproduced experiment(s): %s\\n\"\n \"To promote an experiment to a Git branch run:\\n\\n\"\n \"\\tdvc exp branch \\n\\n\"\n \"To apply the results of an experiment to your workspace run:\\n\\n\"\n \"\\tdvc exp apply \"\n )\n logger.info(fmt, \", \".join(names))\n\n @scm_locked\n def new(\n self, *args, checkpoint_resume: Optional[str] = None, **kwargs,\n ):\n \"\"\"Create a new experiment.\n\n Experiment will be reproduced and checked out into the user's\n workspace.\n \"\"\"\n if checkpoint_resume is not None:\n return self._resume_checkpoint(\n *args, checkpoint_resume=checkpoint_resume, **kwargs\n )\n\n return self._stash_exp(*args, **kwargs)\n\n def _resume_checkpoint(\n self, *args, checkpoint_resume: Optional[str] = None, **kwargs,\n ):\n \"\"\"Resume an existing (checkpoint) experiment.\n\n Experiment will be reproduced and checked out into the user's\n workspace.\n \"\"\"\n assert checkpoint_resume\n\n if checkpoint_resume == self.LAST_CHECKPOINT:\n # Continue from most recently committed checkpoint\n resume_rev = self._get_last_checkpoint()\n else:\n resume_rev = self.scm.resolve_rev(checkpoint_resume)\n allow_multiple = \"params\" in kwargs\n branch = self.get_branch_containing(\n resume_rev, allow_multiple=allow_multiple\n )\n if not branch:\n raise DvcException(\n \"Could not find checkpoint experiment \"\n f\"'{checkpoint_resume}'\"\n )\n\n baseline_rev = self._get_baseline(branch)\n if kwargs.get(\"params\", None):\n logger.debug(\n \"Branching from checkpoint '%s' with modified params, \"\n \"baseline '%s'\",\n checkpoint_resume,\n baseline_rev[:7],\n )\n detach_rev = resume_rev\n branch = None\n else:\n logger.debug(\n \"Continuing from tip of checkpoint '%s'\", checkpoint_resume\n )\n detach_rev = None\n\n return self._stash_exp(\n *args,\n detach_rev=detach_rev,\n baseline_rev=baseline_rev,\n branch=branch,\n **kwargs,\n )\n\n def _get_last_checkpoint(self):\n rev = self.scm.get_ref(EXEC_CHECKPOINT)\n if rev:\n return rev\n raise DvcException(\"No existing checkpoint experiment to continue\")\n\n @scm_locked\n def reproduce(\n self,\n revs: Optional[Iterable] = None,\n keep_stash: Optional[bool] = True,\n **kwargs,\n ):\n \"\"\"Reproduce the specified experiments.\n\n Args:\n revs: If revs is not specified, all stashed experiments will be\n reproduced.\n keep_stash: If True, stashed experiments will be preserved if they\n fail to reproduce successfully.\n \"\"\"\n stash_revs = self.stash_revs\n\n # to_run contains mapping of:\n # input_rev: (stash_index, rev, baseline_rev)\n # where input_rev contains the changes to execute (usually a stash\n # commit), rev is the original SCM commit to be checked out, and\n # baseline_rev is the experiment baseline.\n if revs is None:\n to_run = dict(stash_revs)\n else:\n to_run = {\n rev: stash_revs[rev]\n if rev in stash_revs\n else self.StashEntry(None, rev, rev, None, None)\n for rev in revs\n }\n\n logger.debug(\n \"Reproducing experiment revs '%s'\",\n \", \".join((rev[:7] for rev in to_run)),\n )\n\n executors = self._init_executors(to_run)\n exec_results = self._reproduce(executors, **kwargs)\n\n if keep_stash:\n # only drop successfully run stashed experiments\n to_drop = sorted(\n (\n stash_revs[rev][0]\n for rev in exec_results\n if rev in stash_revs\n ),\n reverse=True,\n )\n else:\n # drop all stashed experiments\n to_drop = sorted(\n (stash_revs[rev][0] for rev in to_run if rev in stash_revs),\n reverse=True,\n )\n for index in to_drop:\n self.stash.drop(index)\n\n result = {}\n for _, exp_result in 
exec_results.items():\n result.update(exp_result)\n return result\n\n def _init_executors(self, to_run):\n executors = {}\n with self.scm.stash_workspace(include_untracked=True):\n with self.scm.detach_head():\n for stash_rev, item in to_run.items():\n self.scm.set_ref(EXEC_HEAD, item.rev)\n self.scm.set_ref(EXEC_MERGE, stash_rev)\n self.scm.set_ref(EXEC_BASELINE, item.baseline_rev)\n\n # Executor will be initialized with an empty git repo that\n # we populate by pushing:\n # EXEC_HEAD - the base commit for this experiment\n # EXEC_MERGE - the unmerged changes (from our stash)\n # to be reproduced\n # EXEC_BASELINE - the baseline commit for this experiment\n executor = LocalExecutor(\n self.scm,\n self.dvc_dir,\n name=item.name,\n branch=item.branch,\n cache_dir=self.repo.cache.local.cache_dir,\n )\n executors[item.rev] = executor\n\n for ref in (EXEC_HEAD, EXEC_MERGE, EXEC_BASELINE):\n self.scm.remove_ref(ref)\n\n self.scm.repo.git.reset(hard=True)\n self.scm.repo.git.clean(force=True)\n return executors\n\n def _reproduce(\n self, executors: dict, jobs: Optional[int] = 1\n ) -> Mapping[str, Mapping[str, str]]:\n \"\"\"Run dvc repro for the specified BaseExecutors in parallel.\n\n Returns dict containing successfully executed experiments.\n \"\"\"\n result = defaultdict(dict)\n\n manager = Manager()\n pid_q = manager.Queue()\n with ProcessPoolExecutor(max_workers=jobs) as workers:\n futures = {}\n for rev, executor in executors.items():\n future = workers.submit(\n executor.reproduce,\n executor.dvc_dir,\n pid_q,\n rev,\n name=executor.name,\n )\n futures[future] = (rev, executor)\n\n try:\n wait(futures)\n except KeyboardInterrupt:\n # forward SIGINT to any running executor processes and\n # cancel any remaining futures\n pids = {}\n while not pid_q.empty():\n rev, pid = pid_q.get()\n pids[rev] = pid\n for future, (rev, _) in futures.items():\n if future.running():\n os.kill(pids[rev], signal.SIGINT)\n elif not future.done():\n future.cancel()\n\n for future, (rev, executor) in futures.items():\n rev, executor = futures[future]\n exc = future.exception()\n\n try:\n if exc is None:\n exp_hash, force = future.result()\n result[rev].update(\n self._collect_executor(executor, exp_hash, force)\n )\n else:\n # Checkpoint errors have already been logged\n if not isinstance(exc, CheckpointKilledError):\n logger.exception(\n \"Failed to reproduce experiment '%s'\",\n rev[:7],\n exc_info=exc,\n )\n except CancelledError:\n logger.error(\n \"Cancelled before attempting to reproduce experiment \"\n \"'%s'\",\n rev[:7],\n )\n finally:\n executor.cleanup()\n\n return result\n\n def _collect_executor(\n self, executor, exp_hash, force\n ) -> Mapping[str, str]:\n # NOTE: GitPython Repo instances cannot be re-used\n # after process has received SIGINT or SIGTERM, so we\n # need this hack to re-instantiate git instances after\n # checkpoint runs. 
See:\n # https://github.com/gitpython-developers/GitPython/issues/427\n del self.repo.scm\n\n results = {}\n\n def on_diverged(ref: str, checkpoint: bool):\n ref_info = ExpRefInfo.from_ref(ref)\n if checkpoint:\n raise CheckpointExistsError(ref_info.name)\n raise ExperimentExistsError(ref_info.name)\n\n for ref in executor.fetch_exps(\n self.scm, force=force, on_diverged=on_diverged,\n ):\n exp_rev = self.scm.get_ref(ref)\n if exp_rev:\n logger.debug(\"Collected experiment '%s'.\", exp_rev[:7])\n results[exp_rev] = exp_hash\n\n return results\n\n def check_baseline(self, exp_rev):\n baseline_sha = self.repo.scm.get_rev()\n if exp_rev == baseline_sha:\n return exp_rev\n\n exp_baseline = self._get_baseline(exp_rev)\n if exp_baseline is None:\n # if we can't tell from branch name, fall back to parent commit\n exp_commit = self.scm.resolve_commit(exp_rev)\n if exp_commit:\n exp_baseline = first(exp_commit.parents).hexsha\n if exp_baseline == baseline_sha:\n return exp_baseline\n raise BaselineMismatchError(exp_baseline, baseline_sha)\n\n @scm_locked\n def get_baseline(self, rev):\n \"\"\"Return the baseline rev for an experiment rev.\"\"\"\n rev = self.scm.resolve_rev(rev)\n return self._get_baseline(rev)\n\n def _get_baseline(self, rev):\n if rev in self.stash_revs:\n entry = self.stash_revs.get(rev)\n if entry:\n return entry.baseline_rev\n return None\n ref = first(self._get_exps_containing(rev))\n if not ref:\n return None\n try:\n ref_info = ExpRefInfo.from_ref(ref)\n return ref_info.baseline_sha\n except InvalidExpRefError:\n return None\n\n def _get_exps_containing(self, rev):\n for ref in self.scm.get_refs_containing(rev, EXPS_NAMESPACE):\n if not (ref.startswith(EXEC_NAMESPACE) or ref == EXPS_STASH):\n yield ref\n\n def get_branch_containing(\n self, rev: str, allow_multiple: bool = False\n ) -> str:\n names = list(self._get_exps_containing(rev))\n if not names:\n return None\n if len(names) > 1 and not allow_multiple:\n raise MultipleBranchError(rev)\n return names[0]\n\n def get_exact_name(self, rev: str):\n exclude = f\"{EXEC_NAMESPACE}/*\"\n ref = self.scm.describe(rev, base=EXPS_NAMESPACE, exclude=exclude)\n if ref:\n return ExpRefInfo.from_ref(ref).name\n return None\n\n def iter_ref_infos_by_name(self, name: str):\n for ref in self.scm.iter_refs(base=EXPS_NAMESPACE):\n if ref.startswith(EXEC_NAMESPACE) or ref == EXPS_STASH:\n continue\n ref_info = ExpRefInfo.from_ref(ref)\n if ref_info.name == name:\n yield ref_info\n\n def apply(self, *args, **kwargs):\n from dvc.repo.experiments.apply import apply\n\n return apply(self.repo, *args, **kwargs)\n\n def branch(self, *args, **kwargs):\n from dvc.repo.experiments.branch import branch\n\n return branch(self.repo, *args, **kwargs)\n\n def diff(self, *args, **kwargs):\n from dvc.repo.experiments.diff import diff\n\n return diff(self.repo, *args, **kwargs)\n\n def show(self, *args, **kwargs):\n from dvc.repo.experiments.show import show\n\n return show(self.repo, *args, **kwargs)\n\n def run(self, *args, **kwargs):\n from dvc.repo.experiments.run import run\n\n return run(self.repo, *args, **kwargs)\n\n def gc(self, *args, **kwargs):\n from dvc.repo.experiments.gc import gc\n\n return gc(self.repo, *args, **kwargs)\n","repo_name":"mtl-ai/dvc","sub_path":"dvc/repo/experiments/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":21931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"15"} +{"seq_id":"75169735692","text":"# -*- coding: UTF-8 -*- 
#\n\"\"\"\n@filename:plot_dataset.py\n@author:201300086\n@time:2022-11-21\n\"\"\"\nfrom pedestrian_data import PedestrianDataset\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\n# Walking trajectory plot\nCUT_BEGIN = 0 # drop the first few seconds of data, since the GPS is inaccurate at first\ndef plot_locus(Lati,Longi,label=\"TBD\",cut_begin=CUT_BEGIN):\n Lati = Lati[cut_begin:]\n Longi = Longi[cut_begin:]\n # plt.xlabel(\"Latitude (°)\")\n # plt.ylabel(\"Longitude (°)\")\n plt.text(Lati[0], Longi[0], 's', fontsize=10)\n plt.text(Lati[-1], Longi[-1], 'e', fontsize=10)\n plt.plot(Lati, Longi,'+', markersize=1, label=label, )#\n plt.legend(loc=0)\n\ndef plot_locus_realtime(Lati,Longi,label=\"TBD\",cut_begin=CUT_BEGIN):\n plt.clf() # clear the previously drawn figure\n Lati = Lati[cut_begin:]\n Longi = Longi[cut_begin:]\n # plt.xlabel(\"Latitude (°)\")\n # plt.ylabel(\"Longitude (°)\")\n plt.text(Lati[0], Longi[0], 's', fontsize=10)\n plt.text(Lati[-1], Longi[-1], 'e', fontsize=10)\n plt.plot(Lati, Longi,'+', markersize=1, label=label, )#\n plt.legend(loc=0)\n plt.pause(0.001) # pause briefly, otherwise drawing too fast stalls and nothing is displayed\n plt.ioff() # turn off interactive plotting\n\n\n\n# Plot of gravitational acceleration over time\ndef plot_gravity(sample:dict,title=\"TBD\"):\n a = sample['Accelerometer']\n b = sample['Linear Acceleration']\n minus = (a - b)\n print(minus)\n c = np.array(list(map(lambda x: np.linalg.norm(x), minus)))\n print(c.mean())\n plt.plot(range(len(c)), c)\n plt.title(title)\n\n# l, r = (0, 9) # plotting range\n# dataset = PedestrianDataset([\"Hand-Walk\"], window_size=1000)\n#\n# for num, (name, locus) in enumerate(dataset):\n# if num in range(l, r):\n# print(\"Traversing trajectory {}... \\n\".format(name))\n# plt.subplot(33 * 10 + num%10 + 1)\n# for sample in locus:\n# #print(len(locus))\n# plot_gravity(sample,title=\"{}\".format(name))\n# break\n# if num >= r:\n# break\n# plt.show()\nif __name__ == \"__main__\":\n matplotlib.use('TkAgg')\n\n l, r = (0, 9) # plotting range\n dataset = PedestrianDataset([\"TestSet\"], window_size=100,skip_len=5) # specify the data folder\n for num, (name, locus) in enumerate(dataset):\n if num in range(l, r):\n print(\"Traversing trajectory {}... 
\\n\".format(name))\n plt.subplot(33 * 10 + num % 10 + 1)\n locus_pair = np.array(list(zip(locus.y_frame[\"Longitude (°)\"], locus.y_frame[\"Latitude (°)\"])))\n print(\"Trajectory length: \", len(locus_pair))\n plot_locus(locus_pair.T[0],locus_pair.T[1], label=\"{}\".format(name))\n if num >= r:\n break\n plt.show()\n","repo_name":"Googol2002/Pedestrian-Dead-Reckoning","sub_path":"plot_dataset.py","file_name":"plot_dataset.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"15"} +{"seq_id":"25585195263","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn import metrics\nfrom sklearn.utils import resample\nimport os\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport xgboost as xgb\n\nfrom sklearn.model_selection import ParameterGrid\nimport json\nfrom imblearn.over_sampling import SMOTE\nimport time\n\nalgo_dict = {}\nalgo_dict['CNB'] = ComplementNB\nalgo_dict['LR'] = LogisticRegression\nalgo_dict['DT'] = DecisionTreeClassifier\nalgo_dict['MLP'] = MLPClassifier\nalgo_dict['SVM'] = LinearSVC\nalgo_dict['XGBoost'] = xgb.XGBClassifier\nalgo_dict['RF'] = RandomForestClassifier\n\nalgo_param_grid = {}\n\nalgo_param_grid['MLP'] = {\n 'hidden_layer_sizes': [(64, 32), (32, 16), (16, 8), (8,)],\n 'activation': ['tanh', 'relu', 'identity', 'logistic'],\n 'solver': ['sgd', 'adam', 'lbfgs'],\n 'alpha': [1e-3, 1e-4, 5e-4, 5e-3, 1e-2],\n 'max_iter': [100, 300]\n}\n\nalgo_param_grid['LR'] = {\n 'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],\n 'max_iter': [100, 200, 300, 400, 500],\n 'class_weight': ['balanced']\n}\n\nalgo_param_grid['DT'] = {\n 'max_depth': [6, 8, 10, 12, 16],\n 'max_features': ['auto', 'log2', 'sqrt'],\n 'class_weight': ['balanced']\n}\n\nalgo_param_grid['RF'] = {'n_estimators':[100, 200, 300, 400, 500], \n 'max_samples': [0.1, 0.2, 0.3, 0.4, 0.5], \n 'max_features':[0.1, 0.2, 0.3, 0.4, 0.5],\n 'max_depth': [6, 8, 12, 16, 20],\n 'n_jobs': [2], \n 'criterion':['entropy'],\n 'min_samples_leaf':[1, 2, 4, 8],\n 'min_samples_split':[2, 4, 8, 16], \n 'class_weight':['balanced_subsample'],\n 'verbose':[0] }\n \nalgo_param_grid['XGBoost'] = {'n_estimators':[100, 200, 300, 400, 500],\n 'eta': [1e-3, 1e-4, 5e-4, 5e-3, 1e-2],\n 'max_depth': [6, 8, 12, 16, 20],\n 'colsample_bytree':[0.1, 0.2, 0.3, 0.4, 0.5],\n 'subsample':[0.1, 0.2, 0.3, 0.4, 0.5],\n 'nthread':[4],\n 'objective':['binary:logistic'], \n 'verbosity':[0] }\n\nalgo_param_grid['CNB'] = {'alpha': [1e-4, 1e-3, 1e-2, 1e-1, 1]}\n\nalgo_param_grid['SVM'] = {#'penalty': ['l1', 'l2'],\n 'tol': [1e-4, 5e-4, 5e-3, 1e-3, 1e-2],\n 'C': [1e-2, 1e-1, 1],\n #'loss': ['hinge', 'squared_hinge'],\n 'loss': ['squared_hinge'],\n 'dual': [False, True],\n 'class_weight': ['balanced']\n }\n\n# Number of neighbors used in SMOTE technique\nfor algo in algo_dict.keys():\n algo_param_grid[algo]['k_neighbors'] = [3, 5, 7, 11, 17]\n\ndef evaluate(dataset, train_index, test_index, algo, param):\n X = dataset.drop(columns=['File', 'label'], axis=1)\n y = dataset['label']\n \n X_train = X.loc[train_index]\n X_test = X.loc[test_index]\n y_train = y.loc[train_index]\n y_test = y.loc[test_index]\n \n k_neighbors = param['k_neighbors']\n oversample = SMOTE(k_neighbors = k_neighbors)\n 
X_train, y_train = oversample.fit_resample(X_train, y_train)\n \n sc = MinMaxScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n \n param.pop('k_neighbors', None)\n model = algo(**param)\n \n model.fit(X_train, y_train)\n if hasattr(model, \"predict_proba\"):\n probs = model.predict_proba(X_test)\n prob_pos = probs[:,1]\n else: # use decision function\n prob_pos = model.decision_function(X_test)\n prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n \n fpr, tpr, thresholds = metrics.roc_curve(y_test, prob_pos, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n \n precision, recall, _ = metrics.precision_recall_curve(y_test, prob_pos)\n pr_auc = metrics.auc(recall, precision)\n \n max_mcc = -1\n optimal_threshold = 0\n for threshold in thresholds:\n y_pred = (prob_pos >= threshold).astype(bool) \n mcc = metrics.matthews_corrcoef(y_test, y_pred)\n if mcc > max_mcc:\n max_mcc = mcc\n optimal_threshold = threshold\n\n\n \n result = {}\n result['auc'] = auc\n result['mcc'] = max_mcc\n result['pr_auc'] = pr_auc\n \n # restore param for SMOTE technique\n param['k_neighbors'] = k_neighbors \n return result\n\ndef parameter_optimize(project_data_dir, dataset, algo, metrics_name=''):\n start_time = time.time()\n N = 100\n train_list = []\n test_list = []\n for i in range(N):\n train_index = resample(dataset.index.values, random_state=i, stratify=dataset['label'])\n test_index = list(set(dataset.index.values) - set(train_index))\n train_list.append(train_index)\n test_list.append(test_index)\n \n \n results_dict = {}\n test_results_dict = {}\n print(algo)\n params = list(ParameterGrid(algo_param_grid[algo]))\n print('len params: ', len(params))\n results_dict[algo] = [[] for i in range(len(params))]\n for i in range(N):\n \n train_index = resample(train_list[i], replace=False, n_samples = int(len(train_list[i])*0.7))\n test_index = list(set(train_list[i]) - set(train_index))\n for index, param in enumerate(params):\n try:\n result = evaluate(dataset, train_index, test_index, algo_dict[algo], param)\n results_dict[algo][index].append(result)\n except Exception as e:\n continue\n\n\n \n max_pr_auc = 0\n max_param = 0\n for i in range(len(params)):\n sum_pr_auc = 0\n # some runs may have failed above, so sum over the results actually collected\n for run_result in results_dict[algo][i]:\n sum_pr_auc += run_result['pr_auc']\n if sum_pr_auc > max_pr_auc:\n max_pr_auc = sum_pr_auc\n max_param = i\n \n \n with open(os.path.join(project_data_dir, 'best_params%s' % metrics_name, '%s.txt' % algo), 'w') as file:\n file.write(str(max_param))\n \n test_results_dict[algo] = []\n for i in range(N):\n print(\"Test iteration: \", i)\n train_index = train_list[i]\n test_index = test_list[i]\n result = evaluate(dataset, train_index, test_index, algo_dict[algo], params[max_param])\n test_results_dict[algo].append(result)\n \n with open(os.path.join(project_data_dir, 'best_params%s' % metrics_name, '%s_test_results.json' % algo), 'w') as file:\n file.write(json.dumps(test_results_dict))\n\n print(\"Time spent in parameter tuning --- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"NintyFive/PerformanceBugPrediction","sub_path":"RQ1_RQ2/evaluate_functions.py","file_name":"evaluate_functions.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"73808726730","text":"from django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom .models import Product\nfrom Shop.models import Order,Basket,Comments\nfrom django.db.models 
import Q\nfrom Core.models import Category\nfrom Account.models import Userinformation\nfrom django.contrib.auth.models import User\n# Create your views here.\n\ndef SellerHome(request):\n user=Userinformation.objects.get(user=request.user)\n if user.is_seller:\n pro=0\n finallprice=0\n pr=Product.objects.filter(User=request.user).count()\n ord=Order.objects.filter(Q(orderstatus=1) | Q(orderstatus=2))\n for i in ord:\n bs=Basket.objects.filter(orderid=i)\n if bs:\n for x in bs:\n if x.productid.User == request.user:\n pro+=x.countp\n finallprice +=(x.productid.Price*x.countp)\n return render(request,'Seller/Home.html',{'pr':pr,'prs':pro,'price':finallprice})\n else:\n messages.warning(request,'شما دسترسی فروشندگی ندارید .') \n return redirect('Home')\n\ndef Products(request):\n pr=Product.objects.filter(User=request.user)\n messages.success(request,'محصولات')\n return render(request,'Seller/Products.html',{'pr':pr})\n\ndef Rmpr(request,idpr):\n pr=Product.objects.get(pk=idpr)\n pr.delete()\n messages.success(request,'محصول با موفقیت حذف شد .')\n return redirect('Seller/Home')\n\ndef AddProduct(request):\n return render(request,'Seller/AddProducts.html')\n\ndef addpr(request):\n if request.method=='POST':\n name=request.POST['name']\n description=request.POST['description']\n image =request.FILES['image']\n cty=request.POST['category']\n ct=Category.objects.get(pk=cty)\n price=request.POST['price']\n size=request.POST['size']\n qty=request.POST['qty']\n pr=Product(User=request.user,Title=name,Description=description,category=ct,Price=price,Size=size,Qty=qty,Image=image)\n pr.save()\n if pr :\n messages.success(request,'محصول با موفقیت اضافه شد .')\n else :\n messages.warning(request,'متاسفانه محصول اضافه نشد .') \n return redirect('Seller/Home')\n\ndef Editpr(request,idpr):\n pr=Product.objects.get(pk=idpr)\n return render(request,'Seller/EditProduct.html',{'pr':pr})\n\ndef editproduct(requset,idpr):\n pro=Product.objects.get(pk=idpr)\n if requset.method=='POST':\n name=requset.POST['name']\n description=requset.POST['description']\n image =requset.FILES.get('image',pro.Image)\n cty=requset.POST['category']\n ct=Category.objects.get(pk=cty)\n price=requset.POST['price']\n size=requset.POST['size']\n qty=requset.POST['qty']\n pr=Product(pk=idpr,User=requset.user,Title=name,Description=description,category=ct,Price=price,Size=size,Qty=qty,Image=image)\n pr.save()\n if pr :\n messages.success(requset,'محصول با موفقیت ویرایش شد .')\n else :\n messages.warning(requset,'متاسفانه محصول ویرایش نشد .')\n return redirect('Seller/Home')\n\ndef SoldProducts(request):\n sold=[]\n ord=Order.objects.filter(Q(orderstatus=1) | Q(orderstatus=2))\n for x in ord:\n bs=Basket.objects.filter(orderid=x)\n for i in bs:\n if i.productid.User == request.user:\n sold.append(i)\n messages.success(request,'کالاهای فروخته شده') \n return render(request,'Seller/SoldProducts.html',{'pr':sold}) \n\ndef comments(request):\n com=[]\n pr=Product.objects.filter(User=request.user)\n for i in pr:\n cm=Comments.objects.filter(product=i)\n com.append(cm)\n messages.success(request,'نظرات') \n return render(request,'Seller/Comments.html',{'cm':com}) \n","repo_name":"EhsanKhalili81/LocallyShop","sub_path":"LocallyShop/Seller/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"6493579317","text":"import pandas as pd\nimport numpy as np\nimport plotly.express as px\nfrom dash import Dash, html, dcc, 
Input, Output\nfrom dash_bootstrap_components.themes import BOOTSTRAP\nfrom src.components import ids\n\n\ndef render(app:Dash, min_year, max_year):\n\n return html.Div(\n children=[\n html.H5('Year'),\n\n dcc.Slider(\n id=ids.YEAR_SLIDER,\n value=min_year,\n min=min_year,\n max=max_year,\n step=1,\n marks=None,\n tooltip={\"placement\": \"bottom\", \"always_visible\": True},\n updatemode='drag'\n ),\n ],\n )","repo_name":"rafaelweinert/populationDashboard","sub_path":"src/components/yearSlider.py","file_name":"yearSlider.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"20668418738","text":"'''Database.py - Database utility functions\n===========================================\n\nThis module contains convenience functions to work with a relational\ndatabase.\n\n\nReference\n---------\n\n'''\nimport time\nimport re\nfrom pandas import DataFrame\n\n\ndef executewait(dbhandle, statement, error=Exception, regex_error=\"locked\",\n retries=-1, wait=5):\n '''repeatedly execute an SQL statement until it succeeds.\n\n\n Arguments\n ---------\n dbhandle : object\n A DB-API conform database handle.\n statement : string\n SQL statement to execute.\n error : string\n Exception to catch and examine for error messages.\n regex_error : string\n Any error message matching `regex_error` will be ignored,\n otherwise the procedure exists.\n retries : int\n Number of retries. If set to negative number, retry indefinitely.\n If set to 0, there will be only one attempt.\n wait : int\n Number of seconds to way between retries.\n\n Returns\n -------\n A cursor object\n\n '''\n cc = dbhandle.cursor()\n\n while 1:\n try:\n cc.execute(statement)\n except error as msg:\n if retries == 0:\n raise\n if not re.search(\"locked\", str(msg)):\n raise\n time.sleep(wait)\n retries -= 1\n continue\n break\n return cc\n\n\ndef getColumnNames(dbhandle, table):\n \"\"\"return column names of a table from a database.\n \"\"\"\n\n cc = executewait(dbhandle, \"SELECT * FROM %s LIMIT 1\" % table)\n return tuple([x[0] for x in cc.description])\n\n\ndef getTables(dbhandle):\n \"\"\"get list of tables in an sqlite database\"\"\"\n cc = executewait(\n dbhandle, \"\"\"select name from sqlite_master where type='table'\"\"\")\n return tuple([x[0] for x in cc])\n\n\ndef toTSV(dbhandle, outfile, statement, remove_none=True):\n '''execute statement and save as tsv file\n to disk.\n\n If *remove_none* is true, empty/NULL values will be output as\n empty values.\n\n '''\n cc = dbhandle.cursor()\n cc.execute(statement)\n outfile.write(\"\\t\".join([x[0] for x in cc.description]) + \"\\n\")\n\n def _str(x):\n if x is None:\n return \"\"\n else:\n return str(x)\n\n if remove_none:\n f = _str\n else:\n f = str\n\n outfile.write(\"\\n\".join(\n [\"\\t\".join(map(f, x)) for x in cc]))\n\n\ndef db_execute(cc, statements):\n '''excute a statement or statements against a cursor'''\n\n if type(statements) not in (list, tuple):\n statements = [statements]\n\n for statement in statements:\n cc.execute(statement)\n\n\ndef connect(dbhandle, attach=None):\n \"\"\"attempt to connect to database.\n\n If `dbhandle` is an existing connection to a database,\n it will be returned unchanged. 
Otherwise, this method\n will attempt to establish a connection.\n\n Arguments\n ---------\n dbhandle : object or string\n A database handle or a connection string.\n\n Returns\n -------\n dbhandle : object\n A DB-API2 conforming database handle\n \"\"\"\n if isinstance(dbhandle, str):\n try:\n import sqlite3\n except ImportError:\n raise ValueError(\n \"If an sqlite database location is passed\"\n \" directly the sqlite3 module must be installed\")\n\n dbhandle = sqlite3.connect(dbhandle)\n\n cc = dbhandle.cursor()\n\n if attach is not None:\n if isinstance(attach, str):\n db_execute(cc, attach)\n elif isinstance(attach, (tuple, list)):\n for attach_statement in attach:\n db_execute(cc, attach_statement)\n\n return dbhandle\n\n\ndef execute(queries, dbhandle=None, attach=False):\n '''Execute a statement or a list of statements (sequentially)'''\n\n cc = dbhandle.cursor()\n\n if attach:\n db_execute(cc, attach)\n\n db_execute(cc, queries)\n cc.close()\n\n\ndef fetch(query, dbhandle=None, attach=False):\n '''Fetch all query results and return'''\n\n cc = dbhandle.cursor()\n\n if attach:\n db_execute(cc, attach)\n\n sqlresult = cc.execute(query).fetchall()\n cc.close()\n return sqlresult\n\n\ndef fetch_with_names(query,\n dbhandle=None,\n attach=False):\n '''Fetch query results and returns them as an array of row arrays, in\n which the first entry is an array of the field names\n\n '''\n\n dbhandle = connect(dbhandle, attach=attach)\n\n cc = dbhandle.cursor()\n sqlresult = cc.execute(query).fetchall()\n\n data = []\n # http://stackoverflow.com/questions/4147707/\n # python-mysqldb-sqlite-result-as-dictionary\n field_names = [d[0] for d in cc.description]\n data.append([name for name in field_names])\n for record in sqlresult:\n line = [field for field in record]\n data.append(line)\n\n cc.close()\n return data\n\n\ndef fetch_DataFrame(query,\n dbhandle=None,\n attach=False):\n '''Fetch query results and returns them as a pandas dataframe'''\n\n dbhandle = connect(dbhandle, attach=attach)\n\n cc = dbhandle.cursor()\n sqlresult = cc.execute(query).fetchall()\n cc.close()\n\n # see http://pandas.pydata.org/pandas-docs/dev/generated/\n # pandas.DataFrame.from_records.html#pandas.DataFrame.from_records\n # this method is design to handle sql_records with proper type\n # conversion\n\n field_names = [d[0] for d in cc.description]\n pandas_DataFrame = DataFrame.from_records(\n sqlresult,\n columns=field_names)\n return pandas_DataFrame\n\n\ndef write_DataFrame(dataframe,\n tablename,\n dbhandle=None,\n index=False,\n if_exists='replace'):\n '''write a pandas dataframe to an sqlite db, index on given columns\n index columns given as a string or list eg. 
\"gene_id\" or\n [\"gene_id\", \"start\"]\n\n '''\n\n dbhandle = connect(dbhandle)\n\n dataframe.to_sql(tablename,\n con=dbhandle,\n flavor='sqlite',\n if_exists=if_exists)\n\n def indexStat(tablename, column):\n istat = ('create index %(tablename)s_%(column)s '\n 'on %(tablename)s(%(column)s)') % locals()\n return istat\n\n if index:\n\n cc = dbhandle.cursor()\n\n if isinstance(index, str):\n istat = indexStat(tablename, index)\n print(istat)\n db_execute(cc, istat)\n elif isinstance(index, (tuple, list)):\n for column in index:\n istat = indexStat(tablename, column)\n db_execute(cc, istat)\n\n cc.close()\n","repo_name":"CGATOxford/cgat","sub_path":"CGAT/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"15"} +{"seq_id":"41590856665","text":"import torch\nfrom torch.autograd import Variable\nimport config as c\nfrom preprocessing.sent_piece_model import sentence_piece_model\nimport os\nimport model as model\nfrom torch.utils.data import DataLoader\nimport random\n\nif __name__ == \"__main__\":\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n model = model.classifier()\n model.load_state_dict(torch.load(c.model_path))\n model.eval()\n model.cuda()\n\n spm = sentence_piece_model()\n spm.train_or_load()\n \n print(\"안녕하세요 ㅎㅎ\")\n\n while True:\n q = input(\"\\n\")\n q = spm.convert_word_to_index(q)\n q = spm.padding(q)\n q = q[:c.max_seq]\n\n predict_loader = DataLoader([q], shuffle=False)\n for q in predict_loader:\n q = Variable(torch.stack(q).cuda())\n out = model(q)\n _, pred = torch.max(out.data, 1)\n pred = pred.tolist()[0]\n # print(pred)\n\n if pred == 0:\n a = [\"아마도요!\", \"그렇겠죠?\", \"나 말고 경훈이 한테 말해요\", \"난 그냥 막걸리나 마셨으면 좋겠다\"]\n elif pred == 1:\n a = [\"아 이런 ㅜㅜ\", \"힘내요ㅜ\"]\n elif pred == 2:\n a = [\"할렐루야!\", \"아 역시 될 줄 알았어 ㅎㅎ\", \"축하염ㅎㅎ\", \"이런 날엔 소고기지!!\"]\n\n print(random.choice(a))\n\n\n","repo_name":"CHUUUU/nlp","sub_path":"example_intent_chatbot/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"15"} +{"seq_id":"28102613029","text":"\"\"\"\nAdvent of Code 2022\nDay 12\n\n\"\"\"\nfrom helpers.io import read_input\nimport numpy as np\nimport copy\nimport sys\n\npaths_found = set()\ndebug = False\n\ndef get_ord(char):\n return ord(char) - ord('a')\n\ndef read_map(lines):\n map_tmp = []\n for line in lines:\n map_line = []\n for char in line:\n if char == 'S':\n hight = 0\n elif char == 'E':\n hight = ord('z') - ord('a')\n else:\n hight = ord(char) - ord('a')\n map_line.append(hight)\n map_tmp.append(map_line)\n map = np.array(map_tmp)\n return map\n\n\ndef get_neighbours(map, location):\n left_y = max(0, location[1]-1)\n right_y = min(map.shape[1]-1, location[1]+1)\n upper_x = max(0, location[0]-1)\n lower_x = min(map.shape[0]-1, location[0]+1)\n neigbours = []\n for x in range(upper_x, lower_x + 1):\n for y in range(left_y, right_y + 1):\n if (x, y) != location and not (x != location[0] and y != location[1]):\n neigbours.append((x, y))\n return neigbours\n\n\ndef find_dest(map, path, visited, dest, rec_level):\n # path is a list of coords\n # the last item is the current location\n # this recursive function ends if there are no more possibilities to climb or the destination has been reached\n cur_loc = path[-1]\n print(f'running, rec_level: {rec_level}, cur_loc: {cur_loc}, path: 
{path}')\n if str(path) in paths_found:\n print('duplicate path')\n sys.exit()\n else:\n paths_found.add(str(path))\n neighbours = get_neighbours(map, cur_loc)\n found_paths = []\n org_path = copy.copy(path)\n org_visited = copy.copy(visited)\n for neighbour in neighbours:\n if map[neighbour] in [map[cur_loc], map[cur_loc]+1] and neighbour == dest:\n # Destination found\n spaces = rec_level * 2 * ' '\n if debug:\n print(f\"{spaces}{neighbour}\")\n is_valid = True\n path.append(neighbour)\n visited.append(cur_loc)\n found_paths.append(path)\n print(f\"Path found: length {len(path)}: {path}\")\n if len(path) == 60:\n print('stop')\n break\n if map[neighbour] in [map[cur_loc], map[cur_loc]+1] and neighbour not in visited:\n path = copy.copy(org_path)\n spaces = len(path) * 2 * ' '\n if debug:\n print(f\"{spaces}{rec_level:6}: {neighbour} - {map[neighbour]} = {chr(ord('a') + map[neighbour])}\")\n visited.append(cur_loc)\n path.append(neighbour)\n path_lengths = [len(path1) for path1 in found_paths]\n if not path_lengths or len(path) <= min(path_lengths):\n is_valid, new_found_paths = find_dest(map, path, visited, dest, rec_level + 1)\n if new_found_paths:\n found_paths.extend(new_found_paths)\n else:\n is_valid = False\n else:\n is_valid = False\n return is_valid, found_paths\n\n\ndef print_path(path):\n grid = []\n x_dim = max([coord[0] for coord in path]) + 1\n y_dim = max([coord[1] for coord in path]) + 1\n grid = np.full((x_dim, y_dim), '.')\n prev_coord = path[0]\n for coord in path[1:]:\n if coord[0] - prev_coord[0] == 0:\n if coord[1] - prev_coord[1] == 1:\n grid[(prev_coord)] = '>'\n else:\n grid[(prev_coord)] = '<'\n elif coord[0] - prev_coord[0] == 1:\n grid[(prev_coord)] = 'v'\n elif coord[0] - prev_coord[0] == -1:\n grid[(prev_coord)] = '^'\n else:\n print('this cannot happen')\n prev_coord = coord\n print(grid)\n\ndef get_start_end(lines: list, letter):\n for x, line in enumerate(lines):\n for y, char in enumerate(line):\n if char == letter:\n return (x,y)\n return None\n\n# ======================================\n# MAIN\n# ======================================\n\ntest = False\ntest = True\nif test:\n input_file = 'input_test.txt'\nelse:\n input_file = 'input.txt'\n\nfile_lines = read_input(input_file)\nS = get_start_end(file_lines, 'S')\nE = get_start_end(file_lines, 'E')\n\nmap = read_map(file_lines)\n\n\n# l1 = get_neighbours(map, (0,0))\n\ncount_print = 0\n_, paths = find_dest(map, [S], [], E, 0)\n\nprint(min([len(path) for path in paths]) - 1)\n\nshortest_paths = [path for path in paths if len(path) == min([len(p) for p in paths])]\nfor shortest_path in shortest_paths:\n print_path(shortest_path)\n print()\n\nprint('finished')\n","repo_name":"BasvanderWorp/aoc_2022","sub_path":"day12/day_12a.py","file_name":"day_12a.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"8922856628","text":"# -*- coding: utf-8 -*-\n# @Author : Lodge\n\n# kept in its own module so it can be swapped out more easily later\nimport re\nimport sys\nimport traceback\n\nfrom loguru import logger\n\n\ndef my_logger(file_root, log_function, log_line, message, log_level=\"error\"):\n \"\"\"\n Since this is for my own use, I only use warning, error and debug\n :param file_root: path of the file where the error was logged\n :param log_function: function name\n :param log_line: line number of the error\n :param message: message to output\n :param log_level: log level\n \"\"\"\n fmt_str = \"{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | \" + str(file_root) +\":[\" + str(log_function) +\"]:\" + str(log_line) + \" - {message}\"\n logger.remove()\n ctx = 
logger.add(\n sys.stderr,\n level=\"INFO\",\n colorize=True,\n format=fmt_str\n )\n if log_level.lower() == 'error':\n logger.error(message)\n elif log_level.lower() == \"debug\":\n logger.debug(message)\n else:\n logger.warning(message)\n # restore the logger's default state\n try:\n logger.remove(ctx)\n except Exception:\n pass\n finally:\n logger.add(sys.stderr) # stderr flushes immediately; stdout is line-buffered and only flushes on newline\n\n\ndef handle_exception(traceback_format_info: str, function_name: str):\n \"\"\"\n Just pass in traceback.format_exc() here\n :param traceback_format_info : traceback.format_exc()\n :param function_name: __name__ of the function passed in\n :return --> error line, error file path, exception type, exception detail\n \"\"\"\n results = traceback_format_info.split('\\n')\n line = fl = ''\n for item in results[::-1]:\n if function_name in item:\n line = \"\".join(re.findall(r'line (\\d+)', item))\n fl = ''.join(re.findall(r'File \"(.*)\",', item))\n break\n exception_type = results[-2]\n exception_detail = results[-3].strip()\n return line, fl, exception_type, exception_detail\n\n\ndef get_using_line_info(limit: int = 7):\n \"\"\"\n This only reads the stack information\n \"\"\"\n try:\n tb_data = traceback.format_stack(limit=limit)\n strings = \"\".join(tb_data).split('\\n')[0]\n line = \"\".join(re.findall(r'line (\\d+)', strings))\n fl = ''.join(re.findall(r'File \"(.*)\"', strings))\n return line, fl\n except Exception as err:\n _ = err\n return \"\", \"\"\n\n","repo_name":"Heartfilia/lite_tools","sub_path":"lite_tools/logs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"15"} +{"seq_id":"35816586054","text":"class Solution:\n def reverse(self, x: int) -> int:\n \n \n \n if x >= 0:\n r = int(str(x)[::-1])\n else:\n\n r = -1*int(str(abs(x))[::-1])\n\n \n if ((r >= (-(2**31))) and (r<=((2**31)-1))): \n return r\n return 0","repo_name":"sdshone/leetcode-submissions","sub_path":"7-reverse-integer/7-reverse-integer.py","file_name":"7-reverse-integer.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"70623294092","text":"from hashlib import sha1 as sha\nfrom random import SystemRandom\n\nfrom flask import current_app, request\nfrom modularodm import StoredObject, fields\n\n\nfrom .db import db\n\n# Use permanent API keys to associate abstract users with credentials.\n\n# Where are author and email stored?\n\n# This side - how can we subscribe to changes on the source system?\n\n# Client site - what are the implications of trusting them?\n\n\nclass AuthContext(object):\n\n @property\n def ssh_private_key(self):\n raise NotImplementedError\n\n @property\n def ssh_public_key(self):\n raise NotImplementedError\n\n @property\n def full_name(self):\n raise NotImplementedError\n\n @property\n def email(self):\n raise NotImplementedError\n\n def can_read_repo(self, repo_id):\n raise NotImplementedError\n\n def can_write_repo(self, repo_id):\n raise NotImplementedError\n\n def is_repo_admin(self, repo_id):\n raise NotImplementedError\n\n @property\n def can_create_repos(self):\n raise NotImplementedError\n\n\nclass PublicAuthContext(AuthContext):\n _id = None\n\n def can_read_repo(self, repo_id):\n return RepoMeta.load(repo_id).is_public\n\n def can_write_repo(self, repo_id):\n return False\n\n def is_repo_admin(self, repo_id):\n return False\n\n can_create_repos = False\n\n\nclass KeyAuthContext(StoredObject, AuthContext):\n _id = fields.StringField(primary=True)\n can_provision = 
fields.BooleanField(default=False)\n can_create_repos = fields.BooleanField(default=False)\n\n def can_read_repo(self, repo_id):\n\n for field in ['admin_repos', 'read_repos', 'write_repos']:\n try:\n for ref in getattr(self, field).get('repometa', []):\n if repo_id in getattr(self, field)['repometa'][ref]:\n return True\n except AttributeError:\n pass\n\n return RepoMeta.load(repo_id).is_public\n\n def can_write_repo(self, repo_id):\n\n for field in ['admin_repos', 'write_repos']:\n try:\n for ref in getattr(self, field).get('repometa', []):\n if repo_id in getattr(self, field)['repometa'][ref]:\n return True\n except AttributeError:\n pass\n\n return False\n\n def __init__(self, *args, **kwargs):\n super(KeyAuthContext, self).__init__(*args, **kwargs)\n self._id = sha( str(SystemRandom().random()) ).hexdigest()\n\nKeyAuthContext.set_storage(db)\n\n\nclass RepoMeta(StoredObject):\n _meta = {\n 'optimistic': True\n }\n\n _id = fields.StringField(primary=True)\n is_public = fields.BooleanField(default=False)\n access_read = fields.ForeignField(\n 'KeyAuthContext',\n backref='read_repos',\n list=True,\n )\n access_write = fields.ForeignField(\n 'KeyAuthContext',\n backref='write_repos',\n list=True,\n )\n access_admin = fields.ForeignField(\n 'KeyAuthContext',\n backref='admin_repos',\n list=True,\n )\n\nRepoMeta.set_storage(db)\n\n\ndef get_auth_context():\n \"\"\" Return an instance of a descendant of AuthContext.\n \"\"\"\n # TODO: Make this work :)\n if current_app.config.get('ignore_auth', False):\n return current_app.no_auth_user\n\n try:\n context = KeyAuthContext.load(request.args.get('key'))\n return context\n except KeyError:\n return PublicAuthContext()","repo_name":"lyndsysimon/git-web-api","sub_path":"app/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"39307869806","text":"import numpy as np\nimport pandas as pd\n\n# import data from original csv file\nfile = pd.read_csv('temperatures.csv')\nmatrix_new = file.to_numpy()\n\n# definition for time value and temperature value for each year as list\ntime_value_2015 = []\ntemperature_value_2015 = []\ntime_value_2016 = []\ntemperature_value_2016 = []\ntime_value_2017 = []\ntemperature_value_2017 = []\ntime_value_2018 = []\ntemperature_value_2018 = []\ntime_value_2019 = []\ntemperature_value_2019 = []\ntime_value_2020 = []\ntemperature_value_2020 = []\n\n# select time and temperature values for different years and store them in lists\nfor i in range(matrix_new.shape[0]):\n if matrix_new[i][2] < 201600000000:\n time_value_2015.append(matrix_new[i][2])\n temperature_value_2015.append(matrix_new[i][3])\n if 201600000000 < matrix_new[i][2] < 201700000000:\n time_value_2016.append(matrix_new[i][2])\n temperature_value_2016.append(matrix_new[i][3])\n if 201700000000 None:\n if litre < 100 and self.m3_already_increased is False:\n self.m3 = self.m3 + 1\n self._update_current_value(litre)\n self.m3_already_increased = True\n elif litre >= 400 and litre < 700 and self.m3_already_increased is True:\n self._update_current_value(litre)\n self.m3_already_increased = False\n else:\n self._update_current_value(litre)\n self._calc_instant_consumtion()\n self._round()\n\n def _update_current_value(self, litre):\n self._previous_value.value = self._current_value.value\n self._previous_value.time = self._current_value.time\n self._current_value.value = self.m3 + litre / 1000\n self._current_value.time = datetime.now()\n 
self._litre = litre\n self.value = self._current_value.value\n\n def _calc_instant_consumtion(self):\n if self._previous_value.time == 0:\n return\n\n self._instant_consumption_l = (\n self._current_value.value - self._previous_value.value\n ) * 1000\n delta_time_s = self._get_delta_between_times(\n self._current_value.time, self._previous_value.time\n )\n self.instant_consumption_l_per_min = (\n self._instant_consumption_l / delta_time_s * 60\n )\n\n def _get_delta_between_times(self, time1: datetime, time2: datetime) -> float:\n return (time1 - time2).total_seconds()\n\n def _round(self) -> None:\n self.value = round(self.value, 5)\n self.instant_consumption_l_per_min = round(\n self.instant_consumption_l_per_min, 2\n )\n self.instant_consumption_l_per_min += 0.0 # remove possible negative -0.00\n self._litre = round(self._litre, 2)\n self._instant_consumption_l = round(self._instant_consumption_l, 3)\n self._instant_consumption_l += 0.0 # remove possible negative -0.00\n self._current_value.value = round(self._current_value.value, 6)\n self._previous_value.value = round(self._previous_value.value, 6)\n","repo_name":"paulianttila/dialeye2mqtt","sub_path":"src/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"12988705359","text":"import base64\nimport cStringIO\nimport cgi\nimport datetime\nimport google.appengine.api.blobstore\nimport google.appengine.api.datastore\nimport google.appengine.api.datastore_errors\nimport google.appengine.api.datastore_types\nimport hashlib\nimport httplib\nimport logging\nimport os\nimport re\n\n\nSALT = 'salt'\n\nBLOB_KEY_HEADER = google.appengine.api.blobstore.BLOB_KEY_HEADER\n\nUPLOAD_INFO_CREATION_HEADER = (google.appengine.api.blobstore.\n UPLOAD_INFO_CREATION_HEADER)\n\nREDIRECT_HEADER = 'X-Accel-Redirect: %s\\n'\n\nUPLOAD_URL_PATTERN = '/%s(.*)'\n\nBLOB_KEY_HEADER_PATTERN = BLOB_KEY_HEADER+': (.*)'\n\nBASE_CREATION_HEADER_FORMAT = '%Y-%m-%d %H:%M:%S'\n\nCONTENT_PART = \"\"\"Content-Type: message/external-body; blob-key=\"%(blob_key)s\"; access-type=\"%(blob_key_header)s\"\nMIME-Version: 1.0\nContent-Disposition: form-data; name=\"file\"; filename=\"%(filename)s\"\n\nContent-Type: %(content_type)s\nMIME-Version: 1.0\nContent-Length: %(content_length)s\ncontent-type: %(content_type)s\ncontent-md5: %(content_md5)s\ncontent-disposition: form-data; name=\"file\"; filename=\"%(filename)s\"\n%(creation_header)s: %(timestamp)s\n\n\"\"\"\n\nSIMPLE_FIELD = \"\"\"Content-Type: text/plain\nMIME-Version: 1.0\nContent-Disposition: form-data; name=\"%(name)s\"\n\n%(value)s\"\"\"\n\n\ndef EncodeBlobKey(path):\n \"\"\"Encodes blob key.\n\n Args:\n path: The original file path.\n\n Returns:\n String version of BlobKey that is unique within the BlobInfo datastore.\n None if there are too many name conflicts.\n \"\"\"\n blob_key = base64.urlsafe_b64encode(\n '%i.%s' % (int(os.path.basename(path)),\n hashlib.md5(os.environ['APPLICATION_ID']+SALT).digest()))\n datastore_key = google.appengine.api.datastore.Key.from_path(\n google.appengine.api.blobstore.BLOB_INFO_KIND, blob_key)\n try:\n google.appengine.api.datastore.Get(datastore_key)\n except google.appengine.api.datastore_errors.EntityNotFoundError:\n return blob_key\n return None\n\n\ndef DecodeBlobKey(blob_key):\n \"\"\"Decodes a given BlobKey.\n\n Args:\n blob_key: BlobKey instance.\n\n Returns:\n Blob id.\n \"\"\"\n if isinstance(blob_key, 
google.appengine.api.datastore_types.BlobKey):\n encoded_key = str(blob_key)\n elif isinstance(blob_key, google.appengine.api.datastore_types.Key):\n encoded_key = str(blob_key.name())\n _id, _hash = base64.urlsafe_b64decode(encoded_key).split('.', 1)\n assert hashlib.md5(os.environ['APPLICATION_ID']+SALT).digest() == _hash\n return _id.zfill(10)\n\n\nclass UploadCGIHandler(object):\n \"\"\"Handles upload posts for the Blobstore API.\"\"\"\n\n def __init__(self, upload_url='upload/'):\n \"\"\"Constructor.\n\n Args:\n upload_url: URL which will be used for uploads.\n \"\"\"\n\n self.upload_url = upload_url\n\n def __call__(self, fp, environ):\n \"\"\"Executes the handler.\n\n Args:\n fp: A file pointer to the CGI input stream.\n environ: The CGI environment.\n\n Returns:\n File pointer to the CGI input stream.\n \"\"\"\n\n match = re.match(UPLOAD_URL_PATTERN % self.upload_url,\n environ['PATH_INFO'])\n if match == None:\n return fp\n\n upload_session_key = match.group(1).strip('/')\n\n try:\n upload_session = google.appengine.api.datastore.Get(\n upload_session_key)\n except google.appengine.api.datastore_errors.EntityNotFoundError:\n logging.error('Upload session %s not found' % upload_session_key)\n upload_session = None\n\n if self.upload_url.endswith('/'):\n upload_url = self.upload_url[:-1]\n else:\n upload_url = self.upload_url\n environ['PATH_INFO'] = environ['REQUEST_URI'] = '/' + upload_url\n\n def splitContentType(content_type):\n parts = content_type.split(';')\n pairs = dict([(key.lower().strip(), value) for key, value\n in [p.split('=', 1) for p in parts[1:]]])\n return parts[0].strip(), pairs\n \n main_type, key_values = splitContentType(environ['CONTENT_TYPE'])\n boundary = key_values.get('boundary')\n\n form_data = cgi.parse_multipart(fp, {'boundary': boundary})\n data = dict([(k, ''.join(form_data[k])) for k in form_data])\n\n fields = [f for f in set([k.split('.')[0] for k in data.keys()])\n if f+'.content_type' in data]\n\n def format_timestamp(stamp):\n return '%s.%06d' % (\n stamp.strftime(BASE_CREATION_HEADER_FORMAT),\n stamp.microsecond)\n\n message = []\n\n for field in fields:\n now = datetime.datetime.now()\n timestamp = format_timestamp(now)\n blobkey = EncodeBlobKey(data[field+'.path'])\n\n blob_entity = google.appengine.api.datastore.Entity(\n '__BlobInfo__', name=blobkey)\n\n blob_entity['content_type'] = data[field+'.content_type']\n blob_entity['creation'] = now\n blob_entity['filename'] = data[field+'.name']\n blob_entity['size'] = int(data[field+'.size'])\n\n google.appengine.api.datastore.Put(blob_entity)\n\n message.append('--' + boundary)\n values = dict(\n blob_key_header=BLOB_KEY_HEADER,\n blob_key=blobkey,\n filename=data[field+'.name'],\n content_type=data[field+'.content_type'],\n content_md5=data[field+'.md5'],\n content_length=data[field+'.size'],\n creation_header=UPLOAD_INFO_CREATION_HEADER,\n timestamp=timestamp\n )\n message.append(CONTENT_PART % values)\n\n del data[field+'.name']\n del data[field+'.content_type']\n del data[field+'.path']\n del data[field+'.md5']\n del data[field+'.size']\n\n for field in data:\n message.append('--' + boundary)\n message.append(SIMPLE_FIELD %\n {'name': field, 'value': data[field]})\n \n message += ['--' + boundary + '--']\n\n message = '\\n'.join(message)\n\n if upload_session:\n google.appengine.api.datastore.Delete(upload_session)\n\n environ['HTTP_CONTENT_LENGTH'] = str(len(message))\n\n return cStringIO.StringIO(message)\n\n\nclass CGIResponseRewriter(object):\n \"\"\"Response rewriter to modify the CGI 
output stream.\"\"\"\n\n    def __call__(self, fp, environ):\n        \"\"\"Execute rewriter code.\n\n        Args:\n            fp: File pointer to response output stream.\n            environ: The CGI environment.\n        \"\"\"\n        response = cStringIO.StringIO(fp.getvalue())\n        headers = httplib.HTTPMessage(response).headers\n        blob_key = ''\n        if BLOB_KEY_HEADER in ''.join(headers):\n            for header in headers:\n                match = re.match(BLOB_KEY_HEADER_PATTERN, header)\n                if match:\n                    blob_key = match.group(1)\n                    break\n\n            try:\n                blob_info = google.appengine.api.datastore.Get(\n                    google.appengine.api.datastore.Key.from_path(\n                        google.appengine.api.blobstore.BLOB_INFO_KIND,\n                        blob_key))\n            except google.appengine.api.datastore_errors.EntityNotFoundError:\n                return fp\n\n            output = cStringIO.StringIO()\n            for header in headers:\n                match = re.match(BLOB_KEY_HEADER_PATTERN, header)\n                if match:\n                    output.write(\n                        'Content-Type: %s\\n' % blob_info['content_type'])\n                elif header.startswith('Content-Length'):\n                    output.write('Content-Length: %s\\n' % blob_info['size'])\n                else:\n                    output.write(header)\n\n            def _URI(filename):\n                return '/_ah/blobstore/%s/%s/%s' % (\n                    os.environ['APPLICATION_ID'], filename[-1], filename)\n\n            output.write(\n                REDIRECT_HEADER % _URI(DecodeBlobKey(blob_info.key())))\n            output.write('\\n')\n\n            return output\n\n        return fp\n","repo_name":"Letractively/typhoonae","sub_path":"src/typhoonae/blobstore/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"19611622652","text":"# coding:utf-8\n\nfrom util.msg import params_msg\nfrom util.logger import logger\nfrom setting import BASE_URL, TIMEOUT, VERIFY, access_token, productCode\n\nimport requests\n\n\nparams = {\n\t\"payOrderNo\": \"\",\n}\n\ndef _pay_notify_mockPaySuccess(params=params, access_token=access_token):\n    \"\"\"\n    UnionPay payment callback\n    /pay/notify/mockPaySuccess\n    \"\"\"\n\n    url = f\"{BASE_URL}/pay/notify/mockPaySuccess\"\n    headers = {\"Authorization\": f\"bearer {access_token}\"}\n    params = params\n    \n    with requests.get(url=url, headers=headers, params=params, timeout=TIMEOUT, verify=VERIFY) as r: \n        logger.debug(params_msg(r, params)) \n        return r\n","repo_name":"ZdLemon/UC","sub_path":"api/mall_center_pay/_pay_notify_mockPaySuccess.py","file_name":"_pay_notify_mockPaySuccess.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"6900552198","text":"import pandas as pd\n\npokemon = pd.read_csv('data/pokemon.csv', index_col=0)\n\n# Filter the dataframe for the pokemon that have \n# either a total base score \"total_bs\" greater than 650 or a speed greater than 140\n# All the pokemon must be legendary as well (= 1) \n# Save this dataframe as an object named legendary_pokemon\n\nlegendary_pokemon = pokemon[((pokemon['total_bs'] > 650) | \n                             (pokemon['speed'] > 140)) & \n                             (pokemon['legendary'] == 1)]\n\n# Display the first 10 rows of the dataframe\n\nlegendary_pokemon.head(10)\n\n","repo_name":"UBC-MDS/programming-in-python-for-data-science","sub_path":"exercises/archive/S_02_17.py","file_name":"S_02_17.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"15"} +{"seq_id":"73366407692","text":"import requests\n\ndef bit_filter(item):\n    result = dict()\n    result['full_name'] = item['full_name']\n    result['html_url'] = item['links']['html']['href']\n    result['description'] = 
item['description']\n result['language'] = item['language']\n result['updated_at'] = item['updated_on']\n result['is_private'] = item['is_private']\n return result\n\ndef com_filter(res):\n result = dict()\n\n if len(res['values']) > 0:\n trun = res['values'][0]\n result['message'] = trun['rendered']['message']['raw']\n result['hash'] = trun['hash']\n result['html_url'] = trun['links']['html']['href']\n result['author_name'] = trun['author']['user']['display_name']\n result['author_img'] = trun['author']['user']['links']['avatar']['href']\n result['author_url'] = trun['author']['user']['links']['html']['href']\n result['updated_at'] = trun['date']\n else:\n result['error'] = 'Not Found'\n\n return result\n\ndef bit_parser(response):\n res = response['values'][:10]\n data = dict()\n data['data'] = list(map(bit_filter, res))\n return data\n\n\ndef update_access():\n k = 'nVjyhTRVL9sRE5DfKK'\n s = 'k4usw7VDDaRW8KeWScQjhxGb7jbfA7fw'\n header = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n data = {\n \"grant_type\": \"client_credentials\"\n }\n token = requests.post(f'https://{k}:{s}@bitbucket.org/site/oauth2/access_token', headers=header, data=data)\n if token.status_code == 200:\n with open('-.txt', 'w') as f:\n f.write(token.json()['access_token'])\n else:\n return 'error'\n\ndef get_token():\n with open('-.txt', 'r') as f:\n return f.read()","repo_name":"jdevolop/navigator-repo","sub_path":"bitbucket_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"7004948499","text":"from django.dispatch import receiver\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.core.mail import send_mail\nfrom .models import (\n User,\n Ticket,\n Contact,\n)\nfrom .forms import TicketForm\n\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.http import HttpResponseRedirect\n\nfrom django.views.generic import (\n CreateView,\n)\nfrom django.urls import reverse\n\n\nclass TicketCreationView(CreateView):\n model = Ticket\n form_class = TicketForm\n template_name = 'base/ticket.html'\n redirect_field_name = 'base/login.html'\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.ticket_creator = self.request.user\n obj.save()\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('ticket-panel', kwargs={'pk': self.request.user.id})\n\n\nclass ContactView(CreateView):\n model = Contact\n fields = '__all__'\n template_name = 'base/contact.html'\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n send_mail(\n 'Thank you for contact',\n f'Our team is reviewing your message, stay tuned for answer(your message {obj.body[0:20]})',\n 'testbautrel111@gmail.com',\n [f'{obj.email}'],\n )\n messages.success(self.request, 'Our team is reviewing your message')\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('home')\n\n\ndef ticketConfirmation(request):\n return render(request, 'base/ticket_confirm.html')\n\n\n@login_required\ndef ticketPanel(request, pk):\n user = User.objects.get(id=pk)\n tickets = user.ticket_set.all()\n context = {'user': user, 'tickets': tickets}\n return render(request, 
'base/tickets.html', context)\n\n\ndef ticketInfo(request, pk):\n    ticket = Ticket.objects.get(id=pk)\n    context = {'ticket': ticket}\n    return render(request, 'base/ticket_info.html', context)\n","repo_name":"KarolBautrel/Djanphone","sub_path":"base/communication_user.py","file_name":"communication_user.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"10004131625","text":"from flask import Flask, render_template, request\nimport signal\nimport json\nimport threading\nimport time\nfrom kafka import KafkaConsumer\nfrom threading import Thread\nfrom flask_socketio import SocketIO, emit\nfrom map import createMap\nloc_arr=[]\ninterval = 30 \napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'ir'\nsocketio = SocketIO(app, cors_allowed_origins='*')\n\n\n@app.route('/')\ndef index():\n    return 'Hello'\n\n@socketio.on('connect')\ndef handle_connect():\n    auth_header = request.headers.get('Authorization')\n    print(auth_header)\n    token = auth_header.split(' ')[1] if auth_header else None\n \n    # Authenticate client using token\n    if token != 'ir':\n        return False\n    emit('message', 'Hello client!')\n\n\ndef start_consumer():\n    consumer = KafkaConsumer('locations', bootstrap_servers=['localhost:9092'])\n    for msg in consumer:\n        # loc_arr.extend(json.loads(msg.value.decode('utf-8')))\n        # print(json.loads(msg.value.decode('utf-8')))\n        # for i in json.loads(msg.value.decode('utf-8')):\n        #     print(i)\n        # print(json.loads(msg.value.decode('utf-8')))\n        loc_arr.extend(json.loads(msg.value.decode('utf-8')))\n\ndef callMap():\n    global loc_arr\n    loc=[]\n    loc=loc_arr\n    loc_arr=[]\n    try:\n        html = createMap(loc, socketio)\n        # html = createMap(loc_arr)\n        # print(html)\n        # print(msg.value.decode('utf-8'))\n        socketio.emit('map', html)\n    except Exception as e:\n        print(e)\n        print(\"An exception occurred\")\n\ndef run_function_every_30_seconds():\n    threading.Timer(10.0, run_function_every_30_seconds).start()\n    main_thread = threading.Thread(target= callMap)\n    main_thread.start()\n\nrun_function_every_30_seconds()\n \n\n\n@socketio.on('disconnect')\ndef test_disconnect():\n    print('Client disconnected')\n\n@socketio.on('error')\ndef test_error():\n    print('Client error')\n\nif __name__ ==\"__main__\":\n    socketio.start_background_task(start_consumer)\n    socketio.run(app, port=8001)\n    # time.sleep(10)\n    # run_function_every_30_seconds()\n \n","repo_name":"Anurag22015/CSE508_Winter2023_Project_10","sub_path":"Final Deliverables/MapCreator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"13739922926","text":"#!/usr/bin/python3\n\nimport jwt\nimport mysql.connector\nfrom mysql.connector import errorcode\nfrom hashlib import sha256\n\nclass Database:\n    def __init__(self, host, username, password, dbname):\n        try:\n            self.connect = mysql.connector.connect(host=host, user=username, passwd=password, database=dbname)\n        except mysql.connector.Error as err:\n            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n                print(\"Something is wrong with your user name or password\")\n            elif err.errno == errorcode.ER_BAD_DB_ERROR:\n                print(\"Database does not exist\")\n            else:\n                print(err)\n        self.cur = self.connect.cursor(buffered=True)\n\n# def __del__(self):\n#     self.cur.close()\n#     self.connect.close()\n \n    def register(self, data):\n        command = (\"insert into users \"\n                   \"(username, first_name, last_name, password) 
\"\n \"values (%(username)s, %(first_name)s, %(last_name)s, %(password)s)\"\n )\n data['password'] = sha256(data['password'].encode()).hexdigest()\n self.cur.execute(command, data)\n self.connect.commit()\n\n def login(self, data):\n data['password'] = sha256(data['password'].encode()).hexdigest()\n command = (\"select id from users where username=%(username)s and password=%(password)s\")\n self.cur.execute(command, data)\n result = self.cur.fetchone()\n if result is None:\n return \"user doesn't exist\", None\n token = self.get_token(data['username'])\n user_id = result[0]\n command = (\"insert into session (token, user_id) values (%s, %s)\")\n self.cur.execute(command, (token, user_id))\n self.connect.commit()\n return \"Login successful\", token\n \n def get_username(self, user_id):\n command = (\"select username from users where id=%s\")\n self.cur.execute(command, (user_id,))\n res = self.cur.fetchone()\n if not res:\n return None\n return res[0]\n \n def get_token(self, username):\n secret = 'pythonblog'\n algorithm = 'HS256'\n token = jwt.encode({'user': username}, secret, algorithm=algorithm).decode()\n return token\n\n def logout(self, token):\n command = (\"delete from session where token=%s\")\n self.cur.execute(command, (token,))\n self.connect.commit()\n return \"OK\"\n \n def is_login(self, token):\n command = (\"select user_id from session where token=%s\")\n self.cur.execute(command, (token,))\n res = self.cur.fetchone()\n if not res:\n return None\n return res[0]\n\n def get_lists_user(self):\n command = (\"select username, first_name, last_name from users\")\n self.cur.execute(command)\n arr = self.cur.fetchall()\n return arr\n \n def create_blog(self, data):\n command = (\"insert into blogs \"\n \"(theme, body) values \"\n \"(%(theme)s, %(body)s)\"\n )\n if data['token'] is not None:\n query = (\"select user_id from session where token=%s\")\n self.cur.execute(query, (data['token'],))\n uid = self.cur.fetchone()\n if uid is None:\n return \"no user with this token\"\n else:\n data['uid'] = uid[0]\n command = (\"insert into blogs \"\n \"(theme, body, user_id) values \"\n \"(%(theme)s, %(body)s, %(uid)s)\"\n )\n else:\n data['uid'] = None\n \n self.cur.execute(command, data)\n self.connect.commit()\n return \"Add blog successful\"\n\n def delete_blog(self, data):\n command = (\"delete from blogs where theme=%(theme)s\")\n self.cur.execute(command, data)\n self.connect.commit()\n return \"Delete successful\"\n\n def get_blog(self, theme):\n command = (\"select id from blogs where theme=%s\")\n self.cur.execute(command, (theme,))\n result = self.cur.fetchone()\n if not result:\n return None\n return result[0]\n\n def get_blogs(self):\n command = (\"select theme, body, user_id from blogs\")\n self.cur.execute(command)\n blogs = self.cur.fetchall()\n return blogs\n\n def get_blogs_from_users(self):\n command = (\"select theme, body, user_id from blogs where user_id is not null\")\n self.cur.execute(command)\n blogs = self.cur.fetchall()\n return blogs\n \n def edit_blog(self, data):\n command = (\"update blogs set theme=%(new_theme)s, body=%(body)s where theme=%(theme)s\")\n self.cur.execute(command, data)\n self.connect.commit()\n return \"edit successful\"\n\n def create_post(self, data, blogs):\n command = (\n \"insert into `posts` (head, body) \"\n \"values (%(head)s, %(body)s)\"\n )\n self.cur.execute(command, data)\n self.connect.commit()\n\n command = (\n \"insert into `post_blog` (post_id, blog_id) \"\n \"values (%s, %s)\"\n )\n post_id = self.get_post(data['head'])\n 
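# attach the new post to each named blog via the post_blog junction table\n        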
for blog in blogs:\n            blog = blog.strip()\n            blog_id = self.get_blog(blog)\n            self.cur.execute(command, (post_id, blog_id))\n            self.connect.commit()\n        return \"Create post successful\"\n\n \n    def get_post(self, head):\n        command = (\"select id from `posts` where head=%s\")\n        self.cur.execute(command, (head, ))\n        res = self.cur.fetchone()\n        if not res:\n            return None\n        return res[0]\n\n    def delete_post(self, data):\n        command = (\"delete from posts where head=%(head)s\")\n        self.cur.execute(command, data)\n        self.connect.commit()\n        return \"delete post successful\"\n \n    def create_comment(self, data, comment):\n        command = (\"insert into `comments` (user_id, theme, body, post_id) \"\n                   \"values (%(user_id)s, %(theme)s, %(body)s, %(post_id)s)\" \n        )\n        post_id = self.get_post(data['post_id'])\n        if post_id is None:\n            return \"post doesn't exist\"\n        data['post_id'] = post_id\n        if comment != 'None':\n            data['comm_id'] = self.get_comment(comment)\n            command = (\"insert into `comments` (user_id, theme, body, post_id, comm_id) \"\n                       \"values (%(user_id)s, %(theme)s, %(body)s, %(post_id)s, %(comm_id)s)\" \n            )\n        self.cur.execute(command, data)\n        self.connect.commit()\n        return \"create comment successful\"\n\n    def get_comment(self, theme):\n        command = (\"select id from comments where theme=%s\")\n        self.cur.execute(command, (theme,))\n        res = self.cur.fetchone()\n        if not res:\n            return None\n        return res[0]\n \n    def get_user_comments(self, user_id):\n        command = (\"select theme, body, post_id from comments where user_id=%s\")\n        self.cur.execute(command, (user_id,))\n        comments = self.cur.fetchall()\n        string = \"\"\"\nPost id: {}\nAuthor_id: {}\ntheme: {}\ncomment: {}\n------------------------\"\"\"\n        for comment in comments:\n            theme, body, post = comment\n            print(string.format(post, user_id, theme, body))\n ","repo_name":"Talkytitan5127/pythonpark","sub_path":"blog/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"43430571877","text":"#!/usr/bin/env python\n\n\"\"\"\nset_attitude_target.py: (Copter Only)\nThis example shows how to move/direct Copter and send commands\n in GUIDED_NOGPS mode using DroneKit Python.\nCaution: A lot of unexpected behaviors may occur in GUIDED_NOGPS mode.\n Always watch the drone movement, and make sure that you are in a danger-free environment.\n Land the drone as soon as possible when it shows any unexpected behavior.\nTested in Python 2.7.10\n\"\"\"\n\n\n\nimport time\nimport math\nfrom sensor_msgs.msg import NavSatFix\nfrom std_msgs.msg import Float32\n# from vitarana_drone.msg import *\n# from pid_tune.msg import PidTune\nfrom sensor_msgs.msg import Imu\nfrom std_msgs.msg import Float32\nimport rospy\nimport time\nimport tf\nfrom numpy import zeros\nfrom collections import deque\nfrom sensor_msgs.msg import PointCloud2\nfrom sensor_msgs import point_cloud2\nimport rospy\nimport time\n\n\n\n\n\ndef lat_to_x(input_latitude):\n    return 110692.0702932625*(input_latitude+35.3632591)\ndef long_to_x(input_longitude):\n    return -105292.0089353767*(input_longitude-149.1653452)\n\ndef x_to_lat(input_x):\n    return input_x/110692.0702932625 - 35.3632591\ndef x_to_long(input_x):\n    return input_x/(-105292.0089353767) + 149.1653452\n\noffset_x = 50\noffset_y = 0\nmaze = zeros([101,101],int)\nfactor = 0.5\n\ndef astar(maze,start,end):\n    parent=zeros([101,101,2],int)\n    vis=zeros([101,101],int)\n    insertq=zeros([101,101],int)\n    for i in range(101):\n        for j in range(101):\n            
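# -1 marks cells with no recorded parent; path reconstruction stops there\n            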
parent[i][j][0]=parent[i][j][1]=-1\n\n q=deque()\n q.append(start)\n val=start\n insertq[start[0]][start[1]]=1\n\n while q:\n temp=q.popleft()\n vis[temp[0]][temp[1]]=1\n if temp[0] == end[0] and temp[1] == end[1]:\n break\n if temp[0]-1>=0 and insertq[temp[0]-1][temp[1]]==0 and maze[temp[0]-1][temp[1]]==0:\n insertq[temp[0]-1][temp[1]]=1\n q.append([temp[0]-1,temp[1]])\n parent[temp[0]-1][temp[1]][0]=temp[0]\n parent[temp[0]-1][temp[1]][1]=temp[1]\n if temp[1]+1<=100 and insertq[temp[0]][temp[1]+1]==0 and maze[temp[0]][temp[1]+1]==0:\n insertq[temp[0]][temp[1]+1]=1\n q.append([temp[0],temp[1]+1])\n parent[temp[0]][temp[1]+1][0]=temp[0]\n parent[temp[0]][temp[1]+1][1]=temp[1]\n if temp[1]-1>=0 and insertq[temp[0]][temp[1]-1]==0 and maze[temp[0]][temp[1]-1]==0:\n insertq[temp[0]][temp[1]-1]=1\n q.append([temp[0],temp[1]-1])\n parent[temp[0]][temp[1]-1][0]=temp[0]\n parent[temp[0]][temp[1]-1][1]=temp[1]\n if temp[0]+1<=100 and insertq[temp[0]+1][temp[1]]==0 and maze[temp[0]+1][temp[1]]==0:\n insertq[temp[0]+1][temp[1]]=1\n q.append([temp[0]+1,temp[1]])\n parent[temp[0]+1][temp[1]][0]=temp[0]\n parent[temp[0]+1][temp[1]][1]=temp[1]\n if temp[0]+1<=100 and temp[1]+1<=100 and insertq[temp[0]+1][temp[1]+1]==0 and maze[temp[0]+1][temp[1]+1]==0:\n insertq[temp[0]+1][temp[1]+1]=1\n q.append([temp[0]+1,temp[1]+1])\n parent[temp[0]+1][temp[1]+1][0]=temp[0]\n parent[temp[0]+1][temp[1]+1][1]=temp[1]\n\n if temp[0]+1<=100 and temp[1]-1>=0 and insertq[temp[0]+1][temp[1]-1]==0 and maze[temp[0]+1][temp[1]-1]==0:\n insertq[temp[0]+1][temp[1]-1]=1\n q.append([temp[0]+1,temp[1]-1])\n parent[temp[0]+1][temp[1]-1][0]=temp[0]\n parent[temp[0]+1][temp[1]-1][1]=temp[1]\n\n if temp[0]-1>=0 and temp[1]+1<=100 and insertq[temp[0]-1][temp[1]+1]==0 and maze[temp[0]-1][temp[1]+1]==0:\n insertq[temp[0]-1][temp[1]+1]=1\n q.append([temp[0]-1,temp[1]+1])\n parent[temp[0]-1][temp[1]+1][0]=temp[0]\n parent[temp[0]-1][temp[1]+1][1]=temp[1]\n\n if temp[0]-1>=0 and temp[1]-1>=0 and insertq[temp[0]-1][temp[1]-1]==0 and maze[temp[0]-1][temp[1]-1]==0:\n insertq[temp[0]-1][temp[1]-1]=1\n q.append([temp[0]-1,temp[1]-1])\n parent[temp[0]-1][temp[1]-1][0]=temp[0]\n parent[temp[0]-1][temp[1]-1][1]=temp[1]\n \n temp=[end[0],end[1]]\n q1=deque()\n path=[]\n\n while temp[0]>=0 :\n q1.append(temp)\n temp=parent[temp[0]][temp[1]]\n\n #****************RETURNING PATH*******************************\n while q1:\n temp=q1.pop()\n path.append(temp)\n\n return path \n\n\ndef operate_astar(start,end,drone,maze,obs):\n print(\"INSIDE OPERATE ASTAR\")\n global factor\n print(start)\n print(end)\n start[0] = (int)(start[0]/factor)+50\n start[1] = (int)(start[1]/factor)\n final_dest = end\n print(final_dest)\n end[0] = (int)(end[0]/factor)+50\n end[1] = (int)(end[1]/factor)\n print(start)\n print(end)\n for p in obs:\n if abs(p[1])<1:\n cur_x = (int)((drone[0]+p[0])/factor)+50\n cur_y = (int)((drone[1]+p[2])/factor)\n k1 = -1\n while k1<=1:\n k2=-1\n while k2<=1:\n maze[cur_x+k1][cur_y+k2]=1\n k2=k2+1\n k1=k1+1\n # maze[(int)((drone[0]+p[0])/factor)+50][(int)((drone[1]+p[2])/factor)]=1\n # maze[(int)((drone[0]+p[0])/factor)+50+1][(int)((drone[1]+p[2])/factor)]=1\n # maze[(int)((drone[0]+p[0])/factor)+50-1][(int)((drone[1]+p[2])/factor)]=1\n\n print(\"PRINTING MAZE\")\n for i in range(100):\n for j in range(100):\n if maze[i][j] == 1:\n print(i,j)\n maze[start[0]][start[1]] = 0\n path = astar(maze,start,end)\n print(\"start end\")\n print(start)\n print(end)\n # print(path[1])\n print(\"PATH\")\n print(\"-------\")\n print(path)\n print(\"------\")\n 
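# a path of length <= 1 means BFS found no intermediate waypoint, so head straight for the goal\n    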
print(len(path))\n    print(final_dest)\n    if(len(path)<=1):\n        return [factor*(final_dest[0]-50),factor*final_dest[1]]\n    else:\n        return [factor*(path[1][0]-50) , factor*path[1][1]]\n \n\n\n\n\nclass Iris():\n    \"\"\"docstring for Iris\"\"\"\n    def __init__(self):\n        rospy.init_node('position_controller')\n        self.alt_pub = rospy.Publisher('/Thrust', Float32, queue_size=10)\n        self.lat_pub = rospy.Publisher('/Roll', Float32, queue_size=10)\n        self.long_pub = rospy.Publisher('/Pitch', Float32, queue_size=10)\n        rospy.Subscriber(\"/mavros/global_position/global\", NavSatFix, self.PoseCallBack)\n        rospy.Subscriber('/depth_camera/depth/points', PointCloud2, self.callback_pointcloud)\n        self.postn = [0,0,0]\n        self.set_postn = [0,26,3]\n        self.thrust =0\n        self.roll =0\n        self.pitch=0\n        self.gen = [[0,0,0]]\n\n    def callback_pointcloud(self,data):\n        assert isinstance(data, PointCloud2)\n        self.gen = point_cloud2.read_points(data, field_names=(\"x\", \"y\", \"z\"), skip_nans=True)\n        time.sleep(1)\n        # print(data.height,data.width)\n        # assert isinstance(data, PointCloud2)\n        # self.gen = point_cloud2.read_points(data, field_names=(\"x\", \"y\", \"z\"), skip_nans=True)\n        # time.sleep(1)\n        # # print type(gen)\n        # # for p in gen:\n        # #     print(p)\n        # #     print \" x : %.3f y: %.3f z: %.3f\" %(p[0],p[1],p[2])\n \n\n\n\n    def PoseCallBack(self,msg):\n        self.postn[0] = lat_to_x(msg.latitude)\n        self.postn[1] = long_to_x(msg.longitude)\n        self.postn[2] = msg.altitude - 603.35877\n\n\n    def guider(self):\n        self.iter = 0\n        self.lat_iter=0\n        self.long_iter=0\n        self.prev_height_err = 0\n        self.prev_lat_err =0\n        self.prev_long_err =0\n        self.flag = 0\n        self.next_lat = 0\n        self.next_long = 0\n        self.condition = 0\n        self.arr = [[0,0,0]]\n        while(True):\n            global maze\n            if(abs(self.next_lat-self.postn[0])<2 and abs(self.next_long - self.postn[1])<2):\n                time.sleep(3)\n                val = operate_astar([self.next_lat,self.next_long],[self.set_postn[0],self.set_postn[1]],[self.postn[0],self.postn[1]],maze,self.gen)\n                self.next_lat = val[0]\n                self.next_long = val[1]\n            self.alt_pub.publish(self.set_postn[2])\n            self.lat_pub.publish((self.next_lat))\n            self.long_pub.publish((self.next_long))\n            time.sleep(0.1)\n            print(self.postn)\n            print(\"PUBLISHED->\")\n            print(self.next_lat,self.next_long)\n            print(self.set_postn)\n\n \n \n\n\n\n\n\n\n\n\n\n#main\n\nif __name__ == \"__main__\":\n    try:\n        publish_cmd = Iris()\n        time.sleep(1)\n        publish_cmd.guider()\n        rospy.spin()\n    except rospy.ROSInterruptException:\n        rospy.loginfo(\"node terminated.\")\n\n","repo_name":"DRDO-Based-Obstacle-Avioding-Drone/Drone","sub_path":"scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"39231976932","text":"import torch.nn as nn\nimport torch\n\n#from IPython import embed\n# Here we define our model as a class\nclass l3_dense(nn.Module):\n\n    def __init__(self,emb_dim,num_classes):\n\n        super(l3_dense, self).__init__()\n        #self.flag = flag\n \n        self.num_classes = num_classes\n        self.emb_dim = emb_dim \n\n        #self.layer_1 = nn.Linear(self.emb_dim, self.num_classes) \n        self.model = nn.Sequential(\n            nn.Linear(self.emb_dim,512),\n            nn.BatchNorm1d(512),\n            nn.ReLU(),\n            nn.Dropout(p=0.2),\n            nn.Linear(512,128),\n            nn.BatchNorm1d(128),\n            nn.ReLU(),\n            nn.Dropout(p=0.2),\n\n            nn.Linear(128,64),\n            nn.BatchNorm1d(64),\n            nn.ReLU(),\n            nn.Dropout(p=0.2),\n\n            nn.Linear(64,self.num_classes)\n\n        )\n \n    def forward(self, x):\n        # x shape is [batch_size, emb_dim]\n        y = 
self.model(x) # shape is [batch_size, num_classes]\n \n \n        return y\n","repo_name":"imkhoa99/Research-Project-2021","sub_path":"TAU-urban-audio-visual-scenes/train_combine/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"71760370573","text":"\"\"\"Download FASTAs of genomes for each dataset.\n\"\"\"\n\nimport argparse\nfrom collections import defaultdict\nimport gzip\nimport hashlib\nimport os\nimport re\nimport shutil\nimport textwrap\nimport time\n\nfrom Bio import Entrez\n\n__author__ = 'Hayden Metsky '\n\n\nEntrez.email = \"hayden@mit.edu\"\n\nDATASET_PYTHON_TEMPLATE_UNSEGMENTED = \"dataset_unsegmented.template.py\"\nDATASET_PYTHON_TEMPLATE_SEGMENTED = \"dataset_segmented.template.py\"\nDATASET_PYTHON_TEMPLATE_SEGMENTED_CONSOLIDATED = \"dataset_segmented.consolidated.template.py\"\n\nclass Dataset:\n\n    def __init__(self, name, description, is_dna,\n                 is_not_believed_to_be_human_pathogen,\n                 is_erv, compare_to_explicit_tax_name,\n                 subset_of):\n        self.name = name\n        self.description = description\n        self.is_dna = is_dna\n        self.is_not_believed_to_be_human_pathogen = \\\n            is_not_believed_to_be_human_pathogen\n        self.is_erv = is_erv\n        self.compare_to_explicit_tax_name = compare_to_explicit_tax_name\n        self.subset_of = subset_of\n        self.description_pattern = re.compile('^' + description + '$')\n\n    def __hash__(self):\n        return hash(self.name)\n\n    def __eq__(self, other):\n        return self.name == other.name\n\n    @staticmethod\n    def from_line(line):\n        ls = line.split('\\t')\n        name = ls[0]\n        description = ls[1]\n        if len(ls) > 2:\n            options = ls[2].split(',')\n        else:\n            options = []\n        is_dna = 'dna' in options\n        is_not_believed_to_be_human_pathogen = \\\n            'not_believed_to_be_human_pathogen' in options\n        is_erv = 'erv' in options\n        compare_to_explicit_tax_name = \\\n            'compare_to_explicit_tax_name' in options\n\n        subset_of = None\n        for option in options:\n            if option.startswith('subset:'):\n                if subset_of is not None:\n                    raise ValueError(\"More than one subset was given\")\n                subset_of = option[len('subset:'):]\n\n        available_options = ['dna', 'not_believed_to_be_human_pathogen',\n                             'erv', 'compare_to_explicit_tax_name']\n        for option in options:\n            if (not option.startswith('subset:') and\n                    option not in available_options):\n                raise ValueError(\"Unknown option %s\" % option)\n\n        return Dataset(name, description, is_dna,\n                       is_not_believed_to_be_human_pathogen,\n                       is_erv, compare_to_explicit_tax_name,\n                       subset_of)\n\n\ndef read_dataset_list(fn):\n    datasets = []\n    with open(fn) as f:\n        for line in f:\n            datasets += [Dataset.from_line(line.rstrip())]\n    return datasets\n\n\nclass SequenceFromAccessionList:\n\n    def __init__(self, representative, name, host,\n                 lineage, taxonomy_name, segment):\n        self.representative = representative\n        self.name = name\n        self.host = host\n        self.lineage = lineage\n        self.taxonomy_name = taxonomy_name\n        self.segment = segment\n        self.human_is_host = 'human' in host\n        self.is_segmented = segment != 'segment'\n\n    def __hash__(self):\n        return hash(self.name)\n\n    def __eq__(self, other):\n        return self.name == other.name\n\n    @staticmethod\n    def from_line(line):\n        ls = line.split('\\t')\n\n        segment = ls[5]\n        removals = ['RNA', 'DNA']\n        for removal in removals:\n            if (segment.startswith('segment ' + removal + ' ') and\n                    len(segment) > len('segment ' + removal + ' ')):\n                # change 'segment removal X' to 'segment X'\n                segment = 'segment ' + segment[len('segment ' + removal + ' '):]\n\n        
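# remaining columns: representative, name, host, lineage, taxonomy name\n        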
return SequenceFromAccessionList(ls[0], ls[1], ls[2], ls[3], ls[4],\n segment)\n\n\ndef read_genome_accession_list(fn):\n sequences = []\n with open(fn) as f:\n for line in f:\n if line.startswith('#'):\n continue\n sequences += [SequenceFromAccessionList.from_line(line.rstrip())]\n return sequences\n\n\ndef uniqueify_genome_accession_list(sequences):\n # some sequences have the same name (neighbor) but different representatives\n # (i.e., different reference sequences) and therefore appear more than once\n # in the list; the sequences of these genomes are all we care about, so\n # \"unique-ify\" them so that each sequence name appears just once\n # (arbitrarily select the sequence, so in effect arbitrarily pick the\n # representative)\n return list(set(sequences))\n\n\ndef filter_sequences_with_nonhuman_host(sequences, args):\n # return only those sequences from lineages where humans are a host\n # sometimes the accession list incorrectly leaves out human as a\n # host, so also include any in args.human_host_lineages_to_add (if given)\n lineages_to_include = set(s.lineage for s in sequences if s.human_is_host)\n\n if args.human_host_lineages_to_add:\n with open(args.human_host_lineages_to_add) as f:\n for line in f:\n ls = line.rstrip().split('\\t')\n lineages_to_include.add(','.join(ls))\n\n return [s for s in sequences if s.lineage in lineages_to_include]\n\n\ndef verify_dataset_list(datasets):\n # each dataset name should appear only once\n assert len(set([d.name for d in datasets])) == len(datasets)\n\n\ndef verify_sequence_names_are_unique(sequences):\n assert len(set([s.name for s in sequences])) == len(sequences)\n\n\ndef find_datasets_for_name(datasets, lineage_most_specific, tax_name):\n # search through dataset descriptions, which are based on taxonomy\n # names, to find the one(s) matching lineage_most_specific\n # (unless dataset is explicitly supposed to look at tax_name)\n # search linearly and find all in case there's more than one (we'll\n # want to check later on that there is exactly one)\n matches = []\n for dataset in datasets:\n if dataset.compare_to_explicit_tax_name:\n compare_to = tax_name\n else:\n compare_to = lineage_most_specific\n if dataset.description_pattern.match(compare_to):\n matches += [dataset]\n return matches\n\n\ndef pair_each_sequence_with_dataset(sequences, datasets, datasets_to_skip,\n allow_multiple_dataset_matches):\n matches = {}\n for sequence in sequences:\n # there is sequence.taxonomy_name, but the dataset descriptions\n # are based on the most specific label in the lineage\n # (sequence.lineage), which is often (but not always) the same as\n # sequence.taxonomy_name\n sequence_lineage_most_specific = sequence.lineage.split(',')[-1]\n matching_datasets = find_datasets_for_name(datasets,\n sequence_lineage_most_specific,\n sequence.taxonomy_name)\n if len(matching_datasets) < 1:\n raise ValueError(\"No matching datasets for %s\" % sequence.lineage)\n\n matching_datasets = [d for d in matching_datasets if d.name not in \\\n datasets_to_skip]\n if len(matching_datasets) == 0:\n # skip this dataset\n continue\n if not allow_multiple_dataset_matches and len(matching_datasets) > 1:\n raise ValueError(\"More than one matching dataset for %s\" %\n sequence.lineage)\n matches[sequence] = matching_datasets\n return matches\n\n\ndef read_dataset_skip_list(fn):\n datasets_to_skip = set()\n with open(fn) as f:\n for line in f:\n datasets_to_skip.add(line.rstrip())\n return datasets_to_skip\n\n\ndef read_extra_sequences_paths(fn):\n 
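# each line is a tab-separated pair: dataset name, path to its extra sequences\n    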
extra_sequences_paths = {}\n with open(fn) as f:\n for line in f:\n ls = line.rstrip().split('\\t')\n extra_sequences_paths[ls[0]] = ls[1]\n return extra_sequences_paths\n\n\ndef map_dataset_to_sequences(dataset_for_sequence):\n # using map of sequence->[dataset], return map of dataset->[sequence]\n sequences_for_dataset = defaultdict(list)\n for sequence, datasets in dataset_for_sequence.items():\n for dataset in datasets:\n sequences_for_dataset[dataset].append(sequence)\n return dict(sequences_for_dataset)\n\n\ndef download_dataset(dataset, sequences, extra_sequences_path, out_dir,\n consolidate_segments=False, gzip_fastas=False):\n print(\"Starting download for\", dataset.name)\n\n num_sequences_segmented = sum([s.is_segmented for s in sequences])\n if num_sequences_segmented > 0 and num_sequences_segmented != len(sequences):\n # either none or all sequences should be labeled as segmented\n raise ValueError(\"There are %d sequences and %d are marked as segmented\"\n % (len(sequences), num_sequences_segmented))\n\n is_segmented = num_sequences_segmented > 0\n segments = set(s.segment for s in sequences)\n # Note that len(segments) may equal 1 even if is_segmented is True if\n # sequences for dataset are labeled with segments (e.g., assigned\n # 'segment X') but only 1 segment's sequence is available among the\n # sequences (i.e., all sequences are assigned 'segment X')\n\n if is_segmented:\n gb = download_raw_from_genbank(sequences, results_type='gb')\n strains = parse_strain_from_gb_results(gb)\n num_found = sum(True for s in sequences if strains[s.name] != None)\n sequences_for_strain = breakup_sequences_by_strain(sequences, strains,\n segments)\n\n print((\"%s (%d sequences) is segmented with %d sequenced segments; found \"\n \"strains for %d of these sequences (%d of the sequences \"\n \"could not be placed in a grouping)\") % (dataset.name,\n len(sequences), len(segments), num_found,\n len(sequences_for_strain[None])))\n\n write_dir = os.path.join(out_dir, 'data', dataset.name)\n if not os.path.exists(write_dir):\n os.makedirs(write_dir)\n\n header_names = set()\n\n num_genomes = 0\n # make a fasta for each strain (or isolate)\n for strain, strain_sequences in sequences_for_strain.items():\n if strain != None:\n num_genomes += 1\n added = make_fasta_for_genomes(strain_sequences, write_dir)\n header_names.update(added)\n\n # make a separate fasta for each sequence that could not be grouped\n # with a strain\n for sequence in sequences_for_strain[None]:\n num_genomes += 1\n added = make_fasta_for_genomes([sequence], write_dir)\n header_names.update(added)\n\n num_sequences = len(sequences)\n else:\n print(\"%s (%d sequences) is not segmented\" % (dataset.name,\n len(sequences)))\n\n write_dir = os.path.join(out_dir, 'data')\n if not os.path.exists(write_dir):\n os.makedirs(write_dir)\n\n # make a fasta for this dataset\n header_names = make_fasta_for_genomes(sequences, write_dir, dataset.name)\n num_genomes = len(sequences)\n num_sequences = len(sequences)\n\n if extra_sequences_path:\n sequences_added, genomes_added = merge_with_extra_sequences(dataset,\n sequences, extra_sequences_path,\n is_segmented, write_dir)\n num_sequences += sequences_added\n num_genomes += genomes_added\n\n if is_segmented and consolidate_segments:\n # group all .fasta files (one per genome) for this dataset into\n # one fasta file, and delete all the old ones\n segment_dir = os.path.join(out_dir, 'data', dataset.name)\n new_fasta_path = os.path.join(out_dir, 'data', dataset.name + '.fasta')\n with 
open(new_fasta_path, 'w') as fout:\n            for genome_fn in os.listdir(segment_dir):\n                if not genome_fn.endswith('.fasta'):\n                    continue\n                genome_id = genome_fn.replace('.fasta', '')\n                with open(os.path.join(segment_dir, genome_fn)) as fin:\n                    for line in fin:\n                        line = line.rstrip()\n                        if line.startswith('>'):\n                            # append genome id to header\n                            line = line + ' [genome ' + genome_id + ']'\n                        fout.write(line + '\\n')\n        shutil.rmtree(segment_dir)\n\n    if gzip_fastas:\n        # gzip each fasta that was created\n        if not is_segmented or (is_segmented and consolidate_segments):\n            paths_to_gzip = [os.path.join(out_dir, 'data', dataset.name + '.fasta')]\n        else:\n            paths_to_gzip = []\n            segment_dir = os.path.join(out_dir, 'data', dataset.name)\n            for genome_fn in os.listdir(segment_dir):\n                if genome_fn.endswith('.fasta'):\n                    paths_to_gzip += [os.path.join(segment_dir, genome_fn)]\n\n        for path in paths_to_gzip: \n            with open(path, 'rb') as fin:\n                with gzip.open(path + '.gz', 'wb') as fout:\n                    shutil.copyfileobj(fin, fout)\n            os.remove(path)\n\n    make_dataset_python_file(dataset, num_genomes, num_sequences, segments,\n                             is_segmented, consolidate_segments, gzip_fastas,\n                             out_dir)\n \n\ndef breakup_sequences_by_strain(sequences, strains, segments):\n    # strains map sequence_name->strain; make and return a map of\n    # strain->[sequences]\n    # in the returned dict, None->[sequences with no identified strain]\n\n    sequence_for_name = {s.name: s for s in sequences}\n    sequences_for_strain = defaultdict(list)\n    for sequence_name, strain in strains.items():\n        sequence = sequence_for_name[sequence_name]\n        sequences_for_strain[strain].append(sequence)\n\n    # ensure each strain has at most 1 of each segment\n    strain_additions_to_none = set()\n    seq_additions_to_none = set()\n    for strain in sequences_for_strain.keys():\n        if strain == None:\n            continue\n        for segment in segments:\n            num_occurrences_of_segment = sum(True for g in\n                sequences_for_strain[strain] if g.segment == segment)\n            if num_occurrences_of_segment > 1:\n                # segment appears more than once in this strain, so\n                # invalidate it by adding all of these sequences to the\n                # 'None' strain\n                strain_additions_to_none.add(strain)\n                for s in sequences_for_strain[strain]:\n                    seq_additions_to_none.add(s)\n    for strain in strain_additions_to_none:\n        del sequences_for_strain[strain]\n    for s in seq_additions_to_none:\n        sequences_for_strain[None].append(s)\n\n    sequences_for_strain = dict(sequences_for_strain)\n    if None not in sequences_for_strain:\n        sequences_for_strain[None] = []\n\n    return sequences_for_strain\n\n\ndef merge_with_extra_sequences(dataset, sequences, extra_sequences_path,\n                               is_segmented, write_dir):\n    sequences_added = 0\n    genomes_added = 0\n\n    acc_nums_present = set([s.name for s in sequences])\n\n    if is_segmented:\n        # since this dataset is segmented, the extra sequences should be\n        # in individual genome files and the given path should be a directory\n        assert os.path.isdir(extra_sequences_path)\n\n        for fn in os.listdir(extra_sequences_path):\n            # check whether none or all of the sequences in fn are 'extra'\n            # (i.e., are needed to be copied)\n            acc_nums_needed = set()\n            num_sequences = 0\n            with open(os.path.join(extra_sequences_path, fn)) as f:\n                for line in f:\n                    if line.startswith('>'):\n                        header = line.rstrip()[1:]\n                        acc_num = extract_accession_num_from_header(header)\n                        if (acc_num not in acc_nums_present and\n                                acc_num.split('.')[0] not in acc_nums_present):\n                            # neither acc_num nor the num without the version number\n                            # is already present, so we need it\n                            acc_nums_needed.add(acc_num)\n                        
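# count every header line, whether or not its sequence is needed\n                        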
num_sequences += 1\n\n num_needed = len(acc_nums_needed)\n if num_needed == 0:\n continue\n if num_needed != num_sequences:\n raise ValueError((\"In %s, some but not all sequences are \"\n \"needed; unsure what to do\") % \n os.path.join(extra_sequences_path, fn))\n\n # find the segment needed based on a header\n def extract_segment(header):\n segment_match = re.search(\n 'segment (.+?)(?: |,)|\\|Segment:(.+?)\\|', header)\n if segment_match is None:\n raise ValueError(\"In %s, could not determine a segment\" %\n os.path.join(extra_sequences_path, fn))\n\n if segment_match.group(1):\n return segment_match.group(1)\n else:\n return segment_match.group(2)\n\n # copy over fn while changing each header to include the segment\n # at the end (i.e., 'header [segment X]') as a suffix\n # (but not if it already ends in the suffix)\n print(\"Copying genome to\", os.path.join(write_dir, fn))\n with open(os.path.join(write_dir, fn), 'w') as fw:\n with open(os.path.join(extra_sequences_path, fn)) as fr:\n for line in fr:\n if line.startswith('>'):\n header = line.rstrip()[1:]\n acc_num = extract_accession_num_from_header(header)\n acc_nums_present.add(acc_num)\n segment = extract_segment(header)\n suffix = ' [segment ' + segment + ']'\n if header.endswith(suffix):\n updated_header = header\n else:\n updated_header = header + suffix\n fw.write('>' + updated_header + '\\n')\n else:\n fw.write(line)\n\n sequences_added += num_needed\n genomes_added += 1\n else:\n # since this dataset is not segmented, the extra sequences should\n # be in a single fasta file\n assert os.path.isfile(extra_sequences_path)\n\n out_path = os.path.join(write_dir, dataset.name + '.fasta')\n\n # append sequences to out_path\n with open(out_path, 'a') as fw:\n fw.write('\\n')\n with open(extra_sequences_path) as fr:\n currently_appending = False\n for line in fr:\n if line.startswith('>'):\n header = line.rstrip()[1:]\n acc_num = extract_accession_num_from_header(header)\n if (acc_num not in acc_nums_present and\n acc_num.split('.')[0] not in acc_nums_present):\n # append the current sequence\n currently_appending = True\n sequences_added += 1\n genomes_added += 1\n acc_nums_present.add(acc_num)\n else:\n currently_appending = False\n if currently_appending:\n fw.write(line)\n\n return sequences_added, genomes_added\n\n\ndef make_fasta_for_genomes(sequences, write_dir, name=None,\n max_tries=5,\n base_delay=5):\n # GenBank sporadically appears to give back a FASTA with missing\n # sequences (i.e., does not include all that were requested);\n # if this is the case, a ValueError is thrown and retry a few times\n # before crashing the entire program\n try_num = 1\n while try_num <= max_tries:\n try:\n return _make_fasta_for_genomes(sequences, write_dir, name=name)\n except ValueError as e:\n if try_num == max_tries:\n # used up all tries\n raise e\n time.sleep(2**(try_num - 1) * base_delay)\n try_num += 1\n\n\ndef _make_fasta_for_genomes(sequences, write_dir, name=None):\n if name is None:\n # make a name from the hash of the sequence names\n name = hashlib.sha224(''.join([s.name for s in sequences]).encode()).\\\n hexdigest()[-8:]\n\n out_path = os.path.join(write_dir, name + '.fasta')\n fasta_txt = download_raw_from_genbank(sequences, results_type='fasta')\n\n # Map header->sequence (sequence_for_header)\n # Rather than doing this while reading/copying the fasta file, this\n # allows us to ensure every sequence has an associated header in the\n # return from GenBank\n header_names = set(line[1:] for line in fasta_txt.split('\\n')\n 
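# keep only FASTA header lines\n                           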
if line.startswith('>'))\n sequence_for_header = {}\n for s in sequences:\n found = None\n for header in header_names:\n if s.name in header:\n if found is not None:\n raise ValueError((\"Found more than one possible header \"\n \"for sequence %s\") % s.name)\n found = header\n if found is None:\n raise ValueError(\"Could not find header for sequence %s\" % s.name)\n sequence_for_header[found] = s\n\n with open(out_path, 'w') as f:\n for line in fasta_txt.split('\\n'):\n if line.startswith('>'):\n header = line.rstrip()[1:]\n if header not in sequence_for_header:\n raise ValueError(\"Unknown sequence for header %s\" % header)\n sequence = sequence_for_header[header]\n if sequence.is_segmented:\n header = header + ' [' + sequence.segment + ']'\n f.write('>' + header)\n else:\n f.write(line)\n f.write('\\n')\n\n return header_names\n\n\ndef download_raw_from_genbank(sequences,\n results_type='fasta',\n batch_size=50,\n max_tries=5,\n base_delay=5):\n # Entrez gives sporadic exceptions (usually RuntimeErrors);\n # retry the call a few times if this happens before crashing\n # the entire program\n try_num = 1\n while try_num <= max_tries:\n try:\n return _download_raw_from_genbank(sequences,\n results_type=results_type,\n batch_size=batch_size)\n except Exception as e:\n if try_num == max_tries:\n # used up all tries\n raise e\n time.sleep(2**(try_num - 1) * base_delay)\n try_num += 1\n\n\ndef _download_raw_from_genbank(sequences,\n results_type='fasta',\n batch_size=50):\n # download raw data from GenBank of type 'gb' or 'fasta', as specified\n # by results_type\n\n accession_names = [s.name for s in sequences]\n\n # first fetch GI numbers in batches of size batch_size_large\n gi = []\n for i in range(0, len(accession_names), batch_size):\n gi_batch_query = ' '.join(accession_names[i:(i + batch_size)])\n gi_batch = Entrez.read(Entrez.esearch(db='nuccore',\n term=gi_batch_query,\n retmax=10**6))['IdList']\n gi.extend(gi_batch)\n\n raw_results = ''\n\n # now query GenBank using the GI numbers, fetching results in batches\n # of size batch_size\n query = ','.join(gi)\n reader = Entrez.read(Entrez.epost(db='nuccore', id=query))\n for i in range(0, len(gi), batch_size):\n results = Entrez.efetch(db='nuccore',\n rettype=results_type,\n retstart=i,\n retmax=batch_size,\n webenv=reader['WebEnv'],\n query_key=reader['QueryKey'])\n raw_results += results.read()\n\n return raw_results\n\n\ndef parse_strain_from_gb_results(gb_results):\n # gb_results is output of download_raw_from_genbank with results_type='gb'\n # returns map of accession_name->strain\n\n # each result is separated by the line '//', so split on this\n gb_results_split = gb_results.split('//\\n')\n\n accession_pattern = '^ACCESSION\\s+(\\w+)( |$)'\n strain_pattern = '/strain=\"(.+?)\"'\n isolate_pattern = '/isolate=\"(.+?)\"'\n\n strains = {}\n\n for result in gb_results_split:\n if len(result) == 0 or result.isspace():\n continue\n\n # find accession number\n accession_match = re.search(accession_pattern, result, re.MULTILINE)\n if not accession_match:\n print(accession_pattern, result)\n raise ValueError(\"Unknown accession number in result\")\n accession = accession_match.group(1)\n\n # find strain\n strain_match = re.search(strain_pattern, result, re.MULTILINE)\n if strain_match:\n strain = strain_match.group(1)\n else:\n # 'strain' is not present, so look for isolate\n isolate_match = re.search(isolate_pattern, result, re.MULTILINE)\n if isolate_match:\n strain = isolate_match.group(1)\n else:\n # no strain or isolate\n strain = 
None\n strains[accession] = strain\n\n return strains\n\n\ndef make_dataset_python_file(dataset, num_genomes, num_sequences,\n segments, is_segmented, consolidate_segments,\n gzip_fastas, out_dir):\n # sort segments alphanumerically (numerically if they're numbers)\n segments = sorted(segments,\n key=lambda x: (int(x) if x.isdigit() else float('inf'), x))\n\n if is_segmented:\n if consolidate_segments:\n template_file = DATASET_PYTHON_TEMPLATE_SEGMENTED_CONSOLIDATED\n else:\n template_file = DATASET_PYTHON_TEMPLATE_SEGMENTED\n else:\n template_file = DATASET_PYTHON_TEMPLATE_UNSEGMENTED\n\n fillins = {}\n fillins['VIRUS_REGEX'] = dataset.description\n fillins['NUM_GENOMES'] = str(num_genomes)\n fillins['DATASET_NAME'] = dataset.name\n\n if dataset.subset_of is None:\n fillins['SUBSET_NOTE'] = ''\n else:\n fillins['SUBSET_NOTE'] = ('\\n' + \"Note that the sequences in this \"\n \"dataset are a subset of those in the '%s' \"\n \"dataset.\" % dataset.subset_of + '\\n')\n\n if gzip_fastas:\n fillins['GZIP'] = '.gz'\n else:\n fillins['GZIP'] = ''\n\n if is_segmented:\n segments_brief = [s.replace('segment ', '') for s in segments]\n fillins['NUM_SEGMENTS'] = str(len(segments))\n fillins['NUM_SEQUENCES'] = str(num_sequences)\n fillins['LIST_OF_SEGMENTS_PYTHON_FORM'] = \\\n '[' + ', '.join([\"'\" + s + \"'\" for s in segments_brief]) + ']'\n fillins['LIST_OF_SEGMENTS_REGEX_FORM'] = '|'.join(segments_brief)\n\n out_file = os.path.join(out_dir, dataset.name + '.py')\n\n # Use a custom instance of TextWrapper so that we can set\n # replace_whitespace to False and therefore allow '\\n' in the\n # values of fillins\n tw = textwrap.TextWrapper(replace_whitespace=False)\n\n with open(out_file, 'w') as fw:\n with open(template_file) as fr:\n num_comment_delims = 0\n for line in fr:\n line = line.rstrip()\n num_comment_delims += line.count('\"\"\"')\n line_filledin = line\n for k, v in fillins.items():\n line_filledin = line_filledin.replace('[[' + k + ']]', v)\n if num_comment_delims % 2 == 1:\n # in multiline comment\n has_final_newline = line_filledin.endswith('\\n')\n line_filledin = tw.fill(line_filledin)\n if has_final_newline:\n # fill() always strips final newlines, so if it had\n # one add it back\n line_filledin += '\\n'\n fw.write(line_filledin + '\\n')\n\n\ndef read_extra_sequences_headers(extra_sequences_path, is_segmented):\n headers = []\n if is_segmented:\n # since this dataset is segmented, the extra sequences should be\n # in individual genome files and the given path should be a directory\n assert os.path.isdir(extra_sequences_path)\n\n for fn in os.listdir(extra_sequences_path):\n with open(os.path.join(extra_sequences_path, fn)) as f:\n for line in f:\n if line.startswith('>'):\n header = line.rstrip()[1:]\n headers += [header]\n else:\n # since this dataset is not segmented, the extra sequences should\n # be in a single fasta file\n assert os.path.isfile(extra_sequences_path)\n\n with open(extra_sequences_path) as fr:\n for line in fr:\n if line.startswith('>'):\n header = line.rstrip()[1:]\n headers += [header]\n return headers\n\ndef extract_accession_num_from_header(header):\n acc_match = re.search(\n '\\|(?:gb|emb|dbj|ref)\\|(.+?)\\||gb:(.+?)\\|', header)\n if acc_match is None:\n raise ValueError(\"In %s, could not determine accession\" %\n header)\n\n if acc_match.group(1):\n return acc_match.group(1)\n else:\n return acc_match.group(2)\n\ndef write_accession_nums(dataset, sequences, extra_sequences_path, out_dir):\n segments = set(s.segment for s in sequences)\n is_segmented = 
len(segments) > 1\n\n accession_nums = set([s.name for s in sequences])\n\n if extra_sequences_path:\n headers = read_extra_sequences_headers(extra_sequences_path,\n is_segmented)\n for header in headers:\n acc_num = extract_accession_num_from_header(header)\n if acc_num in accession_nums:\n # skip because we already have it\n continue\n if acc_num.split('.')[0] in accession_nums:\n # skip because we already have the prefix of the accession\n # number (i.e., the number without the version); e.g.,\n # acc_num is 'KJ123.1' but we have 'KJ123'\n continue\n accession_nums.add(acc_num)\n\n accession_nums = sorted(list(accession_nums))\n out_file = os.path.join(out_dir, dataset.name + '.accession_nums')\n with open(out_file, 'w') as fw:\n for an in accession_nums:\n fw.write(str(an) + '\\n')\n\n\ndef main(args):\n datasets = read_dataset_list(args.dataset_list)\n sequences = read_genome_accession_list(args.genome_accession_list)\n sequences = filter_sequences_with_nonhuman_host(sequences, args)\n sequences = uniqueify_genome_accession_list(sequences)\n\n if args.datasets_to_skip:\n datasets_to_skip = read_dataset_skip_list(args.datasets_to_skip)\n else:\n datasets_to_skip = set()\n\n if args.extra_sequences:\n extra_sequences_paths = read_extra_sequences_paths(\n args.extra_sequences)\n else:\n extra_sequences_paths = {}\n\n verify_dataset_list(datasets)\n verify_sequence_names_are_unique(sequences)\n\n dataset_for_sequence = pair_each_sequence_with_dataset(sequences, datasets,\n datasets_to_skip, args.allow_multiple_dataset_matches)\n sequences_for_dataset = map_dataset_to_sequences(dataset_for_sequence)\n\n missing_seqs = False\n for dataset in datasets:\n if dataset not in sequences_for_dataset:\n if dataset.name not in datasets_to_skip:\n print(\"No sequences for dataset: %s\" % dataset.name)\n missing_seqs = True\n if missing_seqs:\n raise Exception(\"Missing sequences for datasets printed above\")\n\n if args.print_sequences:\n for dataset, sequences in sequences_for_dataset.items():\n for s in sequences:\n print('\\t'.join([dataset.name, s.representative, s.name, s.lineage]))\n\n for dataset, sequences in sequences_for_dataset.items():\n if dataset.name in extra_sequences_paths:\n extra_sequences_path = extra_sequences_paths[dataset.name]\n else:\n extra_sequences_path = None\n\n if args.write_accession_nums:\n write_accession_nums(dataset, sequences, extra_sequences_path,\n args.write_accession_nums)\n\n if not args.skip_download:\n download_dataset(dataset, sequences, extra_sequences_path,\n args.out_dir,\n consolidate_segments=args.consolidate_segmented_genomes_into_one_fasta,\n gzip_fastas=args.gzip_fastas)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-dl', '--dataset-list', required=True,\n help=\"File with list of datasets\")\n parser.add_argument('-gl', '--genome-accession-list', required=True,\n help=\"File with accession list of all viral genomes\")\n parser.add_argument('-ds', '--datasets-to-skip',\n help=\"File with list of datasets to not download\")\n parser.add_argument('-es', '--extra-sequences',\n help=(\"File with list of datasets and corresponding paths to extra \"\n \"sequences to merge with those downloaded from GenBank\"))\n parser.add_argument('--human-host-lineages-to-add',\n help=(\"File listing lineages to explicitly include as having human \"\n \"as a host; each row gives a lineage, tab-separated \"\n \"by family/genus/species\"))\n parser.add_argument('--skip-download', dest=\"skip_download\",\n 
action=\"store_true\",\n help=(\"When set, do not perform the download\"))\n parser.add_argument('--write-accession-nums',\n help=(\"When set, write a list of accession nums for each dataset \"\n \"in the specified directory (this does not require a download)\"))\n parser.add_argument('--print-sequences', dest=\"print_sequences\",\n action=\"store_true\",\n help=(\"When set, print a list of sequences (accession and lineages) \"\n \"for each dataset (this does not require a download)\"))\n parser.add_argument('--allow-multiple-dataset-matches',\n dest=\"allow_multiple_dataset_matches\",\n action=\"store_true\",\n help=(\"When set, do not fail when a sequence matches more than one \"\n \"dataset\"))\n parser.add_argument('--consolidate-segmented-genomes-into-one-fasta',\n dest=\"consolidate_segmented_genomes_into_one_fasta\",\n action=\"store_true\",\n help=(\"When set, instead of storing all .fasta files (one per \"\n \"genome) for a segmented virus in a directory, place them \"\n \"all in a single fasta file\"))\n parser.add_argument('--gzip-fastas',\n dest=\"gzip_fastas\",\n action=\"store_true\",\n help=(\"When set, gzip each fasta file created\"))\n parser.add_argument('-o', '--out-dir', required=True,\n help=\"Directory in which to place output data\")\n\n args = parser.parse_args() \n\n main(args)\n","repo_name":"broadinstitute/hybsel-design-runs","sub_path":"download-genbank-viral-genomes/download_dataset_fastas.py","file_name":"download_dataset_fastas.py","file_ext":"py","file_size_in_byte":35810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"34020553850","text":"from django.contrib import admin\nfrom .models import License, Company\n\n\n@admin.register(License)\nclass LicenseAdmin(admin.ModelAdmin):\n list_filter = (\"id\",)\n readonly_fields = (\n 'license_key',\n 'date_joined',\n 'valid_until',\n 'is_active',\n 'count_cameras',\n 'neurons_active'\n )\n\n\n@admin.register(Company)\nclass CompanyAdmin(admin.ModelAdmin):\n list_filter = (\"id\", \"city\", \"my_company\")\n list_display = (\n 'name_company',\n 'website',\n 'contact_email',\n 'contact_phone',\n 'country',\n 'city',\n 'state',\n 'first_address',\n 'second_address',\n 'contact_mobile_phone',\n 'logo',\n 'file',\n 'index',\n 'date_joined',\n 'date_edited'\n )\n","repo_name":"5sControl/5s-backend","sub_path":"src/CompanyLicense/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"11084212266","text":"from __future__ import print_function\nfrom memory_profiler import profile\nimport sys\nfrom datetime import datetime, timedelta\nimport time\nimport io\nfrom beem.steem import Steem\nfrom beem.account import Account\nfrom beem.amount import Amount\nfrom beem.blockchain import Blockchain\nfrom beem.utils import parse_time\nfrom beem.instance import set_shared_steem_instance\nimport logging\nlog = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\n@profile\ndef profiling(name_list):\n stm = Steem()\n set_shared_steem_instance(stm)\n del stm\n print(\"start\")\n for name in name_list:\n print(\"account: %s\" % (name))\n acc = Account(name)\n max_index = acc.virtual_op_count()\n print(max_index)\n stopTime = datetime(2018, 4, 22, 0, 0, 0)\n hist_elem = None\n for h in acc.history_reverse(stop=stopTime):\n hist_elem = h\n print(hist_elem)\n print(\"blockchain\")\n blockchain_object = Blockchain()\n current_num = 
blockchain_object.get_current_block_num()\n startBlockNumber = current_num - 20\n endBlockNumber = current_num\n block_elem = None\n for o in blockchain_object.stream(start=startBlockNumber, stop=endBlockNumber):\n print(\"block %d\" % (o[\"block_num\"]))\n block_elem = o\n print(block_elem)\n\n\nif __name__ == \"__main__\":\n\n account_list = [\"utopian-io\", \"busy.org\", \"minnowsupport\", \"qurator\", \"thesteemengine\", \"ethandsmith\", \"make-a-whale\", \"feedyourminnows\", \"steembasicincome\",\n \"sbi2\", \"sbi3\", \"sbi4\", \"sbi5\", \"sbi6\", \"steemdunk\", \"thehumanbot\", \"resteemable\", \"kobusu\", \"mariachan\", \"qustodian\", \"randowhale\",\n \"bumper\", \"minnowbooster\", \"smartsteem\", \"steemlike\", \"parosai\", \"koinbot\", \"steemfunding\"]\n profiling(account_list)\n","repo_name":"holgern/beem","sub_path":"examples/memory_profiler1.py","file_name":"memory_profiler1.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"15"} +{"seq_id":"15326315365","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCode for the ATC-NG waypoints (entry/exit gates and beacons)\n'''\n\nimport pygame.draw\nimport pygame.transform\nfrom pygame.locals import *\nfrom math import radians, sin, cos\n\nimport lib.utils as U\nfrom engine.settings import settings as S\n\n__author__ = \"Mac Ryan\"\n__copyright__ = \"Copyright 2011, Mac Ryan\"\n__license__ = \"GPL v3\"\n#__version__ = \"\"\n#__date__ = \"\"\n__maintainer__ = \"Mac Ryan\"\n__email__ = \"quasipedia@gmail.com\"\n__status__ = \"Development\"\n\n\nclass Gate(object):\n\n '''\n A gate represents an valid opening of the monitored airspace.\n Basically all entering and exiting planes should do so from a gate.\n '''\n\n def __init__(self, name, radial, heading, width, bottom, top):\n self.name = name\n self.radial = radial % 360\n self.heading = heading\n self.width = width\n self.bottom = bottom\n self.top = top\n self.__set_location()\n\n def __set_location(self):\n '''\n Find (x,y) coordinates in the aerospace corresponding to the centre of\n the gate.\n '''\n # Convert \"radial\": from \"CW deg from North\" to \"CCW rad from East\"\n convert_angle = lambda ang : radians(90-ang)\n radial = convert_angle(self.radial)\n # Find the coordinates of the centre of the gate\n s, c = sin(radial), cos(radial)\n ss, cs = cmp(s, 0), cmp(c, 0)\n sa, ca = abs(s), abs(c)\n side = cmp(sa, ca) #-1 for sides, 0 for corner, 1 or top or bottom\n rr = S.RADAR_RANGE\n x = cs * (rr if side <= 0 else rr/sa * ca) + rr\n y = ss * (rr if side >= 0 else rr/ca * sa) + rr\n self.location = (x, y)\n # For gates near 45° we want label treated as \"corner\", a delta of 0.12\n # make this work 40°-50° (0.25, 0.37, 0.48) for following 5° increments\n self.side = 0 if abs(sa-ca) < 0.12 else side\n\n def draw(self, surface):\n '''\n Blit self on radar surface.\n '''\n x, y = U.sc(self.location)\n # GATE\n # In order to facilitate blitting information on the orientation of the\n # gate, we create the image already rotated 90° clockwise by swapping\n # width and height...\n gate_width_px = U.rint(self.width / S.METRES_PER_PIXEL)\n gate_length_px = S.RADAR_RECT.h / 4\n aaf = 5 #anti-alias factor\n g_img = pygame.surface.Surface((gate_length_px*aaf,\n gate_width_px*aaf), SRCALPHA)\n # BOUNDARIES OF THE GATE\n pygame.draw.line(\n g_img, S.GRAY, (0, aaf), (gate_length_px*aaf, aaf), aaf)\n pygame.draw.line(\n g_img, S.GRAY, (0, gate_width_px*aaf-aaf),\n (gate_length_px*aaf, 
gate_width_px*aaf-aaf), aaf)\n # INFO ON ORIENTATION and FLIGHT LEVELS\n fl = lambda x : str(x/100).zfill(2)\n lines = ['H:' + str(self.heading).zfill(3),\n 'B:' + fl(self.bottom),\n 'T:' + fl(self.top)]\n fontobj = pygame.font.Font(S.MAIN_FONT, S.HUD_INFO_FONT_SIZE * aaf)\n label = U.render_lines(fontobj, lines, S.GRAY)\n label = label.subsurface(label.get_bounding_rect())\n w, h = label.get_size()\n ypsilon = U.rint(gate_width_px*aaf/2.0-h/2)\n g_img.blit(label, (0, ypsilon))\n g_img.blit(label, (gate_length_px*aaf-w, ypsilon))\n # transformation and blitting\n rotang = 90 if 0<= self.heading < 180 else 270\n g_img = pygame.transform.rotate(g_img, rotang-self.heading)\n g_img = g_img.subsurface(g_img.get_bounding_rect()).copy()\n r = g_img.get_rect()\n g_img = pygame.transform.smoothscale(g_img, (U.rint(r.w*1.0/aaf),\n U.rint(r.h*1.0/aaf)))\n g_rect = g_img.get_rect()\n surface.blit(g_img, (x-g_rect.centerx, y-g_rect.centery))\n # LABEL\n fontobj = pygame.font.Font(S.MAIN_FONT, S.HUD_INFO_FONT_SIZE)\n label = fontobj.render(self.name, True, S.RED)\n w, h = label.get_size()\n signed_offset = lambda n : cmp(1,n)*w\n x += (signed_offset(x) if self.side <=0 else 0) - w/2\n y += (signed_offset(y) if self.side >=0 else 0) - h/2\n surface.blit(label, (x,y))\n\n\nclass Beacon(object):\n\n '''\n A beacon is a point on the ground that is known to aeroplanes and can be\n used to set heading for. [AZA0019 HEADING NDB4]\n '''\n\n def __init__(self, id, location):\n self.id = id\n self.location = location\n\n def draw(self, surface):\n pos = U.sc(self.location)\n pygame.draw.circle(surface, S.GRAY, pos, 2)\n pygame.draw.circle(surface, S.GRAY, pos, 6, 1)\n fontobj = pygame.font.Font(S.MAIN_FONT, S.HUD_INFO_FONT_SIZE)\n label = fontobj.render(self.id, True, S.BLUE)\n label = label.subsurface(label.get_bounding_rect()).copy()\n w, h = label.get_size()\n x, y = pos\n # In order to keep the crammed central space free, beacon labels are\n # always placed towards the edges of the radar screen, if possible.\n offsets = [U.rint(6+w/3), -U.rint(6+w/3)-w]\n index = x < S.RADAR_RECT.w/2\n if not (0 < x+offsets[index] and x+offsets[index]+w < S.RADAR_RECT.w):\n index = not index\n surface.blit(label, (x+offsets[index], y-h/2))\n","repo_name":"quasipedia/atc-ng","sub_path":"entities/waypoints.py","file_name":"waypoints.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"15"} +{"seq_id":"22168747312","text":"from datetime import datetime\nimport re\n\nfrom openerp.addons.report_webkit.report_helper import WebKitHelper\nfrom pytils import numeral,dt\n\ndef numer(self, name):\n if name:\n numeration = re.findall('\\d+$', name)\n if numeration: return numeration[0]\n return ''\n\ndef ru_date(self, date):\n if date and date != 'False':\n return dt.ru_strftime(u'\"%d\" %B %Y года', date=datetime.strptime(date, \"%Y-%m-%d\"), inflected=True)\n return ''\n \ndef ru_date2(self, date):\n if date and date != 'False':\n return dt.ru_strftime(u'%d %B %Y г.', date=datetime.strptime(date, \"%Y-%m-%d %H:%M:%S\"), inflected=True)\n return ''\n\ndef ru_date3(self, date):\n if date and date != 'False':\n return datetime.strptime(date, \"%Y-%m-%d\").strftime(\"%d.%m.%Y\")\n return ''\n\ndef in_words(self, number):\n return numeral.in_words(number)\n\ndef rubles(self, sum):\n \"Transform a sum in rubles into its text representation\"\n text_rubles = numeral.rubles(int(sum))\n copeck = round((sum - int(sum))*100)\n text_copeck = numeral.choose_plural(int(copeck), 
(u\"копейка\", u\"копейки\", u\"копеек\"))\n return (\"%s %02d %s\")%(text_rubles, copeck, text_copeck)\n\ndef initials(self, fio):\n if fio:\n return (fio.split()[0]+' '+''.join([fio[0:1]+'.' for fio in fio.split()[1:]])).strip()\n return ''\n\ndef address(self, partner):\n repr = []\n if partner.zip: repr.append(partner.zip)\n if partner.city: repr.append(partner.city)\n if partner.street: repr.append(partner.street)\n if partner.street2: repr.append(partner.street2)\n return ', '.join(repr)\n\ndef representation(self, partner):\n repr = []\n if partner.name: repr.append(partner.name)\n if partner.inn: repr.append(u\"ИНН \" + partner.inn)\n if partner.kpp: repr.append(u\"КПП \" + partner.kpp)\n repr.append(self.address(partner))\n return ', '.join(repr)\n\ndef full_representation(self, partner):\n repr = [self.representation(partner)]\n if partner.phone: repr.append(u\"тел.: \" + partner.phone)\n elif partner.parent_id.phone: repr.append(u\"тел.: \" + partner.parent_id.phone)\n bank = None\n if partner.bank_ids: bank = partner.bank_ids[0]\n elif partner.parent_id.bank_ids: bank = partner.parent_id.bank_ids[0]\n if bank and bank.acc_number: repr.append(u\"р/сч \" + bank.acc_number)\n if bank and bank.bank_name: repr.append(u\"в банке \" + bank.bank_name)\n if bank and bank.bank_bic: repr.append(u\"БИК \" + bank.bank_bic)\n if bank and bank.bank_corr_acc: repr.append(u\"к/с \" + bank.bank_corr_acc)\n return ', '.join(repr)\n\nWebKitHelper.numer = numer\nWebKitHelper.ru_date = ru_date\nWebKitHelper.ru_date2 = ru_date2\nWebKitHelper.ru_date3 = ru_date3\nWebKitHelper.in_words = in_words\nWebKitHelper.rubles = rubles\nWebKitHelper.initials = initials\nWebKitHelper.address = address\nWebKitHelper.representation = representation\nWebKitHelper.full_representation = full_representation\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","repo_name":"yustas147/suspect","sub_path":"l10n_ru_doc/report_helper.py","file_name":"report_helper.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"15"} +{"seq_id":"13217188767","text":"from pickletools import uint8\nfrom turtle import circle\nimport libraries\nnp=libraries.np\ncv2=libraries.cv2\n\nimg_clear = np.zeros((350,350), dtype=\"uint8\")\n\ncircle=cv2.circle(img_clear.copy(),(0,0),50,255,-1)\nsquare = cv2.rectangle(img_clear.copy(),(25,25),(250,300), 255,-1)\nimg_clear=cv2.bitwise_xor(square,circle)\n\ncv2.imshow(\"result\",img_clear)\ncv2.waitKey(0)\n\n\n","repo_name":"SV1Stail/fihingbotALB","sub_path":"unification.py","file_name":"unification.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"14306950720","text":"import _init_paths\nfrom consts.exp_consts import EXC\n\nBASE_DIR = \"/home/rgaurav/Documents/Projects/\"\nRESULTS_DIR = BASE_DIR + \"/ExpResults/final_results/LSNN/ECG5000/seed_9/results/\"\nDATA_DIR = BASE_DIR + \"/lsnn_model/data/\"\n\nclass DRC(object):\n\n def __init__(self, dataset):\n \"\"\"\n Initializes the dataset paths.\n\n Args:\n dataset : The dataset to work with e.g. 
ECG5000\n \"\"\"\n if dataset == EXC.ECG5000:\n self._dataset = dataset\n self._data_path = DATA_DIR +\"/ECG5000/\"\n self._results_path = RESULTS_DIR +\"/ECG5000/\"\n self._pre_proc_data_path = DATA_DIR + \"/ECG5000/pre_processed_data/\"\n self._train_set = \"ECG5000_TRAIN.arff\"\n self._test_set = \"ECG5000_TEST.arff\"\n self._do_shuffle = True\n\n elif dataset == EXC.FORDA:\n self._dataset = dataset\n self._data_path = DATA_DIR +\"/FORDA/\"\n self._results_path = RESULTS_DIR +\"/FORDA/\"\n self._pre_proc_data_path = DATA_DIR + \"/FORDA/pre_processed_data/\"\n self._train_set = \"FordA_TRAIN.arff\"\n self._test_set = \"FordA_TEST.arff\"\n self._do_shuffle = True\n\n elif dataset == EXC.FORDB:\n self._dataset = dataset\n self._data_path = DATA_DIR + \"/FORDB/\"\n self._results_path = RESULTS_DIR + \"/FORDB/\"\n self._pre_proc_data_path = DATA_DIR + \"/FORDB/pre_processed_data/\"\n self._train_set = \"FordB_TRAIN.arff\"\n self._test_set = \"FordB_TEST.arff\"\n self._do_shuffle = True\n\n elif dataset == EXC.WAFER:\n self._dataset = dataset\n self._data_path = DATA_DIR + \"/WAFER/\"\n self._results_path = RESULTS_DIR + \"/WAFER/\"\n self._pre_proc_data_path = DATA_DIR + \"/WAFER/pre_processed_data/\"\n self._train_set = \"Wafer_TRAIN.arff\"\n self._test_set = \"Wafer_TEST.arff\"\n self._do_shuffle = True\n\n elif dataset == EXC.EQUAKES:\n self._dataset = dataset\n self._data_path = DATA_DIR + \"/EQUAKES/\"\n self._results_path = RESULTS_DIR + \"/EQUAKES/\"\n self._pre_proc_data_path = DATA_DIR + \"/EQUAKES/pre_processed_data/\"\n self._train_set = \"Earthquakes_TRAIN.arff\"\n self._test_set = \"Earthquakes_TEST.arff\"\n self._do_shuffle = True\n","repo_name":"R-Gaurav/spiking-models-for-TSC","sub_path":"lsnn-model/consts/dir_consts.py","file_name":"dir_consts.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"22036052398","text":"#This program reads feet and inches from the user and converts them to meters\r\nwhile True:\r\n\r\n feet_inches = input(\"Enter feet and inches : \")\r\n try:\r\n def parse(feet_inches):\r\n parts = feet_inches.split(\" \")\r\n feet = float(parts[0])\r\n inches = float(parts[1])\r\n return {\"feet\":feet,\"inches\":inches}\r\n def convert(feet_inches):\r\n\r\n #The split method splits the string and returns a list of the parts.\r\n\r\n #Remember: input() always returns a string, so the parts must be cast to float.\r\n parts = feet_inches.split(\" \")\r\n feet = float(parts[0])\r\n inches = float(parts[1])\r\n\r\n meters = feet*0.3048+inches*0.0254 # now convert to meters\r\n return meters\r\n #return f\"{feet} feet and {inches} inches is equal to {meters} meters. 
\"\r\n #print(convert(feet_inches))\r\n #what if we want to include this in a bigger program say whether or not if a kid is eligible to get in a ride\r\n #lets do that\r\n #for that we have to store the result of the function in a variable\r\n #but the function returns a string\r\n #so here comes the concept of decoupling\r\n #so just return one single values\r\n result = convert(feet_inches)\r\n print(f\"{parse(feet_inches)['feet']} feet and {parse(feet_inches)['inches']} inches is equal to {result} meters \")\r\n if result < 1:\r\n print(\"Sorry the Child is too short for the ride \")\r\n else:\r\n print(\"Welcome to the Ride\")\r\n except IndexError:\r\n print(\"Please Enter a Valid height , if the height is only 4 feet enter 4 0\")\r\n continue\r\n","repo_name":"akkulu95/Python","sub_path":"convert_feet_inches.py","file_name":"convert_feet_inches.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"32546990770","text":"import math\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom sklearn.decomposition import PCA\n\nmatplotlib.style.use('ggplot')\npokemon = pd.read_csv('datasets/pokemon.csv')\ndf = pokemon[['HP', 'Attack', 'Defense', 'Sp_Atk', 'Sp_Def', 'Speed']]\n\n#Describe pca perimeters\n#print (df.describe())\n\npca = PCA(n_components=2, svd_solver='full')\npca.fit(df)\nT = pca.transform(df)\n#print(df.shape)\n#print(T.shape)\n\n#Explained variance ratio\n#print(pca.explained_variance_ratio_)\n\n#Correlation between components\n#components = pd.DataFrame(pca.components_, columns=df.columns, index=[1, 2])\n\ndef get_important_features(transformed_features, components_, columns):\n num_columns = len(columns)\n xvector = components_[0] * max(transformed_features[:, 0])\n yvector = components_[1] * max(transformed_features[:, 1])\n \n important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }\n important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)\n print (\"Features by importance:\\n\", important_features)\n \ndef draw_vectors(transformed_features, components_, columns):\n num_columns = len(columns)\n xvector = components_[0] * max(transformed_features[:, 0])\n yvector = components_[1] * max(transformed_features[:, 1])\n \n ax = plt.axes()\n for i in range(num_columns):\n plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75)\n plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75)\n \n return ax\n \n#get_important_features(T, pca.components_, df.columns.values)\n\nax = draw_vectors(T, pca.components_, df.columns.values)\nT_df = pd.DataFrame(T)\nT_df.columns = ['component1', 'component2']\n\nT_df['color'] = 'y'\nT_df.loc[T_df['component1'] > 125, 'color'] = 'g'\nT_df.loc[T_df['component2'] > 125, 'color'] = 'r'\n\nplt.xlabel('Principle Component 1')\nplt.ylabel('Principle Component 2')\nplt.scatter(T_df['component1'], T_df['component2'], color=T_df['color'], alpha=0.5)\nplt.show()\n\n# High Attack, High Sp. 
Atk, all of these pokemon are legendary\n#print(pokemon.loc[T_df[T_df['color'] == 'g'].index])\n\n# High Defense, Low Speed\n#print(pokemon.loc[T_df[T_df['color'] == 'r'].index])","repo_name":"rajat19/Pyscience","sub_path":"pokemon_pca.py","file_name":"pokemon_pca.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"41829395201","text":"# -*- coding: utf-8 -*-\nimport subprocess\nimport sys\nimport json\nimport tempfile\n\nimport requests\nimport logging\n\nimport time\n\nfrom datetime import datetime\nfrom shapely.geometry import mapping\nfrom pyramid.httpexceptions import HTTPBadRequest\nfrom pyramid_oereb import Config\nfrom pyramid_oereb.lib.renderer.extract.json_ import Renderer as JsonRenderer\nfrom pyramid_oereb.lib.url import parse_url\nif sys.version_info.major == 2:\n import urlparse\nelse:\n from urllib import parse as urlparse\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Renderer(JsonRenderer):\n\n def lpra_flatten(self, items):\n for item in items:\n self._flatten_object(item, 'Lawstatus')\n self._localised_text(item, 'Lawstatus_Text')\n self._flatten_object(item, 'ResponsibleOffice')\n self._multilingual_text(item, 'ResponsibleOffice_Name')\n self._multilingual_text(item, 'TextAtWeb')\n\n self._multilingual_m_text(item, 'Text')\n self._multilingual_text(item, 'Title')\n self._multilingual_text(item, 'OfficialTitle')\n self._multilingual_text(item, 'Abbreviation')\n\n def __call__(self, value, system):\n \"\"\"\n Implements a subclass of pyramid_oereb.lib.renderer.extract.json_.Renderer to create a print result\n out of a json. The json extract is reformatted to fit the structure of mapfish print.\n\n Args:\n value (tuple): A tuple containing the generated extract record and the params\n dictionary.\n system (dict): The available system properties.\n\n Returns:\n buffer: The pdf content as received from configured mapfish print instance url.\n \"\"\"\n log.debug(\"Parameter webservice is {}\".format(value[1]))\n\n if value[1].images:\n raise HTTPBadRequest('Images are not allowed in the print')\n\n self._request = self.get_request(system)\n # If language present in request, use that. 
Otherwise, keep language from base class\n if 'lang' in self._request.GET:\n self._language = self._request.GET.get('lang')\n\n # Based on extract record and webservice parameter, render the extract data as JSON\n extract_record = value[0]\n extract_as_dict = self._render(extract_record, value[1])\n feature_geometry = mapping(extract_record.real_estate.limit)\n pdf_to_join = set()\n\n self.convert_to_printable_extract(extract_as_dict, feature_geometry, pdf_to_join)\n\n print_config = Config.get('print', {})\n\n extract_as_dict['Display_RealEstate_SubunitOfLandRegister'] = print_config.get(\n 'display_real_estate_subunit_of_land_register', True\n )\n\n spec = {\n 'layout': Config.get('print', {})['template_name'],\n 'outputFormat': 'pdf',\n 'lang': self._language,\n 'attributes': extract_as_dict,\n }\n\n response = self.get_response(system)\n\n if self._request.GET.get('getspec', 'no') != 'no':\n response.headers['Content-Type'] = 'application/json; charset=UTF-8'\n return json.dumps(spec, sort_keys=True, indent=4)\n\n print_result = requests.post(\n urlparse.urljoin(Config.get('print', {})['base_url'] + '/', 'buildreport.pdf'),\n headers=Config.get('print', {})['headers'],\n data=json.dumps(spec)\n )\n\n if not extract_as_dict['isReduced'] and print_result.status_code == 200:\n main = tempfile.NamedTemporaryFile(suffix='.pdf')\n main.write(print_result.content)\n main.flush()\n cmd = ['pdftk', main.name]\n temp_files = [main]\n for url in pdf_to_join:\n result = requests.get(url)\n content_type = result.headers.get('content-type')\n log.debug(\"document url: \" + url + \" => content_type: \" + content_type)\n if content_type != 'application/pdf':\n msg = \"Skipped document inclusion (url: '{}') because content_type: '{}'\"\n log.warn(msg.format(url, content_type))\n continue\n tmp_file = tempfile.NamedTemporaryFile(suffix='.pdf')\n tmp_file.write(result.content)\n tmp_file.flush()\n temp_files.append(tmp_file)\n cmd.append(tmp_file.name)\n out = tempfile.NamedTemporaryFile(suffix='.pdf')\n cmd += ['cat', 'output', out.name]\n sys.stdout.flush()\n time.sleep(0.1)\n subprocess.check_call(cmd)\n content = out.file.read()\n else:\n content = print_result.content\n\n response.status_code = print_result.status_code\n response.headers = print_result.headers\n if 'Transfer-Encoding' in response.headers:\n del response.headers['Transfer-Encoding']\n if 'Connection' in response.headers:\n del response.headers['Connection']\n return content\n\n def convert_to_printable_extract(self, extract_dict, feature_geometry, pdf_to_join):\n \"\"\"\n Converts an oereb extract into a form suitable for printing by mapfish print.\n\n Args:\n extract_dict: the oereb extract, will get converted by this function into a form\n convenient for mapfish-print\n feature_geometry: the geometry for this extract, will get added to the extract information\n pdf_to_join: a set of additional information for the pdf, will get filled by this function\n \"\"\"\n\n log.debug(\"Starting transformation, extract_dict is {}\".format(extract_dict))\n log.debug(\"Parameter feature_geometry is {}\".format(feature_geometry))\n\n creation_date = datetime.strptime(extract_dict['CreationDate'], '%Y-%m-%dT%H:%M:%S')\n extract_dict['Footer'] = ' '.join([\n creation_date.strftime('%d.%m.%Y'),\n creation_date.strftime('%H:%M:%S'),\n extract_dict['ExtractIdentifier']\n ])\n extract_dict['CreationDate'] = creation_date.strftime('%d.%m.%Y')\n\n for attr_name in ['NotConcernedTheme', 'ThemeWithoutData', 'ConcernedTheme']:\n for theme in 
extract_dict[attr_name]:\n self._localised_text(theme, 'Text')\n self._flatten_object(extract_dict, 'PLRCadastreAuthority')\n self._flatten_object(extract_dict, 'RealEstate')\n if 'Image' in extract_dict.get('RealEstate_Highlight', {}):\n del extract_dict['RealEstate_Highlight']['Image']\n\n main_page_url, main_page_params = \\\n parse_url(extract_dict['RealEstate_PlanForLandRegisterMainPage']['ReferenceWMS'])\n base_url = urlparse.urlunsplit((main_page_url.scheme,\n main_page_url.netloc,\n main_page_url.path,\n None,\n None))\n main_page_basemap = {\n 'type': 'wms',\n 'styles': 'default',\n 'opacity': extract_dict['RealEstate_PlanForLandRegisterMainPage'].get('layerOpacity', 0.6),\n 'baseURL': base_url,\n 'layers': main_page_params['LAYERS'][0].split(','),\n 'imageFormat': 'image/png',\n 'customParams': {'TRANSPARENT': 'true'},\n }\n extract_dict['baseLayers'] = {'layers': [main_page_basemap]}\n url, params = parse_url(extract_dict['RealEstate_PlanForLandRegister']['ReferenceWMS'])\n basemap = {\n 'type': 'wms',\n 'styles': 'default',\n 'opacity': extract_dict['RealEstate_PlanForLandRegister'].get('layerOpacity', 0.6),\n 'baseURL': urlparse.urlunsplit((url.scheme, url.netloc, url.path, None, None)),\n 'layers': params['LAYERS'][0].split(','),\n 'imageFormat': 'image/png',\n 'customParams': {'TRANSPARENT': 'true'},\n }\n del extract_dict['RealEstate_PlanForLandRegister'] # /definitions/Map\n\n self._multilingual_m_text(extract_dict, 'GeneralInformation')\n self._multilingual_m_text(extract_dict, 'BaseData')\n self._multilingual_m_text(extract_dict, 'Certification')\n\n for item in extract_dict.get('Glossary', []):\n self._multilingual_text(item, 'Title')\n self._multilingual_text(item, 'Content')\n self._multilingual_text(extract_dict, 'PLRCadastreAuthority_Name')\n\n for restriction_on_landownership in extract_dict.get('RealEstate_RestrictionOnLandownership', []):\n self._flatten_object(restriction_on_landownership, 'Lawstatus')\n self._flatten_object(restriction_on_landownership, 'Theme')\n self._flatten_object(restriction_on_landownership, 'ResponsibleOffice')\n self._flatten_array_object(restriction_on_landownership, 'Geometry', 'ResponsibleOffice')\n self._localised_text(restriction_on_landownership, 'Theme_Text')\n self._localised_text(restriction_on_landownership, 'Lawstatus_Text')\n self._multilingual_m_text(restriction_on_landownership, 'Information')\n self._multilingual_text(restriction_on_landownership, 'ResponsibleOffice_Name')\n\n url, params = parse_url(restriction_on_landownership['Map']['ReferenceWMS'])\n restriction_on_landownership['baseLayers'] = {\n 'layers': [{\n 'type': 'wms',\n 'opacity': restriction_on_landownership['Map'].get('layerOpacity', 0.6),\n 'styles': 'default',\n 'baseURL': urlparse.urlunsplit((url.scheme, url.netloc, url.path, None, None)),\n 'layers': params['LAYERS'][0].split(','),\n 'imageFormat': 'image/png',\n 'customParams': {'TRANSPARENT': 'true'},\n }, basemap]\n }\n restriction_on_landownership['legend'] = restriction_on_landownership['Map'].get(\n 'LegendAtWeb', '')\n\n # Legend of other visible restriction objects in the topic map\n restriction_on_landownership['OtherLegend'] = restriction_on_landownership['Map'].get(\n 'OtherLegend', [])\n for legend_item in restriction_on_landownership['OtherLegend']:\n self._multilingual_text(legend_item, 'LegendText')\n\n for legend_entry in restriction_on_landownership['OtherLegend']:\n for element in list(legend_entry.keys()):\n if element not in ['LegendText', 'SymbolRef', 'TypeCode']:\n del 
legend_entry[element]\n\n del restriction_on_landownership['Map'] # /definitions/Map\n\n for item in restriction_on_landownership.get('Geometry', []):\n self._multilingual_text(item, 'ResponsibleOffice_Name')\n\n legal_provisions = {}\n laws = {}\n hints = {}\n\n if 'LegalProvisions' in restriction_on_landownership:\n finish = False\n while not finish:\n finish = True\n for legal_provision in restriction_on_landownership['LegalProvisions']:\n if 'Base64TextAtWeb' in legal_provision:\n del legal_provision['Base64TextAtWeb']\n if 'Reference' in legal_provision:\n for reference in legal_provision['Reference']:\n self._categorize_documents(reference, legal_provisions, laws, hints)\n del legal_provision['Reference']\n finish = False\n if 'Article' in legal_provision:\n for article in legal_provision['Article']:\n self._categorize_documents(article, legal_provisions, laws, hints)\n del legal_provision['Article']\n finish = False\n\n self._categorize_documents(legal_provision, legal_provisions, laws, hints)\n\n del restriction_on_landownership['LegalProvisions']\n\n restriction_on_landownership['LegalProvisions'] = legal_provisions\n restriction_on_landownership['Laws'] = laws\n restriction_on_landownership['Hints'] = hints\n\n # One restriction entry per theme\n theme_restriction = {}\n text_element = [\n 'Information', 'Lawstatus_Code', 'Lawstatus_Text', 'ResponsibleOffice_Name',\n 'ResponsibleOffice_OfficeAtWeb', 'SymbolRef', 'TypeCode'\n ]\n legend_element = [\n 'TypeCode', 'TypeCodelist', 'AreaShare', 'PartInPercent', 'LengthShare',\n 'SymbolRef', 'Information'\n ]\n split_sub_themes = Config.get('print', {}).get('split_sub_themes', False)\n for restriction_on_landownership in extract_dict.get('RealEstate_RestrictionOnLandownership', []):\n theme = restriction_on_landownership['Theme_Code']\n\n if split_sub_themes:\n if 'SubTheme' in restriction_on_landownership:\n theme = theme + '_' + restriction_on_landownership['SubTheme']\n restriction_on_landownership['Split_SubTheme'] = True\n\n geom_type = \\\n 'AreaShare' if 'AreaShare' in restriction_on_landownership else \\\n 'LengthShare' if 'LengthShare' in restriction_on_landownership else 'NrOfPoints'\n\n if theme not in theme_restriction:\n current = dict(restriction_on_landownership)\n current['Geom_Type'] = geom_type\n theme_restriction[theme] = current\n\n # Legend\n legend = {}\n for element in legend_element:\n if element in current:\n legend[element] = current[element]\n del current[element]\n legend['Geom_Type'] = geom_type\n current['Legend'] = [legend]\n\n # Text\n for element in text_element:\n if element in current:\n current[element] = set([current[element]])\n else:\n current[element] = set()\n continue\n current = theme_restriction[theme]\n\n if 'Geom_Type' in current and current['Geom_Type'] != geom_type:\n del current['Geom_Type']\n\n # Legend\n legend = {}\n for element in legend_element:\n if element in restriction_on_landownership:\n legend[element] = restriction_on_landownership[element]\n del restriction_on_landownership[element]\n legend['Geom_Type'] = geom_type\n current['Legend'].append(legend)\n\n # Remove in OtherLegend elements that are already in the legend\n current['OtherLegend'] = [other_legend_element\n for other_legend_element in current['OtherLegend']\n if other_legend_element['SymbolRef'] != legend['SymbolRef']]\n\n # Number or array\n for element in ['Laws', 'LegalProvisions', 'Hints']:\n if current.get(element) is not None and restriction_on_landownership.get(element) is not None:\n 
current[element].update(restriction_on_landownership[element])\n elif restriction_on_landownership.get(element) is not None:\n current[element] = restriction_on_landownership[element]\n\n # Text\n for element in text_element:\n if element in restriction_on_landownership:\n current[element].add(restriction_on_landownership[element])\n\n for restriction_on_landownership in theme_restriction.values():\n for element in text_element:\n restriction_on_landownership[element] = '\\n'.join(restriction_on_landownership[element])\n for element in ['Laws', 'LegalProvisions', 'Hints']:\n values = list(restriction_on_landownership[element].values())\n self.lpra_flatten(values)\n restriction_on_landownership[element] = values\n if element == 'LegalProvisions':\n pdf_to_join.update([legal_provision['TextAtWeb'] for legal_provision in values])\n\n restrictions = list(theme_restriction.values())\n for restriction in restrictions:\n legends = {}\n for legend in restriction['Legend']:\n type_ = legend['TypeCode']\n if type_ in legends:\n for item in ['AreaShare', 'LengthShare', 'PartInPercent']:\n if item in legend:\n if item in legends[type_]:\n legends[type_][item] += legend[item]\n else:\n legends[type_][item] = legend[item]\n else:\n legends[type_] = legend\n for legend in legends.values():\n for item in ['AreaShare', 'LengthShare']:\n if item in legend:\n legend[item] = legend[item]\n # After transformation, get the new legend entries, sorted by TypeCode\n transformed_legend = \\\n list([transformed_entry for (key, transformed_entry) in sorted(legends.items())])\n restriction['Legend'] = transformed_legend\n\n sorted_restrictions = []\n if split_sub_themes:\n # sort sub themes if sub theme splitting is enabled\n sorted_restrictions = self._sort_sub_themes(restrictions)\n else:\n # default sorting\n for theme in Config.get_themes():\n for restriction in restrictions:\n if theme.code == restriction.get('Theme_Code'):\n sorted_restrictions.append(restriction)\n\n extract_dict['RealEstate_RestrictionOnLandownership'] = sorted_restrictions\n # End one restriction entry per theme\n\n for item in extract_dict.get('ExclusionOfLiability', []):\n self._multilingual_text(item, 'Title')\n self._multilingual_text(item, 'Content')\n\n extract_dict['features'] = {\n 'features': {\n 'type': 'FeatureCollection',\n 'features': [{\n 'type': 'Feature',\n 'geometry': feature_geometry,\n 'properties': {}\n }]\n }\n }\n\n # Reformat land registry area\n extract_dict['RealEstate_LandRegistryArea'] = u'{0} m²'.format(\n extract_dict['RealEstate_LandRegistryArea']\n )\n\n # Reformat AreaShare, LengthShare and part in percent values\n for restriction in extract_dict['RealEstate_RestrictionOnLandownership']:\n for legend in restriction['Legend']:\n if 'LengthShare' in legend:\n legend['LengthShare'] = '{0} m'.format(legend['LengthShare'])\n if 'AreaShare' in legend:\n legend['AreaShare'] = u'{0} m²'.format(legend['AreaShare'])\n if 'PartInPercent' in legend:\n legend['PartInPercent'] = '{0}%'.format(legend['PartInPercent'])\n\n log.debug(\"After transformation, extract_dict is {}\".format(extract_dict))\n return extract_dict\n\n def _flatten_array_object(self, parent, array_name, object_name):\n if array_name in parent:\n for item in parent[array_name]:\n self._flatten_object(item, object_name)\n\n @staticmethod\n def _flatten_object(parent, name):\n if name in parent:\n for key, value in parent[name].items():\n parent['{}_{}'.format(name, key)] = value\n del parent[name]\n\n @staticmethod\n def _categorize_documents(document, 
legal_provisions, laws, hints):\n \"\"\"\n Categorize documents by their documentType (LegalProvision, Law or Hint).\n\n Args:\n document (dict): The document as a dictionary.\n legal_provisions (dict): The legal_provisions dictionary to fill.\n laws (dict): The laws dictionary to fill.\n hints (dict): The hints dictionary to fill.\n \"\"\"\n uid = Renderer._get_element_of_legal_provision_maybe_uid(document)\n documentType = document.get('DocumentType')\n if documentType is None:\n error_msg = \"mandatory attribute document_type is missing in document \" \\\n \": {}\".format(document)\n log.error(error_msg)\n raise AttributeError(error_msg)\n\n if documentType == 'LegalProvision':\n legal_provisions[uid] = document\n elif documentType == 'Law':\n laws[uid] = document\n else:\n hints[uid] = document\n\n @staticmethod\n def _get_element_of_legal_provision_maybe_uid(element):\n \"\"\"\n Make a unique key string out of Title and TextAtWeb. This is necessary to satisfy the KBS theme:\n there it can happen that we get different titles with the same URL, and this way we keep all of them.\n\n Args:\n element (dict): The document as a dictionary.\n\n Returns:\n str: The constructed unique key made of Title and TextAtWeb\n \"\"\"\n unique_key = []\n if element['TextAtWeb'] is not None:\n # If TextAtWeb exists, we want it for uniqueness too.\n unique_key.append(element['TextAtWeb'][0]['Text'])\n unique_key.append(element['Title'][0]['Text'])\n return '_'.join(unique_key)\n\n @staticmethod\n def _localised_text(parent, name):\n if name in parent:\n parent[name] = parent[name]['Text']\n\n def _multilingual_m_text(self, parent, name):\n self._multilingual_text(parent, name)\n\n def _multilingual_text(self, parent, name):\n if name in parent:\n lang_obj = dict([(e['Language'], e['Text']) for e in parent[name]])\n parent[name] = lang_obj[self._language]\n\n def _sort_sub_themes(self, restrictions):\n # split restrictions by theme codes\n split_by_theme_code = self._split_restrictions_by_theme_code(restrictions)\n\n # sort sub themes of the same theme\n for theme_code in split_by_theme_code:\n sub_themes = []\n non_sub_themes = []\n for restriction in split_by_theme_code[theme_code]:\n if restriction.get('Split_SubTheme', False):\n sub_themes.append(restriction)\n else:\n non_sub_themes.append(restriction)\n # only sort if there are multiple sub themes\n if len(sub_themes) > 1:\n sorter, params = self._get_sorter(theme_code)\n sub_themes = sorter.sort(sub_themes, params)\n split_by_theme_code[theme_code] = non_sub_themes + sub_themes\n\n # sort + flatten the split themes again\n sorted_restrictions = []\n for theme in Config.get_themes():\n if theme.code in split_by_theme_code:\n sorted_restrictions += split_by_theme_code[theme.code]\n\n return sorted_restrictions\n\n @staticmethod\n def _split_restrictions_by_theme_code(restrictions):\n \"\"\"\n Args:\n restrictions (list): array of restrictions\n\n Returns:\n (dict) restrictions split up by theme code\n \"\"\"\n split_by_theme_code = {}\n for restriction in restrictions:\n theme_code = restriction.get('Theme_Code')\n if theme_code in split_by_theme_code:\n split_by_theme_code[theme_code].append(restriction)\n else:\n split_by_theme_code[theme_code] = [restriction]\n return split_by_theme_code\n\n @staticmethod\n def _load_sorter(module, class_name):\n \"\"\"\n Dynamically loads a (sorter) class from a module.\n\n Args:\n module (str): Module name to load\n class_name (str): Class name to load\n\n Returns:\n (object) Requested (sorter) class\n 
\"\"\"\n sorter_module = __import__(module, fromlist=[class_name])\n sorter = getattr(sorter_module, class_name)\n return sorter\n\n @staticmethod\n def _get_sorter(theme_code):\n \"\"\"\n Returns the sub theme sorter for the given theme_code.\n\n Args:\n theme_code (str): theme_code\n\n Returns:\n sorter: Sub theme sorter object\n params (dict): parameters for the sorter (from theme configuration)\n \"\"\"\n sorter_config = Config.get_sub_theme_sorter_config(theme_code)\n sorter = Renderer._load_sorter(sorter_config['module'], sorter_config['class_name'])\n params = sorter_config.get('params', {})\n return sorter, params\n","repo_name":"voisardf/pyramid_oereb_old","sub_path":"pyramid_oereb/contrib/print_proxy/mapfish_print.py","file_name":"mapfish_print.py","file_ext":"py","file_size_in_byte":25036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"} +{"seq_id":"74556968651","text":"# This file is part of ManG.\r\n\r\n# ManG is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n\r\n# ManG is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n\r\nfrom PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nclass MangSettings(QDialog):\r\n def __init__(self, parent = None, v_values = [5,4,5]):\r\n super(QDialog, self).__init__(parent)\r\n\r\n layout = QVBoxLayout(self)\r\n l1 = QHBoxLayout()\r\n l2 = QHBoxLayout()\r\n l3 = QHBoxLayout()\r\n layout.addLayout(l1)\r\n layout.addLayout(l2)\r\n layout.addLayout(l3)\r\n self.setWindowTitle(\"Interval in sec\")\r\n # nice widget for editing the date\r\n self.editPI = QLineEdit(str(v_values[0])) # Ping interval\r\n self.editPT = QLineEdit(str(v_values[1])) # Ping timeout\r\n self.editGI = QLineEdit(str(v_values[2])) # GUI refresh interval\r\n lbl1 = QLabel(\"Ping interval\")\r\n lbl2 = QLabel(\"Ping timeout\")\r\n lbl3 = QLabel(\"GUI refresh\")\r\n l1.addWidget(lbl1)\r\n l2.addWidget(lbl2)\r\n l3.addWidget(lbl3)\r\n l1.addWidget(self.editPI)\r\n l2.addWidget(self.editPT)\r\n l3.addWidget(self.editGI)\r\n\r\n # OK and Cancel buttons\r\n buttons = QDialogButtonBox(\r\n QDialogButtonBox.Ok | QDialogButtonBox.Cancel,\r\n Qt.Horizontal, self)\r\n buttons.accepted.connect(self.accept)\r\n buttons.rejected.connect(self.reject)\r\n layout.addWidget(buttons)\r\n\r\n def getVlalues(self):\r\n return (int(self.editPI.text()), int(self.editPT.text()), int(self.editGI.text()))\r\n\r\n @staticmethod\r\n def getSettings(parent = None):\r\n dialog = MangSettings(parent)\r\n result = dialog.exec_()\r\n k,l,m = dialog.getVlalues()\r\n return (k,l,m, result == QDialog.Accepted)\r\n\r\n","repo_name":"maksokami/ManG","sub_path":"mang_settings.py","file_name":"mang_settings.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"15"}