diff --git "a/592.jsonl" "b/592.jsonl" new file mode 100644--- /dev/null +++ "b/592.jsonl" @@ -0,0 +1,1732 @@ +{"seq_id":"28840502768","text":"# source for model https://gist.github.com/EternalSorrrow/f8af26a007b23ea32a50f250813a82e7\n\nimport numpy as np\nfrom keras.optimizers import SGD\nfrom keras.callbacks import ModelCheckpoint\nimport tensorflow as tf\nfrom keras.models import Model\nfrom keras.layers import Input, ZeroPadding2D, concatenate, add, SpatialDropout2D\nfrom keras.layers.core import Dropout, Activation\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.layers.pooling import AveragePooling2D, MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nimport keras.backend as K\n\n#Better model, contatining less parameters and converging faster\ndef dense_conv_block(x, growth_rate, name, use_bias, regularizer):\n x1 = BatchNormalization(name=name+'_bn')(x)\n x1 = Activation('relu', name=name+'_relu')(x1)\n x1 = Conv2D(growth_rate, (3,3), name=name+'_conv', padding='same',\n use_bias=use_bias, kernel_regularizer=regularizer)(x1)\n return concatenate([x, x1])\n\ndef dense_block(x, conv_blocks, growth_rate, name, use_bias, regularizer):\n for i in range(conv_blocks):\n block_name = name + '_' + str(i)\n x = dense_conv_block(x, growth_rate, name=block_name, use_bias=use_bias, regularizer=regularizer)\n return x\n\ndef transition_down(x):\n x = MaxPooling2D((2,2))(x)\n return x\n\ndef transition_up(x, name, reduction, use_bias, regularizer):\n x = BatchNormalization(name=name+'_bn')(x)\n x = Activation('relu')(x)\n x = Conv2D(int(K.int_shape(x)[3] * reduction), 1, name = name + '_1x1conv',\n use_bias=use_bias, kernel_regularizer=regularizer)(x)\n x = UpSampling2D((2, 2))(x)\n\n return x\n\ndef dense_stem(x, filters, name, use_bias, regularizer):\n x = Conv2D(filters, (3,3), padding='same', name=name+'_conv',\n use_bias = use_bias, kernel_regularizer = regularizer)(x)\n x = BatchNormalization(name = name + '_bn')(x)\n x = Activation('relu')(x)\n return x\n\ndef reduction(x, reduction, name, use_bias, regularizer):\n x = BatchNormalization(name = name + '_bn')(x)\n x = Activation('relu')(x)\n x = Conv2D(int(K.int_shape(x)[3] * reduction), (1,1), name=name+'_1x1conv',\n use_bias = use_bias, kernel_regularizer = regularizer)(x)\n return x\n\ndef Dense_UNet_small(model_input,num_classes, dropout=0.05, use_bias=True, regularizer=None):\n blocks = [4, 4, 4, 4, 4] #[12, 12, 12, 12, 12]\n growth_rate = [4, 8, 16, 32, 64]\n\n #Contracting path\n x = dense_stem(model_input, 16, 'stem', use_bias, regularizer)\n\n d1 = dense_block(x, blocks[0], growth_rate[0], 'dense1', use_bias, regularizer)\n p1 = transition_down(d1)\n p1 = SpatialDropout2D(dropout)(p1)\n\n d2 = dense_block(p1, blocks[1], growth_rate[1], 'dense2', use_bias, regularizer)\n p2 = transition_down(d2)\n p2 = SpatialDropout2D(dropout)(p2)\n\n d3 = dense_block(p2, blocks[2], growth_rate[2], 'dense3', use_bias, regularizer)\n p3 = transition_down(d3)\n p3 = SpatialDropout2D(dropout)(p3)\n\n d4 = dense_block(p3, blocks[3], growth_rate[3], 'dense4', use_bias, regularizer)\n p4 = transition_down(d4)\n p4 = SpatialDropout2D(dropout)(p4)\n\n d5 = dense_block(p4, blocks[4], growth_rate[4], 'dense5', use_bias, regularizer)\n d5 = SpatialDropout2D(dropout)(d5)\n\n #Expanding path\\n\",\n u1 = transition_up(d5, 'up1', 0.5, use_bias, regularizer)\n c1 = concatenate([u1, d4])\n c1 = SpatialDropout2D(dropout)(c1)\n r1 = reduction(c1, 0.25, 'reduction1', use_bias, regularizer)\n d6 = dense_block(r1, 
blocks[3], growth_rate[3], 'dense6', use_bias, regularizer)\n\n u2 = transition_up(d6, 'up2', 0.5, use_bias, regularizer)\n c2 = concatenate([u2, d3])\n c2= SpatialDropout2D(dropout)(c2)\n r2 = reduction(c2, 0.25, 'reduction2', use_bias, regularizer)\n d7 = dense_block(r2, blocks[2], growth_rate[2], 'dense7', use_bias, regularizer)\n\n\n u3 = transition_up(d7, 'up3', 0.5, use_bias, regularizer)\n c3 = concatenate([u3, d2])\n c3 = SpatialDropout2D(dropout)(c3)\n r3 = reduction(c3, 0.25, 'reduction3', use_bias, regularizer)\n d8 = dense_block(r3, blocks[1], growth_rate[1], 'dense8', use_bias, regularizer)\n\n\n u4 = transition_up(d8, 'up4', 0.5, use_bias, regularizer)\n c4 = concatenate([u4, d1])\n c4 = SpatialDropout2D(dropout)(c4)\n r4 = reduction(c4, 0.25, 'reduction4', use_bias, regularizer)\n d9 = dense_block(r4, blocks[0], growth_rate[0], 'dense9', use_bias, regularizer)\n\n r5 = reduction(d9, 0.25, 'reduction5', use_bias, regularizer)\n c5 = concatenate([r5, x])\n d10 = dense_block(r4, blocks[0], growth_rate[0], 'dense10', use_bias, regularizer)\n\n outputs = Conv2D(num_classes, 1, activation='softmax', name='output')(d10)\n model = Model(model_input, outputs)\n\n return model","repo_name":"HypnoOcio/kits19","sub_path":"models/denseUNet_small.py","file_name":"denseUNet_small.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"20910743359","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport q\n\n\nclass TestController(unittest.TestCase):\n\n \"\"\"Test case pour tester la methode sousChaine().\"\"\"\n\n def test_exist(self):\n if not hasattr(q, 'sousChaine'):\n self.fail(_(\"La methode 'sousChaine()' n´est pas définie.\"))\n\n def test_arg(self):\n try:\n q.sousChaine(\"hhsyygajjzjehhrtdy\", \"azerty\")\n except:\n self.fail(_(\"Votre code relève une exception lors de l´utilisation d´une chaines de caractère normale.\"))\n \n def test_excep(self):\n try:\n q.sousChaine(\"\", \"\")\n q.sousChaine(\"azerty\", \"\")\n q.sousChaine(\"\", \"azerty\")\n except:\n self.fail(_(\"Votre code relève une exception lors de l´utilisation de chaines de caractères vides.\"))\n\n def test_exemple(self):\n if not q.sousChaine(\"dcUjnfakCvfLcgah\", \"UCL\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat pour l´exemple x='dcUjnfakCvfLcgah' et y='UCL'\\nAttendu: True | Reçu: False\"))\n\n if q.sousChaine(\"dcLjnfUakCvfcgah\", \"UCL\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat pour l´exemple x='dcLjnfUakCvfcgah' et y='UCL'\\nAttendu: False | Reçu: True\"))\n\n def test_cases(self):\n #normal case\n if not q.sousChaine(\"hhsyygajjzjehhrtdy\", \"azerty\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat.\\nAttendu: True | Reçu: False\"))\n\n if q.sousChaine(\"hhsyygajjzjeaze\", \"azersdty\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat.\\nAttendu: False | Reçu: True\"))\n\n #Borders cases\n if q.sousChaine(\"hhsyyg\", \"azersdtyzer\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat.\\nAttendu: False | Reçu: True\"))\n\n if not q.sousChaine(\"\", \"\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat lors de l´utilisation de chaines de caractères vides.\\nAttendu: True | Reçu: False\"))\n\n if q.sousChaine(\"\", \"azerty\"):\n self.fail(_(\"Votre code ne donne pas le bon résultat.\\nAttendu: False | Reçu: True\"))\n\n if not q.sousChaine(\"esrdtfbygnhoj\", \"\"):\n self.fail(_(\"Votre code ne donne pas le bon 
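# --- reviewer sketch (illustrative; not one of the dataset records) ---
# In the Dense_UNet_small record above, every dense_conv_block concatenates a
# growth_rate-channel conv output onto its input, so a dense_block widens its
# input by conv_blocks * growth_rate channels. A framework-free check:
def dense_block_channels(in_channels, conv_blocks, growth_rate):
    # each of the conv_blocks concatenations adds growth_rate channels
    return in_channels + conv_blocks * growth_rate

assert dense_block_channels(16, 4, 4) == 32   # stem (16 ch) -> d1
# Note: that record builds 'dense10' from r4 rather than from the c5 skip
# concatenation computed just before it, which looks like an upstream bug.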
résultat.\\nAttendu: True | Reçu: False\"))\n\n \n \nif __name__ == '__main__':\n unittest.main()\n","repo_name":"OpenWeek/inginious-task-LINGE","sub_path":"TP6Ex4/src/TestQ.py","file_name":"TestQ.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73283817443","text":"# Use the 'File' menu above to 'Save' after pasting in your own mm_shared function definition.\nimport numpy as np\nfrom numba import cuda, types\n\n@cuda.jit\ndef mm_shared(a, b, c):\n column, row = cuda.grid(2)\n sum = 0\n \n a_cache = cuda.shared.array(block_size, types.int32)\n b_cache = cuda.shared.array(block_size, types.int32)\n\n tx = cuda.threadIdx.x\n ty = cuda.threadIdx.y\n\n for i in range(a.shape[0]):\n # Preload data into shared memory\n a_cache[tx][ty] = a[column][ty + i * N]\n b_cache[tx][ty] = b[tx + i * N][ row]\n\n # Wait until all threads finish preloading\n cuda.syncthreads()\n\n # Computes partial product on the shared memory\n for j in range(a.shape[1]):\n sum += a_cache[tx][j] * b_cache[j][ty]\n\n # Wait until all threads finish computing\n cuda.syncthreads()\n\n c[column][row] = sum\n\n","repo_name":"Ethereal-Coder/CUDA-Python-with-Numba","sub_path":"tasks/task3/task/assessment/definition.py","file_name":"definition.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18708200883","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport re\nN = int(input())\na = []\nfor i in range(N):\n st = input()\n if(re.findall(r'^[789]\\d{9}$',st)):\n a.append(\"YES\")\n else:\n a.append(\"NO\")\nfor i in a:\n print(i)\n","repo_name":"KASHYPPRABHAT254/HACKERRANK-CODE","sub_path":"Validating_phone_number.py","file_name":"Validating_phone_number.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36498152675","text":"# -*- coding: utf-8 -*-\n\n# courses.py\n# Jennifer Wang, Isabelle Li, Shan Lu\n# CS 230 Final Project\n# Scraping courses information from course browser using library Selenium\n# Modified by: slu5\n# Modified date: 04/22/17\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport os,time\nfrom pprint import pprint\nimport json\n\n\ndriver = webdriver.Chrome(executable_path='/Users/Regina/Desktop/webdriver/chromedriver.exe')\ndriver.get(\"https://courses.wellesley.edu\")\ncourses = driver.find_elements_by_xpath(\"//div[@class='coursecode']\")\nnames = driver.find_elements_by_xpath(\"//div[@class='coursename_small']\")\nprofessors = driver.find_elements_by_xpath(\"//div[@class='professorname']\")\n\n\ndef parse1(info):\n checker = info.split()[1]\n start = 0\n if checker.isdigit():\n start = info.index(checker) \n for i in range(start,len(info)-2):\n if info[i].isupper() and info[i+1].isupper():\n if info[i:i+2] == \"II\":\n return (info[:i+2],info[i+2:])\n elif (info[i] == \"I\" and info[i:i+4] != \"IGOR\" and \n info[i:i+5] != \"ISMAR\" and info[i:i+5] != \"IRENE\"\n and info[i:i+6] != \"ISABEL\" and info[i:i+7] != \"IFEANYI\"\n and info[i:i+5] != \"INELA\" and info[i:i+3] != \"INA\"):\n return (info[:i+1],info[i+1:])\n return (info[:i],info[i:])\n return (info,'N/A')\n\ndef parse2(info):\n l = info.split()\n res = (l[0],l[1],l[3],l[4][1:6])\n if l[5] != '-':\n start = info.index(\")\")+2\n end = info.index(\"CURRENT\")-2\n res += 
((info[start:end],))\n else:\n res += (('N/A',)) \n return res\n \nparsed1 = [parse1(name.text) for name in names]\nparsed2 = [parse2(course.text) for course in courses]\n\nparsed = [(parsed1[i]+parsed2[i]) for i in range(749)]\n\ninfoList = [{\"Subject\":c[2],\"Number\":c[3],\"Name\":c[0],\"Session\":c[4],\"Professor\":c[1],\"CRN\":c[5],\"Time\":c[6]} for c in parsed]\n\ndef writeJSON(filename,content):\n with open(filename, 'w') as fw:\n json.dump(content, fw, sort_keys= True, indent=2)\n\nwriteJSON(\"CourseInfo.json\",infoList)\n","repo_name":"isabelleli/coursepicker","sub_path":"courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9456083012","text":"import os\nfrom flask import Flask, jsonify, request\nfrom privacyspy import Spy\n\n\napp = Flask(__name__)\nspy = Spy()\n\n\n@app.route(\"/\")\ndef root():\n return jsonify({\n \"error\": \"No API request received. See documentation for details.\"\n })\n\n\n@app.route(\"/analyze\", methods=[\"GET\", \"POST\"])\ndef analyze():\n token = None\n if request.method == \"GET\":\n token = request.args.get(\"token\")\n elif request.method == \"POST\":\n token = request.form[\"token\"]\n\n if token == None:\n return Spy.output(\"No token provided.\", error=True, errorCode=2)\n elif token != os.environ[\"privacyspy_token\"]:\n return Spy.output(\"Invalid token.\", error=True, errorCode=3)\n\n article = None\n\n if request.method == \"GET\":\n url = request.args.get(\"url\")\n if url == None:\n return Spy.output(\"No URL provided.\", error=True, errorCode=1)\n try:\n article = spy.extract_policy_from_url(url=url)\n except:\n return Spy.output(\"Failed to extract a privacy policy from URL.\", error=True, errorCode=4)\n\n if request.method == \"POST\":\n article = request.form[\"text\"]\n\n # Checking for English not necessary\n analysis = spy.privacy_policy_summary(article)\n return Spy.output(analysis)\n\n\napp.run(host=\"0.0.0.0\", port=5000, debug=os.environ.get(\"DEBUG\", \"False\") == \"True\")\n","repo_name":"Politiwatch/PrivacySpy-API","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8343789595","text":"#!/usr/bin/env python3\n\n#############################################################\n# IMPORTS #\n#############################################################\nfrom logger import *\nfrom droneEightFigure import droneEightFigure\nfrom scanMission import scanMission\nimport os\nimport threading\nimport json\nimport asyncio\n\n#############################################################\n# MAIN #\n#############################################################\nclass missionsHandler(threading.Thread):\n #######################################################################\n # Prints log messages\n def printLog(self, message=\"\", typeMessage=\"info\"):\n if typeMessage == \"info\":\n logging.info(message)\n elif typeMessage == \"error\":\n logging.error(message)\n\n #######################################################################\n # Mission handler Init\n def __init__(self, arrUAVs):\n threading.Thread.__init__(self)\n print(\"AAA\")\n outputFile = \"mission_handler.log\"\n config_root_logger(log_file=outputFile)\n print(\"BBBB\")\n #self.printLog(message=\"Logger initialized\", typeMessage=\"info\")\n self.arrUAVs = arrUAVs\n #self.channel_layer = 
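# --- reviewer sketch (illustrative; not one of the dataset records) ---
# The PrivacySpy record reads its token from the query string on GET and from
# form data on POST. The same dispatch in isolation; args/form are plain-dict
# stand-ins for flask.request.args / flask.request.form (and PEP 8 would
# prefer 'is None' over the record's '== None'):
def get_param(method, args, form, name):
    if method == "GET":
        return args.get(name)
    if method == "POST":
        return form.get(name)
    return None

assert get_param("GET", {"token": "abc"}, {}, "token") == "abc"
assert get_param("POST", {}, {"token": "xyz"}, "token") == "xyz"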
get_channel_layer()\n #self.groupName = \"uav\"\n print(\"CCCC\")\n\n\n def run(self):\n self.printLog(message=\"Run executed\", typeMessage=\"info\")\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)\n self.loop.run_until_complete(self.mission())\n self.loop.close()\n\n\n async def mission(self):\n print(\"EEE\")\n self.printLog(message=\"AAAA\", typeMessage=\"info\")\n #message = {\n # 'type': 'test',\n # 'message': \"event_trigered_from_views\"\n #}\n #await self.channel_layer.group_send(self.groupName, message)\n #self.printLog(message=\"AAVVV\", typeMessage=\"info\")\n print(\"FFF\")\n\n try:\n self.printLog(message=\"Starting mission handler\", typeMessage=\"info\")\n # initialize all Drone processes\n self.printLog(message=\"Creating control primitives\", typeMessage=\"info\")\n barrierArm = threading.Barrier(len(self.arrUAVs)) # Barrier where all thread will wait to continue sincronized\n barrierMission = threading.Barrier(len(self.arrUAVs)) # Barrier where all thread will wait to continue sincronized\n e = threading.Event() # Event used as signal where some threads will continue its execution\n lock = threading.Lock() # Lock to let each variable to securely access global variables\n self.threads = []\n\n self.printLog(message=\"Creating each drone mission\", typeMessage=\"info\")\n for i, drone in enumerate(self.arrUAVs):\n # Se define la misión según la selección en la web\n \"\"\"\n if drone['General']['missionDrone'] == \"Eight figure\":\n print(\"Mission Eight figure\")\n droneName = drone['General']['nameDrone']\n camerasName = []\n for j, cameraName in enumerate(drone['Cameras']):\n camerasName.append(drone['Cameras'][cameraName]['General']['CameraName'])\n FlightAltitude = float( drone['Mission']['FlightAltitude'] )\n Rate = float( drone['Mission']['Rate'] )\n Radius = float( drone['Mission']['Radius'] )\n SecondsPerCycle = float( drone['Mission']['SecondsPerCycle'] )\n # Threading variables\n #barrierArm = threading.Barrier(len(arrUAVs)) # Barrier where all thread will wait to continue sincronized\n #barrierMission = threading.Barrier(len(arrUAVs)) # Barrier where all thread will wait to continue sincronized\n #e = threading.Event() # Event used as signal where some threads will continue its execution\n #lock = threading.Lock() # Lock to let each variable to securely access global variables\n \"\"\"\n #mission = droneEightFigure(drone_id=i+1, droneName=\"Drone1\", camerasName=[\"front\"], FLIGHT_ALTITUDE=30, RATE=10, RADIUS=20, CYCLE_S=5)\n #else:\n # mission = drone_lazaro(drone_id=i+1, barrierArm=barrierArm, barrierMission=barrierMission, lock=lock, e=e)\n \n mission = scanMission(drone_id=i+1, droneName=\"Drone1\",\n camerasName=[\"front\"], FLIGHT_ALTITUDE=30, RATE=10, RADIUS=20, CYCLE_S=5,\n origin=[40.544289,-4.012101],dest=[40.545148,-4.011331])\n \n mission.name = \"Drone\"+str(i) #drone['General']['nameDrone']\n self.threads.append(mission)\n\n self.printLog(message=\"Starting missions: \", typeMessage=\"info\")\n for mission in self.threads:\n mission.start()\n mission.join()\n self.printLog(message=\"Processes Joined: \", typeMessage=\"info\")\n\n except Exception as e:\n self.printLog(typeMessage=\"error\", message=PrintException())","repo_name":"Jorgelzn/UAV-3D-Reconstruction","sub_path":"mavsdk/web mission/missionsHandler.py","file_name":"missionsHandler.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"73696148001","text":"import 
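# --- reviewer sketch (illustrative; not one of the dataset records) ---
# The missionsHandler record runs an asyncio coroutine inside a worker thread
# by giving the thread its own event loop. The core pattern, reduced to a
# runnable minimum (Mission/mission are placeholder names, not the record's):
import asyncio
import threading

class Mission(threading.Thread):
    def run(self):
        loop = asyncio.new_event_loop()      # per-thread loop, as in the record
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self.mission())
        finally:
            loop.close()                     # the record closes its loop too

    async def mission(self):
        await asyncio.sleep(0)               # placeholder coroutine body

t = Mission()
t.start()
t.join()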
sys\r\nfrom collections import Counter\r\nfrom itertools import permutations\r\n\r\ndef load_dictionary(file):\r\n try:\r\n with open(file) as f:\r\n text = f.read().strip().split('\\n')\r\n text = [x.lower() for x in text]\r\n return text\r\n except IOError as e:\r\n print(\"{}\\nError opening {}. Terminating program.\".format(e, file), file=sys.stderr)\r\n sys.exit(1)\r\n \r\n\r\ndef main():\r\n name = 'tmvoordle'\r\n name = name.lower()\r\n \r\n word_list = load_dictionary('words.txt')\r\n trigrams = load_dictionary('least_likely_trigraphs.txt')\r\n \r\n words = prep_words(name, word_list)\r\n cv_map = cv_map_words(words)\r\n filter1 = cv_map_filter(name, cv_map)\r\n filter2 = trigram_filter(filter1, trigrams)\r\n filter3 = letter_pair_filter(filter2)\r\n view_by_letter(name, filter3)\r\n \r\ndef prep_words(name, word_list):\r\n print(\"Length of initial word list: {}\".format(len(word_list)))\r\n len_name = len(name)\r\n words = [word.lower() for word in word_list if len(word) == len_name]\r\n print(\"Length of new word list: {}\".format(len(words)))\r\n return words\r\n\r\ndef cv_map_words(words):\r\n #Maps letters in words to consonants(c)/vowels(v)\r\n vowels = 'aeiouy'\r\n cv_mapped_words = []\r\n for word in words:\r\n temp = ''\r\n for letter in word:\r\n if letter in vowels:\r\n temp += 'v'\r\n else:\r\n temp += 'c'\r\n cv_mapped_words.append(temp)\r\n \r\n total = len(set(cv_mapped_words))\r\n target = 0.05\r\n n = int(total * target)\r\n count_pruned = Counter(cv_mapped_words).most_common(total - n)\r\n filtered_cv_map = set()\r\n for pattern, count in count_pruned:\r\n filtered_cv_map.add(pattern)\r\n print(\"Length filtered CV map: {}\".format(len(filtered_cv_map)))\r\n return filtered_cv_map\r\n\r\ndef cv_map_filter(name, filtered_cv_map):\r\n perms = {''.join(i) for i in permutations(name)}\r\n print(\"Length of initial permutations set: {}\".format(len(perms)))\r\n vowels = 'aeiouy'\r\n filter1 = set()\r\n for candidate in perms:\r\n temp = ''\r\n for letter in candidate:\r\n if letter in vowels:\r\n temp += 'v'\r\n else:\r\n temp += 'c'\r\n if temp in filtered_cv_map:\r\n filter1.add(candidate)\r\n print(\"Number of choices after filter 1: {}\".format(len(filter1)))\r\n return filter1\r\n\r\ndef trigram_filter(filter1, trigrams_filtered):\r\n filtered = set()\r\n for candidate in filter1:\r\n for triplet in trigrams_filtered:\r\n triplet = triplet.lower()\r\n if triplet in candidate:\r\n filtered.add(candidate)\r\n filter2 = filter1 - filtered\r\n print(\"Number of choices after filter 2: {}\".format(len(filter2)))\r\n return filter2\r\n\r\ndef letter_pair_filter(filter2):\r\n filtered = set()\r\n rejects = ['dt', 'lr', 'md', 'ml', 'mr', 'mt', 'mv', 'td', 'tv', 'vd', 'vl', 'vm', 'vr', 'vt']\r\n start_letter_rejects = ['ld', 'lm', 'lt', 'lv', 'rd', 'rl', 'rm', 'rt', 'rv', 'tl', 'tm']\r\n for candidate in filter2:\r\n for r in rejects:\r\n if r in candidate:\r\n filtered.add(candidate)\r\n for start in start_letter_rejects:\r\n if candidate.startswith(start):\r\n filtered.add(candidate)\r\n filter3 = filter2 - filtered\r\n print(\"Number of choices after filter 3: {}\".format(len(filter3)))\r\n \r\n if 'voldemort' in filter3:\r\n print(\"Voldemort found!\")\r\n \r\n return filter3\r\n\r\ndef view_by_letter(name, filter3):\r\n print(\"Remaining Letters: {}\".format(name))\r\n first = input(\"Select a starting letter or press 'enter' to see all: \")\r\n subset = []\r\n for candidate in filter3:\r\n if candidate.startswith(first):\r\n subset.append(candidate)\r\n 
print(*sorted(subset), sep='\\n')\r\n print(\"Number of choices starting fit {}: {}\".format(first, len(subset)))\r\n \r\n again = input(\"Go again? (Press enter to continue or 'n' to exit)\")\r\n if again.lower() == '':\r\n view_by_letter(name, filter3)\r\n elif again.lower.startswith('n'):\r\n sys.exit()\r\n else:\r\n sys.exit()\r\n \r\nif __name__ == '__main__':\r\n main()","repo_name":"athomsen115/Playing-with-Words","sub_path":"Anagrams/voldemort_anagram.py","file_name":"voldemort_anagram.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30687516075","text":"\nimport csv\nfrom io import StringIO\nfrom pathlib import Path\nfrom t2wml.spreadsheets.conversions import to_excel\nfrom t2wml.outputs.datamart_edges import (clean_id, create_metadata_for_custom_qnode, create_metadata_for_project, create_metadata_for_variable, \n create_metadata_for_qualifier_property, link_statement_to_dataset)\nfrom t2wml.input_processing.utils import VALID_PROPERTY_TYPES\nimport t2wml.utils.t2wml_exceptions as T2WMLExceptions\nfrom t2wml.wikification.utility_functions import get_property_type\nfrom t2wml.wikification.utility_functions import kgtk_to_dict\nfrom t2wml.utils.debug_logging import basic_debug\n\nclass EmptyValueException(Exception):\n pass\n\ndef enclose_in_quotes(value):\n if value != \"\" and value is not None:\n return \"\\\"\"+str(value.replace('\"','\\\\\"'))+\"\\\"\"\n return \"\"\n\n\ndef kgtk_add_property_type_specific_fields(property_dict, result_dict):\n property_type = get_property_type(property_dict[\"property\"])\n\n if property_type not in VALID_PROPERTY_TYPES:\n raise T2WMLExceptions.UnsupportedPropertyType(\n \"Property type \"+property_type+\" is not currently supported\" + \"(\" + property_dict[\"property\"] + \")\")\n\n\n # The only property that doesn't require value\n if property_type == \"globecoordinate\":\n '''\n node2;kgtk:latitude: for coordinates, the latitude\n node2;kgtk:longitude: for coordinates, the longitude\n '''\n result_dict[\"node2;kgtk:data_type\"] = \"location_coordinates\"\n result_dict[\"node2;kgtk:latitude\"] = property_dict[\"latitude\"]\n result_dict[\"node2;kgtk:longitude\"] = property_dict[\"longitude\"]\n result_dict[\"node2;kgtk:precision\"] = property_dict.get(\n \"precision\", \"\")\n result_dict[\"node2;kgtk:globe\"] = property_dict.get(\"globe\", \"\")\n\n else:\n try:\n value = property_dict[\"value\"]\n result_dict[\"node2\"] = value\n except:\n raise EmptyValueException(f'Cell {property_dict[\"cell\"]} has no value')\n\n if property_type == \"quantity\":\n '''\n node2;kgtk:magnitude: for quantities, the number\n node2;kgtk:units_node: for quantities, the unit\n node2;kgtk:low_tolerance: for quantities, the lower bound of the value (cannot do it in T2WML yet)\n node2;kgtk:high_tolerance: for quantities, the upper bound of the value (cannot do it in T2WML yet)\n '''\n result_dict[\"node2;kgtk:data_type\"] = \"quantity\"\n result_dict[\"node2;kgtk:number\"] = value\n result_dict[\"node2;kgtk:units_node\"] = property_dict.get(\n \"unit\", \"\")\n result_dict[\"node2;kgtk:low_tolerance\"] = property_dict.get(\n \"lower-bound\", \"\")\n result_dict[\"node2;kgtk:high_tolerance\"] = property_dict.get(\n \"upper-bound\", \"\")\n\n elif property_type == \"time\":\n '''\n node2;kgtk:date_and_time: for dates, the ISO-formatted data\n node2;kgtk:precision: for dates, the precision, as an integer (need to verify this with KGTK folks, could be that we 
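# --- reviewer sketch (illustrative; not one of the dataset records) ---
# The anagram record prunes the 9! permutations of 'tmvoordle' with a
# consonant/vowel signature before its trigram and letter-pair filters.
# The signature step on its own:
def cv_map(word, vowels="aeiouy"):
    # map each letter to 'c' or 'v'; permutations whose signature falls in
    # the rarest 5% are discarded by the record's cv_map_filter
    return "".join("v" if ch in vowels else "c" for ch in word)

assert cv_map("voldemort") == "cvccvcvcc"
# Note: the record's exit path calls 'again.lower.startswith' without calling
# lower(), which looks like an upstream bug in the scraped source.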
use human readable strings such as year, month\n node2;kgtk:calendar: for dates, the qnode of the calendar, if specified\n '''\n result_dict[\"node2;kgtk:data_type\"] = \"date_and_times\"\n result_dict[\"node2;kgtk:date_and_time\"] = enclose_in_quotes(value)\n result_dict[\"node2;kgtk:precision\"] = property_dict.get(\n \"precision\", \"\")\n result_dict[\"node2;kgtk:calendar\"] = property_dict.get(\n \"calendar\", \"\")\n\n elif property_type in [\"string\", \"monolingualtext\", \"externalid\", \"url\"]:\n '''\n node2;kgtk:text: for text, the text without the language tag\n node2;kgtk:language: for text, the language tag\n '''\n result_dict[\"node2;kgtk:data_type\"] = \"string\"\n result_dict[\"node2;kgtk:text\"] = enclose_in_quotes(value)\n result_dict[\"node2;kgtk:language\"] = enclose_in_quotes(\n property_dict.get(\"lang\", \"\"))\n\n elif property_type in [\"wikibaseitem\", \"wikibaseproperty\"]:\n '''\n node2;kgtk:symbol: when node2 is another item, the item goes here\"\n '''\n result_dict[\"node2;kgtk:data_type\"] = \"symbol\"\n result_dict[\"node2;kgtk:symbol\"] = value\n\n\ndef handle_additional_edges(project, statements):\n tsv_data=[]\n tsv_data+=create_metadata_for_project(project)\n variable_ids=set()\n qualifier_ids=set([\"P585\", \"P248\"])\n qnode_ids=set()\n entity_dict={}\n for file in project.entity_files:\n full_path=project.get_full_path(file)\n entity_dict.update(kgtk_to_dict(full_path))\n\n for cell, statement in statements.items():\n variable=statement[\"property\"]\n if variable not in variable_ids:\n variable_ids.add(variable)\n variable_dict=entity_dict.get(variable, None)\n if variable_dict is not None:\n label=variable_dict.get(\"label\", variable)\n description=variable_dict.get(\"description\", \"\")\n data_type=variable_dict.get(\"data_type\", \"quantity\")\n if data_type.lower()==\"wikibaseitem\":\n qnode_ids.add(statement[\"value\"])\n tags=variable_dict.get(\"tags\", [])\n #TODO: P31?\n tsv_data+=create_metadata_for_variable(project, variable, label, description, data_type, tags)\n\n qualifiers=statement.get(\"qualifier\", [])\n for qualifier in qualifiers:\n property=qualifier[\"property\"]\n if property not in qualifier_ids:\n qualifier_ids.add(property)\n variable_dict=entity_dict.get(property, {})\n if True:# variable_dict is not None:\n label=variable_dict.get(\"label\", \"A \"+property)\n #description=variable_dict.get(\"description\", variable+\" relation\")\n data_type=variable_dict.get(\"data_type\", \"string\")\n if data_type.lower()==\"wikibaseitem\":\n qnode_ids.add(qualifier[\"value\"])\n tsv_data+=create_metadata_for_qualifier_property(project, variable, property, label, data_type)\n \n subject=statement[\"subject\"]\n if subject not in qnode_ids:\n qnode_ids.add(subject)\n #TODO\n \n for qnode_id in qnode_ids:\n variable_dict=entity_dict.get(qnode_id, {})\n if variable_dict:\n label=variable_dict.get(\"label\", qnode_id)\n tsv_data+=create_metadata_for_custom_qnode(qnode_id, label)\n\n for result_dict in tsv_data:\n property_type=result_dict.pop(\"type\")\n result_dict[\"node2;kgtk:data_type\"]=property_type\n value=result_dict[\"node2\"]\n\n if property_type == \"quantity\":\n result_dict[\"node2;kgtk:number\"] = value\n\n elif property_type == \"date_and_times\":\n result_dict[\"node2;kgtk:date_and_time\"] = enclose_in_quotes(value)\n\n elif property_type == \"string\":\n result_dict[\"node2;kgtk:text\"] = enclose_in_quotes(value)\n\n elif property_type == \"symbol\":\n result_dict[\"node2;kgtk:symbol\"] = value\n return 
tsv_data\n\n#@basic_debug\ndef create_kgtk(statements, file_path, sheet_name, project=None):\n file_name = Path(file_path).name\n\n file_extension = Path(file_path).suffix\n if file_extension == \".csv\":\n sheet_name = \"\"\n else:\n sheet_name = \".\"+sheet_name\n\n tsv_data = []\n\n if project:\n tsv_data+=handle_additional_edges(project, statements)\n \n error_cells={}\n\n for cell, statement in statements.items():\n try:\n id = file_name + sheet_name + \";\" + to_excel(cell[0], cell[1])\n\n if project:\n tsv_data.append(link_statement_to_dataset(project, id))\n\n cell_result_dict = dict(\n id=id, node1=statement[\"subject\"], label=statement[\"property\"])\n kgtk_add_property_type_specific_fields(statement, cell_result_dict)\n tsv_data.append(cell_result_dict)\n\n qualifiers = statement.get(\"qualifier\", [])\n for qualifier in qualifiers:\n qualifier_result_dict = dict(id=id+\"-\"+qualifier[\"property\"],\n node1=id, label=qualifier[\"property\"])\n\n try:\n kgtk_add_property_type_specific_fields(\n qualifier, qualifier_result_dict)\n tsv_data.append(qualifier_result_dict)\n except EmptyValueException:\n # Allow missing qualifier values\n pass\n\n\n\n references = statement.get(\"reference\", [])\n # todo: handle references\n except Exception as e:\n error_cells[cell]=str(e)\n\n string_stream = StringIO(\"\", newline=\"\")\n fieldnames = [\"id\", \"node1\", \"label\", \"node2\", \"node2;kgtk:data_type\",\n \"node2;kgtk:number\", \"node2;kgtk:low_tolerance\", \"node2;kgtk:high_tolerance\", \"node2;kgtk:units_node\",\n \"node2;kgtk:date_and_time\", \"node2;kgtk:precision\", \"node2;kgtk:calendar\",\n \"node2;kgtk:truth\",\n \"node2;kgtk:symbol\",\n \"node2;kgtk:latitude\", \"node2;kgtk:longitude\", \"node2;kgtk:globe\",\n \"node2;kgtk:text\", \"node2;kgtk:language\", ]\n\n writer = csv.DictWriter(string_stream, fieldnames,\n restval=\"\", delimiter=\"\\t\", lineterminator=\"\\n\",\n escapechar='', quotechar='',\n dialect=csv.unix_dialect, quoting=csv.QUOTE_NONE)\n writer.writeheader()\n for entry in tsv_data:\n writer.writerow(entry)\n\n output = string_stream.getvalue()\n string_stream.close()\n return output\n\n#@basic_debug\ndef get_all_variables(project, statements, validate_for_datamart=False):\n tsv_data=[]\n tsv_data+=create_metadata_for_project(project)\n variable_set=set()\n variable_ids=set()\n entity_dict={}\n for file in project.entity_files:\n full_path=project.get_full_path(file)\n entity_dict.update(kgtk_to_dict(full_path))\n\n for cell, statement in statements.items():\n variable=statement[\"property\"]\n property_type = get_property_type(variable)\n if validate_for_datamart:\n if property_type!=\"quantity\":\n raise T2WMLExceptions.InvalidDatamartVariables(\"A valid datamart variable must be of type quantity\")\n if variable not in variable_set:\n variable_set.add(variable)\n variable_dict=entity_dict.get(variable, None)\n if variable_dict is not None:\n label=variable_dict.get(\"label\", variable)\n variable_id=clean_id(label)\n variable_ids.add(variable_id)\n return variable_ids","repo_name":"usc-isi-i2/t2wml-api","sub_path":"t2wml/outputs/kgtk.py","file_name":"kgtk.py","file_ext":"py","file_size_in_byte":10925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13560312976","text":"from ome_types.model import Image, Pixels, Channel, OME\nfrom ome_types.model.simple_types import UnitsLength\n\npixel_metadata = {\n \"size_c\": 1000,\n \"size_y\": 1000,\n \"size_x\": 1000,\n \"size_z\": 1,\n \"size_t\": 1,\n 
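# --- reviewer sketch (illustrative; not one of the dataset records) ---
# create_kgtk in the record above serializes KGTK edges as tab-separated rows
# through csv.DictWriter, relying on restval="" so columns absent from a row
# stay empty. The serialization step alone, with the record's exotic
# QUOTE_NONE/escapechar overrides omitted; the row values are made up:
import csv
import io

stream = io.StringIO()
writer = csv.DictWriter(stream, ["id", "node1", "label", "node2"],
                        restval="", delimiter="\t", lineterminator="\n")
writer.writeheader()
writer.writerow({"id": "f.xlsx.Sheet1;A1", "node1": "Q42", "label": "P31"})
print(stream.getvalue())   # the missing 'node2' column is emitted empty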
\"dimension_order\": \"XYCZT\",\n \"type\": \"uint16\",\n \"physical_size_x\": 0.65,\n \"physical_size_y\": 0.65,\n}\nchannel_meta_list = [{\"name\": \"t1\", \"SamplesPerPixel\": 1, \"color\": \"FF00FF5B\"}]\n\n\ndef generate_ome(image_name, pixel_metadata: dict, channel_meta_list: list):\n pixel_metadata.update(\n {\n \"id\": \"Pixels:0\",\n \"physical_size_x_unit\": UnitsLength.MICROMETER,\n \"physical_size_y_unit\": UnitsLength.MICROMETER,\n \"metadata_only\": True,\n }\n )\n ome = OME()\n ome.images.append(\n Image(\n id=\"Image:0\",\n name=image_name,\n pixels=Pixels(\n **pixel_metadata,\n ),\n )\n )\n\n for idx, ch in enumerate(channel_meta_list):\n ch.update({\"id\": f\"Channel:{idx}\"})\n ome.images[0].pixels.channels.append(Channel(**ch))\n\n return ome\n","repo_name":"NHPatterson/napari-imsmicrolink","sub_path":"src/napari_imsmicrolink/utils/ome.py","file_name":"ome.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"40597868691","text":"import math, numpy\r\n\r\n# Free off-diagonal density matrix\r\ndef rho_free(x, xp, beta):\r\n return (math.exp(-(x - xp) ** 2 / (2.0 * beta)) /\r\n math.sqrt(2.0 * math.pi * beta))\r\n\r\n# Harmonic density matrix in the Trotter approximation (returns the full matrix)\r\ndef rho_harmonic_trotter(grid, beta):\r\n return numpy.array([[rho_free(x, xp, beta) * \\\r\n numpy.exp(-0.5 * beta * 0.5 * (x ** 2 + xp ** 2)) \\\r\n for x in grid] for xp in grid])\r\n\r\nx_max = 5.0\r\nnx = 100\r\ndx = 2.0 * x_max / (nx - 1)\r\nx = [i * dx for i in range(-(nx - 1) // 2, nx // 2 + 1)]\r\nbeta_tmp = 2.0 ** (-5) # initial value of beta (power of 2)\r\nbeta = 2.0 ** 4 # actual value of beta (power of 2)\r\nrho = rho_harmonic_trotter(x, beta_tmp) # density matrix at initial beta\r\nwhile beta_tmp < beta:\r\n rho = numpy.dot(rho, rho)\r\n rho *= dx\r\n beta_tmp *= 2.0\r\n print ('beta: %s -> %s' % (beta_tmp / 2.0, beta_tmp))\r\n","repo_name":"weka511/smac","sub_path":"lecture_5/matrix_square_harmonic.py","file_name":"matrix_square_harmonic.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"41354484991","text":"from cdips.vetting.centroid_analysis import make_wget_script\nimport os, shutil\nfrom glob import glob\n\nsector = 7\ntfasrdir = (\n \"/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}_TFA_SR\".\n format(sector)\n)\noutdir = (\n \"/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_cutouts/sector-{}_TFA_SR\".\n format(sector)\n)\n\nif not os.path.exists(outdir):\n make_wget_script(tfasrdir, xlen_px=10, ylen_px=10, tesscutvernum=0.1)\n raise Exception('need to manually run wget script')\n\n# To execute, run\n#\n# ./wget_the_TCE_cutouts.sh &> wget_log.txt &\n#\n# from the relevant directory. downloads about 20 cutouts per minute. for say\n# 600 cutous, takes half an hour. You probably want to run a \"check_wget.sh\"\n# type script as well.\n#\n# This DLs zip files. Then:\n\nfficutpaths = glob(os.path.join(outdir, 'astrocut*All'))\n\nfor fp in fficutpaths:\n if not os.path.exists(fp+'.zip'):\n shutil.move(fp, fp+'.zip')\n print('append .zip to {}'.format(fp))\n\n#\n# Then (manually) extract everything in the directory from shell. Note there\n# will be some frames from multiple sectors. 
This is OK.\n#\n","repo_name":"lgbouma/cdips","sub_path":"drivers/make_fficut_wget_script.py","file_name":"make_fficut_wget_script.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"1297974815","text":"import curses\n\ndef main(win):\n\twin.nodelay(True)\n\tkey=\"\"\n\twin.clear()\n\twin.addstr(\"Detected key\")\n\twhile 1:\n\t\ttry:\n\t\t\tkey = win.getkey()\n\t\t\twin.clear()\n\t\t\twin.addstr(\"Detected key:\")\n\t\t\twin.addstr(str(key))\n\t\t\tif key == os.linesep:\n\t\t\t\tbreak\n\t\texcept Exception as e:\n\t\t\t#No input\n\t\t\tpass\n\nbegin_x = 20; begin_y = 7\nheight = 5; width = 40\n\n\nwin = curses.initscr()\nwin = curses.newwin(height,width,begin_y, begin_x)\n","repo_name":"jmansamp/stepper","sub_path":"key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7057251425","text":"import pyneb as pn\nimport numpy as np\n\n\"\"\"\nThis is a toy-program to test the effect of a 2-densities region on the nebular analysis of the observations.\nThe 2 regions have different temperature (tem1 and tem2), densities (dens1 and dens2), masses (mass1 and mass2, \nwhatever unit, the important being the ratio). Intensities for various lines from various ions (included Hbeta) are computed: \nemis1 from region 1, emis2 from region2 and emis 3 from the sum of the 2 regions.\nThen Te and Ne are computed from these pseudo-observations, using various diagnostic line ratios.\nFinally, ionic fractions are obtained and compared to the original ionic fractions used to compute the intensities.\n\"\"\"\n\npn.log_.level = 3\n\n# Parameters of the toy model:\ntem1 = 1.e4\ntem2 = 1.e4\ndens1 = 1e3\ndens2 = 5.e4\nmass1 = 1\nmass2 = 1e-2\nSp = -6 #log S+/H+\nNp = -5 #log N+/H+\nOp = -4 #log O+/H+\nOpp = -4 #log O++/H+\n\n# The atoms and lines we will need:\nS2 = pn.Atom('S', 2)\nN2 = pn.Atom('N', 2)\nO2 = pn.Atom('O', 2)\nO3 = pn.Atom('O', 3)\nS2_lambda = (4069, 4076, 6716, 6731)\nN2_lambda = (6583, 5755)\nO2_lambda = (3726, 3729)\nO3_lambda = (5007, 4363)\n\n# computing line intensities:\nemis1 = {}\nemis2 = {}\nemis3 = {}\nfor line in S2_lambda:\n emis1['SII_'+str(line)] = S2.getEmissivity(tem1, dens1, wave = line) * 10.**Sp * dens1 * mass1\n emis2['SII_'+str(line)] = S2.getEmissivity(tem2, dens2, wave = line) * 10.**Sp * dens2 * mass2\n emis3['SII_'+str(line)] = emis1['SII_'+str(line)] + emis2['SII_'+str(line)]\nfor line in N2_lambda:\n emis1['NII_'+str(line)] = N2.getEmissivity(tem1, dens1, wave = line) * 10.**Np * dens1 * mass1\n emis2['NII_'+str(line)] = N2.getEmissivity(tem2, dens2, wave = line) * 10.**Np * dens2 * mass2\n emis3['NII_'+str(line)] = emis1['NII_'+str(line)] + emis2['NII_'+str(line)]\nfor line in O3_lambda:\n emis1['OIII_'+str(line)] = O3.getEmissivity(tem1, dens1, wave = line) * 10.**Opp * dens1 * mass1\n emis2['OIII_'+str(line)] = O3.getEmissivity(tem2, dens2, wave = line) * 10.**Opp * dens2 * mass2\n emis3['OIII_'+str(line)] = emis1['OIII_'+str(line)] + emis2['OIII_'+str(line)]\nfor line in O2_lambda:\n emis1['OII_'+str(line)] = O2.getEmissivity(tem1, dens1, wave = line) * 10.**Op * dens1 * mass1\n emis2['OII_'+str(line)] = O2.getEmissivity(tem2, dens2, wave = line) * 10.**Op * dens2 * mass2\n emis3['OII_'+str(line)] = emis1['OII_'+str(line)] + emis2['OII_'+str(line)]\n \nemis1['Hbeta'] = pn.getHbEmissivity(tem1) * dens1 * mass1\nemis2['Hbeta'] = 
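# --- reviewer sketch (illustrative; not one of the dataset records) ---
# The key.py record is incomplete as scraped: it references os without
# importing it, never calls main(), and does no curses cleanup. A hedged,
# runnable reconstruction of the apparent intent using curses.wrapper:
import curses
import os

def main(win):
    win.nodelay(True)          # make getkey() non-blocking
    win.addstr("Detected key")
    while True:
        try:
            key = win.getkey()
            win.clear()
            win.addstr("Detected key:" + str(key))
            if key == os.linesep:
                break
        except curses.error:   # raised when no input is pending
            pass

if __name__ == "__main__":
    curses.wrapper(main)       # wrapper restores the terminal on exit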
pn.getHbEmissivity(tem2) * dens2 * mass2\nemis3['Hbeta'] = emis1['Hbeta'] + emis2['Hbeta'] \n\n# determining Te and Ne for different regions and diagnostics:\n# Region 1 only\ntemp_1, dens_S2_1 = pn.getCrossTemDen('N', 2, 'L(6583) / L(5755)', 'S', 2, 'L(6716)/L(6731)', \n emis1['NII_6583']/emis1['NII_5755'], \n emis1['SII_6716']/emis1['SII_6731'], \n guess_tem=10000, tol_tem = 1., tol_den = 1., max_iter = 5)\n# Region 2 only\ntemp_2, dens_S2_2 = pn.getCrossTemDen('N', 2, 'L(6583) / L(5755)','S', 2, 'L(6716)/L(6731)', \n emis2['NII_6583']/emis2['NII_5755'], \n emis2['SII_6716']/emis2['SII_6731'], \n guess_tem=10000, tol_tem = 1., tol_den = 1., max_iter = 5)\n# sum of region 1 and 2, using Te(NII) and Ne(SII)\ntemp_3, dens_S2_3 = pn.getCrossTemDen('N', 2, 'L(6583) / L(5755)', 'S', 2, 'L(6716)/L(6731)', \n emis3['NII_6583']/emis3['NII_5755'], \n emis3['SII_6716']/emis3['SII_6731'], \n guess_tem=10000, tol_tem = 1., tol_den = 1., max_iter = 5)\n# sum of region 1 and 2, using Te(NII) and Ne(SII 4 lines)\ntemp_4, dens_S2_4 = pn.getCrossTemDen('N', 2, 'L(6583) / L(5755)',\n 'S', 2, '(L(4069) + L(4076)) / (L(6716)+L(6731))', \n emis3['NII_6583']/emis3['NII_5755'],\n (emis3['SII_4069']+emis3['SII_4076']) / (emis3['SII_6716']+emis3['SII_6731']), \n guess_tem=10000, tol_tem = 1., tol_den = 1., max_iter = 5)\n# sum of region 1 and 2, using Te(OIII) and Ne(SII)\ntemp_5, dens_S2_5 = pn.getCrossTemDen('O', 3, 'L(5007) / L(4363)', 'S', 2, 'L(6716)/L(6731)', \n emis3['OIII_5007']/emis3['OIII_4363'], \n emis3['SII_6716']/emis3['SII_6731'], \n guess_tem=10000, tol_tem = 1., tol_den = 1., max_iter = 5)\n\ndef ab_ion5(atom, line, wave):\n \"\"\"\n this return ionic abundances in 5 cases: region 1, 2 and the sum of the 2 regions (using different Te and Ne)\n \"\"\"\n ab_1 = np.log10(atom.getIonAbundance(emis1[line]/emis1['Hbeta']*100, temp_1, dens_S2_1, wave= wave))\n ab_2 = np.log10(atom.getIonAbundance(emis2[line]/emis2['Hbeta']*100, temp_2, dens_S2_2, wave= wave))\n ab_3 = np.log10(atom.getIonAbundance(emis3[line]/emis3['Hbeta']*100, temp_3, dens_S2_3, wave= wave))\n ab_4 = np.log10(atom.getIonAbundance(emis3[line]/emis3['Hbeta']*100, temp_4, dens_S2_4, wave= wave))\n ab_5 = np.log10(atom.getIonAbundance(emis3[line]/emis3['Hbeta']*100, temp_5, dens_S2_5, wave= wave))\n return ab_1, ab_2, ab_3, ab_4, ab_5\n\nprint(' Dens1 = %.0f, Dens2 = %.0f, Dens3 = %.0f, Dens4 = %.0f, Dens5 = %.0f' % \\\n (dens_S2_1, dens_S2_2, dens_S2_3, dens_S2_4, dens_S2_5))\nprint(' Temp1 = %.0f, Temp2 = %.0f, Temp3 = %.0f, Temp4 = %.0f, Temp5 = %.0f' % \\\n (temp_1, temp_2, temp_3, temp_4, temp_5))\n\nfor line in S2_lambda:\n ab_Sp1, ab_Sp2, ab_Sp3, ab_Sp4, ab_Sp5 = ab_ion5(S2, 'SII_'+str(line), line) \n print(' S+%i : %.2f %.2f %.2f %.2f %.2f' % \\\n (line,ab_Sp1-Sp, ab_Sp2-Sp, ab_Sp3-Sp, ab_Sp4-Sp, ab_Sp5-Sp))\n\nab_Np1, ab_Np2, ab_Np3, ab_Np4, ab_Np5 = ab_ion5(N2, 'NII_6583', 6583) \nprint(' N+%i : %.2f %.2f %.2f %.2f %.2f' % \\\n (6583,ab_Np1-Np, ab_Np2-Np, ab_Np3-Np, ab_Np4-Np, ab_Np5-Np))\n\nab_Op1, ab_Op2, ab_Op3, ab_Op4, ab_Op5 = ab_ion5(O2, 'OII_3726', 3726) \nprint(' O+%i : %.2f %.2f %.2f %.2f %.2f' % \\\n (3726,ab_Op1-Op, ab_Op2-Op, ab_Op3-Op, ab_Op4-Op, ab_Op5-Op))\nab_Op1, ab_Op2, ab_Op3, ab_Op4, ab_Op5 = ab_ion5(O2, 'OII_3729', 3729) \nprint(' O+%i : %.2f %.2f %.2f %.2f %.2f' % \\\n (3729,ab_Op1-Op, ab_Op2-Op, ab_Op3-Op, ab_Op4-Op, ab_Op5-Op))\n\nab_Opp1, ab_Opp2, ab_Opp3, ab_Opp4, ab_Opp5 = ab_ion5(O3, 'OIII_5007', 5007) \nprint('O++%i : %.2f %.2f %.2f %.2f %.2f' % \\\n (5007,ab_Opp1-Opp, ab_Opp2-Opp, ab_Opp3-Opp, 
ab_Opp4-Opp, ab_Opp5-Opp))\n\n","repo_name":"Morisset/PyNeb_devel","sub_path":"pyneb/sample_scripts/multi_comp.py","file_name":"multi_comp.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"54"} +{"seq_id":"20595394528","text":"import torch\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\n# from utils import image_processing\nimport os\nimport pandas as pd\n\n\n\n\n# 以数据类型为单位,train是一个dataset,dev是一个,test是一个\nclass TorchDataset(Dataset):\n def __init__(self, data_list, data_dir, task, num_classes, repeat=1):\n '''\n :param data_list: 数据文件TXT:格式:imge_name.jpg label1_id labe2_id\n :param data_dir: 图片路径:image_dir+imge_name.jpg构成图片的完整路径\n :num_classes: 最后要分的类别数量,geniue+K种伪造方式\n # :param resize_height 为None时,不进行缩放\n # :param resize_width 为None时,不进行缩放,\n # PS:当参数resize_height或resize_width其中一个为None时,可实现等比例缩放\n :param repeat: 所有样本数据重复次数,默认循环一次,当repeat为None时,表示无限循环 self.fixed_length:\n data = data[:, (data.shape[1]-self.fixed_length)//2:(data.shape[1]-self.fixed_length)//2+self.fixed_length]\n else:\n data = np.pad(data, ((0, 0), (0, self.fixed_length-data.shape[1])), mode=\"wrap\")\n data = data.reshape(1, 256, self.fixed_length) # (1, 1, 256, frames_num), (N, C, W, H)\n # data = torch.from_numpy(data)\n return data\n\n def select_feature(self, feature_name):\n self.feature = feature_name\n\n def get_len(self):\n return self.len\n\n\n\n# ————————————————\n# 版权声明:本文为CSDN博主「pan_jinquan」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。\n# 原文链接:https://blog.csdn.net/guyuealian/article/details/88343924\n\n\n\n# 自定义的函数用于DataLoader的batch collect,以解决使用的数据长度不一的问题\ndef custom_collate(batch):\n # index = 0\n batch_flacs_MGD = []\n batch_flacs_STFT = []\n batch_labels = []\n for flac in batch:\n batch_flacs_MGD.append(flac[0])\n batch_flacs_STFT.append(flac[1])\n batch_labels.append(flac[2])\n # print(flac(1))\n # batch_labels = torch.cat((batch_labels, torch.from_numpy(flac[1])))\n # index += 1\n # batch_flacs: (32, 1, 256, frame_number)\n # batch_labels: (32, num_classes)\n # print(len(batch))\n # print(len(batch[0]))\n # print(len(batch[0][0]))\n # print(len(batch[0][0][0]))\n # print(len(batch[0][0][0][0]))\n # print(batch)\n return np.array(batch_flacs_MGD), np.array(batch_flacs_STFT), np.array(batch_labels)\n\n\ndef custom_collate_for_dev(batch):\n # index = 0\n batch_flacs_MGD = []\n batch_flacs_STFT = []\n batch_ids = []\n batch_names = []\n batch_types = []\n batch_genders = []\n batch_labels = []\n for flac in batch:\n batch_flacs_MGD.append(flac[0])\n batch_flacs_STFT.append(flac[1])\n batch_ids.append(flac[2])\n batch_names.append(flac[3])\n # batch_ids.append(flac[4])\n batch_types.append(flac[4])\n batch_genders.append(flac[5])\n batch_labels.append(flac[6])\n # print(flac(1))\n # batch_labels = torch.cat((batch_labels, torch.from_numpy(flac[1])))\n # index += 1\n # batch_flacs: (32, 1, 256, frame_number)\n # batch_labels: (32, num_classes)\n # print(len(batch))\n # print(len(batch[0]))\n # print(len(batch[0][0]))\n # print(len(batch[0][0][0]))\n # print(len(batch[0][0][0][0]))\n # print(batch)\n return np.array(batch_flacs_MGD), np.array(batch_flacs_STFT), batch_ids, batch_names, batch_types, batch_genders, np.array(batch_labels)\n\n\ndef custom_collate_for_compute(batch):\n # index = 0\n # batch_flacs_MGD = []\n # batch_flacs_STFT = []\n batch_labels = []\n batch_names = []\n batch_ids = []\n for flac in batch:\n # 
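# --- reviewer sketch (illustrative; not one of the dataset records) ---
# In the two-zone pyneb toy model above, each zone contributes line flux
# proportional to emissivity(T, n) * abundance * n_e * mass, so anything
# inferred from the blended spectrum is pulled toward the zone with the larger
# n_e * mass. A flux-weighted average makes the bias explicit:
def flux_weighted(value1, weight1, value2, weight2):
    # weight_i = n_e,i * mass_i, the emission-measure-like factor per zone
    return (value1 * weight1 + value2 * weight2) / (weight1 + weight2)

# densities 1e3 and 5e4 with weights dens*mass = 1e3 and 5e2 (record's values):
print(flux_weighted(1e3, 1e3, 5e4, 5e2))   # ~1.7e4, biased toward the clump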
batch_flacs_MGD.append(flac[0])\n # batch_flacs_STFT.append(flac[1])\n batch_labels.append(flac[0])\n batch_names.append(flac[1])\n batch_ids.append(flac[2])\n # print(flac(1))\n # batch_labels = torch.cat((batch_labels, torch.from_numpy(flac[1])))\n # index += 1\n # batch_flacs: (32, 1, 256, frame_number)\n # batch_labels: (32, num_classes)\n # print(len(batch))\n # print(len(batch[0]))\n # print(len(batch[0][0]))\n # print(len(batch[0][0][0]))\n # print(len(batch[0][0][0][0]))\n # print(batch)\n return np.array(batch_labels), batch_names, batch_ids\n\nif __name__ == \"__main__\":\n\n path = \"/media/ssd1T/antispoof/2019/LA\"\n train_data = TorchDataset(data_list=path+\"/ASVspoof2019_LA_cm_protocols/ASVspoof2019.LA.cm.train.trn.txt\", data_dir=\"./\", num_classes=20, repeat=None)\n train_loader = DataLoader(dataset=train_data, batch_size=32, collate_fn=custom_collate, shuffle=True)\n\n for step, (batch_flacs, batch_labels) in enumerate(train_loader):\n # print(step)\n # print(batch_flacs, batch_labels)\n print(len(batch_flacs), len(batch_labels))\n print(batch_flacs[0].shape)\n print(batch_labels[0].shape)\n pass","repo_name":"SFBB/GRCNN","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":9244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70451581282","text":"#!/usr/bin/env python3\n\n# ALWAYS import cv2 before cvbridge (even when not otherwise needed):\n# https://answers.ros.org/question/362388/cv_bridge_boost-raised-unreported-exception-when-importing-cv_bridge/\n# noinspection PyUnresolvedReferences\nimport cv2\nimport numpy as np\nimport rospy\nfrom custom_msgs.msg import LaneDetection\nfrom cv_bridge import CvBridge\nfrom duckietown.dtros import DTROS, NodeType\nfrom sensor_msgs.msg import CompressedImage\nfrom duckietown_msgs.srv import SetValue, SetFSMState\nfrom duckietown_msgs.msg import FSMState\n\nimport CameraConfig\nimport image_analysis\nimport preprocessing\n\n\nclass LanePerceptionNode(DTROS):\n def __init__(self, node_name):\n # Initialize the DTROS parent class\n super(LanePerceptionNode, self).__init__(node_name=node_name,\n node_type=NodeType.BEHAVIOR)\n self.veh_name = rospy.get_namespace().strip(\"/\")\n\n self.bridge = CvBridge()\n\n self.camera_config = CameraConfig.CameraConfig.from_file(f\"/data/config/calibrations/camera_intrinsic/{self.veh_name}.yaml\")\n\n # params\n rospy.set_param(f\"/{self.veh_name}/lane_perception_node/gamma\", 0.9)\n rospy.set_param(f\"/{self.veh_name}/lane_perception_node/look_ahead\", 0.75)\n rospy.set_param(f\"/{self.veh_name}/lane_perception_node/red_angle\", np.pi / 3)\n rospy.set_param(f\"/{self.veh_name}/lane_perception_node/red_top_crop\", 0.75)\n rospy.set_param(f\"/{self.veh_name}/lane_perception_node/red_line_length_threshold\", 500)\n\n # sub, pub\n self.mode_sub = rospy.Subscriber(f\"/{self.veh_name}/state_machine_node/mode\", FSMState, self.cb_on_mode_change)\n self.im_sub = rospy.Subscriber(f\"/{self.veh_name}/camera_node/image/compressed\", CompressedImage, self.cb_img, queue_size=1,\n buff_size=1000000)\n self.lane_det_pub = rospy.Publisher(f\"/{self.veh_name}/lane_perception_node/lane_det\", LaneDetection, queue_size=1)\n self.line_pub = rospy.Publisher(f\"/{self.veh_name}/lane_perception_node/lines/compressed\", CompressedImage, queue_size=1)\n self.red_line_pub = rospy.Publisher(f\"/{self.veh_name}/lane_perception_node/red_lines/compressed\", CompressedImage, queue_size=1)\n\n change_mode_srv = 
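# --- reviewer sketch (illustrative; not one of the dataset records) ---
# The dataloader record's three custom_collate variants all follow one
# pattern: unzip the per-sample tuples, then re-stack each field batch-wise.
# The generic form covering its MGD/STFT/label cases:
import numpy as np

def collate(batch):
    # batch: list of per-sample tuples -> tuple of stacked batch arrays
    fields = zip(*batch)
    return tuple(np.array(field) for field in fields)

flacs, labels = collate([(np.zeros((1, 2, 2)), 0), (np.ones((1, 2, 2)), 1)])
assert flacs.shape == (2, 1, 2, 2) and labels.tolist() == [0, 1]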
f\"/{self.veh_name}/state_machine_node/change_mode\"\n self.mode_srv = rospy.ServiceProxy(change_mode_srv, SetFSMState)\n\n self.last_red = rospy.get_time()\n self.red_active = False\n\n self.rect_mat = None\n\n self.loginfo(\"Initialized\")\n\n def cb_on_mode_change(self, msg):\n if msg.state == \"LANE_FOLLOWING\":\n self.red_active = True\n\n\n def cb_img(self, message):\n # if self.camera_config is None or self.camera_header is None:\n # return\n image = self.bridge.compressed_imgmsg_to_cv2(message, desired_encoding=\"passthrough\")\n\n gamma = rospy.get_param(f\"/{self.veh_name}/lane_perception_node/gamma\")\n image = preprocessing.gamma_correction(image, gamma)\n rect_image = self.camera_config.rectify_image(image)\n rect_color_dict = preprocessing.extract_colors(rect_image)\n\n h, w = image.shape[:2]\n\n look_ahead = rospy.get_param(f\"/{self.veh_name}/lane_perception_node/look_ahead\")\n color_dict = preprocessing.extract_colors(image)\n\n mid_point, _, line_img = image_analysis.detect_lane(image, color_dict=color_dict, look_ahead=look_ahead)\n\n red_angle = rospy.get_param(f\"/{self.veh_name}/lane_perception_node/red_angle\")\n red_top_crop = rospy.get_param(f\"/{self.veh_name}/lane_perception_node/red_top_crop\")\n red_line_length_threshold = rospy.get_param(f\"/{self.veh_name}/lane_perception_node/red_line_length_threshold\")\n\n redline_detected, redline_angle, length = image_analysis.detect_redline(rect_image, color_dict=rect_color_dict, max_angle=red_angle,\n red_top_crop=red_top_crop,\n length_threshold=red_line_length_threshold, debug=True)\n\n #self.loginfo(f\"Redline: {redline_detected} with angle {redline_angle} and length {length}\")\n if redline_angle is not None and self.red_active:\n #self.red_line_srv(redline_angle) # TODO angle is not signed!\n self.mode_srv(\"WAITING\")\n self.red_active = False\n\n line_img_msg = self.bridge.cv2_to_compressed_imgmsg(line_img)\n red_line_img_msg = self.bridge.cv2_to_compressed_imgmsg(rect_image)\n\n det = LaneDetection()\n if mid_point is not None:\n det.mid_point_h = mid_point[0] / w\n det.mid_point_v = mid_point[1] / h\n\n self.lane_det_pub.publish(det)\n self.line_pub.publish(line_img_msg)\n self.red_line_pub.publish(red_line_img_msg)\n\n\nif __name__ == '__main__':\n # Initialize the node\n node = LanePerceptionNode(node_name='lane_perception_node')\n # Keep it spinning to keep the node alive\n rospy.spin()\n","repo_name":"XHZhang01/duckie-driving","sub_path":"packages/perception/src/lane_perception_node.py","file_name":"lane_perception_node.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24893917815","text":"import heapq\nfrom collections import deque\n\nimport sys\n\ninput = sys.stdin.readline\n\nn,x = map(int,input().split())\n\ncount = list(map(int,input().split()))\n\nmaxcount = 0\ndate = 1\n\nmaxcount = sum(count[0:x])\ntemp = maxcount\nfor i in range(x,n) :\n\n temp -= count[i-x]\n temp += count[i]\n if temp > maxcount :\n maxcount = temp\n date = 1\n elif temp == maxcount :\n date += 1\n\nif maxcount == 0 :\n print(\"SAD\")\nelse :\n print(maxcount)\n print(date)\n","repo_name":"sy8044/snippet","sub_path":"baekjoon/07twopointer/21921.py","file_name":"21921.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6811791619","text":"from collections import deque\n\n# Definição do grafo como um dicionário de listas de 
adjacência\ngrafo = {\n 'A': ['B'],\n 'B': ['D', 'E'],\n 'C': ['F'],\n 'D': [],\n 'E': ['F'],\n 'F': []\n}\n\ndef bfs(grafo, inicio):\n visitados = [] # Lista para armazenar os nós visitados\n fila = deque([inicio]) # Fila para armazenar os nós a serem visitados\n\n while fila:\n no = fila.popleft() # Remove o nó mais antigo da fila\n if no not in visitados:\n visitados.append(no)\n vizinhos = grafo[no]\n\n # Adiciona os vizinhos não visitados na fila\n for vizinho in vizinhos:\n if vizinho not in visitados:\n fila.append(vizinho)\n \n return visitados\n\nprint(bfs(grafo, 'A')) \n","repo_name":"NessaMd/IA","sub_path":"AlgoritmosBases/buscaEmLargura.py","file_name":"buscaEmLargura.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31679407721","text":"from pathlib import Path\nfrom typing import Optional\n\nfrom datasets import Dataset, DatasetDict, load_dataset\nfrom torchvision.transforms import functional as fn\n\nfrom src.dataloader.dataloader import CustomDataset\nfrom src.helpers.image_utils import resize_img\n\nlocal_FILE_LENGTH_MAP_JSON_PATH = Path(__file__).parent\n\n\ndef export_wikiart_labels():\n dataset = load_dataset(\"huggan/wikiart\", cache_dir=\"./data/wikiart/labels\")\n labels = dataset[\"train\"].remove_columns(\"image\")\n labels.save_to_disk(\"./data/wikiart/labels\")\n\n\ndef save_wikiart_labels_as_dataframe():\n labels = Dataset.load_from_disk(\"./data/wikiart/labels\")\n labels.to_pandas().to_csv(\"./data/wikiart/labels/labels.csv\", index=False)\n\n\nclass CustomWikiartDataset(CustomDataset):\n def __init__(\n self,\n chosen_label=\"genre\",\n chunk_size=1,\n ):\n super().__init__(chosen_label=chosen_label, chunk_size=chunk_size)\n\n dataset = load_dataset(\"huggan/wikiart\", cache_dir=\"./data/wikiart/dataset\")\n\n if isinstance(dataset, DatasetDict):\n dataset = dataset[\"train\"]\n column_names_to_remove = [\n column_name\n for column_name in dataset.column_names\n if column_name != chosen_label and column_name != \"image\"\n ]\n dataset = dataset.remove_columns(column_names_to_remove)\n self.__dataset = dataset.with_format(\"torch\")\n else:\n raise ValueError(f\"Wrong type of dataset. 
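# --- reviewer sketch (illustrative; not one of the dataset records) ---
# English-commented equivalent of the buscaEmLargura.py record above (its
# comments are Portuguese: 'fila' = queue, 'visitados' = visited,
# 'vizinhos' = neighbours); the traversal itself is a textbook BFS:
from collections import deque

def bfs(graph, start):
    visited, queue = [], deque([start])
    while queue:
        node = queue.popleft()           # FIFO pop gives breadth-first order
        if node not in visited:
            visited.append(node)
            queue.extend(n for n in graph[node] if n not in visited)
    return visited

graph = {'A': ['B'], 'B': ['D', 'E'], 'C': ['F'], 'D': [], 'E': ['F'], 'F': []}
assert bfs(graph, 'A') == ['A', 'B', 'D', 'E', 'F']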
Expected {DatasetDict}, got {type(dataset)}\")\n\n def __len__(self):\n return len(self.__dataset)\n\n def __getitem__(self, key):\n image = self.__dataset[key][\"image\"]\n\n return (\n fn.convert_image_dtype(resize_img(image.permute(2, 0, 1))),\n self.__dataset[key][self.chosen_label].item(),\n )\n\n def __iter__(self):\n return self.generator()\n\n def generator(self):\n for item in self.__dataset:\n if isinstance(item, dict):\n image = item[\"image\"]\n yield (\n fn.convert_image_dtype(resize_img(image.permute(2, 0, 1))),\n item[self.chosen_label].item(),\n )\n else:\n raise ValueError(f\"Expected item in dataset to have type {dict}, found {type(item)}\")\n\n def slice(self, start: int = 0, stop: Optional[int] = None, step: int = 1):\n self.__dataset = self.__dataset.select(range(start, stop or self.__length, step))\n\n return self\n\n\n# import ast\n# import json\n# import os\n# import torch\n# import torchvision\n# import numpy as np\n# import pandas as pd\n\n# def init(\n# self,\n# use_huggingface=False,\n# wikiart_data_path=\"wikiart\",\n# file_length_map_json_path=local_FILE_LENGTH_MAP_JSON_PATH,\n# chosen_label=\"genre\",\n# chunk_size=1,\n# ):\n# temp_ = 0\n# with open(os.path.join(file_length_map_json_path, \"file_length_map.json\"), \"r\") as file_id_map:\n# map_dict = json.load(file_id_map)\n# for file_length in map_dict.values():\n# temp_ += file_length\n\n# self.file_length_map_json_path = file_length_map_json_path # type: ignore\n# self.data_path = PurePath(self.data_path, wikiart_data_path)\n# self.length = temp_\n\n\n# def csv_getitem(self, raw_row_id):\n# with open(os.path.join(self.file_length_map_json_path, \"file_length_map.json\"), \"r\") as file_id_map:\n# map_dict = json.load(file_id_map)\n\n# file_number = int(raw_row_id)\n# file_name = \"\"\n# for file_name, file_length in map_dict.items():\n# if file_number - int(file_length - 1) < 0:\n# break\n# else:\n# file_number -= int(file_length - 1)\n\n# file_length = int(map_dict.get(file_name, -1))\n# assert file_length > 0, f\"No key called {file_name}\"\n\n# row_id_in_file = raw_row_id % file_length + 1 if raw_row_id >= file_length else raw_row_id\n\n# row = pd.read_csv(f\"{self.data_path}/csv/\" + file_name, skiprows=range(1, row_id_in_file), nrows=1)\n\n# return (\n# resize_img(\n# torchvision.io.decode_image(\n# torch.tensor(np.frombuffer(ast.literal_eval(row[\"image\"].iloc[0]).get(\"bytes\"), dtype=np.uint8))\n# )\n# ),\n# row[self.chosen_label].iloc[0],\n# )\n\n\n# def csv_generator(self):\n# with open(os.path.join(self.file_length_map_json_path, \"file_length_map.json\"), \"r\") as file_id_map:\n# map_dict = json.load(file_id_map)\n# for file_name in map_dict:\n# for chunk in pd.read_csv(f\"{self.data_path}/csv/\" + file_name, chunksize=self.chunk_size):\n# image_arrays, labels = [], []\n# image_arrays = torch.tensor(\n# chunk[\"image\"]\n# .map(\n# lambda img: resize_img(\n# torchvision.io.decode_image(\n# torch.tensor(np.frombuffer(ast.literal_eval(img).get(\"bytes\"), dtype=np.uint8))\n# )\n# )\n# )\n# .values\n# )\n# labels = chunk[self.chosen_label].values # .decode('utf-8')\n\n# if self.chunk_size == 1:\n# for row in range(self.chunk_size):\n# yield (image_arrays[row], labels[row])\n\n# yield (\n# image_arrays,\n# np.asarray(labels),\n# 
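# --- reviewer sketch (illustrative; not one of the dataset records) ---
# CustomWikiartDataset permutes HWC image tensors to CHW before resizing and
# dtype conversion. The same normalization step in isolation; resize_img is
# the record's helper, replaced here by an identity stand-in:
import torch
from torchvision.transforms import functional as fn

def resize_img(img):      # stand-in for the record's helper, assumed identity
    return img

hwc = torch.zeros((32, 32, 3), dtype=torch.uint8)
chw = fn.convert_image_dtype(resize_img(hwc.permute(2, 0, 1)))
assert chw.shape == (3, 32, 32) and chw.dtype == torch.float32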
)\n","repo_name":"Domina32/fine-art-classification","sub_path":"src/dataloader/wikiart_generator/custom_wikiart_dataset.py","file_name":"custom_wikiart_dataset.py","file_ext":"py","file_size_in_byte":5557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30360132191","text":"_base_='./swin_t_yolox.py'\n\nmodel=dict(\n backbone=dict(\n depths=[2,2,18,2]\n ),\n neck=dict(\n num_csp_blocks=2\n )\n)\nbatch_size=16 #batchsize=6 mem=32\ndata = dict(\n samples_per_gpu=batch_size,\n workers_per_gpu=2)\n\n","repo_name":"maifatai/Particle-cluster-detection","sub_path":"configs/swin/swin_s_yolox.py","file_name":"swin_s_yolox.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19780494129","text":"\"\"\"Given a string S, find the longest palindromic substring in S. You may assume that the maximum length of S is 1000, and there exists one unique longest palindromic substring.\"\"\"\nclass Solution(object):\n def getlongestpalindrome(self, s, l, r):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n l -= 1; r += 1\n return s[l+1 : r]\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if s is None: return s\n res = ''\n for i in range(len(s)):\n s1 = self.getlongestpalindrome(s, i, i)\n if len(s1) > len(res):\n res = s1\n s2 = self.getlongestpalindrome(s, i, i + 1)\n if len(s2) > len(res):\n res = s2\n return res \n","repo_name":"brotherhuang/notebook_leetcode","sub_path":"python/Longest_Palindromic_Substring.py","file_name":"Longest_Palindromic_Substring.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"64410816","text":"from aocd import data\n\nfrom aoc.util import perf\n\nsteps = [(d, int(x)) for (d, x) in (line.split() for line in data.strip().split('\\n'))]\nmap = dict(forward=1, down=1j, up=-1j)\n\n\n@perf\ndef part1(steps):\n return sum(map[d] * x for (d, x) in steps)\n\n\n@perf\ndef part2(steps):\n a = 0\n pos = 0\n for (d, x) in steps:\n d = map[d]\n if d.real:\n pos += x + x * a * 1j\n else:\n a += x * d.imag\n return pos\n\n\npos = part1(steps)\nprint(f'part1: x={pos.real}, d={pos.imag}, pos={pos.real * pos.imag}')\n\npos = part2(steps)\nprint(f'part2: x={pos.real}, d={pos.imag}, pos={pos.real * pos.imag}')\n","repo_name":"bj0/aoc","sub_path":"aoc/2021/d2.py","file_name":"d2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37188985024","text":"#2079242 - Ilbey Evcil\r\nimport time\r\nstartTime = time.time()\r\n\r\nmap_file = open(\"map.txt\", \"r\")\r\nmap_str = map_file.read()\r\n\r\nmap_array = map_str.split(\"\\n\")\r\nmap_file.close()\r\n\r\ndef main():\r\n rows = len(map_array) - 1\r\n columns = len(map_array[0])\r\n vertices = []\r\n nodes = []\r\n i = 0\r\n j = 0\r\n while j < columns:\r\n i = 0\r\n while i < rows:\r\n if map_array[i][j] != '*' and map_array[i][j] != ' ':\r\n vertices.append([i,j])\r\n nodes.append(map_array[i][j])\r\n i = i + 1\r\n j = j + 1\r\n\r\n weight = []\r\n counter = 0\r\n for i in vertices:\r\n weight.append([])\r\n for j in vertices:\r\n x = helperFunc(i,j)\r\n weight[counter].append(x)\r\n \r\n counter = counter + 1 \r\n \r\n x = 0\r\n while x < len(vertices):\r\n y = x\r\n while y < len(vertices):\r\n if nodes[x] != nodes[y] or [nodes[x],nodes[y]] != 
[nodes[y],nodes[x]]:\r\n print(\"{},{},{}\".format(nodes[x],nodes[y],weight[x][y]))\r\n y = y + 1\r\n x = x + 1\r\n\r\n choice = input(\"Enter an algorithm(BFS or UCS): \")\r\n \r\n if choice == \"BFS\" or choice == \"bfs\":\r\n totalCost = bfs(weight, vertices)\r\n stat(vertices, 1, totalCost)\r\n elif choice == \"UCS\" or choice == \"ucs\":\r\n totalCost = ucs(weight, vertices)\r\n stat(vertices, 2, totalCost)\r\n else:\r\n print(\"Entered wrong input!\")\r\n\r\n\r\ndef stat(vertices, choice, totalCost):\r\n print(\"Statistics: \")\r\n print(\"\\tNodes\\tTime(ms)\\tCost\")\r\n if choice == 1:\r\n print(\"BFS\\t{}\\t{}\\t\\t{}\".format(len(vertices), round((time.time() - startTime)*100,3), totalCost))\r\n elif choice == 2:\r\n print(\"UCS\\t{}\\t{}\\t\\t{}\".format(len(vertices), round((time.time() - startTime)*100,3), totalCost))\r\n\r\ndef ucs(graph, vertices):\r\n path = []\r\n path.append(vertices[0])\r\n control = []\r\n tempElem = []\r\n total = 0\r\n num = len(vertices)\r\n i = 0\r\n j = 0\r\n counter = 0\r\n tempControl = []\r\n while counter < num - 1:\r\n if j < num:\r\n control.append(j)\r\n i = 0\r\n while i < num:\r\n if (i not in control):\r\n if graph[i][j] != 0:\r\n tempControl.append(i)\r\n tempElem.append(graph[i][j])\r\n else:\r\n tempElem.append(100)\r\n i = i + 1\r\n i = 0\r\n temp = 100\r\n x = 0\r\n while x < len(tempElem):\r\n if tempElem[x] < temp:\r\n temp = tempElem[x]\r\n jTemp = tempControl[x]\r\n j = jTemp\r\n x = x + 1\r\n if x >= len(tempElem):\r\n tempElem = []\r\n tempControl = []\r\n total = total + temp\r\n control.append(jTemp)\r\n path.append(vertices[j])\r\n counter = counter + 1\r\n dx = abs(vertices[j][0] - vertices[0][0])\r\n dy = abs(vertices[j][1] - vertices[0][1])\r\n total = total + dx + dy\r\n path.append(vertices[0])\r\n \r\n print(\"Algorithm Used: UCS\")\r\n countPrint = 0\r\n for x in coordToChar(path):\r\n if countPrint < len(vertices):\r\n print(x, end = \"-\")\r\n else: \r\n print(x)\r\n countPrint = countPrint + 1\r\n print(\"Total Tour Cost: \",total)\r\n return total\r\n\r\n\r\ndef coordToChar(path):\r\n i = 0\r\n for elem in path:\r\n path[i] = map_array[elem[0]][elem[1]]\r\n i = i + 1\r\n return path\r\n\r\n\r\ndef bfs(graph, vertices):\r\n k = 0\r\n j = 0\r\n i = 1\r\n path = []\r\n totalCost = 0\r\n while j < len(vertices):\r\n totalCost = totalCost + graph[i][k]\r\n path.append(vertices[j])\r\n j = j + 1\r\n if i < len(vertices) - 1:\r\n i = i + 1\r\n else: \r\n i = 0\r\n k = k + 1\r\n i = 0\r\n totalCost = totalCost + graph[i][k]\r\n path.append(vertices[0])\r\n countPrint = 0\r\n print(\"Algorithm Used: BFS\")\r\n path = coordToChar(path)\r\n for x in path:\r\n if countPrint < len(vertices):\r\n print(x, end = \"-\")\r\n else: \r\n print(x)\r\n countPrint = countPrint + 1\r\n print(\"Total Tour Cost: \",totalCost)\r\n return totalCost\r\n\r\n\r\ndef helperFunc(current,goal):\r\n htotal = 0\r\n counter = 0\r\n control = []\r\n \r\n while(current != goal):\r\n harray = AStar(current,goal)\r\n\r\n if harray[1] == 1:\r\n a = [current[0], current[1] + 1]\r\n elif harray[1] == 2:\r\n a = [current[0] - 1, current[1]]\r\n elif harray[1] == 3:\r\n a = [current[0] + 1, current[1]] \r\n elif harray[1] == 4:\r\n a = [current[0], current[1] - 1]\r\n\r\n if a not in control:\r\n current = a\r\n control.append(a)\r\n else:\r\n a = [current[0], current[1] + 1]\r\n if a not in control and map_array[current[0]][current[1] + 1] != '*':\r\n current = a\r\n control.append(a)\r\n else:\r\n if map_array[current[0]+1][current[1]] != '*':\r\n a = 
[current[0] + 1, current[1]] \r\n current = a\r\n control.append(a)\r\n else:\r\n a = [current[0], current[1] - 1] \r\n current = a\r\n control.append(a)\r\n\r\n htotal = htotal + harray[0]\r\n counter = counter + 1\r\n return counter\r\n\r\n \r\ndef AStar(current,goal):\r\n f = 15\r\n gn = 1\r\n fup = 15\r\n fdown = 15\r\n fright = 15\r\n fleft = 15\r\n \r\n \r\n #right\r\n if map_array[current[0]][current[1]+1] != \"*\":\r\n hx = abs(current[0] - goal[0])\r\n hy = abs((current[1]+1) - goal[1])\r\n fright = hx + hy + gn\r\n\r\n if fright <= f:\r\n f = fright\r\n pos = 1\r\n\r\n #up\r\n if map_array[current[0]-1][current[1]] != \"*\":\r\n hx = abs((current[0]-1) - goal[0])\r\n hy = abs(current[1] - goal[1])\r\n fup = hx + hy + gn\r\n\r\n if fup <= f:\r\n f = fup\r\n pos = 2\r\n\r\n #down\r\n if map_array[current[0]+1][current[1]] != \"*\":\r\n hx = abs((current[0]+1) - goal[0])\r\n hy = abs(current[1] - goal[1])\r\n fdown = hx + hy + gn\r\n \r\n if fdown <= f:\r\n f = fdown\r\n pos = 3\r\n \r\n #left\r\n if map_array[current[0]][current[1]-1] != \"*\":\r\n hx = abs(current[0] - goal[0])\r\n hy = abs((current[1]-1) - goal[1])\r\n fleft = hx + hy + gn\r\n \r\n if fleft < f:\r\n f = fleft\r\n pos = 4\r\n\r\n f = f - 1\r\n return [f,pos]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"ilbey/Maze_Traverser","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73638708003","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def findMode(self, root: Optional[TreeNode]) -> List[int]:\n def mostFrequent(node, counter):\n if not node:\n return counter\n counter[node.val] += 1\n mostFrequent(node.left, counter)\n mostFrequent(node.right, counter)\n return counter\n\n counter = mostFrequent(root, defaultdict(int))\n max_freq = max(counter.values())\n return [num for num, freq in counter.items() if freq == max_freq]","repo_name":"Haymanot-Demis/A2SV-Problems","sub_path":"find-mode-in-binary-search-tree.py","file_name":"find-mode-in-binary-search-tree.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15588236731","text":"import pandas as pd\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef readDataSet(csvFile):\n #yLabel = ['GL_ELEVATION_GRADIENT_MEAN_MPKM_NGA.2m']\n yLabel = ['GL_ELEVATION_M_ASL_ETOPO2v2.2m']\n df = pd.read_csv(csvFile, index_col=0, skiprows=range(1, 10000000), nrows=100000)\n # xLabels = list(df.columns.values)\n # xLabels.remove(yLabel[0])\n # return df[xLabels], np.ravel(df[yLabel].values)\n return df\n\nprint('Reading Dataset') # Read Data\ncsvFile = 'C:/Users/nmoran/Documents/bathy-project/bathy-model/data/features2m.csv'\nx = readDataSet(csvFile)\nlabels = ['CM_MANTLE_DEN_KGM3_CRUST1.2m',\n 'SC_CRUST_THICK_M_CRUST1.2m',\n 'SC_MID_CRUST_DEN_KGM3_CRUST1.2m',\n 'SF_CURRENT_EAST_MS_2012_12_HYCOMx.2m',\n 'SF_CURRENT_MAG_MS_2012_12_HYCOMx.2m',\n 'SF_CURRENT_NORTH_MS_2012_12_HYCOMx.2m',\n 'SF_SEA_NITRATE_MCML_DECADAL_MEAN_woa13x.2m',\n 'SF_SEA_OXYGEN_PCTSAT_DECADAL_MEAN_woa13x.2m',\n 'SF_SEA_PHOSPHATE_MCML_DECADAL_MEAN_woa13x.2m',\n 'SF_SEA_SALINITY_PSU_DECADAL_MEAN_woa13x.2m',\n 'SF_SEA_SILICATE_MCML_DECADAL_MEAN_woa13x.2m',\n 
'SF_SEA_TEMPERATURE_C_DECADAL_MEAN_woa13x.2m',\n          'SF_UP_SED_THICK_M_CRUST1.2m',\n          'SL_GEOID_GRADIENT_MEAN_MPKM_NGA.2m',\n          'SS_BIOMASS_FISH_LOG10_MGCM2_Wei2010x.2m',\n          'SS_BIOMASS_MACROFAUNA_LOG10_MGCM2_Wei2010x.2m',\n          'GL_ELEVATION_M_ASL_ETOPO2v2.2m']\nnewLabels = ['Mantle',\n             'Crust Thickness',\n             'Crust Density',\n             'East Current',\n             'Magnitude Current',\n             'North Current',\n             'Nitrate',\n             'Oxygen',\n             'Phosphate',\n             'Salinity',\n             'Silicate',\n             'Temperature',\n             'Sediment Thickness',\n             'Geoid Gradient',\n             'Fish Biomass',\n             'MacroFauna Biomass',\n             'Bathy']\n\n\n\nx = x[labels]\n\nx.columns = newLabels\n\n\nplt = sns.pairplot(x)\nplt.savefig('./plots/pairplot.png')\n\n# for col in x.columns:\n#     val = x[col].values\n#     title = '{}_X_{}'.format('Bathymetry', col)\n#     # plt.title(title)\n#     # plt.xlabel(col)\n#     # plt.ylabel('Altimetry')\n#     # plt.scatter(val,y,c='blue')\n#     # plt.savefig(\n#     # plt.clf()\n#     data = pd.DataFrame({col.casefold(): val, 'bathymetry': y})\n#     plt = sns.scatterplot(x=col.casefold(), y='bathymetry', data=data).get_figure()\n#     plt.savefig('./plots/{}.png'.format(title))\n#     plt.clf()\n","repo_name":"nichipedia/masters-thesis-code","sub_path":"data_grapher.py","file_name":"data_grapher.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} {"seq_id":"72358060963","text":"import discord\nimport pymongo\nimport logging\n\nimport albiononline  # assumed project module providing Character.search below\n\nimport rde_client\n\n\ngroup_blacklist = discord.app_commands.Group(\n    name='blacklist',\n    description='Blacklist commands'\n)\ngroup_blacklist_user = discord.app_commands.Group(\n    name='users',\n    description='User blacklist commands',\n    parent=group_blacklist\n)\ngroup_blacklist_character = discord.app_commands.Group(\n    name='characters',\n    description='Character blacklist commands',\n    parent=group_blacklist\n)\n\n\n@group_blacklist_user.command(\n    name='add',\n    description='Adds discord user to the blacklist'\n)\nasync def __command_blacklist_add_user(\n    interaction: discord.Interaction,\n    user: discord.Member,\n    reason: str | None\n) -> None:\n    bl_user = interaction.client.blacklist[user]\n    if bl_user.blacklisted():\n        await interaction.response.send_message('User is already blacklisted')\n        return\n    else:\n        bl_user.add(reason)\n    await interaction.response.send_message(f'Blacklisted {user.mention}')\n    logging.warning('Blacklist: added {} ({}) from {} ({})'.format(\n        user.name,\n        user.id,\n        interaction.guild.name,\n        interaction.guild.id\n    ))\n\n\n@group_blacklist_user.command(\n    name='remove',\n    description='Removes discord user from the blacklist'\n)\nasync def __command_blacklist_remove_user(\n    interaction: discord.Interaction,\n    user: discord.Member\n) -> None:\n    bl_user = interaction.client.blacklist[user]\n    if not bl_user.blacklisted():\n        await interaction.response.send_message('User is not blacklisted')\n        return\n    bl_user.remove()\n    await interaction.response.send_message('Removed user from the blacklist')\n    logging.warning('Blacklist: removed {} ({}) from {} ({})'.format(\n        user.name,\n        user.id,\n        interaction.guild.name,\n        interaction.guild.id\n    ))\n\n\n@group_blacklist_character.command(\n    name='add',\n    description='Adds Albion Online character to blacklist'\n)\nasync def __command_blacklist_add_character(\n    interaction: discord.Interaction,\n    character: str,\n    reason: str | None\n) -> None:\n    bl_character = interaction.client.blacklist[interaction.guild][character]\n    if bl_character.blacklisted():\n        await interaction.response.send_message('Character is already blacklisted')\n        return\n    
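# Persist the new blacklist entry (reason may be None) before confirming to the moderator\n    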
bl_character.add(reason)\n    await interaction.response.send_message('Added character to blacklist')\n    logging.warning('Blacklist: added {} from {} ({})'.format(\n        character,\n        interaction.guild.name,\n        interaction.guild.id\n    ))\n\n\n@group_blacklist_character.command(\n    name='remove',\n    description='Removes Albion Online character from blacklist'\n)\nasync def __command_blacklist_remove_character(\n    interaction: discord.Interaction,\n    character: str\n) -> None:\n    bl_character = interaction.client.blacklist[interaction.guild][character]\n    if not bl_character.blacklisted():\n        await interaction.response.send_message('Character is not blacklisted')\n        return\n    bl_character.remove()\n    await interaction.response.send_message('Removed character from the blacklist')\n    logging.warning('Blacklist: removed {} from {} ({})'.format(\n        character,\n        interaction.guild.name,\n        interaction.guild.id\n    ))\n\n\n@group_blacklist_user.command(\n    name='list',\n    description='Shows all blacklisted users'\n)\nasync def __command_blacklist_users(\n    interaction: discord.Interaction\n) -> None:\n    members = [interaction.guild.get_member(uid) for uid in interaction.client.blacklist[interaction.guild].list_user_ids()]\n    if not members:\n        await interaction.response.send_message('No blacklisted users')\n        return\n    resp_content = '**Blacklisted users**\\n' + '\\n'.join(member.mention for member in members)\n    await interaction.response.send_message(resp_content)\n\n\n@group_blacklist_character.command(\n    name='list',\n    description='Shows list of blacklisted characters'\n)\nasync def __command_blacklist_characters(\n    interaction: discord.Interaction\n) -> None:\n    members = interaction.client.blacklist[interaction.guild].list_characters()\n    if not members:\n        await interaction.response.send_message('No blacklisted characters')\n        return\n    resp_content = '**Blacklisted characters**\\n' + '\\n'.join(members)\n    await interaction.response.send_message(resp_content)\n\n\n@discord.app_commands.command(\n    name='register',\n    description='Registers albion online character'\n)\nasync def __command_register(\n    interaction: discord.Interaction,\n    character: str\n) -> None:\n    if interaction.guild is None:\n        await interaction.response.send_message('You\\'re trying to use a server-only command')\n        return\n    if not await albiononline.Character.search(character):\n        await interaction.response.send_message('Such character does not exist')\n        return\n    result = interaction.client.register.add(interaction.user, character)\n    if result == 'failed-member':\n        await interaction.response.send_message('You\\'re already registered')\n        return\n    elif result == 'failed-character':\n        await interaction.response.send_message('This character is already registered')\n        return\n    else:\n        await interaction.response.send_message('Successfully registered')\n\n\n@discord.app_commands.command(\n    name='unregister',\n    description='Unregister user'\n)\nasync def __command_unregister(\n    interaction: discord.Interaction,\n    user: discord.Member\n) -> None:\n    if interaction.guild is None:\n        await interaction.response.send_message('You\\'re trying to use a server-only command')\n        return\n    result = interaction.client.register.remove(user)\n    if result == 'failed':\n        await interaction.response.send_message('User is not registered')\n        return\n    await interaction.response.send_message('Unregistered 
user')\n","repo_name":"m-w-kozlowski/stryk3r","sub_path":"rde_commands.py","file_name":"rde_commands.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31210301309","text":"import os\nimport numpy as np\nimport torch\n\nimport lib.roipool3d.roipool3d_utils as roipool3d_utils\nimport lib.object3d as object3d\nimport lib.bbox3d as bbox3d\n\ndef clean_up(BASE_DIR, SPLIT, skip=20):\n # remove unnecessary label files\n VELO_DIR = os.path.join(BASE_DIR, 'object', SPLIT, 'velodyne')\n LABEL_DIR = os.path.join(BASE_DIR, 'object', SPLIT, 'label_2')\n if os.path.exists(os.path.join(BASE_DIR, 'ImageSets')):\n pass\n else:\n os.makedirs(os.path.join(BASE_DIR, 'ImageSets'))\n\n index_file = open(os.path.join(BASE_DIR, 'ImageSets', SPLIT+'.txt'), 'w')\n label_list = os.listdir(LABEL_DIR).copy()\n label_list.sort()\n for index,label_name in enumerate(label_list):\n velo_name = label_name[:-4] + '.bin'\n print(index, label_name)\n if index self._max_len, lambda: tf.slice(ws, [0], [self._max_len]), lambda: ws)\n c = tf.string_to_number(c, tf.int32)\n return c, ws, tf.size(ws)\n\n dataset = dataset.map(__parse_line)\n dataset = dataset.shuffle(1000)\n padded_shapes = (tf.TensorShape([]), tf.TensorShape([None]), tf.TensorShape([]))\n self._batched_dataset = dataset.padded_batch(self._batch_size, padded_shapes=padded_shapes)\n\n self._iterator = self._batched_dataset.make_initializable_iterator()\n self._c_op, self._ws_op, self._len_op = self._iterator.get_next()\n\n def __next__(self):\n try:\n c, ws, lens = self._sess.run([self._c_op, self._ws_op, self._len_op])\n return c, ws, lens\n except tf.errors.OutOfRangeError:\n self._cur_epoch += 1\n if self._cur_epoch >= self._epoch:\n raise StopIteration()\n else:\n self._sess.run(self._iterator.initializer)\n c, ws, lens = self._sess.run([self._c_op, self._ws_op, self._len_op])\n return c, ws, lens\n\n def next(self):\n return self.__next__()\n\n def __iter__(self):\n self._sess.run(self._iterator.initializer)\n return self\n","repo_name":"EmbolismSoil/SelfAttentiveSentenceEmbedding-TF","sub_path":"Model/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"54"} +{"seq_id":"38250547858","text":"\"\"\"\nCreated on Tue Apr 16 2019\n@author: Esteban Grisales && Andres Felipe Bravo\nArquitectura Cliente Servidor - UTP\n\"\"\"\nimport sys\nimport zmq\nimport json\nimport os\n\nsizePart = 1024*1024*10 #bytes\nPORT_SERVERS = \"8000\"\n\n\"\"\"\nmain register\n{\n\tsha256.decode():\n\t\t{\n\t\t\"parts\":[] \t#partes del archivo\n\t\t\"loc\":[]\t#ip donde esta cada una\n\t\t}\n}\n\"\"\"\nclass Proxy:\n\tdef Start(self):\n\t\tos.system(\"clear\")\n\t\tprint(\"\\n-- Welcome to UltraServer¢2019 --\\n\")\n\t\tprint(\"SYNTAX: python3 proxy.py \\n\")\n\t\tprint(\"\\tTo initialize the proxy provide a file to register the servers\")\n\t\tprint(\"\\t\\t register of contained files (will be create if doesn't exist)\\n\")\n\t\tprint(\"\\tThis file must be in the same folder of this file an you should called like an argument\")\n\t\tprint(\"\\tExample: python3 proxy.py servers.json\\n\")\n\n\t\tself.main_register={}\n\t\tself.reg_file=sys.argv[1]\n\n\t\tif os.path.isfile('./'+self.reg_file) == False:\n\t\t\tprint(\"\\nThis register file doesn't exist \")\n\t\t\tprint(\"Creating the new register /{}\\n\".format(self.reg_file))\n\t\t\twith open(self.reg_file,\"x\") 
as f:\n\t\t\t\tjson.dump(self.main_register,f)\n\t\telse:\n\t\t\tprint(\"Register found\\nLoading file ...\\n\")\n\t\t\twith open(self.reg_file) as read_file:\n\t\t\t\tself.main_register=json.load(read_file)\n\n\t\tself.context = zmq.Context()\n\t\tself.socket = self.context.socket(zmq.REP)\n\t\tself.socket.bind(\"tcp://*:\"+PORT_SERVERS)\n\t\tprint (\"Proxy is now listening for servers on port \"+PORT_SERVERS)\n\t\tself.listening()\n\n\tdef listening(self):\n\t\tself.register_server=[]\n\t\twhile True:\n\t\t\tparts=[]\n\t\t\tprint(\"\\nListening ...\\n\")\n\t\t\twho,*rest = self.socket.recv_multipart()\n\t\t\tif who.decode()==\"server\":\n\t\t\t\tprint(\"Welcome server: \")\n\t\t\t\tip,port,parts = rest\n\t\t\t\tprint(ip.decode(),port.decode(),parts.decode())\n\t\t\t\tnodo=ip.decode()+\":\"+port.decode()\n\t\t\t\tself.register_server.append(nodo)\n\n\t\t\telif who.decode()==\"client\":\n\t\t\t\tprint(\"Welcome client: \")\n\t\t\t\toperation, hash_file, register = rest\n\t\t\t\tprint(register.decode())\n\t\t\t\tparts = eval(register.decode())\n\t\t\t\t#parts = json.loads(register.decode())\n\t\t\t\t#parts = json.dumps(partes)\n\t\t\t\tfor key in parts.values():\n\t\t\t\t\tprint(key)\n\n\t\t\t\tprint(parts)\n\t\t\t\t\"\"\"\n\t\t\t\tif operation.decode()==\"upload\":\n\t\t\t\t\tif (hash_file.decode() in self.main_register):\n\t\t\t\t\t\tprint(\"ya existe\")\n\t\t\t\t\t\tself.socket.send(b\"repeated\")\n\t\t\t\t\telse:\t\n\t\t\t\t\t\tself.socket.send(b\"OK\")\n\t\t\t\telse:\n\t\t\t\t\tif operation.decode()==\"download\":\n\t\t\t\t\t\tprint(\"hagamos download\")\n\t\t\t\t\"\"\"\n\t\t\t\tprint(\"Operation :\"+operation.decode())\n\t\t\t\t#self.socket.send(b\"OK\")\n\t\t\t\tif operation.decode()==\"upload\":\n\t\t\t\t\tself.upload(hash_file,parts)\n\t\t\t\telif operation.decode()==\"download\":\n\t\t\t\t\tself.download(hash_file)\n\t\t\tprint(\"Operation completed successfully!\")\n\t\t\t#print(self.register_server)\n\n\n\tdef upload(self,hash_file,parts):\n\t\tprint(\"Receiving parts ...\")\n\t\t#parts=self.socket.recv_json()\n\t\tself.main_register.update(parts)\n\t\twith open(self.reg_file, \"w\") as f:\n\t\t\tjson.dump(self.main_register, f)\n\n\t\tn=len(self.main_register.get(hash_file.decode()).get('parts'))\n\t\tprint(\"Parts to receive: \"+str(n))\n\t\t#a = \"192.168.9.1:8000\"\n\t\tloc=[]\n\t\tx=0\n\t\twhile x < n:\n\t\t\tfor s in range(0,len(self.register_server)):\n\t\t\t\tif x == n:\n\t\t\t\t\tbreak\n\t\t\t\tloc.append(self.register_server[s])\n\t\t\t\tx=x+1\n\n\n\t\tloc2=[x.encode() for x in loc] # locations encoded for sending\n\t\tself.socket.send_multipart(loc2)\n\t\tself.main_register.get(hash_file.decode()).update({'loc':loc})\n\t\tprint(self.main_register)\n\t\t#self.socket.send(b\"OK\")\n\n\tdef download(self,hash_file):\n\t\tprint(\"Sending parts ...\")\n\t\thash_file = self.socket.recv()\n\t\tprint(\"hash file: {}\".format(hash_file))\n\t\tdicc = self.main_register.get(hash_file.decode())\n\t\tlocs = json.dumps(dicc)\n\t\tself.socket.send_json(locs)\n\nif __name__ == '__main__':\n\tProxy = Proxy()\n\tProxy.Start()\n","repo_name":"andresfbravo/ARCH-Client-Server","sub_path":"tareas/FileServerProxy/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"28804276670","text":"assignments = []\n\ndef cross(A, B):\n    \"Cross product of elements in A and elements in B.\"\n    return [a + b for a in A for b in B]\n\nrows='ABCDEFGHI'\ncols='123456789'\n\nboxes = 
cross(rows, cols) # It is a list of all boxes keys\n\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\n\ndiaglist = [[row_units[i][i] for i in range(9)],[row_units[i][8-i] for i in range(9)]] # A list of diagonal box keys for solving diagonal sudoku\n\nunitlist = row_units + column_units + square_units + diaglist\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes) # A dictionary of box id as key and value as a set of keys of it's row, column, and grid\n\ndef assign_value(values, box, value):\n \"\"\"\n Please use this function to update your values dictionary!\n Assigns a value to a given box. If it updates the board record it.\n \"\"\"\n # Don't waste memory appending actions that don't actually change any values\n if values[box] == value:\n return values\n\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values\n\ndef naked_twins(values):\n \"\"\"Eliminate values using the naked twins strategy.\n Args:\n values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n Returns:\n the values dictionary with the naked twins eliminated from peers.\n \"\"\"\n\n # Find all instances of naked twins\n # Eliminate the naked twins as possibilities for their peers\n\n for box in boxes:\n if(len(values[box]) == 2):#Checks if length of box value is two to find it's naked twin\n v = values[box]\n z=units[box]#Gives a list of lists of box's row, column and grid unit\n for l in z:\n poped_box = l.pop(l.index(box))\n for b in l:\n if values[b] == v:\n neighbours = l\n poped_b = neighbours.pop(neighbours.index(b))\n for n in neighbours:\n #Replace the naked twin value of it's peer with empty string\n values = assign_value(values ,n, values[n].replace(v[0],''))\n values = assign_value(values ,n, values[n].replace(v[1],''))\n neighbours.append(poped_b)\n poped_box = l.append(poped_box)\n return values\n\n\ndef grid_values(grid):\n \"\"\"\n Convert grid into a dict of {square: char} with '123456789' for empties.\n Args:\n grid(string) - A grid in string form.\n Returns:\n A grid in dictionary form\n Keys: The boxes, e.g., 'A1'\n Values: The value in each box, e.g., '8'. 
If the box has no value, then the value will be '123456789'.\n \"\"\"\n\n sudgrid = dict(zip(boxes, grid))\n for key, value in sudgrid.items():\n if sudgrid[key] == '.':\n sudgrid[key] = '123456789'\n return sudgrid\n\ndef display(values):\n \"\"\"\n Display the values as a 2-D grid.\n Args:\n values(dict): The sudoku in dictionary form\n \"\"\"\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return\n\ndef eliminate(values):\n '''\n This function eliminates the solved value boxes values from it's peers' box\n '''\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values= assign_value(values,peer,values[peer].replace(digit,''))\n return values\n\ndef only_choice(values):\n '''\n This function is for finding the digit which can exists in only one box in every unit and assign that box the digit\n '''\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n values = assign_value(values, dplaces[0], digit)\n return values\n\ndef reduce_puzzle(values):\n '''\n This function is used to leverage the two constraints only_choice and eliminate iteratively to solve the puzzle entirely\n '''\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n stalled = False\n while not stalled:\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n values = eliminate(values)\n values = only_choice(values)\n values = naked_twins(values)\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n stalled = solved_values_before == solved_values_after\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values\n\ndef search(values):\n '''\n Using depth first search, create a tree and solve the sudoku recursively\n '''\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False\n if all([len(values[s])==1 for s in boxes]):\n return values\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt\n\ndef solve(grid):\n \"\"\"\n Find the solution to a Sudoku grid.\n Args:\n grid(string): a string representing a sudoku grid.\n Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n Returns:\n The dictionary representation of the final sudoku grid. 
False if no solution exists.\n    \"\"\"\n    values = search(grid_values(grid))\n    if values is False:\n        return False\n    else:\n        return values\n\nif __name__ == '__main__':\n\n    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n    values = solve(diag_sudoku_grid)\n    if values is False:\n        print(\"Sudoku cannot be solved\")\n    else:\n        display(values)\n\n    try:\n        from visualize import visualize_assignments\n        visualize_assignments(assignments)\n\n    except SystemExit:\n        pass\n    except:\n        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","repo_name":"tjdevWorks/Sudoku-AI","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} {"seq_id":"31135734506","text":"# Helper functions are defined first so they exist before they are called below\ndef calcular_imc(peso, altura):\n    imc = peso / (altura ** 2)\n    return imc\n\ndef classificar_imc(imc):\n    if imc < 18.5:\n        return \"Baixo peso\"\n    elif 18.5 <= imc < 25.0:\n        return \"Peso normal\"\n    elif 25.0 <= imc < 30.0:\n        return \"Excesso de peso\"\n    elif 30.0 <= imc < 35.0:\n        return \"Obesidade de Classe 1\"\n    elif 35.0 <= imc < 40.0:\n        return \"Obesidade de Classe 2\"\n    else:\n        return \"Obesidade de Classe 3\"\n\ndef verificar_situacao(imc):\n    if imc < 18.5:\n        return \"Ganhar peso\"\n    elif 18.5 <= imc < 25.0:\n        return \"Normal\"\n    else:\n        return \"Perder peso\"\n\npeso = float(input(\"Digite o seu peso em Kg: \"))\naltura = float(input(\"Digite a sua altura em metros: \"))\n\nindividuo = {\n    'altura': altura,\n    'peso': peso\n}\n\nimc = calcular_imc(individuo['peso'], individuo['altura'])\nclassificacao = classificar_imc(imc)\nsituacao = verificar_situacao(imc)\n\nprint(\"Dados do indivíduo:\")\nfor chave, valor in individuo.items():\n    print(f\"{chave.capitalize()}: {valor}\")\nprint(f\"Classificação: {classificacao}\")\nprint(f\"Situação: {situacao}\")","repo_name":"josineudo-arruda/estudo-python","sub_path":"source/04-funcoes/exercícios/exer05.py","file_name":"exer05.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"3673126450","text":"from django.shortcuts import render\nimport requests as r\nfrom django.contrib import messages\n\n# Create your views here.\ndef getNews(request):\n    if request.method == \"POST\":\n        keyword = request.POST.get('my-search')\n    else:\n        keyword = 'bikes'\n    response = r.get(f'https://newsapi.org/v2/everything?q={keyword}&apiKey=ace489dd71b74e8f9cf8aeedf4c0a864')\n    data = response.json()\n    if data['status'] == 'ok':\n        articles = data['articles']\n    else:\n        messages.warning(request, 'There is an issue with the news page. 
Sorry for any inconveniences.')\n articles=[]\n return render(request, 'news/news.html', context={'articles':articles})","repo_name":"smtsuchi/intro_to_django_legends3","sub_path":"news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27059109452","text":"import itertools\n\n\ndef solution(numbers):\n answer = []\n cards = list(numbers)\n pers = []\n for i in range(1,len(cards)+1):\n pers += list(map(''.join, itertools.permutations(cards, i)))\n\n for x in pers:\n if x[0]=='0':\n continue\n x=int(x)\n k = int(x**(1/2))+1\n if x in answer:\n continue\n if x ==1:\n continue\n if x ==2 or x==3:\n answer.append(x)\n else:\n for j in range(2,k):\n if x%j==0:\n break\n else:\n answer.append(x)\n\n return len(answer)\n\nnumbers ='17'\nprint(solution(numbers))\n","repo_name":"tjdgns1284/Algo-CS","sub_path":"csagain/0905/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22329026962","text":"import tensorflow as tf\nfrom tqdm import tqdm\nimport pdb\n\nschill_fn = \"/home/lab/jonny/schillbot/schill.txt\"\n#schill_fn = \"/mnt/data/schillbot_pt/spooky.txt\"\ndecode_out = \"/mnt/data/schillbot_pt/decode_full.txt\"\n\n\nlow_threshold=10\nup_threshold = 512\n\nout_sentences = []\n\nprint('splitting sentences')\nwith open(schill_fn, 'r') as schill:\n\n txt = \"\"\n for line in tqdm(schill):\n line = line.strip()\n\n if len(txt) + len(line) + 1 >= up_threshold:\n ret = txt\n txt = \"\"\n # We don't yield very short long parts to prevent noisy examples.\n if len(ret) > low_threshold and len(ret) < up_threshold:\n out_sentences.append(ret)\n\n if not txt:\n txt = line\n else:\n txt = \" \".join([txt, line])\n\nprint('writing sentences')\nwith open(decode_out, 'w+') as out:\n for sent in tqdm(out_sentences):\n out.write(sent+'\\n')\n\n\n\n","repo_name":"sneakers-the-rat/schillbot","sub_path":"schillbot/make_decode.py","file_name":"make_decode.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10976132198","text":"import sys\nsys.stdin = open('input.txt','r')\nfrom _collections import deque\n\nm,n = map(int, input().split())\nbox = [list(map(int, input().split())) for _ in range(n)]\nQ = deque()\nvisit = []\nvisited = [[0] * m for _ in range(n)]\nunriped = 0\n\nfor i in range(n):\n for j in range(m):\n if box[i][j] == 1:\n Q.append((i,j))\n visited[i][j] = 1\ndays = 1\n\nwhile Q:\n dr = [1, 0, -1, 0]\n dc = [0, 1, 0, -1]\n\n q = Q.popleft()\n visit.append((q[0],q[1]))\n\n for i in range(4):\n nr = q[0] + dr[i]\n nc = q[1] + dc[i]\n if 0<=nr self.buffer_size):\n\t\t\tprint(\"Trying to write huge buffer !!!!!!!\")\n\t\t\treturn\n\n\t\tself.lock.acquire()\n\n\t\t# TODO: Check for buffer overrun\n\t\t# Case A: Read pos was smaller than write pos\n\t\t# Case B: Read pos was bigger than write pos\n\n\t\t# Data fitting into remaining buffer\n\t\tif((self.write_pos + datalen) <= self.buffer_size):\n\t\t\tself.buffer[self.write_pos:self.write_pos+datalen] = data[:]\n\t\t\tself.write_pos += datalen\n\n\t\telse:\n\n\t\t\t#Write first part into buffer\n\t\t\tfirst_len = self.buffer_size - self.write_pos\n\t\t\tself.buffer[self.write_pos:self.write_pos+first_len] = data[0:first_len]\n\n\t\t\t#Write second part wrapped around\n\t\t\tsecond_len = datalen - 
first_len\n\t\t\tself.buffer[0:second_len] = data[first_len:first_len+datalen]\n\t\t\tself.write_pos = second_len\n\n\t\tself.lock.release()\n\n\tdef get_buffer_size(self):\n\t\treturn self.buffer_size\n\n\tdef can_read_n_bytes(self,n):\n\t\tif(self.read_pos <= self.write_pos):\n\t\t\treturn n <= (self.write_pos - self.read_pos)\n\t\telse:\n\t\t\tavail = (self.buffer_size - self.read_pos) + self.write_pos\n\t\t\treturn n <= avail\n\n\n\tdef read(self,blocksize,advance):\n\n\n\t\tself.lock.acquire()\n\n\t\t# Not enough data for reading\n\t\tif not self.can_read_n_bytes(blocksize):\n\t\t\tself.lock.release()\n\t\t\treturn None\n\n\t\t# Can read in one block\n\t\tif((self.read_pos + blocksize) <= self.buffer_size):\n\t\t\tdata = self.buffer[self.read_pos:self.read_pos+blocksize]\n\t\t\tself.read_pos += advance\n\t\t\tif(self.read_pos > self.buffer_size):\n\t\t\t\tself.read_pos %= self.buffer_size\n\n\t\t\tself.lock.release()\n\t\t\treturn data\n\n\t\t# Need to concatenate\n\t\telse:\n\n\t\t\tfirst_part = self.buffer[self.read_pos:self.buffer_size]\n\t\t\tfirst_len = (self.buffer_size - self.read_pos)\n\t\t\tsecond_len = blocksize - first_len\n\t\t\tsecond_part = self.buffer[0:second_len]\n\t\t\tself.read_pos += advance\n\t\t\tif(self.read_pos > self.buffer_size):\n\t\t\t\tself.read_pos %= self.buffer_size\n\n\t\t\tdata = first_part + second_part\n\n\t\t\tself.lock.release()\n\t\t\treturn data\n\n\n\nclass TestRingBuffer(unittest.TestCase):\n\n\tdef test_can_read(self):\n\t\tr = RingBuffer()\n\t\tself.assertEqual(r.can_read_n_bytes(1),False)\n\t\tself.assertEqual(r.can_read_n_bytes(0),True)\n\n\tdef test_read_write(self):\n\t\tr = RingBuffer()\n\t\tx = b\"Bytes objects are immutable sequences of single bytes\"\n\t\tr.write(x)\n\t\tself.assertEqual(r.can_read_n_bytes(len(x)),True)\n\t\tself.assertEqual(r.can_read_n_bytes(len(x)+1),False)\n\n\t\txread =r.read(len(x),0)\n\t\tself.assertEqual(x,xread)\n\n\tdef test_advance(self):\n\t\tr = RingBuffer(1024)\n\t\tx = b\"ABCDEFGHIJKlMNOPQRSTUVWXYZ\"\n\t\ty = b\"BCDEFGHIJKlMNOPQRSTUVWXYZ\"\n\t\tr.write(x)\n\t\tx1 = r.read(26,0)\n\t\tself.assertEqual(x,x1)\n\n\t\tx2 = r.read(26,1)\n\t\tself.assertEqual(x,x2)\n\n\t\ty1 = r.read(25,1)\n\t\tself.assertEqual(y,y1)\n\n\tdef test_full_read_write(self):\n\t\tr = RingBuffer()\n\t\ts = r.get_buffer_size()\n\t\tdata = bytearray(s)\n\t\tr.write(data)\n\t\tdata_r = r.read(s,0)\n\t\tself.assertEqual(data,data_r)\n\n\tdef test_overlap_read_write(self):\n\t\tr = RingBuffer(10)\n\t\tself.assertEqual(r.get_buffer_size(),10)\n\t\tx = b\"ABCDEFGH\"\n\t\ty = b\"IJKlMNOP\"\n\t\tr.write(x)\n\t\tdatax = r.read(8,8)\n\t\tself.assertEqual(datax,x)\n\n\t\tr.write(y)\n\t\tdatay = r.read(8,8)\n\t\tself.assertEqual(datay,y)\n\n\n\nif __name__ == \"__main__\":\n\tunittest.main()\n\tsource = AudiostreamSource()\n\tsource.print_info()\n\tsource.start()\n\twhile True:\n\t\ttime.sleep(1)\n\n\n\n\n","repo_name":"nyumaya/nyumaya_audio_recognition","sub_path":"python/src/ringbuffer.py","file_name":"ringbuffer.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"54"} +{"seq_id":"9150087501","text":"import sys, os\nfrom ament_index_python.packages import get_package_share_directory, get_package_prefix\nfrom pprint import pprint\n\nfrom environment_common.convertors.templating.datum import DatumTemplates\nfrom environment_common.convertors.tools.kml import getroot, gettree, polyline_to_list\n\ndef run(args=None):\n ENV = 
get_package_share_directory('environment_template')\n kml_path = os.path.join(args['src'], 'config', 'location', 'fences.kml')\n locations = dict()\n\n while True:\n print(f'Constructing datum from `{kml_path}`. \\nTo use a different KML, place the `.kml` file in: `environment_template/config/location/` \\n\\n\\nEnter the name of the file below, or press [ENTER] to continue:')\n inp = input('>> environment_template/config/location/')\n print('\\n')\n print(inp)\n if inp != '':\n if not inp.endswith('.kml'):\n print('Ensure you have included the correct file extension of: `.kml`\\n\\n')\n else:\n kml_path = os.path.join(args['src'], 'config', 'location', inp)\n break\n else:\n break\n\n root = getroot(kml_path)\n locations = gettree(root)\n\n while True:\n print(\"\\nPlease select which Placemark to use for the gnss_fence:\")\n print(f\"Available Placemarks: {list(locations.keys())}\")\n loc = input('>> ')\n if loc in locations.keys():\n break\n environment = locations[loc]\n\n longitude = environment['fence'][0][0]\n latitude = environment['fence'][0][1]\n gnss_fence = [lle[:2][::-1] for lle in environment['fence']][:-1]\n\n xmin = -100\n xmax = 100\n ymin = -100\n ymax = 100\n\n datum_path = os.path.join(args['src'], 'config', 'location', 'datum_autogen.yaml')\n with open(datum_path, 'w') as f:\n txt = DatumTemplates.template%(latitude, longitude, str(gnss_fence).replace(\"'\",\"\"), xmin, xmax, ymin, ymax)\n f.write(txt)\n print(f'\\n\\nSaved `datum_autogen.yaml` to `{datum_path}`\\n\\n')\n\ndef main(args=None):\n e = 'environment_template'\n src = '/'.join(get_package_prefix(e).split('/')[:-2]) + f'/src/{e}'\n location_name = 'riseholme_field_1'\n run({'src': src, 'location_name':location_name})\n\nif __name__ == '__main__':\n main()\n","repo_name":"LCAS/environment_common","sub_path":"environment_common/convertors/kml_to_datum.py","file_name":"kml_to_datum.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44210428402","text":"''' imports '''\nfrom fastapi import APIRouter, File, Form\nfrom starlette.responses import StreamingResponse\nfrom io import BytesIO\nfrom .techniques import techniques\nimport pandas as pd\nimport timeit\nimport json\n''' router '''\nrouter = APIRouter(\n\t\t\tprefix=\"/anonymization\",\n \t\ttags=[\"anonymization\"],\n\t\t\tresponses={404: {\"description\": \"Not found\"}}\n\t\t\t)\n''' anonymization main endpoint '''\n@router.post(\"/files\", response_class = StreamingResponse)\nasync def anonymization(file: bytes = File(...), config: str = Form(...)):\n\t''' initiate timer '''\n\ttic = timeit.default_timer()\n\t\n\t''' input in-memory file to dataframe'''\n\tinMemoryFile = BytesIO(file)\n\tdf = pd.read_parquet(inMemoryFile)\n\tinMemoryFile.close()\n\n\t''' json configuration and apply techniques '''\n\tconfig_obj = json.loads(config)\n\tprint(config_obj)\n\tresults_df = techniques(df, config_obj)\n\n\t''' output in-memory file to streaming response '''\n\toutMemoryFile = BytesIO()\n\tresults_df.to_parquet(\n\t\toutMemoryFile, index=True, compression='gzip')\n\tresponse = StreamingResponse(\n\t\titer([outMemoryFile.getvalue()]),\n\t\tmedia_type='application/gzip',\n\t\theaders={\n\t\t'Content-Disposition': 'attachment; filename=dataset.parquet.gzip',\n\t\t'Access-Control-Expose-Headers': 'Content-Disposition'\n\t\t}\n\t)\n\toutMemoryFile.close()\n\n\t''' end timer '''\n\telapsed = timeit.default_timer() - tic\n\tprint( f'Time elapsed is aproximately 
{elapsed/60} minutes.' )\n\n\t''' return streaming response'''\n\treturn response","repo_name":"Davidmenamm/Anonymization","sub_path":"back_end/fastapi_server/endpoints/data_science/anonymization/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4067179173","text":"import requests\n\n\ndef main():\n\n # createGetRequest() # TODO :: Need to fix this\n createPostRequest()\n\n\n\n\ndef createGetRequest():\n #1. Create API - End Point\n URL = \"http://maps.googleapis.com/maps/api/geocode/json\"\n\n #2. Add location\n location = \"McMaster University\"\n\n #3. Define parameter dictionary to be sent to the API\n PARAMS = {\"address\" : location}\n\n #4. Send Get request & Save parameters as response Object\n response = requests.get(URL, PARAMS)\n\n #5. Extract data in json format\n data = response.json()\n\n latitude = data['results'][0]['geometry']['location']['lat']\n longitude = data['results'][0]['geometry']['location']['lng']\n formatted_address = data['results'][0]['formatted_address']\n\n # printing the output\n print(\"Latitude:%s\\nLongitude:%s\\nFormatted Address:%s\" % (latitude, longitude, formatted_address))\n\n\ndef createPostRequest():\n\n print(\"\\nCreating Post Request URL\")\n\n #1. Create API - End Point\n API_ENDPOINT = \"http://pastebin.com/api/api_post.php\"\n\n #2. Define parameters\n API_KEY = \"9b69dce56a6aab2bda995f25ea539f23\"\n\n #3. Add additional parameters\n sourceCode = \"Helloworldzzzzz\"\n\n data = {\"api_dev_key\" : API_KEY,\n \"api_option\" : \"paste\",\n \"api_paste_code\" : sourceCode,\n \"api_paste_format\" : \"python\"}\n\n #4. Send Post request & Save response as Object\n response = requests.post(API_ENDPOINT, data)\n\n #5. Extract response\n pastebin_url = response.text\n\n print(\"Response URL: \", pastebin_url)\n\n pass\n\n\n\nif __name__ == '__main__':\n main()\n\n'''\n\nGet - to request a data from a server\nPOST - To submit data to be process from a server\n\n\nTo make HTTP Requests in python we can use libraries such as\n- httplib\n- urllib\n- requests (Most Simplest)\n\n\nGET REQUEST\n\nWhat is happening ? \n- We are getting the latitude, longtitude and formatted address of a given location\n\nHow ? \n- We are sending a get request to the Google Maps API\n- We provide the location, Google api gives us the data (lat, long, address)\n- Data is provided in the form of JSON which we parse D\n\n\nSo What are the steps in which we create a GET request\n1. Set the API Endpoint - This is usually a URL\n2. Define the parameters - This will in the form of dictionary\n3. Send request & Store response in a response object\n4. Parse \n\n\n\n\n\nPOST REQUEST \n\nWhat is happening ? \n- We are pasting our source code onto Pastebin\n\nHow ? \n- We are sending a post request \n- We use the request.post() function to send the post\n- If everything is ok, we get a Web URL like \"https://pastebin.com/j3EDz3xn\"\n- If not, we get a error like \"Bad API request, invalid api_dev_key\"\n\n\nNote\n1. For Get Method - All form of data is encoded into the URL, added a query of String parameters\n2. For Post Method - All form of data is encoded in the message body of the HTTP request\n3. Only ASCII characters are allowed in GET, post can be whatever character\n4. GET is less secure since data is set as part of the URL. 
Therefore, passwords should never be sent with a GET request\n\n'''","repo_name":"timManas/PythonProgrammingRecipes","sub_path":"project/src/Libraries&Functions/GetAndPostExample.py","file_name":"GetAndPostExample.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"22401831818","text":"\"\"\"Pipeline for cleaning raw dataset.\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom typing import Any\n\nfrom dretch import io, config\n\nRAW_DATA = 'data/seafloor_lith_data_all_features.csv'\n\n\ndef main() -> Any:\n    # load raw data\n    raw_data = pd.read_csv(RAW_DATA)\n\n    # convert special missing notation to nan\n    data = raw_data.replace(to_replace='nan', value=np.nan)\n    training_bool = pd.notnull(data['lithology'])\n    query_bool = ~training_bool\n\n    data_train = data[training_bool].copy()\n    data_train['lithology'] = data_train['lithology'].astype(int).astype(str)\n    data_query = data[query_bool].copy()\n\n    io.save_csv(data_train, config.train_data, index=False)\n    io.save_csv(data_query, config.query_data, index=False)\n\n    return data\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"joyceyuu/sea","sub_path":"sea/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"246876209","text":"# Calling a function in its own body is called recursion\r\n# Recursion is similar to a never-ending loop\r\n'''\r\ndef greet():\r\n    print(\"hello\")\r\n    greet()\r\n\r\ngreet()\r\n\r\nthe loop repeats internally from line 6 to line 4\r\n\r\n'''\r\n#recursion\r\ni=1\r\ndef greet ():\r\n    global i\r\n    print (\"Hello-\",i)\r\n    i+=1\r\n    greet ()\r\ngreet ()\r\n\r\n# recursion error - maximum recursion depth exceeded\r\n\r\n'''\r\nIn recursion every function call is preserved or stored in the memory\r\nThis builds a stack of memory. 
When that stack depth is exceeded, or when that memory is filled with function calls, a recursion error is raised.\r\nJust as there is a condition to stop a loop, a condition is written in the function body to stop the recursion.\r\nTo stop the recursion, a return statement is used.\r\n'''\r\n\r\nprint(\"----\")\r\n\r\n","repo_name":"imAshutoshGupta/python","sub_path":"Functions/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"5943903402","text":"import keras\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, LSTM, ELU, TimeDistributed\nfrom keras.optimizers import Adam\n\nimport numpy as np\n\nfrom collections import deque\n\nclass Agent:\n    def __init__(self, state_size, is_eval=False, model_name=\"\"):\n        self.state_size = state_size # normalized previous days\n        self.data_dim = 22\n        self.action_size = 3 # sit, buy, sell\n        self.memory = deque(maxlen=480)\n        self.inventory = []\n        self.model_name = model_name\n        self.is_eval = is_eval\n\n        self.gamma = 0.99\n        self.epsilon = 1.0\n        self.epsilon_min = 0.01\n        self.epsilon_decay = 0.995\n        self.tau = 0.001\n        self.learning_rate = 0.0025\n\n        self.model = load_model(\"models/\" + model_name) if is_eval else self._model()\n        self.target_model = self._model()\n\n    def _model(self):\n        model = Sequential()\n        model.add(TimeDistributed(Dense(units=32, input_dim=(self.state_size, self.data_dim))))\n        model.add(ELU())\n        model.add(TimeDistributed(Dense(units=32)))\n        model.add(ELU())\n        model.add(LSTM(units=64))\n        model.add(Dense(units=3, activation=\"linear\"))\n        model.compile(loss=\"mse\", optimizer=Adam(self.learning_rate))\n\n        return model\n\n    def act(self, state):\n        if not self.is_eval and np.random.rand() <= self.epsilon:\n            return np.random.randint(self.action_size)\n\n        state = np.expand_dims(state, axis=0)\n        options = self.model.predict(state)[0]\n        return np.argmax(options)\n\n    def sampleMemory(self, batch_size):\n        idx = np.random.permutation(len(self.memory))[:batch_size]\n        cols = [[], [], [], [], []] # state, action, reward, next_state, done \n        for i in idx:\n            memory = self.memory[i]\n            for col, value in zip(cols, memory):\n                col.append(value)\n\n        cols = [np.array(col) for col in cols]\n        return (cols[0], cols[1], cols[2], cols[3], cols[4])\n\n\n    def expReplay(self, batch_size):\n        states, actions, rewards, next_states, dones = self.sampleMemory(batch_size)\n        # Predict actions from the online model\n        action_values = self.model.predict(next_states)\n        actions = np.argmax(action_values, axis=1)\n\n        # Get Q Values from the target network\n        target_q = self.target_model.predict(next_states)\n        # Action augmentation: discount the bootstrapped Q values in place, then add rewards\n        target_q[np.arange(batch_size), actions] *= self.gamma\n        target_q[np.arange(batch_size), actions] += rewards\n        # Fit the model\n        self.model.fit(states, target_q, epochs=1, verbose=0)\n\n        if self.epsilon > self.epsilon_min:\n            self.epsilon *= self.epsilon_decay \n\n    def targetUpdate(self):\n        # Transfer learned weights from the online model to the fixed target\n        weights = self.model.get_weights()\n        target_weights = self.target_model.get_weights()\n\n        for i in range(len(target_weights)):\n            target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)\n\n        
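# Soft update: apply the Polyak-averaged weights so the target network slowly tracks the online model\n        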
self.target_model.set_weights(target_weights)\n\n","repo_name":"AdityaPareek22/tradegame","sub_path":"src/agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"34839864570","text":"from mininet.log import setLogLevel,info\nfrom mininet.net import Mininet\nfrom mininet.cli import CLI\n\ndef MininetTopo():\n    net = Mininet()\n\n    info(\"Create host nodes.\\n\")\n    lefthost = net.addHost(\"h1\")\n    righthost = net.addHost(\"h2\")\n\n    info(\"Create switch node.\\n\")\n    switch = net.addSwitch(\"s1\",failMode = 'standalone')\n\n    info(\"Create Link.\\n\")\n    net.addLink(lefthost,switch)\n    net.addLink(righthost,switch)\n\n    info(\"Build and Start network.\\n\")\n    net.build()\n    net.start()\n\n    info(\"Run mininet CLI.\\n\")\n    CLI(net)\n\nif __name__==\"__main__\":\n    setLogLevel('info')\n    MininetTopo()","repo_name":"gtenmac/Ryu","sub_path":"minitest.py","file_name":"minitest.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"18479203935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport itertools\nimport logging\n\nimport matplotlib as mpl\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n\n__logger = logging.getLogger(__name__)\n\ndef ciplot(x=None, y=None, hue=None, data=None, conf_level=.95, area_alpha=.5,\n    legend=True, colors=None, markers=None, ax=None, hue_order=None, **kwargs):\n    \"\"\"\n    Line plot of mean with confidence intervals. Like seaborn's tseries plot,\n    but doesn't assume unit level observations to pivot on.\n    Also doesn't bootstrap.\n    ``colors`` and ``markers`` can be lists or dicts of ``{hue:color}``.\n    \"\"\"\n    if (x is None or y is None) and data is None:\n        raise AttributeError(\"Please input an x and y variable to plot.\")\n    # Sort out default values for the parameters\n    if ax is None:\n        ax = plt.gca()\n    BACKUP_HUE = '_got_no_hue_but_one_'\n    # Handle different types of input data. 
Just DataFrame for now.\n if isinstance(data, pd.DataFrame):\n xlabel = x\n ylabel = y\n keep_cols = [x, y]\n if hue is not None:\n keep_cols.append(hue)\n\n data = data.loc[data[keep_cols].notnull().all(axis=1), keep_cols]\n\n # Condition is optional\n if hue is None:\n hue = BACKUP_HUE\n data[BACKUP_HUE] = 1\n legend = False\n # legend_name = None\n\n legend = True and legend\n # legend_name = hue\n\n _hue_order = sorted(data[hue].unique())\n n_hue = len(_hue_order)\n if hue_order is None:\n hue_order = _hue_order\n else:\n assert(len(hue_order) <= n_hue)\n\n else:\n raise NotImplementedError(\"Use a DataFrame please.\")\n\n # Set up the color palette\n if colors is None:\n current_palette = sns.utils.get_color_cycle()\n if len(current_palette) < n_hue:\n colors = sns.color_palette(\"husl\", n_hue)\n else:\n colors = sns.color_palette(n_colors=n_hue)\n colors = {c: colors[i] for i, c in enumerate(data[hue].unique())}\n elif isinstance(colors, dict):\n colors = {c: colors[c] for c in data[hue].unique()}\n elif isinstance(colors, list):\n colors = itertools.cycle(colors)\n colors = {c: next(colors) for c in data[hue].unique()}\n else:\n try:\n colors = sns.color_palette(colors, n_hue)\n except ValueError:\n colors = mpl.colors.colorConverter.to_rgb(colors)\n colors = [colors] * n_hue\n colors = {c: colors[i] for i, c in enumerate(data[hue].unique())}\n\n # Set up markers to rotate through\n if markers is None:\n markers = {c: 'o' for c in data[hue].unique()}\n elif isinstance(markers, dict):\n markers = {c: markers[c] for c in data[hue].unique()}\n else:\n markers = itertools.cycle(markers)\n markers = {c: next(markers) for c in data[hue].unique()}\n\n # Do a groupby with condition and plot each trace and area\n# for _h, (_hue, _huedf) in enumerate(data.groupby(hue, sort=False)):\n for _h, _thishue in enumerate(hue_order):\n _huedf = data[data[hue] == _thishue]\n\n label = _thishue if legend else \"_nolegend_\"\n\n _byx = _huedf.groupby(x)\n _byxmean = _byx[y].mean()\n _byxstd = _byx[y].std()\n _byxn = _byx[y].count()\n\n _cis = stats.norm.interval(\n conf_level, _byxmean, _byxstd / np.sqrt(_byxn))\n\n _x = _byxmean.index.astype(np.float)\n\n ax.fill_between(_x, _cis[0], _cis[1],\n color=colors[_thishue], alpha=area_alpha)\n ax.plot(_x, _byxmean, color=colors[_thishue], marker=markers[_thishue],\n label=label, **kwargs)\n\n # Add the plot labels\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n if legend:\n ax.legend(loc=0)\n return ax\n\n\n# Now monkey patch pandas.\n__logger.info(\"Run monkey_patch_seaborn() to monkey patch seaborn.\")\ndef monkey_patch_seaborn():\n sns.ciplot = ciplot\n __logger.info(\"Added to seaborn (sns): ciplot \")\n","repo_name":"gaulinmp/panda_cub","sub_path":"panda_cub/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5927429908","text":"import hexchat\nimport itertools\nimport re\n\n__module_name__ = 'Rainbows'\n__module_version__ = '1.0'\n__module_description__ = ''\n\nhexchat.prnt('Rainbows script loaded')\n\ncolors = itertools.cycle((\n\t('05', '10'),\n\t('04', '12'),\n\t('07', '02'),\n\t('08', '06'),\n\t('09', '13'),\n\t('03', '15'),\n\t('11', '14'),\n\t('10', '05'),\n\t('12', '04'),\n\t('02', '07'),\n\t('06', '08'),\n\t('13', '09'),\n\t('15', '03'),\n\t('14', '11'),\n))\n\nfab_hook = None\nin_fab_hook = False\n\ncolor_code_regex = 
re.compile(r'(?:(?:{0}\\d\\d?(?:,\\d\\d?)?))'.format('\\003'))\ncolor_code_or_regular_character_regex = re.compile(r'((?:{0}\\d\\d?(?:,\\d\\d?)?)|.)'.format('\\003'))\n\ndef fab_callback(word, word_eol, user_data):\n\tglobal in_fab_hook\n\t\n\tin_fab_hook = True\n\thexchat.command(\n\t\t'say {0}'.format(\n\t\t\t' '.join(\n\t\t\t\t''.join(\n\t\t\t\t\tadd_color(c) for c in color_code_or_regular_character_regex.split(w) if c\n\t\t\t\t) for w in word_eol[1].split(' ')\n\t\t\t)\n\t\t)\n\t)\n\tin_fab_hook = False\n\t\n\treturn hexchat.EAT_ALL\n\ndef fab2_callback(word, word_eol, user_data):\n\tglobal in_fab_hook\n\t\n\tin_fab_hook = True\n\thexchat.command(\n\t\t'say {0}'.format(\n\t\t\t''.join(\n\t\t\t\tadd_color_and_background_color(c) for c in color_code_or_regular_character_regex.split(word_eol[1]) if c\n\t\t\t)\n\t\t)\n\t)\n\tin_fab_hook = False\n\t\n\treturn hexchat.EAT_ALL\n\ndef spoiler_callback(word, word_eol, user_data):\n\thexchat.command(\n\t\t'say {0}'.format(\n\t\t\t''.join(\n\t\t\t\tadd_spoiler_color(c) for c in color_code_or_regular_character_regex.split(word_eol[1]) if c\n\t\t\t)\n\t\t)\n\t)\n\t\n\treturn hexchat.EAT_ALL\n\ndef add_color(character):\n\tif color_code_regex.match(character):\n\t\treturn character\n\telse:\n\t\tnext_color, _ = next(colors)\n\t\treturn '\\003{0}{1}'.format(next_color, character)\n\ndef add_color_and_background_color(character):\n\tif color_code_regex.match(character):\n\t\treturn character\n\telse:\n\t\tnext_color, next_bg_color = next(colors)\n\t\treturn '\\003{0},{1}{2}'.format(next_color, next_bg_color, character)\n\ndef add_spoiler_color(character):\n\tif color_code_regex.match(character):\n\t\treturn character\n\telse:\n\t\tnext_color, _ = next(colors)\n\t\treturn '\\003{0},{0}{1}'.format(next_color, character)\n\ndef enfab_callback(word, word_eol, user_data):\n\tglobal fab_hook\n\t\n\tif fab_hook is None:\n\t\tfab_hook = hexchat.hook_command('', fab_passthru_callback)\n\t\thexchat.prnt('Fabulous mode on')\n\t\n\treturn hexchat.EAT_ALL\n\ndef defab_callback(word, word_eol, user_data):\n\tglobal fab_hook\n\t\n\tif fab_hook is not None:\n\t\thexchat.unhook(fab_hook)\n\t\tfab_hook = None\n\t\thexchat.prnt('Fabulous mode off')\n\t\n\treturn hexchat.EAT_ALL\n\ndef fab_passthru_callback(word, word_eol, user_data):\n\tglobal in_fab_hook\n\t\n\tif in_fab_hook:\n\t\treturn hexchat.EAT_NONE\n\telse:\n\t\thexchat.command('fab {0}'.format(word_eol[0]))\n\t\t\n\t\treturn hexchat.EAT_ALL\n\nhexchat.hook_command('fab', fab_callback)\nhexchat.hook_command('fab2', fab2_callback)\nhexchat.hook_command('spoiler', spoiler_callback)\nhexchat.hook_command('enfab', enfab_callback)\nhexchat.hook_command('defab', defab_callback)\n","repo_name":"Poorchop/Arnavion-scripts","sub_path":"rainbows.py","file_name":"rainbows.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36510518726","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport sortedm2m.fields\nimport django.contrib.gis.db.models.fields\nimport travelogue.models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sites', '0002_set_site_domain_and_name'),\n ('travelogue', '0002_photosize_data'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='GeoTrack',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),\n ('track', 
django.contrib.gis.db.models.fields.MultiLineStringField(dim=3, srid=4326)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Trail',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),\n ('title', models.CharField(max_length=50)),\n ('description', models.TextField(verbose_name='description', blank=True)),\n ('tags', travelogue.models.TagField(help_text='Django-tagging was not found, tags will be treated as plain text.', verbose_name='tags', max_length=255, blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TrailPoint',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),\n ('timestamp', models.DateTimeField()),\n ('point', django.contrib.gis.db.models.fields.PointField(dim=3, srid=4326)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TripNote',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),\n ('date_added', models.DateTimeField(verbose_name='date published', default=django.utils.timezone.now)),\n ('title', models.CharField(unique=True, verbose_name='title', max_length=50)),\n ('story', models.TextField(help_text='user story content as html', verbose_name='userStroy')),\n ('slug', models.SlugField(help_text='A \"slug\" is a unique URL-friendly title for an object.', verbose_name='slug', unique=True)),\n ('description', models.TextField(verbose_name='description', blank=True)),\n ('is_public', models.BooleanField(help_text='Public TripNotes will be displayed in the default views.', verbose_name='is public', default=True)),\n ('tags', travelogue.models.TagField(help_text='Django-tagging was not found, tags will be treated as plain text.', verbose_name='tags', max_length=255, blank=True)),\n ('location_detail', models.ForeignKey(verbose_name='location_detail', related_name='tripnote_related', null=True, to='travelogue.TrailPoint', blank=True)),\n ('sites', models.ManyToManyField(verbose_name='sites', null=True, to='sites.Site', blank=True)),\n ],\n options={\n 'verbose_name_plural': 'tripnotes',\n 'get_latest_by': 'date_added',\n 'verbose_name': 'tripnote',\n 'ordering': ['-date_added'],\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='trail',\n name='geoPoints',\n field=models.ManyToManyField(verbose_name='geoPoints', null=True, to='travelogue.TrailPoint', blank=True),\n preserve_default=True,\n ),\n migrations.AlterModelOptions(\n name='travelogue',\n options={'verbose_name_plural': 'travelogues', 'get_latest_by': 'date_added', 'verbose_name': 'travelogue', 'ordering': ['-date_added']},\n ),\n migrations.AddField(\n model_name='photo',\n name='location_detail',\n field=models.ForeignKey(verbose_name='location_detail', related_name='photo_related', null=True, to='travelogue.TrailPoint', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='travelogue',\n name='geoTrail',\n field=models.ForeignKey(verbose_name='geoTrail', related_name='travelogue_related', null=True, to='travelogue.Trail', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='travelogue',\n name='notes',\n field=sortedm2m.fields.SortedManyToManyField(help_text=None, verbose_name='tripNote', related_name='travelogues', null=True, to='travelogue.TripNote', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n 
model_name='photosize',\n            name='quality',\n            field=models.PositiveIntegerField(help_text='JPEG image quality.', verbose_name='quality', default=70, choices=[(30, 'Very Low'), (40, 'Low'), (50, 'Medium-Low'), (60, 'Medium'), (70, 'Medium-High'), (80, 'High'), (90, 'Very High')]),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='travelogue',\n            name='photos',\n            field=sortedm2m.fields.SortedManyToManyField(help_text=None, verbose_name='photos', related_name='travelogues', null=True, to='travelogue.Photo', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='watermark',\n            name='image',\n            field=models.ImageField(verbose_name='image', upload_to='travelogue/watermarks'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"rahulvgmail/TripMapR","sub_path":"TripMapR/travelogue/migrations/0003_auto_20150411_1626.py","file_name":"0003_auto_20150411_1626.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22519909361","text":"import sqlite3\nfrom flask import Flask, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import create_engine\n\n# Create the connection to our SQLITE database, this is where we connect\nconn = sqlite3.connect('tablo.db', check_same_thread=False)\n# Create a cursor - a special object that runs queries and fetches their results\ncursor = conn.cursor()\n\n# create the app object, name = app.py\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tablo.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\ntablo = create_engine('sqlite:///tablo.db', echo=True)\n\n# Create the table if it does not exist yet\ncursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS Result (\n    id INTEGER,\n    nomer INTEGER,\n    name TEXT,\n    team TEXT,\n    cold INTEGER,\n    hot INTEGER,\n    sum INTEGER,\n    wincold INTEGER,\n    winsum INTEGER\n)\"\"\")\n\nconn.commit()\n\n\n# class Result() - the database structure\nclass Result(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    nomer = db.Column(db.Integer, nullable=True)\n    name = db.Column(db.String(200), nullable=False)\n    team = db.Column(db.String(200), nullable=False)\n    cold = db.Column(db.Integer)\n    hot = db.Column(db.Integer)\n    sum = db.Column(db.Integer)\n    wincold = db.Column(db.Integer)\n    winsum = db.Column(db.Integer)\n\n    def __repr__(self):\n        return '<Result %r>' % self.id\n\n\n# find the agents with the highest total\nwinsumz = 0\n\n\ndef topsum():\n    # reset all winsum flags to 0:\n    cursor.execute(\"UPDATE Result SET winsum = 0 WHERE winsum = 1\")\n\n    # set winsum = 1 where the total is the highest:\n    cursor.execute(\"UPDATE Result SET winsum = 1 WHERE sum = (SELECT MAX(sum) FROM result)\")\n    cursor.execute(\"SELECT winsum FROM Result WHERE sum = (SELECT MAX(sum) FROM result)\")\n    (winsumz,) = cursor.fetchone()\n    conn.commit()\n    return winsumz\n\n\n# find the agents with the highest cold total\nwincoldz = 0\n\n\ndef topcold():\n    cursor.execute(\"SELECT SUM(cold) FROM Result \")\n    sumacoldov = cursor.fetchone()\n    if sumacoldov == (0,):\n        print(\"no colds yet, not filling wincold\", sumacoldov)\n        return wincoldz\n    else:\n        # reset all wincold flags to 0:\n        cursor.execute(\"UPDATE Result SET wincold = 0 WHERE wincold = 1\")\n\n        # set wincold = 1 where the cold total is the highest:\n        cursor.execute(\"UPDATE Result SET wincold = 1 WHERE cold = (SELECT MAX(cold) FROM result)\")\n        cursor.execute(\"SELECT wincold FROM Result WHERE cold = (SELECT MAX(cold) FROM result)\")\n        (wincoldz,) = 
cursor.fetchone()\n        conn.commit()\n        return wincoldz\n\n\n# attempt to output the (hot, cold) sums per team\n# declare the list variables\nviborkaEA = []\nviborkaPA = []\nviborkaGA = []\nviborkaAll = []\n\n\n# if there is data, select cold, hot and sum separately\ndef Eteam(viborkaEA):\n    cursor.execute(\"SELECT SUM(sum) FROM Result WHERE team = 'ENG'\")\n    viborkaEA = cursor.fetchone()\n    if viborkaEA == (None,):\n        print(\"ENG list: no data\", viborkaEA)\n        return viborkaEA\n    else:\n        viborkaEA = []\n        cursor.execute(\"SELECT SUM(cold) FROM Result WHERE team = 'ENG'\")\n        engcold = cursor.fetchone()[0]\n        viborkaEA.insert(0, engcold)\n        cursor.execute(\"SELECT SUM(hot) FROM Result WHERE team = 'ENG'\")\n        enghot = cursor.fetchone()[0]\n        viborkaEA.insert(1, enghot)\n        viborkaEA.insert(2, (engcold + enghot))\n        print(\"ENG list: \", viborkaEA)\n        return viborkaEA\n\n\n# if there is data, select cold, hot and sum separately\ndef Gteam(viborkaGA):\n    cursor.execute(\"SELECT SUM(sum) FROM Result WHERE team = 'GER'\")\n    viborkaGA = cursor.fetchone()\n    if viborkaGA == (None,):\n        print(\"GER list: no data\", viborkaGA)\n        return viborkaGA\n    else:\n        viborkaGA = []\n        cursor.execute(\"SELECT SUM(cold) FROM Result WHERE team = 'GER'\")\n        gercold = cursor.fetchone()[0]\n        viborkaGA.insert(0, gercold)\n        cursor.execute(\"SELECT SUM(hot) FROM Result WHERE team = 'GER'\")\n        gerhot = cursor.fetchone()[0]\n        viborkaGA.insert(1, gerhot)\n        viborkaGA.insert(2, (gercold + gerhot))\n        print(\"GER list: \", viborkaGA)\n        return viborkaGA\n\n\n# if there is data, select cold, hot and sum separately\ndef Pteam(viborkaPA):\n    cursor.execute(\"SELECT SUM(sum) FROM Result WHERE team = 'POL'\")\n    viborkaPA = cursor.fetchone()\n    if viborkaPA == (None,):\n        print(\"POL list: no data\")\n        return viborkaPA\n    else:\n        viborkaPA = []\n        cursor.execute(\"SELECT SUM(cold) FROM Result WHERE team = 'POL'\")\n        polcold = cursor.fetchone()[0]\n        viborkaPA.insert(0, polcold)\n        cursor.execute(\"SELECT SUM(hot) FROM Result WHERE team = 'POL'\")\n        polhot = cursor.fetchone()[0]\n        viborkaPA.insert(1, polhot)\n        viborkaPA.insert(2, (polcold + polhot))\n        print(\"POL list: \", viborkaPA)\n        return viborkaPA\n\n\n# if there is data, select cold, hot and sum separately for all teams\ndef SumaTeam(viborkaAll):\n    cursor.execute(\"SELECT SUM(sum) FROM Result\")\n    viborkaAll = cursor.fetchone()\n    if viborkaAll == (None,):\n        print(\"Per-team list: no data\", viborkaAll)\n        return viborkaAll\n    else:\n        viborkaAll = []\n        cursor.execute(\"SELECT SUM(cold) FROM Result\")\n        allcold = cursor.fetchone()[0]\n        viborkaAll.insert(0, allcold)\n        cursor.execute(\"SELECT SUM(hot) FROM Result\")\n        allhot = cursor.fetchone()[0]\n        viborkaAll.insert(1, allhot)\n        viborkaAll.insert(2, (allcold + allhot))\n        print(\"Per-team list: \", viborkaAll)\n        return viborkaAll\n\n\n# the main route / and its corresponding request handler:\n@app.route(\"/\")\n@app.route('/index')\n@app.route(\"/home\")\ndef main():\n    return render_template('index.html')\n\n\n@app.route(\"/about\")\ndef about():\n    cursor.execute(\"SELECT SUM(sum) FROM Result\")\n    viborka = cursor.fetchone()\n    if viborka == (None,):\n        pass\n    else:\n        topsum()\n        topcold()\n\n    result = Result.query.order_by(Result.sum.desc(), Result.cold.desc()).all()\n    print(\"Here: \", result)\n    return render_template('about.html', result=result, viborkaAll=SumaTeam(viborkaAll), viborkaEA=Eteam(viborkaEA),\n                           viborkaGA=Gteam(viborkaGA), viborkaPA=Pteam(viborkaPA))\n\n\n@app.route(\"/add\", methods=['POST', 'GET'])  # adding agents\ndef add():\n    if 
request.method == 'POST':\n        id = request.form['nomer']\n        nomer = id\n        name = request.form['name']\n        cold = request.form['cold']\n        hot = request.form['hot']\n        team = request.form['team']\n        sum = int(hot) + int(cold)\n        wincold = 0\n        winsum = 0\n\n        cursor.execute(f\"SELECT nomer FROM Result WHERE nomer ='{nomer}'\")\n        if cursor.fetchone() is None:\n            print(\"No such agent yet\")\n            print(\"Sent to the database: \", id, nomer, name, hot, cold, team, sum)\n            try:\n                cursor.execute(f\"INSERT INTO Result VALUES (?,?,?,?,?,?,?,?,?)\",\n                               (id, nomer, name, team, cold, hot, sum, wincold, winsum))\n                conn.commit()\n                return redirect('/about')\n            except:\n                return \"ERROR while creating!!!\"\n        else:\n            print(\"Such an agent already exists!\")\n            return redirect('/edit')\n    else:\n        print(\"Opened add\")\n        result = Result.query.order_by(Result.sum.desc()).all()\n        pass\n    return render_template('add.html', result=result, viborkaAll=SumaTeam(viborkaAll))\n\n\n@app.route(\"/del\", methods=['POST', 'GET'])  # deleting agents\ndef udalit():\n    result = Result.query.order_by(Result.nomer.asc()).all()\n    if request.method == 'POST':  # does NOT work properly\n        id = request.form['nomer']\n        print(\"Received number:\", id)\n        result = Result.query.get(id)\n        try:\n            with conn:\n                cursor.execute(\"SELECT * FROM Result WHERE id = 'result'\")\n                db.session.delete(result)\n                db.session.commit()\n                print(\"Deleted number:\", id)\n                return redirect('/about')\n        except:\n            pass\n        return \"ERROR while deleting!!!\"\n    else:\n        print(\"Opened del\")\n        pass\n    return render_template('del.html', result=result, viborkaAll=SumaTeam(viborkaAll))\n\n\n@app.route(\"/del/vse\")\ndef delvle():\n    cursor.execute(\"DELETE FROM Result\")\n    conn.commit()\n    return render_template('about.html')\n\n\n@app.route(\"/edit\", methods=['POST', 'GET'])  # editing objects\ndef edit():\n    if request.method == 'POST':  # does NOT work properly\n        if int(request.form.get('hot')) < 0:\n            print(\"Invalid hot input!\")\n            return redirect('/edit')\n        elif int(request.form.get('cold')) < 0:\n            print(\"Invalid cold input!\")\n            return redirect('/edit')\n        else:\n            id = request.form['no_user']\n            print(id)\n            result = Result.query.get(id)\n            print(\"Taken from the database: \", id, result.nomer, result.name, result.hot, result.cold, result.team, result.sum)\n\n            result = Result.query.get(id)\n            result.hot = int(request.form.get('hot'))\n            result.cold = int(request.form.get('cold'))\n            result.sum = int(result.hot) + int(result.cold)\n            db.session.commit()\n            print(\"Sent to the database: \", id, result.hot, result.cold, result.sum)\n            try:\n                return redirect('/about')\n            except:\n                return \"ERROR, invalid data!!!\"\n    else:\n        result = Result.query.order_by(Result.nomer.asc()).all()\n        print(\"Opened edit\")\n        pass\n    return render_template('edit.html', result=result, viborkaAll=SumaTeam(viborkaAll))\n\n\n# if this file is run as the main program, start the application:\nif __name__ == \"__main__\":\n    app.run(debug=True)  # starts the local server with debugging\n\n# close the database connection\nconn.close()\n\n# keep the program from closing in the console\ninput(\"Press Enter\")\n","repo_name":"Sunshinedvlp/sasubo","sub_path":"Source/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28730804574","text":"from dotenv import load_dotenv\n\nload_dotenv()\nimport os\nimport sys\n\nsys.path.append(\"../technique\")\nfrom utils import StrasbourgSurveyScenario, base_period, determine_qf, 
adjust_df\nfrom results import result_index, extract\nimport numpy as np\nimport pandas as pd\n\n\ndef get_df(source):\n if source == \"caf\":\n df = pd.read_excel(\n f\"{os.getenv('DATA_FOLDER')}minimales/conservatoire_base_v7.xlsx\"\n )\n else:\n df = pd.read_excel(\n f\"{os.getenv('DATA_FOLDER')}minimales/conservatoire_insee_v7.xlsx\"\n )\n\n df.agent.fillna(0, inplace=True)\n df[\"habitant.EMS\"].fillna(0, inplace=True)\n # df['hors EMS'].fillna(0, inplace=True)\n # df[\"Enfant de la fratrie\"].fillna(1, inplace=True)\n # df[\"Cycle\"].fillna(0, inplace=True)\n # df.Dominante.fillna(1, inplace=True)\n df[\"prix_input\"] = df.MontantFactureSurEleve\n\n tdf = pd.DataFrame(\n {\n \"t\": [1, 2, 3, 4, 5],\n \"v\": [0, 16714, 21707, 29007, 36114],\n }\n )\n tdf[\"b\"] = tdf.v / 12 / 2.5\n tdf[\"n\"] = tdf.b.shift(-1, fill_value=4000)\n tdf[\"qfrule\"] = (\n tdf.b.round().astype(\"int\").astype(\"str\")\n + \"<=QF<\"\n + tdf.n.round().astype(\"int\").astype(\"str\")\n )\n df[\"ressources\"] = df.merge(tdf, left_on=\"Tranche\", right_on=\"t\", how=\"left\")[\n [\"index\", \"Tranche\", \"v\"]\n ].v\n\n df[\"ressources\"].fillna(tdf.v.iloc[-1], inplace=True)\n df[\"qfrule\"] = df.merge(tdf, left_on=\"Tranche\", right_on=\"t\", how=\"left\")[\n [\"index\", \"Tranche\", \"qfrule\"]\n ].qfrule\n df[\"qfrule\"].fillna(tdf.qfrule.iloc[-1], inplace=True)\n # df[\"qfrule\"].fillna(\"0<=QF<=4000\", inplace=True)\n\n df[\"individu_id\"] = [*range(len(df))]\n\n return df\n\n\ndef build_data(df, sample_count=1, adjustment=\"v1\"):\n count = len(df)\n if type(sample_count) == str:\n sample_field, qf_field = sample_count.split(\"#\")\n sample_ids = df[sample_field]\n sample_count = 1\n else:\n sample_field, qf_field = None, None\n sample_ids = np.repeat(list(range(sample_count)), count)\n\n individu_df = pd.DataFrame(\n {\n \"famille_id\": list(range(count * sample_count)),\n }\n )\n sample_qfrule = np.tile(df.qfrule, sample_count)\n\n famille_df = pd.DataFrame(\n {\n \"sample_id\": sample_ids,\n \"strasbourg_conservatoire_base_ressources_historique\": np.tile(\n df[\"ressources\"], sample_count\n ),\n \"strasbourg_conservatoire_nombre_cycles\": np.tile(\n df[\"Cycle.1\"], sample_count\n ),\n \"qfrule\": np.tile(df.qfrule, sample_count),\n \"strasbourg_conservatoire_bourse_historique\": np.tile(\n df.strasbourg_conservatoire_bourse_historique, sample_count\n ),\n \"agent_ems\": np.tile(df.agent, sample_count),\n \"habitant_ems\": np.tile(\n df[\"habitant.EMS\"],\n sample_count,\n ),\n # \"strasbourg_conservatoire_enfant_dans_la_fratrie\": np.tile(df[\"Enfant de la fratrie\"], sample_count),\n }\n )\n determine_qf(famille_df)\n if qf_field:\n famille_df[\"qf_fiscal\"] = df[qf_field]\n adjust_df(famille_df, adjustment)\n\n menage_df = pd.DataFrame({})\n foyerfiscaux_df = pd.DataFrame({})\n\n individu_df[\"famille_role_index\"] = 0\n individu_df[\"foyer_fiscal_id\"] = individu_df.famille_id\n individu_df[\"foyer_fiscal_role_index\"] = 0\n individu_df[\"menage_id\"] = individu_df.famille_id\n individu_df[\"menage_role_index\"] = 0\n\n extra = pd.DataFrame(\n {\n \"individu_id\": np.tile(df.individu_id, sample_count),\n \"sample_id\": sample_ids,\n \"qfrule\": np.tile(df.qfrule, sample_count),\n \"qf_caf\": famille_df.qf_caf,\n \"qf_fiscal\": famille_df.qf_fiscal,\n \"prix_input\": np.tile(df.MontantFactureSurEleve, sample_count),\n \"bourse\": famille_df.strasbourg_conservatoire_bourse_historique,\n }\n )\n\n return (\n dict(\n input_data_frame_by_entity=dict(\n individu=individu_df,\n famille=famille_df,\n 
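# note (added comment): menage and foyer_fiscal are the empty placeholder DataFrames built above; only individu and famille carry data\n                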
menage=menage_df,\n                foyer_fiscal=foyerfiscaux_df,\n            )\n        ),\n        extra,\n    )\n\n\ndef compute(tbs, data, base, openfisca_output_variable, suffix=\"\"):\n    scenario = StrasbourgSurveyScenario(tbs, data=data)\n    prix = scenario.simulation.calculate(openfisca_output_variable, base_period)\n\n    base[\"prix\" + suffix] = prix\n    base[\"res\" + suffix] = (base.prix - base.prix_input).abs() < 0.001\n\n\ndef get_results(tbs, sample_count=1, reform=None, source=\"caf\", adjustment=\"v1\"):\n    df = get_df(source)\n\n    results = []\n    rows = []\n    output_field = \"prix\"\n    dfs = []\n    for n, gdf in df.groupby(\"service\"):\n        openfisca_output_variable = f\"strasbourg_conservatoire_{n}\"\n\n        data, out_df = build_data(gdf, sample_count)\n        compute(tbs, data, out_df, openfisca_output_variable)\n        row = [\"Culture\", n]\n        count, value = extract(out_df, output_field)\n        row.extend([count[\"mean\"], count[\"count\"]])\n        row.extend(value)\n\n        if reform:\n            compute(reform, data, out_df, openfisca_output_variable, \"_r\")\n            _, r_value = extract(out_df, output_field + \"_r\")\n            row.extend(r_value)\n        dfs.append((n, out_df))\n        rows.append(row)\n\n    return pd.DataFrame(rows, columns=result_index[0 : len(rows[0])]), dfs\n","repo_name":"guillett/tarification-solidaire-strasbourg","sub_path":"culture/conservatoire.py","file_name":"conservatoire.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20587878667","text":"import urllib.request\r\nimport ssl\r\nimport json\r\nimport datetime\r\nimport math\r\nimport pandas as pd\r\nfrom itertools import count\r\nfrom config import *\r\n\r\ndef get_request_url(url):\r\n    request = urllib.request.Request(url)\r\n    try:\r\n        response = urllib.request.urlopen(request)\r\n        if response.getcode() == 200:\r\n            print('[%s] URL Request Success' %datetime.datetime.now())\r\n            return response.read().decode('utf-8')\r\n    except Exception as e:\r\n        print(e)\r\n        print('[%s] Error for URL : %s' %(datetime.datetime.now(), url))\r\n\r\nresult = []\r\n\r\nif __name__ == '__main__':\r\n    searchBgnDe = '201601'\r\n    searchEndDe = '202012'\r\n    searchStatCd = 'US'\r\n    numOfRows = '30'\r\n    pageNo = '1'\r\n\r\n    for pageNo in count():\r\n        endPoint = 'http://openapi.customs.go.kr/openapi/service/newTradestatistics/getnationtradeList'\r\n        parameter = '?_type=json&serviceKey=' + serviceKey\r\n        parameter += '&searchBgnDe=' + searchBgnDe\r\n        parameter += '&searchEndDe=' + searchEndDe\r\n        parameter += '&searchStatCd=' + searchStatCd\r\n        parameter += '&numOfRows=' + numOfRows\r\n        parameter += '&pageNo=' + str(pageNo+1)\r\n        url = endPoint + parameter\r\n        \r\n        resultData = get_request_url(url)\r\n        jsonData = json.loads(resultData)\r\n\r\n        isStop = False\r\n\r\n        if jsonData['response']['header']['resultMsg'] == 'NORMAL SERVICE.':\r\n            totalCount = jsonData['response']['body']['totalCount']\r\n            if totalCount == 1:\r\n                balPayments = jsonData['response']['body']['items']['item']['balPayments'] \r\n                expCnt = jsonData['response']['body']['items']['item']['expCnt'] \r\n                expDlr = jsonData['response']['body']['items']['item']['expDlr'] \r\n                impCnt = jsonData['response']['body']['items']['item']['impCnt'] \r\n                impDlr = jsonData['response']['body']['items']['item']['impDlr'] \r\n                statCd = jsonData['response']['body']['items']['item']['statCd'] \r\n                statCdCntnKor1 = jsonData['response']['body']['items']['item']['statCdCntnKor1'] \r\n                year = jsonData['response']['body']['items']['item']['year']\r\n                year = str(year).replace('.','')\r\n                
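# note (added comment): assemble one output row per record - year, country name/code, import and export dollars/counts, balance of payments\r\n                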
result.append([year]+[statCdCntnKor1]+[statCd]+[impDlr]+[impCnt]+[expDlr]+[expCnt]+[balPayments])\r\n            if totalCount > 1:\r\n                for item in jsonData['response']['body']['items']['item']:\r\n                    balPayments = item['balPayments'] \r\n                    expCnt = item['expCnt'] \r\n                    expDlr = item['expDlr'] \r\n                    impCnt = item['impCnt'] \r\n                    impDlr = item['impDlr'] \r\n                    statCd = item['statCd'] \r\n                    statCdCntnKor1 = item['statCdCntnKor1'] \r\n                    year = str(item['year']).replace('.','')\r\n                    result.append([year]+[statCdCntnKor1]+[statCd]+[impDlr]+[impCnt]+[expDlr]+[expCnt]+[balPayments])\r\n            if (pageNo+1) == math.ceil(int(totalCount)/int(numOfRows)):\r\n                isStop = True\r\n        if isStop : break \r\n    tradeList_table = pd.DataFrame(result, columns=('year','statCdCntnKor1','statCd','impDlr','impCnt','expDlr','expCnt','balPayments'))\r\n\r\n    tradeList_table.to_csv('Multicampus/API/%s_%s_수출입실적(%s).csv' %(searchBgnDe, searchEndDe, statCd))\r\n    print('Test complete')\r\n\r\n\r\n\r\n","repo_name":"ayeon0804/MULTICAMPAS","sub_path":"API/NationalTradeList.py","file_name":"NationalTradeList.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34294617718","text":"from django.urls import path, include\n\nfrom store.api import views\n\nurlpatterns = [\n    path('get_products/', views.get_products, name=\"get_products\"),\n    path('get_categories/', views.get_categories, name=\"get_categories\"),\n    path('add_to_basket/', views.add_to_basket, name=\"add_to_basket\"),\n    path('remove_from_basket/', views.remove_from_basket, name='remove_from_basket'),\n    path('get_product_info/', views.get_product_info, name=\"get_product_info\"),\n    path('get_next_prev_of_product/', views.get_next_prev_of_product, name=\"get_next_prev_of_product\"),\n    path('get_history/', views.get_basket_history, name=\"get_basket_history\"),\n    path('search_product/', views.search_product, name=\"search_product\"),\n    path('get_active_basket_info/', views.get_active_basket_info, name='get_active_basket_info'),\n    path('rate_product/', views.rate_product, name='rate_product'),\n\n    path('return_order/', views.return_order, name='return_order'),\n    path('cancel_order/', views.cancel_order, name='cancel_order'),\n\n    path('pay_order/', views.pay_order, name='pay_order'),\n    path('verify_pay_order/', views.verify_pay_order, name='verify_pay_order'),\n]\n","repo_name":"SaEED-ABB/Hinata","sub_path":"store/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33497879996","text":"from tkinter import *\nimport time\n\napp = Tk()\n\napp.geometry('420x100')\napp.title(\"Digital Clock\")\napp.resizable(0, 0)\n\nlabel = Label(app, font=(\"Boulder\", 68, 'bold'), fg=\"Green\", bg=\"Black\", bd=25)\nlabel.pack()\n\ndef digital_clock():\n    live_time = time.strftime(\"%H:%M:%S\")\n    label.config(text=live_time)\n    label.after(200, digital_clock)\n\ndigital_clock()\napp.mainloop()","repo_name":"PremKumar-V/Python_Projects","sub_path":"Digital_Clock.py","file_name":"Digital_Clock.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7658463742","text":"import zipfile\nfrom pathlib import Path\nfrom tqdm.utils import CallbackIOWrapper\nfrom tqdm import tqdm\nfrom shutil import copyfileobj\nimport os\n\n\ndef _unzip_pb(fzip, dest, desc=\"Extracting\"):\n    
\"\"\"\n    Useful function to unzip with progress bar\n    Args:\n        fzip: Filename of the zipped file\n        dest: Destination where data must be written\n        desc: Argument inherited from zipfile.ZipFile\n\n    Returns:\n        zipfile.Zipfile(fzip).extractall(dest) with progress\n    \"\"\"\n\n    dest = Path(dest).expanduser()\n    Path(dest).mkdir(parents=True, exist_ok=True)\n\n    with zipfile.ZipFile(fzip) as zipf, tqdm(\n        desc=desc,\n        unit=\"B\",\n        unit_scale=True,\n        unit_divisor=1024,\n        total=sum(getattr(i, \"file_size\", 0) for i in zipf.infolist()),\n    ) as pbar:\n        for i in zipf.infolist():\n            if not getattr(i, \"file_size\", 0):  # directory\n                zipf.extract(i, os.fspath(dest))\n            else:\n                with zipf.open(i) as tempfi, open(\n                    os.fspath(dest / i.filename), \"wb\"\n                ) as fileobj:\n                    copyfileobj(CallbackIOWrapper(pbar.update, tempfi), fileobj)\n","repo_name":"InseeFrLab/pynsee","sub_path":"pynsee/download/_unzip_pb.py","file_name":"_unzip_pb.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"54"} +{"seq_id":"35395734184","text":"import warnings\nwarnings.simplefilter('ignore', DeprecationWarning)\n\n#import hypergeom\n#from hypergeom import pvalue_enrichment\nfrom getopt import getopt\nimport shelve\nimport sys\nfrom sys import *\nfrom math import log\nfrom random import shuffle\nfrom pvalue_module import *\n\nfrom albertcommon import *\n\ntermSeperator=\"|\"\n\nfrom JHypergeometricPvalue import JHypergeometricPvalue\n\nErrorBinRealValue=-10000000.00\n\n\ndef getBinKey(binV,binDivider):\n\tglobal ErrorBinRealValue\n\tif binV==ErrorBinRealValue:\n\t\treturn 0\n\n\treturn int(binV/binDivider)+1\n\n#binDivider=-1 indicates: calculate binDivider! onlyTerms=[] use all terms; minTermMemberCount==0 => no filtering on termMemberCount\ndef calTerms(TERMS,filename,GeneIDCol,TermCol,BinCol,startRow1,allowDuplicate,numBins,binDivider,logBinValue,onlyTheseTerms,minTermMemberCount,maxTermMemberCount,minTermFraction,maxTermFraction):\n\tglobal termSeperator,ErrorBinRealValue\n\t\n\tgenes=set()\n\t\n\tnTerms=0\n\tnGenes=0\t\n\tgeneBins=dict()\n\t\n\tgeneExp=[]\n\tmaxExp=-100000.00\n\tminExp=100000.00\n\n\n\tfil=open(filename)\n\tReqCol=max(GeneIDCol,TermCol,BinCol)\n\tlino=0\n\tfor lin in fil:\n\t\tlino+=1\n\t\tif(lino<startRow1):\n\t\t\tcontinue\n\t\tspliton=lin.rstrip(\"\\r\\n\").split(\"\\t\")\n\t\tif len(spliton)<=ReqCol:\n\t\t\tprint >> stderr,\"Invalid Col numbers, ignored:\",lin\n\t\t\tcontinue\n\t\t\n\t\tvalueToBin=float(spliton[BinCol])\n\t\tif logBinValue:\n\t\t\tif valueToBin==0.0:\n\t\t\t\tvalueToBin=ErrorBinRealValue\n\t\t\telse:\n\t\t\t\tvalueToBin=log(valueToBin)\n\n\t\t\n\t\t\t\t\n\t\tincNGenes=False\n\n\n\t\tThisID=spliton[GeneIDCol]\n\t\tThisEntryTerms=spliton[TermCol].split(termSeperator)\n\n\t\tgeneExp.append((valueToBin,ThisID))\n\n\t\tif valueToBin!=ErrorBinRealValue:\n\t\t\tmaxExp=max(valueToBin,maxExp)\n\t\t\tminExp=min(valueToBin,minExp)\n\n\n\t\tfor term in ThisEntryTerms:\n\t\t\tterm=term.strip()\n\t\t\tif(len(term)<1):\n\t\t\t\tcontinue\n\n\t\t\tif len(onlyTheseTerms)>0 and term not in onlyTheseTerms:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif not TERMS.has_key(term):\n\t\t\t\tnTerms+=1\t\t\t\t\n\t\t\t\tTERMS[term]=[]\n\n\n\n\t\t\tif allowDuplicate:\n\t\t\t\t\n\t\t\t\tTERMS[term].append(ThisID)\n\t\t\t\tincNGenes=True\n\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tif not incNGenes and ThisID not in genes:\n\t\t\t\t\tincNGenes=True\n\t\t\t\t\tgenes.add(ThisID)\n\n\n\t\t\t\tif ThisID not in TERMS[term]:\n\t\t\t\t\tincNGenes=True\n\t\t\t\t\tTERMS[term].append(ThisID)\n\n\t\tif incNGenes:\n\t\t\tnGenes+=1\n\t\t\t\n\t#do we need to calculate binDivider?\n\tif 
binDivider==-1: #yes\n\t\tbinDivider=(maxExp-minExp)/numBins\n\t\n\t#now go to each geneExp and hash\n\tfor valueToBin,ThisID in geneExp:\n\t\tbinKey=getBinKey(valueToBin,binDivider)\n\t\ttry:\n\t\t\tbinContent=geneBins[binKey]\n\t\texcept KeyError:\n\t\t\tbinContent=[]\n\t\t\tgeneBins[binKey]=binContent\n\n\t\tif allowDuplicate or ThisID not in binContent:\n\t\t\tbinContent.append(ThisID)\n\n\t\n\t\n\t#now filter terms\n\tfor term,termContent in TERMS.items():\n\t\tif \"__$$$\" in term:\n\t\t\tcontinue\n\n\t\tnGenesInTerm=len(termContent)\n\t\tfGenesInTerm=float(nGenesInTerm)/nGenes\n\t\tif nGenesInTerm<minTermMemberCount or nGenesInTerm>maxTermMemberCount or fGenesInTerm<minTermFraction or fGenesInTerm>maxTermFraction:\n\t\t\tnTerms-=1\n\t\t\tdel TERMS[term]\n\t\n\tTERMS[\"__$$$NGENES\"]=nGenes\n\tTERMS[\"__$$$NTERMS\"]=nTerms\n\tTERMS[\"__$$$GENEBINS\"]=geneBins\n\n\treturn binDivider\n\ndef drawMatchedItems(BgGeneBins,FgGeneBins):\n\tDrawnList=[]\n\n\tfor binKey,FgBinContent in FgGeneBins.items():\n\t\t\n\t\tBgBinContent=BgGeneBins[binKey]\n\t\tnumItemsInFgBin=len(FgBinContent)\n\t\tnumItemsInBgBin=len(BgBinContent)\n\n\t\tif numItemsInBgBin<numItemsInFgBin:\n\t\t\tprint >> stderr,\"warning: Bg bin has fewer items than fg bins: unable to perform permutation on this bin:\",\"binkey=\"+str(binKey),\"fg#=\"+str(numItemsInFgBin),\"bg#=\"+str(numItemsInBgBin)\n\t\t\t#continue\n\t\t\t#exit()\t\t\t\n\n\t\tif numItemsInBgBin==0:\n\t\t\tcontinue\n\n\t\tshuffle(BgBinContent)\t\t\n\t\tDrawnList.extend(BgBinContent[:min(numItemsInBgBin,numItemsInFgBin)])\n\n\treturn DrawnList\n\n\ndef intersectLists(L1,L2):\n\treturn list( set(L1) & set(L2) )\n\ndef subsetOf(BigSet,SmallSet,allowDuplicate):\n\tsubSet=[]\n\n\tif allowDuplicate:\n\t\tfor b in BigSet:\n\t\t\tif b in SmallSet:\n\t\t\t\tsubSet.append(b)\n\n\t\treturn subSet\n\telse:\n\t\treturn intersectLists(BigSet,SmallSet)\n\n\ndef enrich(FGTerms,nGenesFG,FGData,nGenesBG,BGData,geneIDConstraints,allowDuplicates,ignoreWorseThanBackgroundTerms,minSamt,HGWEPvalueCutOff): #=> returns [[samt,sam,popt,pop,FGTermEntry,term]],[pvalue],[FDR] geneIDConstraints=[] => no constraints\n\n\tconstraint=(len(geneIDConstraints)>0)\t\n\n\tJavaInputMatrix=[]\t\n\n\tfor term in FGTerms:\n\t\tif \"__$$$\" in term:\n\t\t\tcontinue\n\n\t\tif not BGData.has_key(term):\n\t\t\tprint >> stderr, \"Error: term\",term,\"not found in background\"\n\t\t\tsys.exit()\n\t\t\n\t\tBGTermEntry=BGData[term]\n\t\tFGTermEntry=FGData[term]\n\n\t\tpop=nGenesBG\n\t\tpopt=len(BGTermEntry)\n\n\t\tsam=nGenesFG\n\n\t\tif constraint:\n\t\t\t#now overlap\n\t\t\tFGTermEntryToInclude=subsetOf(FGTermEntry,geneIDConstraints,allowDuplicates)\n\t\telse:\n\t\t\tFGTermEntryToInclude=FGTermEntry\n\n\t\tsamt=len(FGTermEntryToInclude)\n\t\tJavaInputMatrix.append([samt,sam,popt,pop,FGTermEntryToInclude,term])\n\n\t\n\tif len(JavaInputMatrix)==0:\n\t\tprint >> stderr,\"JavaInputMatrix Empty\"\n\t\tsys.exit()\n\n\tJavaPvalues=JHypergeometricPvalue(JavaInputMatrix,-1)\n\t\n\tif ignoreWorseThanBackgroundTerms:\n\t\tfor i in range(0,len(JavaInputMatrix)):\n\t\t\tsamt,sam,popt,pop,FGTermEntryToInclude,term=JavaInputMatrix[i]\n\t\t\tif float(samt)/sam<=float(popt)/pop:\n\t\t\t\tJavaPvalues[i]=2.0\n\t\t\telif samt<minSamt:\n\t\t\t\tJavaPvalues[i]=2.0\n\t\t\telif JavaPvalues[i]>HGWEPvalueCutOff:\n\t\t\t\tJavaPvalues[i]=2.0\n\n\n\tif len(JavaPvalues)==0:\n\t\tprint >> stderr,\"JPvalue error\"\n\t\tsys.exit()\n\n\tFDR=getFDRfromPvalue(JavaPvalues)\n\n\treturn (JavaInputMatrix,JavaPvalues,FDR)\n\t\t\n\n\n\t\n\n\n\n\n\n\ndef main(BgShelve, BgFile, BgGeneIDCol, BgTermCol, BgBinCol, BgStartRow1, FgFile, FgGeneIDCol, FgTermCol, FgBinCol, 
FgStartRow1,recomputeBg,useJava,allowDuplicate,numBins,logBinValues,ndraws,minPopt,ignoreWorseThanBackgroundTerms,minSamt,HGWEPvalueCutOff,useTerms,maxPopt,minfe,minSamf,minPopf,maxPopf,excludeFGGenesFromRandomSets):\n\tglobal termSeperator\n\t\n\t\n\tif not recomputeBg:\n\t\tsaved=shelve.open(BgShelve)\n\t\t\n\t\tif not saved.has_key(\"BG\"):\n\t\t\trecomputeBg=True\t\t\n\t\telse:\n\t\t\tBGData=saved[\"BG\"]\n\t\t\tif len(BGData)<1 or not BGData.has_key(\"__$$$NGENES\"):\n\t\t\t\trecomputeBg=True\n\n\t\t\tif not BGData.has_key(\"__$$$NUMBINS\") or BGData[\"__$$$NUMBINS\"]!=numBins:\n\t\t\t\trecomputeBg=True\n\n\t\t\tif not BGData.has_key(\"__$$$MINPOPT\") or BGData[\"__$$$MINPOPT\"]!=minPopt:\n\t\t\t\trecomputeBg=True\n\n\t\t\tif not BGData.has_key(\"__$$$MAXPOPT\") or BGData[\"__$$$MAXPOPT\"]!=maxPopt:\n\t\t\t\trecomputeBg=True\n\n\t\t\tif not BGData.has_key(\"__$$$MINPOPF\") or BGData[\"__$$$MINPOPF\"]!=minPopf:\n\t\t\t\trecomputeBg=True\n\n\t\t\tif not BGData.has_key(\"__$$$MAXPOPF\") or BGData[\"__$$$MAXPOPF\"]!=maxPopf:\n\t\t\t\trecomputeBg=True\n\n\t\t\t#if not BGData.has_key(\"__$$$TERMUNIVERSE\") or BGData[\"__$$$TERMUNIVERSE\"]!=useTerms\n\t\t\t#\trecomputeBg=True\n\t\tsaved.close()\n\n\tif recomputeBg:\n\t\tprint >> stderr, \"recal BG\"\n\t\tBGData=dict()#=shelve.open(BgShelve)\n\n\t\t#TERMS,filename,GeneIDCol,TermCol,BinCol,startRow1,allowDuplicate,numBins,binDivider,logBinValue\n\t\tbinDivider=calTerms(BGData,BgFile,BgGeneIDCol,BgTermCol,BgBinCol,BgStartRow1,allowDuplicate,numBins,-1,logBinValues,useTerms,minPopt,maxPopt,minPopf,maxPopf)\n\t\tBGData[\"__$$$BINDIVIDER\"]=binDivider\n\t\tBGData[\"__$$$NUMBINS\"]=numBins\n\t\tBGData[\"__$$$MINPOPT\"]=minPopt\n\t\tBGData[\"__$$$MAXPOPT\"]=maxPopt\n\t\tBGData[\"__$$$MINPOPF\"]=minPopf\n\t\tBGData[\"__$$$MAXPOPF\"]=maxPopf\n\t\tsaved=shelve.open(BgShelve)\n\t\tsaved[\"BG\"]=BGData\n\t\tsaved.close()\n\telse:\n\t\tbinDivider=BGData[\"__$$$BINDIVIDER\"]\n\n\tprint >> stderr,\"bindivider=\",binDivider\n\t\n\t\n\n\t\n\tFGData=dict()\n\t\n\t#if minPopt==0:\n\t#\tOnlyTheseTerms=useTerms ###why????!\n\t#else:\n\tOnlyTheseTerms=BGData.keys() ####\n\n\t#TERMS,filename,GeneIDCol,TermCol,BinCol,startRow1,allowDuplicate,numBins,binDivider,logBinValue\n\t#calTerms(FGData,FgFile,FgGeneIDCol,FgTermCol,FgBinCol,FgStartRow1,allowDuplicate,0,binDivider,logBinValues,OnlyTheseTerms,0,1000000000,-100,1000000000) ##not filtering anything\n\tcalTerms(FGData,FgFile,FgGeneIDCol,FgTermCol,FgBinCol,FgStartRow1,allowDuplicate,0,binDivider,logBinValues,OnlyTheseTerms,minSamt,1000000000,minSamf,1000000000) ##not filtering anything\n\t#directly incorporate minSamt,minSamf into this\n\n\t#now calculate GO enrichment and FDR\n\n\t\n\n\tnTermsBG=BGData[\"__$$$NTERMS\"]\n\tnGenesBG=BGData[\"__$$$NGENES\"]\n\tBGBins=BGData[\"__$$$GENEBINS\"]\n\n\tnTermsFG=FGData[\"__$$$NTERMS\"]\n\tnGenesFG=FGData[\"__$$$NGENES\"]\n\tFGBins=FGData[\"__$$$GENEBINS\"]\n\tprint >> stderr,\"FGBins=\",FGBins\n\n\n\tif excludeFGGenesFromRandomSets:\n\t\t#make a new BGBins which doesn't have FG genes\n\t\tFGGeneSet=set()\t\t\n\t\tfor fgGeneVectors in FGBins.values():\n\t\t\tfor g in fgGeneVectors:\n\t\t\t\tFGGeneSet.add(g)\n\n\t\tnewBGBins=dict()\n\t\tif allowDuplicate:\n\t\t\tfor oldKey,oldGenes in BGBins.items():\n\t\t\t\tnewGenes=[]\n\t\t\t\tnewBGBins[oldKey]=newGenes\n\t\t\t\tfor g in oldGenes:\n\t\t\t\t\tif g not in FGGeneSet:\n\t\t\t\t\t\tnewGenes.append(g)\t\t\t\t\t\t\t\t\n\t\telse:\n\t\t\tfor oldKey,oldGenes in 
BGBins.items():\n\t\t\t\tnewGenes=[]\n\t\t\t\tnewBGBins[oldKey]=list(set(oldGenes)-FGGeneSet)\n\n\t\t#now replace\n\t\tBGBins=newBGBins\t\n\t\t\t\t\t\t\n\n\trandomGeneLists=drawMatchedItems(BGBins,FGBins)\n\n\tFGTerms=FGData.keys()\n\n\tprint >> stderr,\"calculate main\"\n\tMainFractionInfo,MainPvalues,MainFDRs=enrich(FGTerms,nGenesFG,FGData,nGenesBG,BGData,[],allowDuplicate,ignoreWorseThanBackgroundTerms,minSamt,HGWEPvalueCutOff) #the main one\n\n\t#for per term:\t\n\tnumRandomFractionExtreme=[]\n\tnumRandompvExtreme=[] \n\n\t#init:\n\tfor i in range(0,len(FGTerms)):\n\t\tnumRandomFractionExtreme.append(0)\n\t\tnumRandompvExtreme.append(0)\n\n\tfor draw_i in range(0,ndraws):\n\t\tprint >> stderr,\"drawing \",(draw_i+1),\"of\",ndraws\n\t\tdrawn_list=drawMatchedItems(BGBins,FGBins)\n\t\tThisDrawFractionInfo,ThisDrawPvalues,ThisDrawFDRs=enrich(FGTerms,nGenesFG,BGData,nGenesBG,BGData,drawn_list,allowDuplicate,False,0,2.0) #do not ignore worse than background terms for random sets, no samt cutoff, no pvaluecutoff\n\t\t\n\t\t#now goto each term in main and this draw\n\t\tfor itemIdx,main_fraction_info,main_pvalue,main_fdr,thisdraw_fraction_info,thisdraw_pvalue,thisdraw_fdr in zip(range(0,len(FGTerms)),MainFractionInfo,MainPvalues,MainFDRs,ThisDrawFractionInfo,ThisDrawPvalues,ThisDrawFDRs):\n\t\t\tmain_samt,main_sam,main_popt,main_pop,main_ids,main_term=main_fraction_info\n\t\t\tthisdraw_samt,thisdraw_sam,thisdraw_popt,thisdraw_pop,thisdraw_ids,thisdraw_term=thisdraw_fraction_info\n\t\t\t\n\t\t\tif main_term!=thisdraw_term:\n\t\t\t\tprint >> stderr,\"bug! main_term!=thisdraw_term\"\n\t\t\t\texit()\n\t\t\t\t\n\n\t\t\tmain_samf=float(main_samt)/main_sam\n\t\t\tthisdraw_samf=float(thisdraw_samt)/thisdraw_sam\n\n\t\t\tif thisdraw_samf>=main_samf:\n\t\t\t\tnumRandomFractionExtreme[itemIdx]+=1\n\t\t\t\n\t\t\tif thisdraw_pvalue<=main_pvalue:\n\t\t\t\tnumRandompvExtreme[itemIdx]+=1\n\n\n\n\tpvalueRandomFraction=[]\n\tpvalueRandompv=[]\n\t\t\n\tfor num_random_fraction_extreme,num_random_pv_extreme,main_pvalue in zip(numRandomFractionExtreme,numRandompvExtreme,MainPvalues):\n\t\tif main_pvalue==2.0:\n\t\t\tpvalueRandomFraction.append(2.0)\n\t\t\tpvalueRandompv.append(2.0)\n\t\telse:\n\t\t\tpvalueRandomFraction.append(float(num_random_fraction_extreme)/ndraws)\n\t\t\tpvalueRandompv.append(float(num_random_pv_extreme)/ndraws)\n\n\tFDRRandomFraction=getFDRfromPvalue(pvalueRandomFraction)\n\tFDRRandompv=getFDRfromPvalue(pvalueRandompv)\n\n\t#now output!\n\tfieldsToOutput=[]\n\n\t\n\t\n\tfieldsToOutput.append(\"term\")\n\tfieldsToOutput.append(\"popt\")\n\tfieldsToOutput.append(\"pop\")\n\tfieldsToOutput.append(\"popt/pop\")\n\tfieldsToOutput.append(\"samt\")\n\tfieldsToOutput.append(\"sam\")\n\tfieldsToOutput.append(\"samt/sam\")\n\tfieldsToOutput.append(\"foldEnrichment\")\n\tfieldsToOutput.append(\"genes\")\n\tfieldsToOutput.append(\"HGWE.pvalue\")\n\tfieldsToOutput.append(\"HGWE.FDR\")\n\tfieldsToOutput.append(\"RF.pvalue\")\t\t\t\n\tfieldsToOutput.append(\"RF.FDR\")\n\tfieldsToOutput.append(\"Rpv.pvalue\")\n\tfieldsToOutput.append(\"Rpv.FDR\")\n\tprint >> stdout,\"\\t\".join(fieldsToOutput)\n\n\tfor main_fraction_info,main_pvalue,main_fdr,rf_pvalue,rf_fdr,rpv_pvalue,rpv_fdr in 
zip(MainFractionInfo,MainPvalues,MainFDRs,pvalueRandomFraction,FDRRandomFraction,pvalueRandompv,FDRRandompv):\n\t\tmain_samt,main_sam,main_popt,main_pop,main_ids,main_term=main_fraction_info\n\t\tmain_samf=float(main_samt)/main_sam\n\t\tmain_popf=float(main_popt)/main_pop\n\t\tfEnrichment=main_samf/main_popf\n\t\tfieldsToOutput=[main_term,main_popt,main_pop,main_popf,main_samt,main_sam,main_samf,fEnrichment,\"|\".join(main_ids),main_pvalue,main_fdr,rf_pvalue,rf_fdr,rpv_pvalue,rpv_fdr]\n\t\ttoStrArrayInPlace(fieldsToOutput)\n\t\tprint >> stdout,\"\\t\".join(fieldsToOutput)\n\ndef printUsageAndExit(programName):\n\tprint >> stderr, \"Usage:\",programName,\"BgShelve BgFile BgGeneIDCol BgTermCol BgBinCol BgStartRow FgFile FgGeneIDCol FgTermCol FgBinCol FgStartRow\"\n\tprint >> stderr, \"Option\"\n\tprint >> stderr, \"--term-sep=|\"\n\tprint >> stderr, \"--force-recompute-bg\"\n\tprint >> stderr, \"[--no-duplicate], --allow-duplicate\"\n\tprint >> stderr, \"--num-bins=10\"\n\tprint >> stderr, \"--log-bin-values=False\"\n\tprint >> stderr, \"--ndraws=1000\"\n\tprint >> stderr, \"--min-popt=5 minimum number of genes annotated in the background to that term\"\n\tprint >> stderr, \"--ignore-worse-than-background-terms set p-value of those terms to 2.0\"\n\tprint >> stderr, \"--HGWE-pvalue-cutoff=2.0 (no cutoff)\"\n\tprint >> stderr, \"--min-samt=2 minimum number of genes annotated in the foreground to that term\"\n\tprint >> stderr, \"--select-terms-in filename use only terms specified in files, can do this multiple times\"\n\texplainColumns(stderr)\t\n\t#print >> stderr, \n\t\n\t\n\texit()\n\ndef toStrArrayInPlace(L):\n\tfor i in range(0,len(L)):\n\t\tL[i]=str(L[i])\n\nif __name__=='__main__':\n\t\n\tprogramName=argv[0]\n\topts, argvs = getopt(argv[1:],'',[\"term-sep=\",\"force-recompute-bg\",\"use-java\",\"use-c\",\"no-duplicate\",\"allow-duplicate\",\"num-bins=\",\"log-bin-values\",\"ndraws=\",\"min-popt=\",\"ignore-worse-than-background-terms\",\"HGWE-pvalue-cutoff=\",\"min-samt=\",'max-popt=','min-samf=','min-popf=','max-popf=','select-terms-in=','min-fe=','exclude-fg-from-random-sets'])\n\n\trecomputeBg=False\n\tfs=\"\\t\"\n\tallowDuplicate=False\n\tuseJava=True\n\tnumBins=50\n\tlogBinValues=False\n\tnDraws=1000\n\tminPopt=5\n\tmaxPopt=10000000\n\tminPopf=-1.0\n\tmaxPopf=2.0\n\tminSamf=-1.0\n\tminfe=-1.0\n\texcludeFGGenesFromRandomSets=False\n\tignoreWorseThanBackgroundTerms=False\n\tHGWEPvalueCutOff=2.0\n\tminSamt=2\n\tuseTermsIn=[]\n\tfor o,a in opts:\n\t\tif o==\"--force-recompute-bg\":\n\t\t\trecomputeBg=True\n\t\telif o==\"--term-sep\":\n\t\t\ttermSeperator=a\n\t\telif o==\"--use-java\":\n\t\t\tuseJava=True\n\t\telif o==\"--use-c\":\n\t\t\tuseJava=False\n\t\telif o==\"--allow-duplicate\":\n\t\t\tallowDuplicate=True\n\t\telif o==\"--no-duplicate\":\n\t\t\tallowDuplicate=False\n\t\telif o==\"--num-bins\":\n\t\t\tnumBins=int(a)\n\t\telif o==\"--log-bin-values\":\n\t\t\tlogBinValues=True\n\t\telif o==\"--ndraws\":\n\t\t\tnDraws=int(a)\n\t\telif o=='--min-popt':\n\t\t\tminPopt=int(a)\n\t\telif o=='--max-popt':\n\t\t\tmaxPopt=int(a)\n\t\telif o==\"--min-samt\":\n\t\t\tminSamt=int(a)\n\t\telif o=='--min-samf':\n\t\t\tminSamf=float(a)\n\t\telif o=='--min-popf':\n\t\t\tminPopf=float(a)\n\t\telif o=='--max-popf':\n\t\t\tmaxPopf=float(a)\n\t\telif o=='--min-fe':\n\t\t\tminfe=float(a)\n\t\telif o=='--exclude-fg-from-random-sets':\n\t\t\texcludeFGGenesFromRandomSets=True\n\t\telif o==\"--ignore-worse-than-background-terms\":\n\t\t\tignoreWorseThanBackgroundTerms=True\n\t\telif 
o==\"--HGWE-pvalue-cutoff\":\n\t\t\tHGWEPvalueCutOff=float(a)\n\t\telif o=='--select-terms-in':\n\t\t\tuseTermsIn.append(a)\n\ttry:\n\t\tBgShelve, BgFile, BgGeneIDCol, BgTermCol, BgBinCol, BgStartRow, FgFile, FgGeneIDCol, FgTermCol, FgBinCol, FgStartRow = argvs\n\texcept:\n\t\tprintUsageAndExit(programName)\n\n\n\tBgStartRow=int(BgStartRow)\n\tFgStartRow=int(FgStartRow)\n\n\t#for bg file:\n\theaderBG,prestartsBG=getHeader(BgFile,BgStartRow-1,BgStartRow,fs)\n\tBgGeneIDCol=getCol0ListFromCol1ListStringAdv(headerBG,BgGeneIDCol)[0]\n\tBgTermCol=getCol0ListFromCol1ListStringAdv(headerBG,BgTermCol)[0]\n\tBgBinCol=getCol0ListFromCol1ListStringAdv(headerBG,BgBinCol)[0]\t\n\n\t#for fg file\n\theaderFG,prestartsFG=getHeader(FgFile,FgStartRow-1,FgStartRow,fs)\n\tFgGeneIDCol=getCol0ListFromCol1ListStringAdv(headerFG,FgGeneIDCol)[0]\t\n\tFgTermCol=getCol0ListFromCol1ListStringAdv(headerFG,FgTermCol)[0]\t\n\tFgBinCol=getCol0ListFromCol1ListStringAdv(headerFG,FgBinCol)[0]\t\n\t\n\tuseTerms=[]\n\n\tif len(useTermsIn)>0:\n\t\tfor utfilename in useTermsIn:\n\t\t\tutfile=open(utfilename)\n\t\t\t#lines=utfile.readlines()\n\t\t\tfor lin in utfile:\n\t\t\t\tuseTerms.append(lin.strip().split(\"\\t\")[0])\n\t\t\tutfile.close()\t\t\n\t\t\t\n\n\t\n#main(BgShelve, BgFile, BgGeneIDCol, BgTermCol, BgBinCol, BgStartRow1, FgFile, FgGeneIDCol, FgTermCol, FgBinCol, FgStartRow1,recomputeBg,useJava,allowDuplicate,numBins,logBinValues,ndraws)\n\n\tmain(BgShelve,BgFile,BgGeneIDCol,BgTermCol,BgBinCol,BgStartRow,FgFile,FgGeneIDCol,FgTermCol,FgBinCol,FgStartRow,recomputeBg,useJava,allowDuplicate,numBins,logBinValues,nDraws,minPopt,ignoreWorseThanBackgroundTerms,minSamt,HGWEPvalueCutOff,useTerms,maxPopt,minfe,minSamf,minPopf,maxPopf,excludeFGGenesFromRandomSets)\n\n","repo_name":"albertwcheng/albert-bioinformatics-scripts","sub_path":"JMGOEnrichment.py","file_name":"JMGOEnrichment.py","file_ext":"py","file_size_in_byte":16533,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"19826864352","text":"#Created by: H. 
Timmermann - https://github.com/gottemmm\n#Last updated: 7/25/21\n#This code analyses CO2 data; this version does NOT sort out outliers, but it plots all CO2 data as scatter points and plots the average CO2 level along with the best fit line for the data\n\n#Importing required libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import Counter\nimport time\n\n#Input .txt file name here\narray_from_file = np.loadtxt(\"INPUT HERE\", dtype=str)\n\n#Measuring time \nstart_time = time.perf_counter()\n\n#Setting up the figure \nfig = plt.figure()\na1 = fig.add_axes([0,0,1,1])\n\n#This prints out all np data in full; used for debugging\nnp.set_printoptions(threshold=np.inf)\n\n#Getting all the data \n\ndataset = []\n\nfor i in np.arange(len(array_from_file)):\n    dataset.append(float(array_from_file[i][0]))\n\n#Best Fit Line Calculation\n\ndef best_fit(x,y):\n    w, b = np.polyfit(x,y,deg=1)\n    line = w * x + b\n    return line\n\nnon_outlier_data_list = []\nfor i in np.arange(len(array_from_file)):\n    non_outlier_data_list.append(float(array_from_file[i][0]))\n\nx1 = np.array(non_outlier_data_list)\nline = best_fit(np.arange(x1.shape[0]),x1)\n \n#Total of all CO2 levels; used for average CO2 level line \ntotal = 0\nfor ele in range(0, len(dataset)): \n    total = total + float(array_from_file[ele][0])\n    \n#Coloring the scatter points; green(0-800), yellow(800-1000), red(1000+)\n\nlength = np.arange(0,len(dataset))\ncol =[]\n\nfor i in range(0, len(dataset)):\n    if dataset[i] < 800:\n        col.append('green') \n    elif dataset[i] < 1000:\n        col.append('yellow')\n    else:\n        col.append('red') \n    \n#Graphing\n \n \n#Scatter plot with colored points; also graphing a random scatter point with the most popular color for the legend \n\nplt.scatter([10000], [0], color = Counter(col).most_common(1)[0][0], label ='CO2 Data point ')\nfor i in range(0,len(dataset)):\n    plt.scatter(array_from_file[i][1], dataset[i], c = col[i], s = 10, linewidth = 0)\nz = np.arange(0, len(dataset),step=50)\n\n#Plotting only 10 ticks on the x-axis; can change \n\nxtick1 = int(len(dataset)/10)\nplt.xticks(np.arange(0, len(dataset), xtick1),rotation=30)\n\n#Graphs average CO2 level\ndavg = (total/len(dataset))\nplt.axhline(y=davg, color='black', linestyle='--', label = 'Average CO2 Level')\n\n#Graphs Best Fit Line\n\nplt.plot(np.arange(len(dataset)), line, color='blue', label = 'Best Fit')\n\n#prints max/min CO2 levels\nprint('The maximum CO2 level was: ' + str(max(dataset)) + ' PPM')\nprint('The minimum CO2 level was: ' + str(min(dataset)) + ' PPM')\n\n#Labeling and setting limits \nplt.xlabel('Time (HH:MM:SS)')\nplt.ylabel('CO2 levels (PPM)')\nplt.title('CO2 Levels on ' + array_from_file[0][2])\na1.set_xlim(0,len(dataset))\na1.set_ylim(0, max(dataset)+200)\nplt.legend()\nplt.show()\n\n#Printing total time taken - (Time will vary depending on length of .txt file and IDE used)\nend_time = time.perf_counter()\nprint('This took:', end_time - start_time, 'seconds')\nif (end_time - start_time) > 60:\n    print('or', (end_time - start_time)/60, 'minutes')\n","repo_name":"htimmermann/CO2project","sub_path":"GITHUBNOTSORTED.py","file_name":"GITHUBNOTSORTED.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"5319796443","text":"from typing import Dict, List, Iterable, Tuple\n\nimport transition\n\nTransition = Dict[str, str]\nMachineParams = Dict[str, str]\n\n\ndef is_machine_param(pairs: 
MachineParams) -> bool:\n    \"\"\"Checks if a list of pairs is a machine parameter.\"\"\"\n\n    compiler_params = ['start', 'empty_symbol']\n    return any([key in compiler_params for key in pairs])\n\n\ndef get_pairs(line: List[str]) -> Dict[str, str]:\n    \"\"\"Collects key value pairs from a line.\n\n    There may be several key value pairs in one line. These are separated by spaces.\n\n    Example:\n        line = 'language python version 3.9 answer 42'\n        becomes\n        {\n            'language': 'python',\n            'version': '3.9',\n            'answer': '42'\n        }\n    \"\"\"\n\n    pairs = dict()\n    for i in range(0, len(line), 2):\n        key = line[i].lower()\n        value = line[i + 1]\n        pairs[key] = value\n    return pairs\n\n\ndef machine(lines: Iterable[str]) -> Tuple[List[Transition], MachineParams]:\n    \"\"\"Parses a machine into its transitions and parameters\"\"\"\n\n    transitions: List[Dict[str, str]] = list()\n    machine_params: Dict[str, str] = dict()\n    for line_nr, line in enumerate(lines):\n        split_line = line.split()\n        pairs = get_pairs(split_line)\n        if transition.valid(pairs):\n            transitions.append(pairs)\n        elif is_machine_param(pairs):\n            for key, value in pairs.items():\n                machine_params[key] = value\n        else:\n            print(f'ERROR at line {line_nr}: Unknown parameter')\n            exit()\n    return transitions, machine_params\n","repo_name":"linuskmr/turing-machine-compiler","sub_path":"parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41922825625","text":"from fastapi import FastAPI, APIRouter, Depends, HTTPException, status\nfrom pydantic import BaseModel\nfrom starlette import status\nfrom response_model import Data, BlogPost\nfrom database import engine, get_db\nimport models\nfrom sqlalchemy.orm import Session\nfrom typing import List\nfrom routers import functionroute, authentication\nfrom query import queryall\nfrom oauth import get_current_user\n\napp = FastAPI()\n\nmodels.Base.metadata.create_all(engine)\n\n\n# def get_db():\n#     db = SessionLocal()\n#     try:\n#         yield db\n#     finally:\n#         db.close()\n\n\n# add new user to database\n# @app.post('/blog', response_model=BlogPost, status_code=status.HTTP_201_CREATED)\n# def create(request: Data, db: Session = Depends(get_db)):\n#\n#     # title_hash = Hash.get_password_hash(request.title)\n#\n#     new_blog = models.Blog(title=request.title, body=request.body)\n#     db.add(new_blog)\n#     db.commit()\n#     db.refresh(new_blog)\n#\n#     # print(type(new_blog.id))\n#     return {'id': new_blog.id,\n#             'title': new_blog.title,\n#             'body': new_blog.body\n#             }\n\n\n# query all data in database\n# @app.get('/queryall', response_model=List[BlogPost])\n# def all(db: Session = Depends(get_db)):\n#     blogs = db.query(models.Blog).all()\n#     return blogs\n\n\n# select specific data compare with id\n@app.get('/selected/{id}')\ndef all(id: int, db: Session = Depends(get_db), get_current_user: BlogPost = Depends(get_current_user)):\n    blog = db.query(models.Blog).filter(models.Blog.id == id).all()\n\n    if not blog:\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n                            detail=f'Blog with the {id} is not available ')\n    return blog\n\n\n# delete from selected id\n@app.delete('/delete/{id}')\ndef nomore(id, db: Session = Depends(get_db), get_current_user: BlogPost = Depends(get_current_user)):\n    blog = db.query(models.Blog).filter(models.Blog.id == id).delete(synchronize_session=False)\n\n    db.commit()\n\n    return db.query(models.Blog).all()\n\n\n@app.put('/putting/{id}', response_model=BlogPost)\ndef update(request: Data, id, 
db: Session = Depends(get_db), get_current_user: BlogPost = Depends(get_current_user)):\n blog = db.query(models.Blog).filter(models.Blog.id == id).update({'title': request.title,\n 'body': request.body})\n\n if blog == 1:\n db.commit()\n return db.query(models.Blog).filter(models.Blog.id == id).first()\n elif blog == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'No {id} in the database')\n\n\napp.include_router(functionroute.router)\napp.include_router(queryall.router)\napp.include_router(authentication.router)\n","repo_name":"nongporloris/fastapi_2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35100907555","text":"# -*- coding:utf-8 -*-\n# author: Awet H. Gebrehiwot\n# at 10/13/22\n# --------------------------|\nimport glob\nimport os\nfrom os.path import join\n\nimport numpy as np\n\nsemanticKitti_learning_map = {\n 0: 0, # \"unlabeled\"\n 1: 0, # \"outlier\" mapped to \"unlabeled\" --------------------------mapped\n 10: 1, # \"car\"\n 11: 2, # \"bicycle\"\n 13: 4, # \"bus\" mapped to \"other-vehicle\" --------------------------mapped\n 15: 3, # \"motorcycle\"\n 16: 4, # \"on-rails\" mapped to \"other-vehicle\" ---------------------mapped\n 18: 6, # \"truck\"\n 20: 4, # \"other-vehicle\"\n 30: 5, # \"person\"\n 31: 2, # \"bicyclist\"\n 32: 3, # \"motorcyclist\"\n 40: 7, # \"road\"\n 44: 7, # \"parking\"\n 48: 8, # \"sidewalk\"\n 49: 10, # \"other-ground\"\n 50: 11, # \"building\"\n 51: 11, # \"fence\"\n 52: 11, # \"other-structure\" mapped to \"unlabeled\" ------------------mapped\n 60: 7, # \"lane-marking\" to \"road\" ---------------------------------mapped\n 70: 10, # \"vegetation\"\n 71: 10, # \"trunk\"\n 72: 9, # \"terrain\"\n 80: 11, # \"pole\"\n 81: 11, # \"traffic-sign\"\n 99: 11, # \"other-object\" to \"unlabeled\" ----------------------------mapped\n 252: 1, # \"moving-car\" to \"car\" ------------------------------------mapped\n 253: 2, # \"moving-bicyclist\" to \"bicyclist\" ------------------------mapped\n 254: 5, # \"moving-person\" to \"person\" ------------------------------mapped\n 255: 3, # \"moving-motorcyclist\" to \"motorcyclist\" ------------------mapped\n 256: 4, # \"moving-on-rails\" mapped to \"other-vehicle\" --------------mapped\n 257: 4, # \"moving-bus\" mapped to \"other-vehicle\" -------------------mapped\n 258: 6, # \"moving-truck\" to \"truck\" --------------------------------mapped\n 259: 4 # \"moving-other\"-vehicle to \"other-vehicle\" ----------------mapped\n}\n\nnusceneces_learning_map = {\n 1: 0, # 'noise'\n 5: 0,\n 7: 0,\n 8: 0,\n 10: 0,\n 11: 0,\n 13: 0,\n 19: 0,\n 20: 0,\n 0: 0,\n 29: 0,\n 31: 0,\n 17: 1, # 'car'\n 14: 2, # 'bicycle'\n 21: 3, # 'motorcycle'\n 15: 4, # 'other-vehicle'\n 16: 4,\n 18: 4,\n 2: 5, # 'pedestrian'\n 3: 5,\n 4: 5,\n 6: 5,\n 23: 6, # 'truck'\n 24: 7, # drivable-surface\n 26: 8, # sidewalk\n 27: 9, # 'terrain'\n 30: 10, # 'vegetation'\n 9: 11, # manmade\n 12: 11,\n 22: 11,\n 25: 11, #\n 28: 11\n}\n\nwod_learning_map = {\n 0: 0, # \"Undefined\"\n 1: 1, # \"Car\"\n 2: 6, # \"Truck\"\n 3: 2, # \"Bus\"\n 4: 4, # \"Other Vehicle\" # Other small vehicles (e.g. pedicab), large vehicles (e.g. 
construction vehicles, RV, limo, tram).\n    5: 3,  # \"Motorcyclist\"\n    6: 2,  # \"Bicyclist\"\n    7: 5,  # \"Pedestrian\"\n    8: 11,  # \"Sign\"\n    9: 11,  # \"Traffic Light\"\n    10: 11,  # \"Pole\"  # Lamp post, traffic sign pole etc.\n    11: 11,  # \"Construction Cone\"  # Construction cone/pole.\n    12: 2,  # \"Bicycle\"\n    13: 3,  # \"Motorcycle\"\n    14: 11,  # \"Building\"\n    15: 10,  # \"Vegetation\"  # Bushes, tree branches, tall grasses, flowers etc.\n    16: 11,  # \"Tree Trunk\"\n    17: 8,  # \"Curb\"  # Curb on the edge of roads. This does not include road boundaries if there’s no curb.\n    18: 7,\n    # \"Road\"  # Surface a vehicle could drive on. This includes the driveway connecting // parking lot and road over a section of sidewalk.\n    19: 7,\n    # \"Lane Marker\"  # Marking on the road that’s specifically for defining lanes such as // single/double white/yellow lines.\n    20: 7,  # \"Other Ground\"  # Marking on the road other than lane markers, bumps, cateyes, railtracks // etc.\n    21: 9,\n    # \"Walkable\"  # Most horizontal surface that’s not drivable, e.g. grassy hill, // pedestrian walkway stairs etc.\n    22: 8  # \"Sidewalk\"  # Nicely paved walkable surface where pedestrians are most likely to walk.\n}\n\n\ndef convert_labels(data_path, dest_path, dataset):\n    sequeses = os.listdir(data_path)\n    if dataset == 'WOD':\n        label_mapping = wod_learning_map\n    elif dataset == 'SemanticKITTI':\n        label_mapping = semanticKitti_learning_map\n    elif dataset == 'NuSceneces':\n        label_mapping = nusceneces_learning_map\n    for seq in sequeses:\n        frames = sorted(glob.glob(os.path.join(data_path, seq, 'labels', '*.npy')))\n        seq_folder = join(data_path, seq)\n\n        label_path = os.path.join(dest_path, seq, 'mapped_labels')\n        if not os.path.exists(label_path):\n            os.makedirs(label_path)\n\n        for frame in frames:\n            ss = np.load(frame) & 0xFFFF\n            mapped_labels = (np.vectorize(label_mapping.__getitem__)(ss)).astype(np.int32)\n            new_frame = frame.split('/')[-1].split('.')[0]\n            np.save(os.path.join(label_path, new_frame), mapped_labels)\n\n\nif __name__ == '__main__':\n    # data_path = '/mnt/personal/gebreawe/Datasets/RealWorld/semantic-kitti/all_npy/sequences'\n    # dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/semantic-kitti/all_npy/sequences'\n    # dataset = 'SemanticKITTI'\n\n    # data_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_64_beam/training'\n    # dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_64_beam/training'\n    # dataset = 'WOD'\n    # data_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_32_beam/training'\n    # dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_32_beam/training'\n    #\n    # data_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_64_beam/validation'\n    # dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_64_beam/validation'\n    #\n    # data_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_32_beam/validation'\n    # dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/WOD/processed/Labeled_32_beam/validation'\n\n    # data_path = '/mnt/personal/gebreawe/Datasets/RealWorld/NUSCENES/processed/train'\n    # dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/NUSCENES/processed/train'\n    dataset = 'NuSceneces'\n    dest_path = '/mnt/personal/gebreawe/Datasets/RealWorld/NUSCENES/processed/val'\n    data_path = dest_path  # data_path was left commented out above; it mirrors dest_path in every commented pair\n\n    # 'NuSceneces'\n    convert_labels(data_path, dest_path, 
dataset)\n","repo_name":"ctu-vras/T-UDA","sub_path":"tools/label_mapping_npy.py","file_name":"label_mapping_npy.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"390820674","text":"from __future__ import annotations\n\nimport pytest\n\nfrom urllib3.fields import (\n RequestField,\n format_header_param,\n format_header_param_html5,\n format_header_param_rfc2231,\n format_multipart_header_param,\n guess_content_type,\n)\n\n\nclass TestRequestField:\n @pytest.mark.parametrize(\n \"filename, content_types\",\n [\n (\"image.jpg\", [\"image/jpeg\", \"image/pjpeg\"]),\n (\"notsure\", [\"application/octet-stream\"]),\n (None, [\"application/octet-stream\"]),\n ],\n )\n def test_guess_content_type(\n self, filename: str | None, content_types: list[str]\n ) -> None:\n assert guess_content_type(filename) in content_types\n\n def test_create(self) -> None:\n simple_field = RequestField(\"somename\", \"data\")\n assert simple_field.render_headers() == \"\\r\\n\"\n filename_field = RequestField(\"somename\", \"data\", filename=\"somefile.txt\")\n assert filename_field.render_headers() == \"\\r\\n\"\n headers_field = RequestField(\n \"somename\", \"data\", headers={\"Content-Length\": \"4\"}\n )\n assert headers_field.render_headers() == \"Content-Length: 4\\r\\n\\r\\n\"\n\n def test_make_multipart(self) -> None:\n field = RequestField(\"somename\", \"data\")\n field.make_multipart(content_type=\"image/jpg\", content_location=\"/test\")\n assert (\n field.render_headers()\n == 'Content-Disposition: form-data; name=\"somename\"\\r\\n'\n \"Content-Type: image/jpg\\r\\n\"\n \"Content-Location: /test\\r\\n\"\n \"\\r\\n\"\n )\n\n def test_make_multipart_empty_filename(self) -> None:\n field = RequestField(\"somename\", \"data\", \"\")\n field.make_multipart(content_type=\"application/octet-stream\")\n assert (\n field.render_headers()\n == 'Content-Disposition: form-data; name=\"somename\"; filename=\"\"\\r\\n'\n \"Content-Type: application/octet-stream\\r\\n\"\n \"\\r\\n\"\n )\n\n def test_render_parts(self) -> None:\n field = RequestField(\"somename\", \"data\")\n parts = field._render_parts({\"name\": \"value\", \"filename\": \"value\"})\n assert 'name=\"value\"' in parts\n assert 'filename=\"value\"' in parts\n parts = field._render_parts([(\"name\", \"value\"), (\"filename\", \"value\")])\n assert parts == 'name=\"value\"; filename=\"value\"'\n\n @pytest.mark.parametrize(\n (\"value\", \"expect\"),\n [(\"näme\", \"filename*=utf-8''n%C3%A4me\"), (b\"name\", 'filename=\"name\"')],\n )\n def test_format_header_param_rfc2231_deprecated(\n self, value: bytes | str, expect: str\n ) -> None:\n with pytest.deprecated_call(match=r\"urllib3 v2\\.1\\.0\"):\n param = format_header_param_rfc2231(\"filename\", value)\n\n assert param == expect\n\n def test_format_header_param_html5_deprecated(self) -> None:\n with pytest.deprecated_call(match=r\"urllib3 v2\\.1\\.0\"):\n param2 = format_header_param_html5(\"filename\", \"name\")\n\n with pytest.deprecated_call(match=r\"urllib3 v2\\.1\\.0\"):\n param1 = format_header_param(\"filename\", \"name\")\n\n assert param1 == param2\n\n @pytest.mark.parametrize(\n (\"value\", \"expect\"),\n [\n (\"name\", \"name\"),\n (\"näme\", \"näme\"),\n (b\"n\\xc3\\xa4me\", \"näme\"),\n (\"ski ⛷.txt\", \"ski ⛷.txt\"),\n (\"control \\x1A\\x1B\\x1C\", \"control \\x1A\\x1B\\x1C\"),\n (\"backslash \\\\\", \"backslash \\\\\"),\n (\"quotes '\\\"\", \"quotes '%22\"),\n (\"newline \\n\\r\", 
\"newline %0A%0D\"),\n        ],\n    )\n    def test_format_multipart_header_param(\n        self, value: bytes | str, expect: str\n    ) -> None:\n        param = format_multipart_header_param(\"filename\", value)\n        assert param == f'filename=\"{expect}\"'\n\n    def test_from_tuples(self) -> None:\n        field = RequestField.from_tuples(\"file\", (\"スキー旅行.txt\", \"data\"))\n        cd = field.headers[\"Content-Disposition\"]\n        assert cd == 'form-data; name=\"file\"; filename=\"スキー旅行.txt\"'\n\n    def test_from_tuples_rfc2231(self) -> None:\n        with pytest.deprecated_call(match=r\"urllib3 v2\\.1\\.0\"):\n            field = RequestField.from_tuples(\n                \"file\", (\"näme\", \"data\"), header_formatter=format_header_param_rfc2231\n            )\n\n        cd = field.headers[\"Content-Disposition\"]\n        assert cd == \"form-data; name=\\\"file\\\"; filename*=utf-8''n%C3%A4me\"\n","repo_name":"urllib3/urllib3","sub_path":"test/test_fields.py","file_name":"test_fields.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":3526,"dataset":"github-code","pt":"54"}
+{"seq_id":"5322214752","text":"# INITIAL ANALYSIS OF THE MNIST DATASET\n\n# 70,000 examples of handwritten digits. 60,000 in the training set, and 10,000 in the test set.\n\n# The shape of the images is 28x28 greyscale\n\n# 10 classes, the digits 0 - 9\n\n# examples are almost evenly distributed between the classes\n\n# testing set is similar to the training set\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Dropout\nfrom keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ntraining_fraction = 0.9\ntraining_examples = 60000\n\n\n#Setting up the data in training and testing sets. x for input and y for label\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n\n#Standardizing the input data (zero mean, unit variance per pixel) to improve the learning.\npixel_mean = x_train.mean(axis=0)\npixel_std = x_train.std(axis=0) + 1e-10 # Prevent division-by-zero errors\nx_train = (x_train - pixel_mean) / pixel_std\nx_test = (x_test - pixel_mean) / pixel_std\n\n\n\n# One hot encode labels\ny_train = keras.utils.to_categorical(y_train, 10)\ny_test = keras.utils.to_categorical(y_test, 10)\n\n\n\n# Change data shape to fit tensorflow\nx_train = x_train[:,:,:, np.newaxis].astype(np.float32)\nx_test = x_test[:,:,:, np.newaxis].astype(np.float32)\n\n\n\n#Splitting the data into training and validation\nindexes = np.arange(training_examples)\nnp.random.shuffle(indexes)\n# Select random indexes for train/val set\nidx_train = indexes[:int(training_fraction*training_examples)]\nidx_val = indexes[int(training_fraction*training_examples):]\n\nx_val = x_train[idx_val]\ny_val = y_train[idx_val]\n\nx_train = x_train[idx_train]\ny_train = y_train[idx_train]\n\n\n\"\"\" # *****************************Implementing the network in table 1*************************************\n\n# Hyperparameters for the set\nlearning_rate = 0.001\nlossfunction = keras.losses.categorical_crossentropy\n#opimize = keras.optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False)\nopimize = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\nnumber_of_epochs = 40\nbatch_size = 128\n\n\n#Construct model\nmodel = Sequential()\nmodel.add(Flatten(input_shape=x_train.shape[1:]))\nmodel.add(Dense(128, activation = \"relu\"))\nmodel.add(Dense(10, activation = \"softmax\"))\n\n# 
********************************************************************************************************************\"\"\"\n\n\n\n# **********************Finding the simplest fully connected network that achieves more than 95%******************\n\n# Hyperparameters for the set\nlearning_rate = 0.001\nlossfunction = keras.losses.categorical_crossentropy\n#opimize = keras.optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False)\nopimize = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\nnumber_of_epochs = 120\nbatch_size = 32\n\n\n#Construct model\nmodel = Sequential()\nmodel.add(Flatten(input_shape=x_train.shape[1:]))\nmodel.add(Dropout(0.18))\nmodel.add(Dense(12, activation = \"relu\"))\nmodel.add(Dense(10, activation = \"softmax\"))\n\n# ********************************************************************************************************************\n\n\n\n\"\"\" # ************************************Implementing the best mnist network*************************************\n\n# Hyperparameters for the set\nlearning_rate = 0.001\nlossfunction = keras.losses.categorical_crossentropy\n#opimize = keras.optimizers.SGD(lr=learning_rate, momentum=0.0, decay=0.0, nesterov=False)\nopimize = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\nnumber_of_epochs = 25\nbatch_size = 256\n\n\n#Construct model\nmodel = Sequential()\nmodel.add(Flatten(input_shape=x_train.shape[1:]))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(784, activation = \"relu\"))\nmodel.add(Dense(784*2, activation = \"relu\"))\nmodel.add(Dense(784, activation = \"relu\"))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(256, activation = \"relu\"))\nmodel.add(Dense(128, activation = \"relu\"))\nmodel.add(Dense(28, activation = \"relu\"))\nmodel.add(Dense(10, activation = \"softmax\")) \n# ********************************************************************************************************************\"\"\"\n\nmodel.compile(loss = lossfunction, optimizer = opimize, metrics = ['accuracy'])\n\nmodel.summary()\n\nmodel.fit(x_train, y_train, \n          batch_size = batch_size,\n          epochs = number_of_epochs,\n          verbose = 1,\n          validation_data = (x_val, y_val))\n\n\n\n#test results\nfinal_loss, final_accuracy = model.evaluate(x_test, y_test)\nprint(\"Test loss: \"+str(final_loss))\nprint(\"Test accuracy: \"+str(final_accuracy))\n\n#plot confusion matrix\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(cm, classes, title='Confusion matrix'):\n    #Minimal heatmap of the confusion matrix\n    plt.figure(figsize=(8, 8))\n    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n    plt.title(title)\n    plt.colorbar()\n    plt.xticks(range(len(classes)), classes)\n    plt.yticks(range(len(classes)), classes)\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.show()\n\npredictions = model.predict_classes(x_test)\ny_test_values = np.argmax(y_test, axis=1) #recover integer labels from the one-hot encoding\n\ncm = confusion_matrix(y_test_values, predictions)\ncm_plot_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nplot_confusion_matrix(cm, cm_plot_labels, title = 'Confusion matrix')\n\n#plot accuracy\n\nhistory = model.history.history\nplt.figure(figsize=(12, 8))\nplt.plot(history[\"val_acc\"], label=\"Validation accuracy\")\nplt.plot(history[\"acc\"], label=\"Training accuracy\")\nplt.plot([number_of_epochs-1], [final_accuracy], 'o', label=\"Final test accuracy\")\nplt.legend()\nplt.show()\n\n#plot loss\nplt.figure(figsize=(12, 8))\nplt.title(\"Model Loss\")\nplt.plot(history[\"loss\"], label=\"Training loss\")\nplt.plot(history[\"val_loss\"], label=\"Validation loss\")\nplt.plot([number_of_epochs-1], [final_loss], 'o', label=\"Final test loss\")\nplt.legend(loc='upper left')\nplt.show()\n","repo_name":"alfredronning/visuelle","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
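The MNIST record above one-hot encodes its labels with keras and later has to recover the integer class ids for the confusion matrix. A minimal numpy-only sketch of that round trip (np.eye stands in for keras.utils.to_categorical here; the array names are illustrative, not taken from the record):

import numpy as np

labels = np.array([3, 0, 4, 1])           # integer class ids
one_hot = np.eye(10)[labels]              # same effect as to_categorical(labels, 10)
recovered = one_hot.argmax(axis=1)        # inverse mapping used before confusion_matrix
assert (recovered == labels).all()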
+{"seq_id":"24497847471","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 1 19:37:45 2022\n\n@author: marc\n\"\"\"\n\nfrom Blotto_discretizer import discretize_action_space\nfrom Blotto_alpha_rank import blotto_alpha_rank\nfrom Blotto_hyperparameter_evaluation import evaluate_hyperparameters\nimport numpy as np\nimport multiprocessing as mp\nfrom multiprocessing import set_start_method\nimport sys\nfrom numba import cuda\n\nnumber_of_battlefields = 3\nbudget1 = 1000\nbudget2 = 1000\ngranularity_level = 32\nadd_noise = False\nepochs = 10**3\nmode = \"kmeans\"\neval_every = 10**2\npatience = 10**8\nrestarts = 1\ntrack_every = 10**5\ntie_breaking_rule = \"right-in-two\"\nweights1 = np.array([1, 1, 1])\nsymmetric_battlefields = len(np.unique(weights1)) == 1\n\n#cuda.select_device(gpu)\n\nstrategies1, probs1 = discretize_action_space(number_of_battlefields, budget1, symmetric_battlefields, \n granularity_level, add_noise = add_noise, integer_bids = True)\n\nprobs1 = \"uniform\"\nstrategies2, probs2 = None, None\n#strategies2, probs2 = discretize_action_space(number_of_battlefields, budget2, symmetric_battlefields, \n# granularity_level, add_noise = add_noise, integer_bids = True)\n\n#mrs = [5**i for i in range(-6, -1)]\n#pop_sizes = [10*i for i in range(1, 6)]\n\ninput_tuples = [(5*i, 10**(j / 2), False) for i in range(1, 7) for j in range(-7, -1)]\n\n# set one tuple for progress display (find a better solution for this later)\nprogress_tuple = list(input_tuples[-1])\nprogress_tuple[-1] = True\ninput_tuples[-1] = tuple(progress_tuple)\n\n# helps to apply function in multiprocessing\ndef helper_func(pop_size, mr, track_progress):\n return blotto_alpha_rank(strategies1, probs1, strategies2, probs2, weights1 = weights1, weights2 = None, tie_breaking_rule = tie_breaking_rule, \n pop_size = pop_size, alpha = 100, mr = mr, restarts = restarts, epochs = epochs, \n track_every = track_every, eval_mode = mode, eval_every = eval_every, patience = patience, plot_every = epochs * 10)\n\n# converts multiprocessing output to dictionary\ndef output_to_dict(val_list, input_tuples):\n output_dict = {}\n for i, input in enumerate(input_tuples):\n output_dict[input] = val_list[i]\n return output_dict\n\npool_obj = mp.Pool(processes = 10)\n\n#set_start_method(\"spawn\")\noutput = pool_obj.starmap(helper_func, input_tuples)\noutput_dict = output_to_dict(output, input_tuples)\n \n# write into textfile\nw = open(sys.path[0] + \"/outputs/output_granularity_\" + str(granularity_level) + \".txt\", \"w\")\nw.write(str(output_dict))\nw.close()\n\n# evaluate the hyperparameters\nevaluate_hyperparameters(\"outputs/output_granularity_\" + str(granularity_level) + \".txt\", strategies1, weights1, weights1, tie_breaking_rule)","repo_name":"MarcJohler/ColonelBlotto","sub_path":"Blotto_hyperparameter_tuning.py","file_name":"Blotto_hyperparameter_tuning.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39378935604","text":"#dışarıdan girilen adete göre girlen sayıya göre elEMn kaydeder ve yazar.\r\nliste = []\r\n\r\nadet = int(input(\"kaç eleman girilecek.:\"))\r\n\r\nfor sayi in range(1, adet +1):\r\n eleman = input(\"{}. 
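The Blotto tuning record above serializes its results dict to a text file with plain str(). One way such a repr-style dump can be read back is with ast.literal_eval from the standard library; a minimal sketch, assuming the file holds a single dict literal keyed by (pop_size, mr, track_progress) tuples and that the stored values are themselves plain Python literals (the file name matches the one the script writes for granularity_level = 32):

import ast

with open("outputs/output_granularity_32.txt") as f:
    results = ast.literal_eval(f.read())  # safe parse of the repr-style dump

for (pop_size, mr, _), value in results.items():
    print(pop_size, mr, value)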
elemanı girin.: \" .format(sayi))\r\n liste.append(eleman)\r\n\r\nprint(liste)\r\n \r\nsilmeAdet=int(input(\"kaç eleman silinecek.: \")) \r\nif(len(liste) >= silmeAdet):\r\n\r\n listesilinen = [] # for ile silinen elemanları buraya yazdır.\r\n\r\n for sayi in range(silmeAdet):\r\n elemansilinen =liste.pop() #her döndügünde sondan bir elemanı sil ve degişkene ata\r\n listesilinen.append(elemansilinen)\r\n print(\"\\nsiilinen listesi.: \", listesilinen)\r\nelse:\r\n print(\"Silinen, listeden büyük olamaz\")\r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n\"\"\"\r\nfor eleman in liste:\r\n eleman2= input(\"Elaman Gir.:\")\r\n liste.append(eleman2)\r\n \"\"\"\r\n # bu döngü, listeye sonsuz elaman girmemizi saglar.","repo_name":"EsatKORUKCU/Work-done-in-the-course-in-Python-Programming-Basic-Education-----Python-Programlama-Temel-Egitimi","sub_path":"ders06 for/for02.py","file_name":"for02.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71872027040","text":"from PySide6.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout, QLabel, QPushButton, QLineEdit, QWidgetAction\nfrom m_window import MainWindow\nfrom display import Display, Info\nfrom PySide6.QtGui import QIcon\nfrom var import ICON_PATH\nfrom style import setupTheme\nimport ctypes\n\nif __name__ == '__main__':\n calcApp = QApplication()\n window = MainWindow()\n display = Display()\n info = Info()\n setupTheme('auto')\n\n menu = window.menuBar()\n styleLayout = menu.addMenu('Estilo')\n escolheCor1 = styleLayout.addAction('Dark')\n escolheCor1.triggered.connect(lambda: setupTheme('dark'))\n escolheCor2 = styleLayout.addAction('Light')\n escolheCor2.triggered.connect(lambda: setupTheme('light'))\n\n # escolheCor2 = styleLayout.addMenu('Light')\n\n # self.acao2.toggled.connect(self.segundo_slot) # type: ignore\n # self.acao2.hovered.connect(self.segundo_slot)\n\n # txt = QLabel('Texto')\n # txt.setStyleSheet('font-size: 20px;')\n\n # window.addWidgetToVLayout(txt)\n window.addWidgetToVLayout(info)\n window.addWidgetToVLayout(display)\n display.setPlaceholderText('Números Aqui')\n # window.addWidgetToVLayout(Display('Display 2'))\n # window.addWidgetToVLayout(Display('Display 3'))\n\n calcApp.setWindowIcon(QIcon(str(ICON_PATH)))\n\n myappid = 'mycompany.myproduct.subproduct.version'\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n window.adjustFixedSize()\n window.show()\n calcApp.exec_()\n","repo_name":"FreeWillieVitin/Curso_Python","sub_path":"Modulo_5_InterfaceGrafica_PySide6/Calculadora/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"317792680","text":"import pygame\n\nclass Ship():\n \"\"\"英雄,飞船开走打怪兽吧~\"\"\"\n def __init__(self, ai_settings, screen):\n \"\"\"飞机上炮加油\"\"\"\n super(Ship, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n\n # 加载飞船图像并获取其外接矩形\n self.image = pygame.image.load('imgs/ship.png')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n # 将每艘飞船放在屏幕底部中央\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n # 在center中保存浮点数\n self.centerx = float(self.rect.centerx)\n self.centery = float(self.rect.centery)\n\n # 移动标志\n self.moving_right = False\n self.moving_left = False\n self.moving_up = False\n self.moving_down = False\n\n def update(self):\n \"\"\"更新位置\"\"\"\n if 
self.moving_right and self.rect.right < self.screen_rect.right:\n self.centerx += self.ai_settings.ship_speed_factor\n if self.moving_left and self.rect.left > 0:\n self.centerx -= self.ai_settings.ship_speed_factor\n if self.moving_up and self.rect.top > 0:\n self.centery -= self.ai_settings.ship_speed_factor\n if self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n self.centery += self.ai_settings.ship_speed_factor\n self.rect.centerx = self.centerx\n self.rect.centery = self.centery\n\n def blitme(self):\n \"\"\"在指定位置绘制飞船\"\"\"\n self.screen.blit(self.image, self.rect)\n","repo_name":"Rustic-Z/pytest","sub_path":"role/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9376203590","text":"\nimport torch\nimport torch.nn.functional\nimport matplotlib.pyplot as plt\n\nfrom numpy import meshgrid, linspace, empty, double\n\n\ntrain_in = torch.tensor([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]).reshape(-1, 2)\ntrain_out = torch.tensor([[1.0], [1.0], [1.0], [0.0]]).reshape(-1, 1)\n\n\nclass NandModel:\n def __init__(self):\n self.W = torch.tensor([[0.0], [0.0]], requires_grad=True)\n self.b = torch.tensor([[0.0]], requires_grad=True)\n\n def f(self, x1, x2):\n return torch.sigmoid((x1 @ self.W[0]) + (x2 @ self.W[1]) + self.b)\n\n def logits(self, x1, x2):\n return ((x1 @ self.W[0]) + (x2 @ self.W[1]) + self.b).reshape(-1, 1)\n\n def loss(self, x1, x2, y):\n return torch.nn.functional.binary_cross_entropy_with_logits(self.logits(x1, x2), y)\n\n\nmodel = NandModel()\noptimizer = torch.optim.SGD([model.b, model.W, model.W], 0.1)\n\nfor epoch in range(10_000):\n model.loss(train_in[:, 0].reshape(-1, 1), train_in[:, 1].reshape(-1, 1), train_out).backward()\n optimizer.step()\n optimizer.zero_grad()\n\n\nprint(f'W = {model.W}, '\n f'b = {model.b}, '\n f'loss = {model.loss(train_in[:, 0].reshape(-1, 1), train_in[:, 1].reshape(-1, 1), train_out)}')\n\n\nin1, in2 = meshgrid(linspace(-0.1, 1.1, 100), linspace(-0.1, 1.1, 100))\nout = empty([100, 100], dtype=double)\n\nfor i in range(0, in1.shape[0]):\n for j in range(0, in1.shape[1]):\n out[i, j] = model.f(\n torch.tensor(float(in1[i, j])).reshape(-1, 1),\n torch.tensor(float(in2[i, j])).reshape(-1, 1)\n )\n\n\nfig = plt.figure()\nplot = fig.add_subplot(projection='3d')\n\n\nplot.set_xlabel(\"$in_1$\")\nplot.set_ylabel(\"$in_2$\")\nplot.set_zlabel(\"$out$\")\n\n\ntable = plt.table(cellText=[[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 0]],\n colWidths=[0.1] * 3,\n colLabels=[\"$in_1$\", \"$in_2$\", \"$out$\"],\n cellLoc=\"center\",\n loc=\"upper left\")\n\nplot.plot_wireframe(in1, in2, out, color=\"green\")\nplot.plot(train_in[:, 0].squeeze(),\n train_in[:, 1].squeeze(),\n train_out[:, 0].squeeze(),\n 'o',\n color=\"blue\")\n\nplt.show()\n","repo_name":"eivind-bn/IDATT2502","sub_path":"NeuralNet/nand.py","file_name":"nand.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34656395877","text":"# cit1113\n# Alex Brown\n# Lecture M\n\n\"\"\"\nelse if (elif in python)\n\"\"\"\n\n# Program to get letter grade based on given percentage\n# print(\"Enter grade 0-100\")\n# grade = float(input(\">\"))\n\n# if grade >= 90:\n# print(\"A\")\n# elif grade >= 80:\n# print(\"B\")\n# elif grade >= 70:\n# print(\"C\")\n# elif grade >= 60:\n# print(\"D\")\n# else:\n# print(\"F\")\n\n# print(\"done\")\n\n# Function to get 
letter grade based on given percentage\ndef getGrade(percent = None, shading = True, getInput = True):\n\n    #Get percent if none given and getInput is true\n    if getInput and percent is None:\n        print(\"Enter grade 0-100\")\n        percent = input(\">\")\n\n    percent = float(percent)\n\n    #set grade to string containing letter grade based on percent\n    if shading and percent >= 97:\n        grade = \"A+\"\n        gpa = 4.0\n    elif shading and (percent >= 90 and percent <= 92):\n        grade = \"A-\"\n        gpa = 3.7\n    elif percent >= 90:\n        grade = \"A\"\n        gpa = 4.0\n\n    elif shading and (percent >=87 and percent <= 89):\n        grade = \"B+\"\n        gpa = 3.3\n    elif shading and (percent >=80 and percent <= 82):\n        grade = \"B-\"\n        gpa = 2.7\n    elif percent >= 80:\n        grade = \"B\"\n        gpa = 3.0\n\n    elif shading and (percent >=77 and percent <= 79):\n        grade = \"C+\"\n        gpa = 2.3\n    elif shading and (percent >=70 and percent <= 72):\n        grade = \"C-\"\n        gpa = 1.7\n    elif percent >= 70:\n        grade = \"C\"\n        gpa = 2.0\n\n    elif shading and (percent >=67 and percent <= 69):\n        grade = \"D+\"\n        gpa = 1.3\n    elif percent >= 65:\n        grade = \"D\"\n        gpa = 1.0\n\n    else:\n        grade = \"F\"\n        gpa = 0.0\n\n    #Returns grade as string, gpa as float, and percent as float\n    return grade, round(float(percent), 1), float(gpa)\n\ndef debugGradeRange(minGrade = 0, maxGrade = 100, shading = True):\n    # Function to print out possible grades from getGrade()\n    i = minGrade\n    for i in range(minGrade, maxGrade + 1):\n        print(i, \":\", getGrade(i, shading))\n\n\n\n\n\n# Main function\ndef main():\n    debugGradeRange(-100, 105)\n    # grade, percent, gpa = getGrade()\n    # print(\"Grade: \"+grade)\n    # print(\"Percent: \"+str(percent)+\"%\")\n    # print(\"GPA: \"+str(gpa))\n\nif __name__ == \"__main__\":\n    main()","repo_name":"EssenceofLuna/ProgrammingIntro","sub_path":"Week 08/lectureM.py","file_name":"lectureM.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"31126327643","text":"\r\nimport requests \r\nimport datetime\r\nimport sys\r\nimport time\r\nimport argparse\r\nimport os \r\nimport colorama \r\nimport csv \r\nimport json \r\nimport vk\r\nimport random\r\nfrom colorama import init\r\nfrom colorama import Fore, Back, Style\r\ninit(autoreset=True)\r\nimport itertools\r\nimport threading\r\nimport time\r\nimport base64\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport socket\r\n\r\n\r\nos.system('clear')\r\ndone = False\r\n#here is the animation\r\ndef animate():\r\n    for c in itertools.cycle(['|', '/', '-', '\\\\']):\r\n        if done:\r\n            break\r\n        sys.stdout.write('\\rзагрузка ' + c)\r\n        sys.stdout.flush()\r\n        time.sleep(0.1)\r\n\r\n\r\nt = threading.Thread(target=animate)\r\nt.start()\r\n\r\n#long process here\r\ntime.sleep(0.8)\r\ndone = True\r\nos.system ('clear')\r\nprint(Fore.GREEN +\r\n\"\"\"Exploite\"\"\")\r\n\r\nprint(Fore.BLUE +Back.WHITE + 'Автор : Arlion')\r\nprint(Fore.RED + 'Выберите опцию :')\r\nprint(Fore.YELLOW+\"\"\"-----#Деанон#-----\r\n[1] - Saycheese\r\n[2] - Seeker\r\n[3] - sayhello \r\n[4] - sherlock \r\n[5] - Deanons\r\n-----#Работа с ВК#-----\r\n[6] - VTOOL\r\n[7] - VKBRUTE\r\n[8] - VK DNK \r\n[9] - kingfish\r\n-----#прочее#-----\r\n[10] - smsham \r\n[11] - spymer\r\n[12] - Recreator Phishing\r\n[13] - DDOS\r\n[14] - http тунели\"\"\")\r\n\r\n\r\nos.chdir('utils')\r\nos.system('chmod 777 *')\r\nnum =int(input(''))\r\n\r\n#Приступим к самому соку :\r\n\r\n\r\nif num == (1) :\r\n    print (Fore.GREEN + 'Запуск Saycheese')\r\n    os.system('clear')\r\n    
os.chdir('saycheese-master')\r\n os.system('bash saycheese.sh')\r\nelif num == (2):\r\n print (Fore.GREEN + 'Запуск Seeker')\r\n os.system('clear')\r\n os.chdir('seeker-master')\r\n os.system('python3 seeker.py -t manual')\r\nelif num == (3):\r\n print (Fore.GREEN + 'Запуск sayhello')\r\n os.system('clear')\r\n os.chdir('sayhello-master')\r\n os.system('bash sayhello.sh')\r\nelif num == (4):\r\n print (Fore.GREEN + 'Запуск sherlock')\r\n os.system('clear')\r\n os.chdir('sherlock-master')\r\n os.system('python3 sherl.py')\r\nelif num == (5):\r\n\tprint(Fore.GREEN+\"Запуск deanons\")\r\n\tos.chdir('deanons-master')\r\n\tos.system('clear')\r\n\tos.system('python3 dean.py')\r\nelif num == (6):\r\n print(Fore.GREEN + 'Запуск VTOOL')\r\n os.system('clear')\r\n os.chdir('vtool-master')\r\n os.system('python3 vtool.py')\r\nelif num == (7) :\r\n print(Fore.GREEN + 'Запуск VKBRUTE')\r\n os.system('clear')\r\n os.chdir('vtool-master')\r\n os.system('python3 brute.py')\r\nelif num == (8):\r\n print(Fore.YELLOW+'Запуск VKtoPasswd')\r\n os.chdir('VkToPassword-master')\r\n os.system('clear')\r\n os.system('python3 vtp.py')\r\nelif num == (9):\r\n\tprint(Fore.GREEN+'Запуск kingfish')\r\n\tos.system('clear')\r\n\tos.chdir('kingfish-master')\r\n\tos.system('python3 fsh.py')\r\nelif num == (10):\r\n print (Fore.GREEN + 'Запуск smsham')\r\n os.system('clear')\r\n os.chdir('smsham-master')\r\n os.system('python3 smsham.py')\r\nelif num == (11):\r\n print(Fore.GREEN+'За��уск spymmer')\r\n os.system('clear')\r\n os.chdir('spymer-master')\r\n os.system('python3 spammer.py')\r\nelif num == (12) :\r\n print (Fore.GREEN + 'Запуск Recreator Phishing')\r\n os.system ('clear')\r\n os.chdir ('Recreator-Phishing-master')\r\n os.system ('python3 recreator-phishing.py')\r\n\r\nelif num == (13):\r\n print(Fore.GREEN + 'Запуск DDOS')\r\n os.system('clear')\r\n os.chdir('DDos')\r\n os.system('chmod 777 *')\r\n print (Fore.LIGHTBLUE_EX+\"\"\"Какой порт?[1]-80 или [2]-443\"\"\")\r\n pod = int(input(''))\r\n if pod == (1) or pod == (80):\r\n os.system('python3 80port.py')\r\n elif pod == (2) or pod == (443):\r\n os.system('python3 443port.py')\r\n else:\r\n print(Fore.RED+'ОШИБКА')\r\nelif num == (14):\r\n print (Back.WHITE +Fore.BLUE + '1 - ngrok или 2 - localhost.run')\r\n num = str(input(''))\r\n if num == ('1'):\r\n print(Fore.LIGHTBLUE_EX+'Какая ОС?')\r\n print(Fore.LIGHTRED_EX+\"\"\"[1]-android [2]-Linux\"\"\")\r\n \r\n osy = int(input(''))\r\n port = str(input('Порт:'))\r\n if osy == (1):\r\n os.system('./ngroka http '+port)\r\n elif osy == (2):\r\n os.system('./ngrok http '+port)\r\n elif num == ('2'):\r\n print (Fore.LIGHTBLUE_EX + 'Порт:')\r\n port = str(input())\r\n os.system('ssh -R 80:localhost:'+port+' ssh.localhost.run')\r\n else :\r\n print(Fore.RED +'ОШИБКА')\r\nelse :\r\n print(Fore.RED +'ОШИБКА')\r\n\r\n","repo_name":"yra123707/exploite","sub_path":"Z/Zru.py","file_name":"Zru.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12921090901","text":"import math\r\nfrom random import *\r\nprint(math.pi)\r\nn = 1000\r\nlicznik = 0\r\nfor i in range(1, n+1):\r\n x = math.pi* random()\r\n y = random()\r\n r = math.sin(x/y)\r\n\r\n print(r)\r\n","repo_name":"makspervov/1-semestr","sub_path":"WDI/Sinus.py","file_name":"Sinus.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7340852682","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Thu Jul 7 00:00:18 2016\n\n@author: McSim\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn\n\n# рисует один scatter plot\ndef scatter(actual, predicted, T):\n plt.scatter(actual, predicted)\n plt.xlabel(\"Labels\")\n plt.ylabel(\"Predicted probabilities\")\n plt.plot([-0.2, 1.2], [T, T])\n plt.axis([-0.1, 1.1, -0.1, 1.1])\n \n# рисует несколько scatter plot в таблице, имеющей размеры shape\ndef many_scatters(actuals, predicteds, Ts, titles, shape):\n plt.figure(figsize=(shape[1]*5, shape[0]*5))\n i = 1\n for actual, predicted, T, title in zip(actuals, predicteds, Ts, titles):\n ax = plt.subplot(shape[0], shape[1], i)\n ax.set_title(title)\n i += 1\n scatter(actual, predicted, T)\n \nactual_0 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., \n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\npredicted_0 = np.array([ 0.19015288, 0.23872404, 0.42707312, 0.15308362, 0.2951875 ,\n 0.23475641, 0.17882447, 0.36320878, 0.33505476, 0.202608 ,\n 0.82044786, 0.69750253, 0.60272784, 0.9032949 , 0.86949819,\n 0.97368264, 0.97289232, 0.75356512, 0.65189193, 0.95237033,\n 0.91529693, 0.8458463 ])\n \nplt.figure(figsize=(5, 5))\nscatter(actual_0, predicted_0, 0.5)\nplt.show()\n\nactual_1 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1.])\npredicted_1 = np.array([ 0.41310733, 0.43739138, 0.22346525, 0.46746017, 0.58251177,\n 0.38989541, 0.43634826, 0.32329726, 0.01114812, 0.41623557,\n 0.54875741, 0.48526472, 0.21747683, 0.05069586, 0.16438548,\n 0.68721238, 0.72062154, 0.90268312, 0.46486043, 0.99656541,\n 0.59919345, 0.53818659, 0.8037637 , 0.272277 , 0.87428626,\n 0.79721372, 0.62506539, 0.63010277, 0.35276217, 0.56775664])\nactual_2 = np.array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])\npredicted_2 = np.array([ 0.07058193, 0.57877375, 0.42453249, 0.56562439, 0.13372737,\n 0.18696826, 0.09037209, 0.12609756, 0.14047683, 0.06210359,\n 0.36812596, 0.22277266, 0.79974381, 0.94843878, 0.4742684 ,\n 0.80825366, 0.83569563, 0.45621915, 0.79364286, 0.82181152,\n 0.44531285, 0.65245348, 0.69884206, 0.69455127])\n \nmany_scatters([actual_0, actual_1, actual_2], [predicted_0, predicted_1, predicted_2], \n [0.5, 0.5, 0.5], [\"Perfect\", \"Typical\", \"Awful algorithm\"], (1, 3))\nplt.show()\n\n# рискующий идеальный алгоитм\nactual_0r = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\npredicted_0r = np.array([ 0.23563765, 0.16685597, 0.13718058, 0.35905335, 0.18498365,\n 0.20730027, 0.14833803, 0.18841647, 0.01205882, 0.0101424 ,\n 0.10170538, 0.94552901, 0.72007506, 0.75186747, 0.85893269,\n 0.90517219, 0.97667347, 0.86346504, 0.72267683, 0.9130444 ,\n 0.8319242 , 0.9578879 , 0.89448939, 0.76379055])\n# рискующий хороший алгоритм\nactual_1r = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\npredicted_1r = np.array([ 0.13832748, 0.0814398 , 0.16136633, 0.11766141, 0.31784942,\n 0.14886991, 0.22664977, 0.07735617, 0.07071879, 0.92146468,\n 0.87579938, 0.97561838, 0.75638872, 0.89900957, 0.93760969,\n 0.92708013, 0.82003675, 0.85833438, 0.67371118, 0.82115125,\n 0.87560984, 0.77832734, 0.7593189, 0.81615662, 0.11906964,\n 0.18857729])\n \nmany_scatters([actual_0, actual_1, actual_0r, actual_1r], \n [predicted_0, predicted_1, predicted_0r, predicted_1r], \n [0.5, 0.5, 0.5, 0.5],\n 
[\"Perfect careful\", \"Typical careful\", \"Perfect risky\", \"Typical risky\"], \n (2, 2))\nplt.show()\n \nactual_10 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1.])\npredicted_10 = np.array([ 0.29340574, 0.47340035, 0.1580356 , 0.29996772, 0.24115457, 0.16177793,\n 0.35552878, 0.18867804, 0.38141962, 0.20367392, 0.26418924, 0.16289102, \n 0.27774892, 0.32013135, 0.13453541, 0.39478755, 0.96625033, 0.47683139, \n 0.51221325, 0.48938235, 0.57092593, 0.21856972, 0.62773859, 0.90454639, 0.19406537,\n 0.32063043, 0.4545493 , 0.57574841, 0.55847795 ])\nactual_11 = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\npredicted_11 = np.array([ 0.35929566, 0.61562123, 0.71974688, 0.24893298, 0.19056711, 0.89308488,\n 0.71155538, 0.00903258, 0.51950535, 0.72153302, 0.45936068, 0.20197229, 0.67092724,\n 0.81111343, 0.65359427, 0.70044585, 0.61983513, 0.84716577, 0.8512387 , \n 0.86023125, 0.7659328 , 0.70362246, 0.70127618, 0.8578749 , 0.83641841, \n 0.62959491, 0.90445368])\n \nmany_scatters([actual_1, actual_10, actual_11], [predicted_1, predicted_10, predicted_11], \n [0.5, 0.5, 0.5], [\"Typical\", \"Avoids FP\", \"Avoids FN\"], (1, 3))\nplt.show()\n\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score\n\nT = 0.5\nprint(\"Алгоритмы, разные по качеству:\")\nfor actual, predicted, descr in zip([actual_0, actual_1, actual_2], \n [predicted_0 > T, predicted_1 > T, predicted_2 > T],\n [\"Perfect:\", \"Typical:\", \"Awful:\"]):\n print(descr, \"precision =\", precision_score(actual, predicted), \"recall =\", \\\n recall_score(actual, predicted), \";\",\\\n \"accuracy =\", accuracy_score(actual, predicted))\nprint()\nprint(\"Осторожный и рискующий алгоритмы:\")\nfor actual, predicted, descr in zip([actual_1, actual_1r], \n [predicted_1 > T, predicted_1r > T],\n [\"Typical careful:\", \"Typical risky:\"]):\n print(descr, \"precision =\", precision_score(actual, predicted), \"recall =\", \\\n recall_score(actual, predicted), \";\",\\\n \"accuracy =\", accuracy_score(actual, predicted))\nprint()\nprint(\"Разные склонности алгоритмов к ошибкам FP и FN:\")\nfor actual, predicted, descr in zip([actual_10, actual_11], \n [predicted_10 > T, predicted_11 > T], \n [\"Avoids FP:\", \"Avoids FN:\"]):\n print(descr, \"precision =\", precision_score(actual, predicted), \"recall =\", \\\n recall_score(actual, predicted), \";\",\\\n \"accuracy =\", accuracy_score(actual, predicted))\n \nfrom sklearn.metrics import precision_recall_curve\n\nprecs = []\nrecs = []\nthreshs = []\nlabels = [\"Typical\", \"Avoids FP\", \"Avoids FN\"]\nfor actual, predicted in zip([actual_1, actual_10, actual_11], \n [predicted_1, predicted_10, predicted_11]):\n prec, rec, thresh = precision_recall_curve(actual, predicted)\n precs.append(prec)\n recs.append(rec)\n threshs.append(thresh)\nplt.figure(figsize=(15, 5))\nfor i in range(3):\n ax = plt.subplot(1, 3, i+1)\n plt.plot(threshs[i], precs[i][:-1], label=\"precision\")\n plt.plot(threshs[i], recs[i][:-1], label=\"recall\")\n plt.xlabel(\"threshold\")\n ax.set_title(labels[i])\n plt.legend()\nplt.show()\n \n############### Programming assignment: problem 1 ###############\nT = 0.65\nprecision_1 = precision_score(actual_1, predicted_1 > T)\nrecall_1 = recall_score(actual_1, predicted_1 > T)\nprint(\"Typical:\", \"precision =\", precision_1, \"recall =\", recall_1)\nprecision_10 = precision_score(actual_10, 
predicted_10 > T)\nrecall_10 = recall_score(actual_10, predicted_10 > T)\nprint(\"Avoids FP:\", \"precision =\", precision_10, \"recall =\", recall_10)\nprecision_11 = precision_score(actual_11, predicted_11 > T)\nrecall_11 = recall_score(actual_11, predicted_11 > T)\nprint(\"Avoids FP:\", \"precision =\", precision_11, \"recall =\", recall_11)\n\ndef write_answer_1(precision_1, recall_1, precision_10, recall_10, precision_11, recall_11):\n answers = [precision_1, recall_1, precision_10, recall_10, precision_11, recall_11]\n with open(\"pa_metrics_problem1.txt\", \"w\") as fout:\n fout.write(\" \".join([str(num) for num in answers]))\n\nwrite_answer_1(precision_1, recall_1, precision_10, recall_10, precision_11, recall_11)\n\nfrom sklearn.metrics import f1_score\n\nT = 0.5\nprint(\"Разные склонности алгоритмов к ошибкам FP и FN:\")\nfor actual, predicted, descr in zip([actual_1, actual_10, actual_11], \n [predicted_1 > T, predicted_10 > T, predicted_11 > T], \n [\"Typical:\", \"Avoids FP:\", \"Avoids FN:\"]):\n print(descr, \"f1 =\", f1_score(actual, predicted))\n \n############### Programming assignment: problem 2 ###############\nf1_list_1 = []\nfor k in range(10):\n f1_list_1 += [f1_score(actual_1, predicted_1 > 0.1*k)]\nm = [i for i, j in enumerate(f1_list_1) if j == max(f1_list_1)]\nk_1 = min(m)\n\nf1_list_10 = []\nfor k in range(10):\n f1_list_10 += [f1_score(actual_10, predicted_10 > 0.1*k)]\nm = [i for i, j in enumerate(f1_list_10) if j == max(f1_list_10)]\nk_10 = min(m)\n\nf1_list_11 = []\nfor k in range(10):\n f1_list_11 += [f1_score(actual_11, predicted_11 > 0.1*k)]\nm = [i for i, j in enumerate(f1_list_11) if j == max(f1_list_11)]\nk_11 = min(m)\n\nks = [k_1, k_10, k_11]\n\nprint(\"Модели максимизируются при следующих значениях k для порогов вида T = 0.1 * k :\")\nprint(\"Typical:\", k_1)\nprint(\"Avoids FP:\", k_10)\nprint(\"Avoids FN:\", k_11)\n\nmany_scatters([actual_1, actual_10, actual_11], [predicted_1, predicted_10, predicted_11], \n np.array(ks)*0.1, [\"Typical\", \"Avoids FP\", \"Avoids FN\"], (1, 3))\nplt.show()\n \ndef write_answer_2(k_1, k_10, k_11):\n answers = [k_1, k_10, k_11]\n with open(\"pa_metrics_problem2.txt\", \"w\") as fout:\n fout.write(\" \".join([str(num) for num in answers]))\n\nwrite_answer_2(k_1, k_10, k_11)\n\nfrom sklearn.metrics import log_loss\n\nprint(\"Алгоритмы, разные по качеству:\")\nfor actual, predicted, descr in zip([actual_0, actual_1, actual_2], \n [predicted_0, predicted_1, predicted_2],\n [\"Perfect:\", \"Typical:\", \"Awful:\"]):\n print(descr, log_loss(actual, predicted))\nprint()\nprint(\"Осторожный и рискующий алгоритмы:\")\nfor actual, predicted, descr in zip([actual_0, actual_0r, actual_1, actual_1r], \n [predicted_0, predicted_0r, predicted_1, predicted_1r],\n [\"Ideal careful\", \"Ideal risky\", \"Typical careful:\", \"Typical risky:\"]):\n print(descr, log_loss(actual, predicted))\nprint()\nprint(\"Разные склонности алгоритмов к ошибкам FP и FN:\")\nfor actual, predicted, descr in zip([actual_10, actual_11], \n [predicted_10, predicted_11], \n [\"Avoids FP:\", \"Avoids FN:\"]):\n print(descr, log_loss(actual, predicted))\n \n############### Programming assignment: problem 3 ###############\ndef weighted_log_loss(actual, predicted):\n wll=0\n for i in range(len(actual)):\n wll += -1 / (len(actual)) * ((0.3 * actual[i] * np.log(predicted[i])) + 0.7 * (1 - actual[i]) * np.log(1 - predicted[i]))\n return wll\n\nprint(\"========== Правдоподобие меток в actual с вероятностями из predicted с штрафованием false positive 
==========\")\nprint(\"Алгоритмы, разные по качеству:\")\nfor actual, predicted, descr in zip([actual_0, actual_1, actual_2], \n [predicted_0, predicted_1, predicted_2],\n [\"Perfect:\", \"Typical:\", \"Awful:\"]):\n print(descr, weighted_log_loss(actual, predicted))\nprint()\nprint(\"Осторожный и рискующий алгоритмы:\")\nfor actual, predicted, descr in zip([actual_0, actual_0r, actual_1, actual_1r], \n [predicted_0, predicted_0r, predicted_1, predicted_1r],\n [\"Ideal careful\", \"Ideal risky\", \"Typical careful:\", \"Typical risky:\"]):\n print(descr, weighted_log_loss(actual, predicted))\nprint()\nprint(\"Разные склонности алгоритмов к ошибкам FP и FN:\")\nfor actual, predicted, descr in zip([actual_10, actual_11], \n [predicted_10, predicted_11], \n [\"Avoids FP:\", \"Avoids FN:\"]):\n print(descr, weighted_log_loss(actual, predicted))\n\nfor actual, predicted, pfx in zip([actual_0, actual_1, actual_2, actual_0r,\n actual_1r, actual_10, actual_11],\n [predicted_0, predicted_1, predicted_2, predicted_0r,\n predicted_1r, predicted_10, predicted_11],\n ['0', '1', '2', '0r', '1r', '10', '11']):\n exec('wll_' + pfx + ' = ' + str(weighted_log_loss(actual, predicted)))\n \ndef write_answer_3(wll_0, wll_1, wll_2, wll_0r, wll_1r, wll_10, wll_11):\n answers = [wll_0, wll_1, wll_2, wll_0r, wll_1r, wll_10, wll_11]\n with open(\"pa_metrics_problem3.txt\", \"w\") as fout:\n fout.write(\" \".join([str(num) for num in answers]))\n\nwrite_answer_3(wll_0, wll_1, wll_2, wll_0r, wll_1r, wll_10, wll_11)\n \nfrom sklearn.metrics import roc_curve, roc_auc_score\n\nplt.figure(figsize=(15, 5))\nplt.subplot(1, 3, 1)\naucs = \"\"\nfor actual, predicted, descr in zip([actual_0, actual_1, actual_2], \n [predicted_0, predicted_1, predicted_2],\n [\"Perfect\", \"Typical\", \"Awful\"]):\n fpr, tpr, thr = roc_curve(actual, predicted)\n plt.plot(fpr, tpr, label=descr)\n aucs += descr + \":%3f\"%roc_auc_score(actual, predicted) + \" \"\nplt.xlabel(\"false positive rate\")\nplt.ylabel(\"true positive rate\")\nplt.legend(loc=4)\nplt.axis([-0.1, 1.1, -0.1, 1.1])\nplt.subplot(1, 3, 2)\nfor actual, predicted, descr in zip([actual_0, actual_0r, actual_1, actual_1r], \n [predicted_0, predicted_0r, predicted_1, predicted_1r],\n [\"Ideal careful\", \"Ideal Risky\", \"Typical careful\", \"Typical risky\"]):\n fpr, tpr, thr = roc_curve(actual, predicted)\n aucs += descr + \":%3f\"%roc_auc_score(actual, predicted) + \" \"\n plt.plot(fpr, tpr, label=descr)\nplt.xlabel(\"false positive rate\")\nplt.ylabel(\"true positive rate\")\nplt.legend(loc=4)\nplt.axis([-0.1, 1.1, -0.1, 1.1])\nplt.subplot(1, 3, 3)\nfor actual, predicted, descr in zip([actual_1, actual_10, actual_11], \n [predicted_1, predicted_10, predicted_11], \n [\"Typical\", \"Avoids FP\", \"Avoids FN\"]):\n fpr, tpr, thr = roc_curve(actual, predicted)\n aucs += descr + \":%3f\"%roc_auc_score(actual, predicted) + \" \"\n plt.plot(fpr, tpr, label=descr)\nplt.xlabel(\"false positive rate\")\nplt.ylabel(\"true positive rate\")\nplt.legend(loc=4)\nplt.axis([-0.1, 1.1, -0.1, 1.1])\nplt.show()\nprint(aucs)\n\n############### Programming assignment: problem 4 ###############\nfor actual, predicted, pfx in zip([actual_0, actual_1, actual_2, actual_0r,\n actual_1r, actual_10, actual_11],\n [predicted_0, predicted_1, predicted_2, predicted_0r,\n predicted_1r, predicted_10, predicted_11],\n ['0', '1', '2', '0r', '1r', '10', '11']):\n dist=[]\n fpr, tpr, thr = roc_curve(actual, predicted)\n for i in range(len(thr)):\n dist += [np.linalg.norm(np.array((0,1)) - 
np.array((fpr[i],tpr[i])))]\n m = (i for i, j in enumerate(dist) if j == min(dist))\n thr_max = 0\n for i in m:\n if thr_max < thr[i]:\n thr_max = thr[i]\n i_max = i \n exec('T_' + pfx + ' = ' + str(thr[i_max]))\n print(pfx,thr[i_max])\n\ndef write_answer_4(T_0, T_1, T_2, T_0r, T_1r, T_10, T_11):\n answers = [T_0, T_1, T_2, T_0r, T_1r, T_10, T_11]\n with open(\"pa_metrics_problem4.txt\", \"w\") as fout:\n fout.write(\" \".join([str(num) for num in answers]))\n \nwrite_answer_4(T_0, T_1, T_2, T_0r, T_1r, T_10, T_11)","repo_name":"McSim/McGit","sub_path":"archive/Kurs2-Week2-HW2.py","file_name":"Kurs2-Week2-HW2.py","file_ext":"py","file_size_in_byte":17219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29725537111","text":"from binascii import hexlify\nfrom expliot.core.tests.test import Test, TCategory, TTarget, TLog\nfrom expliot.core.protocols.hardware.can import CanBus #, CanMessage\n\nclass CANRead(Test):\n\n def __init__(self):\n super().__init__(name = \"readcan\",\n summary = \"CANbus reader\",\n descr = \"\"\"This plugin allows you to read message(s) from the CANBus. As of now it \n uses socketcan but if you want to extend it to other interfaces, just \n open an issue on the official expliot project repository\"\"\",\n author = \"Aseem Jakhar\",\n email = \"aseemjakhar@gmail.com\",\n ref = [\"https://en.wikipedia.org/wiki/CAN_bus\"],\n category = TCategory(TCategory.CAN, TCategory.HW, TCategory.ANALYSIS),\n target = TTarget(TTarget.GENERIC, TTarget.GENERIC, TTarget.GENERIC))\n\n self.argparser.add_argument(\"-i\", \"--iface\", default=\"vcan0\", help=\"Interface to use. Default is vcan0\")\n self.argparser.add_argument(\"-a\", \"--arbitid\", type=lambda x: int(x,0),\n help=\"Show messages of the specified arbitration ID only. For hex value prefix it with 0x\")\n self.argparser.add_argument(\"-c\", \"--count\", type=int, default=10,\n help=\"Specify the count of messages to read from the CANBus. Default is 10\")\n self.argparser.add_argument(\"-t\", \"--timeout\", type=float,\n help=\"\"\"Specify the time, in seconds, to wait for each read. Default is to wait \n forever. You may use float values as well i.e. 
0.5\"\"\")\n\n def execute(self):\n TLog.generic(\"Reading ({}) messages from CANbus on interface({})\".format(self.args.count, self.args.iface))\n\n bus = None\n try:\n if self.args.count < 1:\n raise ValueError(\"Illegal count value {}\".format(self.args.count))\n bus = CanBus(bustype=\"socketcan\", channel=self.args.iface)\n for cnt in range(1, self.args.count + 1):\n m = bus.recv(timeout=self.args.timeout)\n if m is None:\n raise TimeoutError(\"Timed out while waiting for CAN message\")\n if self.args.arbitid:\n if self.args.arbitid == m.arbitration_id:\n TLog.success(\"(msg={})(data={})\".format(cnt, hexlify(m.data).decode()))\n else:\n TLog.success(\"(msg={})(arbitration_id=0x{:x})(data={})\".format(cnt,\n m.arbitration_id,\n hexlify(m.data).decode()))\n except:\n self.result.exception()\n finally:\n if bus:\n bus.shutdown()","repo_name":"kzwkt/iot-exploit","sub_path":"expliot/plugins/can/canread.py","file_name":"canread.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"6990681848","text":"import asyncio\n\n\n@asyncio.coroutine\ndef func1():\n print(1)\n # 遇到io阻塞自动切换,可以自动识别io,asyncio.sleep(2)是io的一种\n yield from asyncio.sleep(2) # 遇到IO耗时操作,自动化切换到tasks中的其他任务\n print(2)\n\n\n@asyncio.coroutine\ndef func2():\n print(3)\n yield from asyncio.sleep(2) # 遇到IO耗时操作,自动化切换到tasks中的其他任务\n print(4)\n\n# 将所有的协程打包到task中\ntasks = [\n asyncio.ensure_future(func1()),\n asyncio.ensure_future(func2())\n]\n# 生成一个时间循环\nloop = asyncio.get_event_loop()\n# 启动事件循环把所有的协程放如其中,随机执行task中的程序\nloop.run_until_complete(asyncio.wait(tasks))\n","repo_name":"jonbenzhang/python-","sub_path":"01basic_knowledge/base_model/05协程/06asyncio/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"25138045826","text":"from setuptools import find_packages, setup\n\n# format 'dependency~=MAJOR.MINOR' ensures that only MAJOR version is locked\n\ninstall_requires = []\n\ndev_requires = [\n \"pytest\",\n \"pyinstaller\",\n \"coverage\",\n \"mypi\"\n]\n\nsetup(\n name=\"number-spell\",\n version=\"0.0.1\",\n url=\"git@github.com:zbacskai/number-spell.git\",\n author=\"Zoltan Bacskai\",\n author_email=\"z.bacskai.jr@gmail.com\",\n description=\"A simple CLI to spell numbers in British English\",\n packages=find_packages(),\n include_package_data=True,\n platforms=\"any\",\n install_requires=install_requires,\n setup_requires=[\"pytest-runner\"],\n tests_require=dev_requires,\n extras_require={\"dev\": dev_requires},\n)\n","repo_name":"zbacskai/number-spell","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70973880152","text":"import cv2\n\n#อ่านภาพ\nimg = cv2.imread(\"image/cat.jpg\")\n'''print(type(img.ndim)) \nprint(img)'''\n\n#รูปแบบภาพ ใส่ 0 ต่อท้าย imread จะเป็นภาพ grayscale \nimgGray = cv2.imread(\"image/cat.jpg\",0)\n\n#เปลียนขนาดภาพที่แสดงผล\nimgResize = cv2.resize(imgGray, (400,300))\n\n#แสดงผลภาพ\ncv2.imshow(\"Output\", imgResize)\ncv2.waitKey(delay=5000)\ncv2.destroyAllWindows()\n\n\n","repo_name":"astrrr/OpenCV-Tutorial","sub_path":"basic1.py","file_name":"basic1.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26837055210","text":"import tkinter as 
tk\r\n\r\ndef contar_casas_decimais(numero):\r\n # Verifica se o número é uma string, se não for, converte-o em string\r\n numero_str = str(numero)\r\n\r\n # Divide o número em partes inteira e decimal usando o ponto decimal como separador\r\n partes = numero_str.split('.')\r\n\r\n # Se o número não tiver parte decimal, retorna 0\r\n if len(partes) == 1:\r\n return 0\r\n\r\n # Retorna o número de caracteres após o ponto decimal\r\n return len(partes[1])\r\n\r\ndef formatar_moeda(valor):\r\n # Obtém o valor digitado e formata como moeda\r\n return \"{:.2f}\".format(valor)\r\n\r\ndef formatar_preco(entry):\r\n try:\r\n valor_atual = entry.get()\r\n\r\n if contar_casas_decimais(valor_atual) < 2:\r\n # O campo foi apagado, o valor será dividido por 10\r\n preco_decimal = float(valor_atual) / 10\r\n else:\r\n # O campo não está vazio, então continuamos com a formatação normal\r\n preco_decimal = float(valor_atual) * 10\r\n\r\n preco_formatado = formatar_moeda(preco_decimal)\r\n entry.delete(0, tk.END)\r\n entry.insert(0, preco_formatado)\r\n\r\n except ValueError:\r\n pass\r\n\r\ndef calcular1(*args):\r\n try:\r\n global preco_var, lucro_var, taxa2_checkbox_var, resultado_label\r\n preco = preco_var.get()\r\n lucro = lucro_var.get()\r\n\r\n # Verifica se o Checkbutton está selecionado (marcado)\r\n if taxa2_checkbox_var.get() == 1:\r\n taxa1 = (preco+lucro) * 0.20\r\n taxa1 = 100 if taxa1 >= 100 else taxa1\r\n else:\r\n taxa1 = (preco+lucro) * 0.14\r\n taxa1 = 100 if taxa1 >= 100 else taxa1\r\n\r\n taxa2 = preco * 0.02\r\n\r\n preco_total = preco + lucro + taxa1 + taxa2\r\n\r\n resultado_label.config(\r\n text=f\"Se comprou o produto por {formatar_moeda(preco)}\\nEntão terá que vender por {formatar_moeda(preco_total)}\\n\\nPreço: {formatar_moeda(preco)}\\nLucro: {formatar_moeda(lucro)}\\nTaxa de venda e frete grátis: {formatar_moeda(taxa1)}\\nTaxa de serviço de transações: {formatar_moeda(taxa2)}\")\r\n\r\n except ValueError:\r\n resultado_label.config(text=\"Insira apenas números válidos.\")\r\n\r\ndef calcular2(*args):\r\n try:\r\n global preco_var, taxa2_checkbox_var, resultado_label\r\n preco = preco_var.get()\r\n\r\n # Verifica se o Checkbutton está selecionado (marcado)\r\n if taxa2_checkbox_var.get() == 1:\r\n taxa1 = preco * 0.20\r\n taxa1 = 100 if taxa1 >= 100 else taxa1\r\n else:\r\n taxa1 = preco * 0.14\r\n taxa1 = 100 if taxa1 >= 100 else taxa1\r\n\r\n taxa2 = preco * 0.02\r\n\r\n valor = preco - (taxa1 + taxa2)\r\n\r\n resultado_label.config(\r\n text=f\"Se o produto está sendo vendido por {formatar_moeda(preco)}\\nEntão o valor do produto com o lucro é {formatar_moeda(valor)}\\n\\nValor do Produto:: {formatar_moeda(preco)}\\nTaxa de venda e frete grátis: {formatar_moeda(taxa1)}\\nTaxa de serviço de transações: {formatar_moeda(taxa2)}\")\r\n\r\n except ValueError:\r\n resultado_label.config(text=\"Insira apenas números válidos.\")\r\n\r\ndef criar_janela1():\r\n global janela1, preco_var, lucro_var, taxa2_checkbox_var, resultado_label\r\n janela.destroy()\r\n\r\n # Cria uma nova janela\r\n janela1 = tk.Tk()\r\n janela1.title(\"Shopee\")\r\n\r\n # Adicione widgets e configurações específicas da nova janela aqui\r\n label = tk.Label(janela1, text=\"Cacular por quanto vender\")\r\n label.pack()\r\n\r\n # Criar as variáveis de controle\r\n preco_var = tk.DoubleVar()\r\n lucro_var = tk.DoubleVar()\r\n\r\n # Inicializa as variáveis de controle com o valor \"0.00\"\r\n preco_var.set(formatar_moeda(0))\r\n lucro_var.set(formatar_moeda(0))\r\n\r\n # Criar o Checkbutton para adicionar ou não 
a taxa2\r\n taxa2_checkbox_var = tk.IntVar()\r\n taxa2_checkbox = tk.Checkbutton(\r\n janela1, text=\"Adicionar Taxa2\", variable=taxa2_checkbox_var, command=calcular1) \r\n taxa2_checkbox.pack()\r\n\r\n # Criar os frames para agrupar os widgets do preço e do lucro\r\n frame_preco_label = tk.Frame(janela1)\r\n frame_preco_label.pack(pady=5, padx=5)\r\n frame_preco = tk.Frame(janela1)\r\n frame_preco.pack(pady=5, padx=5)\r\n frame_lucro_label = tk.Frame(janela1)\r\n frame_lucro_label.pack(pady=5, padx=5)\r\n frame_lucro = tk.Frame(janela1)\r\n frame_lucro.pack(pady=5, padx=5)\r\n\r\n # Criar os widgets do preço\r\n preco_label = tk.Label(\r\n frame_preco_label, text=\"Qual valor você pagará pelo produto:\")\r\n preco_label.pack(side=tk.LEFT)\r\n preco_label_r = tk.Label(frame_preco, text=\"R$ \")\r\n preco_label_r.pack(side=tk.LEFT)\r\n preco_entry = tk.Entry(frame_preco, textvariable=preco_var)\r\n preco_entry.pack(side=tk.LEFT)\r\n\r\n # Criar os widgets do lucro\r\n lucro_label = tk.Label(\r\n frame_lucro_label, text=\"Quanto deseja ganhar de lucro em cima do produto:\")\r\n lucro_label.pack(side=tk.LEFT)\r\n lucro_label_r = tk.Label(frame_lucro, text=\"R$ \")\r\n lucro_label_r.pack(side=tk.LEFT)\r\n lucro_entry = tk.Entry(frame_lucro, textvariable=lucro_var)\r\n lucro_entry.pack(side=tk.LEFT)\r\n\r\n # Adicionar o evento para atualização automática ao digitar o valor do produto ou lucro\r\n preco_var.trace_add(\"write\", calcular1)\r\n lucro_var.trace_add(\"write\", calcular1)\r\n\r\n # Adicionar o evento para formatar o campo de lucro e preco como uma quantia monetária\r\n preco_entry.bind(\"\", lambda event: formatar_preco(preco_entry))\r\n lucro_entry.bind(\"\", lambda event: formatar_preco(lucro_entry))\r\n\r\n # Adicionar o evento de clique ao Checkbutton para chamar a função calcular\r\n taxa2_checkbox.bind(\"\", calcular1)\r\n\r\n resultado_label = tk.Label(janela1, text=\"\")\r\n resultado_label.pack()\r\n\r\n # Botão para voltar à janela principal\r\n btn_voltar = tk.Button(\r\n janela1, text=\"Voltar para Janela Principal\", command=lambda:voltar_janela_principal(1))\r\n btn_voltar.pack(pady=5)\r\n\r\ndef criar_janela2():\r\n janela.destroy() # Destroi a janela atual\r\n global janela2, preco_var, taxa2_checkbox_var, resultado_label\r\n\r\n # Cria uma nova janela\r\n janela2 = tk.Tk()\r\n janela2.title(\"Shoppe\")\r\n\r\n # Adicione widgets e configurações específicas da nova janela aqui\r\n label = tk.Label(janela2, text=\"Cacular por quanto estão vendendo\")\r\n label.pack()\r\n\r\n # Criar as variáveis de controle\r\n preco_var = tk.DoubleVar()\r\n\r\n # Inicializa as variáveis de controle com o valor \"0.00\"\r\n preco_var.set(formatar_moeda(0))\r\n\r\n # Criar o Checkbutton para adicionar ou não a taxa2\r\n taxa2_checkbox_var = tk.IntVar()\r\n taxa2_checkbox = tk.Checkbutton(\r\n janela2, text=\"Frete gratis\", variable=taxa2_checkbox_var, command=calcular2) \r\n taxa2_checkbox.pack()\r\n\r\n # Criar os frames para agrupar os widgets do preço e do lucro\r\n frame_preco_label = tk.Frame(janela2)\r\n frame_preco_label.pack(pady=5, padx=5)\r\n frame_preco = tk.Frame(janela2)\r\n frame_preco.pack(pady=5, padx=5)\r\n\r\n # Criar os widgets do preço\r\n preco_label = tk.Label(frame_preco_label, text=\"Qual valor do produto:\")\r\n preco_label.pack(side=tk.LEFT)\r\n preco_label_r = tk.Label(frame_preco, text=\"R$ \")\r\n preco_label_r.pack(side=tk.LEFT)\r\n preco_entry = tk.Entry(frame_preco, textvariable=preco_var)\r\n preco_entry.pack(side=tk.LEFT)\r\n\r\n # Adicionar o evento 
para atualização automática ao digitar o valor do produto ou lucro\r\n preco_var.trace_add(\"write\", calcular2)\r\n\r\n # Adicionar o evento para formatar o campo de lucro e preco como uma quantia monetária\r\n preco_entry.bind(\"<KeyRelease>\", lambda event: formatar_preco(preco_entry))\r\n\r\n # Adicionar o evento de clique ao Checkbutton para chamar a função calcular\r\n taxa2_checkbox.bind(\"<Button-1>\", calcular2)\r\n\r\n resultado_label = tk.Label(janela2, text=\"\")\r\n resultado_label.pack()\r\n\r\n # Botão para voltar à janela principal\r\n btn_voltar = tk.Button(\r\n janela2, text=\"Voltar\", command=lambda: voltar_janela_principal(2))\r\n btn_voltar.pack(pady=5)\r\n\r\ndef voltar_janela_principal(valor):\r\n # Destroi a janela atual\r\n if (valor == 1):\r\n janela1.destroy()\r\n else:\r\n janela2.destroy()\r\n\r\n # Cria uma nova janela principal\r\n criar_janela_principal()\r\n\r\ndef criar_janela_principal():\r\n global janela\r\n janela = tk.Tk()\r\n janela.title(\"Shopee\")\r\n\r\n # Largura: 400 pixels, Altura: 120 pixels, Posição inicial: (100, 100)\r\n janela.geometry(\"400x120+100+100\")\r\n\r\n # Janela não redimensionável em nenhum eixo (horizontal e vertical)\r\n janela.resizable(False, False)\r\n\r\n # Botões para abrir as janelas 1 e 2\r\n btn_janela1 = tk.Button(\r\n janela, text=\"Calcular por quanto vender\", command=criar_janela1)\r\n btn_janela1.pack(pady=10)\r\n\r\n btn_janela2 = tk.Button(\r\n janela, text=\"Saber por quanto estão vendendo\", command=criar_janela2)\r\n btn_janela2.pack(pady=10)\r\n\r\n# Iniciar com a janela principal\r\ncriar_janela_principal()\r\n\r\n# Executar a interface gráfica\r\njanela.mainloop()\r\n","repo_name":"leonardo-rangel159/Shopee","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":9135,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14072335492","text":"import fritzbox.api.callmonitor as callmonitor\nimport json\n\nclass callmonitor(callmonitor.callmonitor):\n\n # def __init__(self):\n # print('Callmonitor Object')\n # self._callback = None\n\n def register_callback(self, callback = None):\n # print('e',callback)\n if callback is None:\n # print('Not register')\n self._log.error('Failed to register Callback handler %s' % callback)\n else:\n self._log.debug('Registered Callback handler for call notifications %s' % callback)\n self._callback = callback\n # print(self._callback)\n\n def call_handler(self,notification):\n # print('test',a)\n self._log.debug('Received call Notification %s'% notification)\n self._callback(notification)\n\n","repo_name":"ms412/fritzbox","sub_path":"fritzbox/calls/callmonitor.py","file_name":"callmonitor.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74275457113","text":"#!/usr/bin/env python\n__author__ = \"Thijs van Gansewinkel\"\n__version__ = \"0.1\"\n\nconfigfile = \"config.json\"\ndbfile = \"bonbotdb.json\"\n\nimport logging\nfrom telegram import Update, File, PhotoSize\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext\nimport ujson # for database stuff\nfrom datetime import time\nfrom tinydb import TinyDB\n\nimport callbacks as cbs\nimport commands as cmd\n\nconfig = ujson.load(open(configfile, \"r\"))\ndb = TinyDB(dbfile)\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\n# command, 
function, level\ncommands = {\"start\": (cmd.start, -1),\n \"help\": (cmd.help, 0),\n \"html\": (cmd.html, 0),\n \"info\": (cmd.info, 0),\n \"anonymous\": (cmd.anon, 1),\n \"latex\": (cmd.latex, 1),\n \"stats\": (cmd.stats, 1),\n \"shell\": (cmd.shell, 2),\n \"grant\": (cmd.grant, 2),\n \"revoke\": (cmd.revoke, 2),\n \"sendto\": (cmd.sendto, 2),\n \"spam\": (cmd.spam, 2),\n \"database\": (cmd.database, 2),\n \"printq\": (cmd.printq, 2),\n \"purge\": (cmd.purge, 2),\n \"sleep\": (cmd.sleep, 2)}\n\nif __name__ == '__main__':\n mhandler = cbs.mhandler(logger, config, db)\n\n updater = Updater(config['token'])\n dispatcher = updater.dispatcher\n\n for command, (handler, level) in commands.items():\n dispatcher.add_handler(CommandHandler(command, handler(level, mhandler).handlecmd))\n\n for filter in (Filters.sticker, Filters.text, Filters.photo, Filters.document):\n dispatcher.add_handler(MessageHandler(filter, mhandler.message))\n\n # scheduled tasks that run in UTC so 6:30 in utc is 7:30 here (summer time)\n # will fix later\n job = updater.job_queue\n #job.run_daily(mhandler.russian, time(8, 00, 00, 000000),days=(0, 1, 2, 3, 4, 5, 6))\n job.run_daily(mhandler.go_sleep, time(22, 30, 00, 000000),days=(0, 1, 2, 3, 4, 5, 6))\n job.run_daily(mhandler.wake, time(6, 30, 00, 000000),days=(0, 1, 2, 3, 4, 5, 6))\n\n # add exception handler\n #dispatcher.add_error_handler(mhandler.exception)\n\n # start_polling() is non-blocking and will stop the bot gracefully on SIGTERM\n updater.start_polling()\n updater.idle()\n","repo_name":"H0yVoy/Bonnetjesprinter","sub_path":"cpython/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"70001224792","text":"from typing import List\n\ndigits = [1,2,3]\n\n\nclass Solution:\n def plusOne(self, digits: List[int]) -> List[int]:\n n = len(digits)\n\n for i in range(n-1, -1, -1):\n # print(i)\n if digits[i] < 9:\n digits[i] += 1\n break\n else:\n digits[i] = 0\n\n # all digits were 9, so a leading 1 must be added\n if digits[0] == 0:\n digits.insert(0, 1)\n return digits\n\nsol = Solution()\n\ndigits_res = sol.plusOne(digits)\n\nprint(digits_res)\n","repo_name":"dhineshmuthukaruppan/practice-code","sub_path":"top interview preparation/8 plus one.py","file_name":"8 plus one.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23997448170","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\n\n\nclass Solution(object):\n def deepestLeavesSum(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n\n # bfs solution, adding all nodes in the last layer\n ret = 0\n q = deque([root])\n\n while q:\n ret = 0\n layer_size = len(q)\n\n for _ in range(layer_size):\n popped = q.popleft()\n if popped.left: q.append(popped.left)\n if popped.right: q.append(popped.right)\n ret += popped.val\n\n return ret\n","repo_name":"tyler-le/LeetCode","sub_path":"1302-deepest-leaves-sum/1302-deepest-leaves-sum.py","file_name":"1302-deepest-leaves-sum.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"25511253833","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\n\n\nclass CRMLead(models.Model):\n _inherit = 'crm.lead'\n\n 
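# claim_count is a computed field: _claim_count() below recounts the\n # crm.claim records linked to this lead through opportunity_id.\n 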
claim_count = fields.Integer(\n string='# Claims',\n compute='_claim_count',\n )\n crm_claim_ids = fields.One2many(\n 'crm.claim',\n 'opportunity_id',\n string='Opportunity',\n )\n\n @api.multi\n @api.depends('crm_claim_ids', 'crm_claim_ids.opportunity_id')\n def _claim_count(self):\n Claim = self.env['crm.claim']\n for lead in self:\n claim = Claim.search([('opportunity_id', '=', lead.id)])\n lead.claim_count = len(claim)\n","repo_name":"ecosoft-odoo/cft","sub_path":"crm_claim_adjust_cft/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"74240998551","text":"from ArubaCloud.Compute.LoadBalancer.Models import Instance, Rules, NotificationContacts, NotificationContact\nfrom ArubaCloud.base import Request\n\n\nclass SetEnqueueLoadBalancerCreation(Request):\n def __init__(self, healthCheckNotification, instance, ipAddressResourceId, loadBalancerClassOfServiceID, name,\n notificationContacts, rules, *args, **kwargs):\n \"\"\"\n :type healthCheckNotification: bool\n :type instance: Instance\n :type ipAddressResourceId: list[int]\n :type loadBalancerClassOfServiceID: int\n :type name: str\n :type notificationContacts: NotificationContacts or list[NotificationContact]\n :type rules: Rules\n :param healthCheckNotification:\n :param instance:\n :param ipAddressResourceId:\n :param loadBalancerClassOfServiceID:\n :param name:\n :param notificationContacts:\n :param rules:\n :param args:\n :param kwargs:\n \"\"\"\n self.HealthCheckNotification = healthCheckNotification\n self.Instance = instance\n self.IpAddressResourceId = ipAddressResourceId\n self.LoadBalancerClassOfServiceID = loadBalancerClassOfServiceID\n self.Name = name\n self.NotificationContacts = notificationContacts\n self.Rules = rules\n super(SetEnqueueLoadBalancerCreation, self).__init__(*args, **kwargs)\n\n def commit(self):\n return self._commit()\n","repo_name":"Arubacloud/pyArubaCloud","sub_path":"ArubaCloud/Compute/LoadBalancer/Requests/SetEnqueueLoadBalancerCreation.py","file_name":"SetEnqueueLoadBalancerCreation.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"5"} +{"seq_id":"73956888151","text":"import datetime\nfrom django.core.paginator import Paginator\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\n\nfrom bitlipa.resources import constants, error_messages\nfrom bitlipa.apps.users.models import User\nfrom bitlipa.utils.get_object_attr import get_object_attr\nfrom bitlipa.utils.remove_dict_none_values import remove_dict_none_values\nfrom bitlipa.utils.to_int import to_int\nfrom bitlipa.utils.send_sms import send_sms\nfrom bitlipa.utils.firebase_messaging import send_notification as send_push_notification\n\n\nclass NotificationsManager(models.Manager):\n def list(self, user=None, **kwargs):\n table_fields = {**kwargs}\n page = to_int(kwargs.get('page'), 1)\n per_page = to_int(kwargs.get('per_page'), constants.DB_ITEMS_LIMIT)\n\n for key in ['page', 'per_page', 'q']:\n table_fields.pop(key, None) # remove fields not in the DB table\n\n query = None\n\n if kwargs.get('q'):\n for field in table_fields:\n query = query | models.Q(**{f'{field.replace(\"iexact\", \"icontains\")}': kwargs.get('q')}) if query else\\\n models.Q(**{f'{field.replace(\"iexact\", \"icontains\")}': kwargs.get('q')})\n query = models.Q(**{'deleted_at': None}) & query if query else 
models.Q(**{'deleted_at': None})\n else:\n query = models.Q(**{'deleted_at': None, **remove_dict_none_values(table_fields)})\n\n if get_object_attr(user, 'id'):\n query = query & models.Q(**{'recipient_id': get_object_attr(user, 'id')})\n\n object_list = self.model.objects.filter(query).order_by('-created_at')\n data = Paginator(object_list, per_page).page(page).object_list\n return {\n 'data': data,\n 'meta': {\n 'page': page,\n 'per_page': per_page,\n 'total': object_list.count()\n }\n }\n\n def create_notification(self, user, **kwargs):\n (notifications, recipients, errors) = ([], [], {})\n delivery_option = kwargs.get('delivery_option')\n errors['title'] = error_messages.REQUIRED.format('title is ') if not kwargs.get('title') else None\n errors['content'] = error_messages.REQUIRED.format('content is ') if not kwargs.get('content') else None\n errors['delivery_option'] = error_messages.REQUIRED.format('delivery option is ') \\\n if not delivery_option else None\n errors['phonenumber'] = error_messages.REQUIRED.format('phone number is ') \\\n if delivery_option == 'sms' and not kwargs.get('phonenumber') else None\n\n if delivery_option == 'in_app':\n errors['emails'] = error_messages.REQUIRED.format('recipient emails are ') \\\n if not isinstance(kwargs.get('emails'), list) or not len(kwargs.get('emails')) else None\n errors['content'] = error_messages.REQUIRED.format('notification content is ') \\\n if not isinstance(kwargs.get('content'), dict) or not len(kwargs.get('content')) else None\n errors['event_type'] = error_messages.REQUIRED.format('event type is ') \\\n if errors['content'] or not kwargs.get('content').get('event_type') else None\n\n if len(remove_dict_none_values(errors)) != 0:\n raise ValidationError(str(errors))\n\n if isinstance(kwargs.get('emails'), list) and len(kwargs.get('emails')):\n recipients = User.objects.filter(email__in=kwargs.get('emails'))\n\n if delivery_option == 'sms':\n send_sms(kwargs.get('phonenumber'), message=kwargs.get('content'))\n\n if delivery_option == 'in_app':\n receivers = list(filter(lambda token: token is not None, map(lambda r: r.firebase_token, recipients)))\n if len(receivers):\n data = {\n 'title': kwargs.get('title'),\n 'body': kwargs.get('content').get('body'),\n 'icon': kwargs.get('content').get('icon'),\n 'image': kwargs.get('content').get('image') or kwargs.get('image_url'),\n 'payload': kwargs.get('content').get('payload'),\n }\n send_push_notification(receivers, kwargs.get('content').get('event_type'), data)\n\n for recipient in (recipients if len(recipients) else [None]):\n notification = self.model()\n notification.sender = user\n notification.recipient = recipient\n notification.title = kwargs.get('title')\n notification.content = kwargs.get('content') if delivery_option != 'in_app' \\\n else kwargs.get('content').get('body')\n notification.delivery_option = delivery_option\n notification.image_url = kwargs.get('image_url')\n notifications.append(notification)\n\n if kwargs.get('save') is not False:\n self.model.objects.bulk_create(notifications)\n\n return notifications\n\n def update(self, id=None, user=None, **kwargs):\n notification = self.model.objects.get(id=id) \\\n if get_object_attr(user, \"is_admin\")\\\n else self.model.objects.get(id=id, recipient_id=get_object_attr(user, \"id\"))\n\n notification.title = kwargs.get('title') or notification.title\n notification.content = kwargs.get('content') or notification.content\n notification.delivery_option = kwargs.get('delivery_option') or notification.delivery_option\n 
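# Fields omitted from kwargs keep their stored values, so callers\n # can send partial updates without clearing the other columns.\n 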
notification.image_url = kwargs.get('image_url') or notification.image_url\n notification.status = kwargs.get('status') or notification.status\n\n notification.save(using=self._db)\n return notification\n\n def multi_update(self, user=None, **kwargs):\n ids = kwargs.get('IDs')\n if not isinstance(ids, list) or not len(ids):\n raise ValidationError(error_messages.REQUIRED.format('notification IDs are '))\n\n notifications = self.model.objects.filter(id__in=ids) \\\n if get_object_attr(user, \"is_admin\")\\\n else self.model.objects.filter(id__in=ids, recipient_id=get_object_attr(user, \"id\"))\n\n for notification in notifications:\n notification.title = kwargs.get('title') or notification.title\n notification.content = kwargs.get('content') or notification.content\n notification.delivery_option = kwargs.get('delivery_option') or notification.delivery_option\n notification.image_url = kwargs.get('image_url') or notification.image_url\n notification.status = kwargs.get('status') or notification.status\n\n self.model.objects.bulk_update(notifications, ['title', 'content', 'delivery_option', 'image_url', 'status'])\n return notifications\n\n def delete(self, id=None, user=None):\n notification = self.model.objects.get(id=id) \\\n if get_object_attr(user, \"is_admin\")\\\n else self.model.objects.get(id=id, recipient_id=get_object_attr(user, \"id\"))\n notification.deleted_at = datetime.datetime.now()\n notification.save(using=self._db)\n return notification\n","repo_name":"Jaman-dedy/lipabackend","sub_path":"bitlipa/apps/notifications/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28817175062","text":"\"\"\"\nhttps://leetcode.com/problems/jump-game\nAuthor: Andrew Jarombek\nDate: 11/24/2022\n\"\"\"\n\nfrom typing import List\n\n\ndef can_jump(nums: List[int]) -> bool:\n maximum = 0\n\n for i in range(len(nums)):\n val = nums[i]\n\n if maximum == i and i < len(nums) - 1 and val == 0:\n return False\n\n maximum = max(maximum, i + val)\n\n return True\n\n\ndef can_jump_v1(nums: List[int]) -> bool:\n if len(nums) == 1:\n return True\n\n val = nums[0]\n\n if val == 0:\n return False\n\n if val >= len(nums):\n return True\n\n for i in range(1, min(len(nums), val) + 1):\n if can_jump_v1(nums[i:]):\n return True\n\n return False\n\n\nif __name__ == '__main__':\n assert can_jump([0])\n assert not can_jump([3, 2, 1, 0, 4])\n assert can_jump([2, 3, 1, 1, 4])\n","repo_name":"AJarombek/morning-programs","sub_path":"2022/11/24/can_jump.py","file_name":"can_jump.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4616113748","text":"from typing import Any, Callable\n\nimport numpy as np\n\nfrom .context import compile_function, gpu_to_numpy, measure_time, numpy_to_gpu, sync\n\n\ndef matmul_simple_block_v1():\n fn = compile_function(\"matmul_simple_block_v1.ptx\", \"blockedMatmul\")\n evaluate_matmul_fn(fn)\n\n\ndef matmul_simple_block_v2():\n fn = compile_function(\"matmul_simple_block_v2.ptx\", \"blockedMatmulV2\")\n evaluate_matmul_fn(fn)\n\n\ndef matmul_simple_block_v3():\n fn = compile_function(\"matmul_simple_block_v3.ptx\", \"blockedMatmulV3\")\n evaluate_matmul_fn(fn)\n\n\ndef matmul_simple_block_v4():\n fn = compile_function(\"matmul_simple_block_v4.ptx\", \"blockedMatmulV4\")\n evaluate_matmul_fn(fn)\n\n\ndef matmul_inner_loop():\n fn = 
compile_function(\"matmul_inner_loop.ptx\", \"simpleMatmul\")\n evaluate_matmul_fn(fn)\n\n\ndef matmul_big_blocks():\n fn = compile_function(\"matmul_big_blocks.ptx\", \"bigBlocksMatmul\")\n\n def call_fn(A: np.ndarray, B: np.ndarray, A_buf: Any, B_buf: Any, out_buf: Any):\n fn(\n A_buf,\n B_buf,\n out_buf,\n np.int32(A.shape[0] // 32),\n grid=(\n A.shape[0] // 32,\n A.shape[1] // 32,\n 1,\n ),\n block=(32, 8, 1),\n )\n\n generic_eval_matmul(call_fn)\n\n\ndef matmul_big_blocks_v2():\n fn = compile_function(\"matmul_big_blocks_v2.ptx\", \"bigBlocksMatmulV2\")\n\n def call_fn(A: np.ndarray, B: np.ndarray, A_buf: Any, B_buf: Any, out_buf: Any):\n fn(\n A_buf,\n B_buf,\n out_buf,\n np.int32(A.shape[0] // 32),\n grid=(\n A.shape[0] // 32,\n A.shape[1] // 32,\n 1,\n ),\n block=(32, 8, 1),\n )\n\n generic_eval_matmul(call_fn)\n\n\ndef matmul_big_blocks_v3():\n fn = compile_function(\"matmul_big_blocks_v3.ptx\", \"bigBlocksMatmulV3\")\n\n def call_fn(A: np.ndarray, B: np.ndarray, A_buf: Any, B_buf: Any, out_buf: Any):\n fn(\n A_buf,\n B_buf,\n out_buf,\n np.int32(A.shape[0] // 64),\n grid=(\n A.shape[0] // 64,\n A.shape[1] // 64,\n 1,\n ),\n block=(32, 8, 1),\n )\n\n generic_eval_matmul(call_fn)\n\n\ndef matmul_big_blocks_v4():\n fn = compile_function(\"matmul_big_blocks_v4.ptx\", \"bigBlocksMatmulV4\")\n\n def call_fn(A: np.ndarray, B: np.ndarray, A_buf: Any, B_buf: Any, out_buf: Any):\n fn(\n A_buf,\n B_buf,\n out_buf,\n np.int32(A.shape[0] // 64),\n grid=(\n A.shape[0] // 64,\n A.shape[1] // 64,\n 1,\n ),\n block=(32, 8, 1),\n )\n\n generic_eval_matmul(call_fn)\n\n\ndef matmul_big_blocks_v5():\n fn = compile_function(\"matmul_big_blocks_v5.ptx\", \"bigBlocksMatmulV5\")\n\n def call_fn(A: np.ndarray, B: np.ndarray, A_buf: Any, B_buf: Any, out_buf: Any):\n fn(\n A_buf,\n B_buf,\n out_buf,\n np.int32(A.shape[0] // 64),\n grid=(\n A.shape[0] // 64,\n A.shape[1] // 64,\n 1,\n ),\n block=(32, 8, 1),\n )\n\n generic_eval_matmul(call_fn)\n\n\ndef evaluate_matmul_fn(fn: Callable):\n def call_fn(A: np.ndarray, B: np.ndarray, A_buf: Any, B_buf: Any, out_buf: Any):\n block_size = 32\n fn(\n A_buf,\n B_buf,\n out_buf,\n np.int32(A.shape[0] // block_size),\n grid=(\n A.shape[0] // block_size,\n A.shape[1] // block_size,\n 1,\n ),\n block=(block_size, block_size, 1),\n )\n\n generic_eval_matmul(call_fn)\n\n\ndef generic_eval_matmul(fn: Callable, block_mult: int = 1):\n size = 8192\n A = np.random.normal(size=[size, size]).astype(np.float32)\n B = np.random.normal(size=[size, size]).astype(np.float32)\n A_buf = numpy_to_gpu(A)\n B_buf = numpy_to_gpu(B)\n out_buf = numpy_to_gpu(A * 0)\n with measure_time() as timer:\n fn(\n A,\n B,\n A_buf,\n B_buf,\n out_buf,\n )\n sync()\n results = gpu_to_numpy(out_buf, A.shape, A.dtype)\n expected = A @ B\n print(f\"maximum absolute error of matmul is {np.abs(results - expected).max()}\")\n print(f\"time elapsed: {timer()}\")\n\n\nif __name__ == \"__main__\":\n matmul_big_blocks_v5()\n","repo_name":"unixpickle/learn-ptx","sub_path":"learn_ptx/matmul.py","file_name":"matmul.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"24782581159","text":"nums = [5, 1, 6, 0, 2, 3, 9, 4, 8, 7]\n\n\ndef quick_sort(left, right):\n if left > right:\n return\n l, r, key = left, right, nums[left]\n while l < r:\n while l < r and nums[r] >= key:\n r -= 1\n nums[l] = nums[r]\n while l < r and nums[l] < key:\n l += 1\n nums[r] = nums[l]\n nums[l] = key\n quick_sort(left, l - 1)\n quick_sort(l 
+ 1, right)\n\n\nquick_sort(0, len(nums) - 1)\nprint(nums)\n","repo_name":"xlswork/Algorithm","sub_path":"book/2.2.2快速排序(原地版).py","file_name":"2.2.2快速排序(原地版).py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39036081373","text":"nouns = [\"автомобиль\", \"лес\", \"огонь\", \"город\", \"дом\"]\r\nadverbs = [\"сегодня\", \"вчера\", \"завтра\", \"позавчера\", \"ночью\"]\r\nadjectives = [\"веселый\", \"яркий\", \"зеленый\", \"утопичный\", \"мягкий\"]\r\n\r\n\r\ndef get_jokes(num_of_j, rep=0):\r\n \"\"\"\r\n Функция для создания списка из случайных шуток, взятых из трех списков\r\n :param num_of_j: запрос необходимого кол-ва шуток\r\n :param rep: если аргумент равен 0, выдается неограниченное кол-во комбинаций, 1 - без повторений слов\r\n :return: возвращает список с шутками\r\n \"\"\"\r\n jokes = []\r\n min_pos = min(nouns, adverbs, adjectives, key=len)\r\n from random import randrange\r\n r_n = randrange(len(nouns))\r\n r_adv = randrange(len(adverbs))\r\n r_adj = randrange(len(adjectives))\r\n i = randrange(len(min_pos))\r\n while len(jokes) < int(num_of_j):\r\n if int(rep) == 0:\r\n jokes.append(f'{nouns[r_n]} {adverbs[r_adv]} {adjectives[r_adj]}')\r\n elif int(rep) != 0 and int(num_of_j) > 4:\r\n print('Превышено количество возможных шуток без повторений!')\r\n break\r\n else:\r\n jokes.append(f'{nouns.pop(i)} {adverbs.pop(i)} {adjectives.pop(i)}')\r\n return jokes\r\n\r\n\r\nprint(get_jokes(input('Введите желаемое количество шуток: '), # обращение к функции\r\n int(input('С повторениями - поставьте 0, без - поставьте 1 (не больше 4 шуток): '))))\r\n","repo_name":"BambooRonin/Ian_Lukas_dz_3","sub_path":"task_3_5.py","file_name":"task_3_5.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1509080358","text":"from sklearn.cluster import KMeans\nimport numpy\nfrom PIL import Image\n\nnumpy.set_printoptions(threshold=numpy.inf)\nimage = 'lentes.jpg'\n\n## Leer las imágenes\nImg = Image.open('images/'+image)\nImg.show()\nImg = numpy.array(Img)\n\n##Vectorizar las imagenes\nnrows,ncols,nch = Img.shape\nXimg = numpy.reshape(Img,(nrows*ncols,nch))\n\n##Agrupamiento K-Means\nk = 3\nkmeans = KMeans(n_clusters=k).fit(Ximg)\nclusters = kmeans.labels_\ncolors = [(66, 134, 244),(65, 244, 140),(235, 65, 244)]\n\n#Pintar pixeles de cada grupo acorde al color de su centroide\nfor i in range(nrows*ncols):\n for e in range(k):\n if(clusters[i] == e):\n for y in range(nch):\n Ximg[i][y] = colors[e][y]\n\nImage.fromarray(numpy.reshape(Ximg,(nrows,ncols,nch))).show()\n","repo_name":"sanemiliano/Artificial-Intelligence-Projects","sub_path":"Inteligencia Artificial 2/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22097251098","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nd1 = {'a': 1, 'b': 2, 'c':3, 'd': 9}\nd2 = {'a': 5, 'b': 6, 'c':7, 'e': 4} \n\n# get keys from each of the dictionaries\nd1_ks = [k for k in d1.keys()]\nd2_ks = [k for k in d2.keys()]\n\nall_ks = set(d1_ks + d2_ks)\n\nprint(all_ks)\n\n\n# call values from each dictionary on available keys\nd_merged = {k: [d1.get(k), d2.get(k)] for k in all_ks}\n\nprint(d_merged)\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"sandesh1504/Assignment_Solution","sub_path":"Assignment1/Assignment1.py","file_name":"Assignment1.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73493271193","text":"from collections import deque\n\ndef bfs(graph, start, end):\n prev = [-1] * len(graph.matrix[0])\n seen = [False] * len(graph.matrix[0])\n\n seen[0] = True\n queue = deque([start])\n \n while len(queue) > 0:\n curr = queue.popleft()\n neighbor_row = graph.matrix[curr]\n \n for neighbor_i in range(len(neighbor_row)):\n neighbor_weight = neighbor_row[neighbor_i]\n if neighbor_weight <= 0:\n continue\n if seen[neighbor_i]:\n continue\n \n seen[neighbor_i] = True\n queue.append(neighbor_i)\n prev[neighbor_i] = curr\n \n if prev[end] == -1:\n print(\"Couldn't find!\")\n return\n \n path = deque([end])\n curr = prev[end]\n while curr != -1:\n path.appendleft(curr)\n curr = prev[curr]\n \n print(path)\n\nclass matrix:\n def __init__(self, filename):\n self.matrix = []\n with open(filename, 'rt', encoding='utf-8') as file:\n for line in file:\n self.matrix.append(list(map(int, line.strip().split(','))))\n \n def __str__(self):\n retval = \"\"\n height = range(len(self.matrix))\n width = range(len(self.matrix[0]))\n for row in height:\n for col in width:\n retval += f\"{self.matrix[row][col]} \"\n retval += \"\\n\\n\"\n return retval\n\ndef main():\n matrix1 = matrix('graph_bfs_input.txt')\n print(matrix1)\n bfs(matrix1, 0, 4)\n \nif __name__ == \"__main__\":\n main()","repo_name":"garrett-low/leetcode","sub_path":"python/leetcode_dsa_crash_course/graph_bfs_20230903.py","file_name":"graph_bfs_20230903.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1246437019","text":"\"\"\"\nPrecision matrix can be used for predictions, so we see how they compare as time goes on\n\"\"\"\nimport numpy as np\nimport scipy\nimport pandas as pd\nimport math\nimport os\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport matplotlib\n\ndef predict_from_precision_matrix(X, prec, i):\n \"\"\"\n Predicts the value of X_i usig the rest of X and the precision matrix\n \"\"\"\n n, p = X.shape\n indices = np.arange(p)\n X_i = X[:, indices!=i]\n beta = -prec[indices!=i, i] / prec[i, i]\n X_bar = X_i @ beta\n return X_bar\n\ndef save_open_figures(prefix=\"\"):\n \"\"\"\n Saves all open figures\n \"\"\"\n figures=[manager.canvas.figure\n for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]\n\n for i, figure in enumerate(figures):\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n figure.savefig(prefix+'figure%d.png' % i)\ndf = pd.read_csv(\"s_and_p_500_daily_close_filtered.csv\", index_col=0)\n\ncompany_sectors = df.iloc[0, :].values\ncompany_names = df.T.index.values\nsectors = list(sorted(set(company_sectors)))\ndf_2 = df.iloc[1:, :]\ndf_2 = df_2.apply(pd.to_numeric)\ndf_2 = np.log(df_2) - np.log(df_2.shift(1))\nX = df_2.values[1:, :]\n\nwindow_size = 300\nslide_size = 30\nno_samples = X.shape[0]\np = X.shape[1]\nno_runs = math.floor((no_samples - window_size)/ (slide_size))\nprint(\"We're running %s times\" % no_runs)\n\ndates = df.index[2:]\n\nmatrices_folder = \"precision_matrices_lw/\"\nonlyfiles = [os.path.abspath(os.path.join(matrices_folder, f)) for f in os.listdir(matrices_folder) if os.path.isfile(os.path.join(matrices_folder, f))]\n#onlyfiles = list(map(lambda x: 
os.path.splitext(x)[0], onlyfiles))\nmatrices = []\n# Sort the files into order\nind = [int(Path(x).stem[5:]) for x in onlyfiles]\nind = np.argsort(np.array(ind))\n\nfor i in ind:\n f = onlyfiles[i]\n m = np.load(f)\n matrices.append(m)\n\ndt = pd.to_datetime(dates)\n\nfor i,G in enumerate(matrices):\n X_new = X[i*slide_size:, :]\n X_new = X_new - X_new.mean(0)\n new_n = X_new.shape[0]\n prec = matrices[i]\n res_norms = np.zeros(p)\n reses = np.zeros((new_n, p))\n for j in range(p):\n X_hat = predict_from_precision_matrix(X_new, prec, j)\n res = np.divide(X_new[:, j] - X_hat, X_new[:, j])\n res_norms[j] = np.linalg.norm(res)\n reses[:, j] = res\n\n # Get the largest residual\n ind = np.argsort(res_norms)[::-1]\n max_res = ind[0]\n min_res = ind[-1]\n\n\n ts_min = pd.Series(reses[:, min_res], index=dt[i*slide_size:])\n ts_max = pd.Series(reses[:, max_res], index=dt[i*slide_size:])\n\n print(\"Min error is %s, %s\" % (company_names[min_res], company_sectors[min_res]))\n print(\"Max error is %s, %s\" % (company_names[max_res], company_sectors[max_res]))\n\n fig = plt.figure()\n ts_min.plot()\n plt.title(\"Residual for company %s at %s (min)\" % (company_names[min_res], i))\n plt.savefig(\"res_%s_%s\" % (i, min_res))\n plt.close()\n\n fig = plt.figure()\n ts_max.plot()\n plt.title(\"Residual for company %s at %s (max)\" % (company_names[max_res], i))\n plt.savefig(\"res_%s_%s\" % (i, max_res))\n plt.close()\n #values = pd.DataFrame()\n\n #values[\"Predicted\"] = pd.Series(X_hat, index=dt[i*slide_size:])\n #values[\"Actual\"] = pd.Series(X_new[:, 0], index=dt[i*slide_size:])\n\n #fig = plt.figure()\n #values.plot()\n #plt.title(\"Predicted vs Actual for company %s at %s\" % (j, i))\n #plt.savefig(\"pred_%s_%s\" % (i, j))\n #plt.close()\n\n \n#save_open_figures(\"financial_networks_graphml_\")\nplt.close('all')","repo_name":"shazzzm/financial_network_inference","sub_path":"prediction_networks.py","file_name":"prediction_networks.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"12356317248","text":"import pandas as pd\n#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.colors as colors\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nimport json\nimport socket, struct\n\ndef optimise_k_means(data,max_k):\n\n\tmeans = []\n\tinertias = []\n\n\tfor k in range(1,max_k):\n\t\tkmeans = KMeans(n_clusters=k)\n\t\tkmeans.fit(data)\n\t\tmeans.append(k)\n\t\tinertias.append(kmeans.inertia_)\n\n\tfig = plt.subplots(figsize=(10,5))\n\tplt.plot(means, inertias, 'o-')\n\tplt.xlabel(\"Number of Clusters\")\n\tplt.ylabel(\"Inertia\")\n\tplt.grid(True)\n\tplt.show()\n\n\n#with open('/home/gonzalo/Escritorio/IBERDROLA/dataset.json') as f:\n# data = json.load(f)\n\ndef cluster():\n\tdata = pd.read_json('/home/gonzalo/Escritorio/IBERDROLA/dataset.json')\n\n\tcolumns = []\n\tfor col in data.columns:\n\t columns.append(col)\n\n\tdata = data.to_numpy()\n\n\tfor d in data:\n\t\ttry:\n\t\t\td[0]=int.from_bytes(bytes(d[0], 'utf-8'), byteorder='big')\n\t\texcept:\n\t\t\td[0]=d[0]\t\n\t\td[1]=int.from_bytes(bytes(d[1], 'utf-8'), byteorder='big')\n\t\td[2]=0\n\t\td[3]=int.from_bytes(bytes(d[3], 'utf-8'), byteorder='big')\n\t\td[4]=int.from_bytes(bytes(d[4], 'utf-8'), byteorder='big')\n\t\td[5]=int.from_bytes(bytes(d[5], 'utf-8'), byteorder='big')\n\t\td[6]=0\n\t\td[7]=int.from_bytes(bytes(d[7], 'utf-8'), 
byteorder='big')\n\t\td[9]=int.from_bytes(bytes(d[9], 'utf-8'), byteorder='big')\n\n\n\tdf = pd.DataFrame(data=data,columns=columns)\n\n\t#optimise_k_means(df[['alert','platform','destination-ip','source-ip','time','priority','hostname']],30)\n\t#optimise_k_means(df[['priority','time']],30)\n\tkmeans = KMeans(n_clusters=9)\n\tkmeans.fit(df[['priority','time']])\n\t#kmeans.fit(df[['alert','platform','destination-ip','source-ip','time','priority','hostname']])\n\tdf['KMeans'] = kmeans.labels_\n\n\t#print(df)\n\n\tdf1 = df[['platform','priority','time','KMeans']]\n\n\tdf1.to_csv('/home/gonzalo/Escritorio/IBERDROLA/cluster.csv')\n\n\t\n\tlabels = kmeans.predict(df[['priority','time']])\n\tC = kmeans.cluster_centers_\n\tcolores=['red','green','blue','cyan','yellow','purple','brown','black','orange']\n\tasignar=[]\n\tfor row in labels:\n\t asignar.append(colores[row])\n\n\n\t# Getting the values and plotting it\n\tf1 = df1['time'].values\n\tf2 = df1['priority'].values\n\t \n\tplt.scatter(f1, f2, c=asignar, s=70)\n\tplt.scatter(C[:, 0], C[:, 1], marker='*', c=colores, s=1000)\n\tplt.show()\n\n\n\n\t\n\n\n\nif __name__==\"__main__\":\n\tcluster()","repo_name":"MrChamizo98/Clusterizado","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37381935815","text":"from collections import deque\n\nanswer = 1e9\ndef solution(rectangle, characterX, characterY, itemX, itemY):\n maps = set()\n # 직사각형 테두리선의 각 좌표 추가하기\n for lx, ly, rx, ry in rectangle:\n for x in range(lx, rx):\n maps.add(((x, x+1), ly))\n maps.add(((x, x+1), ry))\n \n for y in range(ly, ry):\n maps.add((lx, (y, y+1)))\n maps.add((rx, (y, y+1)))\n \n # 직사각형 내부 선의 각 좌표 제거하기 (겹치는 부분들)\n for lx, ly, rx, ry in rectangle:\n for x in range(lx+1, rx):\n for y in range(ly, ry):\n if (x, (y, y+1)) in maps :\n maps.remove((x, (y, y+1)))\n for y in range(ly+1, ry):\n for x in range(lx, rx):\n if ((x, x+1), y) in maps :\n maps.remove(((x, x+1), y))\n \n # 최단거리 찾기\n visited = dict(zip(maps, [False]*len(maps))) # 방문 배열\n q = deque([(characterX, characterY, 0)])\n \n while q:\n x, y, cnt = q.popleft()\n\n if x == itemX and y == itemY : # 아이템에 도달한 경우 stop\n return cnt\n \n # 상하좌우 방향으로 이동한다.\n if ((x-1, x), y) in maps and not visited[((x-1, x), y)]: # 좌\n q.append((x-1, y, cnt+1))\n visited[((x-1, x), y)] = True\n if ((x, x+1), y) in maps and not visited[((x, x+1), y)]: # 우\n q.append((x+1, y, cnt+1))\n visited[((x, x+1), y)] = True\n if (x, (y, y+1)) in maps and not visited[(x, (y, y+1))]: # 위\n q.append((x, y+1, cnt+1))\n visited[(x, (y, y+1))] = True\n if (x, (y-1, y)) in maps and not visited[(x, (y-1, y))]: # 아래\n q.append((x, y-1, cnt+1))\n visited[(x, (y-1, y))] = True","repo_name":"songhee-lee/2023-python-coding-test","sub_path":"Programmers/05. 아이템 줍기.py","file_name":"05. 
아이템 줍기.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9441516631","text":"infile = open('day10.in', 'r')\n\nN = infile.readline().strip()\n\n\ndef gen(n):\n res = ''\n i = 0\n j = 1\n curr = n[0]\n while j < len(n):\n if n[j] != curr:\n res += str(j - i)\n res += n[i]\n curr = n[j]\n i = j\n j += 1\n\n res += str(j - i)\n res += n[i]\n return res\n\n\nfor _ in range(40):\n N = gen(N)\nprint(len(N))\n\nfor _ in range(10):\n N = gen(N)\nprint(len(N))","repo_name":"AdamHe17/advent-of-code","sub_path":"2015/python/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36320720245","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 2 18:46:13 2020\r\n\r\n@author: Chewnnakesava Reddy\r\n\"\"\"\r\n\r\n#Write a python program to concate two dictionaries?\r\nd1={'A':1,'B':2}\r\nd2={'C':3}\r\nd1.update(d2)\r\nprint(\"Concatenated dictionary is:\")\r\nprint(d1)\r\n\r\n","repo_name":"ChennakesavaReddy123/assignment6","sub_path":"assignment6_6.3_concate_two_dictonaries.py","file_name":"assignment6_6.3_concate_two_dictonaries.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38220365967","text":"import os\r\nimport multiprocessing\r\nfrom threading import Thread\r\n\r\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nimport numpy as np\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\nfrom client import *\r\nfrom server import *\r\nfrom attack import *\r\n\r\ngpus = tf.config.experimental.list_physical_devices('GPU')\r\nif gpus:\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\ntf.keras.backend.set_floatx('float64')\r\naccuracy_list = []\r\npoison_accuracy_list = []\r\ntimes_list = []\r\nif __name__ == '__main__':\r\n multiprocessing.set_start_method('spawn')\r\n parameter = get_parameter(dataset_name='cifar-10', non_iid_p=0.5, malicious_clients_p=0.25,\r\n aggregation_method='median', defense_method=None, attack_mothod='ad')\r\n\r\n num_client = parameter['num_client']\r\n dataset_name = parameter['dataset_name']\r\n model_name = parameter['model_name']\r\n input_shape = parameter['input_shape']\r\n classes_num = parameter['classes_num']\r\n non_iid_p = parameter['non_iid_p']\r\n malicious_clients_p = parameter['malicious_clients_p']\r\n aggregation_method = parameter['aggregation_method']\r\n defense_method = parameter['defense_method']\r\n attack_mothod = parameter['attack_mothod']\r\n global_epoch = 500\r\n target_label = 3\r\n\r\n X_train, X_test, y_train, y_test = get_datas(dataset_name)\r\n client_datasets_X, client_datasets_y = make_non_iid_datasets(X_train, y_train, non_iid_p, num_client, classes_num)\r\n\r\n # Initialize client and server\r\n server = Server(X_test, y_test, model_name, classes_num, int(num_client * malicious_clients_p), *input_shape)\r\n # weights = np.load('temp_weights.npy', allow_pickle=True)\r\n # server.model.set_weights(weights)\r\n # server.server_model_test(classes_num, target_label)\r\n\r\n datasets_index = np.argsort(np.array([len(client_datasets_X[i]) for i in range(len(client_datasets_X))]))\r\n # datasets_index = np.random.choice(np.arange(num_client), num_client, replace=False)\r\n benign_clients_list = []\r\n for i in 
range(num_client - int(num_client * malicious_clients_p)):\r\n dataset_index = datasets_index[i]\r\n benign_clients_list.append(Client(client_datasets_X[dataset_index], client_datasets_y[dataset_index]))\r\n malicious_clients_list = []\r\n for i in range(num_client - int(num_client * malicious_clients_p), num_client):\r\n dataset_index = datasets_index[i]\r\n malicious_clients_list.append(Client(client_datasets_X[dataset_index], client_datasets_y[dataset_index]))\r\n\r\n # federal training\r\n for epoch in tqdm(range(global_epoch)):\r\n server_model_weights = server.model.get_weights()\r\n pool = multiprocessing.Pool(processes=8)\r\n begin_result_list = []\r\n for benign_client in benign_clients_list:\r\n begin_result_list.append(pool.apply_async(benign_client.client_train, (server_model_weights, model_name, classes_num, *input_shape)))\r\n\r\n pool.close()\r\n pool.join()\r\n\r\n for i in range(len(begin_result_list)):\r\n benign_clients_list[i] = begin_result_list[i].get()\r\n\r\n if attack_mothod == 'krum':\r\n attack_by_krum(malicious_clients_list, server_model_weights, model_name, classes_num, *input_shape)\r\n elif attack_mothod == 'ad':\r\n attack_by_krum1(malicious_clients_list, server_model_weights, model_name, classes_num, *input_shape)\r\n elif attack_mothod == 'scaling':\r\n attack_by_scaling(malicious_clients_list, classes_num, server_model_weights, model_name, target_label, 6,\r\n *input_shape)\r\n elif attack_mothod == 'label-flipping':\r\n attack_by_label_flipping(malicious_clients_list, classes_num, server_model_weights, model_name,\r\n *input_shape)\r\n elif attack_mothod == 'gaussian':\r\n attack_by_gaussian_noise(malicious_clients_list, server_model_weights, model_name, classes_num,\r\n *input_shape)\r\n elif attack_mothod == 'mean':\r\n attack_by_trimmed_mean(malicious_clients_list, server_model_weights, model_name, classes_num,\r\n *input_shape)\r\n elif attack_mothod == 'median':\r\n attack_by_trimmed_median(malicious_clients_list, server_model_weights, model_name, classes_num,\r\n *input_shape)\r\n elif attack_mothod == 'uncertain':\r\n pool = multiprocessing.Pool(processes=10)\r\n malicious_result_list = []\r\n malicious_result_list.append(pool.apply_async(attack_by_uncertain_samples, (malicious_clients_list, server_model_weights, model_name, classes_num,\r\n target_label, *input_shape)))\r\n pool.close()\r\n pool.join()\r\n malicious_result_list = malicious_result_list[0].get()\r\n for i in range(len(malicious_result_list)):\r\n malicious_clients_list[i] = malicious_result_list[i]\r\n\r\n if defense_method == None:\r\n temp_time = server.aggregation(aggregation_method, benign_clients_list + malicious_clients_list)\r\n times_list.append(temp_time)\r\n elif defense_method == 'err':\r\n server.err(benign_clients_list + malicious_clients_list, aggregation_method)\r\n elif defense_method == 'lfr':\r\n server.lfr(benign_clients_list + malicious_clients_list, aggregation_method)\r\n elif defense_method == 'union':\r\n server.union(benign_clients_list + malicious_clients_list, aggregation_method)\r\n else:\r\n assert 1 == 0, 'defense method error'\r\n accuracy, poison_accuracy = server.server_model_test(classes_num, target_label)\r\n accuracy_list.append(accuracy)\r\n poison_accuracy_list.append(poison_accuracy)\r\n print('epoch:{},accuracy:{:.4f},posison_accuracy:{}'.format(epoch, accuracy, poison_accuracy))\r\n\r\n np.save(\r\n 'nc_{}_d_{}_np_{}_mp_{}_e_{}_ag_{}_d_{}_at_{}.npy'.format(str(num_client), str(dataset_name),\r\n str(non_iid_p),\r\n str(malicious_clients_p), 
global_epoch,\r\n aggregation_method,\r\n\r\n defense_method, attack_mothod),\r\n [accuracy_list, poison_accuracy_list])\r\n np.save('temp_weights.npy', server.model.get_weights())\r\n # np.save('minist-flvs-times.npy', times_list)\r\n","repo_name":"liangxyswpu/lxyCode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34270095401","text":"from anasymod.sim.sim import Simulator\nfrom anasymod.vivado import VivadoControl\n\nclass VivadoSimulator(Simulator):\n def simulate(self):\n # set up the simulation commands\n v = VivadoControl()\n\n # create a new project\n v.create_project(project_name=self.cfg.vivado_config.project_name,\n project_directory=self.target.project_root,\n force=True)\n\n # add all source files to the project (including header files)\n v.add_project_sources(content=self.target.content)\n\n # define the top module\n v.set_property('top', f\"{{{self.target.cfg.top_module}}}\", '[get_filesets {sim_1 sources_1}]')\n\n # set define variables\n v.add_project_defines(content=self.target.content, fileset='[get_filesets {sim_1 sources_1}]')\n\n # launch the simulation\n v.set_property('{xsim.simulate.runtime}', '{-all}', '[get_fileset sim_1]')\n v.println('launch_simulation')\n\n # run the simulation\n v.run(vivado=self.cfg.vivado_config.vivado, build_dir=self.cfg.build_root, filename='vivado_sim.tcl')","repo_name":"rowhit/anasymod","sub_path":"anasymod/sim/vivado.py","file_name":"vivado.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"71509083353","text":"\"\"\"\nThis module defines the Graph Neural Network, which is the first part of the agent.\n - GraphCNN is used to get embedding features of all stages.\n - GraphSNN is used to get the job level and global level embedding summarizations.\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\n\nclass GraphCNN:\n \"\"\"\n This Graph Convolutional Neural Network is used to get embedding features of each stage (node)\n via parameterized message passing scheme.\n \"\"\"\n def __init__(self, inputs, input_dim, hidden_dims, output_dim, max_depth, activate_fn, scope='gcn'):\n self.inputs = inputs\n self.input_dim = input_dim # length of original feature x\n self.hidden_dims = hidden_dims # dim of hidden layers of f and g\n self.output_dim = output_dim # length of embedding feature e\n self.max_depth = max_depth # maximum depth of root-leaf message passing\n self.activate_fn = activate_fn\n self.scope = scope\n\n # args.max_depth tensors\n self.adj_mats = [tf.sparse_placeholder(tf.float32, [None, None]) for _ in range(self.max_depth)]\n self.masks = [tf.placeholder(tf.float32, [None, 1]) for _ in range(self.max_depth)]\n\n # h: x --> x'\n self.prep_weights, self.prep_bias = init(self.input_dim, self.hidden_dims, self.output_dim, self.scope)\n # f: x' --> e\n self.proc_weights, self.proc_bias = init(self.output_dim, self.hidden_dims, self.output_dim, self.scope)\n # g: e --> e\n self.agg_weights, self.agg_bias = init(self.output_dim, self.hidden_dims, self.output_dim, self.scope)\n\n self.outputs = self.forward()\n\n def forward(self):\n \"\"\"\n Get embedding features of each node for all jobs simultaneously and use masks to filter non-runnable nodes.\n \"\"\"\n # message passing among nodes\n # the information is flowing from leaves to roots\n x = self.inputs\n # raise x into higher dimension\n for 
layer in range(len(self.prep_weights)):\n x = tf.matmul(x, self.prep_weights[layer])\n x += self.prep_bias[layer]\n x = self.activate_fn(x)\n\n for d in range(self.max_depth):\n # work flow: index_select -> f -> masked assemble via adj_mat -> g\n y = x\n # process the features on the nodes\n for layer in range(len(self.proc_weights)):\n y = tf.matmul(y, self.proc_weights[layer])\n y += self.proc_bias[layer]\n y = self.activate_fn(y)\n\n # message passing\n y = tf.sparse_tensor_dense_matmul(self.adj_mats[d], y)\n\n # aggregate child features\n for layer in range(len(self.agg_weights)):\n y = tf.matmul(y, self.agg_weights[layer])\n y += self.agg_bias[layer]\n y = self.activate_fn(y)\n\n # remove the artifact from the bias term in g\n y = y * self.masks[d]\n # assemble neighboring information\n x = x + y\n return x\n\n\nclass GraphSNN:\n \"\"\"\n The Graph Summarization Neural Network is used to get the DAG level and global level embedding features.\n \"\"\"\n def __init__(self, inputs, input_dim, hidden_dims, output_dim, activate_fn, scope='gsn'):\n \"\"\"\n Use stage (node) level summarization to obtain the DAG level summarization.\n Use DAG level summarization to obtain the global embedding summarization.\n \"\"\"\n self.inputs = inputs\n self.input_dim = input_dim\n self.hidden_dims = hidden_dims\n self.output_dim = output_dim\n self.activate_fn = activate_fn\n self.scope = scope\n\n self.summ_levels = 2\n self.summ_mats = [tf.sparse_placeholder(tf.float32, [None, None]) for _ in range(self.summ_levels)]\n self.job_summ_weights, self.job_summ_bias = init(self.input_dim, self.hidden_dims, self.output_dim, self.scope)\n self.global_summ_weights, self.global_summ_bias = init(self.output_dim, self.hidden_dims, self.output_dim, self.scope)\n\n self.summaries = self.forward()\n\n def forward(self):\n \"\"\"\n Get the job level and global level summary.\n \"\"\"\n # summarize information in each hierarchy\n x = self.inputs\n\n summaries = []\n # DAG level summary\n s = x\n for i in range(len(self.job_summ_weights)):\n s = tf.matmul(s, self.job_summ_weights[i])\n s += self.job_summ_bias[i]\n s = self.activate_fn(s)\n\n s = tf.sparse_tensor_dense_matmul(self.summ_mats[0], s)\n summaries.append(s)\n\n # global level summary\n for i in range(len(self.global_summ_weights)):\n s = tf.matmul(s, self.global_summ_weights[i])\n s += self.global_summ_bias[i]\n s = self.activate_fn(s)\n\n s = tf.sparse_tensor_dense_matmul(self.summ_mats[1], s)\n summaries.append(s)\n return summaries\n\n\ndef init(input_dim, hidden_dims, output_dim, scope):\n \"\"\"\n GNN parameter initialization.\n :return list of weights (tf tensors) and list of biases (tf tensors)\n \"\"\"\n weights, bias = [], []\n cur_in_dim = input_dim\n\n # hidden layers param init\n for hid_dim in hidden_dims:\n weights.append(glorot_init(shape=[cur_in_dim, hid_dim], scope=scope))\n bias.append(zeros(shape=[hid_dim], scope=scope))\n cur_in_dim = hid_dim\n\n # output layer param init\n weights.append(glorot_init(shape=[cur_in_dim, output_dim], scope=scope))\n bias.append(zeros(shape=[output_dim], scope=scope))\n\n return weights, bias\n\n\ndef glorot_init(shape, dtype=tf.float32, scope='default'):\n \"\"\"\n The initialization method proposed by Xavier Glorot & Yoshua Bengio in AISTATS '10.\n \"\"\"\n with tf.variable_scope(scope):\n init_range = np.sqrt(6. 
/ (shape[0] + shape[1]))\n return tf.Variable(tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=dtype))\n\n\ndef zeros(shape, dtype=tf.float32, scope='default'):\n \"\"\"\n Used to init bias.\n \"\"\"\n with tf.variable_scope(scope):\n return tf.Variable(tf.zeros(shape, dtype=dtype))\n","repo_name":"hliangzhao/RL-Scheduling","sub_path":"algo/learn/graph_nn.py","file_name":"graph_nn.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"5"} +{"seq_id":"1807654216","text":"from Optimizacion.Blocks import Blocks\nfrom Optimizacion.Expresiones.Expresion import Expression\nfrom Optimizacion.Gotos.Goto import Goto\nfrom Optimizacion.Gotos.If import If\nfrom Optimizacion.Instructions.Asignacion import Assignment\nfrom Optimizacion.Instructions.Label import Label\nfrom Optimizacion.repOptimizacion import repOptimizacion\n\n\nclass Optimizador:\n\n def __init__(self, packages, temps, code):\n self.packages = packages\n self.temps = temps\n self.code = code\n self.blocks = []\n self.optimizaciones = []\n\n def getCode(self):\n ret = f'package main;\\n\\nimport (\\n\\t\"{self.packages}\"\\n);\\n'\n for temp in self.temps:\n ret = ret + f'var {temp}\\n'\n ret = ret + '\\n'\n\n for func in self.code:\n ret = ret + func.getCode() + '\\n\\n'\n return ret\n\n def Bloques(self):\n self.blocks = []\n self.GenerarBloques()\n\n # APLICAR REGLAS A NIVEL LOCAL Y GLOBAL\n\n def GenerarBloques(self):\n self.GenerarLideres()\n self.CrearBloques()\n self.ConnectBloques()\n\n def GenerarLideres(self):\n # Por cada funcion\n for func in self.code:\n # La primera instrucción de tres direcciones en el código intermedio es líder\n func.instr[0].isLeader = True\n\n # Cualquier instrucción que siga justo después de un salto\n # condicional o incondicional es líder\n flag = False\n for instr in func.instr:\n if flag:\n instr.isLeader = True\n flag = False\n if type(instr) is Goto or type(instr) is If:\n flag = True\n\n def CrearBloques(self):\n # Por cada funcion\n for func in self.code:\n # Bloques de la funcion actual\n blocks = []\n block = None\n for instr in func.instr:\n if instr.isLeader:\n # Si ya hay un bloque creado. Agregarlo al arreglo de bloques\n if block is not None:\n blocks.append(block)\n block = Blocks(instr)\n block.code.append(instr)\n # EOF\n blocks.append(block)\n # Guardo mis bloques de la funcion\n self.blocks.append(blocks)\n\n def ConnectBloques(self):\n # Por cada arreglo de bloques en una función\n for func in self.blocks:\n prevBlock = None\n # Por cada bloque en la funcion. 
Los uniremos en cascada\n for block in func:\n if prevBlock is None:\n prevBlock = block\n continue\n prevBlock.nexts.append(block)\n prevBlock = block\n\n # Revisar saltos entre bloques\n for block in func:\n # Obtener ultima instruccion\n lastIns = block.code[len(block.code) - 1]\n if type(lastIns) is Goto or type(lastIns) is If:\n label = lastIns.label\n # Revisando todos los bloques\n for check in func:\n if type(check.first) is Label and check.first.id == label:\n block.nexts.append(check)\n break\n\n def Mirilla(self):\n # Por cada funcion\n for func in self.code:\n tamanio = 20\n\n # Mientras no nos hemos pasado del tamaño (Fin del código)\n while tamanio <= len(func.instr):\n flagOpt = False\n\n # Darle 5 pasadas al codigo con el tamaño actual\n for i in range(5):\n aux = 0\n # Dar una pasada completa\n while (tamanio + aux) <= len(func.instr):\n flagOpt = flagOpt or self.Regla1(func.instr[0 + aux: tamanio + aux])\n flagOpt = flagOpt or self.Regla2(func.instr[0 + aux: tamanio + aux])\n flagOpt = flagOpt or self.Regla3(func.instr[0 + aux: tamanio + aux])\n flagOpt = flagOpt or self.Regla6(func.instr[0 + aux: tamanio + aux])\n flagOpt = flagOpt or self.Regla7(func.instr[0 + aux: tamanio + aux])\n aux = aux + 1\n\n # Si no hubo optimizacion en la pasada, subir el tamaño\n if not flagOpt:\n tamanio = tamanio + 20\n\n def Regla1(self, array):\n ret = False\n\n for i in range(len(array)):\n actual = array[i]\n apariciones = 0\n exp_actual = actual.getCode()\n if type(actual) is Assignment:\n\n for j in range(len(array)):\n if j <= i:\n continue\n comp = array[j]\n if not comp.deleted and type(comp) is Label:\n apariciones += 1\n break\n if not actual.deleted and not comp.deleted and type(actual) is Assignment and \\\n type(comp) is Assignment and actual is not comp:\n # if actual.place.getCode() == comp.place.getCode():\n # apariciones += 1\n if comp.exp.getCode() == actual.place.getCode() \\\n and comp.place.getCode() == actual.exp.getCode() and apariciones < 1:\n optimizado = exp_actual\n exp_actual += comp.getCode()\n comp.deleted = True\n self.agregar_optimizacion('Mirilla', 'Regla 1', exp_actual, optimizado, actual.line)\n exp_actual = ''\n ret = True\n cambia = False\n return ret\n\n def Regla2(self, array):\n ret = False\n\n for i in range(len(array)):\n actual = array[i]\n apariciones = 0\n eliminar = False\n inicio = i\n fin = 0\n if not actual.deleted and type(actual) is Goto:\n exp_actual = actual.getCode() + '\\n'\n optimizado = ''\n for j in range(len(array)):\n\n if j <= i:\n continue\n comp = array[j]\n exp_actual += comp.getCode() + '\\n'\n if not comp.deleted and type(comp) is Label and comp.id != actual.label:\n apariciones += 1\n if type(comp) is Label and comp.id == actual.label:\n eliminar = True\n optimizado = comp.getCode()\n fin = j\n break\n if eliminar and apariciones < 1:\n for num in range(inicio, fin):\n array[num].deleted = True\n ret = True\n\n self.agregar_optimizacion('Mirilla', 'Regla 2', exp_actual, optimizado, actual.line)\n return ret\n\n def Regla3(self, array):\n ret = False\n for i in range(len(array) - 2):\n actual = array[i]\n if type(actual) is If and not actual.deleted:\n nextIns = array[i + 1]\n if type(nextIns) is Goto and not nextIns.deleted:\n sigIns = array[i + 2]\n exp_actual = actual.getCode() + '\\n'\n exp_actual += nextIns.getCode() + '\\n'\n exp_actual += sigIns.getCode()\n if type(sigIns) is Label and not sigIns.deleted:\n if sigIns.id == actual.label:\n actual.condition.getContrary()\n actual.label = nextIns.label\n nextIns.deleted 
= True\n array[i + 2].deleted = True\n ret = True\n optimizada = actual.getCode()\n self.agregar_optimizacion('Mirilla', 'Regla 3', exp_actual, optimizada, actual.line)\n return ret\n\n def Regla6(self, array):\n ret = False\n for i in range(len(array)):\n actual = array[i]\n if type(actual) is Assignment and not actual.deleted and isinstance(actual.exp, Expression):\n exp_actual = actual.getCode()\n if actual.selfAssignment():\n actualOpt = actual.exp.neutralOps()\n if actualOpt:\n ret = True\n actual.deleted = True\n optimizada = actual.getCode()\n self.agregar_optimizacion('Mirilla', 'Regla 6', exp_actual, optimizada, actual.exp.line)\n return ret\n\n def Regla7(self, array):\n ret = False\n\n for i in range(len(array)):\n actual = array[i]\n if type(actual) is Assignment and not actual.deleted and isinstance(actual.exp, Expression):\n exp_actual = actual.getCode()\n if actual.exp.eliminar_neutros():\n optimizada = actual.getCode()\n self.agregar_optimizacion('Mirilla', 'Regla 7', exp_actual, optimizada, actual.exp.line)\n ret = True\n return ret\n\n def agregar_optimizacion(self, tipo, regla, original, optimizada, fila):\n opt = repOptimizacion(tipo, regla, original, optimizada, fila)\n self.optimizaciones.append(opt)\n\n def generar_reporte(self):\n cuerpohtml = \"
<html><head><title>Reporte Tabla Simbolos</title></head>\"\n cuerpohtml += \"<body> \"\n cuerpohtml += \"<table border='1'> \"\n cuerpohtml += \"<tr>\" + \"<th>Regla \" \\\n \"aplicada</th>\" + \"<th>Expresion \" \\\n \"original</th>\" + \\\n \"<th>Expresion optimizada</th></tr> \"\n\n for opt in self.optimizaciones:\n\n opt.expresion_original = opt.expresion_original.replace('<', '&lt;')\n opt.expresion_original = opt.expresion_original.replace('>', '&gt;')\n opt.expresion_original = opt.expresion_original.replace('{', '&#123;')\n opt.expresion_original = opt.expresion_original.replace('}', '&#125;')\n\n opt.expresion_optimizada = opt.expresion_optimizada.replace('<', '&lt;')\n opt.expresion_optimizada = opt.expresion_optimizada.replace('>', '&gt;')\n opt.expresion_optimizada = opt.expresion_optimizada.replace('{', '&#123;')\n opt.expresion_optimizada = opt.expresion_optimizada.replace('}', '&#125;')\n cuerpohtml += f'<tr><td>{opt.regla}</td><td>{opt.expresion_original}</td>' \\\n f'<td>{opt.expresion_optimizada}</td></tr>'\n\n cuerpohtml += '</table></body></html>'\n return cuerpohtml\n","repo_name":"Villa01/OLC2_P2_201900907","sub_path":"Optimizacion/Optimizador.py","file_name":"Optimizador.py","file_ext":"py","file_size_in_byte":12322,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40501082466","text":"\"\"\"Настройки логгера.\"\"\"\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n },\n 'json': {\n 'class': 'pythonjsonlogger.jsonlogger.JsonFormatter',\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'formatter': 'console',\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://sys.stdout',\n },\n 'file': {\n 'formatter': 'json',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': 'logs/kafka_writer.log',\n 'mode': 'a',\n 'delay': '1',\n 'maxBytes': 1000000,\n 'backupCount': 3,\n },\n },\n 'loggers': {\n 'kafka': {\n 'level': 'ERROR',\n 'handlers': [\n 'console',\n 'file',\n ],\n },\n },\n 'root': {\n 'level': 'INFO',\n 'handlers': [\n 'console',\n ],\n },\n}\n","repo_name":"mbr55aa/UGC","sub_path":"kafka_rw/logger/logger_config.py","file_name":"logger_config.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"hi","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"130678813","text":"import asyncio\nimport base64\nimport json\nimport uuid\nfrom base64 import urlsafe_b64decode, urlsafe_b64encode\nfrom datetime import date, datetime, timezone\nfrom enum import Enum\nfrom json import JSONEncoder, dumps, loads\nfrom time import time\nfrom typing import Any, List\nfrom uuid import UUID\n\nfrom aiohttp.web import Request\nfrom boto3.dynamodb.types import Binary, Decimal\nfrom dateutil.relativedelta import relativedelta\nfrom pydantic import BaseModel # pylint: disable=no-name-in-module\nfrom typing_extensions import override\n\nfrom ._exceptions import AWSFrameworkException\n\n\ndef datetime_string(dtime: datetime) -> str:\n \"\"\"Return a human readable string representing 'dtime' whether in the past or in the future\"\"\"\n\n now = datetime.now(timezone.utc)\n delta = relativedelta(now, dtime) if now > dtime else relativedelta(dtime, now)\n\n time_units = [\"years\", \"months\", \"days\", \"hours\", \"minutes\", \"seconds\"]\n\n for unit in time_units:\n value = getattr(delta, unit)\n if value != 0:\n ago_or_in = \"ago\" if now > dtime else \"in\"\n time_str = f\"{abs(value)} {unit[:-1] if abs(value) == 1 else unit}\"\n break\n else:\n time_str, ago_or_in = \"Just now\", \"\"\n\n return f\"{dtime.strftime('%A %d %B %Y @%I:%M:%S %p')} 
\n\n\ndef parse_json(json_string):\n    try:\n        return loads(json_string, object_hook=hook)\n    except ValueError:\n        pass\n\n\ndef hook(dct):\n    if \"@date\" in dct:\n        return datetime.strptime(dct[\"@date\"], \"%Y-%m-%dT%H:%M:%S.%f\")\n    if \"@bytes\" in dct:\n        return urlsafe_b64decode(dct[\"@bytes\"].encode(\"utf-8\"))\n    return dct\n\n\ndef to_json(dct, pretty=True, sort_keys=True):\n    if pretty:\n        return dumps(\n            dct,\n            cls=SwaggerEncoder,\n            sort_keys=sort_keys,\n            indent=4,\n            separators=(\", \", \": \"),\n            allow_nan=False,\n            ensure_ascii=True,\n        )\n    return dumps(\n        dct,\n        cls=SwaggerEncoder,\n        sort_keys=sort_keys,\n        separators=(\",\", \":\"),\n        allow_nan=False,\n        ensure_ascii=True,\n    )\n\n\nclass SwaggerEncoder(JSONEncoder):\n    @override\n    def default(self, obj):\n        if isinstance(obj, datetime):\n            return datetime_string(obj)\n        if isinstance(obj, date):\n            return {\"@date\": obj.isoformat()}\n        if isinstance(obj, (bytes, bytearray)):\n            return {\"@bytes\": urlsafe_b64encode(obj).decode(\"utf-8\")}\n        if isinstance(obj, BaseModel):\n            return obj.dict()\n        if isinstance(obj, Request):\n            if obj.content_type in (\n                \"application/json\",\n                \"application/x-www-form-urlencoded\",\n            ):\n                data = parse_json(obj.content.read_nowait().decode())\n                if data:\n                    return {\n                        \"method\": obj.method,\n                        \"path\": obj.path,\n                        \"headers\": dict(obj.headers),\n                        \"body\": data,\n                    }\n            elif obj.content_type == \"multipart/form-data\":\n                data = {}\n                for k, v in asyncio.run(obj.post()).items():\n                    _v = None\n                    try:\n                        _v = loads(v)\n                    except (ValueError, TypeError):\n                        _v = \"file\"\n                    data[k] = _v\n            else:\n                data = None\n            return {\n                \"method\": obj.method,\n                \"query_params\": dict(obj.query),\n                \"path_params\": dict(obj.match_info),\n                \"headers\": dict(obj.headers),\n                \"body\": data,\n            }\n        return super().default(obj)\n\n\ndef jsonable_encoder(\n    obj: Any,\n    *,\n    include: List[str] = [],\n    exclude: List[str] = [],\n    by_alias: bool = False,\n    skip_defaults: bool = False,\n    custom_encoder: Any = None,\n) -> Any:\n    \"\"\"\n    Convert any object to a JSON-serializable object.\n\n    This function is used by Aiofauna to convert objects to JSON-serializable objects.\n\n    It supports all the types supported by the standard json library, plus:\n\n    * datetime.datetime\n    * datetime.date\n    * datetime.time\n    * uuid.UUID\n    * enum.Enum\n    * pydantic.BaseModel\n    \"\"\"\n\n    if custom_encoder is None:\n        custom_encoder = SwaggerEncoder\n\n    if obj is str:\n        return \"string\"\n    if obj is int or obj is float:\n        return \"integer\"\n    if obj is bool:\n        return \"boolean\"\n    if obj is None:\n        return \"null\"\n    if obj is list:\n        return \"array\"\n    if obj is dict:\n        return \"object\"\n    if obj is bytes:\n        return \"binary\"\n    if obj is datetime:\n        return \"date-time\"\n    if obj is date:\n        return \"date\"\n    if obj is time:\n        return \"time\"\n    if obj is UUID:\n        return \"uuid\"\n    if obj is Enum:\n        return \"enum\"\n    if isinstance(obj, (str, int, float, bool, type(None))):\n        return obj\n    if isinstance(obj, (list, tuple, set, frozenset)):\n        return [\n            jsonable_encoder(\n                v,\n                include=include,\n                exclude=exclude,\n                by_alias=by_alias,\n                skip_defaults=skip_defaults,\n                custom_encoder=custom_encoder,\n            )\n            for v in obj\n        ]\n    if isinstance(obj, dict):\n        return {\n            jsonable_encoder(\n                k,\n                include=include,\n                exclude=exclude,\n                by_alias=by_alias,\n                skip_defaults=skip_defaults,\n                custom_encoder=custom_encoder,\n            ): jsonable_encoder(\n                v,\n                include=include,\n                exclude=exclude,\n                by_alias=by_alias,\n                skip_defaults=skip_defaults,\n                custom_encoder=custom_encoder,\n            )\n            for k, v in obj.items()\n        }\n    if isinstance(obj, bytes):\n        
return base64.b64encode(obj).decode()\n if isinstance(obj, (set, frozenset)):\n return [\n jsonable_encoder(\n v,\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n skip_defaults=skip_defaults,\n custom_encoder=custom_encoder,\n )\n for v in obj\n ]\n if isinstance(obj, datetime):\n return obj.isoformat()\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, UUID):\n return str(obj)\n return custom_encoder().default(obj)\n\n\nclass AWSEncoder(json.JSONEncoder):\n\n \"\"\"JSON encoder for AWS objects\"\"\"\n\n def default(self, o) -> Any:\n \"\"\"Default encoder\"\"\"\n if isinstance(o, datetime):\n return datetime_string(o)\n if isinstance(o, uuid.UUID):\n return str(o)\n if isinstance(o, Decimal):\n return str(o)\n if isinstance(o, Binary):\n return base64.b64encode(o.value).decode(\"utf-8\")\n if isinstance(o, dict):\n return {k: self.default(v) for k, v in o.items()}\n if isinstance(o, list):\n return [self.default(e) for e in o]\n if isinstance(o, BaseModel):\n return o.dict()\n if isinstance(o, AWSFrameworkException):\n return o.json()\n return super().default(o)\n","repo_name":"obahamonde/aws_workshop","sub_path":"aws_framework/_fmt.py","file_name":"_fmt.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24810667448","text":"from collections import defaultdict\n\nfrom django.db import transaction\nfrom guessapp.models import Wordlist\n\n\nclass WordlistImporter:\n def __init__(self, lines: set):\n self._wordlist_raters = defaultdict(WordlistRater)\n self.__readlines(lines)\n\n def __readlines(self, lines: set):\n for line in lines:\n word = line.strip()\n word_len = len(word)\n is_word = True\n\n for i in range(1, word_len):\n if word[i].isupper():\n is_word = False\n break\n\n if is_word:\n self._wordlist_raters[word_len].add(word.lower())\n\n @transaction.atomic\n def persist(self, name: str) -> int:\n wordlist = Wordlist.objects.create(name=name)\n word_count = 0\n\n for rater in self._wordlist_raters.values():\n for word in rater.words:\n wordlist.words.create(word=word, rating=rater.rate(word))\n word_count += 1\n wordlist.save()\n\n return word_count\n\n\nclass WordlistRater:\n def __init__(self):\n self.words = set()\n self._character_counts = defaultdict(int)\n\n def add(self, word: str):\n self.words.add(word)\n self.__add_to_char_count(word)\n\n def __add_to_char_count(self, word: str):\n for char in word:\n self._character_counts[char] += 1\n\n def rate(self, word: str) -> int:\n rating = 0\n for char in set(word):\n rating += self._character_counts[char]\n return rating\n","repo_name":"marwils/wordguess","sub_path":"guessapp/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"289017592","text":"#%% Imports\r\nimport requests\r\n# %%\r\n\r\nresponse = requests.get(\"https://restcountries.com/v3.1/region/europe\")\r\nresponse.status_code\r\n# %%\r\neurope = {\r\n c['cca3']:{\r\n 'name': c['name']['common'],\r\n 'borders': c['borders'],\r\n }\r\n for c in response.json()\r\n \r\n if 'borders' in c\r\n}\r\n\r\n##comprehensions - přepis smyčky\r\neurope\r\n# %% Save\r\n\r\nimport pickle\r\n\r\nwith open('europe.pkl', mode='bw') as f:\r\n pickle.dump(europe, f)\r\n\r\n# 
%%\r\n#.....","repo_name":"vojtechBobcik/UmelaInteligence","sub_path":"mapa.py","file_name":"mapa.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5815049532","text":"#!/public/software/apps/anaconda3/5.3.0/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Wed Jun 8 18:23:55 2022\n\n@author: 马钰斌\n\nxshell use utf-8 \n\n这个程序是一个TBF的实例程序的torch部分,\n\n第一步:需要输入训练数据,验证数据,测试数据,\n\t1.计算一个y所需要的x1,x2...xm,这个是m = feature_count .\n\n第二步:网络架构设计:设置有几层,每层有几个节点,哪类激活函数\n\t1.每层有几个节点在point_count,feature,n = point_count 设置;\n\t2.有几层这个在nn.sequential{}中设置,然后在后面的o = {激活函数的个数} \n\t3.在nn.sequential 中激活函数和线性层交替排列,现代深度学习一般relu为激活函数\t 也可以使sigmod、tanh等,这个在torch最好设为一样的,在KBF目前只能设置成一样的。\n\n第三步:设置优化方法,主要是对优化方法,npoch,batch_size,等进行修改,以达到最好的优化效果,得到最优参数W,b,c,d\n\n第四步:查看训练的结果,两张图,方差,均方根误差等决定需不需要再次重复上面步骤。\n\n第五步:在训练效果很好,得到正确参数��情况下,传输模型参数先输出为txt文件,\n\t然后用fortran读取到KBF中。\n\t1.检查m,n,o,function_kind,shuchucanshu.txt\n\t2.检查w_input.txt w_dense.txt w_output.txt\n\t3.检查b_input.txt b_dense.txt b_output.txt \n\n第六步:将*.txt 传输到torch_bridge_fortran 文件夹中\n之后的操作留给fortran程序来解决。\n\n-------------------------------------------------------------------\n进一步优化计划\n我觉得得对训练质量进行控制,不然什么结果都往里面放的话,fortran再调用了就会很麻烦。\n\n\n这个示例程序,用ERA5的资料进行深度学习训练边界层高度模型\n主要可以修改的部分是因子选择,训练方法,绘图等等\n\n\"\"\"\n\n#time \nimport datetime\n\n# PyTorch\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\n# For data preprocess\nimport numpy as np\nimport csv\nimport os\nimport pandas as pd\nimport xarray as xr\nimport math\n\n# For plotting\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\n\n#for earthlab no GPU\nplt.switch_backend('agg')\n\n#==================================================================================\n#数据准备\n\npath = 'h_small.nc' #路径和文件\n#用xarray输入数据\ndata=xr.open_dataset(path)\n#输入数据\nlon = data.longitude.data\nlat = data.latitude.data\ntime= data.time.data \n\nt2m = data.t2m.data \nbld = data.bld.data \nzust= data.zust.data \ngwd = data.gwd.data \nsst = data.sst.data \nskt = data.skt.data \nslhf= data.slhf.data \nssr = data.ssr.data \nst = data.str.data \nsp = data.sp.data \nssh = data.sshf.data\n\npblh= data.blh.data\n\n#将数据转为一维数据,-1 表示默认待定\nt2m1 = t2m.reshape(-1,1)\nbld1 = bld.reshape(-1,1)\nzust1= zust.reshape(-1,1) \ngwd1 = gwd.reshape(-1,1) \nsst1 = sst.reshape(-1,1) \nskt1 = skt.reshape(-1,1) \nslhf1= slhf.reshape(-1,1)\nssr1= ssr.reshape(-1,1) \nst1 = st.reshape(-1,1) \nsp1 = sp.reshape(-1,1) \nssh1= ssh.reshape(-1,1)\n\npblh1= pblh.reshape(-1,1) \n\n#将一维列数据拼接成矩阵,hstack 水平拼接,vstack垂直拼接:\ndata_for_dp = np.hstack((t2m1,bld1,zust1,gwd1,sst1,skt1,slhf1,ssr1,st1,sp1,ssh1,pblh1))\n\n#第一个索引表示行,第二表示列,选取训练数据量\n#训练数据\ndata_train = data_for_dp[0:3000,:]\n#测试数据,只给因子,不给y,:-1表示到倒数第二列\ndata_test = data_for_dp[3001:5000,:-1]\n\n#转化为pandas的数据结构:表,为了后面使用to_save函数\ndata_train = pd.DataFrame(data_train)\ndata_test =pd.DataFrame(data_test)\n\n#保存数据到csv\ndata_test.to_csv('test.csv',index=False, header=None)\ndata_train.to_csv('train.csv',index=False, header=None)\n\n# feature = [0,1,2,3,4,5,6,7,8,9,10]\n# feature_for_train=[0,1,2,3,4,5,6,7,8,9,10,11]\n\n#对因子选择\n#feature = [2,6,10]\nfeature = [0,2,6,10]\n#训练时需要加上y\n#feature_for_train=[2,6,10,11]\nfeature_for_train=[0,2,6,10,11]\n#统计因子个数\nfeature_count = np.size(feature)\n#每层的节点数\npoint_count = 64 #每层的节点数\n\n\n#%%=======================================================================\n\ntr_path = 'train.csv' # 训练数据的路径\ntt_path = 'test.csv' # 
测试数据的路径\n\nmyseed = 42069 # 设置随机数种子\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(myseed)\ntorch.manual_seed(myseed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed_all(myseed)\n\n# 一些实用函数\n\n#获得计算设备:是CPU还是GPU,是否支持显卡加速计算\ndef get_device():\n ''' 获得设备 ( 如果 GPU 可用, 则用 GPU) '''\n return 'cuda' if torch.cuda.is_available() else 'cpu'\n\n#函数:绘制学习曲线\ndef plot_learning_curve(loss_record, title=''):\n '''绘制DNN学习曲线(训练和验证的误差函数)'''\n total_steps = len(loss_record['train'])\n\n x_1 = range(total_steps)\n x_2 = x_1[::len(loss_record['train']) // len(loss_record['dev'])]\n\n figure(figsize=(6, 4))\n\n plt.plot(x_1, loss_record['train'], c='tab:red', label='train')\n plt.plot(x_2, loss_record['dev'], c='tab:cyan', label='dev')\n\n plt.ylim(0.0, 2000.)\n plt.xlabel('Training steps')\n plt.ylabel('MSE loss')\n plt.title('Learning curve of {}'.format(title))\n plt.legend()\n\n plt.savefig(r'learncurve.jpg')\n# plt.show()#在超算上不能够直接显示\n\n\n#函数:绘制验证集和预测值的对比图,验证模型训练效果\ndef plot_pred(dv_set, model, device, lim=600, preds=None, targets=None):\n ''' 绘制DNN模型输出和验证集 '''\n if preds is None or targets is None:\n model.eval()\n preds, targets = [], []\n for x, y in dv_set:\n x, y = x.to(device), y.to(device)\n with torch.no_grad():\n pred = model(x)\n preds.append(pred.detach().cpu())\n targets.append(y.detach().cpu())\n preds = torch.cat(preds, dim=0).numpy()\n targets = torch.cat(targets, dim=0).numpy()\n\n figure(figsize=(5, 5))\n plt.scatter(targets, preds, c='r', alpha=0.5)\n plt.plot([-0.2, lim], [-0.2, lim], c='b')\n plt.xlim(-0.2, lim)\n plt.ylim(-0.2, lim)\n plt.xlabel('ground truth value')\n plt.ylabel('predicted value')\n plt.title('Ground Truth v.s. Prediction')\n plt.savefig(r'preds.jpg')\n# plt.show()\n\n\n\n\n\n\n \n#未来需要实现的功能:每次训练之后的数据,模型,代码,图片等都备份到一个特殊命名文件夹中\n\n\n#数据加载和前处理\nclass myDataset(Dataset):\n ''' 数据集的加载和预处理'''\n def __init__(self,\n path,\n mode='train',\n target_only=False):\n self.mode = mode\n\n # 将数据读到numpy数组中\n with open(path, 'r') as fp:\n data = list(csv.reader(fp))\n data = np.array(data[:])[:, :].astype(float)\n\n#给feats 赋值根据target_only\n if not target_only:\n feats = list((feature))\n else:\n feats = list((feature))\n\n#根据test或者是train或者是dev分配数据\n if mode == 'test':\n # 测试数据 \n data = data[:, feats]\n self.data = torch.FloatTensor(data)\n else:\n # 训练数据,-表示倒数第一个\n target = data[:, -1]\n data = data[:, feats]\n \n # 将训练数据分为训练集和验证集,这里是每十个里面选一个作为验证集\n if mode == 'train':\n indices = [i for i in range(len(data)) if i % 10 != 0]\n elif mode == 'dev':\n indices = [i for i in range(len(data)) if i % 10 == 0]\n \n # 将数据转化为PyTorch的数据格式 tensor\n self.data = torch.FloatTensor(data[indices])\n self.target = torch.FloatTensor(target[indices])\n\n\n #对数据正则化(可选,对参数优化有帮助) = 距平/标准差\n # self.data[:, :] = (self.data[:, :] - self.data[:, :].mean(dim=0, keepdim=True)) / self.data[:, :].std(dim=0, keepdim=True)\n\n\t#统计数据个数\n self.dim = self.data.shape[1]\n\n print('结束读取 {} 集, ({} 样本被输入, 每个 有 {} 变量)'\n .format(mode, len(self.data), self.dim))\n\n def __getitem__(self, index):\n # Returns one sample at a time\n if self.mode in ['train', 'dev']:\n # For training\n return self.data[index], self.target[index]\n else:\n # For testing (no target)\n return self.data[index]\n\n def __len__(self):\n # 返回数据尺寸\n return len(self.data)\n \n\ndef prep_dataloader(path, mode, batch_size, n_jobs=0, target_only=False):\n '''建立一个数据集,然后将它放在dataloader中 '''\n dataset = myDataset(path, mode=mode, target_only=target_only) # 建立数据集\n dataloader = DataLoader(\n dataset, 
batch_size,\n shuffle=(mode == 'train'), drop_last=False,\n num_workers=n_jobs, pin_memory=True) # 建立dataloader\n return dataloader\n\n\n# # 深度神经网络\n# \n# NeuralNet 被设计用来深度学习回归方程\n# 这个DNN含有多个全连接层和多个ReLU激活函数\n# 这个网络还定义了一个误差函数来计算误差\n# \nclass NeuralNet(nn.Module):\n ''' 一个简单的全连接深度神经网络(DNN) '''\n def __init__(self, input_dim):\n super(NeuralNet, self).__init__()\n # 在这里定义神经网络\n\t#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n\n self.net = nn.Sequential(\n\n nn.Linear(input_dim ,point_count),\n\n\t nn.ReLU(),\n\n nn.Linear(point_count,point_count),\n\n\t nn.ReLU(),\n\n nn.Linear(point_count,point_count),\n\n\t nn.ReLU(),\n\n nn.Linear(point_count,point_count),\n\n\t nn.ReLU(),\n\n nn.Linear(point_count,point_count),\n\n\t nn.ReLU(),\n\n nn.Linear(point_count, 1)\n\n )\n\n # 均方根误差\n self.criterion = nn.MSELoss(reduction='mean')\n\t#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n def forward(self, x):\n ''' Given input of size (batch_size x input_dim), compute output of the network '''\n return self.net(x).squeeze(1)\n\n def cal_loss(self, pred, target):\n ''' 计算损失函数 '''\n # 这里可以选择正则化\n return self.criterion(pred, target)\n\n#=================================================================================================================\n\n# ## **训练**\n\n\ndef train(tr_set, dv_set, model, config, device):\n ''' DNN 训练 '''\n\n n_epochs = config['n_epochs'] # 最大训练轮数\n\n # 设置优化\n optimizer = getattr(torch.optim, config['optimizer'])(\n model.parameters(), **config['optim_hparas'])\n\n min_mse = 100000. #能够被保存模型的 最小的误差\n loss_record = {'train': [], 'dev': []} # 为了保存训练的误差函数\n\n early_stop_cnt = 0#初始化提前终止步数\n epoch = 0#初始化轮数\n while epoch < n_epochs:\n model.train() # 设置模型到训练模式\n for x, y in tr_set: # 通过dataloader迭代\n optimizer.zero_grad() # 设置梯度为0\n x, y = x.to(device), y.to(device) # 移动数据到设备中 (cpu/cuda)\n pred = model(x) # 计算输出forward pass (compute output)\n mse_loss = model.cal_loss(pred, y) # 计算代价函数(误差函数)\n mse_loss.backward() # 计算梯度 (反向传播算法)\n optimizer.step() # 更新模型参数\n loss_record['train'].append(mse_loss.detach().cpu().item())\n\n # 结束每一轮训练后,使用验证集测试模型\n dev_mse = dev(dv_set, model, device)\n# print(epoch)\n if dev_mse < min_mse:\n # 如果模型优化了,则保存模型\n min_mse = dev_mse\n print('保存模型(epoch = {:4d}, loss = {:.4f})'\n .format(epoch + 1, min_mse))\n torch.save(model.state_dict(), config['save_path']) # 保存模型到特定的路径\n early_stop_cnt = 0\n else:\n early_stop_cnt += 1\n\n epoch += 1\n loss_record['dev'].append(dev_mse)\n if early_stop_cnt > config['early_stop']:\n #结束训练如果模型很久不更新参数了 ,需要在超参数中设置提前终止选项和轮数\"config['early_stop']\"\n break\n\n print('结束训练!经历了 {} 轮!'.format(epoch))\n return min_mse, loss_record\n\n#-----------------------------------------------------------------------------------------------------------------\n# ## **验证**\n\ndef dev(dv_set, model, device):\n model.eval() # 设置模型到评估模式\n total_loss = 0\n for x, y in dv_set: # 通过dataloader 迭代\n x, y = x.to(device), y.to(device) # 移动数据到‘设备’ (cpu/cuda)\n with torch.no_grad(): # 关闭梯度计算\n pred = model(x) # 计算输出forward pass (compute output)\n mse_loss = model.cal_loss(pred, y) # 计算代价函数\n total_loss += mse_loss.detach().cpu().item() * len(x) # 计算累计误差\n total_loss = total_loss / len(dv_set.dataset) # 计算平均误差\n\n return total_loss\n\n#-----------------------------------------------------------------------------------------------------------------\n# ## **测试**\n\ndef test(tt_set, model, device):\n model.eval() # 设置模型到评估模式\n preds = []\n for x in tt_set: # 通过dataloader 迭代\n x = x.to(device) # 移动数据到‘设备’ (cpu/cuda)\n with 
torch.no_grad(): # 关闭梯度计算\n pred = model(x) # 计算输出forward pass (compute output)\n preds.append(pred.detach().cpu()) # 收集预测值\n preds = torch.cat(preds, dim=0).numpy() # 收集所有的预测值并转化为一个numpy array 数组\n return preds\n\n#================================================================================================================\n\n#%%\n# \n# # **设置超参数**\n# \n# 设置训练的超参数和存放模型的路径\n\ndevice = get_device() # 获得可用设备 ('cpu' 或者 'cuda')\nos.makedirs('models', exist_ok=True) # 训练的模型将会放在当前目录下的models中 ./models/\ntarget_only = False #可以在选择因子时使用\n\n# 改变优化超参数去改进模型训练\n#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n#根据数据量改变优化参数\n\nconfig = {\n 'n_epochs': 3000, # 最大epochs数\n 'batch_size': 300, # mini-batch 尺寸,batch包含的数据量\n 'optimizer': 'SGD', # 优化算法选择 optimization algorithm (optimizer in torch.optim)\n 'optimizer': 'Adam', \n 'optim_hparas': { #优化算法的超参数(取决于选取的优化算法)\n# 'lr': 0.001, # SGD(随机梯度下降算法)的学习率\n# 'momentum': 0.09 # SGD的动能\n },\n 'early_stop': 100, # 提前终止的最大步数 (自从上次模型参数更新的轮数,如果这么多步仍然没有更新则提前终止优化)1起到正则化作用,2减少计算资源消耗\n 'save_path': 'models/model.pth' # 模型保存路径\n}\n\n#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n# # **加载数据和模型**\n\ntr_set = prep_dataloader(tr_path, 'train', config['batch_size'], target_only=target_only)\n\ndv_set = prep_dataloader(tr_path, 'dev', config['batch_size'], target_only=target_only)\n\ntt_set = prep_dataloader(tt_path, 'test', config['batch_size'], target_only=target_only)\n\nmodel = NeuralNet(tr_set.dataset.dim).to(device) #实例化(创建)模型并移动到设备\n\n\n# # **开始训练!**\n\nmodel_loss, model_loss_record = train(tr_set, dv_set, model, config, device) # 训练过程\n\nplot_learning_curve(model_loss_record, title='deep model') #绘制学习曲线 \n\ndel model\nmodel = NeuralNet(tr_set.dataset.dim).to(device)\nckpt = torch.load(config['save_path'], map_location='cpu') # 加载最好的模型\nmodel.load_state_dict(ckpt)\t#加载模型参数\n\n\n\nplot_pred(dv_set, model, device) # 绘制验证集和预测值对比 \n\n\n# # **测试**\n# 利用测试集和模型计算的预测值将会被保存在pred.csv 中\n\ndef save_pred(preds, file):\n ''' 保存预测值到文件中 '''\n print('保存结果到 {}'.format(file))\n with open(file, 'w') as fp:\n writer = csv.writer(fp)\n writer.writerow(['id', 'tested_positive'])\n for i, p in enumerate(preds):\n writer.writerow([i, p])\n\n\npreds = test(tt_set, model, device) #使用测试集和训练好的深度模型 预测 y\n\nsave_pred(preds, 'pred.csv') # 保存预测数据到 pred.csv 文件\n\n\n##========================================================================\n#模型参数传递torch->TBF->Fortran\n\n\n# 计算模型参数总数。 \ntotal = sum(p.numel() for p in model.parameters())\nprint(\"总参数个数: %.2f\" % (total))\n\n#将pth 的模型参数输出出来\n\n#有几个因子 m = ?\nm = len(feature)\n\n#有几层 o = ?\no = ( len(list(model.net)) -1 )//2 #自动获取层数,减去输出层然后线性层和激活函数层的和整除以2\n\n#每层有几个节点 n = ?\nn = point_count\n\n#输入层的权重和偏差\nw_input = model.net[0].weight.data.cpu().numpy()\nb_input = model.net[0].bias.data.cpu().numpy()\n\n#print( model.net[0].weight.data.cpu().numpy())\nnp.savetxt('w_input.txt',w_input,fmt='%f')\nnp.savetxt('b_input.txt',b_input,fmt='%f')\n\n#中间层的权重和偏差\nos.system('rm w_dense.txt ')\nos.system('rm b_dense.txt')\n\nfor i in range(2,2*o,2):# range(x,y) 不包含y \n#\tprint(model.net[i].weight.data.cpu().numpy())\n\n\tw_dense = model.net[i].weight.data.cpu().numpy()\n\tb_dense = model.net[i].bias.data.cpu().numpy()\n\n\twith open(\"w_dense.txt\",\"ab\") as f:#追加写入模式\n\t\tnp.savetxt(f,w_dense,fmt = '%f')\n\twith open(\"b_dense.txt\",\"ab\") as g:\n\t\tnp.savetxt(g,b_dense,fmt = '%f')\n\n\n#输出层的权重和误差\n\n#print( 
model.net[2*o].weight.data.cpu().numpy())\n\nw_output = model.net[2*o].weight.data.cpu().numpy()\nb_output = model.net[2*o].bias.data.cpu().numpy()\n\nnp.savetxt('w_output.txt',w_output,fmt='%f')\nnp.savetxt('b_output.txt',b_output,fmt='%f')\n\n\n# Write the dimension parameters to a file\n#\nnote = open('shuchucanshu.txt','w')\ncurrent_time = datetime.datetime.now()\nnote.write(str(current_time))\nnote.write('\\n存放深度学习模型数据,用于输入到TBF中')\n\nnote.write('\\nm\\n')\nnote.write(str(m))\n\nnote.write('\\nn\\n')\nnote.write(str(n))\n\nnote.write('\\no\\n')\nnote.write(str(o))\n\nnote.close()\n\n# Copy the txt files into the TBF folder:\nos.system('cp *.txt /data/chengxl/pblh_deeplearning/torch_bridge_fortran/') \n# Some useful snippets\n# Inspect the x batches inside tt_set\n\n#for x in tt_set:\n#\tprint(x)\n\n","repo_name":"2469488573/torch_to_fortran-","sub_path":"python/deep_learning_regression.py","file_name":"deep_learning_regression.py","file_ext":"py","file_size_in_byte":19655,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"18774969287","text":"\"\"\"\nAssignment:\n    Prompt the user for a positive integer, n. n > 0. Check for user errors\n    (e.g., entering a character instead of a number)\n    The user should, however, be allowed to enter \"q\" and bypass the program.\n    Write a recursive program which returns the nth row of Pascal's triangle.\n    Examples:\n        n = 1, return 1\n        n = 2, return [1, 1]\n        n = 5, return [1, 4, 6, 4, 1]\n\"\"\"\n\n# prompt the user for the generation, validate their input\ndef UserInput():\n\n    while True:\n        usr = input(\"Please enter a positive number: \")\n        try:\n            usr = int(usr)\n            if usr > 0:\n                return usr\n        except ValueError:\n            if usr == 'q':\n                return usr\n\n# generate Pascal triangle, will need the iteration, the user defined limit('n')\n# and the current line (which is a list)\ndef Generate(itr, n, line):\n    #print(f\"iteration {itr}, line length: {len(line)}\")\n    #print(line)\n    # assign the first value of line to the new line\n    new_line = [line[0]]\n    # while the iteration is less than the user defined limit ('n')\n    while itr < n:\n        # go through the entire rest of the line\n        for i in range(len(line)):\n            # i is at the end of the line, assign the value\n            if i == len(line) - 1:\n                new_line.append(line[i])\n            # middle values need to be calculated\n            else:\n                new_line.append(line[i] + line[i+1])\n\n        # increase the iteration by 1\n        itr += 1\n        # if the iteration has reached the user defined limit, then return\n        # the new_line we generated\n        if itr == n:\n            return new_line\n        else:\n            # call Generate to get the next line\n            # catch the return line when exiting the recursion\n            new_line = Generate(itr, n, new_line)\n            # break out of the loop\n            break\n\n    # after breaking out of the loop, return the line that was caught\n    return new_line\n\n# start at the 1st iteration\nitr = 1\nn = UserInput()\nif n != 'q':\n    line = [1]\n    end = Generate(itr, n, line)\n    print(end)\nelse:\n    print(\"Exiting program...\")\n","repo_name":"SanGuillao/PlayGround","sub_path":"Pascal_Trig/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37352926860","text":"\nfrom threading import Thread\nfrom pod_structure import Pod\nfrom Communication.tcpserver import TCPComm\nfrom Communication.data_processing import Data_Processing\n\ndef main():\n    pod = Pod()\n    comm = TCPComm()\n    processing = Data_Processing(comm)\n\n    try:\n        tcp_thread = Thread(target=comm.connect) \n        data_thread = Thread(target=processing.run)\n\n        
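# Start both worker threads: the TCP connection handler and the\n        # data-processing loop; the except block below handles cleanup on error.\n        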
tcp_thread.start()\n        data_thread.start() \n        \n    except:\n        comm.stop_signal = True\n        tcp_thread.join()\n        comm.close()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"HyperloopatVCU/FlightComputer","sub_path":"tests/test_comm.py","file_name":"test_comm.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19010841840","text":"import matplotlib.pyplot as plt\r\nfrom pylab import mpl\r\nimport xlrd\r\n\r\ndef liner_fitting(data_x, data_y):\r\n    size = len(data_x)\r\n    i = 0\r\n    sum_xy = 0\r\n    sum_y = 0\r\n    sum_x = 0\r\n    sum_sqare_x = 0\r\n    while i < size:\r\n        sum_xy += data_x[i] * data_y[i]\r\n        sum_y += data_y[i]\r\n        sum_x += data_x[i]\r\n        sum_sqare_x += data_x[i] * data_x[i]\r\n        i += 1\r\n    a = (size * sum_xy - sum_x * sum_y) / (size * sum_sqare_x - sum_x * sum_x)\r\n    print('a =', a)\r\n    return a\r\n\r\ndef calculate(data_x, a):\r\n    datay = []\r\n    for x in data_x:\r\n        datay.append(a * x)\r\n    return datay\r\n\r\ndef draw(data_x, data_y_new, data_y_old):\r\n    plt.plot(data_x, data_y_new, label=\"拟合曲线\", color=\"black\")\r\n    plt.scatter(data_x, data_y_old, label=\"离散数据\")\r\n    mpl.rcParams['font.sans-serif'] = ['SimHei']\r\n    mpl.rcParams['axes.unicode_minus'] = False\r\n    plt.title(\"一元线性拟合数据\")\r\n    plt.legend(loc=\"upper left\")\r\n    plt.show()\r\n\r\nif __name__==\"__main__\":\r\n    data_excel = 'D:\\\\2\\\\2.xlsx'\r\n    excel_workbook = xlrd.open_workbook(data_excel)\r\n    data_table = excel_workbook.sheets()[0]\r\n    nrows = data_table.nrows  # number of rows\r\n    ncols = data_table.ncols  # number of columns\r\n    # print(nrows, ncols)\r\n    x = []\r\n    y = []\r\n    for i in range(0, nrows):\r\n        all_rowdata = data_table.row_values(i)\r\n        x.append(all_rowdata[0])\r\n        y.append(all_rowdata[1])\r\n    parameter = liner_fitting(x, y)\r\n    draw_data = calculate(x, parameter)\r\n    draw(x, draw_data, y)\r\n","repo_name":"isekeylabSunQixiang/Generalized-least-squares","sub_path":"2019.5.2square.py","file_name":"2019.5.2square.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23781734285","text":"import json\nimport os\nfrom datetime import datetime\nfrom time import time\nimport git\nimport torch\n\nfrom consts import NULL_ID_FOR_COREF\n\n\ndef flatten_list_of_lists(lst):\n    return [elem for sublst in lst for elem in sublst]\n\n\ndef flatten_list_of_lists_by_num(lst, n, step):\n    if n == -1:\n        return flatten_list_of_lists(lst)\n    new_lst = []\n    curr = []\n    idx = 0\n    start = 0\n    count = 0\n    # print(\"total len\", len(lst))\n    while idx < len(lst):\n        l = lst[idx]\n\n        if not (count % n):\n            if curr:\n                new_lst.append(curr)\n                # print(\"start\", start)\n                # print(\"count\", count)\n                # print(\"idx\", idx)\n            start += step\n            count = 0\n            idx = start\n            curr = []\n\n        for w in l:\n            curr.append(w)\n        idx += 1\n        count += 1\n\n    if curr:\n        new_lst.append(curr)\n    return new_lst\n\n\ndef mention_to_cluster(clusters):\n    mentions_to_clusters = dict()\n    for cluster in clusters:\n        # print(\"mention_to_cluster\", cluster)\n        for mention in cluster:\n            mentions_to_clusters[mention] = tuple(cluster)\n    return mentions_to_clusters\n\n\ndef check_float(string):\n    try:\n        f = float(string)\n        return True\n    except Exception as e:\n        return False\n\n\ndef extract_clusters_from_text_causal(text, max_clusters):\n    split_text = text.split()\n    clusters_memory = dict()\n    for idx, word in enumerate(split_text):\n        clusters = []\n        # found cluster mark\n        if word.startswith(\"(\") and 
word.endswith(\")\"):\n clusters = [cluster_idx for cluster_idx in word.strip(\")\").strip(\"(\").strip(\" \").split(\",\") if check_float(cluster_idx)]\n\n for cluster_idx in clusters:\n if cluster_idx not in clusters_memory:\n clusters_memory[cluster_idx] = []\n clusters_memory[cluster_idx].append(idx)\n\n clusters_memory = [(cluster_idx, token_indices) for cluster_idx, token_indices in clusters_memory.items()]\n clusters_dict = dict()\n for cluster_idx, word_indices in clusters_memory:\n cluster = []\n prev = None\n start = None\n for index, word_index in enumerate(word_indices):\n if index == (len(word_indices) - 1):\n if prev == (word_index - 1):\n cluster.append((start, word_index))\n else:\n if start is not None and prev is not None:\n cluster.append((start, prev))\n cluster.append((word_index, word_index))\n else:\n if prev is None:\n start = word_index\n prev = word_index\n\n elif prev == (word_index - 1):\n prev = word_index\n\n else:\n cluster.append((start, prev))\n start = word_index\n prev = word_index\n try:\n cluster_idx = int(cluster_idx if \".\" not in cluster_idx else cluster_idx.split(\".\")[0])\n if cluster_idx not in clusters_dict:\n clusters_dict[cluster_idx] = cluster\n else:\n clusters_dict[cluster_idx] += cluster\n except Exception as e:\n continue\n\n clusters = []\n for i in range(max_clusters):\n if i in clusters_dict:\n clusters.append(clusters_dict[i])\n else:\n clusters.append([])\n return clusters\n\n\ndef extract_clusters_from_text(text):\n split_text = text.split()\n clusters_memory = dict()\n for idx, word in enumerate(split_text):\n # found cluster mark\n # if word.startswith(\"[\") and word.endswith(\"]\"):\n # clusters = [cluster_idx for cluster_idx in word.strip(\" \").strip(\"]\").split(\",\") if check_float(cluster_idx)]\n clusters = [cluster_idx for cluster_idx in word.split(\",\") if check_float(cluster_idx)]\n for cluster_idx in clusters:\n if cluster_idx == \"0\":\n continue\n elif cluster_idx not in clusters_memory:\n clusters_memory[cluster_idx] = []\n clusters_memory[cluster_idx].append(idx)\n\n clusters_memory = [(cluster_idx, token_indices) for cluster_idx, token_indices in clusters_memory.items()]\n clusters_dict = dict()\n for cluster_idx, token_indices in clusters_memory:\n cluster = []\n prev = None\n start = None\n for index, token_idx in enumerate(token_indices):\n if index == (len(token_indices) - 1):\n if prev == (token_idx - 1):\n cluster.append((start, token_idx))\n else:\n if start is not None and prev is not None:\n cluster.append((start, prev))\n cluster.append((token_idx, token_idx))\n else:\n if prev is None:\n start = token_idx\n prev = token_idx\n\n elif prev == (token_idx - 1):\n prev = token_idx\n\n else:\n cluster.append((start, prev))\n start = token_idx\n prev = token_idx\n try:\n cluster_idx = int(cluster_idx if \".\" not in cluster_idx else cluster_idx.split(\".\")[0])\n if cluster_idx not in clusters_dict:\n clusters_dict[cluster_idx] = cluster\n else:\n clusters_dict[cluster_idx] += cluster\n except Exception as e:\n continue\n\n clusters = [c[1] for c in sorted([(cluster_idx, cluster) for cluster_idx, cluster in clusters_dict.items()], key=lambda x: x[0])]\n return clusters\n\n\ndef extract_clusters(gold_clusters):\n gold_clusters = [tuple(tuple(m) for m in gc if NULL_ID_FOR_COREF not in m) for gc in gold_clusters.tolist()]\n gold_clusters = [cluster for cluster in gold_clusters if len(cluster) >= 0]\n return gold_clusters\n\n\ndef extract_mentions_to_predicted_clusters_from_clusters(gold_clusters):\n 
mention_to_gold = {}\n for gc in gold_clusters:\n for mention in gc:\n mention_to_gold[tuple(mention)] = gc\n return mention_to_gold\n\n\ndef extract_clusters_for_decode(mention_to_antecedent):\n mention_to_antecedent = sorted(mention_to_antecedent)\n mention_to_cluster = {}\n clusters = []\n for mention, antecedent in mention_to_antecedent:\n if antecedent in mention_to_cluster:\n cluster_idx = mention_to_cluster[antecedent]\n clusters[cluster_idx].append(mention)\n mention_to_cluster[mention] = cluster_idx\n else:\n cluster_idx = len(clusters)\n mention_to_cluster[mention] = cluster_idx\n mention_to_cluster[antecedent] = cluster_idx\n clusters.append([antecedent, mention])\n clusters = [tuple(cluster) for cluster in clusters]\n return clusters, mention_to_cluster\n\n\ndef mask_tensor(t, mask):\n t = t + ((1.0 - mask.float()) * -10000.0)\n t = torch.clamp(t, min=-10000.0, max=10000.0)\n return t\n\n\ndef write_meta_data(output_dir, args):\n output_path = os.path.join(output_dir, \"meta.json\")\n repo = git.Repo(search_parent_directories=True)\n hexsha = repo.head.commit.hexsha\n ts = time()\n print(f\"Writing {output_path}\")\n with open(output_path, mode='w') as f:\n json.dump(\n {\n 'git_hexsha': hexsha,\n 'args': {k: str(v) for k, v in args.__dict__.items()},\n 'date': datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n },\n f,\n indent=4,\n sort_keys=True)\n print(file=f)\n","repo_name":"ShimonMalnick/Generative-Coreference-Resolution","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73956271513","text":"def search(arr,key,low,high):\r\n \r\n while True:\r\n mid=(high+low)//2\r\n if mid>=len(arr) or high<0 or low>high:\r\n return -1\r\n\r\n if arr[mid]==key:\r\n return mid\r\n\r\n elif arr[mid]>key:\r\n high=mid-1\r\n else:\r\n low=mid+1\r\n \r\n\r\n\r\narr=[3,4,8,11,200]\r\nkey=int(input())\r\n\r\nres=search(arr,key,0,len(arr))\r\nprint(res)","repo_name":"cale-i/atcoder","sub_path":"algorithm/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14276656950","text":"import yfinance as yf\nfrom flask import Flask, request, jsonify\nfrom datetime import date\nimport sys\n\napp = Flask(__name__)\n\n@app.route('/', methods=['POST'])\ndef get_stock_value():\n data = request.get_json()\n\n # Obtener los datos de la solicitud\n accion = data['accion']\n fecha_inicial = data['fecha_inicial']\n fecha_final = data['fecha_final']\n\n # Realizar la consulta a yfinance para obtener los datos de la acción \n valores = yf.download(accion, fecha_inicial, fecha_final)\n valores.index = valores.index.strftime('%Y-%m-%d %H:%M:%S')\n\n # Crear la respuesta en formato JSON con el valor consolidado\n \n # Devolver la respuesta en formato JSON\n return jsonify(valores.to_dict())\n\nif __name__ == '__main__':\n app.debug=True\n app.run()\n","repo_name":"robertfgil2000/Script-y-web-service","sub_path":"web-service/financia.py","file_name":"financia.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36031257318","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Pavel Korshunov \n# Mon 8 Oct 14:09:22 CEST 2015\n#\n\nfrom __future__ import print_function\n\nimport numpy\nimport 
bob.bio.base\nimport os.path\n\nimport bob.core\nlogger = bob.core.log.setup(\"bob.pad.voice\")\n\n\ndef load_noattacks_scores(filename):\n # split in positives and negatives\n female_neg = []\n female_pos = []\n male_neg = []\n male_pos = []\n\n # read four column list line by line\n for (client_id, probe_id, filename, score) in bob.bio.base.score.load.four_column(filename):\n if client_id == probe_id:\n if 'female' in filename:\n female_pos.append(score)\n else:\n male_pos.append(score)\n else:\n if 'female' in filename:\n female_neg.append(score)\n else:\n male_neg.append(score)\n results = {}\n results['female'] = (numpy.array(female_neg, numpy.float64), numpy.array(female_pos, numpy.float64))\n results['male'] = (numpy.array(male_neg, numpy.float64), numpy.array(male_pos, numpy.float64))\n return results\n\n\n#attacks = ['replay_phone1', 'replay_phone2', 'replay_laptop', 'replay_laptop_HQ', 'speech_synthesis_logical_access', 'speech_synthesis_physical_access', 'speech_synthesis_physical_access_HQ', 'voice_conversion_logical_access', 'voice_conversion_physical_access', 'voice_conversion_physical_access_HQ']\n\n# load and split scores in positives and negatives\ndef load_attack_scores(scores_filename, support=\"all\", adevice=\"all\", recdevice=\"all\"):\n positives = []\n negatives = []\n\n # read four column list line by line\n for (client_id, probe_id, filename, score) in bob.bio.base.score.load.four_column(scores_filename):\n if client_id == probe_id:\n # if (support in filename or support == \"all\") and \\\n # (adevice in filename or adevice == \"all\") and \\\n # (recdevice in filename or recdevice == \"all\"):\n positives.append(score)\n else:\n correct_attack = False\n if len(probe_id.split('/')) > 1: probe_id = probe_id.split('/')[1]\n if any(s == probe_id for s in support): correct_attack = True\n if (correct_attack or not support or support == \"all\") and \\\n (adevice in filename or adevice == \"all\") and \\\n (recdevice in filename or recdevice == \"all\"):\n negatives.append(score)\n\n return numpy.array(negatives, numpy.float64), numpy.array(positives, numpy.float64)\n\n# load file with scores into two dictionaries: negative and positive\ndef scores_to_dict(filename, support=\"all\", adevice=\"all\", recdevice=\"all\"):\n \"\"\"\n\n :rtype : dict\n \"\"\"\n positives = {}\n negatives = {}\n\n # read four column list line by line\n for (client_id, probe_id, filename, score) in bob.bio.base.score.load.four_column(filename):\n if client_id == probe_id:\n if (support in filename or support == \"all\") and \\\n (adevice in filename or adevice == \"all\") and \\\n (recdevice in filename or recdevice == \"all\"):\n positives[\"%03d\"%int(client_id) + \"%03d\"%int(probe_id) + filename] = score\n else:\n # print (\"%03d\"%int(client_id) + \"%03d\"%int(probe_id) + filename)\n negatives[\"%03d\"%int(client_id) + \"%03d\"%int(probe_id) + filename] = score\n\n return negatives, positives\n\ndef accumulate_scores(pad_scores_dir, avs_scores_dir, support=\"all\", attackdevice=\"all\", device=\"all\"):\n\n all_scores = {}\n systems = ['pad', 'avs']\n groups = ['train', 'dev', 'eval']\n score_types = ['real', 'attack', 'zimp']\n\n # read score files from PAD and AVS directory\n # make sure all 6 files are present: real and attacks for train, dev, and eval sets\n for system in systems:\n if system == 'pad':\n curdir = pad_scores_dir\n else:\n curdir = avs_scores_dir\n if not os.path.exists(curdir):\n logger.error(\" - Score fusion: directory %s with scores does not exist\", curdir)\n 
return None\n all_scores[system] = {}\n # read scores from the current directory with scores\n for group in groups:\n all_scores[system][group] = {}\n # loop through real and attacks only, since zero-imposters are inside real scores\n for type in score_types[0:2]:\n path_scores = os.path.join(curdir, 'scores-' + group + '-' + type)\n if not os.path.exists(path_scores):\n logger.error(\" - Score fusion: score file %s does not exist\", path_scores)\n return None\n if type == 'attack':\n all_scores[system][group][type] = \\\n scores_to_dict(path_scores, support, attackdevice, device)[\n 1] # oly positive values\n if type == 'real':\n # zero imposters - negative set, real - positive set\n [all_scores[system][group][score_types[2]],\n all_scores[system][group][type]] = scores_to_dict(path_scores)\n logger.info(\"%s %s %s: %d\",\n system, group, score_types[2], len(all_scores[system][group][score_types[2]]))\n logger.info(\"%s %s %s: %d\", system, group, type, len(all_scores[system][group][type]))\n return all_scores\n","repo_name":"bioidiap/bob.pad.voice","sub_path":"bob/pad/voice/utils/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29178145974","text":"from typing import List\nfrom utils import *\n\n\nclass Solution_120_Triangle_2:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n prev_row = triangle[0]\n for row in range(1, len(triangle)):\n curr_row = []\n for col in range(row + 1):\n smallest_above = math.inf\n if col > 0:\n smallest_above = prev_row[col - 1]\n if col < row:\n smallest_above = min(smallest_above, prev_row[col])\n curr_row.append(triangle[row][col] + smallest_above)\n prev_row = curr_row\n return min(prev_row)\n\n\nif __name__ == \"__main__\":\n nums = [\n [2],\n [3, 4],\n [6, 5, 7],\n # [4, 1, 8, 3],\n [4, 2, 8, 1],\n ]\n\n so = Solution_120_Triangle_2()\n res = so.minimumTotal(nums)\n print(res)\n","repo_name":"santiagoom/leetcode","sub_path":"solution/python/120_Triangle_2.py","file_name":"120_Triangle_2.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70861615512","text":"#Создать массив, на четных местах в котором стоят единицы,\n# а на нечетных местах - числа, равные остатку от деления своего номера на 5.\na=[]\nk=int(input(\"Введите количество элементов: \"))\ndef arithElem(n, array=[]):\n for i in range(n):\n if i%2==0:\n array.append(1)\n else: array.append(i%5)\narithElem(k, a)\nfor elem in a:\n print(elem)","repo_name":"IM09112001/functionsX25","sub_path":"7/_1.7.py","file_name":"_1.7.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15429230485","text":"'''\nhttps://leetcode.com/explore/interview/card/top-interview-questions-medium/108/trees-and-graphs/792/\n'''\nfrom typing import List\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n EDGES = [(0,1), (1,0), (0,-1), (-1,0)]\n row = len(grid)\n column = len(grid[0])\n visited = set()\n adjancency_list = {}\n\n def get_neighbours(x, y):\n visited.add((x, y))\n\n adjancency_list[(x, y)] = []\n\n for edge in EDGES:\n neighbour = (x+edge[0], y+edge[1])\n\n if not (0 <= neighbour[0] < row and \\\n 0 <= neighbour[1] < column):\n continue\n\n if grid[neighbour[0]][neighbour[1]] == '1':\n adjancency_list[(x, y)].append(neighbour)\n\n for item 
in adjancency_list[(x, y)]:\n if item in visited:\n continue\n\n get_neighbours(item[0], item[1])\n\n islands = 0\n for i in range(row):\n for j in range(column):\n if grid[i][j] != '1' or (i, j) in visited:\n continue\n\n islands += 1\n get_neighbours(i, j)\n\n return islands\n\ngrid = [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n]\n\nobj = Solution()\nprint(obj.numIslands(grid))","repo_name":"Egahi/algo-practice","sub_path":"NumberOfIslands.py","file_name":"NumberOfIslands.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"17621785114","text":"import logging\nimport json\nimport azure.functions as func\nfrom . import dbtemplate\nfrom ..shared import transact\n\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n try:\n headers={'Content-Type':'application/json','Cache-Control':'no-store'}\n logging.info('Python HTTP trigger function processed a request.')\n\n m1 = req.params.get('m1')\n m2=req.params.get('m2')\n if not m1 or not m2:\n body={}\n body['status']='error'\n body['message']='m1 or m2 param missing'\n return func.HttpResponse(\n json.dumps(body),\n status_code=200,\n headers=headers\n )\n conn=transact.get_db_conn()\n cur=conn.cursor()\n cur.execute(dbtemplate.get_payments.format(m1,m2))\n res=cur.fetchall()\n data=[]\n for r in res:\n inter={}\n inter['sender_phone']=r[0]\n inter['sender_upi']=r[1]\n inter['receiver_phone']=r[2]\n inter['receiver_upi']=r[3]\n inter['amount']=r[4]\n inter['create_time']=str(r[5])\n inter['mode']=r[6]\n inter['id']=r[7]\n if r[8]==0:\n inter['status']='success'\n \n elif r[8]==1:\n inter['status']='incorrect pin'\n \n elif r[8]==2:\n inter['status']='insufficient balance'\n \n data.append(inter)\n body={}\n body['status']='success'\n body['data']=data\n return func.HttpResponse(\n json.dumps(body),\n status_code=200,\n headers=headers\n )\n\n\n\n except Exception as e:\n logging.info(str(e))\n body={}\n body['status']='error'\n body['message']=str(e)\n return func.HttpResponse(\n json.dumps(body),\n status_code=500,\n headers=headers\n )\n","repo_name":"aldmaxx100/hipay","sub_path":"list-payments/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32062670916","text":"import turtle as t\nimport tkinter as tk\nimport random as r\nimport threading\nimport time\nspeed = 1\ndef threadFunc():\n global loop\n while True:\n while len(ts) < 1:\n loop+=1\n x=ball()\n print(ts)\n for i in ts:\n i.update()\n \nth = threading.Thread(target=threadFunc)\n\nwn = t.Screen()\nts=[]\nWINDOW_WIDTH=100\ndef k2():\n global ts\n for i in ts:\n if i.t.xcor()> -24 and i.t.xcor()<1:\n ts.remove(i)\n i.t.hideturtle()\n \n print(ts)\n break\n # t.update()\n\ndef k1():\n global ts\n for i in ts:\n if i.t.xcor()> -49 and i.t.xcor()<-24:\n ts.remove(i)\n i.t.hideturtle()\n \n print(ts)\n break\n # t.update()\ndef k3():\n global ts\n for i in ts:\n if i.t.xcor()> 1 and i.t.xcor()<26:\n ts.remove(i)\n i.t.hideturtle()\n \n print(ts)\n break\n # t.update()\n\ndef k4():\n global ts\n for i in ts:\n if i.t.xcor()> 26 and i.t.xcor()< 51:\n ts.remove(i)\n i.t.hideturtle()\n \n print(ts)\n break\n # t.update()\n\ndef up():\n global speed\n speed+=1\ndef down():\n global speed\n speed-=1\n\n\nclass ball():\n def __init__(self):\n global 
ts,speed\n spot=[-25,0,25,50]\n self.t = t.Turtle()\n self.t.shape(\"circle\")\n self.t.penup()\n self.t.goto(r.choice(spot),100)\n ts.append(self)\n # t.update()\n def update(self):\n self.t.goto(self.t.xcor(),self.t.ycor()-speed)\n # t.update()\n\nloop=0\n \n\nth.start()\n \nwn.onkeypress(k1, \"d\")\nwn.onkeypress(k2, \"f\")\nwn.onkeypress(k3, \"j\")\nwn.onkeypress(k4, \"k\") \nwn.onkeypress(up, \"n\")\nwn.onkeypress(down, \"m\") \n# t.update() \nwn.listen()\nwn.mainloop()\nth.join()","repo_name":"natebrant/Python","sub_path":"OsuLUA/osum/osu.py","file_name":"osu.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16945774817","text":"from flask import jsonify\nimport json\n\n\nclass ResponseService:\n\n @staticmethod\n def list_of_users(instance: dict):\n\n response = jsonify(\n {\n 'message': 'Instance generated successfully',\n 'response': instance,\n 'total': len(instance),\n 'status code': 200\n }\n )\n response.status_code = 200\n response.mimetype = 'application/json'\n\n return response\n\n @staticmethod\n def structure_error(error):\n response = jsonify({'message': 'Structure error',\n 'response': str(error), 'status code': 400})\n response.status_code = 400\n response.mimetype = 'application/json'\n return response\n\n @staticmethod\n def collection_not_found():\n response = jsonify(\n {'message': 'The given collection was not found in the DB', 'response': None, 'status code': 404})\n response.status_code = 404\n response.mimetype = 'application/json'\n return response\n\n @staticmethod\n def email_not_found():\n response = jsonify(\n {'message': 'The given email was not found in the DB', 'response': None, 'status code': 404})\n response.status_code = 404\n response.mimetype = 'application/json'\n return response\n\n @staticmethod\n def custom_response(status_code=404, message=None, response=None):\n response = jsonify(\n {'message': message, 'response': response, 'status code': status_code})\n response.status_code = status_code\n response.mimetype = 'application/json'\n return response\n","repo_name":"riglesias1/gatsby-docz-login","sub_path":"whitelist-api/main/services/response_service.py","file_name":"response_service.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32259441789","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom game.actors import actorsFactory\r\nfrom game.actors import Actor\r\nfrom game.sound.sound import SoundsStore\r\n\r\nimport logging\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass Coin(Actor):\r\n SCORE = 1\r\n def __init__(self, parentMap, fromTile, actorType, x=0, y=0, w=None, h=None):\r\n Actor.__init__(self, parentMap, fromTile, actorType, x, y, w, h)\r\n \r\n # Register and load sounds for coins (if already loaded, this does nothing)\r\n SoundsStore.store.storeSoundFile('pop', 'coin.ogg')\r\n \r\n self.soundGetCoin = SoundsStore.store.get('pop')\r\n\r\n def notify(self, sender, message):\r\n Actor.notify(self, sender, message)\r\n if message == 'hit':\r\n self.soundGetCoin.play()\r\n sender.notify(self, self.actorType)\r\n \r\nactorsFactory.registerType('BronceCoin', Coin)\r\nactorsFactory.registerType('SilverCoin', Coin)\r\nactorsFactory.registerType('GoldCoin', 
Coin)\r\n\r\n","repo_name":"dkmstr/pygame-framework","sub_path":"src/items/coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28099988808","text":"import collections\n\n\nclass PageRankBuilder:\n PAGE_RANK_HISTORY_LENGTH = 3\n\n def __init__(self, page_type_recognizer, link_extractor, page_dup_filter):\n self.page_type_recognizer = page_type_recognizer\n self.link_extractor = link_extractor\n self.page_duplicator = page_dup_filter\n\n def build_page_rank(self, response):\n urls = [link.url for link in self.link_extractor.extract_links(response)]\n new_urls = [url for url in urls if not self.page_duplicator.url_seen(url)]\n new_article_urls = [url for url in new_urls if self.page_type_recognizer.is_article(url)]\n\n return len(new_article_urls)\n\n def build_combined_page_rank(self, response):\n page_rank = self.build_page_rank(response)\n if 'history_rank' not in response.request.meta.keys():\n history_rank = collections.deque(maxlen=self.PAGE_RANK_HISTORY_LENGTH)\n else:\n history_rank = response.request.meta['history_rank'].copy()\n\n history_rank.append(page_rank)\n return sum(history_rank), history_rank\n","repo_name":"tarnenok/pressball-crawler","sub_path":"scrapper/page_rank.py","file_name":"page_rank.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4914765955","text":"\n# coding: utf-8\n\n#

 <h1>Library Setup</h1>\n\n# In[4]:\n\n\n# <h1>Display Coordinates of our Users</h1>\n\n# In[193]:\n\n\ndef UserCoodinates(lat,lon,formatted_address):\n    import folium\n    import branca\n    m = folium.Map(location=[lat,lon],zoom_start=16)\n    html = \"\"\"\n    <h4>You are here</h4>\n    <p>\n    %s\n    </p>\n    \"\"\"%formatted_address\n    iframe = branca.element.IFrame(html=html, width=200, height=80)\n    popup = folium.Popup(iframe, max_width=500)\n    icon=folium.Icon(color='green',icon='ok-sign')\n    folium.Marker([lat,lon], popup=popup,icon=icon).add_to(m)\n    return m\n
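\n\n# Example (illustrative, added here; not part of the original notebook): the\n# folium.Map returned by UserCoodinates can be written straight to an HTML file:\n#\n#   m = UserCoodinates(40.4406, -79.9959, 'Pittsburgh, PA')\n#   m.save('you_are_here.html')\n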
\n \"\"\"%formatted_address\n iframe = branca.element.IFrame(html=html, width=200, height=80)\n popup = folium.Popup(iframe, max_width=500)\n icon=folium.Icon(color='green',icon='ok-sign')\n folium.Marker([lat,lon], popup=popup,icon=icon).add_to(m)\n return m\n\n\n\ndef ResultsVisualization(results,formatted_address):\n \n \n import json\n import folium\n import branca\n \n feature_list = []\n feature_collection = {'type':\"FeatureCollection\",\"features\":feature_list} \n \n try: \n u_lat = results[0][0]\n u_lon = results[0][1]\n except:\n return(\"Oops, we cannot find restaurants for you :(\")\n \n m = UserCoodinates(u_lat,u_lon,formatted_address)\n result = results[1:]\n for restaurant in result:\n name = restaurant['name']\n address = restaurant['address']\n lat = restaurant['latitude']\n lon = restaurant['longitude']\n stars = restaurant['stars']\n \n col = \"#51A8DD\"\n color1 = \"#005CAF\"\n color2 = \"#113285\"\n \n html = \"\"\"\n

%s

\n \n \n
  • Address: %s\n
  • Graded by other costumers as %s Stars.\n \n \"\"\"%(color1,name,color2,address,color2,stars)\n\n iframe = branca.element.IFrame(html=html, width=200, height=130)\n popup = folium.Popup(iframe, max_width=500)\n icon=folium.Icon(color='blue')\n folium.Marker(\n location=[lat,lon], \n popup=popup,\n icon=icon).add_to(m)\n \n return m\n \n","repo_name":"rachelzirongfang/WhatForDinner","sub_path":".ipynb_checkpoints/Visualization-checkpoint.py","file_name":"Visualization-checkpoint.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33580732063","text":"# python trainer.py --model_path=/tmp/model --config config/test.yaml\nimport os\n\n# Hide welcome message from bitsandbytes\nos.environ.update({\n \"BITSANDBYTES_NOWELCOME\": \"1\",\n \"DIFFUSERS_VERBOSITY\": \"error\"\n})\n\nimport torch\nimport lightning.pytorch as pl\n\nfrom lib.args import parse_args\nfrom lib.callbacks import HuggingFaceHubCallback, SampleCallback\nfrom lib.model import StableDiffusionModel, get_pipeline\nfrom lib.compat import pl_compat_fix\nfrom lib.precision import HalfPrecisionPlugin\n\nfrom omegaconf import OmegaConf\nfrom lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint\nfrom lightning.pytorch.loggers import WandbLogger\nfrom lightning.pytorch.strategies import SingleDeviceStrategy\nfrom lightning.pytorch import seed_everything\nfrom lib.utils import rank_zero_print\n\ndef main(args):\n config = OmegaConf.load(args.config)\n seed_everything(config.trainer.seed)\n\n if args.model_path == None:\n args.model_path = config.trainer.model_path\n\n strategy = None\n if config.lightning.accelerator in [\"gpu\", \"cpu\"]:\n strategy = \"ddp\"\n\n if config.trainer.use_hivemind:\n from lib.hivemind import init_hivemind\n strategy = init_hivemind(config)\n\n rank_zero_print(f\"Loading model from {args.model_path}\")\n pipeline = get_pipeline(args.model_path)\n if config.get(\"lora\"):\n if config.lora.get(\"use_locon\"):\n from experiment.locon import LoConDiffusionModel\n model = LoConDiffusionModel(pipeline, config)\n else:\n from experiment.lora import LoRADiffusionModel\n model = LoRADiffusionModel(pipeline, config)\n strategy = config.lightning.strategy = \"auto\"\n else:\n model = StableDiffusionModel(pipeline, config)\n\n major, minor = torch.__version__.split('.')[:2]\n if (int(major) > 1 or (int(major) == 1 and int(minor) >= 12)) and torch.cuda.is_available():\n device = torch.cuda.get_device_properties(0)\n compute_capability = float(f\"{device.major}.{device.minor}\")\n precision = 'high' if config.lightning.precision == 32 else 'medium'\n if compute_capability >= 8.0:\n torch.backends.cuda.matmul.allow_tf32 = True\n torch.backends.cudnn.allow_tf32 = True\n torch.set_float32_matmul_precision(precision)\n\n callbacks = []\n if config.monitor.huggingface_repo != \"\":\n hf_logger = HuggingFaceHubCallback(\n repo_name=config.monitor.huggingface_repo,\n use_auth_token=config.monitor.hf_auth_token,\n **config.monitor\n )\n callbacks.append(hf_logger)\n\n logger = None\n if config.monitor.wandb_id != \"\":\n logger = WandbLogger(project=config.monitor.wandb_id)\n callbacks.append(LearningRateMonitor(logging_interval='step'))\n\n if config.get(\"custom_embeddings\") != None and config.custom_embeddings.enabled:\n from experiment.textual_inversion import CustomEmbeddingsCallback\n callbacks.append(CustomEmbeddingsCallback(config.custom_embeddings))\n\n if config.get(\"sampling\") != None and 
config.sampling.enabled:\n        callbacks.append(SampleCallback(config.sampling, logger))\n\n    if torch.cuda.device_count() == 1:\n        strategy = SingleDeviceStrategy(device=\"cuda:0\")\n\n    if config.lightning.get(\"strategy\") is not None:\n        strategy = config.lightning.strategy\n        del config.lightning[\"strategy\"]\n\n    if not config.get(\"custom_embeddings\") or not config.custom_embeddings.freeze_unet:\n        checkpoint_config = {\n            k: v\n            for k, v in config.checkpoint.items() if k != \"extended\"\n        }\n        callbacks.append(ModelCheckpoint(**checkpoint_config))\n        enable_checkpointing = True\n    else:\n        enable_checkpointing = False\n\n    if config.lightning.get(\"enable_checkpointing\") is None:\n        config.lightning.enable_checkpointing = enable_checkpointing\n    \n    plugins = None\n    target_precision = config.lightning.precision\n    if target_precision in [\"16-true\", \"bf16-true\"]:\n        plugins = HalfPrecisionPlugin(target_precision)\n        model.to(torch.float16 if target_precision == \"16-true\" else torch.bfloat16)\n        del config.lightning.precision\n\n    # config.lightning.replace_sampler_ddp = False\n    config, callbacks = pl_compat_fix(config, callbacks)\n    trainer = pl.Trainer(\n        logger=logger, \n        callbacks=callbacks, \n        strategy=strategy, \n        plugins=plugins, \n        **config.lightning\n    )\n    trainer.fit(model=model, ckpt_path=args.resume if args.resume else None)\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    main(args)\n\n","repo_name":"Mikubill/naifu-diffusion","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"54"} +{"seq_id":"22787709627","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis gives other modules access to the gritty details about characters and the\nencodings that use them.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport re\nimport zlib\nimport unicodedata\nimport itertools\nfrom pkg_resources import resource_string\nfrom ftfy.compatibility import unichr\n\n# These are the encodings we will try to fix in ftfy, in the\n# order that they should be tried.\nCHARMAP_ENCODINGS = [\n    'latin-1',\n    'sloppy-windows-1252',\n    'sloppy-windows-1250',\n    'iso-8859-2',\n    'sloppy-windows-1251',\n    'macroman',\n    'cp437',\n]\n\n\ndef _build_regexes():\n    \"\"\"\n    ENCODING_REGEXES contain reasonably fast ways to detect if we\n    could represent a given string in a given encoding. The simplest one is\n    the 'ascii' detector, which of course just determines if all characters\n    are between U+0000 and U+007F.\n    \"\"\"\n    # Define a regex that matches ASCII text.\n    encoding_regexes = {'ascii': re.compile('^[\\\x00-\\\x7f]*$')}\n\n    for encoding in CHARMAP_ENCODINGS:\n        # Make a sequence of characters that bytes \\\x80 to \\\xFF decode to\n        # in each encoding, as well as byte \\\x1A, which is used to represent\n        # the replacement character � in the sloppy-* encodings.\n        latin1table = ''.join(unichr(i) for i in range(128, 256)) + '\\\x1a'\n        charlist = latin1table.encode('latin-1').decode(encoding)\n\n        # The rest of the ASCII bytes -- bytes \\\x00 to \\\x19 and \\\x1B\n        # to \\\x7F -- will decode as those ASCII characters in any encoding we\n        # support, so we can just include them as ranges. This also lets us\n        # not worry about escaping regex special characters, because all of\n        # them are in the \\\x1B to \\\x7F range.\n        regex = '^[\\\x00-\\\x19\\\x1b-\\\x7f{0}]*$'.format(charlist)\n        encoding_regexes[encoding] = re.compile(regex)\n    return encoding_regexes\nENCODING_REGEXES = _build_regexes()\n
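\n\n# Illustrative check (added; not part of the original module): these are the\n# patterns that possible_encoding(), defined further below, consults:\n#\n#   ENCODING_REGEXES['ascii'].match('plain ASCII text')   # -> a match object\n#   ENCODING_REGEXES['ascii'].match('café')               # -> None, 'é' is above U+007F\n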
This also lets us\n # not worry about escaping regex special characters, because all of\n # them are in the \\x1B to \\x7F range.\n regex = '^[\\x00-\\x19\\x1b-\\x7f{0}]*$'.format(charlist)\n encoding_regexes[encoding] = re.compile(regex)\n return encoding_regexes\nENCODING_REGEXES = _build_regexes()\n\n\ndef _build_utf8_punct_regex():\n \"\"\"\n Recognize UTF-8 mojibake that's so blatant that we can fix it even when the\n rest of the string doesn't decode as UTF-8 -- namely, UTF-8 sequences for\n the 'General Punctuation' characters U+2000 to U+2040, re-encoded in\n Windows-1252.\n\n These are recognizable by the distinctive 'â€' ('\\xe2\\x80') sequence they\n all begin with when decoded as Windows-1252.\n \"\"\"\n # We're making a regex that has all the literal bytes from 0x80 to 0xbf in\n # a range. \"Couldn't this have just said [\\x80-\\xbf]?\", you might ask.\n # However, when we decode the regex as Windows-1252, the resulting\n # characters won't even be remotely contiguous.\n #\n # Unrelatedly, the expression that generates these bytes will be so much\n # prettier when we deprecate Python 2.\n continuation_char_list = ''.join(\n unichr(i) for i in range(0x80, 0xc0)\n ).encode('latin-1')\n obvious_utf8 = ('â€['\n + continuation_char_list.decode('sloppy-windows-1252')\n + ']')\n return re.compile(obvious_utf8)\nPARTIAL_UTF8_PUNCT_RE = _build_utf8_punct_regex()\n\n\n# Recognize UTF-8 sequences that would be valid if it weren't for a b'\\xa0'\n# that some Windows-1252 program converted to a plain space.\n#\n# The smaller values are included on a case-by-case basis, because we don't want\n# to decode likely input sequences to unlikely characters. These are the ones\n# that *do* form likely characters before 0xa0:\n#\n# 0xc2 -> U+A0 NO-BREAK SPACE\n# 0xc3 -> U+E0 LATIN SMALL LETTER A WITH GRAVE\n# 0xc5 -> U+160 LATIN CAPITAL LETTER S WITH CARON\n# 0xce -> U+3A0 GREEK CAPITAL LETTER PI\n# 0xd0 -> U+420 CYRILLIC CAPITAL LETTER ER\n#\n# These still need to come with a cost, so that they only get converted when\n# there's evidence that it fixes other things. Any of these could represent\n# characters that legitimately appear surrounded by spaces, particularly U+C5\n# (Å), which is a word in multiple languages!\n#\n# We should consider checking for b'\\x85' being converted to ... in the future.\n# I've seen it once, but the text still wasn't recoverable.\n\nALTERED_UTF8_RE = re.compile(b'[\\xc2\\xc3\\xc5\\xce\\xd0][ ]'\n b'|[\\xe0-\\xef][ ][\\x80-\\xbf]'\n b'|[\\xe0-\\xef][\\x80-\\xbf][ ]'\n b'|[\\xf0-\\xf4][ ][\\x80-\\xbf][\\x80-\\xbf]'\n b'|[\\xf0-\\xf4][\\x80-\\xbf][ ][\\x80-\\xbf]'\n b'|[\\xf0-\\xf4][\\x80-\\xbf][\\x80-\\xbf][ ]')\n\n# This expression matches UTF-8 and CESU-8 sequences where some of the\n# continuation bytes have been lost. The byte 0x1a (sometimes written as ^Z) is\n# used within ftfy to represent a byte that produced the replacement character\n# \\ufffd. 
We don't know which byte it was, but we can at least decode the UTF-8\n# sequence as \\ufffd instead of failing to re-decode it at all.\nLOSSY_UTF8_RE = re.compile(\n b'[\\xc2-\\xdf][\\x1a]'\n b'|\\xed[\\xa0-\\xaf][\\x1a]\\xed[\\xb0-\\xbf][\\x1a\\x80-\\xbf]'\n b'|\\xed[\\xa0-\\xaf][\\x1a\\x80-\\xbf]\\xed[\\xb0-\\xbf][\\x1a]'\n b'|[\\xe0-\\xef][\\x1a][\\x1a\\x80-\\xbf]'\n b'|[\\xe0-\\xef][\\x1a\\x80-\\xbf][\\x1a]'\n b'|[\\xf0-\\xf4][\\x1a][\\x1a\\x80-\\xbf][\\x1a\\x80-\\xbf]'\n b'|[\\xf0-\\xf4][\\x1a\\x80-\\xbf][\\x1a][\\x1a\\x80-\\xbf]'\n b'|[\\xf0-\\xf4][\\x1a\\x80-\\xbf][\\x1a\\x80-\\xbf][\\x1a]'\n b'|\\x1a'\n)\n\n# These regexes match various Unicode variations on single and double quotes.\nSINGLE_QUOTE_RE = re.compile('[\\u2018-\\u201b]')\nDOUBLE_QUOTE_RE = re.compile('[\\u201c-\\u201f]')\n\n\ndef possible_encoding(text, encoding):\n \"\"\"\n Given text and a single-byte encoding, check whether that text could have\n been decoded from that single-byte encoding.\n\n In other words, check whether it can be encoded in that encoding, possibly\n sloppily.\n \"\"\"\n return bool(ENCODING_REGEXES[encoding].match(text))\n\n\nCHAR_CLASS_STRING = zlib.decompress(\n resource_string(__name__, 'char_classes.dat')\n).decode('ascii')\n\ndef chars_to_classes(string):\n \"\"\"\n Convert each Unicode character to a letter indicating which of many\n classes it's in.\n\n See build_data.py for where this data comes from and what it means.\n \"\"\"\n return string.translate(CHAR_CLASS_STRING)\n\n\ndef _build_control_char_mapping():\n \"\"\"\n Build a translate mapping that strips likely-unintended control characters.\n See :func:`ftfy.fixes.remove_control_chars` for a description of these\n codepoint ranges and why they should be removed.\n \"\"\"\n control_chars = {}\n\n for i in itertools.chain(\n range(0x00, 0x09), [0x0b],\n range(0x0e, 0x20), [0x7f],\n range(0x206a, 0x2070),\n [0xfeff],\n range(0xfff9, 0xfffd),\n range(0x1d173, 0x1d17b),\n range(0xe0000, 0xe0080)\n ):\n control_chars[i] = None\n\n return control_chars\nCONTROL_CHARS = _build_control_char_mapping()\n\n\n# A translate mapping that breaks ligatures made of Latin letters. 
While\n# ligatures may be important to the representation of other languages, in\n# Latin letters they tend to represent a copy/paste error.\n#\n# Ligatures may also be separated by NFKC normalization, but that is sometimes\n# more normalization than you want.\nLIGATURES = {\n    ord('Ĳ'): 'IJ',\n    ord('ĳ'): 'ij',\n    ord('ﬀ'): 'ff',\n    ord('ﬁ'): 'fi',\n    ord('ﬂ'): 'fl',\n    ord('ﬃ'): 'ffi',\n    ord('ﬄ'): 'ffl',\n    ord('ﬅ'): 'ſt',\n    ord('ﬆ'): 'st'\n}\n\n\ndef _build_width_map():\n    \"\"\"\n    Build a translate mapping that replaces halfwidth and fullwidth forms\n    with their standard-width forms.\n    \"\"\"\n    # Though it's not listed as a fullwidth character, we'll want to convert\n    # U+3000 IDEOGRAPHIC SPACE to U+20 SPACE on the same principle, so start\n    # with that in the dictionary.\n    width_map = {0x3000: ' '}\n    for i in range(0xff01, 0xfff0):\n        char = unichr(i)\n        alternate = unicodedata.normalize('NFKC', char)\n        if alternate != char:\n            width_map[i] = alternate\n    return width_map\nWIDTH_MAP = _build_width_map()\n","repo_name":"HiroIshikawa/speech2craft","sub_path":"experiment/tak/text2command/tutorial/venv/lib/python3.6/site-packages/ftfy/chardata.py","file_name":"chardata.py","file_ext":"py","file_size_in_byte":7920,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"36235659905","text":"import asyncio\nfrom time import time\nfrom typing import Annotated\n\nimport aiohttp\nfrom fastapi import HTTPException, Path, Query, status\n\nfrom utils.config import get_settings\nfrom utils.schemas.events import (\n    Event,\n    EventCreate,\n    EventsData,\n    EventStatus,\n)\n\nevents = EventsData()\nsettings = get_settings()\n\n\nasync def init_events_data() -> None:\n    for _ in range(20):\n        event = Event(status=EventStatus.IN_PROCESS)\n        to_storage: dict[str, Event] = {event.id: event}\n        events.storage.update(to_storage)\n\n\nasync def filter_actual(is_active: Annotated[bool, Query()] = False) -> list[Event]:\n    if is_active:\n        result_list = []\n        for _event_id, event in events.storage.items():\n            if time() < event.deadline:\n                result_list.append(event)\n        return result_list\n    return list(events.storage.values())\n\n\nasync def get_one(event_id: Annotated[str, Path()]) -> Event:\n    if event_id in events.storage:\n        return events.storage[event_id]\n    raise HTTPException(\n        status_code=status.HTTP_404_NOT_FOUND,\n        detail=\"No Such Event\",\n    )\n\n\ndef create_event(event: EventCreate) -> Event:\n    if event.id in events.storage:\n        raise HTTPException(\n            status_code=status.HTTP_409_CONFLICT,\n            detail=\"Event with such id already exists\",\n        )\n    data_to_add = {event.id: event}\n    events.storage.update(data_to_add)\n    return event\n\n\nasync def notify_bets(event: Event) -> None:\n    async with aiohttp.ClientSession(f\"http://{settings.BET_SERVICE_NAME}:80\") as session:\n        no_success = True\n        counter = 0\n        while no_success:\n            url = f\"/{event.id}/status\"\n            async with session.patch(url, json={\"value\": event.status.value}) as res:\n                if res.status not in (200, 406):\n                    # Questionable implementation, for the case where the event status\n                    # is suddenly changed and something goes wrong\n                    if counter > 10:\n                        break\n                    await asyncio.sleep(10)\n                else:\n                    break\n","repo_name":"korneyka3000/betting","sub_path":"line_provider/src/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28020184579","text":"files = 
['demo.png','help.exe','sleep.png','work.doc']\nprint(files)\n\n# normal\ncl_files = []\nfor file in files:\n    name = file.split('.')[0]\n    cl_files.append(name)\n\nprint(cl_files)\n\n# lambda expression\nremove_ext = lambda filename: filename.split('.')[0]\nclean_files = list(map(remove_ext,files))\n\nprint(clean_files)","repo_name":"niddhu5598/project_python","sub_path":"LISTS/removing_extension_using_map_lambda.py","file_name":"removing_extension_using_map_lambda.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1210794672","text":"import heapq\n\ndef solution(begin, target, words):\n    answer = 0\n    if target not in words:\n        return 0\n\n    # shortest path from begin to target\n    queue = [(answer, begin)]\n\n    #find the words that satisfy the condition for being pushed onto the queue\n    def check(now, w): # the two words must differ by exactly one character, at the same index\n        count = 0\n        for i in range(len(w)):\n            if now[i] != w[i]:\n                count += 1\n\n        if count == 1:\n            return True\n        return False\n\n    # BFS traversal\n    while queue:\n        answer, now = heapq.heappop(queue)\n        if now == target:\n            return answer\n\n        for w in words: #find the words that satisfy the condition for being pushed onto the queue\n            if check(now, w):\n                heapq.heappush(queue, (answer+1, w))\n\n    return 0\n\n# test-case\nprint(solution(\"hht\", \"cog\", ['hhi', 'hot', 'dot', 'dog', 'lot', 'log', 'cog']))\n","repo_name":"angelatto/Algorithm","sub_path":"programmers/1205.py","file_name":"1205.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23538378267","text":"\"\"\"\nWritten by Joshua Willman\nFeatured in \"Modern Pyqt - Create GUI Applications for Project Management, Computer Vision, and Data Analysis\"\n\"\"\"\n# Import necessary modules\nimport sys\nfrom chatterbot import ChatBot, utils\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nfrom chatterbot.comparisons import LevenshteinDistance\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLineEdit, QListView, \n    QMessageBox, QVBoxLayout, QStyledItemDelegate)\nfrom PyQt5.QtCore import (Qt, QAbstractListModel, QMargins, QSize, QRect, QPoint, \n    QThread, pyqtSignal)\nfrom PyQt5.QtGui import QIcon, QColor, QImage, QPolygon\n\nstyle_sheet = \"\"\" \n    QPushButton {\n        background: #83E56C /* Green */\n    }\n\n    QListView {\n        background: #FDF3DD\n    }\"\"\"\n\nclass ChatWorkerThread(QThread):\n    # Signal emitted when the chatbot is finished training\n    training_finished = pyqtSignal()\n\n    def __init__(self, chatbot):\n        super().__init__()\n        self.chatbot = chatbot\n\n    def run(self):\n        \"\"\"This function handles training the chatbot. Once the training is complete, the \n        training_finished signal is emitted, which allows the user to begin chatting.\"\"\"\n        self.trainer = ChatterBotCorpusTrainer(self.chatbot)\n        self.trainer.train(\"chatterbot.corpus.english\")\n        self.training_finished.emit()\n    \nclass ChatLogModel(QAbstractListModel):\n\n    def __init__(self):\n        super().__init__()\n        self.chat_messages = []\n\n    def rowCount(self, index):\n        \"\"\"Necessary to include rowCount() when subclassing QAbstractListModel. \n        For this program, we only need to update the number of rows in the model,\n        which is based on the length of chat_messages.\"\"\"\n        return len(self.chat_messages)\n\n    def data(self, index, role=Qt.DisplayRole):\n        \"\"\"Necessary to include data() when subclassing QAbstractListModel. 
Retrieves \n        items from the list and returns data specified by the role, which in this case \n        is displayed as text.\"\"\"\n        if role == Qt.DisplayRole:\n            return self.chat_messages[index.row()]\n\n    def appendMessage(self, user_input, user_or_chatbot):\n        \"\"\"First, append new messages to chat_messages. Doing so will update the number\n        of rows and indexes in the model (rowCount()), which will then update the data().\"\"\"\n        self.chat_messages.append([user_input, user_or_chatbot])\n        # Emit signal to indicate that the layout of items in the model has changed\n        self.layoutChanged.emit() \n\nclass DrawSpeechBubbleDelegate(QStyledItemDelegate):\n\n    def __init__(self):\n        super().__init__()\n        self.image_offset = 5 # Horizontal offset for the image \n        # The following variables are used when drawing the speech bubbles\n        self.side_offset, self.top_offset = 40, 5 \n        self.tail_offset_x, self.tail_offset_y = 30, 0\n        self.text_side_offset, self.text_top_offset = 50, 15\n\n    def paint(self, painter, option, index):\n        \"\"\"Reimplement the delegate's paint() function. Renders the delegate using the specified QPainter \n        (painter) and QStyleOptionViewItem (option) for the item being drawn at the given index (the row value).\n        This function paints the item.\"\"\"\n        text, user_or_chatbot = index.model().data(index, Qt.DisplayRole)\n        image, image_rect = QImage(), QRect() # Initialize objects for the user and chatbot icons\n        color, bubble_margins = QColor(), QMargins() # Initialize objects for drawing speech bubbles\n        tail_points = QPolygon() # Initialize QPolygon object for drawing the tail on the speech bubbles\n\n        # Use user_or_chatbot value to select the image to display, the color of the pen and the\n        # brush. Set the margins for the speech bubble. Set the points for the speech bubble's tail.\n        if user_or_chatbot == \"chatbot\":\n            image.load(\"images/bot.png\")\n            image_rect = QRect(QPoint(option.rect.left() + self.image_offset, option.rect.center().y() - 12), QSize(24, 24))\n            color = QColor(\"#83E56C\")\n            bubble_margins = QMargins(self.side_offset, self.top_offset, self.side_offset, self.top_offset)\n            tail_points = QPolygon([QPoint(option.rect.x() + self.tail_offset_x, option.rect.center().y()),\n                QPoint(option.rect.x() + self.side_offset, option.rect.center().y() - 5),\n                QPoint(option.rect.x() + self.side_offset, option.rect.center().y() + 5)])\n        elif user_or_chatbot == \"user\":\n            image.load(\"images/user.png\")\n            image_rect = QRect(QPoint(option.rect.right() - self.image_offset - 24, option.rect.center().y() - 12), QSize(24, 24))\n            color = QColor(\"#38E0F9\")\n            bubble_margins = QMargins(self.side_offset, self.top_offset, self.side_offset, self.top_offset)\n            tail_points = QPolygon([QPoint(option.rect.right() - self.tail_offset_x, option.rect.center().y()),\n                QPoint(option.rect.right() - self.side_offset, option.rect.center().y() - 5),\n                QPoint(option.rect.right() - self.side_offset, option.rect.center().y() + 5)])\n\n        # Draw the image next to the speech bubble\n        painter.drawImage(image_rect, image)\n\n        # Set the QPainter's pen and brush colors; draw the speech bubble and tail\n        painter.setPen(color)\n        painter.setBrush(color)\n        # Remove the margins from the rectangle to shrink its size \n        painter.drawRoundedRect(option.rect.marginsRemoved(bubble_margins), 5, 5)\n        painter.drawPolygon(tail_points)\n\n        # Draw the text in the speech bubble\n        painter.setPen(QColor(\"#4A4C4B\")) # Reset pen color for the text\n        text_margins = QMargins(self.text_side_offset, self.text_top_offset, self.text_side_offset, 
self.text_top_offset)\n        painter.drawText(option.rect.marginsRemoved(text_margins), Qt.AlignVCenter | Qt.TextWordWrap, text)\n\n    def sizeHint(self, option, index):\n        \"\"\"Reimplement to figure out the size of the item displayed at the given index.\n        Uses option to figure out the style information, in this case, the margins of the speech bubble.\"\"\"\n        text, user_or_chatbot = index.model().data(index, Qt.DisplayRole)\n        font_size = QApplication.fontMetrics() # Calculate the size of the text \n        text_margins = QMargins(self.text_side_offset, self.text_top_offset, self.text_side_offset, self.text_top_offset)\n\n        # Remove the margins, get the rectangle for the font, and add the margins back in\n        rect = option.rect.marginsRemoved(text_margins) \n        rect = font_size.boundingRect(rect, Qt.TextWordWrap, text)\n        rect = rect.marginsAdded(text_margins)\n        return rect.size()\n    \nclass Chatbot(QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.initializeUI()\n\n    def initializeUI(self):\n        \"\"\"Initialize the window and its contents.\"\"\"\n        self.setMinimumSize(450, 600)\n        self.setWindowTitle(\"8.1 - PyQt Chatbot\")\n        self.setWindowFlag(Qt.Window)\n\n        self.chat_started = False\n\n        self.setupWindow()\n        self.show()\n\n    def setupWindow(self):\n        \"\"\"Set up the widgets and model/view instances for the main window.\"\"\"\n        self.chat_button = QPushButton(QIcon(\"images/chat.png\"), \"Start Chat\")\n        self.chat_button.setLayoutDirection(Qt.RightToLeft)\n        self.chat_button.pressed.connect(self.chatButtonPressed)\n\n        # Create the model for keeping track of new messages (data), the list view \n        # for displaying the chat log, and the delegate for drawing the items in the list view\n        self.model = ChatLogModel()\n        self.chat_log_view = QListView()\n        self.chat_log_view.setModel(self.model)\n\n        message_delegate = DrawSpeechBubbleDelegate()\n        self.chat_log_view.setItemDelegate(message_delegate)\n\n        # Create the QLineEdit widget for entering text\n        self.user_input_line = QLineEdit()\n        self.user_input_line.setMinimumHeight(24)\n        self.user_input_line.setPlaceholderText(\"Press 'Start Chat' to begin chatting...\")\n        self.user_input_line.returnPressed.connect(self.enterUserMessage)\n        \n        main_v_box = QVBoxLayout()\n        main_v_box.setContentsMargins(0, 2, 0, 10)\n        main_v_box.addWidget(self.chat_button, alignment=Qt.AlignRight)\n        main_v_box.setSpacing(10)\n        main_v_box.addWidget(self.chat_log_view)\n        main_v_box.addWidget(self.user_input_line)\n        self.setLayout(main_v_box)\n\n    def chatButtonPressed(self):\n        \"\"\"When the user begins chatting, the appearance and state of the chat_button are set, \n        and the chatbot is created. The user can also end the chat.\"\"\"\n        button = self.sender()\n        if button.text() == \"Start Chat\":\n            self.chat_button.setText(\"End Chat\")\n            self.chat_button.setIcon(QIcon(\"images/end.png\"))\n            self.chat_button.setStyleSheet(\"background: #EC7161\") # Red\n            self.chat_button.setDisabled(True)\n            self.createChatbot() \n        elif button.text() == \"End Chat\":\n            self.endCurrentChat()\n\n    def enterUserMessage(self):\n        \"\"\"Get the text from the line edit widget and append the message to the model. 
Then\n        display the chatbot's response.\"\"\"\n        user_input = self.user_input_line.text()\n        if user_input != \"\" and self.chat_started == True:\n            self.model.appendMessage(user_input, \"user\")\n            self.displayChatbotResponse(user_input)\n            self.user_input_line.clear() # Clear the QLineEdit's text\n        \n    def displayChatbotResponse(self, user_input):\n        \"\"\"Get the response from the chatbot, convert the reply to a string and \n        append the text to the model where it will be added to the window.\"\"\"\n        chatbot_reply = self.chatbot.get_response(user_input)\n        self.model.appendMessage(str(chatbot_reply), \"chatbot\")\n        # Uncomment to get the time it takes for the chatbot to respond\n        #print(utils.get_response_time(self.chatbot))\n\n    def createChatbot(self):\n        \"\"\"Create the chatbot and train it in a separate thread.\"\"\" \n        self.chatbot = ChatBot(\"Chatbot\", storage_adapter=\"chatterbot.storage.SQLStorageAdapter\", \n            database_uri='sqlite:///database.sqlite3',\n            logic_adapters=[{\"import_path\": \"chatterbot.logic.BestMatch\",\n                \"statement_comparison_function\": LevenshteinDistance}])\n\n        self.chat_worker = ChatWorkerThread(self.chatbot) # Create worker thread\n        self.chat_worker.training_finished.connect(self.trainingFinished)\n\n        # Feedback for the user. Begin the thread for training the chatbot\n        self.model.appendMessage(\"[INFO] Chatbot is learning. Please wait a moment.\", \"chatbot\")\n        self.chat_worker.start()\n\n    def trainingFinished(self):\n        \"\"\"Once the chatbot has been trained, display messages to the user and start chatting.\"\"\"\n        self.model.appendMessage(\"[INFO] Chatbot is ready to begin chatting with you.\", \"chatbot\")\n        self.model.appendMessage(\"Welcome to Chatbot. This chatbot gets smarter the more you talk with it. Type anything to get started.\", \"chatbot\")\n        self.user_input_line.setPlaceholderText(\"Type your message and press 'Enter'\")\n        self.chat_started = True\n        self.chat_button.setDisabled(False) # Enable the chat_button\n\n    def endCurrentChat(self):\n        \"\"\"Display a QMessageBox to the user asking if they want to quit the current chat.\"\"\"\n        choice = QMessageBox.question(self, \"End Chat\", \n            \"The chat history will be deleted. 
Are you sure you want to end the chat?\", \n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if choice == QMessageBox.Yes:\n # Clearing the list will set the number of rows to 0 and clear the chat area\n self.model.chat_messages = [] \n self.user_input_line.setPlaceholderText(\"Press 'Start Chat' to begin chatting...\")\n self.chat_button.setText(\"Start Chat\")\n self.chat_button.setIcon(QIcon(\"images/chat.png\"))\n self.chat_button.setStyleSheet(\"background: #83E56C\") # Green\n self.chat_started = False \n else:\n self.model.appendMessage(\"I thought you were going to leave me.\", \"chatbot\")\n\n def closeEvent(self, event):\n \"\"\"Display a dialog box to confirm that the user wants to close the application while in a chat.\"\"\"\n if self.chat_started:\n choice = QMessageBox.question(self, 'Leave Chat?', \"Are you sure you want to leave the chat?\",\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if choice == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n app.setStyleSheet(style_sheet)\n window = Chatbot()\n sys.exit(app.exec_())","repo_name":"Apress/modern-pyqt","sub_path":"ch08_chatbot/chatbotGUI.py","file_name":"chatbotGUI.py","file_ext":"py","file_size_in_byte":12949,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"54"} +{"seq_id":"42279877337","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\n\nfrom mydash.utils import render_json\nfrom bookmarks.models import Bookmark\nfrom bookmarks.forms import NewBookmarkForm, EditBookmarkForm\nfrom bookmarks.utils import search_bookmarks\nfrom tags.models import Tag\n\n\ndef list_bookmarks(request):\n \"\"\"Show a list of all bookmarks for a user\"\"\"\n bookmarks = Bookmark.objects.filter(user=request.user)\n\n if request.POST:\n bookmarks = search_bookmarks(request.POST.get('query', None), bookmarks)\n\n context = {\n 'bookmarks': bookmarks,\n }\n return render(request, 'bookmarks/list_bookmarks.html', context)\n\n\ndef list_favorited_bookmarks(request):\n \"\"\"Show a list of all favorited bookmarks for a user\"\"\"\n bookmarks = Bookmark.objects.filter(user=request.user, favorited=True)\n\n if request.POST:\n bookmarks = search_bookmarks(request.POST.get('query', None), bookmarks)\n\n context = {\n 'bookmarks': bookmarks,\n }\n return render(request, 'bookmarks/list_favorited_bookmarks.html', context)\n\n\ndef add_bookmark(request, tag_slug=None):\n \"\"\"Add a new bookmark\"\"\"\n if tag_slug:\n tag = get_object_or_404(Tag, user=request.user, slug=tag_slug)\n else:\n tag = None\n\n form = NewBookmarkForm(request.POST or None, tag=tag, user=request.user)\n\n if request.method == 'POST' and form.is_valid():\n bookmark = form.save()\n return redirect('view-tag', bookmark.tag.slug)\n\n context = {\n 'form': form,\n 'tag': tag,\n }\n return render(request, 'bookmarks/add_bookmark.html', context)\n\n\ndef edit_bookmark(request, slug, tag_slug):\n \"\"\"Edit an existing bookmark\"\"\"\n tag = get_object_or_404(Tag, user=request.user, slug=tag_slug)\n bookmark = get_object_or_404(Bookmark, user=request.user, tag=tag, slug=slug)\n\n form = EditBookmarkForm(request.POST or None, user=request.user, instance=bookmark)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('view-tag', tag_slug)\n\n context = {\n 'tag': tag,\n 'bookmark': bookmark,\n 'form': form,\n }\n return render(request, 
'bookmarks/edit_bookmark.html', context)\n\n\n@require_POST\ndef favorite_bookmark(request, slug, tag_slug):\n    \"\"\"Favorite a bookmark\"\"\"\n    tag = get_object_or_404(Tag, user=request.user, slug=tag_slug)\n    bookmark = get_object_or_404(Bookmark, user=request.user, tag=tag, slug=slug)\n\n    bookmark.favorited = not bookmark.favorited\n    bookmark.save()\n    return render_json(bookmark.pk)\n\n\n@require_POST\ndef delete_bookmark(request, slug, tag_slug):\n    \"\"\"Delete a bookmark\"\"\"\n    tag = get_object_or_404(Tag, user=request.user, slug=tag_slug)\n    bookmark = get_object_or_404(Bookmark, user=request.user, tag=tag, slug=slug)\n\n    bookmark_pk = bookmark.pk\n    bookmark.delete()\n    return render_json(bookmark_pk)\n","repo_name":"dansackett/mydash","sub_path":"apps/bookmarks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36455650800","text":"'''\nSkip M delete N\nDescription\nGiven a linked list and two integers M and N. Traverse the linked list such that you retain M nodes then delete next N nodes, continue the same till end of the linked list.\n\nYou only need to implement a Function in the template.\n\nInput\nFor each testcase, first line of input contains number of elements in the linked list and next M and N respectively space separated. The last line contains the elements of the linked list.\n\nOutput\nPrint the final LL\n\nInput:\n\n8\n\n2 1\n\n9 1 3 5 9 4 10 1\n\nOutput:\n\n9 1 5 9 10 1\n\n\n'''\n# Python program to delete N nodes after M nodes \n \n# Node class \nclass Node: \n \n    # Constructor to initialize the node object \n    def __init__(self, data): \n        self.data = data \n        self.next = None\n \nclass LinkedList: \n \n    # Function to initialize head \n    def __init__(self): \n        self.head = None\n \n    # Function to insert a new node at the beginning \n    def push(self, new_data): \n        new_node = Node(new_data) \n        new_node.next = self.head \n        self.head = new_node \n \n    # Utility function to print the linked list \n    def printList(self): \n        temp = self.head \n        while(temp): \n            print (temp.data, end=' ') \n            temp = temp.next\n \n    def skipMdeleteN(self, M, N): \n        # Implement this \n        temp=self.head\n        p=1\n        while (temp!=None):\n            if M+N-1>=p>=M and temp.next!=None :\n                delt=temp.next\n                temp.next=temp.next.next\n                del(delt)\n############ Faster Above #################\n        # while (temp!=None):\n        \n        #     if M==0:\n        #         self.head=temp.next\n        #         del(temp)\n        #     elif M>1 and M+N-1>=p>=M and temp.next!=None :\n        #         delt=temp.next\n        #         temp.next=temp.next.next\n        #         del(delt)\n        \n            else:\n                temp=temp.next\n            if p==M+N:\n                p-=M+N\n            p+=1\n\n    \n    \n# Driver program to test above function \n \n\nn = int(input())\nM,N = map(int, input().split())\nllist = LinkedList() \nl = list(map(int, input().split()))\nl.reverse()\nfor i in l:\n    llist.push(i)\n\nllist.skipMdeleteN(M, N) \n \nllist.printList()","repo_name":"iamnishantchandra/DSA-QA-Using-Python","sub_path":"10X/Python/Sorting/Skip M delete N LINK.py","file_name":"Skip M delete N LINK.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"21812867140","text":"from django.shortcuts import render,redirect\nfrom ..forms.UpdateUserForm import UpdateUserForm,ProfileUpdateForm\n\n# Create your views here.\n\n# Category\ndef profile(request):\n    return render(request, 'user/profile.html')\n\ndef profile_update(request):\n    if request.method =='POST':\n        # 
instance pre-fills the form with the user's current data\n        # request.FILES is included so an uploaded profile image is handled as well\n        user_form = UpdateUserForm(request.POST,instance=request.user)\n        profile_form = ProfileUpdateForm(request.POST,request.FILES,instance=request.user.profile)\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n            return redirect('user-profile')\n\n    \n    else:\n        user_form = UpdateUserForm(instance=request.user)\n        profile_form = ProfileUpdateForm(instance=request.user.profile) \n    context={\n        'user_form':user_form,\n        'profile_form':profile_form\n    }\n    return render(request, 'user/profile_update.html',context)","repo_name":"urbain32/App-inventory","sub_path":"user/views/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30085574716","text":"#%% KorQuAD data download\r\nimport json\r\nwith open('C:/Users/Young Hun Park/Downloads/KorQuAD_v1.0_train.json') as train_file:\r\n    train_data=json.load(train_file)\r\n\r\n#%%\r\ntrain_data=train_data['data']\r\n#%%\r\n# make data into DataFrame\r\nimport pandas as pd\r\ndef json_to_df(data):\r\n    arrayForDF=[]\r\n    for current_subject in data:\r\n        subject=current_subject['title']\r\n        for current_context in current_subject['paragraphs']:\r\n            context=current_context['context']\r\n            for current_question in current_context['qas']:\r\n                question=current_question['question']\r\n                for answer in current_question['answers']:\r\n                    answer_text=answer['text']\r\n                    answer_start=answer['answer_start']\r\n                    \r\n                    record={\r\n                        \"answer_text\":answer_text,\r\n                        \"answer_start\":answer_start,\r\n                        \"question\":question,\r\n                        \"context\":context,\r\n                        \"subject\":subject\r\n                        \r\n                    }\r\n                    arrayForDF.append(record)\r\n    df=pd.DataFrame(arrayForDF)\r\n    return df\r\n#%%\r\ndata_df=json_to_df(train_data)\r\n#%%\r\n## extract context\r\ncontexts=[]\r\nfor context in data_df.context.unique():\r\n    contexts.append(context)\r\n\r\ncontext_df=pd.DataFrame(contexts)\r\n#%%\r\nprint(context_df.isnull().values.any())\r\n\r\n\r\n#%%\r\n# extract sentences from paragraph\r\nfrom nltk.tokenize import sent_tokenize\r\nsentences=[]\r\nfor context in contexts:\r\n    temp_X=sent_tokenize(context)\r\n    for sentence in temp_X:\r\n        sentences.append(sentence)\r\n\r\n\r\n#%%\r\nfrom gensim.models import word2vec\r\nfrom konlpy.tag import Okt\r\n\r\ntwitter = Okt()\r\n\r\n#Process the text one line at a time.\r\nresult = []\r\nfor line in sentences:\r\n    #morphological analysis, using the base form of each word\r\n    malist = twitter.pos( line, norm=True, stem=True)\r\n    r = []\r\n    for word in malist:\r\n        #process while excluding the \"Josa\", \"Eomi\", and \"Punctuation\" tags\r\n        if not word[1] in [\"Josa\",\"Eomi\",\"Punctuation\"]:\r\n            r.append(word[0])\r\n    #insert a space \" \" between the morphemes. 
Then strip the whitespace on both ends.\r\n    rl = (\" \".join(r)).strip()\r\n    result.append(rl)\r\n    #print(rl)\r\n\r\n    #Save the morphemes to a separate file.\r\nwith open(\"context.nlp\",'w', encoding='utf-8') as fp:\r\n    fp.write(\"\\n\".join(result))\r\n\r\n    #Build the Word2Vec model\r\nwData =word2vec.LineSentence(\"context.nlp\")\r\nwModel =word2vec.Word2Vec(wData, size=200, window=10, hs=1, min_count=2, sg=1)\r\nprint(wModel.wv.most_similar('베토벤'))\r\n#%%\r\nwModel.save('KorQuAD.model')\r\n\r\n#%%\r\nimport gensim\r\nmodel=gensim.models.Word2Vec.load('C:/Users/Young Hun Park/Desktop/python beginner/NLP/KorQuAD.model')\r\n#%%\r\nprint(model.wv.most_similar('박지성'))\r\n\r\n'''\r\n#%%\r\nimport re\r\n\r\nstopwords=['의','가','이','은','들','는','좀','잘','걍','과','도','를','으로','자','에','와','한','하다']\r\n# clean special character\r\n\r\ndef cleanText(readData):\r\n    \r\n    \r\n    text = re.sub('[-=+,#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>`\\'…》]', '', readData)\r\n    \r\n    return text\r\n#%%\r\nfrom konlpy.tag import Okt\r\nokt=Okt()\r\ntokenized_data=[]\r\n\r\nfor sentence in sentences:\r\n    sentence=cleanText(sentence)\r\n    temp_X=okt.morphs(sentence,stem=True)\r\n    temp_X=[word for word in temp_X if not word in stopwords]\r\n    tokenized_data.append(temp_X)\r\n#%%\r\nimport matplotlib.pyplot as plt\r\nfrom gensim.models import Word2Vec\r\nprint('max length:',max(len(l) for l in tokenized_data))\r\nprint('average review length :',sum(map(len, tokenized_data))/len(tokenized_data))\r\n\r\nplt.hist([len(s) for s in tokenized_data],bins=50)\r\nplt.xlabel('length of sentence')\r\nplt.ylabel('number of samples')\r\n\r\nmodel=Word2Vec(sentences=tokenized_data,size=150,window=5,min_count=10,workers=-1)\r\nprint(model.wv.vectors.shape)\r\nprint(model.wv.most_similar('베토벤'))\r\n'''\r\n","repo_name":"0Park/QA_LSTM","sub_path":"QALSTM_embeddinglayer.py","file_name":"QALSTM_embeddinglayer.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14234908491","text":"# -*-coding:utf-8-*-\n\n\nimport os\nimport sys\n\ndef clear_git(path):\n    if os.geteuid():\n        args = [sys.executable] + sys.argv\n        os.execlp('sudo', 'sudo', *args)\n    for dirpath, dirnames, filenames in os.walk(path):\n        if \".git\" in os.listdir(dirpath):\n            git_path = os.path.join(dirpath,\".git\")\n            os.remove(git_path)\n            print(git_path)\n\ndef main():\n    paths = [\n        \"/Users/zhangguo/Codes/scrapers\",\n    ]\n    for path in paths:\n        clear_git(path)\n\n\nif __name__==\"__main__\":\n    main()","repo_name":"Guo-Zhang/mactools","sub_path":"clear_git.py","file_name":"clear_git.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13464489732","text":"from sys import stdin\r\nfrom collections import defaultdict\r\ninput = stdin.readline\r\ntree = defaultdict(int)\r\ntotal = 0\r\n\r\nwhile True:\r\n    name = input().rstrip()\r\n    if not name:\r\n        break\r\n    tree[name] += 1\r\n    total += 1\r\n    \r\nl = sorted(list(tree.keys()))\r\nfor t in l:\r\n    print(f\"{t} {tree[t]/total*100:.4f}\")","repo_name":"yootal/CodingTest","sub_path":"백준/Silver/4358. 
생태학/생태학.py","file_name":"생태학.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14461081001","text":"import time\n\nfrom selenium.common.exceptions import TimeoutException\n\nimport core.drivers.driver_instance as driver_instance\nfrom core.element_type.textbox import TextBox\nfrom core.element_type.button import Button\nfrom core.element_type.table import Table\n\n\ndef scrape_related_products(item):\n timeout_amount = 10\n result = \"Error\"\n URL = \"https://catalog.weidmueller.com/catalog/Start.do;jsessionid=5E9D8162E7FEAFD88246FD0436A48039?ObjectID=\" + str(item) + \"&page=Product\"\n\n try:\n driver_instance.DRIVER.get(URL)\n\n related_products_button = Button(xpath = \"//*[@id=\\\"SimilarProducts\\\"]\", timeout = timeout_amount)\n related_products_button.click()\n\n related_PN_products_table = Table(xpath = \"//div[@id=\\\"SimilarProductsDiv\\\"]//div[@class=\\\"ProductInfoWrapper\\\"]\", timeout = timeout_amount, table_children_timeout = 2)\n \n result = \"\"\n for child in related_PN_products_table.rows:\n part_number_textbox = TextBox(xpath = \"//span[@class=\\\"listAttributeValue products.BaseProduct.bestNr\\\"]\", parent = child, timeout = timeout_amount)\n result += part_number_textbox.get_text() + \",\"\n \n if result == \"\":\n result = \"Error\"\n\n result = result[:-1]\n\n except TimeoutException:\n result = \"Error\"\n\n return result\n\n\ndef scrape_accessories(item):\n timeout_amount = 10\n result = \"Error\"\n URL = \"https://catalog.weidmueller.com/catalog/Start.do;jsessionid=5E9D8162E7FEAFD88246FD0436A48039?ObjectID=\" + str(item) + \"&page=Product\"\n\n try:\n driver_instance.DRIVER.get(URL)\n time.sleep(1)\n\n accesories_products_button = Button(xpath = \"//*[@id=\\\"accessoryList\\\"]\", timeout = timeout_amount)\n accesories_products_button.click()\n\n result = \"\"\n\n all_pn_xpath = \"//div[@id=\\\"accessoryListDiv\\\"]//span[@class=\\\"listAttributeValue products.BaseProduct.bestNr\\\"]\"\n number_of_elements = driver_instance.DRIVER.get_number_of_elements_invisible(xpath= all_pn_xpath, timeout = timeout_amount)\n \n for i in range(1, number_of_elements + 1):\n result_text_box = TextBox(xpath = f\"({all_pn_xpath})[{i}]\", timeout = timeout_amount)\n result += result_text_box.get_text() + \",\"\n\n if result == \"\":\n result = \"Error\"\n else:\n result = result[:-1]\n\n except TimeoutException:\n result = \"Error\"\n\n return result\n","repo_name":"Darkrider123/Scrape-Framework","sub_path":"scraper/implementations/weidmueller.py","file_name":"weidmueller.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37573318768","text":"# Method 1\n# upper bound: seven digits b/c smallest eight digit number > 8 * 9!\n# lower bound: two digits so sum\n# use digit multiset to cut down on search space\n# use f(x) == x as test whether acceptable\n\nimport itertools as it\nimport functools as ft\n\n# Generate factorials for easy reference\nfacs = [1]\nwhile len(facs) < 10:\n facs.append(facs[-1] * len(facs))\n\n# Dummy digit '10' w/ factorial 0 that allows us to simulate the null digit\nfacs.append(0)\ndigits = [0,1,2,3,4,5,6,7,8,9, 10]\n\n# dF(x) == x is a necessary condition of x being a solution\ndef digitFactorial(num):\n sum = 0\n while num > 0:\n sum += facs[num % 10]\n num //= 10\n return sum\n\nsum=0\nsolutions = set() # The same solutions will be generated by 
different multisets\n\n# We only have to check up to seven digits due to the upper bound\n# We ignore trying 0 as a digit because that will come out in factorialSum anyway\n\nfor digitCombination in it.combinations_with_replacement(digits[1:], 7):\n    factorialSum = ft.reduce(lambda x, y: x+y, map(lambda x: facs[x], digitCombination))\n    if factorialSum == digitFactorial(factorialSum) and factorialSum not in solutions and factorialSum >= 10:\n        solutions.add(factorialSum)\n        sum += factorialSum\n\nprint(sum)","repo_name":"hgparker/project_euler","sub_path":"Problem34a.py","file_name":"Problem34a.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11801604431","text":"print(\"ENTER AGE PROGRAM\")\nage = int(input(\"Enter age: \"))\nif(age < 18):\n\tprint(\"You are underage\")\nelif(age >= 18 and age <= 65):\n\tprint(\"You are an adult\")\nelse:\n\tprint(\"You are a senior citizen\")\n \nprint(\"WORKING WITH LOOPS\")\nnumbers = [1, 2, 3, 4, 5]\nfor number in numbers:\n\tprint (number)\n\t\nnumber_size = 0\nwhile number_size < 5:\n\tprint (number_size)\n\tnumber_size += 1\n\n#search for a target number in a list\nnumbers = [2, 5, 8, 12, 9, 15, 7]\ntarget = 9\n\nfor num in numbers:\n    if num == target:\n        print(\"Target number found!\")\n        break\nelse:\n    print(\"Target number not found.\")\n\n#print only odd numbers from a list\nnumbers = [1, 4, 3, 7, 2, 6, 9]\n\nfor num in numbers:\n    if num % 2 == 0:\n        continue\n    print(num)\n\n\nprint(\"MENTAL HEALTH PROGRAM USING DICTIONARIES AND EXCEPTION HANDLING\")\nvalid_input = False\n#flag to keep track of whether the user has entered a valid input for their mental health rating\n\nwhile not valid_input:\n    try:\n        rating = int(input(\"Please rate your mental health from 1 to 10: \"))\n        if 1 <= rating <= 10:\n            valid_input = True\n        else:\n            print(\"Invalid rating. Please enter a number between 1 and 10.\")\n    except ValueError:\n        print(\"Invalid input. Please enter a number.\")\n\nprompts = {\n    1: \"You're feeling really down. Remember, it's okay to ask for help.\",\n    2: \"You're not feeling great. Reach out to someone you trust.\",\n    3: \"You're feeling a bit low. Take some time for self-care.\",\n    4: \"You're feeling okay, but there's room for improvement.\",\n    5: \"You're feeling neutral. Consider engaging in activities you enjoy.\",\n    6: \"You're feeling good. Keep up the positive mindset!\",\n    7: \"You're feeling quite positive. Maintain your well-being.\",\n    8: \"You're feeling great! Keep doing what makes you happy.\",\n    9: \"You're feeling amazing! Share your positive energy with others.\",\n    10: \"You're feeling fantastic! 
Keep up the excellent mental well-being!\"\n}\n\nprint(prompts[rating])\n","repo_name":"AriyoX/recess_assignments","sub_path":"JUNE 22ND/ahumuza_ariyo_nimusiima_morning.py","file_name":"ahumuza_ariyo_nimusiima_morning.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18702024578","text":"import sys\nimport json\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom chalice import Chalice, Response\nfrom chalice import BadRequestError\nfrom chalice import UnauthorizedError\nfrom chalice import NotFoundError\nfrom chalice import CORSConfig\ncors_config = CORSConfig(\n allow_origin='https://foo.example.com',\n allow_headers=['X-Special-Header'],\n max_age=600,\n expose_headers=['X-Special-Header'],\n allow_credentials=True\n)\n\nS3 = boto3.client('s3', region_name='eu-west-1')\nBUCKET = 'shantest21'\n\napp = Chalice(app_name='helloworld')\napp.debug = True\nCITIES_TO_STATE = {\n 'seattle': 'WA',\n 'portland': 'OR',\n}\nOBJECTS = {\n}\n@app.route('/')\ndef index():\n return Response(body='hello world!',\n status_code=200,\n headers={'Content-Type': 'text/plain'})\n\n@app.route('/cities/{city}')\ndef state_of_city(city):\n try:\n return {'state': CITIES_TO_STATE[city]}\n except KeyError:\n raise BadRequestError(\"Unknown city '%s', valid choices are: %s\" % (\n city, ', '.join(CITIES_TO_STATE.keys())))\n\n@app.route('/resource/{value}', methods=['PUT'])\ndef put_test(value):\n return {\"value\": value}\n\n@app.route('/myview', methods=['POST', 'PUT'])\ndef myview():\n pass\n\n@app.route('/objects/{key}', methods=['GET', 'PUT'])\ndef s3objects(key):\n request = app.current_request\n if request.method == 'PUT':\n S3.put_object(Bucket=BUCKET, Key=key,\n Body=json.dumps(request.json_body))\n elif request.method == 'GET':\n try:\n response = S3.get_object(Bucket=BUCKET, Key=key)\n return json.loads(response['Body'].read())\n except ClientError as e:\n raise NotFoundError(key)\n\n# @app.route('/objects/{key}', methods=['GET', 'PUT'])\n# def myobject(key):\n# request = app.current_request\n# if request.method == 'PUT':\n# OBJECTS[key] = request.json_body\n# elif request.method == 'GET':\n# try:\n# return {key: OBJECTS[key]}\n# except KeyError:\n# raise NotFoundError(key)\n\n@app.route('/introspect')\ndef introspect():\n return app.current_request.to_dict()\n\n@app.route('/supports-cors', methods=['PUT'], cors=True)\ndef supports_cors():\n return {}\n@app.route('/custom_cors', methods=['GET'], cors=cors_config)\ndef supports_custom_cors():\n return {'cors': True}\n\n@app.route('/authenticated', methods=['GET'], api_key_required=True)\ndef authenticated():\n return {\"secure\": True}\n\n \n# The view function above will return {\"hello\": \"world\"}\n# whenever you make an HTTP GET request to '/'.\n#\n# Here are a few more examples:\n#\n# @app.route('/hello/{name}')\n# def hello_name(name):\n# # '/hello/james' -> {\"hello\": \"james\"}\n# return {'hello': name}\n#\n# @app.route('/users', methods=['POST'])\n# def create_user():\n# # This is the JSON body the user sent in their POST request.\n# user_as_json = app.current_request.json_body\n# # We'll echo the json body back to the user in a 'user' key.\n# return {'user': user_as_json}\n#\n# See the README documentation for more 
examples.\n#\n","repo_name":"shanmugavela/helloworld","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40168587733","text":"import math\nimport re\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom textblob import TextBlob\n\n\nclass TwitterClient(object):\n '''\n Generic Twitter Class for sentiment analysis.\n '''\n\n def __init__(self):\n '''\n Class constructor or initialization method.\n '''\n # keys and tokens from the Twitter Dev Console\n consumer_key = 'yUNVJt7FXanj5wV1RUOBQnDlY'\n consumer_secret = 'pjs72BsMIGhmzsTdbd7YNJdAUYSgZQDzPfcc2LZLHHn9zdbKma'\n access_token = '2215814282-XGV9fci1oX8p1Qb03KRJgLXSl7kGdyTV2xRA2l1'\n access_token_secret = 'bOubbtfsq1iTdVQQxGjUB64oELhUfxrSIWCVFmQs45DH7'\n\n # attempt authentication\n try:\n # create OAuthHandler object\n self.auth = OAuthHandler(consumer_key, consumer_secret)\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n\n # Making life easier down the road.\n self.error = None\n except:\n print(\"Error: Authentication Failed\")\n\n def clean_tweet(self, tweet):\n '''\n Utility function to clean tweet text by removing links, special characters\n using simple regex statements.\n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t]) | (\\w+: / {2}/ \\S+)\", \" \", tweet).split())\n\n def get_tweet_sentiment(self, tweet):\n '''\n Utility function to classify sentiment of passed tweet\n using textblob's sentiment method\n '''\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'\n\n def get_tweets(self, query, count=10):\n '''\n Main function to fetch tweets and parse them.\n '''\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))\n self.error = str(e)\n\n\ndef go(tweet):\n # creating object of TwitterClient Class\n api = TwitterClient()\n \n # calling function to get tweets\n tweets = api.get_tweets(query=tweet, count=200)\n\n positive_percentage = None\n negative_percentage = None\n neutral_percentage = None\n\n ptweets = None\n ntweets = None\n neutweets = None\n\n # Some validation.\n if tweets == None:\n # Since the error is already present, just pass.\n pass\n else:\n ptweets = []\n ntweets = []\n neutweets = []\n\n for tw in tweets:\n if tw['sentiment'] == 'positive':\n # picking positive tweets from tweets\n ptweets.append(tw)\n elif tw['sentiment'] == 'negative':\n # picking negative tweets 
from tweets\n ntweets.append(tw)\n else:\n # picking neutral tweets from tweets\n neutweets.append(tw)\n \n # percentage of positive tweets\n positive_percentage = round(100 * len(ptweets) / len(tweets), 2)\n print(\"Positive tweets percentage: {} %\".format(positive_percentage))\n \n # percentage of negative tweets\n negative_percentage = round(100 * len(ntweets) / len(tweets), 2)\n print(\"Negative tweets percentage: {} %\".format(negative_percentage))\n\n # percentage of neutral tweets\n neutral_percentage = round(100 - positive_percentage - negative_percentage, 2)\n print(\"Neutral tweets percentage: {} % \\\n \".format(neutral_percentage))\n\n # printing first 5 positive tweets\n print(\"\\n\\nPositive tweets:\")\n for tweet in ptweets[:10]:\n print(tweet['text'])\n\n # printing first 5 negative tweets\n print(\"\\n\\nNegative tweets:\")\n for tweet in ntweets[:10]:\n print(tweet['text'])\n\n return (positive_percentage, ptweets, negative_percentage, ntweets, neutral_percentage, neutweets, api.error)\n\n# if __name__ == \"__main__\":\n# # calling main function\n# main()\n","repo_name":"kavurisrikanth/lexicon","sub_path":"learnapp/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20533637506","text":"import os\nimport torch\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.optim as optim\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math\nimport time\nBASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))\n\n# import from local files\nimport csv_parser\nfrom models import GRUModel6_att, output_limits, GRUModel5, GRUModel6\n\nOUTPUT_INDICES = [0,1,2,3,9]\nuse_cuda = True\n\nclass IOStream():\n def __init__(self, path):\n self.f = open(path, 'a')\n\n def cprint(self, text):\n print(text)\n self.f.write(text+'\\n')\n self.f.flush()\n\n def close(self):\n self.f.close()\n\ndef get_data_indices(data_size, batch_size=32, test_percentage=0.2):\n if test_percentage == 0:\n return data_size\n elif test_percentage == 1:\n return 0\n\n total_batch_num = math.ceil(data_size / batch_size)\n val_batch_num = math.floor(total_batch_num * test_percentage)\n train_batch_num = total_batch_num - val_batch_num\n\n train_data_end_index = train_batch_num * batch_size - 1\n\n return int(train_data_end_index)\n\ndef get_data_loaders(dataset, batch_size=32, test_percentage=0.2):\n # Load training and validation data. 
\n    data_size = len(dataset)\n\n    end_training_index = get_data_indices(data_size, batch_size, test_percentage=test_percentage)\n\n    train_loader = data.DataLoader(dataset, batch_size=batch_size, sampler=data.SequentialSampler(range(end_training_index)))\n    val_loader = data.DataLoader(dataset, batch_size=batch_size, sampler=data.SequentialSampler(range(end_training_index+1, data_size)))\n\n    return train_loader, val_loader\n    \ndef evaluate(model, eval_loaders, criterion=nn.MSELoss()):\n    model.eval()\n    # Evaluate the model by returning the loss\n    total_loss = 0.0\n    loss_iter = 0\n    for val_loader in eval_loaders:\n        for detector_ids, values, locations, label in val_loader:\n            if use_cuda and torch.cuda.is_available():\n                values = values.cuda()\n                locations = locations.cuda()\n                label = label.cuda()\n            output = model(values, locations)\n            loss = criterion(output, label)\n            total_loss += loss.item()\n            loss_iter += 1\n\n    loss = float(total_loss) / loss_iter\n    return loss\n\ndef train(io, model, train_loaders, eval_loaders, num_epochs=500, learning_rate=0.0005):\n    if use_cuda and torch.cuda.is_available():\n        model.cuda()\n        print('CUDA is available! Training on GPU ...')\n    else:\n        print('CUDA is not available. Training on CPU ...')\n    \n    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)\n    criterion = nn.MSELoss()\n\n    train_loss = np.zeros(num_epochs)\n    val_loss = np.zeros(num_epochs)\n\n    for epoch in range(num_epochs):\n        start_time = time.time()\n        total_train_loss = 0.0\n        iter_per_epoch = 0\n        model.train()\n        for train_loader in train_loaders:\n            for detector_ids, values, locations, train_label in train_loader:\n                if use_cuda and torch.cuda.is_available():\n                    values = values.cuda()\n                    locations = locations.cuda()\n                    train_label = train_label.cuda()\n                output = model(values, locations)\n                loss = criterion(output, train_label) #all train_labels are the same, because they came from the same set of y values\n                loss.backward()\n                optimizer.step()\n                optimizer.zero_grad()\n\n                total_train_loss += loss.item()\n                iter_per_epoch += 1\n\n        train_loss[epoch] = total_train_loss / iter_per_epoch\n        step_val_loss = evaluate(model, eval_loaders, criterion)\n        val_loss[epoch] = step_val_loss\n\n        io.cprint((\"Epoch {}: Train loss: {} | \"+\n                   \"Validation loss: {}\").format(\n            epoch + 1,\n            train_loss[epoch],\n            step_val_loss))\n        \n        if epoch == 0 or step_val_loss < min_val_loss:\n            min_val_loss = step_val_loss\n            min_val_loss_index = epoch\n            best_model = model.state_dict()\n\n        end_time = time.time()\n        io.cprint(F\"Total time for epoch {epoch + 1}: {end_time - start_time}s\\n\")\n\n    io.cprint(F\"\\nMinimum validation mse: {min_val_loss:.5f}\\nEpoch: {min_val_loss_index + 1}\")\n\n    torch.save(best_model, BASE_DIR + F'/../model/{model.name}-hidden_size{model.hidden_size}-version{model.version}-checkpoint', _use_new_zipfile_serialization=False)\n\n    plt.plot(train_loss)\n    plt.plot(val_loss)\n    plt.title(F'mean squared loss: Indices {OUTPUT_INDICES} \\\n        \\nMinimum validation mse: {min_val_loss:.5f}(Epoch: {min_val_loss_index + 1})')\n    plt.ylabel('msl')\n    plt.xlabel('epoch')\n    plt.legend([F'train', \\\n        F'test'], loc='upper right')\n    plt.savefig(BASE_DIR + F'/../figures/{model.name}-hidden_size{model.hidden_size}-version{model.version}.png')\n    \ndef train_all_QEW_test_all_QEW(io):\n    batch_size = 32\n    learning_rate = 0.001\n    num_epochs = 500\n    hidden_size = 100\n    use_double_GRU = False\n    GRUModel = GRUModel6_att\n\n    QEW_dataset_indicies = [2, 3, 4, 5]\n\n    train_loaders = []\n    eval_loaders = []\n    for i in 
QEW_dataset_indicies:\n x, y = csv_parser.csv_read_structure(BASE_DIR + F'/../QEW-dataset/dataset_QEW{i}')\n\n detectors = torch.tensor([row[1] for row in x]).float()\n values = torch.tensor([row[2] for row in x]).float()\n locations = torch.tensor([row[3] for row in x]).float()\n\n y = torch.tensor(y)[:, torch.tensor(OUTPUT_INDICES)] # only take the needed columns from y\n\n dataset = data.TensorDataset(detectors, values, locations, y)\n\n train_loader, val_loader = get_data_loaders(dataset, batch_size=batch_size, test_percentage=0.2)\n\n train_loaders.append(train_loader)\n eval_loaders.append(val_loader)\n\n model = GRUModel(hidden_size=hidden_size, detector_is_in_order=use_double_GRU) # Assuming all detectors are aligned and in order, else set to False\n\n checkpoint_path = BASE_DIR + F'/../model/{model.name}-hidden_size{model.hidden_size}-version{model.version}-2345-checkpoint'\n if os.path.exists(checkpoint_path):\n try:\n state = torch.load(checkpoint_path)\n model.load_state_dict(state)\n io.cprint(F\"Taken model checkpoint from: {checkpoint_path}\")\n except:\n io.cprint(\"Model doesn't match checkpoint, starting from random model weights...\")\n else:\n io.cprint(\"Starting from random model weights...\")\n\n train(io, model, train_loaders, eval_loaders, num_epochs=num_epochs, learning_rate=learning_rate)\n\ndef train_partial_QEW_test_leftout_QEW_network(io, leftout_network_index=4):\n transfer_learning_percentage = 0.7\n batch_size = 32\n learning_rate = 0.0005\n num_epochs = 100\n hidden_size = 100\n transfer_learning = True\n use_double_GRU = False\n GRUModel = GRUModel6_att\n\n if not transfer_learning:\n QEW_dataset_indicies = [2,3,4,5]\n else:\n QEW_dataset_indicies = [leftout_network_index]\n\n train_loaders = []\n eval_loaders = []\n for i in QEW_dataset_indicies:\n x, y = csv_parser.csv_read_structure(BASE_DIR + F'/../QEW-dataset/dataset_QEW{i}')\n\n detectors = torch.tensor([row[1] for row in x]).float()\n values = torch.tensor([row[2] for row in x]).float()\n locations = torch.tensor([row[3] for row in x]).float()\n\n y = torch.tensor(y)[:, torch.tensor(OUTPUT_INDICES)] # only take the needed columns from y\n\n dataset = data.TensorDataset(detectors, values, locations, y)\n\n if not transfer_learning:\n data_loader, _ = get_data_loaders(dataset, batch_size=batch_size, test_percentage=0)\n\n if i != leftout_network_index:\n train_loaders.append(data_loader)\n else:\n eval_loaders.append(data_loader)\n else:\n train_loader, val_loader = get_data_loaders(dataset, batch_size=batch_size, test_percentage=transfer_learning_percentage)\n train_loaders.append(train_loader)\n eval_loaders.append(val_loader)\n\n model = GRUModel(hidden_size=hidden_size, detector_is_in_order=use_double_GRU).float() # Assuming all detectors are aligned and in order, else set to False\n checkpoint_path = BASE_DIR + F'/../model/{model.name}-hidden_size{model.hidden_size}-version{model.version}-checkpoint'\n\n if os.path.exists(checkpoint_path):\n try:\n state = torch.load(checkpoint_path)\n model.load_state_dict(state)\n io.cprint(F\"Taken model checkpoint from: {checkpoint_path}\")\n except:\n io.cprint(\"Model doesn't match checkpoint, starting from random model weights...\")\n else:\n io.cprint(\"Starting from random model weights...\")\n\n train(io, model, train_loaders, eval_loaders, num_epochs=num_epochs, learning_rate=learning_rate)\n\ndef train_QEW_test_on_small_network(io):\n batch_size = 32\n learning_rate = 0.0005\n num_epochs = 500\n hidden_size = 100\n use_double_GRU = True\n GRUModel = 
GRUModel6\n\n    train_loaders = []\n    eval_loaders = []\n\n    # Get validation data from data2, data3\n    x1, y1 = csv_parser.csv_read_structure(BASE_DIR + '/../data2/dataset1')\n    x2, y2 = csv_parser.csv_read_structure(BASE_DIR + '/../data3/dataset2')\n    x = x1 + x2\n    y = y1 + y2\n    \n    detectors = torch.tensor([row[1] for row in x]).float()\n    values = torch.tensor([row[2] for row in x]).float()\n    locations = torch.tensor([row[3] for row in x]).float()\n\n    y = torch.tensor(y)[:, torch.tensor(OUTPUT_INDICES)] # only take the needed columns from y\n\n    dataset = data.TensorDataset(detectors, values, locations, y)\n    train_loader, val_loader = get_data_loaders(dataset, batch_size=batch_size, test_percentage=0.2)\n\n    train_loaders.append(train_loader)\n    eval_loaders.append(val_loader)\n\n    model = GRUModel(hidden_size=hidden_size, detector_is_in_order=use_double_GRU) # Assuming all detectors are aligned and in order, else set to False\n\n    if os.path.exists(BASE_DIR + F'/../model/{model.name}-hidden_size{model.hidden_size}-version{model.version}-checkpoint'):\n        try:\n            state = torch.load(BASE_DIR + F'/../model/{model.name}-hidden_size{model.hidden_size}-version{model.version}-checkpoint')\n            model.load_state_dict(state)\n            io.cprint(F\"Taken model checkpoint from: {model.name}-hidden_size{model.hidden_size}-version{model.version}-checkpoint\")\n        except:\n            io.cprint(\"Model doesn't match checkpoint, starting from random model weights...\")\n    else:\n        io.cprint(\"Starting from random model weights...\")\n\n    train(io, model, train_loaders, eval_loaders, num_epochs=num_epochs, learning_rate=learning_rate)\n\nif __name__ == '__main__':\n    fig_dir = os.path.join(BASE_DIR, \"../figures\")\n    if not os.path.exists(fig_dir):\n        os.makedirs(fig_dir)\n    log_dir = os.path.join(BASE_DIR, \"../log\")\n    if not os.path.exists(log_dir):\n        os.makedirs(log_dir)\n    io = IOStream(os.path.join(log_dir, 'run-transfer.log'))\n\n    #train_all_QEW_test_all_QEW(io)\n    train_QEW_test_on_small_network(io)\n    #train_partial_QEW_test_leftout_QEW_network(io, leftout_network_index=5)","repo_name":"antianxu/Aimsun_microscopic_deep_calibration","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72373037283","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author : RaXianch\n# CreatDATE : 2023/5/30\n# CreatTIME : 16:30\n# Blog : https://blog.raxianch.moe/\n# Github : https://github.com/DeSireFire\n__author__ = 'RaXianch'\n\nimport os\nimport time\nimport json\nimport logging\nfrom typing import Dict\nfrom pprint import pprint\nfrom datetime import datetime\nfrom fastapi.responses import JSONResponse\nfrom starlette.responses import FileResponse\nfrom log_server.components import rename_log_file\nfrom server_core.control import constructResponse\nfrom fastapi.security import OAuth2PasswordRequestForm\nfrom fastapi import Header, HTTPException, Request, APIRouter, Body, Depends, status, Query\n# Unified response data structure\nfrom server_core.conf import BASE_DIR\nfrom loguru import logger as sub_logger\n\nfrom utils.other import get_md5\nfrom .components import \\\n    get_projects_info, check_pid, \\\n    add_project_info, del_project_info, \\\n    update_project_infos, get_query_all, add_data_one, check_id, get_fetch_one, del_data_one, update_data, \\\n    add_job_one, get_query_count, synchronous_workers, synchronous_jobs, get_today_job_infos_by_wid, \\\n    update_status_for_old_jobs, update_status_for_old_comon_jobs, 
get_long_job_infos_by_wid, read_latest_lines\nfrom .models import WorkerInfos, ProjectInfos, JobInfos\n\nroute = APIRouter()\n\n\n# Project views\n@route.post(\"/add_project\", summary=\"Create a project\")\nasync def add_project(request: Request):\n data = await request.body()\n fdata = await request.form()\n data = dict(fdata)\n\n callbackJson = constructResponse()\n callbackJson.statusCode = 400\n content = {}\n if not check_pid(name=data.get(\"name\")):\n result = add_project_info(data)\n if result:\n callbackJson.statusCode = 200\n return callbackJson.callBacker(content)\n\n\n@route.post(\"/update_project\", summary=\"Update a project\")\nasync def update_project(request: Request):\n # data = await request.body()\n fdata = await request.form()\n data = dict(fdata)\n\n callbackJson = constructResponse()\n callbackJson.statusCode = 400\n content = {}\n if check_pid(pid=data.get(\"pid\")):\n result = update_project_infos(data)\n if result:\n callbackJson.statusCode = 200\n return callbackJson.callBacker(content)\n\n\n@route.delete(\"/del_project\", summary=\"Delete a project\")\nasync def del_project(request: Request, pid: str = Query(None)):\n \"\"\"\n Receive the project to delete.\n Parameters are passed via the URL query string.\n :param request: the request object\n :param pid: the pid passed with the request\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n del_data = dict(request.query_params)\n callbackJson.url = request.url\n content = del_data\n\n judgements = {\n \"Invalid file..\": True,\n \"The server cannot find the requested resource\": True,\n }\n\n if all(list(judgements.values())):\n # os.remove(del_file_path)\n res = del_project_info(del_data)\n else:\n callbackJson.statusCode = 404\n for k, v in judgements.items():\n if not v:\n callbackJson.message = k\n return callbackJson.callBacker(content)\n\n\n@route.get(\"/get_project\", summary=\"Get project info\")\nasync def get_project(request: Request, pid: str = Query(None)):\n \"\"\"\n Get project info\n :param request:\n :param pid:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n one = get_fetch_one(ProjectInfos, pid=pid) or []\n # pprint(pro_list)\n # Convert to the business response payload\n content.update(one)\n return callbackJson.callBacker(content)\n\n\n@route.get(\"/get_projects\", summary=\"Get the project list\")\nasync def get_projects(request: Request):\n \"\"\"\n Get the project list\n :param request:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n pro_list = get_projects_info() or []\n # pprint(pro_list)\n # Convert to the business response payload\n content[\"list\"] = pro_list or None\n content[\"pageTotal\"] = len(pro_list)\n return callbackJson.callBacker(content)\n\n\n@route.get(\"/get_projects_names\", summary=\"Get all project names\")\nasync def get_projects_names(request: Request):\n \"\"\"\n Get project names\n :param request:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n pro_list = get_projects_info() or []\n names = [p.get(\"name\") for p in pro_list] # pro_list is a list of row dicts, so iterate it directly\n # pprint(pro_list)\n # Convert to the business response payload\n content[\"list\"] = names or None\n content[\"pageTotal\"] = len(pro_list)\n return callbackJson.callBacker(content)\n\n\n# Workflow views\n@route.get(\"/get_workers\", summary=\"Get the workflow list for a project\")\nasync def get_workers(request: Request, pid: str = Query(None)):\n \"\"\"\n Get the workflow list for a project\n :param request:\n :param pid:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n workers_list = get_query_all(model=WorkerInfos, pid=pid) or []\n pprint(workers_list)\n # Convert to the business response payload\n content[\"list\"] = workers_list or None\n 
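# Illustrative envelope (hypothetical values) that callBacker is assumed to\n # build around content for these list endpoints:\n # {\"list\": [{\"wid\": \"...\", \"pid\": \"...\", \"name\": \"...\"}], \"pageTotal\": 1}\n 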
content[\"pageTotal\"] = len(workers_list)\n return callbackJson.callBacker(content)\n\n\n@route.get(\"/get_worker\", summary=\"获取工作流信息\")\nasync def get_worker(request: Request, wid: str = Query(None)):\n \"\"\"\n 获取工作流信息\n :param request:\n :param name:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n workers_info = get_fetch_one(model=WorkerInfos, wid=wid) or {}\n # 转换为业务响应数据\n if workers_info:\n content.update(workers_info)\n else:\n callbackJson.statusCode = 400\n\n return callbackJson.callBacker(content)\n\n\n@route.post(\"/add_workers\", summary=\"新增工作流\")\nasync def add_workers(request: Request):\n data = await request.body()\n fdata = await request.form()\n data = dict(fdata)\n\n callbackJson = constructResponse()\n callbackJson.statusCode = 400\n content = {}\n name = data.get(\"name\")\n pid = data.get(\"pid\")\n temp_wid = get_md5(f\"{name}_{pid}\")\n project_info = get_fetch_one(model=ProjectInfos, pid=pid)\n data[\"wid\"] = temp_wid\n data[\"p_nickname\"] = project_info.get(\"nickname\")\n data[\"pid\"] = pid\n # 检测所属项目存在\n if check_pid(pid=pid):\n # 检测工作流是否存在\n if not check_id(model=WorkerInfos, wid=temp_wid):\n result = add_data_one(WorkerInfos, data)\n if result:\n # 更新项目上面显示的工作流数量\n synchronous_workers(pid)\n callbackJson.statusCode = 200\n else:\n callbackJson.resData[\"errMsg\"] = \"数据添加错误!\"\n else:\n callbackJson.resData[\"errMsg\"] = \"工作流已存在!\"\n else:\n callbackJson.resData[\"errMsg\"] = \"未查询到所属项目!\"\n return callbackJson.callBacker(content)\n\n\n@route.delete(\"/del_workers\", summary=\"删除工作流\")\nasync def del_workers(request: Request, pid: str = Query(None), wid: str = Query(None)):\n \"\"\"\n 接收要删除的项目信息\n 参数以url传参的方式接收,数据结构为\n :param request: 请求对象\n :param pid: 请求传输过来的pid参数\n :param wid: 请求传输过来的wid参数\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n del_data = dict(request.query_params)\n callbackJson.url = request.url\n content = del_data\n\n jugements = {\n \"无效的文件..\": True,\n \"服务器找不到请求的资源\": True,\n }\n\n if all(list(jugements.values())):\n # os.remove(del_file_path)\n res = del_data_one(model=WorkerInfos, **del_data)\n\n # 更新项目上面显示的工作流数量\n synchronous_workers(pid)\n else:\n callbackJson.statusCode = 404\n for k, v in jugements.items():\n if not v:\n callbackJson.message = k\n return callbackJson.callBacker(content)\n\n\n@route.post(\"/update_workers\", summary=\"修改工作流\")\nasync def update_workers(request: Request):\n # data = await request.body()\n fdata = await request.form()\n data = dict(fdata)\n\n callbackJson = constructResponse()\n callbackJson.statusCode = 400\n content = {}\n name = data.get(\"name\")\n pid = data.get(\"pid\")\n temp_wid = get_md5(f\"{name}_{pid}\")\n pn = get_fetch_one(model=ProjectInfos, pid=pid).get(\"nickname\")\n data[\"p_nickname\"] = pn\n # 检测所属项目存在\n if check_pid(pid=pid):\n # 检测工作流是否存在\n if check_id(model=WorkerInfos, wid=temp_wid):\n result = update_data(WorkerInfos, [data])\n if result:\n callbackJson.statusCode = 200\n else:\n callbackJson.resData[\"errMsg\"] = \"修改工作流错误!\"\n else:\n callbackJson.resData[\"errMsg\"] = \"工作流不存在!\"\n else:\n callbackJson.resData[\"errMsg\"] = \"未查询到所属项目!\"\n return callbackJson.callBacker(content)\n\n\n# 任务实例视图\n@route.get(\"/get_jobs\", summary=\"获取任务列表\")\nasync def get_jobs(request: Request, pid: str = Query(None), wid: str = Query(None), jid: str = Query(None)):\n \"\"\"\n 获取任务列表\n :param request:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n 
content = {}\n if pid:\n jobs_list = get_query_all(model=JobInfos, sort_field=\"end_time\", descending=True, pid=pid) or []\n else:\n jobs_list = get_query_all(model=JobInfos, sort_field=\"end_time\", descending=True) or []\n\n # pprint(jobs_list[0])\n # Convert to the business response payload\n content[\"list\"] = jobs_list or None\n content[\"pageTotal\"] = len(jobs_list)\n return callbackJson.callBacker(content)\n\n\n@route.delete(\"/del_jobs\", summary=\"Delete a job instance\")\nasync def del_jobs(request: Request, pid: str = Query(None), wid: str = Query(None), jid: str = Query(None)):\n \"\"\"\n Receive the job instance to delete.\n Parameters are passed via the URL query string.\n :param request: the request object\n :param pid: the pid passed with the request\n :param wid: the wid passed with the request\n :param jid: the jid passed with the request\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n del_data = dict(request.query_params)\n callbackJson.url = request.url\n content = del_data\n judgements = {\n \"Invalid file..\": True,\n \"The server cannot find the requested resource\": True,\n }\n\n if all(list(judgements.values())):\n # os.remove(del_file_path)\n res = del_data_one(model=JobInfos, **del_data)\n synchronous_jobs(del_data.get(\"pid\"))\n else:\n callbackJson.statusCode = 404\n for k, v in judgements.items():\n if not v:\n callbackJson.message = k\n return callbackJson.callBacker(content)\n\n\n@route.get(\"/get_log\", summary=\"Get the job log\")\nasync def get_log(request: Request,\n pid: str = Query(None),\n wid: str = Query(None),\n jid: str = Query(None),\n lv: str = Query(None)\n ):\n \"\"\"\n Get the job log\n :param request:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n job_info = get_query_all(model=JobInfos, pid=pid, wid=wid, jid=jid) or [{}]\n log_file_path = job_info[0].get(\"log_file_path\", None)\n if lv:\n log_file_path = rename_log_file(log_file_path, lv)\n log_content = \"\"\n try:\n if not log_file_path:\n raise FileNotFoundError\n\n # with open(log_file_path, encoding=\"utf-8\") as f:\n # log_content = f.read()\n\n # Read only the latest 1000 log lines\n log_content_lines = read_latest_lines(log_file_path, num_lines=1000) or []\n if log_content_lines:\n log_content = \"\".join(log_content_lines).strip()\n print(f\"log_content...>{log_content}\")\n\n except FileNotFoundError as FNFE:\n # The requested file was not found\n log_content = \"No matching log found...\"\n\n # Convert to the business response payload\n content[\"name\"] = job_info[0].get(\"name\", None)\n content[\"p_nickname\"] = job_info[0].get(\"p_nickname\", None)\n content[\"w_nickname\"] = job_info[0].get(\"w_nickname\", None)\n content[\"run_user\"] = job_info[0].get(\"run_user\", None)\n content[\"content\"] = log_content or None\n return callbackJson.callBacker(content)\n\n@route.get(\"/download_log\", summary=\"Download the job log\")\nasync def download_log(request: Request,\n pid: str = Query(None),\n wid: str = Query(None),\n jid: str = Query(None),\n lv: str = Query(None)\n ):\n \"\"\"\n Download the job log\n :param request:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n job_info = get_query_all(model=JobInfos, pid=pid, wid=wid, jid=jid) or [{}]\n log_file_path = job_info[0].get(\"log_file_path\", None)\n if lv:\n log_file_path = rename_log_file(log_file_path, lv)\n log_content = \"\"\n try:\n if not log_file_path:\n raise FileNotFoundError\n\n with open(log_file_path, encoding=\"utf-8\") as f:\n log_content = f.read()\n\n except FileNotFoundError as FNFE:\n # The requested file was not found\n callbackJson.statusCode = 404\n callbackJson.message = \"No matching log found...\"\n\n # Convert to the business response payload\n content[\"name\"] = job_info[0].get(\"name\", None)\n content[\"p_nickname\"] = job_info[0].get(\"p_nickname\", None)\n 
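# read_latest_lines (used by get_log above) is presumably a tail-style read;\n # a minimal equivalent sketch (assumption, not the actual project helper):\n # from collections import deque\n # def tail(path, num_lines=1000):\n # with open(path, encoding=\"utf-8\") as fh:\n # return list(deque(fh, maxlen=num_lines))\n 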
content[\"w_nickname\"] = job_info[0].get(\"w_nickname\", None)\n content[\"run_user\"] = job_info[0].get(\"run_user\", None)\n content[\"content\"] = log_content or None\n return callbackJson.callBacker(content)\n\n@route.post(\"/add_job\", summary=\"新增任务\")\nasync def add_job(request: Request, pid: str = Query(None), wid: str = Query(None), jid: str = Query(None)):\n \"\"\"\n 通过传入工作流实例wid等信息创建实际的任务实例记录\n\n wid: string;\n pid: string;\n p_nickname: string;\n name: string;\n nickname: string;\n crawl_frequency: string;\n description: string;\n status: string;\n modify_user: string;\n extra: string;\n create_time: string;\n update_time: string;\n\n :param request:\n :return:\n \"\"\"\n fdata = await request.form()\n data = dict(fdata)\n callbackJson = constructResponse()\n callbackJson.statusCode = 400\n content = {}\n result = add_job_one(JobInfos, data)\n # 同步项目下的任务数量\n synchronous_jobs(data.get(\"pid\"))\n if result:\n jid = result.get_jid()\n callbackJson.statusCode = 200\n content[\"jid\"] = jid\n return callbackJson.callBacker(content)\n\n\n# todo 项目首页视图\n@route.get(\"/get_ptasks\", summary=\"获取任务状态饼图\")\nasync def get_ptasks(request: Request, pid: str = Query(None)):\n \"\"\"\n 获取任务日志\n :param request:\n :return:\n \"\"\"\n callbackJson = constructResponse()\n callbackJson.statusCode = 200\n content = {}\n rjson = {\n \"title\": {\n \"text\": \"数据统计 有点厉害\",\n },\n \"tooltip\": {\n \"trigger\": \"axis\",\n },\n \"legend\": {\n \"x\": 'center',\n \"y\": 'bottom',\n \"data\": [\"Email\", \"Union Ads\", \"Video Ads\", \"Direct\", \"Search Engine\"],\n },\n \"grid\": {\n # \"left\": \"3%\",\n # \"right\": \"4%\",\n # \"bottom\": \"3%\",\n \"containLabel\": True,\n },\n \"toolbox\": {\n \"feature\": {\n \"saveAsImage\": {},\n },\n },\n \"xAxis\": {\n \"type\": \"category\",\n \"boundaryGap\": False,\n \"data\": [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"],\n },\n \"yAxis\": {\n \"type\": \"value\",\n },\n \"series\": [\n {\n \"name\": \"Email\",\n \"type\": \"line\",\n \"stack\": \"Total\",\n \"data\": [120, 132, 101, 134, 90, 230, 210],\n },\n {\n \"name\": \"Union Ads\",\n \"type\": \"line\",\n \"stack\": \"Total\",\n \"data\": [220, 182, 191, 234, 290, 330, 310],\n },\n {\n \"name\": \"Video Ads\",\n \"type\": \"line\",\n \"stack\": \"Total\",\n \"data\": [150, 232, 201, 154, 190, 330, 410],\n },\n {\n \"name\": \"Direct\",\n \"type\": \"line\",\n \"stack\": \"Total\",\n \"data\": [320, 332, 301, 334, 390, 330, 320],\n },\n {\n \"name\": \"Search Engine\",\n \"type\": \"line\",\n \"stack\": \"Total\",\n \"data\": [820, 932, 901, 934, 1290, 1330, 1320],\n },\n ],\n }\n rjsons = []\n import random, copy\n # for i in range(1, random.randint(2, 5)):\n for i in range(1, 4):\n t = copy.deepcopy(rjson)\n t[\"title\"][\"text\"] = f\"数据统计({i})\"\n for s in t[\"series\"]:\n s[\"data\"] = random_int_list(100, 999, 7)\n rjsons.append(t)\n content[\"list\"] = rjsons\n # 转换为业务响应数据\n return callbackJson.callBacker(content)\n\n\ndef random_int_list(start, stop, length):\n import random\n start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))\n length = int(abs(length)) if length else 0\n random_list = []\n for i in range(length):\n random_list.append(random.randint(start, stop))\n return random_list\n\n# try:\n# pass\n# except Exception as e:\n# print \"报错��息:\"\n# print 
e\n","repo_name":"DeSireFire/crawler-s-Gravestone","sub_path":"backEnd/apps/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17900289035","text":"import unittest\n\nfrom sparktkregtests.lib import sparktk_test\nfrom sparktkregtests.lib import performance_utils as profiler\n\n\nclass PerformanceKMeans(sparktk_test.SparkTKTestCase):\n\n def setUp(self):\n \"\"\"Import the files to test against.\"\"\"\n super(PerformanceKMeans, self).setUp()\n\n schema = [(\"Vec1\", float),\n (\"Vec2\", float),\n (\"Vec3\", float),\n (\"Vec4\", float),\n (\"Vec5\", float),\n (\"term\", str)]\n\n ds = self.get_file(self.id(), performance_file=True)\n self.frame_train = self.context.frame.import_csv(ds, schema=schema)\n\n def test_kmeans_5by5(self):\n \"\"\"Train a 5-feature, 5-class KMeans model\"\"\"\n with profiler.Timer(\"profile.\" + self.id() + \"_train\"):\n kmodel = self.context.models.clustering.kmeans.train(\n self.frame_train, [\"Vec1\", \"Vec2\", \"Vec3\", \"Vec4\", \"Vec5\"], 5)\n\n with profiler.Timer(\"profile.\" + self.id() + \"_predict\"):\n kmodel.predict(self.frame_train)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"tapanalyticstoolkit/spark-tk","sub_path":"regression-tests/sparktkregtests/testcases/performance/kmeans_perf_test.py","file_name":"kmeans_perf_test.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"5892349556","text":"import datetime\nimport subprocess\nfrom importlib import resources\nfrom pathlib import Path\n\nfrom . import migrations\nfrom .common import DBNAME, get_conn, run_sql\n\n\"\"\"\nForward-only DB migration scheme held together by duct tape.\n\n- Uses `user_version` pragma to figure out what migrations are pending.\n- Migrations files are in the form `./migrations/mXXXX.sql`.\n\"\"\"\n\n\ndef _get_current_version():\n return run_sql(\"PRAGMA user_version;\")[0]\n\n\ndef _get_version(migration: Path):\n return int(migration.name[len(\"m\") : -len(\".sql\")])\n\n\ndef _get_pending_migrations(migrations_dir: Path):\n current_version = _get_current_version()\n migrations = sorted(migrations_dir.glob(\"m*.sql\"))\n return [\n migration\n for migration in migrations\n if _get_version(migration) > current_version\n ]\n\n\ndef _read_migrations(paths):\n \"\"\"Returns list of (version, sql_text) tuples\"\"\"\n results = []\n for path in paths:\n with open(path, \"r\") as sql_file:\n results.append((_get_version(path), sql_file.read()))\n return results\n\n\ndef _write_db_schema_script(migrations_dir: Path):\n schema = subprocess.run(\n [\"sqlite3\", DBNAME, \".schema\"], capture_output=True, check=True\n ).stdout\n with open(migrations_dir / Path(\"latest_schema.sql\"), \"wb\") as f:\n f.write(b\"-- This file is auto-generated by the migration script\\n\")\n f.write(b\"-- for reference purposes only. 
DO NOT EDIT.\\n\\n\")\n f.write(schema)\n\n\ndef migrate(overwrite_latest_schema=True):\n # If there's no existing db, create one with the correct pragmas\n if not Path(DBNAME).is_file():\n run_sql(\"PRAGMA journal_mode = WAL;\")\n\n with resources.path(migrations, \"__init__.py\") as migrations_dir:\n migrations_dir = migrations_dir.parent\n pending_migrations = _get_pending_migrations(migrations_dir)\n if not pending_migrations:\n print(\"Nothing to migrate.\")\n exit()\n print(f\"There are {len(pending_migrations)} pending migrations.\")\n migration_contents = _read_migrations(pending_migrations)\n\n conn = get_conn()\n cursor = conn.cursor()\n\n # Backup first\n current_version = _get_current_version()\n if current_version != 0:\n now = datetime.datetime.utcnow().isoformat(\"T\", \"milliseconds\")\n backup_filename = f\"db_backup_v{current_version}_{now}.sqlite3\"\n print(f\"Backing up to {backup_filename}...\", end=\"\")\n cursor.execute(\"VACUUM main INTO ?;\", (backup_filename,))\n print(\" done\")\n\n # Start migrations\n # NOTE: this is NOT done in a transaction.\n # You'll need to do transactions inside your sql scripts.\n # This is to allow for drastic changes that require temporarily turning off the\n # foreign_keys pragma, which doesn't work inside transactions.\n # If anything goes wrong here, let it abort the whole script. You can always\n # restore from the backup file.\n cursor = conn.cursor()\n for version, sql in migration_contents:\n print(\"Migrating version\", version, \"...\")\n cursor.execute(sql)\n cursor.execute(f\"PRAGMA user_version = {version};\")\n\n if overwrite_latest_schema:\n _write_db_schema_script(migrations_dir)\n\n print(\"All done. Current version:\", _get_current_version())\n","repo_name":"nhanb/pytaku","sub_path":"src/pytaku/database/migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"37372833067","text":"import numpy as np\nimport logging\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mrisd import utils, relaxation\n\nlogModule = logging.getLogger(__name__)\n\n\nclass Diagram:\n def __init__(self, tr_in_ms: int = 200, num_trs: int = 1, annotate_consecutive_dash_points: str = 'TR',\n annotate_full_range: str = None):\n self.tr_in_ms: int = tr_in_ms\n self.num_trs: int = num_trs\n self.t_total: int = int(tr_in_ms * num_trs)\n\n # set some adjustable vars - mainly for plotting\n self.prepend_start: int = 10 # [ms] plot before the start so 0 time objects are not cut\n self.prepend_text: float = 0.05 * self.t_total # annotation size in ms equivalent\n self.larmor_vis_freq: float = 2 * 2 * np.pi # per x ms\n\n # set all arrays\n self.time_array = np.linspace(-self.prepend_start, int(tr_in_ms * num_trs),\n int(tr_in_ms * num_trs * 1e3)) # sample time array in us\n self.rf_array: np.ndarray = np.full_like(self.time_array, np.nan)\n zero_point, _ = utils.get_start_end_idx(self.time_array, 0, 0)\n self.rf_array[zero_point:] = 0.0\n self.gs_array: np.ndarray = np.copy(self.rf_array)\n self.gr_array: np.ndarray = np.copy(self.rf_array)\n self.gp_array: np.ndarray = np.copy(self.rf_array)\n self.adc_array: np.ndarray = np.copy(self.rf_array)\n self.signal: np.ndarray = np.copy(self.rf_array)\n\n self.mag_z: np.ndarray = np.copy(self.signal)\n self.mag_xy: np.ndarray = np.copy(self.signal)\n\n self.dash_points: list = []\n self.dash_annotate: str = annotate_consecutive_dash_points\n 
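# Usage sketch (hypothetical timings, all in ms):\n # diag = Diagram(tr_in_ms=200)\n # diag.add_rf(90, 0)\n # diag.add_rf(180, 50)\n # diag.add_adc(timing_in_ms=95, duration_in_ms=10)\n # diag.plot(which_parts=['rf', 'adc'])\n 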
self.annotate_full_range: str = annotate_full_range\n self.rf_labels: list = []\n self.echo_points: list = []\n\n @staticmethod\n def _map_rf_size(fa_in_deg: float):\n # want to visualize different amplitudes but within a size range\n amp_min = 0.4\n amp_max = 1.0\n # normalize 180° to 1\n mapped_fa = amp_min + (amp_max - amp_min) * np.radians(fa_in_deg) / np.pi\n # clip in case different angles are provided\n return np.clip(mapped_fa, amp_min, amp_max)\n\n def add_rf(self, flip_angle_in_deg: float, timing_in_ms: float,\n duration_in_ms: float = 4.0):\n # normalize size\n amp = self._map_rf_size(flip_angle_in_deg)\n start_idx, end_idx = utils.get_start_end_idx(self.time_array, timing_in_ms, duration_in_ms)\n sinc_x = np.linspace(-(end_idx - start_idx) / 2, (end_idx - start_idx) / 2, int(end_idx - start_idx))\n self.rf_array[start_idx:end_idx] = np.abs(amp * np.sinc(2e-3 * sinc_x))\n time_point = self.time_array[int((end_idx - start_idx) / 2) + start_idx]\n self.rf_labels.append([time_point, flip_angle_in_deg])\n self.dash_points.append(time_point)\n\n def add_grad(self, axis: str, grad_amplitude: float, timing_in_ms: float, duration_in_ms):\n # get indexes based on timing\n start_idx, end_idx = utils.get_start_end_idx(self.time_array, timing_in_ms, duration_in_ms)\n # choose array based on ax\n if axis == 'z':\n self.gs_array[start_idx:end_idx] = grad_amplitude\n elif axis == 'y':\n self.gp_array[start_idx:end_idx] = grad_amplitude\n elif axis == 'x':\n self.gr_array[start_idx:end_idx] = grad_amplitude\n else:\n err = 'no valid gradient axis given, choose from x, y or z'\n logModule.error(err)\n raise ValueError(err)\n\n def add_adc(self, timing_in_ms: float, duration_in_ms: float):\n # get indexes based on timing\n start_idx, end_idx = utils.get_start_end_idx(self.time_array, timing_in_ms, duration_in_ms)\n # choose array based on ax\n self.adc_array[start_idx:end_idx] = 1.0\n time_point = self.time_array[utils.array1d_to_value_find_nearest_idx(\n self.time_array, timing_in_ms + duration_in_ms / 2)]\n self.echo_points.append(time_point)\n self.dash_points.append(time_point)\n\n def add_easy_signal(self, timing_in_ms: float, duration_in_ms: float):\n start_idx, end_idx = utils.get_start_end_idx(self.time_array, timing_in_ms, duration_in_ms)\n mid_idx = utils.array1d_to_value_find_nearest_idx(self.time_array, timing_in_ms + duration_in_ms / 2)\n\n larmor_sig = np.sin(self.larmor_vis_freq * self.time_array[start_idx:end_idx])\n sigma = 0.3 * duration_in_ms\n envelope = np.divide(\n np.exp(\n -np.square(self.time_array[start_idx:end_idx] - self.time_array[mid_idx]) / (2 * sigma ** 2)\n ),\n np.sqrt(2 * np.pi * sigma ** 2)\n )\n self.signal[start_idx:end_idx] = envelope * larmor_sig\n\n def set_mag_longitudinal(self, start_time: float, duration: float,\n start_mag: float = 1.0, t1_in_ms: float = 200.0):\n # get indexes\n start_idx, end_idx = utils.get_start_end_idx(self.time_array, start_time, duration)\n # timing in us!\n relax_time = self.time_array[start_idx:end_idx] - self.time_array[start_idx]\n mag_curve = relaxation.t1_ir_relaxation(t=relax_time, t1_val=t1_in_ms, m_initial=start_mag)\n self.mag_z[start_idx:end_idx] = mag_curve\n return mag_curve[-1]\n\n def set_mag_transverse(self, start_time: float, duration: float, start_mag: float = 1.0, t2_in_ms: float = 10.0):\n start_idx, end_idx = utils.get_start_end_idx(self.time_array, start_time, duration)\n # timing in us!\n relax_time = self.time_array[start_idx:end_idx] - self.time_array[start_idx]\n mag_curve = 
relaxation.t2_star_relaxation(relax_time, t2_star_val=t2_in_ms, m_initial=start_mag)\n self.mag_xy[start_idx:end_idx] = mag_curve\n return mag_curve[-1]\n\n def plot(self, which_parts: list = None, dpi: int = 200, add_magnetization: bool = False, save: str = None):\n default = ['rf', 'gs', 'gr', 'gp', 'sig', 'adc']\n if add_magnetization:\n default.append('mz')\n default.append('mxy')\n if which_parts is None:\n which_parts = default\n for part in which_parts:\n if part not in default:\n err = f\"Provide list of the parts to plot. Can only contain one or more of: {default}\"\n logModule.error(err)\n raise ValueError(err)\n\n # setup plotting\n num_rows = which_parts.__len__()\n if 'sig' in which_parts and 'adc' in which_parts:\n num_rows -= 1\n colors = cm.viridis(np.linspace(0, 1, 2 * which_parts.__len__())).reshape((which_parts.__len__(), 2, -1))\n fig = plt.figure(figsize=(10, 0.9*num_rows), dpi=dpi)\n # hr = np.full(num_rows + 1, 10)\n # hr[-1] = 1\n gs = fig.add_gridspec(num_rows, 1)\n\n select_idx = 0\n\n if 'rf' in which_parts:\n ax_rf = fig.add_subplot(gs[select_idx])\n ax_rf.axis(False)\n ax_rf.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_rf.text(-self.prepend_text - self.prepend_start, 0, \"RF\")\n ax_rf.plot(self.time_array, self.rf_array, color=colors[select_idx, 0])\n ax_rf.fill_between(self.time_array, self.rf_array, alpha=0.6, color=colors[select_idx, 1])\n for labels in self.rf_labels:\n ax_rf.text(labels[0], 1.05 * np.nanmax(self.rf_array), f\"$\\\\alpha$: {labels[1]:d} °\")\n select_idx += 1\n\n if 'gs' in which_parts:\n ax_gs = fig.add_subplot(gs[select_idx])\n ax_gs.axis(False)\n ax_gs.text(-self.prepend_text - self.prepend_start, 0, \"$G_{\\\\mathrm{slice}}$\")\n ax_gs.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_gs.plot(self.time_array, self.gs_array, color=colors[select_idx, 0])\n ax_gs.fill_between(self.time_array, self.gs_array, alpha=0.6, color=colors[select_idx, 1])\n select_idx += 1\n\n if 'gr' in which_parts:\n ax_gx = fig.add_subplot(gs[select_idx])\n ax_gx.axis(False)\n ax_gx.text(-self.prepend_text - self.prepend_start, 0, \"$G_{\\\\mathrm{read}}$\")\n ax_gx.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_gx.plot(self.time_array, self.gr_array, color=colors[select_idx, 0])\n ax_gx.fill_between(self.time_array, self.gr_array, alpha=0.6, color=colors[select_idx, 1])\n select_idx += 1\n\n if 'gp' in which_parts:\n ax_gp = fig.add_subplot(gs[select_idx])\n ax_gp.axis(False)\n ax_gp.text(-self.prepend_text - self.prepend_start, 0, \"$G_{\\\\mathrm{phase}}$\")\n ax_gp.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_gp.plot(self.time_array, self.gp_array, color=colors[select_idx, 0])\n ax_gp.plot(self.time_array, -self.gp_array, color=colors[select_idx, 0])\n # plot phase encode steps\n for fac_interp in np.linspace(0, 1, 5):\n interpol_gp = fac_interp * self.gp_array\n ax_gp.plot(self.time_array, interpol_gp, color=colors[select_idx, 0])\n ax_gp.plot(self.time_array, -interpol_gp, color=colors[select_idx, 0])\n\n ax_gp.fill_between(self.time_array, self.gp_array, alpha=0.6, color=colors[select_idx, 1])\n ax_gp.fill_between(self.time_array, -self.gp_array, alpha=0.6, color=colors[select_idx, 1])\n select_idx += 1\n\n if 'adc' in which_parts or 'sig' in which_parts:\n ax_sig_adc = fig.add_subplot(gs[select_idx])\n ax_sig_adc.axis(False)\n ax_sig_adc.set_xlim(np.min(self.time_array), np.max(self.time_array))\n if 'adc' in which_parts:\n if 'sig' in which_parts:\n alpha = 0.6\n else:\n alpha = 1.0\n 
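# dim the ADC gate to 0.6 when the signal trace is overlaid on it; draw it\n # at full opacity when it is shown alone\n 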
ax_sig_adc.fill_between(self.time_array, self.adc_array, color=colors[select_idx, 1],\n alpha=0.6 * alpha)\n ax_sig_adc.plot(self.time_array, self.adc_array, color=colors[select_idx, 0], alpha=alpha)\n\n if 'sig' in which_parts:\n ax_sig_adc.plot(self.time_array, self.signal, color='#ff4d4d')\n # ax_sig.fill_between(self.time_array, self.gs_array, alpha=0.6, color=colors[select_idx, 1])\n\n ax_sig_adc.text(-self.prepend_text - self.prepend_start, 0, 'Signal / ADC')\n\n for val in self.echo_points:\n ax_sig_adc.scatter(val, 0.05, color='#990000', zorder=3)\n select_idx += 1\n\n if 'mz' in which_parts:\n ax_mz = fig.add_subplot(gs[select_idx])\n ax_mz.axis(False)\n ax_mz.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_mz.set_ylim(-1, 1)\n ax_mz.plot(self.time_array, self.mag_z, color=colors[select_idx, 0])\n null_line = np.zeros_like(self.time_array)\n null_line[np.argwhere(np.isnan(self.mag_z))] = np.nan\n ax_mz.plot(self.time_array, null_line,\n alpha=0.8, color=colors[select_idx, 1], linestyle='dashed')\n ax_mz.text(-self.prepend_text - self.prepend_start, 0, '$M_z$')\n select_idx += 1\n\n if 'mxy' in which_parts:\n ax_mxy = fig.add_subplot(gs[select_idx])\n ax_mxy.axis(False)\n ax_mxy.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_mxy.set_ylim(-1, 1)\n null_line = np.zeros_like(self.time_array)\n\n null_line[np.argwhere(np.isnan(self.mag_xy))] = np.nan\n ax_mxy.plot(self.time_array, null_line,\n alpha=0.8, color=colors[select_idx, 1], linestyle='dashed')\n ax_mxy.plot(self.time_array, self.mag_xy, color=colors[select_idx, 0])\n ax_mxy.fill_between(self.time_array[self.adc_array > 0], self.mag_xy[self.adc_array > 0],\n color=colors[select_idx, 1], alpha=0.7)\n\n ax_mxy.text(-self.prepend_text - self.prepend_start, 0, '$M_{xy}$')\n select_idx += 1\n\n # plot annotation lines\n ax_inter = fig.add_subplot(gs[:])\n ax_inter.axis(False)\n ax_inter.set_xlim(np.min(self.time_array), np.max(self.time_array))\n ax_inter.set_ylim(0, 1)\n for val_idx in range(self.dash_points.__len__()):\n ax_inter.vlines(self.dash_points[val_idx], 0.1, 1, color='#cc0000', alpha=0.7, linestyles='dashed')\n if val_idx < self.dash_points.__len__() - 1:\n ax_inter.annotate(text='', xy=(self.dash_points[val_idx], 0.05),\n xytext=(self.dash_points[val_idx + 1], 0.05),\n arrowprops=dict(arrowstyle='<->', color='#800000',\n ls='--', alpha=0.7))\n ax_inter.text(\n (self.dash_points[val_idx + 1] - self.dash_points[val_idx]) / 2 + self.dash_points[val_idx] - self.time_array[-1] / 80,\n 0.06,\n self.dash_annotate,\n dict(color='#800000')\n )\n if self.annotate_full_range is not None:\n ax_inter.annotate(text='', xy=(self.dash_points[0], 0.02),\n xytext=(self.t_total, 0.02),\n arrowprops=dict(arrowstyle='<->', color='#800000',\n ls='--', alpha=0.7))\n ax_inter.text(\n self.t_total/2 + self.dash_points[0],\n 0.03,\n self.annotate_full_range,\n dict(color='#800000')\n )\n # plt.tight_layout()\n if save is not None:\n plt.savefig(save, bbox_inches='tight', dpi=dpi)\n plt.close()\n else:\n plt.show()\n\n\nif __name__ == '__main__':\n diag = Diagram()\n diag.add_rf(90, 0)\n\n diag.plot()\n","repo_name":"schmidtijoe/mri_sequence_diagrams","sub_path":"mrisd/seq_diagram.py","file_name":"seq_diagram.py","file_ext":"py","file_size_in_byte":14006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19155972908","text":"from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom apps.metadata.songs.models import Song\n\n\nclass 
CosineSimilarity(models.Model):\n # ID\n songBase = models.ForeignKey(\n Song,\n unique=False,\n related_name='CosineSimilarity_right', on_delete=models.CASCADE\n )\n songCompare = models.ForeignKey(\n Song,\n unique=False,\n related_name='CosineSimilarity_left', on_delete=models.CASCADE\n )\n # Data\n title = models.FloatField(default=0, unique=False)\n album = models.FloatField(default=0, unique=False)\n artist = models.FloatField(default=0, unique=False)\n\n class Meta:\n unique_together = (('songBase', 'songCompare'),)\n","repo_name":"DiegoCorrea/ouvido_musical-Back","sub_path":"apps/kemures/similarities/Cosine/DAO/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"27881122406","text":"# Conversion functions for the NATO Phonetic Alphabet\n# https://research.exercism.io/experiment_solutions/413bd25d5b614697bb5a7d7c732b6ccb\n\nimport re\n# To save a lot of typing the code words are presented here\n# as a dict, but feel free to change this if you'd like.\nALPHANUM_TO_NATO = {\n \"A\": \"ALFA\",\n \"B\": \"BRAVO\",\n \"C\": \"CHARLIE\",\n \"D\": \"DELTA\",\n \"E\": \"ECHO\",\n \"F\": \"FOXTROT\",\n \"G\": \"GOLF\",\n \"H\": \"HOTEL\",\n \"I\": \"INDIA\",\n \"J\": \"JULIETT\",\n \"K\": \"KILO\",\n \"L\": \"LIMA\",\n \"M\": \"MIKE\",\n \"N\": \"NOVEMBER\",\n \"O\": \"OSCAR\",\n \"P\": \"PAPA\",\n \"Q\": \"QUEBEC\",\n \"R\": \"ROMEO\",\n \"S\": \"SIERRA\",\n \"T\": \"TANGO\",\n \"U\": \"UNIFORM\",\n \"V\": \"VICTOR\",\n \"W\": \"WHISKEY\",\n \"X\": \"XRAY\",\n \"Y\": \"YANKEE\",\n \"Z\": \"ZULU\",\n \"0\": \"ZERO\",\n \"1\": \"ONE\",\n \"2\": \"TWO\",\n \"3\": \"TREE\",\n \"4\": \"FOUR\",\n \"5\": \"FIVE\",\n \"6\": \"SIX\",\n \"7\": \"SEVEN\",\n \"8\": \"EIGHT\",\n \"9\": \"NINER\",\n}\n\n\ndef transmit(message: str): # Convert a message to a NATO code word transmission.\n arr_message, result = re.findall(r'\\w', message.upper()), [] # split into single word characters, drop punctuation, uppercase\n print(arr_message)\n for i in arr_message:\n if i in ALPHANUM_TO_NATO and i.isascii():\n result.append(ALPHANUM_TO_NATO[i])\n result = ' '.join(result)\n return result\n\n\ndef receive(transmission: str): # Convert a NATO code word transmission to a message.\n arr_transmission, result = transmission.split(), []\n for i in arr_transmission:\n for alphabet, nato in ALPHANUM_TO_NATO.items():\n if nato == i:\n result.append(alphabet)\n result = ''.join(result)\n return result\n\n\nprint(transmit(\"Hello, World!\")) #\"HOTEL ECHO LIMA LIMA OSCAR WHISKEY OSCAR ROMEO LIMA DELTA\"\nprint(transmit(\"NCC-1701-D\")) #\"NOVEMBER CHARLIE CHARLIE ONE SEVEN ZERO ONE DELTA\"\nprint(transmit(\"hop_lalaley\"))\nprint(receive(\"HOTEL ECHO LIMA LIMA OSCAR WHISKEY OSCAR ROMEO LIMA DELTA\")) #\"HELLOWORLD\"\nprint(receive(\"NOVEMBER CHARLIE CHARLIE ONE SEVEN ZERO ONE DELTA\")) # \"NCC1701D\"\nprint(receive(\"ONE SEVEN ZERO ONE SIERRA TANGO ROMEO INDIA NOVEMBER GOLF WHISKEY HOTEL INDIA TANGO ECHO SIERRA PAPA ALFA CHARLIE ECHO SIERRA TANGO ROMEO INDIA NOVEMBER GOLF PAPA UNIFORM NOVEMBER CHARLIE TANGO UNIFORM ALFA TANGO INDIA OSCAR NOVEMBER\"))","repo_name":"Shreters/Learning-project","sub_path":"Exercism/nato alphabet.py","file_name":"nato alphabet.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10389174070","text":"M = 1000000007\n\n\ndef exponentiation(bas, exp):\n t = 1\n while (exp 
% 2 != 0):\n t = (t * bas) % M\n\n bas = (bas * bas) % M\n exp = int(exp / 2)\n return t % M\n\n\n\nt = int(input())\nfor inp in range(t):\n\tk = int(input())\n\tr = 2\n\tpower = exponentiation(2, k - 1)\n\tprint((10 * power) % M)","repo_name":"mrityunjay-vashisth/JUNE-Challenge-2019-Division-2","sub_path":" RSIGNS.py","file_name":" RSIGNS.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71607228321","text":"import os\nfrom numpy import std\nfrom piq.perceptual import DISTS\nfrom test import as_color_mapped_image\nfrom timeit import default_timer as timer\nfrom typing import List\nfrom statistics import stdev\n\nfrom torch import nn\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\nimport torchvision.utils as vutils\n\nfrom piq import ssim, LPIPS, multi_scale_ssim, fsim, gmsd, multi_scale_gmsd, haarpsi, mdsi\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nfrom matplotlib.cm import get_cmap\n\nfrom configs.config import Config\nfrom measures import mae, mse, psnr\nfrom utils.datawriter import serialize_and_save_results\nimport netutils\nfrom utils.utils import tosRGB_tensor\n\n\nclass Evaluator:\n def __init__(\n self,\n config: Config,\n net: nn.Module,\n dataset: Dataset,\n device: torch.device,\n io_transform: netutils.IOTransform,\n save_results: bool = True,\n uses_secondary_dataset: bool = False,\n ) -> None:\n self.config = config\n self.save_results = save_results\n\n self.net: nn.Module = net\n self.net.eval()\n\n self.dataset: Dataset = dataset\n self.dataloader = DataLoader(\n dataset=dataset,\n batch_size=self.config.batch_size,\n shuffle=True,\n num_workers=self.config.num_workers_test,\n )\n\n self.device: torch.device = device\n self.io_transform: netutils.IOTransform = io_transform\n\n self.sample_indices = torch.randint(0, len(dataset), (40,))\n self.uses_secondary_dataset = uses_secondary_dataset\n\n def eval(self) -> None:\n begin_eval = timer()\n\n mae_vals = []\n mse_vals = []\n psnr_vals = []\n ssim_vals = []\n lpips_vals = []\n ms_ssim_vals = []\n gmsd_vals = []\n ms_gmsd_vals = []\n haar_vals = []\n mdsi_vals = []\n dists_vals = []\n\n test_size = len(self.dataloader) # in batches\n\n def avg(lst: List[float]) -> float:\n return sum(lst) / len(lst)\n\n print(\"\\nEvaluating network performance for test set {}...\".format(\"B\" if self.uses_secondary_dataset else \"A\"))\n with torch.no_grad():\n lpips = LPIPS()\n dists = DISTS()\n for batch_num, (input, gt) in enumerate(self.dataloader):\n # Skip last incomplete batch\n if batch_num == test_size - 1:\n continue\n\n input: Tensor = self.io_transform.transform_input(input)\n gt: Tensor = self.io_transform.transform_gt_eval(gt, visualize=False)\n output: Tensor = self.io_transform.transform_output_eval(output=self.net(input), visualize=False)\n\n self.io_transform.clear()\n\n for (output_img, gt_img) in zip(output, gt):\n mae_vals.append(mae(output_img, gt_img))\n mse_vals.append(mse(output_img, gt_img))\n psnr_vals.append(psnr(output_img, gt_img))\n ssim_vals.append(ssim(output_img, gt_img, data_range=1.0, kernel_size=7).detach().item())\n lpips_vals.append(lpips(output_img, gt_img).detach().item())\n\n ms_ssim_vals.append(multi_scale_ssim(output_img, gt_img).detach().item())\n # fsim_vals.append(fsim(output_img, gt_img))\n gmsd_vals.append(gmsd(output_img, gt_img).detach().item())\n 
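# NB: piq metric functions generally expect batched NCHW tensors in [0, 1];\n # for a CHW pair the safer call would be, e.g.,\n # gmsd(output_img.unsqueeze(0), gt_img.unsqueeze(0))\n 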
ms_gmsd_vals.append(multi_scale_gmsd(output_img, gt_img).detach().item())\n haar_vals.append(haarpsi(output_img, gt_img).detach().item())\n mdsi_vals.append(mdsi(output_img, gt_img).detach().item())\n dists_vals.append(dists(output_img, gt_img).detach().item())\n\n print(\"Evaluating inference time...\")\n # evaluate inference time\n with torch.no_grad():\n times: List[float] = []\n skip_items = 50 # skip the first few items because they tend to be slower\n for i in range(self.dataset.__len__()):\n if i < skip_items:\n continue\n\n input: Tensor = self.dataset.__getitem__(i)\n\n input = input[0].unsqueeze(0).to(self.device)\n input = self.io_transform.transform_input(input)\n\n begin = timer()\n\n output: torch.Tensor = self.net(input)\n\n t = timer() - begin\n times.append(t)\n\n print(\"Processing results...\")\n self.process_and_save_results(avg, mae_vals, mse_vals, psnr_vals, ssim_vals, lpips_vals, times, begin_eval)\n self.process_secondary(avg, ms_ssim_vals, gmsd_vals, ms_gmsd_vals, haar_vals, mdsi_vals, dists_vals)\n if self.save_results:\n print(\"Saving snapshots with diff...\")\n self.save_snapshots()\n\n def test_inference(self):\n times: List[float] = []\n\n with torch.no_grad():\n skip_num = 100\n skip = 0\n for i in range(2):\n begin_epoch = timer()\n\n for i in range(len(self.dataset)):\n #begin = timer()\n\n input = self.dataset[i][0].unsqueeze(0).to(self.device)\n output: Tensor = self.net(input)\n\n #t = timer() - begin\n # if skip < skip_num:\n # skip = skip + 1\n # else:\n # times.append(t)\n\n epoch_time = timer() - begin_epoch\n print(\n \"Epoch for {0:.4f} s, single item for {1:.4f} s\".format(\n epoch_time, epoch_time / len(self.dataloader)\n )\n )\n\n if times: # guard: the per-item appends above are commented out, so times may be empty\n print(\"Single item avg: {0:.4f} s\".format(sum(times) / len(times)))\n\n def process_and_save_results(self, avg, mae_vals, mse_vals, psnr_vals, ssim_vals, lpips_vals, times, begin_eval):\n # Measures\n avg_mae = avg(mae_vals)\n std_mae = stdev(mae_vals)\n avg_mse = avg(mse_vals)\n std_mse = stdev(mse_vals)\n avg_psnr = avg(psnr_vals)\n std_psnr = stdev(psnr_vals)\n avg_ssim = avg(ssim_vals)\n std_ssim = stdev(ssim_vals)\n avg_lpips = avg(lpips_vals)\n std_lpips = stdev(lpips_vals)\n # Inference time\n avg_time = avg(times)\n std_time = stdev(times)\n\n if self.save_results:\n filename = \"results_secondary_test_set.json\" if self.uses_secondary_dataset else \"results.json\"\n filepath = os.path.join(self.config.dirs.experiment_results_root, filename)\n serialize_and_save_results(\n filepath,\n avg_mae,\n std_mae,\n avg_mse,\n std_mse,\n avg_ssim,\n std_ssim,\n avg_psnr,\n std_psnr,\n avg_lpips,\n std_lpips,\n avg_time,\n std_time,\n )\n print(\n \"\"\"Evaluation ended for {0:.2f} s. Results:\n MAE = {1:.4f} +/- {2:.4f}\n MSE = {3:.4f} +/- {4:.4f}\n PSNR = {5:.4f} +/- {6:.4f}\n SSIM = {7:.4f} +/- {8:.4f}\n LPIPS = {9:.4f} +/- {10:.4f}\n Avg. 
inference time = {11:.4f} +/- {12:.4f}\"\"\".format(\n timer() - begin_eval,\n avg_mae,\n std_mae,\n avg_mse,\n std_mse,\n avg_psnr,\n std_psnr,\n avg_ssim,\n std_ssim,\n avg_lpips,\n std_lpips,\n avg_time,\n std_time,\n )\n )\n\n def process_secondary(self, avg, ms_ssim_vals, gmsd_vals, ms_gmsd_vals, haar_vals, mdsi_vals, dists_vals):\n avg_ms_ssim = avg(ms_ssim_vals)\n std_ms_ssim = std(ms_ssim_vals)\n avg_gmsd = avg(gmsd_vals)\n std_gmsd = std(gmsd_vals)\n avg_ms_gmsd = avg(ms_gmsd_vals)\n std_ms_gmsd = std(ms_gmsd_vals)\n avg_haar = avg(haar_vals)\n std_haar = std(haar_vals)\n avg_mdsi = avg(mdsi_vals)\n std_mdsi = std(mdsi_vals)\n avg_dists = avg(dists_vals)\n std_dists = std(dists_vals)\n print(\n \"\"\"Secondary Results {0}:\n MS-SSIM = {1:.4f} +/- {2:.4f}\n FSIM = {3:.4f} +/- {4:.4f}\n GMSD = {5:.4f} +/- {6:.4f}\n MS-GMSD = {7:.4f} +/- {8:.4f}\n HAAR = {9:.4f} +/- {10:.4f}\n MDSI = {11:.4f} +/- {12:.4f}\n DISTS = {13:.4f} +/- {14:.4f}\"\"\".format(\n \"\",\n avg_ms_ssim,\n std_ms_ssim,\n 99999.9,\n 99999.9,\n avg_gmsd,\n std_gmsd,\n avg_ms_gmsd,\n std_ms_gmsd,\n avg_haar,\n std_haar,\n avg_mdsi,\n std_mdsi,\n avg_dists,\n std_dists,\n )\n )\n\n def save_snapshots(self):\n save_dir = self.config.dirs.test_output_samples_dir\n\n if self.uses_secondary_dataset:\n save_dir = save_dir[:-2] + \"_secondary/\"\n os.mkdir(save_dir)\n\n tensors = []\n with torch.no_grad():\n for index in self.sample_indices:\n image_path = os.path.join(save_dir, \"sample_{}.png\".format(index))\n\n (sample_input, sample_gt) = self.dataset.__getitem__(index)\n\n di = sample_input[3:6, :].to(self.device).clamp(0.0, 1.0)\n\n tensors.append(tosRGB_tensor(di.clone()))\n\n sample_gt: Tensor = self.io_transform.transform_gt_eval(sample_gt, visualize=True)\n sample_input: Tensor = self.io_transform.transform_input(sample_input.unsqueeze(0))\n sample_output: Tensor = self.io_transform.transform_output_eval(\n output=self.net(sample_input), visualize=True\n ).squeeze()\n\n self.io_transform.clear()\n\n tensors.append(tosRGB_tensor(sample_gt))\n tensors.append(tosRGB_tensor(sample_output))\n\n tensors.extend(self.make_diff_images(sample_output, di, sample_gt))\n\n grid_tensor = vutils.make_grid(tensors, nrow=3)\n vutils.save_image(grid_tensor, image_path)\n tensors.clear()\n\n def make_diff_images(self, target: Tensor, source_1: Tensor, source_2) -> List[Tensor]:\n diff_1: Tensor = source_1 - target\n diff_2: Tensor = source_2 - target\n\n # colors = {0: \"Reds\", 1: \"Greens\", 2: \"Blues\"}\n\n # per_channel_diffs = [self.make_monochrome_diff_per_channel(diff_1[i], colors[i]) for i in range(3)]\n # per_channel_diffs.extend([self.make_monochrome_diff_per_channel(diff_2[i], colors[i]) for i in range(3)])\n\n # return per_channel_diffs\n diff_1_map = self.make_distance_based_diff_image(diff_1)\n diff_2_map = self.make_distance_based_diff_image(diff_2)\n empty_tensor = torch.ones_like(diff_2_map)\n\n return [diff_1_map, diff_2_map, empty_tensor]\n\n def make_monochrome_diff_per_channel(self, diff_t: Tensor, colormap_name: str) -> Tensor:\n return self.as_color_mapped_image(diff_t, colormap_name)\n\n def make_distance_based_diff_image(self, diff_t: Tensor) -> Tensor:\n xs_sq: Tensor = diff_t[0].pow(2)\n ys_sq: Tensor = diff_t[1].pow(2)\n zs_sq: Tensor = diff_t[2].pow(2)\n normed_distances_map = torch.sqrt(xs_sq + ys_sq + zs_sq) / 1.73205080757 # sqrt(3)\n\n return self.as_color_mapped_image(normed_distances_map, \"jet\")\n\n def as_color_mapped_image(self, t: Tensor, colormap_name: str) -> Tensor:\n cm_hot = 
get_cmap(colormap_name)\n t = t.cpu()\n t_np = t.numpy()\n t_np = cm_hot(t_np)\n t_ten = torch.from_numpy(t_np)\n t_ten = t_ten.to(self.device)\n return t_ten.permute((2, 0, 1))[0:3, :]\n","repo_name":"kstpr/Thesis","sub_path":"GI_Nets/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":12077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36360064277","text":"from movements import Swimming\nfrom .attraction import Attraction\n # Assuming you have a Swimming class for aquatic animals\n\nclass Wetlands(Attraction):\n\n def __init__(self, name, description):\n super().__init__(name, description)\n def add_animal_pythonic(self, animal):\n try:\n # Check if the animal has a 'walk_speed' attribute\n if hasattr(animal, 'slither_speed'):\n print(f\"{animal} is a snake, so it can't be placed in {self.attraction_name}.\")\n return\n # Check if the animal can swim\n if animal.swim_speed > 0:\n self.animals.append(animal)\n print(f\"{animal} now lives in {self.attraction_name}\")\n except AttributeError as ex:\n print(f\"{animal} doesn't like to swim, so please do not put it in the {self.attraction_name} attraction.\") \n\n def add_animal(self, animal):\n if isinstance(animal, Swimming):\n self.animals.append(animal)\n print(f\"{animal} now lives in {self.attraction_name}\")\n else:\n print(f\"{animal} doesn't belong in {self.attraction_name}.\")\n ","repo_name":"Daniel-Bennett777/Petting-Zoo-","sub_path":"attractions/wetlands.py","file_name":"wetlands.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2535802525","text":"#\r\n# A super-simple script that estimates what the winning score will be.\r\n#\r\n# It assumes that the winning entry will come from shuffling an \"optimal\"\r\n# packing pattern that has a normal distribution of scores.\r\n#\r\nimport numpy as np\r\n\r\n\r\nnum_submissions = 18000 # (400/2 teams shuffling) x (3 submissions per day) x (30 days left) \r\nscore_avg = 35540 # Average score of the optimal packing solution\r\nscore_sdev = 315 # Standard deviation of the optimal packing solution score\r\n\r\n\r\n# Simulate running the competition 10,000 times.\r\nnp.random.seed(0)\r\nnum_trials = 10000\r\nwinning_scores = np.zeros(num_trials)\r\nfor i in range(num_trials):\r\n winning_scores[i] = np.amax( np.random.normal(score_avg, score_sdev, num_submissions) )\r\n\r\nprint( \"Predicted winning score: %0.0f +/- %0.0f\" %(np.mean(winning_scores), np.std(winning_scores)) )\r\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/santas-uncertain-bags/PaulG/estimating-the-winning-score.py","file_name":"estimating-the-winning-score.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"2722596187","text":"from . 
import AttrDict\n\npnotree_z_dim = 512 * 4 # 4 pnotree concatenated, each 512\n\nd_cond = pnotree_z_dim\n\nparams = AttrDict(\n # Training params\n batch_size=16,\n max_epoch=50,\n learning_rate=5e-5,\n max_grad_norm=10,\n fp16=True,\n # Data params\n num_workers=4,\n pin_memory=True,\n # unet\n in_channels=2,\n out_channels=2,\n channels=64,\n attention_levels=[2, 3],\n n_res_blocks=2,\n channel_multipliers=[1, 2, 4, 4],\n n_heads=4,\n tf_layers=1,\n d_cond=d_cond,\n # ldm\n linear_start=0.00085,\n linear_end=0.0120,\n n_steps=1000,\n latent_scaling_factor=0.18215,\n # img\n img_h=128,\n img_w=128,\n # conditional\n cond_type=\"pnotree\",\n cond_mode=\"mix\", # {mix, cond, uncond}\n)\n","repo_name":"aik2mlj/polyffusion","sub_path":"polyffusion/params/params_sdf_pnotree.py","file_name":"params_sdf_pnotree.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"54"} +{"seq_id":"27492083780","text":"class stack:\n\n def __init__(self,capacity):\n self.tos1 = -1\n self.tos2=capacity\n self.capacity = capacity\n # This array is used as a stack\n self.array = [None for i in range(capacity)]\n def size1(self):\n return self.tos1+1\n def size2(self):\n return self.capacity-self.tos2\n\n def peek1(self):\n if self.tos1==-1:\n print(\"stack underflow\")\n return -1\n return self.array[self.tos1]\n def peek2(self):\n if self.tos2==self.capacity:\n print(\"stack underflow\")\n return -1\n return self.array[self.tos2]\n\n def pop1(self):\n if self.tos1!=-1:\n pop_ele=self.array[self.tos1]\n self.tos1 -= 1\n return pop_ele\n else:\n print(\"underflow\")\n return -1\n def pop2(self):\n if self.tos2!=self.capacity:\n pop_ele=self.array[self.tos2]\n self.tos2 += 1\n return pop_ele\n else:\n print(\"underflow\")\n return -1\n # Push the element to the stack\n def push1(self, op):\n if self.tos1+1 count:\n count = second.count(num)\n MaxCountNum = num\nif count == 1:\n print(\"Every number appears exactly once\")\nelse:\n print(f\"Most frequent number: {MaxCountNum}\\nOccurrences: {count}\")\n\n\n\n","repo_name":"Hsing0528/python77","sub_path":"8檢查數值是否有重複.py","file_name":"8檢查數值是否有重複.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36959130027","text":"import Tkinter as tk #Main Tkinter GUI interface\nfrom aja.embedded.rest.kipro import Client\n\nfont_unitid = ('Helvetica', 26, 'bold')\nfont_status = ('Helvetica', 18)\nfont_recbutton = ('Helvetica', 18, 'bold')\n\nnum_units = 6\nclient_list = [] #List of client objects\niplist = [] #List of unit ip addresses\n\n\nclass KiProControl(tk.Tk):\n\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n self.LoadIPAddresses()\n #self.Connections()\n\n #Setup window\n self.title(\"AJA KiPro\")\n self.geometry('800x480') #Force window size for RaspberryPi touchscreen\n\n container = tk.Frame(self) #Main holder for the entire application\n container.pack(side=\"top\", fill=\"both\", expand = True) #Place the main holder\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {} #Page dictionary\n\n PageList = (Overview, Settings) #Simple way for adding pages to the application\n\n for P in (PageList):\n page = P(container, self)\n self.frames[P] = page\n page.grid(row=0, column=0, sticky=\"NESW\")\n\n self.show_page(Overview) #Default startup page\n\n def show_page(self, cont):\n page = self.frames[cont]\n page.tkraise()\n\n def 
LoadIPAddresses(self):\n #Dirty: Load list of clients from ip address text file\n with open('ipaddress.txt', \"r\") as f:\n for line in f:\n iplist.append(line.strip())\n f.close()\n\n def Connections(self):\n for item in iplist:\n print('Connecting to: ')\n print(item)\n newclient = Client(item)\n client_list.append(newclient)\n print(client_list)\n\n\nclass Overview(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n\n #Generate KiPro unit labels\n for i in range(num_units):\n label = tk.Label(self, text=i+1, font=font_unitid)\n label.grid(column=0, row=i)\n\n #Generate device status labels\n self.unit_status = []\n for i in range(num_units):\n label = tk.Label(self, text=\"STANDBY\", font=font_status, fg=\"green\")\n label.grid(column=1, row=i, padx=20)\n self.unit_status.append(label)\n\n #Generate device codec labels\n self.unit_codec = []\n for i in range(num_units):\n label = tk.Label(self, text=\"ProRes 422\", font=font_status)\n label.grid(column=2, row=i, padx=20)\n self.unit_codec.append(label)\n\n #Generate Arm/Disarm buttons\n self.imgRed = tk.PhotoImage(file=\"red.gif\")\n self.imgBlack = tk.PhotoImage(file=\"black.gif\")\n self.arm_buttons = [] #List of buttons\n for i in range(num_units):\n button = tk.Button(self, height=50, width=50, image=self.imgBlack, bg=\"black\",\n command=lambda name=i: self.btnArm_click(name))\n button.grid(column=3, row=i, padx=10, pady=5)\n self.arm_buttons.append(button)\n\n #Record button\n self.imgRecord = tk.PhotoImage(file=\"record.gif\")\n btnRecord = tk.Button(self, image=self.imgRecord, font=font_recbutton, padx=30, pady=100,\n command=lambda: self.btnRecord_click())\n btnRecord.grid(column=4, row=0, rowspan=2, columnspan=2, padx=20)\n\n #Stop button\n self.imgStop = tk.PhotoImage(file=\"stop.gif\")\n btnStop = tk.Button(self, image=self.imgStop)\n btnStop.grid(column=4, row=2, rowspan=2, columnspan=2)\n\n #Select all button\n btnAll = tk.Button(self, text=\"Select All\",\n command= lambda: self.btnSelectAll())\n btnAll.grid(column=4, row=4, pady=30)\n\n #Select none button\n btnNone = tk.Button(self, text=\"Select None\",\n command= lambda: self.btnSelectNone())\n btnNone.grid(column=5, row=4, pady=30)\n\n #Settings button\n btnSettings = tk.Button(self, text=\"Settings\",\n command=lambda: controller.show_page(Settings))\n btnSettings.grid(column=0, row=num_units+1)\n\n def btnArm_click(self, name):\n clicked = self.arm_buttons[name]\n\n if clicked['bg'] == \"black\":\n clicked.config(image=self.imgRed, bg=\"red\")\n else:\n clicked.config(image=self.imgBlack, bg=\"black\")\n\n def btnRecord_click(self):\n for client in client_list:\n client.record()\n\n def btnSelectAll(self):\n for i in self.arm_buttons:\n i.config(image=self.imgRed, bg=\"red\")\n\n def btnSelectNone(self):\n for i in self.arm_buttons:\n i.config(image=self.imgBlack, bg=\"black\")\n\n\nclass Settings(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Settings\")\n\n #Generate KiPro unit labels\n for i in range(num_units):\n label = tk.Label(self, text=i+1, font=font_unitid)\n label.grid(column=0, row=i)\n\n #Run the method to load current IP addresses from the text file\n #self.populateIP()\n\n #Generate ip address fields\n self.ip_input = [] #List of ip address input boxes\n self.ip_buttons = [] #List of SetIP buttons\n for i in range(num_units):\n #ip Address Fields\n e = tk.Entry(self, width=15, font=font_status)\n e.insert(0, iplist[i]) #Pull ip address 
from the list\n e.grid(column=1, row=i)\n self.ip_input.append(e)\n #Set buttons\n button = tk.Button(self, text=\"Set\",\n command=lambda name=i: self.setIP(name, \"Test\"))\n button.grid(column=2, row=i)\n self.ip_buttons.append(button)\n\n\n btnOverview = tk.Button(self, text=\"Overview\",\n command=lambda: controller.show_page(Overview))\n btnOverview.grid(column=0, row=num_units+1)\n\n def setIP(self, index, data):\n update = self.ip_input[index] #Create a variable containing the input box object to read from\n text = update.get() #Load the text from that input box into a variable\n iplist[index] = text #Update that IP address in the IP list\n\n #Write the IP addresses back to the file\n f = open('ipaddress.txt', \"w\")\n for item in iplist:\n f.write(\"%s\\n\" % item)\n f.close()\n\n\napp = KiProControl()\napp.mainloop()\n","repo_name":"cfarrants/KiProControl","sub_path":"archive/uitest_v5 (with AJA class).py","file_name":"uitest_v5 (with AJA class).py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33443334932","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Dennis van Gils (https://github.com/Dennis-van-Gils)\n\"\"\"\n\nimport sys\n\ntry:\n from matplotlib import pyplot as plt\n from matplotlib import animation\nexcept ImportError:\n sys.exit(\"This demo requires the `matplotlib` package.\")\n\nimport numpy as np\nimport opensimplex_loops as osl\n\nN_FRAMES = 50\nN_PIXELS = 256\nFEATURE_SIZE = 24.0\n\n# Generate noise\nimg_stack = osl.looping_animated_2D_image(\n N_frames=N_FRAMES,\n N_pixels_x=N_PIXELS,\n N_pixels_y=N_PIXELS,\n t_step=0.1,\n x_step=1 / FEATURE_SIZE,\n dtype=np.float32,\n seed=3,\n verbose=True,\n)\n\n# Plot\nfig, ax = plt.subplots()\nimg = ax.imshow(\n img_stack[0],\n cmap=\"gray\",\n vmin=-1,\n vmax=1,\n interpolation=\"none\",\n)\nframe_text = ax.text(0, 1.02, \"\", transform=ax.transAxes)\n\n\ndef anim_init():\n img.set_data(img_stack[0])\n frame_text.set_text(\"\")\n return img, frame_text\n\n\ndef anim_fun(j):\n img.set_data(img_stack[j])\n frame_text.set_text(f\"frame {j:03d}\")\n return img, frame_text\n\n\nanim = animation.FuncAnimation(\n fig,\n anim_fun,\n frames=len(img_stack),\n interval=40,\n init_func=anim_init,\n # blit=True,\n)\n\nplt.grid(False)\nplt.axis(\"off\")\nplt.show()\n\n# Export images to disk?\nif 0:\n try:\n from PIL import Image\n except ImportError:\n sys.exit(\"Exporting images requires the `pillow` package.\")\n\n pil_imgs = []\n for j in range(len(img_stack)):\n pil_img = Image.fromarray((img_stack[j] * 127 + 128).astype(np.uint8))\n # pil_img.save(f\"image_{j:02d}.png\")\n pil_imgs.append(pil_img)\n\n pil_imgs[0].save(\n \"demo_looping_animated_2D_image.gif\",\n save_all=True,\n append_images=pil_imgs[1:],\n duration=40,\n loop=0,\n )\n","repo_name":"Dennis-van-Gils/opensimplex-loops","sub_path":"demos/demo_looping_animated_2D_image.py","file_name":"demo_looping_animated_2D_image.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71274915683","text":"import sys\ninput = sys.stdin.readline\nN,Q = map(int,input().split())\narr = [list(map(int,input().split())) for _ in range(2**N)]\nL = list(map(int,input().split()))\nsize = 2**N\ndef down(A):\n dx = [1,-1,0,0]\n dy = [0,0,1,-1]\n check=[]\n for i in range(size):\n for j in range(size):\n cnt = 0\n for x in range(4):\n di = i+dx[x]\n dj = j+dy[x]\n if 0<=di<size and 0<=dj<size and A[di][dj] > 0:\n 
cnt += 1\n if cnt<3:\n check.append((i,j))\n for i,j in check:\n if A[i][j]>0:\n A[i][j] -= 1\ndef big(A):\n bigsize = 0\n dx = [1,-1,0,0]\n dy = [0,0,1,-1]\n check = [[0]*size for _ in range(size)]\n for i in range(size):\n for j in range(size):\n if A[i][j] and not check[i][j]:\n q=[(i,j)]\n counter = 1\n check[i][j]=1\n while q:\n a,b = q.pop(0)\n for x in range(4):\n di = a+dx[x]\n dj = b+dy[x]\n if 0<=di List[int]:\n sort_list = sorted(nums, reverse=False)\n a_dict = {}\n for i in range(len(sort_list)):\n if sort_list[i] not in a_dict:\n a_dict[sort_list[i]] = i\n res = []\n for j in nums:\n res.append(a_dict[j])\n return res\n\n# 暴力法\n# class Solution(object):\n# def smallerNumbersThanCurrent(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: List[int]\n# \"\"\"\n# ans = []\n#\n# for v in nums:\n# cnt = 0\n# for t in nums:\n# if t < v:\n# cnt += 1\n# ans.append(cnt)\n#\n# return ans\n\nsolution = Solution()\nnums = [8,1,2,2,3]\nres = solution.smallerNumbersThanCurrent(nums)\nprint(res)","repo_name":"weiyuyan/LeetCode","sub_path":"LeetCode周赛/2020年3月1日周赛/5344. 有多少小于当前数字的数字.py","file_name":"5344. 有多少小于当前数字的数字.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"37228912815","text":"num1,num2 = map(int,(input().split()))\r\n\r\nwhile num1 != 0 and num2 != 0:\r\n list = []\r\n if num1 < num2:\r\n for i in range(1,num2):\r\n if num2 % i == 0:\r\n list.append(i)\r\n if num1 in list:\r\n print(\"factor\")\r\n else:\r\n print(\"neither\")\r\n elif num1 > num2:\r\n if num1 % num2 == 0:\r\n print(\"multiple\")\r\n else:\r\n print(\"neither\")\r\n num1,num2 = map(int,(input().split()))","repo_name":"Sun-Rang/BaekJun_ProGrammers","sub_path":"백준/Bronze/5086. 
배수와 약수/배수와 약수.py","file_name":"배수와 약수.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"5038992785","text":"from time import time\nfrom components import ChaDepParent\nimport numpy as np\n\nfrom components.SimBroker import SimBroker\nfrom components.Vehicle import Vehicle\nimport components\nimport logging\n\nclass ChaDepLimCon(ChaDepParent):\n\n    def step(self, timestep):\n        # class method to perform control action for the next simulation step.\n        '''repark vehicles based on their charging desire with the parent method'''\n        self.repark()\n        logging.info(\"Vehicles reparked in %s\" % self.ChargingStationId)\n\n        ''' control action'''\n\n        '''# calculate maximum available power for charging, for 3 different cases'''\n        if self.BtmsSoc() <= self.BtmsMinSoc:\n            P_max = self.GridPowerUpper\n        # if Btms energy content is large enough to power for the next timestep full discharge rating\n        elif self.BtmsEn - timestep/3.6e3 * self.BtmsMaxPower >= self.BtmsSize * self.BtmsMinSoc:\n            P_max = self.GridPowerUpper + self.BtmsMaxPower\n        # this is the intermediate case and the chargingPower to reach the minimum SOC\n        else:\n            P_max = self.GridPowerUpper + (self.BtmsEn - self.BtmsSize * self.BtmsMinSoc) / (timestep/3.6e3)\n        logging.info(\"maximum power available for charging: %s\" % P_max)\n\n        '''# now assign the charging powers to each vehicle, prioritized by their charging desire'''\n        self.P_ChargeDelivered = self.distributeChargingPowerToVehicles(timestep, P_max)\n        logging.debug(\"vehicle states updated for charging station {}\".format(self.ChargingStationId))\n\n        '''# now find out how to charge or discharge BTMS''' \n        sumPowers = sum(self.ChBaPower)\n        # if sum of charging power is greater than grid power limit, the btms must be discharged\n        if sumPowers >= self.GridPowerUpper:\n            self.P_BTMS = self.GridPowerUpper - sumPowers # result is negative\n        # if that is not the case, we might be able to charge for one timestep, if SOC < Max SOC\n        elif self.BtmsEn < self.BtmsSize * self.BtmsMaxSoc:\n            self.P_BTMS = min([self.getBtmsMaxPower(timestep), self.GridPowerUpper - sumPowers])\n        # if that doesn't work, it seems like Btms is full, then charging power is 0.\n        else:\n            self.P_BTMS = 0\n        \n        '''calculate dispatchable BTMS power'''\n        self.P_BTMS = self.BtmsGetPowerDeliverable(timestep, self.P_BTMS)\n\n        '''calculate grid power'''\n        self.P_Grid = self.P_ChargeDelivered + self.P_BTMS\n        logging.info('P_Grid: {:.2f}'.format(self.P_Grid))\n\n        '''Write chargingStation states for k in ResultWriter'''\n        self.ResultWriter.updateChargingStationState(self.SimBroker.t_act, self)\n        logging.debug(\"results written for charging station {}\".format(self.ChargingStationId))\n\n        '''# update BTMS state for k+1'''\n        # BTMS\n        self.BtmsAddPower(self.P_BTMS, timestep)\n        logging.debug(\"BTMS state updated for charging station {}\".format(self.ChargingStationId))\n\n        '''write vehicle states for k in ResultWriter and update vehicle states for k+1'''\n        # Vehicles\n        self.updateVehicleStatesAndWriteStates(self.ChBaPower, timestep)\n        logging.debug(\"vehicle states updated for charging station {}\".format(self.ChargingStationId))\n\n        '''determine power desire for next time step'''\n        PowerDesire = 0\n        for i in range(0,len(self.ChBaVehicles)):\n            if isinstance(self.ChBaVehicles[i], components.Vehicle):\n                PowerDesire += min([self.ChBaVehicles[i].getMaxChargingPower(timestep), self.ChBaMaxPower[i]])\n        self.PowerDesire = PowerDesire\n        self.BtmsPowerDesire = 
self.getBtmsMaxPower(timestep)\n logging.debug(\"power desires updated for charging station {}\".format(self.ChargingStationId))\n\n '''release vehicles when full and create control outputs'''\n self.resetOutput()\n r1 = self.chBaReleaseThresholdAndOutput()\n r2 = self.queueReleaseThresholdAndOutput()\n logging.debug(\"vehicles released for charging station {}\".format(self.ChargingStationId))\n released_Vehicles = r1 + r2\n\n # write vehicle states before releasing them (to have final SOC)\n for x in r1:\n possiblePower = x.getMaxChargingPower(timestep)\n self.ResultWriter.updateVehicleStates(\n t_act=self.SimBroker.t_act + timestep, vehicle=x, ChargingStationId=self.ChargingStationId, QueueOrBay=False, ChargingPower=0, possiblePower=possiblePower)\n for x in r2:\n possiblePower = x.getMaxChargingPower(timestep)\n self.ResultWriter.updateVehicleStates(\n t_act=self.SimBroker.t_act + timestep, vehicle=x, ChargingStationId=self.ChargingStationId, QueueOrBay=True, ChargingPower=0, possiblePower=possiblePower)\n\n # add release events\n for x in released_Vehicles:\n self.ResultWriter.releaseEvent(self.SimBroker.t_act, x, self.ChargingStationId)\n logging.debug(\"vehicle release events written for charging station {}\".format(self.ChargingStationId))\n\n '''checks'''\n if len(self.ChBaVehicles)!=self.ChBaNum:\n raise ValueError(\"Size of ChargingBay List shouldn't change\")\n ","repo_name":"LBNL-UCB-STI/beam","sub_path":"src/main/python/gemini/cosimulation/xfc_btms_saev_controller/components/ChaDepLimCon.py","file_name":"ChaDepLimCon.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"54"} +{"seq_id":"19524739621","text":"from seecr.test import SeecrTestCase\n\nfrom metastreams.tools.syncdomains import copyRepository\nfrom pprint import pprint\n\nclass SyncDomainsTest(SeecrTestCase):\n\n def testCopyRepository(self):\n src = { 'action': 'refresh',\n 'authorizationKey': 'let-me-in',\n 'baseurl': 'https://oai.example.org',\n 'collection': 'coll_ection',\n 'complete': True,\n 'continuous': 7200,\n 'identifier': 'repo_id',\n 'mappingId': 'c7a9bfe9-a1d9-4e0d-a7d4-a848570e95aa',\n 'maximumIgnore': 0,\n 'metadataPrefix': 'didl_mods',\n 'repositoryGroupId': 'groupId',\n 'set': 'some_set',\n 'shopclosed': [],\n 'targetId': '34e93586-d28f-4968-9191-260b2fc3df00',\n 'use': True,\n 'userAgent': 'Seecr Metastreams Harvester'}\n dest = {'identifier': 'repo_id',\n 'repositoryGroupId': 'groupId',\n 'targetId': 'myTargetId',\n 'mappingId': 'myMappingId',\n }\n\n new_repo, changed = copyRepository(src, dest)\n\n expected = {\n 'action': None,\n 'authorizationKey': 'let-me-in',\n 'baseurl': 'https://oai.example.org',\n 'collection': 'coll_ection',\n 'complete': True,\n 'continuous': 7200,\n 'identifier': 'repo_id',\n 'maximumIgnore': 0,\n 'metadataPrefix': 'didl_mods',\n 'repositoryGroupId': 'groupId',\n 'set': 'some_set',\n 'shopclosed': [],\n 'use': False,\n 'targetId': 'myTargetId',\n 'mappingId': 'myMappingId',\n 'userAgent': 'Seecr Metastreams Harvester'}\n\n self.assertTrue(changed)\n self.assertEqual(expected, new_repo)\n\n def testCopyRepositoryExtra(self):\n src = { 'identifier': 'repo_id',\n 'repositoryGroupId': 'groupId',\n 'extra': {\n 'value1': 'one',\n 'value2': 'two',\n }\n }\n dest = {'identifier': 'repo_id',\n 'repositoryGroupId': 'groupId',\n 'extra': {\n 'value2': 'TWO',\n 'value3': 'THREE',\n }\n }\n new_repo, changed = copyRepository(src, dest)\n self.assertTrue(changed)\n self.assertEqual({\n 'value1': 
'one',\n            'value2': 'two',\n            'value3': 'THREE',\n            }, new_repo['extra'])\n\n    def testCopyRepositoryDoesNotChangeUseAndAction(self):\n        src = { 'identifier': 'repo_id',\n                'repositoryGroupId': 'groupId',\n                'use': False,\n                'action': 'clean',\n              }\n        dest = {'identifier': 'repo_id',\n                'repositoryGroupId': 'groupId',\n                'use': True,\n                'action': 'refresh',\n              }\n        new_repo, changed = copyRepository(src, dest)\n        self.assertFalse(changed)\n        for k in ['use', 'action']:\n            self.assertEqual(dest[k], new_repo[k])\n\n    def testCopyRepositoryWithSpecifiedTargetMapping(self):\n        src = { 'identifier': 'repo_id',\n                'repositoryGroupId': 'groupId',\n                'mappingId': 'src_mapping',\n                'targetId': 'src_target',\n              }\n        dest = {'identifier': 'repo_id',\n                'repositoryGroupId': 'groupId',\n                'targetId': 'dest_target',\n              }\n        new_repo, changed = copyRepository(src, dest, targetId='new_target', mappingId='new_mapping')\n        self.assertTrue(changed)\n        self.assertEqual('new_target', new_repo['targetId'])\n        self.assertEqual('new_mapping', new_repo['mappingId'])\n","repo_name":"seecr/metastreams-harvester","sub_path":"test/tools/syncdomainstest.py","file_name":"syncdomainstest.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"39672909504","text":"from random import random\nfrom typing import Sequence,Any\n\ndef max_of(a :Sequence)->Any:\n    maximum = a[0]\n    for i in range(1,len(a)):\n        if a[i]>maximum:\n            maximum = a[i]\n    return maximum\n\n    \n    ","repo_name":"S2chanse/codeTestAlgorithm","sub_path":"chap02/max.py","file_name":"max.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"71894172643","text":"# Source: Programmers coding test practice, https://programmers.co.kr/learn/challenges\n\nNOTE = '0123456789ABCDEF'\ndef change(num, base):\n    q, r = divmod(num, base)\n    n = NOTE[r]\n    return change(q, base) + n if q else n\n\n\ndef solution(n, t, m, p):\n    candi = ''\n    candi_max_len = m * (t - 1) + p\n    candi_i = 0\n    while len(candi) < candi_max_len:\n        candi += str(change(candi_i, n))\n        candi_i += 1\n    candi = candi[:candi_max_len]\n\n    answer = ''\n    answer_i = p - 1\n\n    while answer_i < candi_max_len:\n        answer += candi[answer_i]\n        answer_i += m\n    return answer\n","repo_name":"kseungwoo/problem-solving","sub_path":"Programmers/Unclassified/n진수 게임.py","file_name":"n진수 게임.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"44531270079","text":"\"\"\"\r\nSolution to the CodeWars kata Equal Sides of an array\r\nLink:\r\nhttps://www.codewars.com/kata/5679aa472b8f57fb8c000047/python\r\n\"\"\"\r\ndef find_even_index(arr):\r\n    index = 0\r\n    while sum(arr[:index]) != sum(arr[index+1:]):\r\n        index +=1\r\n        if index >= len(arr):\r\n            return -1\r\n    return index\r\n    \r\n    \r\nif __name__=='__main__':\r\n    print(find_even_index([1,2,3,4,3,2,1]))\r\n    ","repo_name":"DavidSiretMarques/CodeWars-Katas","sub_path":"6kyu/equal_sides_of_an_array.py","file_name":"equal_sides_of_an_array.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"35751505570","text":"# Class attributes and instance attributes\nimport datetime\nimport time\n\n\nclass Student:\n    school = \"东北师范大学\"  # class attribute\n\n    def __init__(self, name):\n        self.name = name  # instance attribute\n\n    def __str__(self):\n        return \"学生%s就读于%s\" % (self.name, Student.school)\n\n    @classmethod\n    
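# @classmethod receives the class itself as `cls`, so getSchool/setSchool below\n    # read and write the shared class attribute Student.school rather than any\n    # per-instance attribute; @staticmethod (getCurrentTime further down) receives\n    # neither `cls` nor `self` and behaves like a plain function namespaced on the class.\n    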
def getSchool(cls):\n        return cls.school\n\n    @classmethod\n    def setSchool(cls, school):\n        cls.school = school\n\n    @staticmethod\n    def getCurrentTime():\n        return time.time()\n\n\n# Call class methods and class attributes through an instance of the class\nxiaoming = Student(\"小明\")\nprint(xiaoming)\nprint(\"类属性的地址是%d,内容是%s。\" % (id(xiaoming.school), xiaoming.school))\nxiaoming.school = \"吉林大学\"\nprint(\"类属性的地址是%d,内容是%s。\" % (id(xiaoming.school), xiaoming.school))\nprint(xiaoming)\n\nprint(\"类属性的地址是%d,内容是%s。\" % (id(Student.school), Student.getSchool()))\n\n# Call a class method through an instance of the class\nxiaoming.setSchool(\"长春中医学院\")\nprint(\"类属性的地址是%d,内容是%s。\" % (id(Student.school), Student.getSchool()))\nprint(\"类属性的地址是%d,内容是%s。\" % (id(xiaoming.school), xiaoming.school))\n\n# Call a class method through the class itself\nStudent.setSchool(\"长春大学\")\nprint(\"类属性的地址是%d,内容是%s。\" % (id(Student.school), Student.getSchool()))\nprint(\"类属性的地址是%d,内容是%s。\" % (id(xiaoming.school), xiaoming.school))\n\n# Call the class's static method\nprint(Student.getCurrentTime())\nprint(xiaoming.getCurrentTime())\n","repo_name":"nenusoulgithub/LearnPython","sub_path":"求知学堂/day8_10-类方法和静态方法.py","file_name":"day8_10-类方法和静态方法.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"2206508866","text":"\"\"\"`PrettyPrint`, `Frozen`, `GridXY`, `gamma`, `cart2pol`, `pol2cart`\"\"\"\nimport numpy as np\nimport sys\nimport abc\nimport collections as coll\nimport random\nimport copy\nfrom os import listdir\nimport re\nfrom scipy.special import factorial\n\n\nclass PrettyPrint(object, metaclass=abc.ABCMeta):\n    \"\"\"PrettyPrint\n\n    An abstract class that provides a way to prettyprint all class attributes,\n    inspired by scikit-learn.\n\n    Classes deriving from PrettyPrint are required to implement a\n    ``get_params`` method that returns a dictionary containing all the\n    attributes to prettyprint.\n\n    Examples\n    --------\n    >>> from pulse2percept.utils import PrettyPrint\n    >>> class MyClass(PrettyPrint):\n    ...     def __init__(self, a, b):\n    ...         self.a = a\n    ...         self.b = b\n    ...\n    ...     def get_params(self):\n    ...         
return {'a': self.a, 'b': self.b}\n    >>> MyClass(1, 2)\n    MyClass(a=1, b=2)\n    \"\"\"\n\n    @abc.abstractmethod\n    def get_params(self):\n        \"\"\"Return a dictionary of class attributes\"\"\"\n        raise NotImplementedError\n\n    def __repr__(self):\n        \"\"\"Pretty print class as: ClassName(arg1=val1, arg2=val2)\"\"\"\n        # Shorten NumPy array output:\n        np.set_printoptions(precision=2, threshold=5, edgeitems=2)\n        # Line width:\n        lwidth = 60\n        # Sort list of parameters alphabetically:\n        sorted_params = coll.OrderedDict(sorted(self.get_params().items()))\n        # Start string with class name, followed by all arguments:\n        str_params = self.__class__.__name__ + '('\n        # New line indent (align with class name on first line):\n        lindent = len(str_params)\n        # Keep track of number of chars on current line:\n        lc = len(str_params)\n        for key, val in sorted_params.items():\n            # Attribute string:\n            if isinstance(val, str):\n                # Need extra '' around strings for repr:\n                sparam = key + '=\\'' + str(val) + '\\', '\n            else:\n                if isinstance(val, np.ndarray):\n                    # Print NumPy arrays without line breaks:\n                    strobj = np.array2string(val).replace('\\n', ',')\n                    # If still too long, show shape:\n                    if len(strobj) > lwidth - lindent:\n                        strobj = '<%s np.ndarray>' % str(val.shape)\n                else:\n                    strobj = str(val)\n                if len(strobj) > lwidth - lindent:\n                    # Too long, just show the type:\n                    strobj = str(type(val)).replace(\"<class '\", \"\").replace(\"'>\", \"\")\n                sparam = key + '=' + strobj + ', '\n            # If adding `sparam` puts line over `lwidth`, start a new line:\n            if lc + len(sparam) > lwidth:\n                # But only do so if this is not the first param to be added\n                # (check last character of previously written string):\n                if str_params[-1] != '(':\n                    str_params += '\\n' + ' ' * lindent\n                    lc = lindent\n            str_params += sparam\n            lc += len(sparam)\n        # Delete last comma and add ')':\n        str_params = str_params[:-2] + ')'\n        return str_params\n\n\nclass FreezeError(AttributeError):\n    \"\"\"Exception class used to raise when trying to add attributes to Frozen\n\n    Classes of type Frozen do not allow for new attributes to be set outside\n    the constructor.\n    \"\"\"\n\n\ndef freeze_class(set):\n    \"\"\"Freezes a class\n\n    Raise an error when trying to set an undeclared name, or when calling from\n    a method other than ``Frozen.__init__`` or the ``__init__`` method of a\n    class derived from Frozen\n    \"\"\"\n\n    def set_attr(self, name, value):\n        if hasattr(self, name):\n            # If attribute already exists, simply set it\n            set(self, name, value)\n            return\n        elif sys._getframe(1).f_code.co_name == '__init__':\n            # Allow __setattr__ calls in __init__ calls of proper object types\n            if isinstance(sys._getframe(1).f_locals['self'], self.__class__):\n                set(self, name, value)\n                return\n        raise FreezeError(\"You cannot add attributes to \"\n                          \"%s\" % self.__class__.__name__)\n    return set_attr\n\n\nclass Frozen(object):\n    \"\"\"Frozen\n\n    \"Frozen\" classes (and subclasses) do not allow for new class attributes to\n    be set outside the constructor. 
On attempting to add a new attribute, the\n class will raise a FreezeError.\n \"\"\"\n __setattr__ = freeze_class(object.__setattr__)\n\n class __metaclass__(type):\n __setattr__ = freeze_class(type.__setattr__)\n\n\nclass GridXY(object):\n\n def __init__(self, x_range, y_range, step=1, grid_type='rectangular'):\n \"\"\"2D grid\n\n This class generates a two-dimensional grid from a range of x, y values\n and provides an iterator to loop over elements.\n\n Parameters\n ----------\n x_range : tuple\n (x_min, x_max), includes end point\n y_range : tuple\n (y_min, y_max), includes end point\n step : int, double\n Step size\n grid_type : {'rectangular', 'hexagonal'}\n The grid type\n \"\"\"\n # These could also be their own subclasses:\n if grid_type == 'rectangular':\n self._make_rectangular_grid(x_range, y_range, step)\n elif grid_type == 'hexagonal':\n self._make_hexagonal_grid(x_range, y_range, step)\n else:\n raise ValueError(\"Unknown grid type '%s'.\" % grid_type)\n\n def _make_rectangular_grid(self, x_range, y_range, step):\n \"\"\"Creates a rectangular grid\"\"\"\n if not isinstance(x_range, (tuple, list, np.ndarray)):\n raise TypeError((\"x_range must be a tuple, list or NumPy array, \"\n \"not %s.\") % type(x_range))\n if not isinstance(y_range, (tuple, list, np.ndarray)):\n raise TypeError((\"y_range must be a tuple, list or NumPy array, \"\n \"not %s.\") % type(y_range))\n if len(x_range) != 2 or len(y_range) != 2:\n raise ValueError(\"x_range and y_range must have 2 elements.\")\n if isinstance(step, coll.Sequence):\n raise TypeError(\"step must be a scalar.\")\n # Build the grid from `x_range`, `y_range`. If the range is 0, make\n # sure that the number of steps is 1, because linspace(0, 0, num=5)\n # will return a 1x5 array:\n xdiff = np.diff(x_range)\n nx = int(np.ceil((xdiff + 1) / step)) if xdiff != 0 else 1\n ydiff = np.diff(y_range)\n ny = int(np.ceil((ydiff + 1) / step)) if ydiff != 0 else 1\n self.x, self.y = np.meshgrid(np.linspace(*x_range, num=nx),\n np.linspace(*y_range, num=ny),\n indexing='xy')\n self.shape = self.x.shape\n self.reset()\n\n def _make_hexagonal_grid(self, x_range, y_range, step):\n raise NotImplementedError\n\n def __iter__(self):\n \"\"\"Iterator\n\n You can iterate through the grid as if it were a list:\n\n >>> grid = GridXY((0, 1), (2, 3))\n >>> for x, y in grid:\n ... 
print(x, y)\n 0.0 2.0\n 1.0 2.0\n 0.0 3.0\n 1.0 3.0\n \"\"\"\n self.reset()\n return self\n\n def __next__(self):\n it = self._iter\n if it >= self.x.size:\n raise StopIteration\n self._iter += 1\n return self.x.ravel()[it], self.y.ravel()[it]\n\n def reset(self):\n self._iter = 0\n\n\ndef gamma(n, tau, tsample, tol=0.01):\n \"\"\"Returns the impulse response of ``n`` cascaded leaky integrators\n\n This function calculates the impulse response of ``n`` cascaded\n leaky integrators with constant of proportionality 1/``tau``:\n y = (t/theta).^(n-1).*exp(-t/theta)/(theta*factorial(n-1))\n\n Parameters\n ----------\n n : int\n Number of cascaded leaky integrators\n tau : float\n Decay constant of leaky integration (seconds).\n Equivalent to the inverse of the constant of proportionality.\n tsample : float\n Sampling time step (seconds).\n tol : float\n Cut the kernel to size by ignoring function values smaller\n than a fraction ``tol`` of the peak value.\n \"\"\"\n n = int(n)\n tau = float(tau)\n tsample = float(tsample)\n if n <= 0 or tau <= 0 or tsample <= 0:\n raise ValueError(\"`n`, `tau`, and `tsample` must be nonnegative.\")\n if tau <= tsample:\n raise ValueError(\"`tau` cannot be smaller than `tsample`.\")\n\n # Allocate a time vector that is long enough for sure.\n # Trim vector later on.\n t = np.arange(0, 5 * n * tau, tsample)\n\n # Calculate gamma\n y = (t / tau) ** (n - 1) * np.exp(-t / tau)\n y /= (tau * factorial(n - 1))\n\n # Normalize to unit area\n y /= np.trapz(np.abs(y), dx=tsample)\n\n # Cut off tail where values are smaller than `tol`.\n # Make sure to start search on the right-hand side of the peak.\n peak = y.argmax()\n small_vals = np.where(y[peak:] < tol * y.max())[0]\n if small_vals.size:\n t = t[:small_vals[0] + peak]\n y = y[:small_vals[0] + peak]\n\n return t, y\n\n\ndef cart2pol(x, y):\n theta = np.arctan2(y, x)\n rho = np.hypot(x, y)\n return theta, rho\n\n\ndef pol2cart(theta, rho):\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n return x, y\n\n\ndef find_files_like(datapath, pattern):\n \"\"\"Finds files in a folder whose name matches a pattern\n\n This function looks for files in folder ``datapath`` that match a regular\n expression ``pattern``.\n\n Parameters\n ----------\n datapath : str\n Path to search\n pattern : str\n A valid regular expression pattern\n\n Examples\n --------\n # Find all '.npz' files in parent dir\n >>> files = find_files_like('..', r'.*\\.npz$')\n \"\"\"\n # Traverse file list and look for `pattern`\n filenames = []\n pattern = re.compile(pattern)\n for file in listdir(datapath):\n if pattern.search(file):\n filenames.append(file)\n\n return filenames\n","repo_name":"garethgeorge/pulse2percept","sub_path":"pulse2percept/utils/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"73952639842","text":"from abc import ABC\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import optim\nfrom torch.optim.optimizer import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass TrainerOptimizersMixin(ABC):\n\n _lightning_optimizers: Optional[List[LightningOptimizer]]\n\n def init_optimizers(self, model: Optional[\"pl.LightningModule\"]) -> Tuple[List, List, List]:\n pl_module = 
self.lightning_module or model\n self._lightning_optimizers = None\n optim_conf = self.call_hook(\"configure_optimizers\", pl_module=pl_module)\n if optim_conf is None:\n rank_zero_warn(\n \"`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer\",\n UserWarning,\n )\n optim_conf = _MockOptimizer()\n\n optimizers, lr_schedulers, optimizer_frequencies, monitor = self._configure_optimizers(optim_conf)\n lr_schedulers = self._configure_schedulers(lr_schedulers, monitor, not pl_module.automatic_optimization)\n _validate_scheduler_optimizer(optimizers, lr_schedulers)\n return optimizers, lr_schedulers, optimizer_frequencies\n\n @staticmethod\n def _configure_optimizers(\n optim_conf: Union[Dict[str, Any], List, Optimizer, Tuple]\n ) -> Tuple[List, List, List, Optional[str]]:\n optimizers, lr_schedulers, optimizer_frequencies = [], [], []\n monitor = None\n\n # single output, single optimizer\n if isinstance(optim_conf, Optimizer):\n optimizers = [optim_conf]\n # two lists, optimizer + lr schedulers\n elif (\n isinstance(optim_conf, (list, tuple))\n and len(optim_conf) == 2\n and isinstance(optim_conf[0], list)\n and all(isinstance(opt, Optimizer) for opt in optim_conf[0])\n ):\n opt, sch = optim_conf\n optimizers = opt\n lr_schedulers = sch if isinstance(sch, list) else [sch]\n # single dictionary\n elif isinstance(optim_conf, dict):\n _validate_optim_conf(optim_conf)\n optimizers = [optim_conf[\"optimizer\"]]\n monitor = optim_conf.get(\"monitor\", None)\n lr_schedulers = [optim_conf[\"lr_scheduler\"]] if \"lr_scheduler\" in optim_conf else []\n # multiple dictionaries\n elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):\n for opt_dict in optim_conf:\n _validate_optim_conf(opt_dict)\n optimizers = [opt_dict[\"optimizer\"] for opt_dict in optim_conf]\n scheduler_dict = (\n lambda scheduler, opt_idx: dict(scheduler, opt_idx=opt_idx)\n if isinstance(scheduler, dict)\n else {\"scheduler\": scheduler, \"opt_idx\": opt_idx}\n )\n\n lr_schedulers = [\n scheduler_dict(opt_dict[\"lr_scheduler\"], opt_idx)\n for opt_idx, opt_dict in enumerate(optim_conf)\n if \"lr_scheduler\" in opt_dict\n ]\n optimizer_frequencies = [\n opt_dict[\"frequency\"] for opt_dict in optim_conf if opt_dict.get(\"frequency\", None) is not None\n ]\n # assert that if frequencies are present, they are given for all optimizers\n if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):\n raise ValueError(\"A frequency must be given to each optimizer.\")\n # single list or tuple, multiple optimizer\n elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizer) for opt in optim_conf):\n optimizers = list(optim_conf)\n # unknown configuration\n else:\n raise MisconfigurationException(\n \"Unknown configuration for model optimizers.\"\n \" Output from `model.configure_optimizers()` should either be:\\n\"\n \" * `torch.optim.Optimizer`\\n\"\n \" * [`torch.optim.Optimizer`]\\n\"\n \" * ([`torch.optim.Optimizer`], [`torch.optim.lr_scheduler`])\\n\"\n ' * {\"optimizer\": `torch.optim.Optimizer`, (optional) \"lr_scheduler\": `torch.optim.lr_scheduler`}\\n'\n ' * A list of the previously described dict format, with an optional \"frequency\" key (int)'\n )\n return optimizers, lr_schedulers, optimizer_frequencies, monitor\n\n def convert_to_lightning_optimizers(self):\n def _convert_to_lightning_optimizer(trainer, optimizer):\n if not isinstance(optimizer, LightningOptimizer):\n optimizer = LightningOptimizer(optimizer)\n 
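# The guard above makes the wrapping idempotent; a minimal sketch of the same\n            # box-once pattern (hypothetical helper, defined here for illustration only\n            # and never called):\n            def _wrap_once_sketch(opt):\n                # re-wrapping is skipped so repeated conversion passes stay safe\n                return opt if isinstance(opt, LightningOptimizer) else LightningOptimizer(opt)\n            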
optimizer._on_trainer_init(trainer)\n return optimizer\n\n self._lightning_optimizers = {\n opt_idx: _convert_to_lightning_optimizer(self, opt) for opt_idx, opt in enumerate(self.optimizers)\n }\n\n @staticmethod\n def _configure_schedulers(\n schedulers: list, monitor: Optional[str], is_manual_optimization: bool\n ) -> List[Dict[str, Any]]:\n \"\"\"Convert each scheduler into dict structure with relevant information.\"\"\"\n lr_schedulers = []\n default_config = _get_default_scheduler_config()\n for scheduler in schedulers:\n if is_manual_optimization:\n if isinstance(scheduler, dict):\n invalid_keys = {\"interval\", \"frequency\", \"reduce_on_plateau\", \"monitor\", \"strict\"}\n keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys]\n\n if keys_to_warn:\n rank_zero_warn(\n f\"The lr scheduler dict contains the key(s) {keys_to_warn}, but the keys will be ignored.\"\n \" You need to call `lr_scheduler.step()` manually in manual optimization.\",\n RuntimeWarning,\n )\n\n scheduler = {key: scheduler[key] for key in scheduler if key not in invalid_keys}\n lr_schedulers.append({**default_config, **scheduler})\n else:\n lr_schedulers.append({**default_config, \"scheduler\": scheduler})\n else:\n if isinstance(scheduler, dict):\n # check provided keys\n extra_keys = [k for k in scheduler.keys() if k not in default_config.keys()]\n if extra_keys:\n rank_zero_warn(f\"Found unsupported keys in the lr scheduler dict: {extra_keys}\", RuntimeWarning)\n if \"scheduler\" not in scheduler:\n raise MisconfigurationException(\n 'The lr scheduler dict must have the key \"scheduler\" with its item being an lr scheduler'\n )\n if \"interval\" in scheduler and scheduler[\"interval\"] not in (\"step\", \"epoch\"):\n raise MisconfigurationException(\n 'The \"interval\" key in lr scheduler dict must be \"step\" or \"epoch\"'\n f' but is \"{scheduler[\"interval\"]}\"'\n )\n scheduler[\"reduce_on_plateau\"] = isinstance(\n scheduler[\"scheduler\"], optim.lr_scheduler.ReduceLROnPlateau\n )\n if scheduler[\"reduce_on_plateau\"] and scheduler.get(\"monitor\", None) is None:\n raise MisconfigurationException(\n \"The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used.\"\n ' For example: {\"optimizer\": optimizer, \"lr_scheduler\":'\n ' {\"scheduler\": scheduler, \"monitor\": \"your_loss\"}}'\n )\n is_one_cycle = isinstance(scheduler[\"scheduler\"], optim.lr_scheduler.OneCycleLR)\n if is_one_cycle and scheduler.get(\"interval\", \"epoch\") == \"epoch\":\n rank_zero_warn(\n \"A `OneCycleLR` scheduler is using 'interval': 'epoch'.\"\n \" Are you sure you didn't mean 'interval': 'step'?\",\n RuntimeWarning,\n )\n lr_schedulers.append({**default_config, **scheduler})\n elif isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):\n if monitor is None:\n raise MisconfigurationException(\n \"`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`\"\n \" scheduler is used. 
For example:\"\n ' {\"optimizer\": optimizer, \"lr_scheduler\": scheduler, \"monitor\": \"metric_to_track\"}'\n )\n lr_schedulers.append(\n {**default_config, \"scheduler\": scheduler, \"reduce_on_plateau\": True, \"monitor\": monitor}\n )\n elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):\n lr_schedulers.append({**default_config, \"scheduler\": scheduler})\n else:\n raise ValueError(f'The provided lr scheduler \"{scheduler}\" is invalid')\n return lr_schedulers\n\n\nclass _MockOptimizer(Optimizer):\n \"\"\"The `_MockOptimizer` will be used inplace of an optimizer in the event that `None` is returned from\n `configure_optimizers`.\"\"\"\n\n def __init__(self):\n super().__init__([torch.zeros(1)], {})\n\n def add_param_group(self, param_group):\n pass # Do Nothing\n\n def load_state_dict(self, state_dict):\n pass # Do Nothing\n\n def state_dict(self):\n return {} # Return Empty\n\n def step(self, closure=None):\n if closure is not None:\n closure()\n\n def zero_grad(self):\n pass # Do Nothing\n\n def __repr__(self):\n return \"No Optimizer\"\n\n\ndef _validate_optim_conf(optim_conf: Dict[str, Any]) -> None:\n valid_keys = {\"optimizer\", \"lr_scheduler\", \"frequency\", \"monitor\"}\n extra_keys = optim_conf.keys() - valid_keys\n if extra_keys:\n rank_zero_warn(f\"Found unsupported keys in the optimizer configuration: {set(extra_keys)}\", RuntimeWarning)\n\n\ndef _validate_scheduler_optimizer(optimizers, lr_schedulers):\n if any(sch[\"scheduler\"].optimizer not in optimizers for sch in lr_schedulers):\n raise MisconfigurationException(\n \"Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`.\"\n )\n\n\ndef _get_default_scheduler_config() -> Dict[str, Any]:\n return {\n \"scheduler\": None,\n \"name\": None, # no custom name\n \"interval\": \"epoch\", # after epoch is over\n \"frequency\": 1, # every epoch/batch\n \"reduce_on_plateau\": False, # most often not ReduceLROnPlateau scheduler\n \"monitor\": None, # value to monitor for ReduceLROnPlateau\n \"strict\": True, # enforce that the monitor exists for ReduceLROnPlateau\n \"opt_idx\": None, # necessary to store opt_idx when optimizer frequencies are specified\n }\n","repo_name":"Eashurox/CPDP_ML","sub_path":"Dataset/ML Projects/Lightning_Versions/lightning-1.5.0/pytorch_lightning/trainer/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":11288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11066440810","text":"import aocutils\n\n\ndef main():\n entries = [int(x) for x in aocutils.readlines(\"input.txt\")]\n data = set(entries)\n entries.sort()\n for i, a in enumerate(entries):\n for j, b in enumerate(entries):\n if i == j:\n continue\n\n c = 2020 - a - b\n if c <= 0:\n break\n if c in data:\n print(a, '+', b, '+', c, '=', a + b + c)\n print(a, '*', b, '*', c, '=', a * b * c)\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"martenbr/aoc","sub_path":"aoc2020/dec01/expenses2.py","file_name":"expenses2.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15999521590","text":"import base64\nimport os\nfrom cart import cart\nfrom catalog.models import Product\nfrom ecomstore.settings import PRODUCTS_PER_ROW\nfrom search import search\nfrom search.models import SearchTerm\nfrom stats.models import ProductView\n\nTRACKING_ID = 'tracking_id'\n\n\ndef tracking_id(request):\n if 
request.session.get(TRACKING_ID, '') == '':\n request.session[TRACKING_ID] = cart.generate_random_id()\n return request.session[TRACKING_ID]\n\n\ndef recommended_from_search(request):\n common_words = frequent_search_words(request)\n\n matching = []\n for word in common_words:\n results = search.products(word).get('products', [])\n for result in results:\n if len(matching) < PRODUCTS_PER_ROW and not result in matching:\n matching.append(result)\n\n return matching\n\n\ndef frequent_search_words(request):\n searches = SearchTerm.objects.filter(tracking_id=tracking_id(request)) \\\n .values('q').order_by('-search_date')[0:10]\n\n search_string = ' '.join([search['q'] for search in searches])\n return sort_words_by_frequency(search_string)[0:3]\n\n\ndef sort_words_by_frequency(search_string):\n words = search_string.split()\n ranked_words = [[word, words.count(word)] for word in set(words)]\n sorted_words = sorted(ranked_words, key=lambda word: -word[1])\n return [p[0] for p in sorted_words]\n\n\ndef log_product_view(request, product):\n t_id = tracking_id(request)\n try:\n v = ProductView.objects.get(tracking_id=t_id, product=product)\n v.save()\n except ProductView.DoesNotExist:\n v = ProductView()\n v.product = product\n v.tracking_id = t_id\n v.ip_address = request.META.get('REMOTE_ADDR')\n v.user = None\n if request.user.is_authenticated():\n v.user = request.user\n v.save()\n\n\ndef recommended_from_views(request):\n t_id = tracking_id(request)\n viewed_products = get_recently_viewed_products(request)\n\n if viewed_products:\n # get all tracking ids who have viewed the list of products viewed by users\n product_views = ProductView.objects.filter(product__in=viewed_products).values('tracking_id')\n t_ids = [v['tracking_id'] for v in product_views]\n\n if t_ids:\n all_viewed_products = Product.active.filter(productview__tracking_id__in=t_ids)\n\n if all_viewed_products:\n other_viewed_products = ProductView.objects.filter(product__in=all_viewed_products)\\\n .exclude(product__in=viewed_products)\n\n if other_viewed_products:\n return Product.active.filter(productview__in=other_viewed_products).distinct()\n\n\ndef get_recently_viewed_products(request):\n t_id = tracking_id(request)\n views = ProductView.objects.filter(tracking_id=t_id).values('product_id').order_by('-date')[0:PRODUCTS_PER_ROW]\n product_ids = [v['product_id'] for v in views]\n return Product.active.filter(id__in=product_ids)\n\n\n\n","repo_name":"c-j-j/ecomstore","sub_path":"stats/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18486916036","text":"from flask import Flask, request, json, Blueprint, current_app\nfrom tlapbot.db import get_db\nfrom tlapbot.owncast_requests import send_chat\nfrom tlapbot.owncast_helpers import (add_user_to_database, change_display_name,\n read_users_points, remove_duplicate_usernames)\nfrom tlapbot.help_message import send_help\nfrom tlapbot.redeems_handler import handle_redeem\n\n\nbp = Blueprint('owncast_webhooks', __name__)\n\n\n@bp.route('/owncastWebhook', methods=['POST'])\ndef owncast_webhook():\n data = request.json\n db = get_db()\n\n # Make sure user is in db before doing anything else.\n if data[\"type\"] in [\"CHAT\", \"NAME_CHANGED\", \"USER_JOINED\"]:\n user_id = data[\"eventData\"][\"user\"][\"id\"]\n display_name = data[\"eventData\"][\"user\"][\"displayName\"]\n add_user_to_database(db, user_id, display_name)\n\n if data[\"type\"] == 
\"USER_JOINED\":\n if data[\"eventData\"][\"user\"][\"authenticated\"]:\n remove_duplicate_usernames(db, user_id, display_name)\n elif data[\"type\"] == \"NAME_CHANGE\":\n user_id = data[\"eventData\"][\"user\"][\"id\"]\n new_name = data[\"eventData\"][\"newName\"]\n change_display_name(db, user_id, new_name)\n if data[\"eventData\"][\"user\"][\"authenticated\"]:\n remove_duplicate_usernames(db, user_id, new_name)\n elif data[\"type\"] == \"CHAT\":\n if not current_app.config['PASSIVE']:\n prefix = current_app.config['PREFIX']\n user_id = data[\"eventData\"][\"user\"][\"id\"]\n display_name = data[\"eventData\"][\"user\"][\"displayName\"]\n current_app.logger.debug(f'New chat message from {display_name}:')\n current_app.logger.debug(f'{data[\"eventData\"][\"body\"]}')\n if data[\"eventData\"][\"body\"].startswith(f\"{prefix}help\"):\n send_help()\n elif data[\"eventData\"][\"body\"].startswith(f\"{prefix}points\"):\n points = read_users_points(db, user_id)\n if points is None:\n send_chat(\"Error reading points.\")\n else:\n send_chat(f\"{display_name}'s points: {points}\")\n elif data[\"eventData\"][\"body\"].startswith(f\"{prefix}name_update\"):\n # Forces name update in case bot didn't catch the NAME_CHANGE\n # event. Also removes saved usernames from users with same name\n # if user is authenticated.\n change_display_name(db, user_id, display_name)\n if data[\"eventData\"][\"user\"][\"authenticated\"]:\n remove_duplicate_usernames(db, user_id, display_name)\n elif data[\"eventData\"][\"body\"].startswith(prefix):\n handle_redeem(data[\"eventData\"][\"body\"], user_id)\n return data\n","repo_name":"SleepyLili/tlapbot","sub_path":"tlapbot/owncast_webhooks.py","file_name":"owncast_webhooks.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"} +{"seq_id":"45426972829","text":"from dash import Input, Output\nimport plotly.graph_objects as go\nimport numpy as np\nimport scipy.stats as stat\nimport statsmodels.stats.proportion as statmod\nfrom eci_view import app\nfrom eci_model import get_df_qual, get_df_quant, df_qual, quant_y_range\n\n\n@app.callback(\n Output(\"quant-hist\", \"figure\"),\n Output(\"quant-variable\", \"children\"),\n Output(\"quant-mean\", \"children\"),\n Output(\"quant-conf-int\", \"children\"),\n Output(\"quant-conf-level\", \"children\"),\n Output(\"sr-hist\", \"children\"),\n Input(\"quant-dropdown\", \"value\"),\n Input(\"quant-conf-value\", \"value\")\n)\ndef update_histogram(value, conf_level):\n df = get_df_quant(value)\n fig = go.Figure(\n go.Histogram(x=df,\n showlegend=False))\n fig.update_traces(marker_line_color=\"rgba(158,171,5,1)\",\n marker_color=\"rgba(158,171,5,0.5)\",\n marker_line_width=1)\n fig.update_yaxes(title_text=None,\n range=[0, quant_y_range[value]])\n fig.update_layout(margin=dict(t=20, b=10, l=20, r=20),\n font_size=14)\n mean = np.mean(df)\n sem = stat.sem(df)\n conf_int = stat.norm.interval(\n alpha=conf_level,\n loc=mean,\n scale=sem)\n add_ci_lines(fig, value, conf_int[0], conf_int[1])\n sr_hist = f\"Histogram of {value} with confidence interval ({conf_int[0]:.3f}, {conf_int[1]:.3f})\"\n return fig, f\"{value}\", f\"{mean:.3f}\", f\"({conf_int[0]:.3f}, {conf_int[1]:.3f})\", f\"{conf_level:.0%}\", sr_hist\n\n\ndef add_ci_lines(fig, value, ci_lower, ci_upper):\n y = np.linspace(0, quant_y_range[value], 10)\n fig.add_trace(\n go.Scatter(x=[ci_lower] * 10,\n y=y,\n marker_opacity=0,\n marker_color=\"#0085a1\",\n name=\"Confidence
 interval (upper/lower bounds)\",\n                   hovertemplate=\"CI lower bound: %{x:.3f}\")\n    )\n    fig.add_trace(\n        go.Scatter(x=[ci_upper] * 10,\n                   y=y,\n                   marker_opacity=0,\n                   marker_color=\"#0085a1\",\n                   hovertemplate=\"CI upper bound: %{x:.3f}\",\n                   showlegend=False)\n    )\n    return fig\n\n\n@app.callback(\n    Output(\"cat-radio\", \"options\"),\n    Output(\"cat-radio\", \"value\"),\n    Input(\"qual-dropdown\", \"value\")\n)\ndef set_categories(value):\n    df = df_qual[value]\n    categories = df.unique()\n    cat1 = categories[0]\n    cat2 = categories[1]\n    return [{\"label\": cat1, \"value\": cat1}, {\"label\": cat2, \"value\": cat2}], cat1\n\n\n@app.callback(\n    Output(\"qual-bar\", \"figure\"),\n    Output(\"qual-variable\", \"children\"),\n    Output(\"qual-cat1\", \"children\"),\n    Output(\"count-cat1\", \"children\"),\n    Output(\"qual-cat2\", \"children\"),\n    Output(\"count-cat2\", \"children\"),\n    Output(\"qual-n-cat1\", \"children\"),\n    Output(\"ci-cat1\", \"children\"),\n    Output(\"qual-ci-result\", \"children\"),\n    Output(\"qual-conf-level\", \"children\"),\n    Output(\"sr-bar\", \"children\"),\n    Input(\"qual-dropdown\", \"value\"),\n    Input(\"qual-conf-value\", \"value\"),\n    Input(\"cat-radio\", \"value\")\n)\ndef update_bar(value, conf_level, category):\n    x, y1, y2, expected_y, cat1, cat2 = get_df_qual(value, category)\n    y1_val = [y1, expected_y]\n    y2_val = [y2, expected_y]\n    fig = go.Figure(data=[go.Bar(name=cat1,\n                                 x=x,\n                                 y=y1_val,\n                                 marker_color=\"#d10373\",\n                                 marker_opacity=0.6),\n                          go.Bar(name=cat2,\n                                 x=x,\n                                 y=y2_val,\n                                 marker_color=\"#9eab05\",\n                                 marker_opacity=0.6)])\n    fig.update_layout(barmode=\"stack\",\n                      margin=dict(t=20, b=10, l=20, r=20),\n                      font_size=14)\n    fig.update_yaxes(title_text=None,\n                     range=[0, (y1+y2)+1])\n    x = y1\n    n = y1 + y2\n    conf_int = statmod.proportion_confint(x, n, 1-conf_level, \"normal\")\n    fig.add_shape(type=\"line\",\n                  xref=\"paper\",\n                  yref=\"paper\",\n                  x0=0,\n                  y0=conf_int[0],\n                  x1=1,\n                  y1=conf_int[0],\n                  line=dict(color=\"#0085a1\",\n                            width=2))\n    fig.add_shape(type=\"line\",\n                  xref=\"paper\",\n                  yref=\"paper\",\n                  x0=0,\n                  y0=conf_int[1],\n                  x1=1,\n                  y1=conf_int[1],\n                  line=dict(color=\"#0085a1\",\n                            width=2))\n    sr_bar = f\"Barchart of {value} with confidence interval for {cat1} ({conf_int[0]*n:.2f}, {conf_int[1]*n:.2f})\"\n    return fig, value, f\"Count of {cat1}: \", y1, f\"Count of {cat2}: \", y2, n, f\"Confidence interval for {cat1}: \", f\"({conf_int[0]*n:.2f}, {conf_int[1]*n:.2f})\", f\"{conf_level:.0%}\", sr_bar\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"sozzifer/TECH7009","sub_path":"eci_controller.py","file_name":"eci_controller.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"9150127851","text":"#chatGPT-3.5's attempt\nfrom math import radians, sin, cos, sqrt, atan2, degrees\n\ndef calculate_displacement(lat1in, lon1in, lat2in, lon2in):\n    # Convert coordinates to radians\n    lat1, lon1 = radians(lat1in), radians(lon1in)\n    lat2, lon2 = radians(lat2in), radians(lon2in)\n\n    # Earth radius in meters\n    earth_radius = 6371000\n\n    # Haversine formula\n    delta_lat = lat2 - lat1\n    delta_lon = lon2 - lon1\n\n    a = sin(delta_lat/2)**2 + cos(lat1) * cos(lat2) * sin(delta_lon/2)**2\n    c = 2 * atan2(sqrt(a), sqrt(1-a))\n\n    # Calculate displacement in meters\n    displacement_y = earth_radius * c\n    displacement_x = earth_radius * c * cos(lat1)\n\n    #displacement_y = displacement_y if lat1in < lat2in else -displacement_y\n    #displacement_x = displacement_x if lon1in < lon2in else -displacement_x\n\n    
print(f'Lat: {round(lat1in,5)},{round(lat2in,5)},{round(displacement_y,1)} \\t| Lon: {round(lon1in,5)},{round(lon2in,5)},{round(displacement_x,1)}')\n return displacement_y, displacement_x\n\n\n\ndef calculate_distance_changes(lat1, lon1, lat2, lon2):\n # Convert coordinates to radians\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n # Earth radius in meters\n earth_radius = 111111 #meters\n\n # Haversine formula\n delta_lat = lat2 - lat1\n delta_lon = lon2 - lon1\n\n # Calculate distance changes in latitude and longitude\n distance_change_lat = degrees(delta_lat) * earth_radius\n distance_change_lon = degrees(delta_lon) * earth_radius * cos(lat1)\n\n return round(distance_change_lat,2), round(distance_change_lon,2)\n\n\n\ndef calculate_coordinates(lat, lon, dx, dy):\n # Earth radius in meters\n earth_radius = 6371000\n\n # Convert latitude and longitude to radians\n lat = radians(lat)\n lon = radians(lon)\n\n # Calculate displacement in radians\n displacement_lat = dx / earth_radius\n displacement_lon = dy / (earth_radius * cos(lat))\n\n # Calculate new latitude and longitude\n new_lat = lat + displacement_lat\n new_lon = lon + displacement_lon\n\n # Convert back to degrees\n new_lat = degrees(new_lat)\n new_lon = degrees(new_lon)\n\n # Return gps corrected by metric input\n return new_lat, new_lon\n\n\n\n#def add_to_gps(latitude, longitude, node_pose_list, node):\n# x_offset = (node_pose_list[node]['x']) / (cos(latitude) * 111111)\n# y_offset = (node_pose_list[node]['y']) / (111111)\n# return latitude + (y_offset*0.95), longitude + (-x_offset*1.65)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef metric(datum_latitude, datum_longitude, datum_elevation, latitude, longitude, elevation):\n x = (datum_latitude - latitude) / (111111)\n y = (datum_longitude - longitude) / (cos(latitude) * 111111)\n z = datum_elevation - elevation\n return x, y, z\n\ndef gps(datum_latitude, datum_longitude, datum_elevation, x, y, z):\n latitude = datum_latitude + (x * 111111)\n longitude = datum_longitude + (y * cos(latitude) * 111111)\n elevation = datum_elevation + z\n return latitude, longitude, elevation\n\ndef add_metric_to_gps(datum_latitude, datum_longitude, datum_elevation, lat, lon, ele, x, y, z):\n xLat, yLon, zEle = metric(datum_latitude, datum_longitude, datum_elevation, lat, lon, ele)\n return gps(datum_latitude, datum_longitude, datum_elevation, xLat+x, yLon+y, zEle+z)\n\n\n\n\n#datum: lat lon\n#node: lat, lon\n#tmap = metric(node) - metric(datum)\n#tmap = metric(node-datum)\ndef get_relative_metric(datum, node):\n return metric(node)-metric(datum)\n\n\n\n\n#datum: lat lon\n#tmap: xd, yd\n#node = gps(metric(datum)+tmap)\n#node = datum+gps(tmap)\ndef get_relative_metric(datum, tmap):\n return datum+metric(node)\n\n\n\n\n\n\n##################################################################### This one works\nimport geopy.distance\ndef get_datumrelative_metric_from_gps(datum, gnss):\n lat1, lon1 = datum['latitude'], datum['longitude']\n lat2, lon2 = gnss['latitude'], gnss['longitude']\n\n # Get average width difference from left and right longitudes\n x1 = geopy.distance.geodesic((lat1, lon1), (lat1,lon2)).m\n x2 = geopy.distance.geodesic((lat2, lon1), (lat2,lon2)).m\n x = (x1+x2)/2\n if lat1 > lat2: x = -x\n # ^ this is the longitude displacement in m\n\n # Get average height difference from top and bottom latitudes\n y1 = geopy.distance.geodesic((lat1, lon1), (lat2,lon1)).m\n y2 = geopy.distance.geodesic((lat1, lon2), (lat2,lon2)).m\n y = (y1+y2)/2\n if lon1 > 
lon2: y = -y\n # ^ this is the latitude displacement in m\n\n # Get displacement of gnss from a datum point\n z = gnss['elevation'] - datum['elevation']\n return {'x':x, 'y':y, 'z':z}\n\ndef get_gps_from_datumrelative_metric(datum, xyz):\n # Get gnss of a datum point shifted by xyz metres\n lat = degrees( radians(datum['latitude']) + ( xyz['y'] / 6371000 ))\n lon = degrees( radians(datum['longitude']) + ( xyz['x'] / (6371000 * cos(radians(datum['latitude']))) ))\n return {'latitude': lat, 'longitude': lon, 'elevation': datum['elevation']}\n\ndef displace_gps_by_metric_relative_to_datum(datum, gnss, xyz):\n metric = get_datumrelative_metric_from_gps(datum, gnss)\n new_xyz = {'x':metric['x']+xyz['x'], 'y':metric['y']+xyz['y'], 'z':metric['z']+xyz['z']}\n new_gnss = get_gps_from_datumrelative_metric(datum, new_xyz)\n return new_gnss\n\ndef get_bounds(gps_list):\n lats = [l[0] for l in gps_list]\n lons = [l[1] for l in gps_list]\n north, south = max(lats), min(lats)\n east, west = max(lons), min(lons)\n return {'north':north, 'east':east, 'south':south, 'west':west}\n\ndef get_range(bounds):\n ne = {'latitude':bounds['north'], 'longitude':bounds['east'], 'elevation':0}\n sw = {'latitude':bounds['south'], 'longitude':bounds['west'], 'elevation':0}\n xyz = get_datumrelative_metric_from_gps(sw, ne)\n return xyz['x'], xyz['y']\n################################################################################# This one works\n\nif __name__=='__main__':\n datum = {'latitude': 53.2648, 'longitude': -0.5310, 'elevation': 0}\n print(' datum', datum)\n\n sw = {'latitude': 53.2648, 'longitude': -0.5318, 'elevation': 0}\n print('ori sw', sw)\n\n ne = {'latitude': 53.2675, 'longitude': -0.5248, 'elevation': 0}\n print('ori ne', ne)\n\n print('\\n\\nFind distance from datum to sw corner:')\n print(' datum', datum)\n print('ori sw', sw)\n map_frame_pose = get_datumrelative_metric_from_gps(datum, sw)\n print('\\nmap pos: ', map_frame_pose)\n\n #dims = get_datumrelative_metric_from_gps(sw, ne)\n #print(' sw to ne: ', dims)\n\n #gps = get_gps_from_datumrelative_metric(sw, map_frame_pose)\n #print('ori sw', sw)\n #print('new sw', gps)\n\n # need to use the map_frame_pose and the datum to get the sw gps\n print('\\n\\nUse map_pose and datum to find sw:')\n print('map pos', map_frame_pose)\n print(' datum', datum)\n sw_copy = displace_gps_by_metric_relative_to_datum(datum, datum, map_frame_pose)\n print('\\nori sw', sw)\n print('new sw', sw_copy)\n print('\\n')\n","repo_name":"LCAS/environment_common","sub_path":"environment_common/convertors/tools/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":6964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31621370974","text":"# Importing the necessary modules\nimport os\nimport zipfile\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom datetime import datetime\nfrom airflow.providers.amazon.aws.hooks.s3 import S3Hook\n\n# Default DAG arguments\ndefault_args = {\n \"owner\": \"edonizeti\",\n 'depends_on_past': False,\n \"start_date\": days_ago(1),\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n}\n\n# Local file directories\nzip_dir = \"/etl-bi-engineer-datatalks-challenge/data\"\nextract_to = \"/etl-bi-engineer-datatalks-challenge/data/csv_files\"\nmove_zip_to = \"/etl-bi-engineer-datatalks-challenge/data/extracted\"\nmove_csv_to = \"/etl-bi-engineer-datatalks-challenge/data/uploaded\"\n\n# S3 
Bucket\nbucket_name = \"etl-bi-engineer\"\n\n# Unzip the .zip files\ndef unzip_files(zip_dir, extract_to):\n zip_files_list = []\n for file in os.listdir(zip_dir):\n # Checking if the file is a .zip\n if file.endswith(\".zip\"):\n # Creating the full path of the .zip file\n zip_path = os.path.join(zip_dir, file)\n # Creating a ZipFile object to manipulate the .zip file\n with zipfile.ZipFile(zip_path, \"r\") as zip_ref:\n # Extracting all .zip files to the same directory\n if not os.path.exists(extract_to):\n os.makedirs(extract_to)\n zip_ref.extractall(extract_to)\n print(f\"File {file} unzipped successfully\"),\n # Generates a list of .zip files\n zip_files_list.append(os.path.join(zip_dir, file))\n return zip_files_list\n\n# Move files that have already been unzipped to another folder\ndef move_zip_file(zip_paths, move_zip_to):\n for zip_path in zip_paths:\n if not os.path.exists(move_zip_to):\n os.makedirs(move_zip_to)\n # Extract the original filename and extension\n file_name, file_ext = os.path.splitext(os.path.basename(zip_path))\n # Appending the date and time to the file name\n timestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n # Create a new filename with the current date included\n new_zip_path = f\"{file_name}_{timestamp}{file_ext}\"\n # Move the file with the new filename\n os.rename(zip_path, os.path.join(move_zip_to, new_zip_path))\n #os.rename(zip_path, os.path.join(move_zip_to, os.path.basename(new_zip_path)))\n print(f\"File {zip_path} successfully moved to {move_zip_to}\")\n\n# upload the unzipped files to S3\ndef upload_to_s3(csv_dir,bucket_name):\n # Use airflow connection to authenticate to S3\n hook = S3Hook(aws_conn_id='my_aws_conn') \n # List to store .csv file paths\n csv_files_list = []\n for file in os.listdir(csv_dir):\n if file.endswith(\".csv\"):\n csv_path = os.path.join(csv_dir,file)\n # Returns the file name and extension separately. 
The file name without the .csv will be the folder name in S3\n file_name, file_ext = os.path.splitext(file)\n folder = file_name \n # Appending the date and time to the file name\n timestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n csv_file = f\"{file_name}_{timestamp}{file_ext}\" \n s3_key = os.path.join(folder,csv_file)\n try:\n hook.load_file(csv_path,s3_key,bucket_name) \n print(f\"File {file} uploaded to S3 successfully\")\n # Add file path to list\n csv_files_list.append(csv_path)\n except Exception as e:\n error_message=f\"Error sending file {file} to S3: {e}\"\n print(error_message)\n raise Exception(error_message)\n return csv_files_list\n\n# Move the .csv files after successful upload to S3\ndef move_csv_files(csv_files_list, move_csv_to):\n for csv_path in csv_files_list:\n if not os.path.exists(move_csv_to):\n os.makedirs(move_csv_to)\n # Extract the original filename and extension\n file_name, file_ext = os.path.splitext(os.path.basename(csv_path)) \n # Appending the date and time to the file name\n timestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\") \n # Create a new filename with the current date included\n new_csv_path = f\"{file_name}_{timestamp}{file_ext}\" \n os.rename(csv_path, os.path.join(move_csv_to, os.path.basename(new_csv_path)))\n print(f\"File {csv_path} successfully moved to {move_csv_to}\")\n\n# Create DAG\nwith DAG(\n dag_id=\"load_file_to_S3\",\n description='DAG to unzip a .zip file and move it to the \"extracted\" folder',\n default_args=default_args,\n schedule_interval=\"@daily\",\n catchup=False,\n) as dag:\n\n unzip_task = PythonOperator(\n task_id=\"unzip_file\",\n python_callable=unzip_files,\n op_args=[zip_dir, extract_to],\n # To return the function's return value\n provide_context=True, \n )\n\n move_zip_task = PythonOperator(\n task_id=\"move_zip_file\",\n python_callable=move_zip_file,\n # Use the result of the previous task as an argument\n op_args=[unzip_task.output, move_zip_to], \n )\n\n upload_to_S3_task = PythonOperator(\n task_id=\"upload_csv_to_s3\",\n python_callable=upload_to_s3,\n op_args=[extract_to, bucket_name],\n provide_context=True,\n )\n\n move_csv_task = PythonOperator(\n task_id=\"move_csv_files\",\n python_callable=move_csv_files,\n op_args=[upload_to_S3_task.output, move_csv_to],\n )\n\n# Defining the task order in the DAG\nunzip_task >> move_zip_task >> upload_to_S3_task >> move_csv_task","repo_name":"edonizeti/simple-ecommerce-etl-project","sub_path":"airflow/dags/load_files_to_S3.py","file_name":"load_files_to_S3.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36761164860","text":"import logging\n\nfrom autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner\n\n\nlogger = logging.getLogger(name=__name__)\n\n\nclass EchoClient(ApplicationSession):\n async def onJoin(self, details):\n logger.info(\"Successfully connected to the router.\")\n\n await self.call(\"com.echo.log\", \"DATA\")\n self.publish(\"com.echo.log\", \"DATA\")\n\n\nif __name__ == '__main__':\n runner = ApplicationRunner(url=\"ws://localhost:8080/ws\", realm=\"echo\")\n runner.run(EchoClient)\n","repo_name":"jrocketfingers/crossbar-echo","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15489352330","text":"from aiogram import types\nfrom aiogram.types import labeled_price\nfrom 
aiogram.types.labeled_price import LabeledPrice\n\n\nfrom utils.misc import Item\n\n\nasync def create_invoice(book_id, book_name, book_price, book_photo, book_quant, balance):\n book = Item(\n title=book_name + f' {book_quant}шт',\n description=book_name + f' {book_quant}шт. \\nДля покупки используйте тест-карту: 1111 1111 1111 1026, 12/22, CVC 000. \\nПокупки на суммы больше 1тр недоступны!',\n currency=\"RUB\",\n prices=[\n LabeledPrice(\n label=book_name + f' {book_quant}шт',\n amount=book_price * book_quant\n ),\n LabeledPrice(\n label='Ваш баланс',\n amount=-balance\n )\n ],\n start_parameter=book_id,\n photo_url=book_photo,\n need_shipping_address=True\n )\n\n return book.generate_invoice()\n\nSHIPPING = types.ShippingOption(\n id='shipp',\n title='херовая доставка',\n prices=[\n LabeledPrice(\n 'Почта России', 10000\n )\n ]\n)\n","repo_name":"AndryukovAlexey/book_master","sub_path":"utils/misc/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13480731895","text":"def main():\n text = input(\"Text: \")\n letters = 0\n words = 1\n sentences = 0\n\n for c in text:\n if ord(c.lower()) >= ord(\"a\") and ord(c.lower()) <= ord(\"z\"):\n letters += 1\n if c == \" \":\n words += 1\n if(c == '.' or c == '?' or c == '!'):\n sentences += 1\n \n print(f\"letters: {letters} words: {words} sentences: {sentences}\") \n level = calculateLevel(letters, words, sentences)\n \n printGrade(level)\n \ndef calculateLevel(letters, words, sentences):\n lettersPer100Words = float(letters) / float(words) * 100\n sentencesPer100Words = float(sentences) / float(words) * 100\n level = 0.0588 * lettersPer100Words - 0.296 * sentencesPer100Words - 15.8\n return round(level)\n \ndef printGrade(level):\n if level < 1:\n print(\"Before Grade 1\")\n elif level > 16:\n print(\"Grade 16+\")\n else:\n print(f\"Grade: {level}\")\n\n \n \nmain()","repo_name":"emily-v/python-practice","sub_path":"readability/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21881112435","text":"from cvzone.FaceMeshModule import FaceMeshDetector\r\n\r\ndetector = FaceMeshDetector(maxFaces=1)\r\n\r\ndef calculate(img):\r\n img, faces = detector.findFaceMesh(img, draw=False)\r\n\r\n if faces:\r\n face = faces[0]\r\n pointLeft = face[145]\r\n pointRight = face[374]\r\n w, _ = detector.findDistance(pointLeft, pointRight)\r\n W = 6.3\r\n f = 755\r\n d = (W*f)/w\r\n return round(d, 2)\r\n return False\r\n","repo_name":"AlexandrosGiann/crazy_yugioh_project","sub_path":"face_distance.py","file_name":"face_distance.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21997110620","text":"import datetime\r\nimport sys\r\nimport time\r\n\r\nfrom DataAccessLayer.Repositories.opc_hda.OpcHdaRepository import OpcHdaRepository\r\nfrom Models.Configuration import Configuration\r\n\r\n\r\nclass ProgressBa(object):\r\n\r\n\r\n def __init__(self):\r\n self.config = Configuration()\r\n self.config.load()\r\n self.from_date_sec = time.mktime(self.config.from_date.timetuple())\r\n self.to_date_sec = time.mktime(self.config.to_date.timetuple())\r\n\r\n total = self.to_date_sec - self.from_date_sec\r\n self.percent_sec = total / 100\r\n\r\n def row_update(self):\r\n 
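# \"\\r\" rewinds the cursor to the start of the console line without advancing it,\r\n        # so successive writes overdraw the same line; flush() pushes the text out\r\n        # despite stdout buffering. A minimal sketch of the idiom (hypothetical helper,\r\n        # never called):\r\n        def _progress_line_sketch(percent):\r\n            sys.stdout.write(\"\\r%.2f%%\" % percent)  # overwrite the previous value\r\n            sys.stdout.flush()                      # show it immediately\r\n        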
sys.stdout.write(\"\\r100%\")\r\n sys.stdout.flush()\r\n\r\n def increment(self, tag_value):\r\n current_date_sec = time.mktime(tag_value.timestamp.timetuple())\r\n current_sec = current_date_sec - self.from_date_sec\r\n\r\n current_percent = current_sec / self.percent_sec\r\n\r\n if (current_percent > 1 and current_percent < 5):\r\n test = 1\r\n\r\n # if value_index == 0 or (value_index % 5) == 0:\r\n sys.stdout.write(\"\\r%.2f%%\" % current_percent)\r\n sys.stdout.flush()\r\n\r\n\r\n\r\n\r\n#\r\n# config = Configuration()\r\n# config.load()\r\n# opc_hda_repository = OpcHdaRepository(config.oledb_parameters[\"user\"], config.oledb_parameters[\"host\"])\r\n#\r\n# def sss(self):\r\n#\r\n# self.uts_from_date = int(time.mktime(self.config.from_date.timetuple()))\r\n# self.uts_to_date = int(time.mktime(self.config.to_date.timetuple()))\r\n#\r\n# print(\"От\", self.uts_from_date, \"До\", self.uts_to_date)\r\n#\r\n# #percent = (self.uts_to_date - self.uts_from_date) // 100\r\n# #print(\"1 процент = \", percent)\r\n# #self.total = int(self.uts_to_date - self.uts_from_date)\r\n#\r\n# for tag_value in self.opc_hda_repository.read():\r\n\r\n #time_stamp = str(tag_value.timestamp.replace(tzinfo=None))\r\n #self.uts_time_stamp = time.mktime(datetime.datetime.strptime(time_stamp, \"%Y-%m-%d %H:%M:%S\").timetuple())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#a = ProgressBa()\r\n#a.sss()\r\n\r\n","repo_name":"Modixe/Processing-data-from-a-SCADA","sub_path":"OleDb/Models/ProgressBar.py","file_name":"ProgressBar.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5876489249","text":"\nimport datetime\nimport json\nfrom django.conf import settings\nimport jwt\nfrom mainprocess.utils.jwt_auth import get_token\nfrom mainprocess import models\nfrom mainprocess.models import BillItem, JsonResultFormat, MonthliAmount, UserBillCount, YearlyAmount, YearlyAnalyze, user\nfrom mainprocess.checkcode import create_validate_code\nfrom django.http.response import HttpResponse, JsonResponse\nfrom rest_framework.views import APIView\nfrom io import BytesIO\nfrom mainprocess.extensions.auth import JwtQuertParamsAuthentication\ncheckcode = \"\"\n\n\nclass LoginView(APIView):\n '''用户登录'''\n def post(self, request, *args, **kwargs):\n info = request.data\n mobilehao = info['mobile']\n passwork = info['password']\n print(mobilehao,passwork)\n try:\n user_obj = user.objects.get(mobile=mobilehao)\n except:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n # if not user_obj.exists():\n # return Response({\"code\": 1000, 'error': '不存在用户'})\n userinfo = user_obj.__dict__\n userinfo.pop('_state')\n token = user_obj.token\n decodet = jwt.decode(token,key=settings.SECRET_KEY,verify=False,algorithms=['HS256'])\n if decodet['password'] != passwork:\n return JsonResponse(AllRes(False,\"密码错误!\",\"\"))\n return JsonResponse(AllRes(True,\"登录成功!\",userinfo))\nclass EnsureLogin(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, *args, **kwargs):\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n user_obj = user.objects.get(mobile=userInfo['mobile'],token=userInfo['token'])\n user_obj = user_obj.__dict__\n user_obj.pop('_state')\n return JsonResponse(AllRes(True,\"自动登录成功!\",user_obj))\n except Exception as ex:\n print(ex)\n return JsonResponse(AllRes(False,\"token失效,请重新登录!\",\"\"))\ndef createCode(request):\n 
f = BytesIO()\n img, code = create_validate_code()\n img.save(f, 'PNG')\n global checkcode\n checkcode = code\n return HttpResponse(f.getvalue())\n\n\ndef validateCode(request):\n info = json.loads(request.body)\n global checkcode\n print(info[\"code\"], checkcode)\n if(info[\"code\"].lower() == checkcode.lower()):\n return JsonResponse(JsonResultFormat(True, \"成功\", \"\").obj2dict())\n return JsonResponse(JsonResultFormat(False, \"失败\", \"\").obj2dict())\n\n\nclass JwtRegister(APIView):\n def post(self, request, *args, **kwargs):\n info = request.data\n token = get_token({\"mobile\": info['mobile'],'password': info['password'],'registrationId': info['registrationId']},9999)\n logintime = datetime.datetime.now().strftime('%Y-%m-%d')\n newUser = models.user(token=token, avatar=\"http://api.btstu.cn/sjtx/api.php\", nickname=info['mobile'], lastLogin=logintime, lastDevice=info['device'], mobile=info['mobile'], registrationId=info['registrationId'])\n newUser.save()\n models.limit(limit=0,limitUserID=newUser.id).save()\n newUserDict = newUser.__dict__\n newUserDict.pop('_state')\n return JsonResponse(AllRes(True,\"注册成功\",newUserDict))\nclass GroupCreate(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def post(self, request, *args, **kwargs):\n info = request.data\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n print(userInfo)\n try:\n models.group(name=info['name'],type=info['type'],usage=info['usage'],isDefault=info['isDefault'],desc=info['desc'],isPersonal=\"1\",creatorId=userInfo['id']).save()\n return JsonResponse(AllRes(True,\"添加圈子成功!\",\"\"))\n except:\n return JsonResponse(AllRes(False,\"发生错误,请重试\",\"\"))\n\n# class GroupQuery(APIView):\n# authentication_classes = [JwtQuertParamsAuthentication]\n# def get(self, request, *args, **kwargs):\n# grouplist = group.objects.all().values()\n# datalist = list(grouplist)\n# return JsonResponse(AllRes(True,\"获取成功\",datalist))\n\n# class GroupQueryById(APIView):\n# authentication_classes = [JwtQuertParamsAuthentication]\n# def get(self, request, params, *args, **kwargs):\n# print(params)\n# groupDetail = group.objects.get(creatorId=params)\n# groupDetail = groupDetail.__dict__\n# groupDetail.pop('_state')\n# return JsonResponse(AllRes(True,\"获取成功\",groupDetail))\nclass CreateBill(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def post(self, request, *args, **kwargs):\n billinfo = request.data\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n print(billinfo)\n try:\n if billinfo['billType'] == \"0\":\n models.bill(buid=userInfo['id'],category=billinfo['category'],billDate=billinfo['billDate'],amount=billinfo['amount'],payMethod=billinfo['payMethod'],remark=billinfo['remark'],billType=billinfo['billType']).save()\n else:\n models.bill(buid=userInfo['id'],category=billinfo['category'],billDate=billinfo['billDate'],amount=billinfo['amount'],remark=billinfo['remark'],billType=billinfo['billType']).save()\n return JsonResponse(AllRes(True,\"创建记账单成功\",\"\"))\n except Exception as ex:\n print(ex)\n return JsonResponse(AllRes(True,\"创建失败!请重试!\",\"\"))\n\nclass GetUserBillCount(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, *args, **kwargs):\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n 
try:\n userId = userInfo['id']\n billCount = models.bill.objects.filter(buid=userId).count()\n billDay = models.bill.objects.filter(buid=userId).values('billDate').order_by('billDate').distinct().count()\n newUserBillCount = UserBillCount(billDay,billCount)\n newUserBillCount = newUserBillCount.__dict__\n print(billCount,billDay)\n return JsonResponse(AllRes(True,\"取得数据成功!\",newUserBillCount))\n except Exception as ex:\n print(ex)\n return JsonResponse(AllRes(False,\"发生异常,请稍候重试!\",\"\"))\n\n\nclass GetUserReminder(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, *args, **kwargs):\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n reminder = models.reminder.objects.filter(ruid=userInfo['id']).values()\n reminder = list(reminder)\n return JsonResponse(AllRes(True,\"\",reminder))\n except:\n return JsonResponse(AllRes(False,\"发生异常,请稍候重试!\",\"\"))\nclass CreateUserReminder(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def post(self, request, *args, **kwargs):\n reminderinfo = request.data\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n models.reminder(frequency=reminderinfo['frequency'],time=reminderinfo['time'],rule=reminderinfo['rule'],back=reminderinfo['back'],ruid=userInfo['id']).save()\n return JsonResponse(AllRes(True,\"创建提醒成功!\",\"\"))\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n \nclass UpdateUserReminder(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def put(self, request, *args, **kwargs):\n reminderinfo = request.data\n try:\n models.reminder.objects.filter(id=reminderinfo['id']).update(frequency=reminderinfo['frequency'],time=reminderinfo['time'],rule=reminderinfo['rule'])\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n return JsonResponse(AllRes(True,\"更新提醒成功!\",\"\"))\nclass DeleteUserReminder(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, param, *args, **kwargs):\n try:\n models.reminder.objects.filter(id=param).delete()\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n return JsonResponse(AllRes(True,\"删除提醒成功!\",\"\"))\nclass QueryReminderById(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, param, *args, **kwargs):\n try:\n print(param)\n reminder = models.reminder.objects.get(id=param)\n reminder = reminder.__dict__\n print(reminder)\n reminder.pop('_state')\n return JsonResponse(AllRes(True,\"\",reminder))\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\nclass SetUserLimit(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def post(self, request, *args, **kwargs):\n info = request.data\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n models.limit.objects.filter(limitUserID=userInfo['id']).update(limit=info['limit'],limitUserID=userInfo['id'])\n except:\n return JsonResponse(AllRes(False,\"发生异常,请稍候重试!\",\"\"))\n return JsonResponse(AllRes(True,\"更新预算成功!\",\"\"))\n\nclass GetUserLimit(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, *args, **kwargs):\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return 
JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n limit = models.limit.objects.get(limitUserID=userInfo['id'])\n limit = limit.__dict__\n limit.pop('_state')\n return JsonResponse(AllRes(True,\"\",limit))\n except:\n return JsonResponse(AllRes(False,\"发生异常,请稍候重试!\",\"\"))\n\nclass GetUserTask(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, *args, **kwargs):\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n tasks = models.task.objects.filter(taskUserId=userInfo['id']).values()\n tasks = list(tasks)\n return JsonResponse(AllRes(True,\"\",tasks))\n except:\n return JsonResponse(AllRes(False,\"发生异常,请稍候重试!\",\"\"))\n\nclass CreateUserTask(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def post(self, request, *args, **kwargs):\n Taskinfo = request.data\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n models.task(frequency=Taskinfo['frequency'],time=Taskinfo['time'],amount=Taskinfo['amount'],billType=Taskinfo['billType'],category=Taskinfo['category'],remark=Taskinfo['remark'],confirm=Taskinfo['confirm'],payMethod=Taskinfo['payMethod'],taskUserId=userInfo['id']).save()\n except Exception as ex:\n print(ex)\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n return JsonResponse(AllRes(True,\"创建任务成功!\",\"\"))\nclass UpdateUserTask(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def put(self, request, *args, **kwargs):\n Taskinfo = request.data\n try:\n models.task.objects.filter(id=Taskinfo['id']).update(frequency=Taskinfo['frequency'],time=Taskinfo['time'],amount=Taskinfo['amount'],billType=Taskinfo['billType'],category=Taskinfo['category'],remark=Taskinfo['remark'],confirm=Taskinfo['confirm'],payMethod=Taskinfo['payMethod'])\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n return JsonResponse(AllRes(True,\"更新任务成功!\",\"\"))\nclass DeleteUserTask(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, param, *args, **kwargs):\n try:\n models.task.objects.filter(id=param).delete()\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n return JsonResponse(AllRes(True,\"删除提醒成功!\",\"\"))\nclass QueryTaskById(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, param, *args, **kwargs):\n try:\n Task = models.task.objects.get(id=param)\n Task = Task.__dict__\n Task.pop('_state')\n return JsonResponse(AllRes(True,\"\",Task))\n except:\n return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n\nclass GetBill(APIView):\n authentication_classes = [JwtQuertParamsAuthentication]\n def get(self, request, *args, **kwargs):\n userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n billItem = []\n \n if userInfo == False:\n return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n try:\n billdates = models.bill.objects.filter(buid=userInfo['id']).values(\"billDate\").order_by(\"billDate\").distinct()\n billdates = list(billdates)\n for billdate in billdates:\n ob_sum = 0.0\n ic_sum = 0.0\n origindata_bill = models.bill.objects.filter(buid=userInfo['id'],billDate=billdate['billDate']).values()\n origindata_bill = list(origindata_bill)\n for ob in origindata_bill:\n if ob['billType'] == \"0\":\n ob_sum += float(ob['amount'])\n else:\n ic_sum += float(ob['amount'])\n 
billItem.append(BillItem(billdate['billDate'],str(ob_sum),str(ic_sum),origindata_bill).__dict__)\n            return JsonResponse(AllRes(True,\"取得数据成功!\",billItem))\n        except:\n            return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n\nclass DeleteBill(APIView):\n    authentication_classes = [JwtQuertParamsAuthentication]\n    def get(self, request, param, *args, **kwargs):\n        try:\n            models.bill.objects.filter(id=param).delete()\n        except:\n            return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n        return JsonResponse(AllRes(True,\"删除该账单成功!\",\"\"))\nclass GetBillYears(APIView):\n    authentication_classes = [JwtQuertParamsAuthentication]\n    def get(self, request, *args, **kwargs):\n        userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n        years = []\n        if userInfo == False:\n            return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n        try:\n            yearsdata = models.bill.objects.filter(buid=userInfo['id']).values('billDate').order_by('billDate').distinct()\n            yearsdata = list(yearsdata)\n            for yd in yearsdata:\n                result = yd['billDate'].split('-')\n                years.append(result[0])\n            years = list(set(years))\n            return JsonResponse(AllRes(True,\"\",years))\n        except:\n            return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\nclass GetYearlyBill(APIView):\n    authentication_classes = [JwtQuertParamsAuthentication]\n    def get(self, request, *args, **kwargs):\n        queryYear = request.GET.get('year')\n        userInfo = decodeUserToken(request.META['HTTP_AUTHORIZATION'])\n        monthly = []\n        results = []\n        if userInfo == False:\n            return JsonResponse(AllRes(False,\"不存在的用户\",\"\"))\n        try:\n            fir = models.bill.objects.filter(buid=userInfo['id']).values()\n            fir = list(fir)\n            y_ob_sum = 0.0\n            y_ic_sum = 0.0\n            for fd in fir:\n                dates = fd['billDate'].split('-')\n                if dates[0] == queryYear:\n                    if fd['billType'] == \"0\":\n                        print(fd['amount'])\n                        y_ob_sum += float(fd['amount'])\n                    else:\n                        print(fd['amount'])\n                        y_ic_sum += float(fd['amount'])\n                    monthly.append(dates[1])\n            print(monthly)\n            monthly = list(set(monthly))\n            print(monthly)\n            for od in monthly:\n                m_ob_sum = 0.0\n                m_ic_sum = 0.0\n                for fd in fir:\n                    dates = fd['billDate'].split('-')\n                    if dates[0] == queryYear:\n                        if dates[1] == od:\n                            if fd['billType'] == \"0\":\n                                m_ob_sum += float(fd['amount'])\n                            else:\n                                m_ic_sum += float(fd['amount'])\n                results.append(MonthliAmount(od,m_ob_sum,m_ic_sum,m_ic_sum-m_ob_sum).__dict__)\n            yearanalyze = YearlyAnalyze(YearlyAmount(y_ob_sum,y_ic_sum,y_ic_sum-y_ob_sum).__dict__,results).__dict__\n            print(yearanalyze)\n            return JsonResponse(AllRes(True,\"获取统计成功!\",yearanalyze))\n        except:\n            return JsonResponse(AllRes(False,\"发生错误,请稍候重试!\",\"\"))\n    \ndef AllRes(isScu,str,data):\n    return JsonResultFormat(isScu,str,data).obj2dict()\n\ndef decodeUserToken(meta):\n    token = meta\n    salt = settings.SECRET_KEY\n    decodet = jwt.decode(token,key=salt,verify=False,algorithms=['HS256'])\n    try:\n        user_obj = user.objects.get(mobile=decodet['mobile'])\n    except:\n        return False\n    userInfo = user_obj.__dict__\n    return userInfo","repo_name":"mysteriousmy/Android-code-memory","sub_path":"PersonBill/PersonBillS/mainprocess/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39862764319","text":"import numpy as np\nfrom matplotlib.pyplot import plot, ylim, xlabel, ylabel, show  # plot/ylim/xlabel/ylabel/show are called unqualified below\nfrom cmath import *\nfrom ipywidgets import interact, interactive, fixed, interact_manual\n#https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\nimport ipywidgets as widgets\n\n\ndef optics_only(coupling = 1):\n    # coupling is k0/kext\n    omegas = 
np.arange(-5, 5, 0.01) # in units of kext\n susceptibility = 1 / (((1+coupling)/2)+1j*omegas)\n R = np.abs(1-susceptibility)**2\n\n plot(omegas, R)\n ylim(0, 1.05)\n xlabel('detuning from resonance')\n ylabel('R')\n show()\n\n if coupling == 0:\n print('Extremely overcoupled: \\nThe losses are negligible compared to the rate at which light can escape. \\nAll the light is then reflected back out of the cavity')\n elif coupling < 1:\n print('Overcoupled: \\nThe cavity losses are smaller than the external coupling rate.')\n elif coupling == 1:\n print('Critical coupling:\\nThe rate at which the light enters the cavity matches exactly the decay rate.\\nAt resonance, all photons will be dissipated')\n else:\n bad_cavity_text = \"\"\"Undercoupled:\\nThis is the so-called \"bad cavity\" regime. All excitations die out faster than they can be extracted from the cavity.\n \"\"\"\n print(bad_cavity_text)\n","repo_name":"EIvanov556/Optomechanics-OMT-Mooc","sub_path":"omit/build/lib/omit/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33376122655","text":"from django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom rest_framework import serializers\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_201_CREATED\n\nfrom accounts.models import Account\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'id',\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'date_joined',\n )\n\n def create(self, validated_data):\n user_kwargs = {\n 'username': validated_data.get('username'),\n 'email': validated_data.get('email'),\n 'first_name': validated_data.get('first_name'),\n 'last_name': validated_data.get('last_name'),\n }\n\n self.__init__(data={**user_kwargs})\n if self.is_valid():\n instance = User(**user_kwargs)\n instance.save()\n self.instance = instance\n return Response(self.data, status=HTTP_201_CREATED)\n\n return None\n\n\nclass AccountsSerializer(serializers.ModelSerializer):\n user = UserSerializer(required=False)\n\n class Meta:\n model = Account\n fields = '__all__'\n\n def create(self, validated_data):\n user = User.objects.filter(**{\"id\": validated_data.get(\"user_id\")}).order_by().first()\n if user:\n instance = Account(user=user)\n instance.save()\n self.instance = instance\n return self\n\n return None\n","repo_name":"ricouly21/s3boto_exercise","sub_path":"accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31424089122","text":"def middle_element(lst):\r\n if len(lst) % 2 == 0:\r\n even1 = int(((len(lst)/2) - 1))\r\n even2 = int(((len(lst)/2)))\r\n even = int((lst[even1] + lst[even2]) / 2)\r\n return even\r\n else:\r\n odd = int((len(lst) / 2))\r\n \r\n return lst[odd]\r\n\r\n#Uncomment the line below when your function is done\r\nprint(middle_element([5, 2, -10, -4, 4, 5]))","repo_name":"mfouquier/Python-Crash-Course","sub_path":"Chapter 9/codeacademy.py","file_name":"codeacademy.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12892079320","text":"from flask import Blueprint, jsonify, request, abort\nfrom common.custom_exceptions import ClientError, ServerError\nfrom 
common.shortly_logger import logger\nimport requests, json, os\n\napi = Blueprint('api', __name__)\n\n\n@api.route('/shortlinks', methods=['POST'])\ndef create_shortlink():\n \"\"\"\n Returns\n 200 json {\n \"url\": string,\n \"link\": string\n }\n 400 client error\n 500 sever error\n\n :return:\n \"\"\"\n providers = {'bitly': bitly_short,\n 'tinyurl': tiny_short}\n request_json = request.get_json()\n provider = request_json.get('provider')\n logger.debug(f'Provider requested\"{provider}\"')\n url = request_json.get('link')\n\n if url is None:\n logger.warning('No \"url\" provided! 400')\n abort(400)\n\n if provider not in providers:\n alt_provider = providers.popitem()\n provider = alt_provider[0]\n provider_fun = alt_provider[1]\n logger.info(f'Provider does not exist. Will use \"{provider}\".')\n else:\n provider_fun = providers.pop(provider)\n\n try:\n response = provider_fun(url)\n except ClientError:\n logger.warning(f'Request to \"{provider}\" returned with 4xx. Aborting with 400!')\n abort(400)\n except ServerError:\n logger.info(f'Request to \"{provider}\" returned with a 5xx.')\n alt_provider = providers.popitem()\n provider = alt_provider[0]\n provider_fun = alt_provider[1]\n try:\n response = provider_fun(url)\n except ClientError:\n logger.warning(f'Request to \"{provider}\" returned with 4xx. Aborting with 400!')\n abort(400)\n except ServerError:\n logger.warning('Aborting with 500!')\n abort(500)\n\n return jsonify(response)\n\n\ndef bitly_short(url):\n \"\"\"\n Calls the bitly API to shorten given url\n Raises\n 1. ClientError if API status code is 4xx\n 2. ServerError if API status code is 5xx\n\n :param url: string\n :return: dict\n \"\"\"\n data = {'long_url': url}\n headers = {'Accept': 'application/json',\n 'Authorization': os.getenv('BITLY_TOKEN')}\n try:\n logger.info(f'Requesting \"bitly\" to shorten \"{url}\".')\n response = requests.post('https://api-ssl.bitly.com/v4/shorten', data=json.dumps(data), headers=headers)\n except Exception:\n logger.error('Request FAILED!!')\n raise ServerError\n\n # 403 response is returned when the BITLY_TOKEN environment variable is invalid!\n if response.status_code == 403:\n logger.error('INVALID BITLY_TOKEN!')\n raise ServerError\n\n if 400 <= response.status_code < 500 :\n logger.error('Request responded with 4xx.')\n raise ClientError\n\n if 500 <= response.status_code < 600 :\n logger.error('Request responded with 5xx.')\n raise ServerError\n\n resp_json = response.json()\n short_url = resp_json['link']\n return prepare_response(url, short_url)\n\n\ndef tiny_short(url):\n \"\"\"\n Calls the tinyurl API to shorten given url\n Raises\n 1. ClientError if API status code is 4xx\n 2. ServerError if API status code is 5xx\n\n :param url: string\n :return: dict\n \"\"\"\n try:\n logger.info(f'Requesting \"tinyurl\" to shorten \"{url}\".')\n response = requests.get('http://tinyurl.com/api-create.php', params={'url': url})\n except Exception:\n logger.error('Request FAILED!!')\n raise ServerError\n\n # For now tinyurl seems always to return a 200 response but just for future proofing.\n if 400 <= response.status_code < 500:\n logger.error('Request responded with 4xx.')\n raise ClientError\n\n if 500 <= response.status_code < 600:\n logger.error('Request responded with 5xx.')\n raise ServerError\n\n return prepare_response(url, response.content.decode('utf-8'))\n\n\ndef prepare_response(url, short_link):\n \"\"\"\n Creates a dictionary containing\n 1. url requested to be shorten\n 2. 
shortened url\n\n    :param url: string\n    :param short_link: string\n    :return: dict\n    \"\"\"\n    if url is None or short_link is None:\n        logger.error('Missing url or short_link.')\n        raise ServerError\n\n    return {\n        'url': url,\n        'link': short_link\n    }\n","repo_name":"agdelig/url_shortener","sub_path":"shorty/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27369984810","text":"# Arbeit mit Dateien schreiben \"w\"\nfile = open(\"schreiben.txt\" , \"w\") # schreiben.txt wird von PyCharm angelegt (touch)\nfile.write(\"Vier Bier und eine Suppe \") # Append strg\nfile.write(\"Vier Bier und eine Suppe\") # Append another strg in the same line\nfile.write(\"\\nVier Bier und eine Suppe\") # Append strg with newline\nfile.close() # Speichern und schließe des File's\n\n# The string.join() function can also be used to add a newline\n\n# Schreiben einer Liste in Datei\nfile = open(\"piraten.txt\", \"w\") # \"w\" Überschreibt vorhandene Daten. Falls nicht vorhanden, wird die Datei getouched\npiraten = [\"jan\",\"hein\",\"claas\",\"pit\",\"und deine Mudder\"]\nfor pirat in piraten:\n    file.write(pirat + \"\\n\") # newline \\n muss in Anführungszeichen\nfile.close()\n\n# Verschiedene Dateiöffnungsmodi\n# \"r\" read\n# \"w\" write\n# \"a\" append\nfile = open(\"piraten.txt\", \"a\") # variable \"neuer_satz\" wird an \"piraten.txt\" angehängt. \"a\" append modus\nneuer_satz = \"sind coole Piraten, die fahren mit!\"\nfile.write(neuer_satz)\nfile.close()\n\n# Mit einer Schleife kann man keinen neuen Satz anhängen?\n# for neuer_satz in piraten:\n#     file.write(neuer_satz + \"\\n\")\n","repo_name":"bhb-boy/repo_2","sub_path":"28_arbeit_mit_dateien_write_append.py","file_name":"28_arbeit_mit_dateien_write_append.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29355667360","text":"import os\nimport re\nimport sys\n\n\ndef ReadMarkDown(file):\n    folder = 'test'\n    os.system('rm -rf ' + folder + ' && mkdir -p ' + folder)\n    with open(file, 'r') as f:\n        lines = f.readlines()\n    for i, line in enumerate(lines):\n        if '[//file]:#' in line:\n            filename = line[10:].strip()\n            GetCodeFile(lines, i, os.path.join(folder, filename))\n\n\ndef GetCodeFile(lines, i, filename):\n    # copy lines from the ``` fence that follows the marker until the closing fence\n    i += 2\n    code = ''\n    while True:\n        if '```' in lines[i]:\n            break\n        code += lines[i]\n        i += 1\n    with open(filename, 'w+') as f:\n        f.write(code)\n\n\ndef RunTest():\n    folder = 'test'\n    os.system('cd ' + folder + ' && sh start.sh')\n    os.system('cd .. 
&& rm -rf ' + folder)\n\n\nif __name__ == '__main__':\n    ReadMarkDown(os.path.join(sys.argv[1], sys.argv[2]))\n    RunTest()\n","repo_name":"PaddlePaddle/Serving","sub_path":"tools/doc_tester_reader.py","file_name":"doc_tester_reader.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":848,"dataset":"github-code","pt":"54"} +{"seq_id":"22321806945","text":"import json\n\nfrom drawio.entities.drawio_config.abstract_config import AbstractConfig\n\n\nclass ConnectConfig(AbstractConfig):\n    def __init__(self, from_id: str, to_id: str, **kwargs):\n        self.from_id = from_id\n        self.to_id = to_id\n        self.style = \"edgeStyle=orthogonalEdgeStyle;curved=1;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;\"\n        super().__init__(**kwargs)\n\n    def to_config_string(self):\n        \"\"\"\n        Example:\n        connect: {\"from\": \"manager\", \"to\": \"name\", \"invert\": true, \"label\": \"manages\", \"style\": \"curved=1;endArrow=blockThin;endFill=1;fontSize=11;\"}\n        \"\"\"\n        data = json.dumps(self.__dict__)\n        # rename 'from_id' to 'from'\n        data = data.replace('\"from_id\"', '\"from\"')\n        # rename 'to_id' to 'to'\n        data = data.replace('\"to_id\"', '\"to\"')\n        return data\n","repo_name":"maycuatroi/drawio","sub_path":"drawio/entities/drawio_config/connect_config.py","file_name":"connect_config.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"18688098515","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path(\"\", views.index, name=\"index\"),\n    path(\"login\", views.login_view, name=\"login\"),\n    path(\"logout\", views.logout_view, name=\"logout\"),\n    path(\"register\", views.register, name=\"register\"),\n    path(\"new\", views.new_listing, name=\"new\"),\n    path(\"new/save\", views.save_listing, name=\"save-listing\"),\n    path(\"view_listing/delete\", views.delete_listing, name=\"delete-listing\"),\n    path(\"view_listing\", views.view_listing, name=\"view-listing\"),\n    path(\"view_listing/add_to_watchlist\", views.watchlist, name=\"watch-list\"),\n    path(\"view_listing/comment\", views.comment, name=\"comment\"),\n    path(\"view_listing/bid\", views.bid, name=\"bid\"),\n    path(\"view_watchlist\", views.view_watchlist, name=\"view-watchlist\"),\n    path(\"view_watchlist/delete_item\", views.delete_watch_item, name=\"delete-watch-item\"),\n    path(\"categories\", views.categories, name=\"categories\"),\n    path(\"categories/view-category\", views.view_category, name=\"view-category\")\n]\n","repo_name":"Eddiebee/Auction-site","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37138458812","text":"import pickle\r\nimport numpy as np\r\nfrom keras.utils import to_categorical\r\n\r\ndef processData(padding = '0before', location = 'datasets/20_1000.csv', task = 'GPC'):\r\n\r\n\tlabels = []\r\n\tsamples = []\r\n\tno_features = int(location.split('/')[1].split('_')[0])\r\n\tno_samples_per_class = int(location.split('/')[1].split('_')[1].split('.')[0])\r\n\tprint(no_features)\r\n\tprint(no_samples_per_class)\r\n\r\n\twith open (location) as f:\r\n\t\tcontent = f.readlines()\r\n\t\tfor line in content:\r\n\t\t\tlst = line.split(',')\r\n\r\n\t\t\tlabel = int(float(lst[0]))\r\n\t\t\tsample = [float(x) for x in lst[1:]]\r\n\r\n\t\t\tlabels.append(label)\r\n\t\t\tsamples.append(sample)\r\n\r\n\tmax_length = 
max([len(sample) for sample in samples])\r\n\r\n\tif padding.startswith('0'):\r\n\t\tfor i in range(len(samples)):\r\n\t\t\tsample = samples[i]\r\n\t\t\tn = max_length - len(sample)\r\n\t\t\tfor _ in range(n):\r\n\t\t\t\tif padding == '0before':\r\n\t\t\t\t\tsample.insert(0, 0.0)\r\n\t\t\t\telif padding == '0after':\r\n\t\t\t\t\tsample.append(0.0)\r\n\t\t\tsamples[i] = sample\r\n\r\n\tX = np.array(samples)\r\n\tX = np.reshape(X, (X.shape[0], X.shape[1] // no_features, no_features))\r\n\ty = to_categorical(labels)\r\n\r\n\tX_ploc = 'datasets/X_' + task + '_' + str(no_features) + '_' + str(no_samples_per_class) + '_' + padding + '.p'\r\n\tpickle.dump(X, open(X_ploc, 'wb'))\r\n\ty_ploc = 'datasets/y_' + task + '_' + str(no_features) + '_' + str(no_samples_per_class) + '_' + padding + '.p'\r\n\tpickle.dump(y, open(y_ploc, 'wb'))\r\n\r\n#processData(padding = '0after')\r\nprocessData('0after', 'datasets/2_2.csv')","repo_name":"perticascatalin/ImageOfAlgorithm","sub_path":"code/classification/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73634834401","text":"\n\n\ndef avion():\n\n\n    n = 5\n\n    target = 'FBI'\n    count = 0\n    for i in range(1,n + 1):\n        word = input()\n        if target in word:\n            print(i,end=' ')\n            count += 1\n\n    \n\n\n    if count == 0:\n        print(\"HE GOT AWAY!\")\n\n\n\n\navion() \n\n\n\n\n\n\n\n","repo_name":"cyrustabatab/KattisProgrammingProblems","sub_path":"avion.py","file_name":"avion.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71481860962","text":"from rest_framework.views import APIView\nfrom rest_framework import permissions, status\nfrom rest_framework.response import Response\nfrom Product.models import Product\nfrom Product.serializers import ProductSerializer\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom datetime import datetime  # needed for the datetime.now() calls below\nfrom permissions import *\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\n\nclass ProductView(APIView):\n    permission_classes = [IsAdminOrThreeDaysPassedrOrReadOnly]\n\n    #상품 조회\n    def get(self, request):\n        user = request.user\n        # 로그인된 유저의 만료되지 않은 상품들 queryset으로 가져오기\n        if user.is_authenticated:\n            my_products = Product.objects.filter(\n                Q(exposure_end__gte=datetime.now())|\n                Q(seller=user), \n                is_active=True\n            )\n            # json형태로 돌려주기\n            product_serializer = ProductSerializer(my_products, many=True).data\n        else:\n            products = Product.objects.filter(\n                Q(exposure_end__gte=datetime.now()), \n                is_active=True\n            )\n            product_serializer = ProductSerializer(products, many=True).data    \n        \n        return Response(product_serializer, status=status.HTTP_200_OK)\n    \n    #상품 등록\n    def post(self, request):\n        # 현재 로그인한 유저의 id를 seller로 지정\n        user = request.user\n        request.data['seller'] = user.id\n        product_serializer = ProductSerializer(data=request.data, context={\"request\": request})\n        \n        if product_serializer.is_valid():\n            product_serializer.save()\n            return Response(product_serializer.data, status=status.HTTP_200_OK)\n        \n        return Response(product_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n    \n    #상품 수정\n    def put(self, request, obj_id):\n        product = Product.objects.get(id=obj_id)\n        product_serializer = ProductSerializer(product, data=request.data, partial=True, context={\"request\": request})\n        \n        if product_serializer.is_valid():\n            product_serializer.save()\n            return Response(product_serializer.data, 
status=status.HTTP_200_OK)\n \n return Response(product_serializer.errors, status=status.HTTP_400_BAD_REQUEST)","repo_name":"KimmyJay/DRF_Assignments","sub_path":"Product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23363286301","text":"from operator import is_\nfrom django.shortcuts import render\nfrom todolist.models import Task\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .forms import TaskForm\nfrom django.http import HttpResponse\nfrom django.core import serializers\nfrom django.http import HttpResponseBadRequest, JsonResponse\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\n\n@login_required(login_url='/todolist/login/')\ndef show_todolist(request):\n data_task = Task.objects.all().filter(user=request.user)\n\n context = {\n 'username': request.user,\n 'list_task': data_task\n }\n\n return render(request, \"todolist.html\", context)\n\n\n@login_required(login_url='/todolist/login/')\ndef show_todolist_json(request):\n data_task = Task.objects.all().filter(user=request.user)\n return HttpResponse(serializers.serialize(\"json\", data_task), content_type=\"application/json\")\n\n\ndef register(request):\n form = UserCreationForm()\n\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Akun telah berhasil dibuat!')\n return redirect('todolist:login')\n\n context = {'form': form}\n return render(request, 'register.html', context)\n\n\ndef login_user(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n response = HttpResponseRedirect(reverse(\"todolist:show_todolist\"))\n response.set_cookie('last_login', str(datetime.datetime.now()))\n return response\n context = {}\n return render(request, 'login.html', context)\n\n\ndef logout_user(request):\n logout(request)\n response = HttpResponseRedirect(reverse('todolist:login'))\n response.delete_cookie('last_login')\n return response\n\n\ndef create_task(request):\n\n form = TaskForm()\n\n if request.method == 'POST':\n form = TaskForm(request.POST)\n\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.user = request.user\n model_instance.date = datetime.date.today()\n model_instance.title = request.POST.get('title')\n model_instance.description = request.POST.get('description')\n model_instance.save()\n return HttpResponseRedirect('../')\n context = {\n 'form': form\n }\n\n return render(request, 'create_task.html', context)\n\n\ndef change_status(request, id):\n item = Task.objects.get(pk=id)\n item.is_finished = item.is_finished ^ 1\n\n if (item.is_finished == True):\n item.status = 'Selesai'\n else:\n item.status = 'Belum Selesai'\n\n item.save()\n return HttpResponseRedirect('../')\n\n@login_required(login_url='/todolist/login/')\n@csrf_exempt\ndef delete_task(request, id):\n 
item = Task.objects.get(pk=id)\n item.delete()\n return redirect('todolist:show_todolist')\n\n\n@login_required(login_url='/todolist/login/')\n@csrf_exempt\ndef add(request):\n if request.method == 'POST':\n \n user = request.user\n date = datetime.date.today()\n title = request.POST.get(\"title\")\n description = request.POST.get(\"description\")\n is_finished = False\n status = \"Belum selesai\"\n \n new_task = Task(user=user, date=date, title=title, description=description, \n is_finished=is_finished, status=status)\n new_task.save()\n\n return JsonResponse({\n\n \"pk\": new_task.pk,\n \"fields\":\n {\n \"user\": new_task.user.username,\n \"date\": new_task.date,\n \"title\": new_task.title,\n \"description\": new_task.description,\n \"is_finished\": new_task.is_finished,\n \"status\": new_task.status,\n }\n })\n else:\n return HttpResponseBadRequest('Invalid request')\n","repo_name":"amrul-hzz/tugas-pbp","sub_path":"todolist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7208050011","text":"import datetime\nimport math\nimport matplotlib.pyplot as plt\n\ndef get_epoch_time(time_string):\n year = 2000 + int(time_string[0:2]) if int(time_string[0:2]) < 30 else 1900 + int(time_string[0:2])\n doy = int(time_string[2:5])\n day_frac = float(time_string[5:])\n month = (datetime.datetime(year,1,1) + datetime.timedelta(days=doy-1)).month\n day = (datetime.datetime(year,1,1) + datetime.timedelta(days=doy-1)).day\n hour = math.floor(day_frac * 24.)\n minute = math.floor((day_frac * 24 * 60) % 60)\n second = math.floor((day_frac * 24 * 3600) % 60)\n\n epoch_time = datetime.datetime(year, month, day, hour, minute, second)\n return epoch_time\n\ndef make_difference_plot(stk_data, python_data):\n times = []\n \n stk_pos_x = []\n stk_pos_y = []\n stk_pos_z = []\n\n python_pos_x = []\n python_pos_y = []\n python_pos_z = []\n\n for ephem in stk_data['ephemeris_list']:\n times.append(ephem['time'])\n stk_pos_x.append(ephem['position'][0] / 1000.)\n stk_pos_y.append(ephem['position'][1] / 1000.)\n stk_pos_z.append(ephem['position'][2] / 1000.)\n\n for ephem in python_data['ephemeris_list']:\n python_pos_x.append(ephem['position_pef'][0])\n python_pos_y.append(ephem['position_pef'][1])\n python_pos_z.append(ephem['position_pef'][2])\n\n assert len(stk_pos_x) == len(python_pos_x), str.format('STK and Python X array lengths ({} {}) do not match'.format(len(stk_pos_x), len(python_pos_x))) \n assert len(stk_pos_y) == len(python_pos_y), 'STK and Python Y array lengths do not match'\n assert len(stk_pos_z) == len(python_pos_z), 'STK and Python Z array lengths do not match'\n assert len(times) == len(stk_pos_x), 'Time and position array lengths do not match'\n\n diff_x = [python_x - stk_x for (python_x, stk_x) in zip (python_pos_x, stk_pos_x)]\n diff_y = [python_y - stk_y for (python_y, stk_y) in zip (python_pos_y, stk_pos_y)]\n diff_z = [python_z - stk_z for (python_z, stk_z) in zip (python_pos_z, stk_pos_z)]\n\n plt.plot(times, diff_x, label = 'X Difference')\n plt.plot(times, diff_y, label = 'Y Difference')\n plt.plot(times, diff_z, label = 'Z Difference')\n plt.show()","repo_name":"nuclth/python_SGP4","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41274089019","text":"\n# coding=utf-8\n\n# 
http://norvig.com/spell-correct.html\nimport re, collections\n#import pdb\nalphabet_all = u\"aąbcčdeęėfghiįjklmnopqrsštuųūvwxyzž\"\nalphabet_lt = u\"aącčeęėiįysšuųūzž\"\n\ndef words(text):\n text = text.decode('utf-8')\n words = re.findall(u'[aąbcčdeęėfghiįjklmnopqrsštuųūvwxyzž]+', text.lower())\n #if words[0][0] == '\\xbb' and words[0][1] == '\\xbf':\n # words[0] = words[0][2:]\n #return [w.decode('utf-8') for w in words]\n return words\n#[w.decode('utf-8') for w in words[582:583]]\n\ndef train(features):\n model = collections.defaultdict(lambda: 1)\n for f in features:\n model[f] += 1\n return model\n\nfrom_articles = words(file('dictionaries/dictionary.txt').read())\nswear_words = words(file('dictionaries/swear-words.txt').read())\nNWORDS = train(from_articles + swear_words)\n\nALPHA_LIST = []\nfor alpha in alphabet_lt:\n ALPHA_LIST.append(alpha)\n\ndef edits1(word, alphabet = alphabet_all):\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n deletes = [a + b[1:] for a, b in splits if b]\n transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]\n replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]\n inserts = [a + c + b for a, b in splits for c in alphabet]\n return set(deletes + transposes + replaces + inserts)\n\ndef concat_lists(list1, list2):\n out = []\n for l1 in list1:\n for l2 in list2:\n out.append(l1 + l2)\n return out\n\ndef known_edits2(word):\n try:\n return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)\n except:\n return set()\n\ndef editsLTx(word):\n return [re.sub('x', 'ks', word), re.sub('x', 'ch', word)]\n\ndef editsLT(word):\n letters = ['']\n for letter in word:\n if letter in u'aą':\n letters = concat_lists(letters, [u'a', u'ą'])\n elif letter in u'cč':\n letters = concat_lists(letters, [u'c', u'č'])\n elif letter in u'o':\n letters = concat_lists(letters, [u'o', u'uo'])\n elif letter in u'eęė':\n letters = concat_lists(letters, [u'e', u'ė', u'ę', u'ia'])\n elif letter in u'įiy':\n letters = concat_lists(letters, [u'į', u'i', u'y'])\n elif letter in u'šs':\n letters = concat_lists(letters, [u's', u'š'])\n elif letter in u'ųūu':\n letters = concat_lists(letters, [u'u', u'ų', u'ū'])\n elif letter in u'zž':\n letters = concat_lists(letters, [u'z', u'ž'])\n else:\n letters = concat_lists(letters, [letter])\n return letters\n\n\n\ndef known(words):\n return set(w for w in words if w in NWORDS)\n\n# 1 stage leaves the word if it is known\n# 2 stage edits letters by trying modifications with only lithuanian specific symbols without changing length of the word\n# 3, 4 stages are all other kind of edits\ndef correct(word):\n if len(word) > 15:\n candidates = known([word]) or known(editsLTx(word)) or known(edits1(word)) or known_edits2(word) or set([word])\n else:\n candidates = known([word]) or known(editsLTx(word)) or known(editsLT(word)) or known(edits1(word)) or known_edits2(word) or set([word])\n # debug\n # print candidates\n if len(candidates) == 0:\n return word\n else:\n out = max(candidates, key=NWORDS.get)\n #if NWORDS[out] < 3:\n # return u'\\'' + word + u'\\''\n #else:\n # return out\n return out\n","repo_name":"dunajevas/portal-model","sub_path":"correctDidYouMean.py","file_name":"correctDidYouMean.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74785485281","text":"import json\nimport os\nimport random\nimport socket\nimport xml.etree.ElementTree as ET\n\n\ndef main():\n ip = 
'192.168.2.2'\n\n print(f'{ip:<15} | {valid_ip4_addr(ip)}')\n wheel = receive_telegram()\n create_xml(wheel)\n print(f'Writing xml...')\n\n\ndef load_config():\n directory = os.path.dirname(__file__)\n filename = os.path.join(directory, 'config.json')\n\n if not os.path.exists(filename):\n return {}\n\n with open(filename, 'r', encoding='utf-8') as fin:\n return json.load(fin)\n\n\ndef valid_ip4_addr(ip: str) -> bool:\n try:\n socket.inet_pton(socket.AF_INET, ip)\n return True\n except socket.error:\n return False\n\n\ndef receive_telegram():\n directory = os.path.dirname(__file__)\n filename = os.path.join(directory, 'telegram_wheel.json')\n\n if not os.path.exists(filename):\n return {}\n\n with open(filename, 'r', encoding='utf-8') as fin:\n return json.load(fin)\n\n\ndef create_xml(teli_data):\n data = ET.Element('data')\n head = ET.SubElement(data, 'head')\n main_result = ET.SubElement(data, 'main_result')\n part_results = ET.SubElement(main_result, 'part_results')\n measurements = ET.SubElement(part_results, 'measurements')\n\n head.set('FP', teli_data['FP'])\n head.set('PNO', teli_data['IW'])\n\n main_result.set('name', 'main')\n part_results.set('name', teli_data['quality']['QI'])\n measurements.set('name', 'measurement')\n\n main_result.text = 'T_0001'\n part_results.text = '100'\n measurements.text = '1'\n\n mydata = ET.tostring(data)\n myfile = open('qal_results.xml', 'wb')\n myfile.write(mydata)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jbettenh/my_plc","sub_path":"plc_cpu.py","file_name":"plc_cpu.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33619174794","text":"import sys\nfrom reports import get_most_played, sum_sold, get_selling_avg, count_longest_title, get_date_avg, get_game\n\n\ndef print_out():\n file_name = \"game_stat.txt\"\n title = 'Half-Life 2'\n\n a = get_most_played(file_name)\n b = sum_sold(file_name)\n c = get_selling_avg(file_name)\n d = count_longest_title(file_name)\n e = get_date_avg(file_name)\n f = get_game(file_name, title)\n\n with open(sys.path[0] + '/' + \"answers.txt\", \"w+\") as outputfile:\n outputfile.write(str(a) + \"\\n\" + str(b) + \"\\n\" + str(c) + \"\\n\" + str(d) + \"\\n\" + str(e) + \"\\n\" + str(f))\n outputfile.close()\n\n\nprint_out()","repo_name":"CodecoolBP20172/pbwp-3rd-si-game-statistics-BalazsPest","sub_path":"part2/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37673803847","text":"################## Módulos ############\n\n# variáveis locais estão dentro da função\n# variáveis globais estão dentro do programa\n\"\"\"\ndocumentação da soma\n\"\"\"\n\n#print(__doc__)\n#print(__file__)\n\n#import builtins\n\n# print(dir(builtins))\n\n#import math\n\ndef soma(*args,**kwargs):\n\n '''\n soma (x,y,z,....) 
----> result\n '''\n #import pdb; pdb.set_trace() # forma tradicional\n breakpoint()#novo em python 3.7\n return sum(args) + sum(kwargs.values())\n\n# res = x + y\n# args_locals = locals()\n# print(args_locals)\n print(type(args))\n return x + x1 + sum(args)\n\n#args_globals = globals()\n\nif __name__ == '__main__':\n l=[1,1]\n d={'x':2, 'x1':2, 'x2':2}\n\n\n\n# resulta = soma(2)\n# print(resulta)\n# l = [1,2,3,4]\n# print(soma(*l))\n print(soma(**d))\n#print(args_globals)\n#print(args_globals['__name__'])\n#print(__name__)\n","repo_name":"marcos7almeida/PythonClass","sub_path":"function_kargs.py","file_name":"function_kargs.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72418372322","text":"import os\nfrom azure.storage.blob import BlobServiceClient, BlobClient\n\n\ndef get_blob_client(service_client : BlobServiceClient, container : str, blob : str) -> BlobClient:\n bc = service_client.get_blob_client(container = container, blob=blob)\n if bc.exists():\n print(f\"Blob client for {blob} sized {bc.get_blob_properties()['size'] / 1024 / 1024} mb.\")\n return bc\n\n\ndef get_service_client(blob_connection_string=None) -> BlobServiceClient:\n if blob_connection_string is None:\n blob_connection_string = os.environ['FecDataStorageConnectionAppSetting']\n service_client = BlobServiceClient.from_connection_string(blob_connection_string)\n return service_client\n","repo_name":"guyrt/opendatapipes","sub_path":"fec/src/attic/dataloadlib/blob_helpers.py","file_name":"blob_helpers.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72579247840","text":"#!/usr/bin/env python2.7\n\nimport logging\nimport time\nimport Queue\nimport re\nfrom threading import Thread\nfrom dns import message, query, exception\nimport dnslib\nimport DNSserver\n\nWHITE_LIST = re.compile(r'srv.*\\.eaufavor\\.info\\.')\nDNSlist = ['8.8.8.8']\n\nclass FetchWorker(Thread):\n def __init__(self, query_info):\n Thread.__init__(self)\n self.query_info = query_info\n\n def run(self):\n query_info = self.query_info\n NS = query_info[0]\n domain = query_info[1]\n query_type = query_info[2]\n queue = query_info[3]\n q = message.make_query(domain, query_type)\n rcode = q.rcode()\n count = 0\n start = time.time()*1000\n while True and count < 3:\n try:\n msg = query.udp(q, NS, timeout=1)\n except exception.Timeout:\n count += 1\n continue\n break\n if count >= 3:\n logging.warning(\"Worker thread for %s, too many retries\", NS)\n queue.put(([], rcode))\n return rcode\n ips = []\n answer = None\n logging.debug(\"Worker thread for %s gets reply %s\", NS, msg.answer)\n for anss in msg.answer:\n #print \"Type\", rdatatype.to_text(anss.to_rdataset().rdtype)\n if anss.to_rdataset().rdtype == query_type: #match record type\n # logging.debug(\"reply %s\", anss)\n answer = anss\n if answer is None:\n logging.warning(\"Worker thread for %s empty response for %s\",\\\n NS, domain)\n queue.put(([], rcode))\n return 1\n for ans in answer:\n ips.append(ans.to_text())\n end = time.time()*1000\n logging.debug(\"Worker thread for %s got answer, delay: %dms\",\n NS, end-start)\n queue.put((ips, rcode))\n #time.sleep(0)\n return 0\n\n\nclass DNSresolver(DNSserver.DNSUDPRequestHandler):\n\n def dns_do_resolve(self, qname, qtype):\n if qtype != dnslib.QTYPE.A:\n ans = []\n code = dnslib.RCODE.NXDOMAIN\n elif not WHITE_LIST.match(qname):\n ans = []\n 
code = dnslib.RCODE.NXDOMAIN\n # NOTE: do normal resolving\n else:\n ans, code = self.parallel_resolve(qname)\n #addtional = (['aay'], dnslib.RCODE.NOERROR, dnslib.QTYPE.TXT)\n\n return ((ans, code), [])\n\n def get_NS(self, qname):\n # first, get all NS record\n q = message.make_query(qname, dnslib.QTYPE.A)\n IPlist = []\n count = 0\n while True and count < 3:\n try:\n msg = query.udp(q, DNSlist[0], timeout=1)\n except exception.Timeout:\n count += 1\n continue\n break\n if count >= 3:\n logging.warning(\"Getting NS(A) %s failed, too many retries\", qname)\n return ([], dnslib.RCODE.NXDOMAIN)\n answer = None\n for anss in msg.answer:\n #print \"Type\", rdatatype.to_text(anss.to_rdataset().rdtype)\n if anss.to_rdataset().rdtype == dnslib.QTYPE.A: #match record type\n # logging.debug(\"reply %s\", anss)\n answer = anss\n\n if answer is None:\n logging.warning(\"Getting NS(A) %s failed, no NS(A)\", qname)\n return ([], dnslib.RCODE.NXDOMAIN)\n for ans in answer:\n IPlist.append(ans.to_text())\n return IPlist\n\n def parallel_resolve(self, qname):\n logging.debug(\"Parallel resolver\")\n NSlist = self.get_NS(qname)\n if not NSlist:\n return ([], dnslib.RCODE.NXDOMAIN)\n logging.debug(\"Ready for parallel query from %s\", NSlist)\n start = time.time()*1000\n qtype = dnslib.QTYPE.A\n\n queue = Queue.Queue()\n\n # Fire parallel lookups\n workers = []\n for ns in NSlist:\n worker = FetchWorker((ns, qname, qtype, queue))\n worker.daemon = True\n worker.start()\n workers.append(worker)\n\n end = time.time()*1000\n logging.debug(\"prepare task, latency: %d ms\", (end-start))\n time.sleep(0)\n # get the first response, and reply to client\n logging.debug(\"waiting for first response\")\n start = time.time()*1000\n first_response = queue.get()\n\n end = time.time()*1000\n #print \"parallel_resolve, latency: %d ms\"%(end-start)\n logging.info(\"got first response:%s\", first_response)\n return first_response\n\n '''\n start = time.time()*1000\n #if reply_callback:\n # reply_query(first_response, request, reply_callback)\n end = time.time()*1000\n #print \"Send reply, latency: %d ms\"%(end-start)\n # wait for the rest answers\n\n answers = [first_response]\n for worker in workers:\n worker.join()\n logging.debug(\"all workers finished\")\n while not queue.empty():\n answers.append(queue.get())\n '''\n\n\n\n\nif __name__ == '__main__':\n dns_server = DNSserver.DNSserver(port=53, serverClass=DNSresolver)\n dns_server.start_server()\n","repo_name":"eaufavor/BargainDNS","sub_path":"DNSresolver.py","file_name":"DNSresolver.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12044296226","text":"from functools import cached_property, cache\nfrom typing import Any, List, Optional, Tuple, Union\n\nimport colored\nfrom colored import stylize\nfrom PIL import Image as Img\nfrom PIL import ImageFont as ImgFont\nfrom PIL.Image import Image\nfrom PIL.ImageDraw import ImageDraw\nfrom PIL.ImageFont import FreeTypeFont\nfrom pydantic.color import Color\n\n__all__ = [\n \"t_colors\",\n \"t_text\",\n \"paste_updown\",\n \"text_colors\",\n \"ColorString\",\n]\nt_colors = List[Color]\nt_text = str\n\n\nclass ColorString:\n def __init__(self, text, color=[Color(\"#000000\")], fmt: bool = True) -> None:\n self.text = self._check_text(text)\n self.color = self._check_color(color)\n if fmt is True:\n self.format()\n\n @staticmethod\n def _check_text(v):\n return str(v)\n\n @staticmethod\n def _check_color(v):\n _res = []\n 
if isinstance(v, Color):\n return [v]\n if isinstance(v, str):\n return [Color(v)]\n for i in v:\n if not isinstance(i, Color):\n i = Color(i)\n _res.append(i)\n return _res\n\n def format(self):\n \"\"\"重整颜色使得颜色与字符串长度相等\n \n 颜色多则截掉多余的 , 否则补为最后一种颜色\n \"\"\"\n (self.text, self.color) = self.content\n\n @cached_property\n def content(self) -> Tuple[t_text, t_colors]:\n \"\"\"返回重整结果 , 但不应用\n \"\"\"\n le1 = len(self.text)\n le2 = len(self.color)\n if le1 == le2:\n return (self.text, self.color)\n\n if le1 < le2:\n color = self.color[:le1]\n else:\n color = self.color + [self.color[-1] for _ in range(le1 - le2)]\n\n return (self.text, color)\n\n def __len__(self) -> int:\n \"\"\"返回字符串长度\n \"\"\"\n return self.text.__len__()\n\n def __add__(self, other):\n return ColorString(\n text=(self.text + other.text), color=(self.color + other.color)\n )\n\n def __str__(self):\n msg = \"\"\n (txt, col) = self.content\n for i, j in zip(txt, col):\n msg += \"({},{})\".format(i, j.as_hex())\n return msg\n\n def print(self, bg: Color = Color(\"#ffffff\")):\n \"\"\"尝试打印(with color)\n \"\"\"\n (txt, col) = self.content\n for i, j in zip(txt, col):\n print(stylize(i, colored.fg(j.as_hex()) + colored.bg(bg.as_hex())), end=\"\")\n print()\n\n\ndef paste_updown(img1: Image, img2: Image, loc: Tuple[float, float] = (0, 0)) -> Image:\n \"\"\"把 img2 贴到 img1 下方 , 如果图片范围超出原图 , 多余部分会由空像素替代\n\n Args:\n `img1` : [description]\n `img2` : [description]\n `loc` : 左上角位置.\n \"\"\"\n tmp1 = Img.new(\n \"RGBA\",\n (\n max(img1.size[0], img2.size[0]) + abs(loc[0]),\n max(img1.size[1], img2.size[1]) + abs(loc[1]),\n ), # type: ignore\n )\n tmp2 = Img.new(\n \"RGBA\",\n (\n max(img1.size[0], img2.size[0]) + abs(loc[0]),\n max(img1.size[1], img2.size[1]) + abs(loc[1]),\n ), # type: ignore\n )\n tmp1.paste(img1, (int((abs(loc[0]) - loc[0]) / 2), int((abs(loc[1]) - loc[1]) / 2)))\n tmp2.paste(img2, (int((abs(loc[0]) + loc[0]) / 2), int((abs(loc[1]) + loc[1]) / 2)))\n final = Img.alpha_composite(tmp2, tmp1)\n max_x = max(\n int((abs(loc[0]) - loc[0]) / 2) + img1.size[0],\n int((abs(loc[0]) + loc[0]) / 2) + img2.size[0],\n )\n max_y = max(\n int((abs(loc[1]) - loc[1]) / 2) + img1.size[1],\n int((abs(loc[1]) + loc[1]) / 2) + img2.size[1],\n )\n return final.crop((0, 0, max_x, max_y))\n\n\ndef text_colors(\n draw: ImageDraw,\n text: ColorString,\n font: Union[Tuple[str, int], FreeTypeFont],\n loc: Tuple[float, float] = (0, 0),\n) -> None:\n \"\"\"绘制一行具有多个颜色的字符串 , (空格占位置不上色)\n Args:\n `draw` : ImageDraw 实例 , 用来绘制的对象\n `text` : `ColorString` 类 , 包含文字和颜色\n `fonttype` : 字体文件\n `fontsize` : 字体大小\n `loc` : 位置. \n `font` : ImageFont 实例. 
\n \"\"\"\n\n if isinstance(font, tuple):\n font = ImgFont.truetype(font[0], font[1])\n (t, c) = text.content\n\n for i in range(len(c) - 1, -1, -1):\n draw.text(loc, t[: i + 1], c[i].as_hex(), font=font)\n\n\nif __name__ == \"__main__\":\n tour = ColorString(text=\"tour\", color=[\"#000000\", \"#a10703\"])\n ist = ColorString(text=\"ist\", color=[\"#000000\"])\n tour.format()\n res = tour + ist\n print(res)\n res.print()\n","repo_name":"CherryGS/testbot","sub_path":"plugins/_codeforces/card/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22145205818","text":"import random\nquotes = [\"Ecoutez-moi, Monsieur Shakespeare, nous avons beau être ou ne pas être, nous sommes!\",\"On doit pouvoir choisir entre s'écouter parler et se faire entendre\"]\n\ncharachters =[\"alvin et les Chipmunks\",\"Babar\",\"betty boop\",\"calimero\",'casper',\"le chat potté\",\"kirikou\"]\n\ndef show_random_quote(my_list):\n rand_num = random.randint(0,len(my_list)-1)\n item = my_list[rand_num]\n return item\n\ndef capitalize(words):\n for word in words:\n word.capitalize()\n return word\n\ndef message(character, quote):\n capitalize(character)\n capitalize(quote)\n return \"{} a dit {} \".format(character, quote)\n\nuser_answer =input(\"Tapez entrée pour connaître une autre citation ou B pour quitter le programme\").capitalize()\n\n\nwhile user_answer != \"B\":\n print(message(show_random_quote(charachters), show_random_quote(quotes)))\n user_answer =input(\"Tapez entrée pour connaître une autre citation ou B pour quitter le programme\").capitalize()\n","repo_name":"KodomoAt/san_antonio","sub_path":"san_antonio.py","file_name":"san_antonio.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7393662698","text":"import cherrypy\nimport m20mon\nclass encountergen(object):\n def index(self,lvl=1):\n# lvl=cherrypy.request.params.get(lvl)\n lvl=int(lvl)\n output=m20mon.makeencounter(lvl)\n return output\n def static(self,filename):\n f=open('static/'+filename)\n output=\"
    \"\n for line in f:\n output+=line+\"
    \"\n return output\n index.exposed = True\n static.exposed= True\n\nroot=encountergen()\ncherrypy.quickstart(root)\n","repo_name":"laurheth/encounter-generator","sub_path":"cherrym20.py","file_name":"cherrym20.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14253504460","text":"from .base import FunctionalTest\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom unittest import skip\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nclass NewConversionTest(FunctionalTest):\n #User is presented with a list of processes to choose from\n def test_user_can_choose_type(self):\n self.browser.get(self.live_server_url + '/conversions/create/')\n response = self.browser.find_elements_by_tag_name('h1')\n self.assertIn('BatchCave',response[0].text)\n options = self.browser.find_elements_by_tag_name('option')\n self.assertIn('ER_EAI_2nd',options[2].text)\n\n def test_user_enters_conversion_info(self):\n self.browser.get(self.live_server_url + '/conversions/create/')\n select = Select(self.browser.find_element_by_tag_name(\"select\"))\n select.select_by_visible_text(\"ER_EAI_2nd\")\n #User selects a process\n processBox = self.browser.find_element_by_id(\"id_Name\")\n processBox.send_keys('Firstiest conversion')\n\n #User is able to upload a file through dialog box\n uploadBox = self.browser.find_element_by_id(\"id_Upload\")\n uploadBox.send_keys(\"~/TEST.mrc\")\n time.sleep(10)\n submitButton = self.browser.find_element_by_tag_name(\"form\")\n submitButton.submit()\n\n #user is taken to index view\n time.sleep(10)\n table = self.browser.find_element_by_css_selector(\"table\")\n print(table.text)\n #User can only submit a complete form\n","repo_name":"billmcmillin/batchcave","sub_path":"batchcave/functional_tests/test_new_conversion.py","file_name":"test_new_conversion.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73540859680","text":"import logging\n_logger = logging.getLogger(__name__)\nfrom math import sqrt\nimport numpy as np\nfrom numpy import dot, cross, sin, cos, zeros, array, empty, float64, sign\n\n\nfrom .collisions import collide_balls, collide_balls_f90\n\n\nINCH2METER = 0.0254\n_k = array([0, 1, 0], # upward-pointing basis vector :math:`\\hat{k}`\n dtype=float64) # of any ball-centered frame, following the convention of Marlow\n\n\ndef printit(roots):\n return ', '.join('%5.17f + %5.17fj' % (r.real, r.imag) if r.imag else '%5.17f' % r for r in roots)\n\n\nclass PhysicsEvent(object):\n ball_radius = 0.02625\n ball_mass = 0.1406\n ball_I = 2/5 * ball_mass * ball_radius**2\n mu_s = 0.21\n mu_b = 0.05\n e = 0.89\n mu_r = 0.016 # coefficient of rolling friction between ball and table\n mu_sp = 0.044 # coefficient of spinning friction between ball and table\n g = 9.81 # magnitude of acceleration due to gravity\n _ZERO_TOLERANCE = 1e-8\n _ZERO_TOLERANCE_SQRD = _ZERO_TOLERANCE**2\n def __init__(self, t, T=0.0, parent_event=None, **kwargs):\n \"\"\"\n Base class of pool physics events.\n\n :param t: time of event start\n :param T: time duration of the event (default is 0, i.e. 
instantaneous)\n \"\"\"\n self.t = t\n self.T = T\n self._parent_event = parent_event\n self.interval = array((t, t + T))\n @property\n def child_events(self):\n return ()\n @property\n def parent_event(self):\n return self._parent_event\n @staticmethod\n def set_quaternion_from_euler_angles(psi=0.0, theta=0.0, phi=0.0, out=None):\n if out is None: out = empty(4, dtype=float64)\n angles = array((psi, theta, phi))\n c1, c2, c3 = cos(0.5 * angles)\n s1, s2, s3 = sin(0.5 * angles)\n out[0] = s1*c2*c3 + c1*s2*s3\n out[1] = c1*s2*c3 - s1*c2*s3\n out[2] = c1*c2*s3 + s1*s2*c3\n out[3] = c1*c2*c3 - s1*s2*s3\n return out\n @staticmethod\n def events_str(events, sep='\\n\\n' + 48*'-' + '\\n\\n'):\n return sep.join('%3d: %s' % (i_e, e) for i_e, e in enumerate(events))\n def __lt__(self, other):\n if isinstance(other, PhysicsEvent):\n return self.t < other.t\n else:\n return self.t < other\n def __gt__(self, other):\n if isinstance(other, PhysicsEvent):\n return self.t > other.t\n else:\n return self.t > other\n def __str__(self):\n if self.T == 0.0 or self.T == float('inf'):\n return '<%16s ( %5.15f )>' % (self.__class__.__name__, self.t)\n else:\n return '<%16s ( %5.15f , %5.15f )>' % (self.__class__.__name__, self.t, self.t+self.T)\n\n\nclass BallEvent(PhysicsEvent):\n def __init__(self, t, i, **kwargs):\n super().__init__(t, **kwargs)\n self.i = i\n @property\n def next_motion_event(self):\n return None\n def __eq__(self, other):\n return self.__class__ == other.__class__ and self.t == other.t and self.T == other.T and self.i == other.i\n def __str__(self):\n return super().__str__()[:-1] + \" i=%d>\" % self.i\n def __repr__(self):\n return '%s(%d @ %s) at %x' % (self.__class__.__name__.split('.')[-1], self.i, self.t, id(self))\n\n\nclass BallStationaryEvent(BallEvent):\n def __init__(self, t, i, r_0=None, **kwargs):\n super().__init__(t, i, **kwargs)\n if r_0 is None:\n r_0 = zeros(3, dtype=float64)\n self._r_0 = self._r = r_0\n self._a_global = None\n @property\n def acceleration(self):\n return zeros(3, dtype=float64)\n @property\n def global_motion_coeffs(self):\n if self._a_global is None:\n self._a_global = a = zeros((3,3), dtype=float64)\n a[0] = self._r_0\n return self._a_global, None\n @property\n def global_linear_motion_coeffs(self):\n if self._a_global is None:\n self._a_global = a = zeros((3,3), dtype=float64)\n a[0] = self._r_0\n return self._a_global\n def calc_shifted_motion_coeffs(self, t0):\n return self.global_motion_coeffs\n def eval_position(self, tau, out=None):\n if out is None:\n out = self._r_0.copy()\n else:\n out[:] = self._r_0\n return out\n def eval_velocity(self, tau, out=None):\n if out is None:\n out = zeros(3, dtype=float64)\n else:\n out[:] = 0\n return out\n def eval_slip_velocity(self, tau, out=None):\n if out is None:\n out = zeros(3, dtype=float64)\n else:\n out[:] = 0\n return out\n def __str__(self):\n return super().__str__()[:-1] + '\\n r=%s>' % self._r\n\n\nclass BallRestEvent(BallStationaryEvent):\n def __init__(self, t, i, **kwargs):\n super().__init__(t, i, T=float('inf'), **kwargs)\n def eval_angular_velocity(self, tau, out=None):\n if out is None:\n out = zeros(3, dtype=float64)\n else:\n out[:] = 0\n return out\n\n\nclass BallSpinningEvent(BallStationaryEvent):\n def __init__(self, t, i, r_0, omega_0_y, **kwargs):\n R = self.ball_radius\n self._omega_0_y = omega_0_y\n self._b = -5 * sign(omega_0_y) * self.mu_sp * self.g / (2 * R)\n T = abs(omega_0_y / self._b)\n super().__init__(t, i, r_0=r_0, T=T, **kwargs)\n self._next_motion_event = None\n 
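# Spin-decay sketch: omega_y decays linearly at 5*mu_sp*g/(2*R) rad/s^2 (the\n # magnitude of _b above). With the PhysicsEvent constants mu_sp=0.044, g=9.81,\n # R=0.02625: rate = 5*0.044*9.81/(2*0.02625) ≈ 41.11 rad/s^2, so a hypothetical\n # initial spin of 10 rad/s stops after T = 10/41.11 ≈ 0.243 s.\n 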
@property\n def next_motion_event(self):\n if self._next_motion_event is None:\n self._next_motion_event = BallRestEvent(self.t + self.T, self.i, r_0=self._r_0)\n return self._next_motion_event\n def eval_angular_velocity(self, tau, out=None):\n if out is None:\n out = zeros(3, dtype=float64)\n else:\n out[:] = 0\n out[1] = self._omega_0_y + self._b * tau\n return out\n\n\nclass BallMotionEvent(BallEvent):\n def __init__(self, t, i, T=None, a=None, b=None,\n r_0=None, v_0=None, a_0=None,\n omega_0=None,\n **kwargs):\n \"\"\"\n :param t: start time of event\n :param T: duration of event\n :param a: coefficients of the positional quadratic equation of motion (event-local time)\n :param b: coefficients of the angular velocity linear equation of motion (event-local time)\n :param r_0: ball position at start of event\n :param v_0: ball velocity at start of event\n :param omega_0: ball angular velocity at start of event\n \"\"\"\n super().__init__(t, i, T=T, **kwargs)\n if a is None:\n a = zeros((3,3), dtype=float64)\n if b is None:\n b = zeros((2,3), dtype=float64)\n self._a = a\n self._b = b\n if r_0 is not None:\n a[0] = r_0\n if v_0 is not None:\n a[1] = v_0\n if a_0 is not None:\n a[2] = 0.5 * a_0\n if omega_0 is not None:\n b[0] = omega_0\n self._r_0 = a[0]\n self._v_0 = a[1]\n self._omega_0 = b[0]\n self._ab_global = None\n self._a_global = None\n self._next_motion_event = None\n @property\n def acceleration(self):\n return 2 * self._a[2]\n @property\n def next_motion_event(self):\n return self._next_motion_event\n @property\n def global_motion_coeffs(self):\n if self._ab_global is None:\n self._ab_global = self.calc_global_motion_coeffs(self.t, self._a, self._b)\n return self._ab_global[:3], self._ab_global[3:]\n @property\n def global_linear_motion_coeffs(self):\n if self._a_global is None:\n self._a_global = self.calc_global_linear_motion_coeffs(self.t, self._a)\n return self._a_global\n @staticmethod\n def calc_global_linear_motion_coeffs(t, a, out=None):\n \"\"\"\n Calculates the coefficients of the global-time linear equations of motion.\n\n :param t: the global time of the start of the motion\n :param a: the local-time (0 at the start of the motion) linear motion coefficients\n \"\"\"\n if out is None:\n out = a.copy()\n else:\n out[:] = a\n out[0] += -t * a[1] + t**2 * a[2]\n out[1] += -2 * t * a[2]\n return out\n def calc_shifted_motion_coeffs(self, t0):\n ab_global = self.calc_global_motion_coeffs(self.t - t0, self._a, self._b)\n return ab_global[:3], ab_global[3:]\n @staticmethod\n def calc_global_motion_coeffs(t, a, b, out=None):\n \"\"\"\n Calculates the coefficients of the global-time equations of motion.\n\n :param t: the global time of the start of the motion\n :param a: the local-time (0 at the start of the motion) linear motion coefficients\n :param b: the local-time angular motion coefficients\n \"\"\"\n if out is None:\n out = zeros((5,3), dtype=float64)\n out[:3] = a\n out[3:] = b\n a_global, b_global = out[:3], out[3:]\n a_global[0] += -t * a[1] + t**2 * a[2]\n a_global[1] += -2 * t * a[2]\n b_global[0] += -t * b[1]\n return out\n def eval_position(self, tau, out=None):\n if out is None:\n out = self._r_0.copy()\n else:\n out[:] = self._r_0\n if tau != 0:\n a = self._a\n out += tau * a[1] + tau**2 * a[2]\n return out\n def eval_velocity(self, tau, out=None):\n if out is None:\n out = self._v_0.copy()\n else:\n out[:] = self._v_0\n if tau != 0:\n out += 2 * tau * self._a[2]\n return out\n def eval_angular_velocity(self, tau, out=None):\n if out is None:\n out = 
empty(3, dtype=float64)\n out[:] = self._b[0] + tau * self._b[1]\n if self._b[0,1] >= 0:\n out[1] = max(0, out[1])\n else:\n out[1] = min(0, out[1])\n return out\n def eval_surface_velocity(self, tau, rd, v=None, omega=None, out=None):\n if v is None:\n v = self.eval_velocity(tau)\n if omega is None:\n omega = self.eval_angular_velocity(tau)\n if out is None:\n out = empty(3, dtype=float64)\n out[:] = v - self.ball_radius / sqrt(dot(rd, rd)) * cross(rd, omega)\n return out\n def eval_position_and_velocity(self, tau, out=None):\n if out is None:\n out = empty((2,3), dtype=float64)\n taus = array((1.0, tau, tau**2))\n a = self._a\n dot(taus, a, out=out[0])\n out[1] = a[1] + 2*tau*a[2]\n return out\n def __str__(self):\n return super().__str__()[:-1] + '\\n r_0=%s\\n v_0=%s\\n a=%s\\n omega_0=%s>' % (self._r_0, self._v_0, self.acceleration, self._omega_0)\n\n\nclass BallRollingEvent(BallMotionEvent):\n def __init__(self, t, i, r_0, v_0, omega_0_y=0.0, **kwargs):\n R = self.ball_radius\n v_0_mag = sqrt(dot(v_0, v_0))\n T = v_0_mag / (self.mu_r * self.g)\n omega_0 = array((v_0[2]/R, omega_0_y, -v_0[0]/R), dtype=float64)\n super().__init__(t, i, T=T, r_0=r_0, v_0=v_0, omega_0=omega_0, **kwargs)\n self._a[2] = -0.5 * self.mu_r * self.g * v_0 / v_0_mag\n self._b[1,::2] = -omega_0[::2] / T\n self._b[1,1] = -sign(omega_0_y) * 5 / 7 * self.mu_r * self.g / R\n #self._b[1,1] = -sign(omega_0_y) * 5 / 2 * self.mu_sp * self.g / R\n self._next_motion_event = None\n @property\n def next_motion_event(self):\n if self._next_motion_event is None:\n i, t, T = self.i, self.t, self.T\n omega_1 = self.eval_angular_velocity(T)\n if abs(omega_1[1]) < self._ZERO_TOLERANCE:\n self._next_motion_event = BallRestEvent(t + T, i, r_0=self.eval_position(T))\n else:\n self._next_motion_event = BallSpinningEvent(t + T, i, r_0=self.eval_position(T),\n omega_0_y=omega_1[1])\n return self._next_motion_event\n def eval_slip_velocity(self, tau, out=None, **kwargs):\n if out is None:\n out = zeros(3, dtype=float64)\n else:\n out[:] = 0\n return out\n\n\nclass BallSlidingEvent(BallMotionEvent):\n def __init__(self, t, i, r_0, v_0, omega_0, **kwargs):\n R, mu_s, g = self.ball_radius, self.mu_s, self.g\n u_0 = v_0 + R * array((omega_0[2], 0.0, -omega_0[0]), dtype=float64)\n u_0_mag = sqrt(dot(u_0, u_0))\n T = 2 * u_0_mag / (7 * mu_s * g)\n super().__init__(t, i, T=T, r_0=r_0, v_0=v_0, omega_0=omega_0, **kwargs)\n self._u_0 = u_0\n self._u_0_mag = u_0_mag\n self._a[2] = -0.5 * mu_s * g * u_0 / u_0_mag\n self._b[0] = omega_0\n self._b[1,::2] = 5 * mu_s * g / (2 * R) / u_0_mag * array((u_0[2], -u_0[0]), dtype=float64)\n self._b[1,1] = -sign(omega_0[1]) * 5 * self.mu_sp * g / (2 * R)\n self._next_motion_event = None\n @property\n def next_motion_event(self):\n if self._next_motion_event is None:\n i, t, T = self.i, self.t, self.T\n omega_1 = self.eval_angular_velocity(T)\n self._next_motion_event = BallRollingEvent(t + T, i,\n r_0=self.eval_position(T),\n v_0=self.eval_velocity(T),\n omega_0_y=omega_1[1])\n return self._next_motion_event\n\n\nclass CueStrikeEvent(BallEvent):\n def __init__(self, t, i, r_i, r_c, V, M, q_i=None):\n \"\"\"\n :param r_i: position of ball at moment of impact\n :param r_c: global coordinates of the point of contact\n :param V: cue velocity at moment of impact; the cue's velocity is assumed to be aligned with its axis\n :param M: cue mass\n :param q_i: rotation quaternion of ball at moment of impact\n \"\"\"\n super().__init__(t, i)\n m, R, I = self.ball_mass, self.ball_radius, self.ball_I\n V = V.copy()\n V[1] 
= 0 # temporary: set vertical to 0\n self.V = V\n self.M = M\n self.Q = Q = r_c - r_i\n _j = -V; _j[1] = 0; _j /= sqrt(dot(_j, _j))\n _i = cross(_j, _k)\n a, b = dot(Q, _i), Q[1]\n c = sqrt(R**2 - a**2 - b**2)\n sin, cos = b/R, sqrt(R**2 - b**2)/R\n V_mag = sqrt(dot(V, V))\n F_mag = 2*m*V_mag / (\n 1 + m/M + 5/(2*R**2)*(a**2 + b**2*cos**2 + c**2*sin**2 - 2*b*c*cos*sin)\n )\n omega_0 = ( (-c*F_mag*sin + b*F_mag*cos) * _i +\n (a*F_mag*sin) * _j +\n (-a*F_mag*cos) * _k ) / I\n self._child_events = (BallSlidingEvent(t, i,\n r_0=r_i,\n v_0=-F_mag/m*_j,\n omega_0=omega_0,\n q_0=q_i,\n parent_event=self),)\n @property\n def child_events(self):\n return self._child_events\n def __str__(self):\n return super().__str__()[:-1] + '\\n Q=%s\\n V=%s\\n M=%s>' % (self.Q, self.V, self.M)\n\n\nclass RailCollisionEvent(BallEvent):\n _J_LOC = array((\n ( 0.0, 0.0, -1.0),\n ( 1.0, 0.0, 0.0),\n ( 0.0, 0.0, 1.0),\n (-1.0, 0.0, 0.0)), dtype=float64)\n _J_VAR = [2, 0, 2, 0]\n _I_LOC = array((\n ( 1.0, 0.0, 0.0),\n ( 0.0, 0.0, 1.0),\n (-1.0, 0.0, 0.0),\n ( 0.0, 0.0, -1.0)), dtype=float64)\n kappa = 0.6 # coefficient of restitution\n def __init__(self, t, e_i, side):\n super().__init__(t, e_i.i)\n self.e_i = e_i\n self.side = side\n self._child_events = None\n @property\n def child_events(self):\n if self._child_events is None:\n R = self.ball_radius\n e_i = self.e_i\n tau = self.t - e_i.t\n v_1 = e_i.eval_velocity(tau)\n omega_1 = e_i.eval_angular_velocity(tau)\n side = self.side\n j_var = self._J_VAR[side]\n i_var = 2 - j_var\n v_1[j_var] *= -self.kappa\n omega_1[j_var] *= 0.8\n omega_1[i_var] = -sign(omega_1[i_var]) * abs(v_1[j_var]) / R\n if isinstance(e_i, BallSlidingEvent) \\\n and abs(dot(v_1, self._J_LOC[side])) / R <= abs(dot(omega_1, self._I_LOC[side])):\n self._child_events = (BallSlidingEvent(self.t, e_i.i,\n r_0=e_i.eval_position(tau),\n v_0=v_1,\n omega_0=omega_1,\n parent_event=self),)\n else:\n self._child_events = (BallRollingEvent(self.t, e_i.i,\n r_0=e_i.eval_position(tau),\n v_0=v_1,\n parent_event=self),)\n return self._child_events\n def __str__(self):\n return super().__str__()[:-1] + \" side=%d>\" % self.side\n\n\nclass SegmentCollisionEvent(BallEvent):\n kappa = 0.6\n def __init__(self, t, e_i, seg, nor, tan):\n super().__init__(t, e_i.i)\n self.e_i = e_i\n self.seg = seg\n self.nor = nor\n self.tan = tan\n tau = t - e_i.t\n r = e_i.eval_position(tau)\n self.r = r\n self.r_c = r - self.ball_radius * nor\n self._child_events = None\n @property\n def child_events(self):\n if self._child_events is None:\n R = self.ball_radius\n e_i = self.e_i\n tau = self.t - e_i.t\n v_1 = e_i.eval_velocity(tau)\n omega_1 = e_i.eval_angular_velocity(tau)\n v_1 = -self.kappa * dot(v_1, self.nor) * self.nor + dot(v_1, self.tan) * self.tan\n omega_1[:] = 0\n if isinstance(e_i, BallSlidingEvent) \\\n and abs(dot(v_1, self.tan)) / R <= abs(dot(omega_1, self.nor)):\n self._child_events = (BallSlidingEvent(self.t, e_i.i,\n r_0=self.r,\n v_0=v_1,\n omega_0=omega_1,\n parent_event=self),)\n else:\n self._child_events = (BallRollingEvent(self.t, e_i.i,\n r_0=self.r,\n v_0=v_1,\n parent_event=self),)\n return self._child_events\n def __str__(self):\n return super().__str__()[:-1] + \" seg=%d r=%s r_c=%s>\" % (self.seg, self.r, self.r_c)\n\n\nclass CornerCollisionEvent(BallEvent):\n kappa = 0.6 # coefficient of restitution\n def __init__(self, t, e_i, i_c, r_c):\n super().__init__(t, e_i.i)\n self.e_i = e_i\n self.i_c = i_c\n self.r_c = r_c.copy()\n tau = self.t - e_i.t\n self.r_i = r_i = e_i.eval_position(tau)\n v_0 = 
self.v_0 = e_i.eval_velocity(tau)\n j_loc = self.r_c - r_i\n j_loc[1] = 0\n j_loc /= -sqrt(dot(j_loc, j_loc))\n self.j_loc = j_loc\n i_loc = array((-j_loc[2], 0.0, j_loc[0]), dtype=float64)\n self.i_loc = i_loc\n v_1 = v_0\n omega_1 = self.omega_0 = e_i.eval_angular_velocity(tau)\n v_1x, v_1y = dot(v_1, i_loc), -self.kappa * dot(v_1, j_loc)\n self.v_1 = v_1 = v_1x * i_loc \\\n + v_1y * j_loc\n self.omega_1 = dot(omega_1, j_loc) * j_loc \\\n + omega_1[1] * _k \\\n - v_1y / self.ball_radius * i_loc\n self._child_events = None\n @property\n def child_events(self):\n if self._child_events is None:\n R = self.ball_radius\n u_1 = self.v_1 + R * array((self.omega_1[2], 0.0, -self.omega_1[0]), dtype=float64)\n if isinstance(self.e_i, BallSlidingEvent) \\\n and dot(u_1, u_1) > 0:\n self._child_events = (BallSlidingEvent(self.t, self.e_i.i,\n r_0=self.r_i,\n v_0=self.v_1,\n omega_0=self.omega_1,\n parent_event=self),)\n else:\n self._child_events = (BallRollingEvent(self.t, self.e_i.i,\n r_0=self.r_i,\n v_0=self.v_1,\n omega_0_y=self.omega_1[1],\n parent_event=self),)\n return self._child_events\n def __str__(self):\n return super().__str__()[:-1] + \" i_c=%s r_c=%s v_0=%s v_1=%s omega_0=%s omega_1=%s>\" % (\n self.i_c, self.r_c, self.v_0, self.v_1, self.omega_0, self.omega_1)\n\n\nclass BallCollisionEvent(PhysicsEvent):\n def __init__(self, t, e_i, e_j):\n super().__init__(t)\n self.e_i, self.e_j = e_i, e_j\n self.i, self.j = e_i.i, e_j.i\n tau_i, tau_j = t - e_i.t, t - e_j.t\n self._r_i, self._r_j = e_i.eval_position(tau_i), e_j.eval_position(tau_j)\n self._v_i, self._v_j = e_i.eval_velocity(tau_i), e_j.eval_velocity(tau_j)\n self._r_ij = r_ij = self._r_j - self._r_i\n self._v_ij = v_ij = self._v_j - self._v_i\n self._y_loc = y_loc = 0.5 * r_ij / self.ball_radius\n self._x_loc = array((-y_loc[2], 0.0, y_loc[0]), dtype=float64)\n self._v_ij_y0 = dot(v_ij, y_loc)\n self._omega_i, self._omega_j = e_i.eval_angular_velocity(tau_i), e_j.eval_angular_velocity(tau_j)\n def __str__(self):\n return '<' + super().__str__()[:-1] + '''\n i,j = %s,%s\n r_i = %s\n r_j = %s\n v_i0 = %s ||v_i0|| = %s\n v_j0 = %s ||v_j0|| = %s\n v_ij_y0 = %s\n v_ij_y1 = %s\n v_i1 = %s ||v_i1|| = %s\n v_j1 = %s ||v_j1|| = %s\n>>''' % (self.i, self.j,\n printit(self._r_i),\n printit(self._r_j),\n printit(self._v_i), sqrt(dot(self._v_i, self._v_i)),\n printit(self._v_j), sqrt(dot(self._v_j, self._v_j)),\n self._v_ij_y0,\n self._v_ij_y1,\n printit(self._v_i_1), sqrt(dot(self._v_i_1, self._v_i_1)),\n printit(self._v_j_1), sqrt(dot(self._v_j_1, self._v_j_1)))\n def __repr__(self):\n return '%s(%d,%d @ %s, v_ij_0=%s, v_ij_1=%s) at %x' % (self.__class__.__name__.split('.')[-1], self.i, self.j, self.t, self._v_ij_y0, self._v_ij_y1, id(self))\n def __lt__(self, other):\n if not isinstance(other, PhysicsEvent):\n return self.t < other\n return self.t <= other.t if not isinstance(other, BallCollisionEvent) \\\n else self.t < other.t\n def __gt__(self, other):\n if not isinstance(other, PhysicsEvent):\n return self.t > other\n return self.t > other.t if not isinstance(other, BallCollisionEvent) \\\n else self.t >= other.t\n @property\n def child_events(self):\n if self._child_events is None:\n child_events = []\n for (r, v_1, omega_1, e) in (\n (self._r_i, self._v_i_1, self._omega_i_1, self.e_i),\n (self._r_j, self._v_j_1, self._omega_j_1, self.e_j)\n ):\n if dot(v_1, v_1) == 0:\n if abs(omega_1[1]) == 0:\n e_1 = BallRestEvent(self.t, e.i,\n r_0=r,\n parent_event=self)\n else:\n e_1 = BallSpinningEvent(self.t, e.i,\n r_0=r,\n 
omega_0_y=omega_1[1],\n parent_event=self)\n else:\n u_1 = v_1 + self.ball_radius * array((omega_1[2], 0.0, -omega_1[0]), dtype=float64)\n if dot(u_1, u_1) == 0:\n e_1 = BallRollingEvent(self.t, e.i,\n r_0=r,\n v_0=v_1,\n omega_0_y=omega_1[1],\n parent_event=self)\n else:\n e_1 = BallSlidingEvent(self.t, e.i,\n r_0=r,\n v_0=v_1,\n omega_0=omega_1,\n parent_event=self)\n child_events.append(e_1)\n self._child_events = tuple(child_events)\n return self._child_events\n\n\nclass SimpleBallCollisionEvent(BallCollisionEvent):\n def __init__(self, t, e_i, e_j, v_factor=0.98):\n \"\"\"Simple one-parameter elastic collision model with no friction between balls or any other surface.\"\"\"\n super().__init__(t, e_i, e_j)\n y_loc = self._y_loc\n v_i, v_j = self._v_i, self._v_j\n v_ix = dot(v_i, y_loc) * y_loc\n v_jx = dot(v_j, y_loc) * y_loc\n v_iy = v_i - v_ix\n v_jy = v_j - v_jx\n v_ix_1 = 0.5 * ((1 - v_factor) * v_ix + (1 + v_factor) * v_jx)\n v_jx_1 = 0.5 * ((1 - v_factor) * v_jx + (1 + v_factor) * v_ix)\n v_i_1 = v_iy + v_ix_1\n v_j_1 = v_jy + v_jx_1\n self._v_i_1, self._v_j_1 = v_i_1, v_j_1\n self._v_ij_y1 = dot(v_j_1 - v_i_1, y_loc)\n omega_i, omega_j = self._omega_i, self._omega_j\n self._omega_i_1, self._omega_j_1 = omega_i.copy(), omega_j.copy()\n self._child_events = None\n @property\n def child_events(self):\n if self._child_events is None:\n child_events = []\n for (r, v_1, omega_1, e) in (\n (self._r_i, self._v_i_1, self._omega_i_1, self.e_i),\n (self._r_j, self._v_j_1, self._omega_j_1, self.e_j)\n ):\n if dot(v_1, v_1) == 0:\n if abs(omega_1[1]) == 0:\n e_1 = BallRestEvent(self.t, e.i,\n r_0=r,\n parent_event=self)\n else:\n e_1 = BallSpinningEvent(self.t, e.i,\n r_0=r,\n omega_0_y=omega_1[1],\n parent_event=self)\n elif isinstance(e, BallSlidingEvent) \\\n and abs(dot(v_1, self._y_loc) / self.ball_radius) > abs(dot(omega_1, cross(_k, self._y_loc))):\n e_1 = BallSlidingEvent(self.t, e.i,\n r_0=r,\n v_0=v_1,\n omega_0=omega_1,\n parent_event=self)\n else:\n e_1 = BallRollingEvent(self.t, e.i,\n r_0=r,\n v_0=v_1,\n omega_0_y=omega_1[1],\n parent_event=self)\n child_events.append(e_1)\n self._child_events = tuple(child_events)\n return self._child_events\n\n\nclass SimulatedBallCollisionEvent(BallCollisionEvent):\n collide_balls = staticmethod(collide_balls)\n def __init__(self, t, e_i, e_j):\n super().__init__(t, e_i, e_j)\n r_i, r_j = self._r_i, self._r_j\n v_i, v_j = self._v_i, self._v_j\n omega_i, omega_j = self._omega_i, self._omega_j\n y_loc = self._y_loc\n self._v_i_1, self._omega_i_1, self._v_j_1, self._omega_j_1 = \\\n self.collide_balls(\n r_i, v_i, omega_i, r_j, v_j, omega_j,\n self.ball_mass*abs(self._v_ij_y0)/6400\n )\n self._v_ij_y1 = dot(self._v_j_1 - self._v_i_1, y_loc)\n self._child_events = None\n\n\nclass FSimulatedBallCollisionEvent(SimulatedBallCollisionEvent):\n collide_balls = staticmethod(collide_balls_f90)\n\n\nclass BallsInContactEvent(PhysicsEvent):\n def __init__(self, e_i, e_j):\n assert e_i.t == e_j.t\n t = e_i.t\n T = min(e_i.T, e_j.T)\n super().__init__(t, T=T)\n self.i, self.j = e_i.i, e_j.i\n e_i._parent_event = self\n e_j._parent_event = self\n nme = e_i.next_motion_event\n while nme:\n nme._parent_event = self\n nme = nme.next_motion_event\n nme = e_j.next_motion_event\n while nme:\n nme._parent_event = self\n nme = nme.next_motion_event\n self._child_events = (e_i, e_j)\n @property\n def child_events(self):\n return self._child_events\n def __str__(self):\n return super().__str__()[:-1] + '''\n i,j = %s,%s\n e_i = %s\n e_j = %s\n>''' % (self.i, self.j, 
*self._child_events)\n","repo_name":"jzitelli/poolvr.py","sub_path":"poolvr/physics/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":29171,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"13688927127","text":"import sys\n\nsys.path.append('..')\nfrom common.model import Model\nfrom common.vocabulary import Vocabulary\n\nclass TopicWordsStat(object):\n \"\"\"TopicWords implements topic words tools.\n \"\"\"\n\n def __init__(self, model, vocabulary):\n self.model = model\n self.vocabulary = vocabulary\n\n def save(self, topic_words_file, accumulated_prob_threshold):\n \"\"\"Save the topic words to file.\n \"\"\"\n fp = open(topic_words_file, 'w')\n fp.write(self.get_topic_top_words(accumulated_prob_threshold))\n fp.close()\n\n def get_topic_top_words(self, accumulated_prob_threshold):\n \"\"\"Returns topics' top words.\n \"\"\"\n topic_top_words = []\n sparse_topic_word_dist = self.compute_topic_word_distribution()\n\n for topic, word_probs in enumerate(sparse_topic_word_dist):\n top_words = []\n top_words.append(str(topic))\n top_words.append(str(self.model.global_topic_hist[topic]))\n accumulated_prob = 0.0\n for word_prob in word_probs:\n top_words.append(\n self.vocabulary.word(word_prob[0]).encode('gbk', 'ignore'))\n top_words.append(str(word_prob[1]))\n accumulated_prob += word_prob[1]\n if accumulated_prob > accumulated_prob_threshold:\n break\n topic_top_words.append('\\t'.join(top_words))\n\n return '\\n'.join(topic_top_words)\n\n def compute_topic_word_distribution(self):\n \"\"\"Compute the topic word distribution p(w|z), indexed by topic z.\n \"\"\"\n # item fmt: z -> \n sparse_topic_word_dist = []\n [sparse_topic_word_dist.append([]) for topic in xrange(self.model.num_topics)]\n\n for word_id, ordered_sparse_topic_hist in \\\n self.model.word_topic_hist.iteritems():\n for non_zero in ordered_sparse_topic_hist.get_non_zeros():\n sparse_topic_word_dist[non_zero.topic].append(\n [word_id,\n (non_zero.count + self.model.hyper_params.word_prior) /\n (self.model.hyper_params.word_prior * self.vocabulary.size() +\n self.model.global_topic_hist[non_zero.topic])])\n\n for topic, word_probs in enumerate(sparse_topic_word_dist):\n word_probs.sort(cmp=lambda x,y:cmp(x[1], y[1]), reverse=True)\n\n return sparse_topic_word_dist\n","repo_name":"fandywang/mltk","sub_path":"mltk/lda/python/training/topic_words_stat.py","file_name":"topic_words_stat.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"18561985868","text":"# 1. 트리\n # 계층적인 자료의 표헌에 적합한 자료구조\n \n # 루트 노드: 맨 위 하나의 노드\n # 부모/자식 노드\n # 리프(단말) 노드: 자식 없는 노드 <-> 비단말노드\n # 트리 차수 3 2 1 0\n # 트리 높이(레벨) 4 3 2 1\n # 간선 n-1개\n\n# 2. 이진 트리\n # 모든 노드가 2개의 서브 트리 갖는 트리\n # 순환적(재귀함수의 개념/ 서브트리도 이진 트리)\n # 공집합이거나 루트와 왼쪽 서브 트리, 오른쪽 서브 트리로 구성된 노드들 집합\n \n # 포화 이진 트리\n # 트리의 각 레벨에 노드가 꽉 차있는 이진 트리 (2h-1개 노드)\n \n # 완전 이진 트리 (포화 이진 트리를 포함함)\n # 레벨 1부터 h-1까지는 노드가 모두 채워지고\n # 마지막 레벨 h에서는 노드가 순서대로 채워짐\n \n # 편향 이진 트리\n \n # 표현\n # 배열 표현법\n # 완전 이진 트리에 숫자 붙여서 리스트로 표현하기 편리\n # 노드 i의 부모 노드 인덱스 i//2\n # 노드 i의 왼쪽 자식 노드 인덱스 2i\n # 노드 i의 오른쪽 자식 노드 인덱스 2i+1\n \n # 링크 표현법 (화살표 없는 리스트는 None 가짐)\n \nclass TNode:\n def __init__(self,data,left,right):\n self.data = data\n self.left = left\n self.right = right\n \n# 3. 
이진트리의 연산\n # 순회 traversal\n # 트리에 속하는 모든 노드를 한 번씩 방문\n # 선형 자료구조는 순회 단순\n \n # 전위 순회 (부모 먼저)\n\ndef preorder(n):\n if n is not None:\n print(n.data,end=' ')\n preorder(n.left)\n preorder(n.right)\n \n # 중위 순회 (부모 중간)\n \ndef inorder(n):\n if n is not None:\n inorder(n.left)\n print(n.data,end=' ')\n inorder(n.right)\n \n # 후위 순회 (부모 마지막)\n \ndef postorder(n):\n if n is not None:\n postorder(n.left)\n postorder(n.right)\n print(n.data,end=' ')\n \n # 레벨 순회 (큐 사용)\n \nfrom Queue import *\n\ndef levelorder(root):\n queue = CircularQueue()\n queue.enqueue(root)\n while not queue.isEmpty():\n n = queue.dequeue()\n if n is not None:\n print(n.data,end=' ')\n queue.enqueue(n.left)\n queue.enqueue(n.right)\n \n # 노드 개수\n \ndef count_node(n):\n if n is None:\n return 0\n else:\n return 1 + count_node(n.left) + count_node(n.right)\n\n # 단말 노드 수\n \ndef count_leaf(n):\n if n is None:\n return 0\n elif n.left is None and n.right is None:\n return 1\n else:\n return count_leaf(n.left)+count_leaf(n.right)\n \n # 트리 높이\n \ndef calc_height(n):\n if n is None:\n return 0\n hLeft = calc_height(n.left)\n hRight = calc_height(n.right)\n if hLeft > hRight:\n return hLeft + 1\n else: return hRight + 1\n \n # 실행\n \nd = TNode('D',None,None)\ne = TNode('E',None,None)\nb = TNode('B',d,e)\nf = TNode('F',None,None)\nc = TNode('C',f,None)\nroot = TNode('A',b,c)\n\nprint('\\nIn-Order : ',end='')\ninorder(root)\nprint('\\nPre-Order : ',end='')\npreorder(root)\nprint('\\nPost-Order : ',end='')\npostorder(root)\nprint('\\nLevel-Order : ',end='')\nlevelorder(root)\nprint()\nprint(\"노드의 개수 = %d개\"%count_node(root))\nprint(\"단말의 개수 = %d개\"%count_leaf(root))\nprint(\"트리의 높이 = %d\"%calc_height(root))\n\n# 4. 이진트리의 응용 : 모르스 코드 결정트리\n # 모르스 부호 : 도트와 대시의 조합으로 구성된 메시지 전달 부호\n # 인코딩 O(1) , 디코딩 O(n)\n \n # 결정트리 : 여러 단계의 복잡한 조건을 갖는 문제에 대한 조건과 해결 방법을 트리 형태로 나타낸 것\n # 디코딩 O(log2n)\n \ntable = [('A','.-'),('B','-...'),('C','-.-.'),('D','-..'),('E','.'),('F', '..-.'),('G','--.'),('H','....'),('I','..'),('J','.---'),\n ('K','-.-'),('L','.-..'),('M','--'),('N','-.'),('O','---'),('P','.--.'),('Q','--.-'),('R','.-.'),('S','...'),\n ('T','-'),('U','..-'),('V','...-'),('W','.--'),('X','-..-'),('Y','-.--'),('Z','--..')]\n\ndef make_morse_tree():\n root = TNode(None,None,None)\n for tp in table:\n code = tp[1]\n node = root\n for c in code:\n if c == '.':\n if node.left == None:\n node.left = TNode(None,None,None)\n node = node.left\n elif c == '-':\n if node.right == None:\n node.right = TNode(None,None,None)\n node = node.right\n node.data = tp[0]\n return root\n\ndef decode(root,code):\n node = root\n for ch in code:\n if ch == '.': node = node.left\n elif ch == '-': node = node.right\n return node.data\n\ndef encode(ch):\n idx = ord(ch)-ord('A')\n return table[idx][1]\n\nmorseCodeTree = make_morse_tree()\nstr1 = input(\"입력 문장 : \")\nmlist = []\nfor ch in str1:\n code = encode(ch)\n mlist.append(code)\nprint(\"Morse Code : \",mlist)\nprint(\"Decoding : \",end='')\nfor code in mlist:\n ch = decode(morseCodeTree,code)\n print(ch,end='')\n\n# 5. 
힙트리\n # 완전이진트리 기반\n # 가장 큰(or 작은) 값 빠르게 찾기 위한 지료구조 -> 느슨한 정렬 상태\n # 최대 힙(부모 노드 키 값이 자식보다 크거나 같은 완전이지트리), 최소 힙(작거나 같은)\n \n # 삽입\n # Upheap -> O(log2n) : 새로운 항목을 힙의 마지막 노드 다음 위치에 삽입\n \n # 삭제\n # Downheap -> O(log2n) : 루트 노드 삭제 후 빈 자리에 마지막 노드 가져옴(->내림)\n \n # 배열로 구현\n \nclass MaxHeap:\n def __init__(self):\n self.heap = []\n self.heap.append(0)\n def size(self): return len(self.heap)-1\n def isEmpty(self): return self.size() == 0\n def Parent(self,i): return self.heap[i//2]\n def Left(self,i): return self.heap[i*2]\n def Right(self,i): return self.heap[i*2+1]\n def display(self,msg='힙 트리: '):\n print(msg,self.heap[1:])\n def insert(self,n):\n self.heap.append(n)\n i = self.size()\n while i != 1 and n>self.Parent(i):\n self.heap[i] = self.Parent(i)\n i=i//2\n self.heap[i] = n\n def delete(self):\n parent = 1\n child = 2\n if not self.isEmpty():\n hroot = self.heap[1]\n last = self.heap[self.size()]\n while child <= self.size():\n if child= self.heap[child]:\n break\n self.heap[parent] = self.heap[child]\n parent=child\n child*=2\n self.heap[parent]=last\n self.heap.pop(-1)\n return hroot\n \nheap = MaxHeap()\ndata = [2,5,4,8,9,3,7,3]\nprint(\"[삽입 연산] : \"+str(data))\nfor elem in data:\n heap.insert(elem)\nheap.display('[ 삽입 후 ]: ')\nheap.delete()\nheap.display('[ 삭제 후 ]: ')\nheap.delete()\nheap.display('[ 삭제 후 ]: ')\n\n# 6. 힙의 응용: 허프만 코드\n # 문자 빈도따라 다른 길이\n \ndef make_tree(freq):\n heap=MinHeap()\n for n in freq:\n heap.insert(n)\n for i in range(0,n):\n e1=heap.delete()\n e2=heap.delete()\n heap.insert(e1+e2)\n print(\" (%d+%d)\"%(e1,e2))\n \nlabel = ['E','T','N','I','S']\nfreq = [15,12,8,6,4]\nmake_tree(freq)\n","repo_name":"ekgus9/Basic-python","sub_path":"data structure/8Tree.py","file_name":"8Tree.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21767785360","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom BitVector import *\nimport time, os\n\n\n# %%\nSbox = (\n 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,\n 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,\n 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,\n 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,\n 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,\n 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,\n 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,\n 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,\n 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,\n 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,\n 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,\n 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,\n 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,\n 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,\n 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,\n 0x8C, 0xA1, 0x89, 
0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,\n)\n\nInvSbox = (\n 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,\n 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,\n 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,\n 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,\n 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,\n 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,\n 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,\n 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,\n 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,\n 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,\n 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,\n 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,\n 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,\n 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,\n 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,\n 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,\n)\n\nMixer = [\n [BitVector(hexstring=\"02\"), BitVector(hexstring=\"03\"), BitVector(hexstring=\"01\"), BitVector(hexstring=\"01\")],\n [BitVector(hexstring=\"01\"), BitVector(hexstring=\"02\"), BitVector(hexstring=\"03\"), BitVector(hexstring=\"01\")],\n [BitVector(hexstring=\"01\"), BitVector(hexstring=\"01\"), BitVector(hexstring=\"02\"), BitVector(hexstring=\"03\")],\n [BitVector(hexstring=\"03\"), BitVector(hexstring=\"01\"), BitVector(hexstring=\"01\"), BitVector(hexstring=\"02\")]\n]\n\nInvMixer = [\n [BitVector(hexstring=\"0E\"), BitVector(hexstring=\"0B\"), BitVector(hexstring=\"0D\"), BitVector(hexstring=\"09\")],\n [BitVector(hexstring=\"09\"), BitVector(hexstring=\"0E\"), BitVector(hexstring=\"0B\"), BitVector(hexstring=\"0D\")],\n [BitVector(hexstring=\"0D\"), BitVector(hexstring=\"09\"), BitVector(hexstring=\"0E\"), BitVector(hexstring=\"0B\")],\n [BitVector(hexstring=\"0B\"), BitVector(hexstring=\"0D\"), BitVector(hexstring=\"09\"), BitVector(hexstring=\"0E\")]\n]\n\n\n# %%\nAES_modulus = BitVector(bitstring='100011011')\n\n\n# %%\nshow_steps = False\n\n\n# %%\nmixer_multiplication = [[[None]*4 for i in range(4)] for j in range(256)]\ninvmixer_multiplication = [[[None]*4 for i in range(4)] for j in range(256)]\nfor i in range(256):\n bvi = BitVector(intVal=i,size=8)\n for j in range(4):\n for k in range(4):\n mixer_multiplication[i][j][k] = Mixer[j][k].gf_multiply_modular(bvi, AES_modulus,8)\n invmixer_multiplication[i][j][k] = InvMixer[j][k].gf_multiply_modular(bvi, AES_modulus,8)\n\n\n# %%\ndef sub_byte(x):\n return BitVector(intVal=Sbox[x.intValue()],size=8)\n\n\n# %%\ndef rot_sub_word(finp):\n x = finp.deep_copy()\n x << 8\n ret = BitVector(size=0)\n for i in range(4):\n ret += sub_byte(x[i*8:(i+1)*8])\n return ret\n\n\n# %%\ndef print_matrix(x):\n for i in range(4):\n for j in range(4):\n print(x[i][j].get_bitvector_in_hex(),end=' ')\n 
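# row i of the 4x4 state printed as hex; break the line before the next row\n 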
print()\n\n\n# %%\nkey_ascii = input(\"Key: \")\n# key_ascii = \"BUET CSE16 Batch\" # \"Thats my Kung Fu\"\nkey_ascii = key_ascii.ljust(16, '0')[0:16]\nprint(\"Key:\")\nprint(key_ascii)\n\nrc = BitVector(intVal=1,size=8)\nw = []\nfor i in range(4):\n w += [BitVector(textstring=key_ascii[i*4:(i+1)*4])]\nfor i in range(4,44):\n if i&3 == 0:\n rcon = rc.deep_copy()\n rcon.pad_from_right(24)\n w += [w[i-4] ^ rot_sub_word(w[i-1]) ^ rcon]\n rc = BitVector(intVal=2,size=8).gf_multiply_modular(rc, AES_modulus, 8)\n else:\n w += [w[i-4] ^ w[i-1]]\n\nround_key = []\nfor i in range(11):\n rk = [[None]*4 for i in range(4)]\n for j in range(i*4,(i+1)*4):\n for k in range(4):\n rk[k][j-i*4] = w[j][k*8:(k+1)*8]\n if show_steps:\n print(\"round {} key:\".format(i))\n print_matrix(rk)\n round_key += [rk]\n\n\n# %%\ndef add_matrices(x,y):\n ret = [[None]*4 for i in range(4)]\n for i in range(4):\n for j in range(4):\n ret[i][j] = x[i][j] ^ y[i][j]\n return ret\n\n\n# %%\ndef sub_bytes(x):\n x[:] = [[sub_byte(cell) for cell in row] for row in x] # can't change the reference, must change in place, hence x[:] instead x\n\n\n# %%\ndef shift_rows(x):\n for i in range(4):\n x[i] = x[i][i:]+x[i][:i]\n\n\n# %%\ndef mix_cols(x):\n ret = [[None]*4 for i in range(4)]\n for i in range(4):\n for j in range(4):\n ret[i][j] = BitVector(size=8)\n for k in range(4):\n ret[i][j] ^= mixer_multiplication[x[k][j].intValue()][i][k]\n return ret\n\n\n# %%\ndef ascii_to_matrix(text_ascii):\n assert(len(text_ascii) == 16)\n mat = [[None]*4 for i in range(4)]\n for i in range(16):\n mat[i&3][i>>2] = BitVector(textstring=text_ascii[i])\n return mat\n\n\n# %%\ndef matrix_copy(x):\n mat = [[None]*4 for i in range(4)]\n for i in range(4):\n for j in range(4):\n mat[i][j] = x[i][j].deep_copy()\n return mat\n\n\n# %%\ndef encrypt_matrix(textmat):\n ciphermat = matrix_copy(textmat)\n for i in range(11):\n if i != 0:\n sub_bytes(ciphermat)\n shift_rows(ciphermat)\n if i != 10:\n ciphermat = mix_cols(ciphermat)\n ciphermat = add_matrices(ciphermat,round_key[i])\n if show_steps:\n print(\"encryption round {} output:\".format(i))\n print_matrix(ciphermat)\n print()\n return ciphermat\n\n\n# %%\ndef invsub_byte(x):\n return BitVector(intVal=InvSbox[x.intValue()],size=8)\n\n\n# %%\ndef invsub_bytes(x):\n x[:] = [[invsub_byte(cell) for cell in row] for row in x] # can't change the reference, must change in place, hence x[:] instead x\n\n\n# %%\ndef invshift_rows(x):\n for i in range(4):\n x[i] = x[i][4-i:]+x[i][:4-i]\n\n\n# %%\ndef invmix_cols(x):\n ret = [[None]*4 for i in range(4)]\n for i in range(4):\n for j in range(4):\n ret[i][j] = BitVector(size=8)\n for k in range(4):\n ret[i][j] ^= invmixer_multiplication[x[k][j].intValue()][i][k]\n return ret\n\n\n# %%\ndef matrix_to_ascii(mat):\n ret = \"\"\n for i in range(16):\n ret += mat[i&3][i>>2].get_bitvector_in_ascii()\n return ret\n\n\n# %%\ndef decrypt_matrix(ciphermat):\n deciphermat = matrix_copy(ciphermat)\n for i in range(11):\n if i != 0:\n invshift_rows(deciphermat)\n invsub_bytes(deciphermat)\n deciphermat = add_matrices(deciphermat,round_key[10-i])\n if i != 0 and i != 10:\n deciphermat = invmix_cols(deciphermat)\n if show_steps:\n print(\"decryption round {} output:\".format(i))\n print_matrix(deciphermat)\n print()\n return deciphermat\n\n\n# %%\ndef encrypt_ascii(ascii_large):\n l = len(ascii_large)\n ascii_large += \"\".rjust(16, ' ')\n ret = []\n for i in range(0,l,16):\n ret += [encrypt_matrix(ascii_to_matrix(ascii_large[i:i+16]))]\n return ret\n\n\n# %%\ndef 
decrypt_ciphers(ciphers):\n ret = \"\"\n for cipher in ciphers:\n ret += matrix_to_ascii(decrypt_matrix(cipher))\n return ret\n\n\n# %%\ntext_ascii = input(\"Plain text: \")\n# text_ascii = \"WillGraduateSoon\" # \"Two One Nine Two\"\nprint(\"Plain Text:\")\nprint(text_ascii)\nstart_time = time.process_time()\nciphers = encrypt_ascii(text_ascii)\nend_time = time.process_time()\nprint(\"Cipher text:\")\nfor cipher in ciphers:\n print_matrix(cipher)\nprint(\"Encryption time: {}\".format(end_time - start_time))\nprint(\"Deciphered text:\")\nstart_time = time.process_time()\nprint(decrypt_ciphers(ciphers))\nend_time = time.process_time()\nprint(\"Decryption time: {}\".format(end_time - start_time))\n\n\n# %%\ndef bytes_from_file(filename, chunksize=8192):\n with open(filename, \"rb\") as f:\n while True:\n chunk = f.read(chunksize)\n if chunk:\n yield from chunk\n else:\n break\n\n\n# %%\ninpfile = input(\"Enter filepath: \")\n# inpfile = \"../sample.pdf\"\nfilename, file_extension = os.path.splitext(inpfile)\ninput_byteara = None\nwith open(inpfile, \"rb\") as f:\n input_byteara = bytearray(f.read())\nprint(len(input_byteara))\n\n\n# %%\ndef matrix_to_bytearray(x):\n ret = bytearray()\n for j in range(4):\n for i in range(4):\n ret.append(x[i][j].intValue())\n return ret\n\n\n# %%\ndef encrypt_byteara(x):\n ret = bytearray()\n mat = [[None]*4 for i in range(4)]\n curbyte = 0\n for b in x:\n mat[curbyte&3][curbyte>>2] = BitVector(intVal=b)\n if curbyte == 15:\n ret += matrix_to_bytearray(encrypt_matrix(mat))\n curbyte = 0\n else:\n curbyte += 1\n if curbyte != 0:\n ret += matrix_to_bytearray(encrypt_matrix(mat))\n ret += len(x).to_bytes(16, byteorder='big')\n return ret\n\n\n# %%\nencrypted_byteara = encrypt_byteara(input_byteara)\nwith open(\"encrypted.bin\", \"wb\") as f:\n f.write(encrypted_byteara)\nprint(len(encrypted_byteara))\n\n\n# %%\ndef decrypt_byteara(x):\n filesize = int.from_bytes(x[-16:], byteorder='big')\n ret = bytearray()\n mat = [[None]*4 for i in range(4)]\n curbyte = 0\n for b in x:\n mat[curbyte&3][curbyte>>2] = BitVector(intVal=b)\n if curbyte == 15:\n ret += matrix_to_bytearray(decrypt_matrix(mat))\n curbyte = 0\n else:\n curbyte += 1\n assert(curbyte == 0)\n print(filesize)\n ret = ret[:filesize]\n return ret\n\ndecrypted_byteara = decrypt_byteara(encrypted_byteara)\n\n\n# %%\nassert(input_byteara == decrypted_byteara)\nwith open(\"decrypted\"+file_extension, \"wb\") as f:\n f.write(decrypted_byteara)\n\n\n# %%\nsbox = [None]*256\ninvsbox = [None]*256\nfor i in range(256):\n sbox[i] = BitVector(intVal=i, size=8)\n if i != 0:\n sbox[i] = sbox[i].gf_MI(AES_modulus, 8)\n sbox[i] = sbox[i].gf_multiply_modular(BitVector(intVal=31, size=8), BitVector(intVal=257, size=9), 8) ^ BitVector(intVal=99, size=8)\n invsbox[sbox[i].intValue()] = BitVector(intVal=i,size=8)\n\nprint(\"\\nSbox:\")\nfor i in range(256):\n assert(sbox[i].intValue() == Sbox[i])\n print(sbox[i].get_bitvector_in_hex(),end='\\n' if i%16 == 15 else ' ')\nprint(\"\\nInvSbox:\")\nfor i in range(256):\n assert(invsbox[i].intValue() == InvSbox[i])\n print(invsbox[i].get_bitvector_in_hex(),end='\\n' if i%16 == 15 else ' ')\n\n","repo_name":"ShafinKhadem/Computer-Networks-assignments","sub_path":"AES implementation/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":12457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24645364444","text":"import sys, os\nfrom typing import List, Tuple, 
Union\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nfrom programs.engine.discrete_gaussian_utility import RationalScalarBernoulliExp\n\ndef multinomial_randomized_response(*, true_category, possible_categories: Union[List, Tuple], epsilon: Tuple[int, int] = (0, 1), rng):\n \"\"\"\n This is an implimentation of the multinomial randomized response mechanism provided by Wang, Wu, and Hu in \"Using\n Randomized Response for Differential Privacy Preserving Data Collection\" (2016) without using floating point operations.\n Inputs:\n true_category: the true category of a categorical variable of a given record\n possible_categories: the universe of possible values said categorical variable can take\n epsilon: this function can be used in a mechanism that satisfies epsilon[0]/epsilon[1] local DP; see Wang, Wu, and Hu (2016) for more detail\n rng: pseudo-random number generator (see programs.engine.rngs)\n Output:\n res: an element of possible_categories\n \"\"\"\n assert true_category in possible_categories, \"true_category is not in possible_categories\"\n true_index = [k for k, x in enumerate(possible_categories) if true_category == x][0]\n while True:\n x = rng.integers(low=0, high=len(possible_categories))\n if (x == true_index) or (RationalScalarBernoulliExp(gamma=epsilon, rng=rng) == 1):\n # Given some j in {0, ..., len(possible_categories)-1} such that j != true_index, at this point x satisfies\n # P(x == j) = 1/len(possible_categories) * exp(-epsilon) and also P(x == true_index) = 1/len(possible_categories).\n # Thus, P(x == j)/P(x == true_index) = exp(-epsilon). Since this also holds for each such j, and this function also\n # produces an output with probability one, this implies the output probabilities are the same as the ones provided\n # in (Wang, Wu and Hu; 2016).\n return possible_categories[x]\n","repo_name":"uscensusbureau/DAS_2020_DHC_Production_Code","sub_path":"das_decennial/scripts/multinomial_mechanism.py","file_name":"multinomial_mechanism.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21643216908","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n# Utilities for our models.\n\n\ndef balanced_class_weights(y):\n bincount = np.asarray((np.sum(y == 0), np.sum(y == 1)))\n return len(y) / (2. 
* bincount)\n\n\ndef balanced_weights(y):\n wclass = balanced_class_weights(y)\n return np.asarray([wclass[i] for i in y])\n\n\ndef choose_threshold(s, y):\n #return np.amin(s[y == 1])\n #return np.median(s)\n si = np.argsort(s)\n s = s[si]\n y = y[si]\n #sy = sorted(zip(s, y))\n #s = [x for x, _ in sy]\n #y = [x for _, x in sy]\n\n maxF1 = -np.inf\n bestTh = 0\n\n for i in xrange(1, len(y)):\n if y[i] != y[i-1]:\n TP = np.sum(y[i:] == 1)\n FP = np.sum(y[i:] == 0)\n FN = np.sum(y[:i] == 1)\n F1 = (2.*TP)/(2.*TP+FN+FP+1e-10)\n if F1 > maxF1:\n maxF1 = F1\n bestTh = (s[i]+s[i-1])/2.\n\n return bestTh\n","repo_name":"rpmcruz/ranking-imbalance","sub_path":"binary/src/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"17051639771","text":"#region IMPORTS\nfrom base64 import b64decode\nimport pandas as pd\nimport streamlit as st\nfrom streamlit_quill import st_quill as text_editor\nimport os\nif os.name == 'nt':\n\timport win32clipboard\n\tfrom PIL import Image\n\tfrom io import BytesIO\n\tdef send_to_clipboard(img_path):\n\t\timage = Image.open(img_path)#path\n\t\toutput = BytesIO()\n\t\timage.convert(\"RGB\").save(output, \"BMP\")\n\t\tdata = output.getvalue()[14:]\n\t\t#print(data)\n\t\toutput.close()\n\t\twin32clipboard.OpenClipboard()\n\t\twin32clipboard.EmptyClipboard()\n\t\twin32clipboard.SetClipboardData(win32clipboard.CF_DIB, data)\nimport strings as literais\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport re\n\n#endregion\n\nPATH_PROFILE = \"--user-data-dir=C:\\\\Users\\\\Victor\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Profile 2\"\nst.set_page_config(\n\t page_title='Unidades',\n\t layout='centered',\n\t initial_sidebar_state='expanded',\n\t page_icon=literais.apple_dragon_icon, #\"favicon.png\" #expanded / collapsed\n\t menu_items={\n\t\t 'Get help': 'https://github.com/jvcss',\n\t\t 'Report a bug': \"https://github.com/jvcss\",\n\t\t 'About': \"App para automação whatsapp\"\n\t}\n)\n\nif \"contatos_salvos\" not in st.session_state: st.session_state[\"contatos_salvos\"] = pd.DataFrame([], columns=['contatos'])\n\nif \"contatos_list\" not in st.session_state: st.session_state[\"contatos_list\"] = []\n\nif \"ultima_conversa\" not in st.session_state: st.session_state[\"ultima_conversa\"] = []\n\nif \"black_list\" not in st.session_state: st.session_state[\"black_list\"] = []\n\ndef html_to_wppedit(raw_html):\n\tnegrito = re.compile('(|)')\n\tclean_negrito_text = negrito.sub('*', raw_html)\n\n\titalico = re.compile('(|)')\n\tclean_negrito_text = italico.sub('_', clean_negrito_text)\n\n\tcutted = re.compile('(|)')\n\tclean_negrito_text = cutted.sub('~', clean_negrito_text)\n\n\tmonoletter = re.compile(r'(|)')\n\tclean_negrito_text = monoletter.sub('```', clean_negrito_text)\n\n\tCLEANR = re.compile('<.*?>')\n\tcleantext = re.sub(CLEANR, '', clean_negrito_text)\n\treturn cleantext\n\ndef listar_nomes_desc(content):\n\tdesc_ = re.findall(r'_1qB8f\">(.*?)',texto)\n\texcept Exception as e:\n\t\tprint(f'Não achou nenhuma ultima conversa: {e}')\n\t\treturn \"\"\n\tfinally:\n\t\tif 
info_last_talk:\n\t\t\treturn info_last_talk\n\t\telse: return \"\"\n\n\ndef desistir_localizado(contato, texto):\n\t
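# Usage sketch (the argument values here are hypothetical):\n\t#   desistir_localizado('+55 11 9...', page_html)\n\t# returns contato when one of the captured messages contains 'Desistir',\n\t# and False otherwise (see the loop below).\n\t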
ctt = re.escape(contato)  # escapes every regex metacharacter, not only '+' as replace('+', '\\+') did\n\tfrases = re.findall(fr'{ctt}: \">
    (.*?)<', texto)\n\t#\n\tfor cada in frases:\n\t\tprint(f'CADA UNIDADE {cada}')\n\tstr_match = [s for s in frases if 'Desistir' in s]\n\t#print(f'PESSOA {contato} ENCONTRADO--> {str_match}\\n\\n')\n\t#print(f'FRASES VISTA--> {frases}\\n\\n')\n\tif str_match != []:\n\t#\tprint(f'AQUI--> {frases}')\n\t\treturn contato\n\telse:\n\t\treturn False\n\n\n\ndef ui_login():\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\twait = WebDriverWait(driver, 999)\n\n\tpalavra_beta = wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"side\"]/header/div[1]/div[2]/b')))\n\n\tif palavra_beta:\n\t\treturn palavra_beta.text\n\telse:\n\t\treturn 'error'\n\n\n\ndef ui_lista_chat():\n\tchat_ctts = []\n\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\twait = WebDriverWait(driver, 999)\n\n\tbtn_search = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/label/div/div[2]')))\n\tbtn_search.send_keys(\"\")\n\t\n\tbtn_search.send_keys(Keys.ARROW_DOWN)\n\ttime.sleep(1)\n\t#\n\t#chat_bloco.send_keys(Keys.ARROW_DOWN) //*[@id=\"pane-side\"]/div[2]\n\t#try:\n\t#chat_bloco = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"pane-side\"]/div[1]')))\n\t#except:\n\tchat_bloco = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"pane-side\"]/div[2]')))\n\t\n\tctt_anterior = ''\n\t\n\tis_chat_not_end = True\n\twhile is_chat_not_end:\n\n\t\tctt_selected = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\t\tselected_name = nome_localizado(str(ctt_selected.get_attribute('innerHTML')))\n\n\t\tchat_ctts.append(selected_name.group(1))\n\n\t\tif selected_name.group(1) != ctt_anterior:\n\t\t\tctt_anterior = selected_name.group(1)\n\t\telse:\n\t\t\tis_chat_not_end = False\n\t\tprint(f'\\n\\n\\n\\nNOME: {selected_name.group(1)}\\n\\n\\n')\n\t\ttime.sleep(.1)\n\t\t\n\t\tchat_bloco.send_keys(Keys.ARROW_DOWN)\n\t\t\n\treturn chat_ctts\n\n\ndef ui_lista_contatos():\n\tcontatos = []\n\t\n\tcontato_anterior = ''\n\tdescricao_anterior = ''\n\tfim_da_lista_contatos = True\n\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\twait = WebDriverWait(driver, 999)\n\n\ticone_ctts = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/header/div[2]/div/span/div[2]/div')))\n\ticone_ctts.click()\n\n\tctt_blc = wait.until(EC.presence_of_element_located((By.XPATH , '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[1]/span/div[1]/span/div[1]/div[2]/div[2]')))\n\tctt_blc.send_keys(Keys.ARROW_DOWN)\n\tctt_blc.send_keys(Keys.ARROW_UP)\n\twhile fim_da_lista_contatos:\n\t\ttime.sleep(.1)\n\t\t\n\t\tctt_selecionado = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\n\t\tnome_selecionado = nome_localizado(str(ctt_selecionado.get_attribute('innerHTML')))\n\n\t\tdescricao_nome_selecionado = listar_nomes_desc(str(ctt_selecionado.get_attribute('innerHTML')))\n\n\t\tcontatos.append(str(nome_selecionado.group(1)))\n\t\t#lista_inter += nome_selecionado\n\n\t\tif descricao_nome_selecionado:\n\t\t\tprint(f'\\n\\n >{nome_selecionado[0]} tem descrição> {descricao_nome_selecionado[0]}')\n\t\telse:\n\t\t\tdescricao_nome_selecionado.append(str(f'descrição 
ausente-{str(nome_selecionado.group(1))[:2]}'))\n\t\t\tprint(f\"\\n\\n >{nome_selecionado[0]} SEM descricao> {str(f'descrição ausente-{str(nome_selecionado.group(1))}')}\")\n\t\t\n\t\tif nome_selecionado.group(1) != contato_anterior or descricao_nome_selecionado[0] != descricao_anterior:\n\t\t\tcontato_anterior = nome_selecionado.group(1)\n\t\t\tdescricao_anterior = descricao_nome_selecionado[0]\n\t\telse:\n\t\t\tfim_da_lista_contatos = False\n\t\tctt_blc.send_keys(Keys.ARROW_DOWN)\n\n\tback_to_main = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[1]/span/div[1]/span/div[1]/header/div/div[1]/button')))\n\tback_to_main.click()\n\tcontatos = list(dict.fromkeys(contatos))\n\t\n\treturn contatos\n\n\n\n\n\n\n\ndef ui_buscar(contatos_):\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)#options=opts ---headless\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\twait = WebDriverWait(driver, 60)\n\tcontagem = 0\n\tate_o_fim = True\n\twhile ate_o_fim:\n\t\tif contagem >= len(contatos_['contatos']) - 1: ate_o_fim = False\n\t\ttry:\n\t\t\tdriver.get('https://web.whatsapp.com/')\n\t\t\tbtn_search = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/label/div/div[2]')))\n\t\t\tbtn_search.click()\n\t\t\tbtn_search.send_keys(contatos_['contatos'][contagem])\n\t\t\ttime.sleep(0.51)\n\t\t\tbtn_search.send_keys(Keys.ARROW_DOWN)\n\t\t\tctt_selected = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\t\t\tctt_selected.click()\n\t\texcept Exception as e:\n\t\t\tst.error(f'{e}')\n\t\tfinally:\n\t\t\ttime.sleep(0.51)\n\t\t\tbtn_clear = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/button')))\n\t\t\tbtn_clear.click()\n\t\t\tcontagem +=1\n\t\t\ttime.sleep(.21)\n\tdriver.quit()\n\n\n\n\n\n\n\n\n\n\ndef ui_enviar_imagem(contatos_,mensagem):\n\tate_o_fim = True\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\tlistar_imgs = re.findall( r'src=\"data:image/(.*?);base64,(.*?)\"', fr'{mensagem}')\n\ttexto_p_enviar = html_to_wppedit(mensagem)\n\twait = WebDriverWait(driver, 60)\n\tcontagem = 0\n\tlista_contatos_visto = []\n\n\twhile ate_o_fim:\n\t\tif contagem >= len(contatos_['contatos']) - 1: ate_o_fim = False\n\t\ttry:\n\t\t\tdriver.get('https://web.whatsapp.com/')\n\t\t\tbtn_search = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/label/div/div[2]')))\n\t\t\tbtn_search.click()\n\t\t\tbtn_search.send_keys(contatos_['contatos'][contagem])\n\t\t\ttime.sleep(0.51)\n\t\t\tbtn_search.send_keys(Keys.ARROW_DOWN)\n\t\t\tctt_selecionado = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\t\t\ttime.sleep(.51)\n\t\t\tctt_selecionado.click()\n\t\t\tinfo_ultimo_contato = extrair_info_ultima_conversa(str(ctt_selecionado.get_attribute('innerHTML')))\n\n\t\t\tlista_contatos_visto.append(info_ultimo_contato.group(1))\n\n\t\t\ttela_atual = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"main\"]/div[3]/div/div[2]/div[3]')))\n\t\t\t\n\n\t\t\tse_desistente_ = desistir_localizado(contatos_['contatos'][contagem],tela_atual.get_attribute('innerHTML'))\n\n\t\t\tif bool(listar_imgs):\n\t\t\t\tespaco_enviar = 
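# Illustrative note, not part of the original file: the contact lists above are
# de-duplicated with dict.fromkeys, which keeps first-seen order (guaranteed on
# Python 3.7+, where dicts preserve insertion order):
contatos_exemplo = ['Ana', 'Bruno', 'Ana', 'Carla', 'Bruno']
assert list(dict.fromkeys(contatos_exemplo)) == ['Ana', 'Bruno', 'Carla']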
wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"main\"]/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]')))\n\t\t\t\tespaco_enviar.send_keys('')\n\t\t\t\tactions = ActionChains(driver)\n\t\t\t\tactions.key_down(Keys.CONTROL).send_keys('v').key_up(Keys.CONTROL).perform()\n\t\t\t\ttime.sleep(3)\n\t\t\t\tespaco_enviar = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[2]/span/div[1]/span/div[1]/div/div[2]/div/div[1]/div[3]/div/div/div[2]/div[1]/div[2]')))\n\t\t\t\tespaco_enviar.send_keys(texto_p_enviar) #texto para enviar\n\t\t\t\ttime.sleep(5)\n\t\t\t\tbotao_enviar = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"app\"]/div[1]/div[1]/div[2]/div[2]/span/div[1]/span/div[1]/div/div[2]/div/div[2]/div[2]/div/div')))\n\t\t\t\tbotao_enviar.click()\n\t\t\telse:\n\t\t\t\tespaco_enviar = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"main\"]/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]')))\n\t\t\t\tespaco_enviar.send_keys('')\n\t\t\t\tespaco_enviar.send_keys(texto_p_enviar)\n\t\t\t\tbotao_enviar_menor = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"main\"]/footer/div[1]/div/span[2]/div/div[2]/div[2]/button/span')))\n\t\t\t\tbotao_enviar_menor.click()\n\n\t\texcept Exception as e:\n\t\t\tprint(f'envia_msg -- ERROR {e}')\n\t\tfinally:\n\t\t\ttime.sleep(1)\n\n\t\t\t\n\n\t\t\tbtn_clear = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/button')))\n\n\t\t\tbtn_clear.click()\n\n\t\t\tcontagem +=1\n\n\t\t\ttime.sleep(.5)\n\n\ttime.sleep(1)\n\tdriver.quit()\n\n\n\n\n\n\n\n\n\ndef ui_ultima_conversa( contatos_):#dataframe['contatos'], text-img.txt\n\tate_o_fim = True\n\tlista_contatos_info = []\n\tlista_contatos_ = []\n\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)#options=opts ---headless\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\t\n\twait = WebDriverWait(driver, 60)\n\tcontagem = 0\n\n\twhile ate_o_fim:\n\t\tif contagem >= len(contatos_['contatos']) - 1: ate_o_fim = False\n\t\ttry:#abrir janela clicar em pesquisa. 
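# Editor's sketch under assumptions (selenium installed; the element passed in is a
# placeholder, not WhatsApp Web's real markup): the image send above works by focusing
# the composer and synthesizing Ctrl+V so the OS clipboard image becomes an attachment.
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

def colar_da_area_de_transferencia(driver, composer):
    """Paste the OS clipboard contents into an already-located editable element."""
    composer.click()  # focus the message box first
    ActionChains(driver).key_down(Keys.CONTROL).send_keys('v').key_up(Keys.CONTROL).perform()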
esperar item carregado\n\t\t\tdriver.get('https://web.whatsapp.com/')\n\n\t\t\tbtn_search = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/label/div/div[2]')))\n\t\t\tbtn_search.click()\n\t\t\tbtn_search.send_keys(contatos_['contatos'][contagem])\n\t\t\ttime.sleep(.81)\n\t\t\tbtn_search.send_keys(Keys.ARROW_DOWN)\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(f'envia_msg -- ERROR {e}')\n\t\tfinally:\n\t\t\tctt_selected = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\t\t\tctt_selected.click()\n\t\t\tselected_name = nome_localizado(str(ctt_selected.get_attribute('innerHTML')))\n\t\t\tinfo_ultimo_contato = extrair_info_ultima_conversa(str(ctt_selected.get_attribute('innerHTML')))\n\t\t\t\n\t\t\tlista_contatos_info.append(str(info_ultimo_contato.group(1)))\n\t\t\tlista_contatos_.append(selected_name.group(1))\n\n\t\t\t\n\n\t\t\tprint(f'{selected_name.group(1)} < ULTIMA CONVERSA > {str(info_ultimo_contato.group(1))}')\n\t\t\ttime.sleep(.51)\n\n\t\t\tbtn_clear = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/button')))\n\t\t\tbtn_clear.click()\n\t\t\tcontagem +=1\n\t\t\ttime.sleep(1)\n\tdriver.quit()\n\treturn lista_contatos_, lista_contatos_info\n\n\nESSA_FUNCAO_TEM_PROBLEMA_DE_RUN_TIME = \"\"\"\ndef ui_ultima_conversa_chat( contatos_):\n\tate_o_fim = True\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\twait = WebDriverWait(driver, 60)\n\tcontagem = 0\n\tlista_conta_contatos = list(contatos_['contatos'])\n\tlista_contatos_info = []\n\tlista_contatos_ = []\n\tbtn_search = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/label/div/div[2]')))\n\tbtn_search.click()\n\tchat_bloco = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"pane-side\"]/div[2]')))\n\tbtn_search.send_keys(Keys.ARROW_DOWN)\n\twhile ate_o_fim:\n\t\tif contagem >= len(lista_conta_contatos) - 1: ate_o_fim = False\n\t\tctt_selected = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\t\tctt_selected.click()\n\t\tselected_name = nome_localizado(str(ctt_selected.get_attribute('innerHTML')))\n\t\tinfo_ultimo_contato = extrair_info_ultima_conversa(str(ctt_selected.get_attribute('innerHTML')))\n\t\tprint(f'\\n\\n\\n{selected_name.group(1)} - {info_ultimo_contato.group(1)}\\n\\n')\n\n\t\tif any(selected_name.group(1) in s for s in contatos_['contatos']):\n\t\t\tlista_contatos_info.append(str(info_ultimo_contato.group(1)))\n\t\t\tlista_contatos_.append(selected_name.group(1))\n\t\telse:\n\t\t\t#caso contato do chat nao esteja na lista de todos os contatos anteriores\n\t\t\tlista_contatos_info.append('')\n\t\t\tlista_contatos_.append(selected_name.group(1))\n\n\t\t\tlista_conta_contatos.append(selected_name.group(1))\n\n\t\tcontagem +=1\n\t\tchat_bloco.send_keys(Keys.ARROW_DOWN)\n\t\t#time.sleep(.51)\n\tdataframe = pd.DataFrame()#st.session_state[\"contatos_salvos\"])\n\tdataframe['contatos'] = lista_contatos_\n\tdataframe['ultima conversa'] = lista_contatos_info\n\t\n\t#st.session_state[\"contatos_salvos\"] = pd.DataFrame(dataframe)\n\tdriver.quit()\n\treturn pd.DataFrame(dataframe)\"\"\"\n\n\n\n\ndef ui_ultima_conversa_rapida( contatos_):#dataframe\n\tate_o_fim = True\n\topts = Options()\n\topts.add_argument(PATH_PROFILE)\n\tdriver = webdriver.Chrome(options=opts)\n\tdriver.get('https://web.whatsapp.com/')\n\tdriver.maximize_window()\n\t\n\twait = 
WebDriverWait(driver, 60)\n\tcontagem = 0\n\tlista_contatos_info = []\n\n\twhile ate_o_fim:\n\t\ttry:\n\t\t\tif contagem >= len(contatos_['contatos']) - 1: ate_o_fim = False\n\n\t\t\tbtn_search = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/label/div/div[2]')))\n\t\t\tbtn_search.click()\n\t\t\tbtn_search.send_keys(contatos_['contatos'][contagem])\n\t\t\ttime.sleep(1)\n\t\t\tbtn_search.send_keys(Keys.ARROW_DOWN)\n\t\t\tctt_selected = wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2_TVt')))\n\t\t\ttime.sleep(.81)\n\t\t\tctt_selected.click()\n\t\t\ttry:\n\t\t\t\tinfo_ultimo_contato = extrair_info_ultima_conversa(str(ctt_selected.get_attribute('innerHTML')))\n\t\t\texcept:\n\t\t\t\tprint('Falha Na Ultima Conversa {e}')\n\n\t\t\t#print(f'\\n\\n\\n{contatos_[\"contatos\"][contagem]} - {info_ultimo_contato.group(1)}\\n\\n')\n\t\t\tif type(info_ultimo_contato)!=str:\n\t\t\t\tlista_contatos_info.append(info_ultimo_contato.group(1))\n\t\t\telse:\n\t\t\t\tlista_contatos_info.append(info_ultimo_contato)\n\t\t\t\n\t\t\ttela_atual = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"main\"]/div[3]/div/div[2]/div[3]')))\n\t\t\tse_desistente_ = desistir_localizado(contatos_['contatos'][contagem],tela_atual.get_attribute('innerHTML'))\n\n\t\t\tprint(f'DESISTENTE {se_desistente_}\\n\\n')\n\t\t\t\n\t\t\tbtn_clear = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/button')))\n\t\t\ttime.sleep(2)\n\t\t\tbtn_clear.click()\n\t\t\tcontagem +=1\n\t\texcept Exception as e:\n\t\t\tprint('Falha Na Ultima Conversa {e}')\n\t\t\tbtn_clear = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"side\"]/div[1]/div/button')))\n\t\t\tbtn_clear.click()\n\t\tfinally:\n\t\t\tprint('................')\n\t#st.session_state[\"contatos_salvos\"]['ultima conversa'] = lista_contatos_info\n\tdriver.quit()\n\treturn lista_contatos_info\n\n\n\n\n\n\n\n\n\n\n\n\nwith st.container():\n\tcaixa = st.container()\n\tcaixa.subheader(\"Mensagem\")\n\tcontent = text_editor(placeholder=\"Escreva seu Newsletter Personalizado\",html=caixa.checkbox(\"Entregar como HTML\", True),readonly=caixa.checkbox(\"Apenas leitura\", False),key=\"quill\",)\n\tlistar_imgs = re.findall( r'src=\"data:image/(.*?);base64,(.*?)\"', fr'{content}')\n\tst.subheader('contato dos pandas')\n\n\tst.write(st.session_state.contatos_salvos)\n\tst.write(st.session_state.contatos_list)\n\tst.write(st.session_state.ultima_conversa)\n\n\n\tif st.sidebar.button('LOGIN', ):\n\t\t#on_click=send_to_clipboard('imagem-0.png')\n\t\t\n\t\tst.sidebar.info(f'Login : {ui_login()}')\n\n\tif st.sidebar.button('LISTA CHAT',):\n\t\tst.sidebar.info('Executando Chrome')\n\t\t\n\t\tst.session_state[\"contatos_list\"] += ui_lista_chat()\n\n\t\tst.session_state[\"contatos_list\"] = list(dict.fromkeys(st.session_state[\"contatos_list\"]))\n\n\t\tst.session_state.contatos_salvos = pd.DataFrame(st.session_state.contatos_list, columns=['contatos'])\n\n\t\tst.experimental_rerun()\n\n\tif st.sidebar.button('LISTA CONTATOS',):\n\t\tst.sidebar.info('Executando Chrome')\n\n\t\tst.session_state[\"contatos_list\"] += ui_lista_contatos()\n\n\t\tst.session_state[\"contatos_list\"] = list(dict.fromkeys(st.session_state[\"contatos_list\"]))\n\t\t\n\t\tst.session_state.contatos_salvos = pd.DataFrame(st.session_state.contatos_list, columns=['contatos'])\n\n\t\tst.experimental_rerun()\n\t\n\tif st.sidebar.button('CHAT E CONTATOS'):\n\t\tst.sidebar.info('Executando Chrome 
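# Editor's sketch (streamlit assumed installed; widget labels and the 'demo_contatos'
# key are invented): the UI below survives Streamlit's rerun-on-interaction model the
# same way the original does, by parking accumulated state in st.session_state.
import streamlit as st

if 'demo_contatos' not in st.session_state:
    st.session_state['demo_contatos'] = []  # persists across reruns

novo = st.text_input('Novo contato')
if st.button('Adicionar') and novo:
    st.session_state['demo_contatos'].append(novo)
    # keep the list unique; the first occurrence wins
    st.session_state['demo_contatos'] = list(dict.fromkeys(st.session_state['demo_contatos']))

st.write(st.session_state['demo_contatos'])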
CHAT')\n\t\tst.session_state[\"contatos_list\"] += ui_lista_chat()\n\t\tst.session_state[\"contatos_list\"] = list(dict.fromkeys(st.session_state[\"contatos_list\"]))\n\n\t\tst.sidebar.warning('Executando Chrome CONTATOS')\n\t\tst.session_state[\"contatos_list\"] += ui_lista_contatos()\n\t\tst.session_state[\"contatos_list\"] = list(dict.fromkeys(st.session_state[\"contatos_list\"]))\n\n\t\tst.session_state.contatos_salvos = pd.DataFrame(st.session_state.contatos_list, columns=['contatos'])\n\t\tst.experimental_rerun()\n\n\n\n\tif st.sidebar.button('BUSCA INDIVIDUAL',):\n\t\tst.sidebar.info('Executando Chrome')\n\t\tui_buscar(st.session_state.contatos_salvos)\n\n\tenviar = st.sidebar.button('ENVIAR IMAGEM',)\n\tif enviar:\n\t\tst.sidebar.info('Executando Chrome')\n\t\tst.session_state[\"contatos_salvos\"] = ui_enviar_imagem(st.session_state.contatos_salvos,content)\n\n\tif len(listar_imgs) > 0 and not enviar:\n\t\twith open(f\"imagem-{0}.{listar_imgs[0][0]}\", 'wb') as wrb:\n\t\t\twrb.write(b64decode(listar_imgs[0][1]))\n\t\tsend_to_clipboard(f\"imagem-0.{listar_imgs[0][0]}\")\n\n\tif st.sidebar.button('ULTIMA CONVERSA BUSCA',):\n\t\tst.sidebar.info('Executando Chrome')\n\n\t\tst.session_state[\"contatos_salvos\"]['ultima conversa'] = ui_ultima_conversa_rapida(st.session_state.contatos_salvos)\n\t\tst.experimental_rerun()\n\n\n\n\n\ndef remove_repeted():\n\tremove_reapter = \"\"\"\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\ttela_atual = wait.until(EC.presence_of_element_located((By.XPATH,'/html/body/div[1]/div/div/div[4]/div/div[3]/div/div[2]/div[3]')))\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tclean_tela_atual = re.sub(CLEANR, '', tela_atual.get_attribute('innerHTML'))\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tprint(f'EU SEI clean_tela_atual? {clean_tela_atual}') #\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\talready_sending_call_back_no_texts = already_sent(clean_tela_atual, clean_mensagem)\n\t\t\t\t\t\t\ttime.sleep(20)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print(f'EU SEI Q VEM ATÉ AQUI? {already_sending_call_back_no_texts}') #\n\t\t\t\t\t\t\t#time.sleep(100)\n\t\t\t\t\t\t\t#already_sending_call_back_no_texts = already_sent(tela_atual, mensagem_strip)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#mensagem_strip = mensagem_strip.replace(\"/n\", '')\n\t\t\t\t\t\t\t#print('fine, we found? '+ already_sent(clean_tela_atual, clean_mensagem))\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t#//*[@id=\"main\"]/div[3]/div/div[2]/div[2]/div[2]/div/div/div[1]/span/div/span\n\t\t\t\t\t\t\t#print(f'fine EU SEI Q VEM ATÉ AQUI? 
{already_sending_call_back_no_texts}') #\n\t\t\t\t\t\t\t#time.sleep(100)\n\t\t\t\t\t\t\texiting_no_content = wait_short.until(EC.presence_of_element_located((By.XPATH,'//*[@id=\"main\"]/div[3]/div/div[2]/div[2]/div[2]/div/div/div[1]/span/div/span')))\n\t\t\t\t\t\t\t#print(f\"\\n{exiting_no_content.get_attribute('innerHTML')}\\n\")\n\t\t\t\t\t\t\tif strings.svg_lock == exiting_no_content.get_attribute('innerHTML'):\n\t\t\t\t\t\t\t\t#print (\"\\nthey are equals, found\\n\")\t\n\t\t\t\t\t\t\t\talready_sending_call_back_no_texts = False#\t\t\t\t\tCONVERSA LIMPA\n\t\t\t\t\t\t\telse: #\n\t\t\t\t\t\t\t\tprint('erro reading lock')\n\t\t\t\t\t\t\t#already_sending_call_back_no_texts = content_localizado(str(exiting_no_content.get_attribute('innerHTML')))\n\t\t\t\t\t\t\t#already_sending = False\n\t\t\t\t\t\t\t#mensagem_strip = mensagem\n\t\t\t\t\t\t\t#mensagem_strip = mensagem_strip.replace(\"/n\", '')\n\t\t\t\t\t\t\t\ttela_atual = '!mensagem_strip'\n\t\t\t\t\t\t\t\t#print (\"\\nnot found first as msg they are not equal found\\n\")\n\t\t\t\t\t\t\ttime.sleep(500)\n\t\t\t\t\t\t\tprint('EXCESSAO')\n\t\t\t\t\t\t\n\t\t\t\t\t\tif already_sending_call_back_no_texts:\n\t\t\t\t\t\t\t#//*[@id=\"app\"]/div/div/div[2]/div[2]/span/div/span/div/div/div[2]/div/div[1]/div[1]/div[1]\n\t\t\t\t\t\t\tbotao_fechar_img = wait.until(EC.presence_of_element_located(GetLocator.BOTAO_CANCELAR_IMG))\n\t\t\t\t\t\t\tbotao_fechar_img.click()\n\t\t\t\t\t\t\tprint(f\"already sent to {contatos_['contatos'][contagem]}\")\n\t\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpass\"\"\"","repo_name":"AbsolutPro/AbsolutApp","sub_path":"unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":22136,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74266763680","text":"\n\n\nimport re\nimport win32clipboard\nimport clipboard\n\nfrom tag import extract_tags\nimport pangu\n\nimport enum \n\nBlockType = enum.Enum('BlockType', ('p', \n 'title', \n 'fencecode', \n 'sepline', \n 'indent', \n 'admonition', \n 'table', \n 'list', \n 'quote', \n 'frontmatter'\n ))\n\n\nclass Polish:\n def __init__(self, text):\n self.text = text\n self.source = text\n\n @classmethod\n def from_clipboard(cls):\n text = clipboard.paste()\n return cls(text)\n\n def to_clipboard(self):\n clipboard.copy(self.text)\n return self\n\n def join_markdown_link(self):\n t = re.sub(r'\\n\\[\\n{1,4}(.+)\\n{1,6}\\]\\((https?://.+?)\\)', \n r'\\n[\\1](\\2)', \n self.text)\n self.text = t\n return self\n\n def update_image_url(self, prefix=None, replacer=None):\n\n if replacer:\n self._replace(r'!\\[(.*?)\\]\\((.+?)\\)', replacer)\n else:\n self._replace(r'!\\[(.*?)\\]\\((.+?)\\)', r'![\\1](' + prefix + r'\\2)')\n\n return self\n\n def _replace(self, pat, repl):\n self.text = re.sub(pat, repl, self.text)\n\n\n\n def _tag_markdown_blocks(self, lines):\n '''\n 单行 tag: title p sepline...\n 多行 tag: quote indent admonition fencecode frontmatter...\n fencecode frontmatter 通过结束符关闭\n quote indent admonition 自行关闭\n '''\n last_blocktype = None\n for line, farseer in zip(lines, lines[1:] + ['']): # farseer 前瞻一行\n \n if last_blocktype is BlockType.fencecode:\n yield BlockType.fencecode\n if line.startswith('```'):\n last_blocktype = None\n elif last_blocktype is BlockType.frontmatter:\n yield BlockType.frontmatter\n if line.strip() == '---':\n last_blocktype = None\n elif last_blocktype is BlockType.admonition and line.startswith(' '):\n yield BlockType.admonition\n elif line.strip() == '' and last_blocktype in 
(BlockType.admonition, BlockType.indent, BlockType.quote):\n                yield last_blocktype\n\n            elif line.startswith('```'):\n                last_blocktype = BlockType.fencecode\n                yield BlockType.fencecode\n            elif line.startswith('    '):\n                last_blocktype = BlockType.indent\n                yield BlockType.indent\n            elif line.strip() == '---':\n                last_blocktype = BlockType.frontmatter\n                yield BlockType.frontmatter\n            elif line.startswith('!!! '):\n                last_blocktype = BlockType.admonition\n                yield BlockType.admonition\n            elif line.startswith('>'):\n                last_blocktype = BlockType.quote\n                yield BlockType.quote\n\n            else:\n                if line.startswith('#'):\n                    yield BlockType.title\n                elif line.startswith((r'----', r'\----', r'====', r'\====', r'\=\=\=\=')):\n                    yield BlockType.sepline\n                else:\n                    yield BlockType.p\n\n        # end def _tag_markdown_blocks\n\n\n    def _split_markdown_blocks(self, text):\n        '''\n        Single-line tags: title p sepline...\n        Multi-line tags: quote indent admonition fencecode frontmatter...\n\n        Merge consecutive blocks that share the same multi-line tag\n        '''\n        lines = self._sep_split(text.strip(), sep=r\"([\\n]+)\")\n        tags = list(self._tag_markdown_blocks(lines))\n\n        block_cache = ''\n\n        for tag, next_tag, line in zip(tags, tags[1:] + [None], lines):\n            # print(tag, line)\n            if tag in (BlockType.title, BlockType.p, BlockType.sepline):\n                yield tag, line\n                block_cache = ''\n            elif tag == next_tag:\n                block_cache += line\n            else:\n                block_cache += line\n                yield tag, block_cache\n                block_cache = ''\n\n        # end def _split_markdown_blocks\n\n\n\n\n    def _sep_split(self, text, sep=r\"([.。!!??\\n]+)\"):\n        result = re.split(sep, text)\n        values = result[::2]\n        delimiters = result[1::2] + ['']\n        return [v+d for v, d in zip(values, delimiters)]\n\n\n\n    def extract_tags(self, n=10):\n        return extract_tags(self.text, topK=n)\n\n\n    def pangu_spacing(self):\n        self.text = pangu.spacing_text(self.text)\n        return self\n\n    def transword(self):\n        transdict = { '显示': '显式',\n                      '登陆': '登录',\n                      '稍后': '稍候',\n                      '(': '(',\n                      ')': ')',\n                      '“': '\"',\n                      '”': '\"',}\n        for k, v in transdict.items():\n            self.text = self.text.replace(k, v)\n        return self\n\n\n\n    def extract_outline(self, headers='h1,h2,h3'):\n\n        lines = self._split_markdown_blocks(self.text)\n        result = []\n        header_pat = tuple('#'*int(h[1])+' ' for h in headers.split(','))\n\n        for kind, line in lines:\n            if kind == BlockType.title and line.startswith(header_pat):\n                result.append(line.strip())\n        return result\n\n\n\n\n    def extract_highlights(self):\n        '''Inline highlights only, not block-level elements'''\n        result = []\n        highlight_pat1 = r'(==(.+?)==)'\n        highlight_pat2 = r'(<(span|font)[^>]+?color ?(=|:) ?([\\'\\\"]?).+?\\4;?[^>]+?>.+?)'\n\n        for kind, line in self._split_markdown_blocks(self.text):\n            if kind in (BlockType.fencecode, BlockType.sepline):\n                continue\n            else:\n                for m in re.findall(highlight_pat1, line):\n                    result.append(('highlight', m[0]))\n\n                for m in re.findall(highlight_pat2, line):\n                    result.append(('highlight', m[0]))\n\n        return result\n\n\n    def extract_annotations(self):\n        '''Block-level admonitions'''\n        result = []\n\n        for kind, block in self._split_markdown_blocks(self.text):\n            # print(kind, block)\n            if kind in (BlockType.admonition, ):\n                result.append(('annotation', block))\n        return result\n","repo_name":"probe301/markdown-polish","sub_path":"polish.py","file_name":"polish.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33898644275","text":"import numpy as np\nimport pint\n\nfrom XPSS.pss.db.units import LengthUnits\n\nfrom XPSS.logger import Logger\n\nfrom .systemops import nEDU\n\n\nlogger = 
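# Editor's demo, separate from the module above: re.split with a CAPTURING group also
# returns the delimiters, which is what lets _sep_split() glue each separator back onto
# the piece it terminated.
import re

def sep_split(text, sep=r"([.。!!??\n]+)"):
    result = re.split(sep, text)
    values = result[::2]              # the text pieces
    delimiters = result[1::2] + ['']  # the separator that followed each piece
    return [v + d for v, d in zip(values, delimiters)]

assert sep_split('One. Two! Three') == ['One.', ' Two!', ' Three']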
Logger(debug=False)\n\nclass Data:\n def __init__(self, pssvars, params):\n self.Q = None # flowrates\n self.v = None # velocity\n self.p = None # pressures\n self.Qc = None # flowrate corrections\n self.Pc = None #pressure corrections\n\n [self.Cn, self.Cn_n, self.B, self.pipeProps,\n self.pipeSortList, self.nodeSortList] = \\\n self.create_conn_matrix(pssvars)\n # pssvars.pipeNodeProps, pssvars.pipeProps,\n # pssvars.nodeProps, pssvars.res,\n # pssvars.numEntityErr, pssvars.nPipes,\n # pssvars.nNodes)\n\n self.A = np.array(self.B[1:]) # Strictly-upper triangular incidence matrix\n\n #constant values:\n self.n, _ = self.A.shape #number of pipes / junctions\n self.onesVec = np.ones((self.n,1), dtype=int)\n\n self.nOpEDU = None\n self.nEDU = nEDU(self.A)\n\n self.dh = dh(pssvars, params.elevUnits) + \\\n dh_ps(self.nEDU, params.pumpStationDepth) #Static heads (elevations)\n\n self.nomDia = None # nominal diameter of pipes (np.array(n,1))\n self.matl = None # pipe material\n self.sch = None # pipe schedule\n\n self.d = None #inner pipe diameter\n self.C = None # Hazen-Williams coefficient\n self.roughness = None # Darcy-Weisbach frictino coefficient\n self.L = None # Pipe lengths\n\n self.pumps = None # List of pumps for end nodes\n\n # Constant Flow\n self.fl = None # pipe friction loss\n self.afl = None # accumulated friction loss\n\n\n def create_conn_matrix(self, pssvars): #TODO: Convert C to be equivalent to the incidence matrix in graph theory.\n #TODO: Currently, C is represented as an upper triangular matrix. This should be modified so that it is Symmetric, i.e C = C + C^T. Need to see if the affects anything in the existing code.\n \"\"\" {Function}\n Create two matrices, \"C\" and \"C_n\" that define the geometry of a connected graph system, ordered from the reservior upstream. C, is the connection matric for pipes, and C_n is the connection matrix of nodes. Each defines which pipes / nodes are immediately downstream of the current pipe / node. \"C_n\", is essentilly the adjacency matrix in graph theory.\n {Variables}\n Pipe_nodes_pd:\n (Pandas Dataframe) unsorted list of Pipes and connected junctions based on ordering of QGIS features.\n Pipe_props_pd:\n (Pandas Dataframe) unsorted list of Pipe attributes based on ordering of QGIS features.\n Node_props_pd:\n (Pandas Dataframe) unsorted list of node attributes based on ordering of QGIS features. This includes all junctions, but not the reservoir\n res:\n ()\n\n {Outputs}\n C:\n (2D Numpy array) Pipe connection matrix. A square symmetric matrix that defines how pipes are connected within the network. Each row indicates the current pipe, and each column represents potentially connected pipes. a value of 1 indicates the pipes are connected. A value of 0 indicates no connection. All digonal entries are 0. Sorted from the reservoir upstream (breadth-first search).\n C_n:\n (2D Numpy array) Node connection matrix (adjacency matrix) A square matrix with dimension equal to the number of nodes. Rows indicate the current node. Columns indicate the nodes with edges connecting the current node. Sorted from the reservoir upstream (breadth-first search). For directed tree graphs, this matrix is strictly upper triangular.\n In:\n (2D Numpy array) Incidence matrix. Rows are an ordered list of all nodes. Columns are an ordered list of all edges. 
Matrix entry In_{ij} is 1 if the ith node is connected to the jth edge, and zero otherwise.\n \"\"\"\n\n Pipe_nodes = pssvars.pipeNodeProps.to_numpy()\n Pipe_props = pssvars.pipeProps.to_numpy()\n Node_props = pssvars.nodeProps.to_numpy()\n res = pssvars.res\n num_entity_err = pssvars.numEntityErr\n num_pipes = pssvars.nPipes\n num_nodes = pssvars.nNodes\n\n\n #logger.progress(\"num_entity_err: \", num_entity_err)\n #logger.progress(\"Pipe_nodes: \", Pipe_nodes)\n #logger.progress(\"Pipe_props: \", Pipe_props)\n\n #BUILD THE CONNECTION MATRIX\n\n #C = [ [0] * len(Pipe_nodes) for _ in range(len(Pipe_nodes))]\n C = np.zeros((len(Pipe_nodes),len(Pipe_nodes)))\n #logger.progress(\"Pipe_nodes: \", len(Pipe_nodes))\n #logger.progress(\"C: \", len(C))\n\n #logger.progress(\"Connection matrix initialized...\")\n #1. Find the pipes that contain a node that has only one pipe connected (i.e. end nodes)\n #2. loop through each of these pipes and determine how many junctions are between the end node and the reservior\n #3. determine the maximum value of #2 for all end nodes, (label this \"N\")\n #(May be able to revise so that steps above this line are not needed)\n #4a. initialize an loop counter \"count = 0\"\n count = 0\n #4b. create an array \"pipe_list\" that contains a list of the row location of the pipes connected to the reservior\n\n pipe_lst = [] #sorted list of pipes from the discharge reservior upstream\n node_lst = [res[0]] #sorted list of nodes from the discharge reservior upstream\n Dstream_pipe = [res[0]] #sorted list of the pipe that is downstream of the current pipe\n\n num_pipes = len(pssvars.pipe_fts.index)\n num_nodes = len(pssvars.junc_fts.index) + len(pssvars.res_fts.index)\n\n if num_pipes > num_nodes-1:\n C_n = np.zeros((num_pipes+1, num_pipes+1)) #initialization if there is a geometry error.\n In = np.zeros((num_pipes+1, num_pipes))\n else:\n C_n = np.zeros((num_nodes, num_nodes)) #typical initialization\n In = np.zeros((num_nodes, num_pipes))\n\n\n\n #all_pipes = Pipe_props\n\n In[0][0] = 1 # The first pipe in the list is connected to the reservoir\n\n #logger.progress(C_n)\n\n #get edu info from qepanet\n num_edu = pssvars.pipe_fts['num_edu'].to_numpy(dtype='int')\n\n #logger.progress(str(num_edu))\n\n for i in range(len(Pipe_nodes)): #NOTE: Does this work if more than 1 pipe is connected to the reservoir?\n if (Pipe_nodes[i][1] == res[0]):\n pipe_lst.append([i, 1])\n node_lst.append(Pipe_nodes[i,2]) # add the upstream node of the first pipe to the node list\n C_n[0][1] = 1 #The node jut added to \"node_lst is adjacent to the reservoir\n elif (Pipe_nodes[i][2] == res[0]):\n pipe_lst.append([i, 2])\n node_lst.append(Pipe_nodes[i,1])\n C_n[0][1] = 1\n #logger.progress(\"Sorted list of Pipes from reservior: \"+str(pipe_lst))\n #5. 
If count < len(pipe_list):\n #while count < len(pipe_lst):\n #for count in range(len(pipe_lst)):\n error_lst = []\n\n\n try:\n while True:\n #logger.progress(\"Pipe_lst: \", len(pipe_lst))\n #logger.progress(\"count: \", count)\n if (C.sum(axis=1) != 0).all(): #check to see if any rows in the C matrix have all zeros (axis=1 are rows for numpy)\n break\n #if (count >= len(pipe_lst)) or (count >= len(C)): # or count >= len(node_lst)-2:\n #num_entity_err=True\n #logger.progress(\"Processed \", count, \" out of \", len(C), \" pipes.\")\n #break\n C[count][count] = 1 #set all diagonal elements to 1 indicating it is an end branch\n # find the upstream node node corresponding to the current pipe\n found = 0\n # for i in range(2):\n # if Pipe_nodes[pipe_lst[count][i+1] != pipe_lst[count]:\n # node_in = Pipe_nodes[count][i+1]\n # if found == 1:\n # logger.progress(\"ERROR: Something went wrong while searching for upstream node connection for Pipe \"+ Pipe_nodes[count][1] )\n # raise ValueError()\n # found = 1\n\n if pipe_lst[count][1] == 1: #get the index of the downstream node for the current pipe\n j = 2 #upstream node index\n elif pipe_lst[count][1] == 2:\n j = 1\n else:\n self.log_error(\"Something went wrong while searching for upstream node connection for Pipe \" + str(Pipe_nodes[count][1]))\n node_in = Pipe_nodes[pipe_lst[count][0]][j] #store the qepanet name of the upstream(?) node\n #node_lst.append(node_in)\n In[count+1][count] = 1 #upstream node For a directed tree graph, the diagonal entries of the squrae matrix with the first row removed is always 1.\n #logger.progress(\"Incidence Matrix: \\n\", In)\n\n # a. (Work from the reservior / Outlet upstream) for each entry # \"count\" in the pipe list determine the number of pipes connected to the junction.\n for pipe in range(len(Pipe_nodes)):\n for j in range(1,3):\n if (Pipe_nodes[pipe][j] == node_in and pipe != pipe_lst[count][0]):\n pipe_lst.append([pipe, j])\n if j == 1:\n node_lst.append(Pipe_nodes[pipe][2])\n else:\n node_lst.append(Pipe_nodes[pipe][1])\n Dstream_pipe.append(Pipe_nodes[pipe_lst[count][0]][0])\n C[count][len(pipe_lst)-1] = 1\n C[count][count] = 0 #If the pipe has a pipe connected to the upstream node, it is not a branch of the system and does not have a pump attached.\n\n C_n[count+1][len(node_lst)-1] = 1 #Polulate the node connection matrix (this only works for directed tree graphs where the #nodes = #pipes+1)\n In[count+1][ len(node_lst)-2 ] = 1 #downstream node\n #logger.progress(\"Adjacency Matrix \\n\", C_n)\n #logger.progress(\"Pipe List: \", pipe_lst)\n #logger.progress(\"Node List: \", node_lst)\n #logger.progress(\"Pipe Nodes \\n\", Pipe_nodes)\n #logger.progress(C_n)\n #logger.progress(\"Incidence Matrix:\\n\", In)\n #Collect all connected pipes\n\n\n #if (C[count][count] == 1 and num_edu[pipe_lst[count][0]]): #if the pipe is an end branch and the number of EDUs is defined in qepanet, overwrite the default value of num_edu\n #C[count][count] = num_edu[pipe_lst[count][0]]\n #logger.progress(str(C))\n #logger.progress(\"pipe_lst = \"+str(pipe_lst))\n #logger.progress(\"Downstream_pipe = \"+str(Dstream_pipe))\n\n #check if the current pipe is connected back to the reservoir\n # row = True\n # current_node = C_n[:][count+1]\n #\n # while np.sum(current_node) > 1:\n # for i in range(len(current_node)):\n # if current_node[i] == 1:\n # if row:\n # current_node = C_n[:][i]\n # row = False\n # break\n # else:\n # current_node = C_n[i]\n # row = True\n # break\n #\n # if current_node[0] != 1:\n # i = 0\n # while 
current_node[i] != 1:\n # if i < len(current_node) - 1:\n # not_found = True\n # break\n # i += 1\n # if not not_found:\n # raise Exception(\"ERROR: Node \",node_lst[i], \" is disconnected from the Reservior\")\n\n count += 1\n\n\n\n except:\n logger.progress(\"Processed \"+str(count)+\" out of \"+ str(len(C))+\" pipes.\")\n logger.progress(\"Errors found in the system geometry:\")\n logger.progress(\"\\tNumber of Pipes: \"+str(num_pipes))\n logger.progress(\"\\tNumber of Nodes: \"+str(num_nodes))\n logger.progress(\"Locating discontinuities...\")\n self.analyze_system_connectivity(Pipe_nodes, pipe_lst, node_lst)\n #raise Exception(\"ERROR: The system geometry is not correct!\")\n if num_entity_err is True: #if an error was raised during check(),check for overlaps, then do the same as above\n\n #loop through qgis features and check for overlap\n overlap_nodes = []\n\n for col in range(len(In[0])):\n junc_fts = self.params.junctions_vlay.getFeatures()\n index = 0\n while In[index][col] == 0:\n index += 1\n upstream_eid = node_lst[col+1]\n downstream_eid = node_lst[index]\n\n #logger.progress(\"Checking overlap on Nodes \"+upstream_eid+\" and \"+downstream_eid+\"...\")\n\n upstream_found = False\n downstream_found = False\n\n if downstream_eid == res[0]:\n for feat in self.params.reservoirs_vlay.getFeatures(): #assumes there is only 1 reservoir\n downstream_pt = feat.geometry()\n downstream_found = True\n break\n\n for feat in junc_fts: #TODO: Is there a better way to do this?\n if (upstream_found == False) and (str(feat.attribute(Junction.field_name_eid)) == upstream_eid):\n upstream_pt = feat.geometry()\n upstream_found = True\n if (downstream_found == False) and (str(feat.attribute(Junction.field_name_eid)) == downstream_eid):\n downstream_pt = feat.geometry()\n downstream_found = True\n if (upstream_found is True) and (downstream_found is True):\n break\n\n if upstream_found is False:\n self.log_error(\"Point not found for junction \"+upstream_eid, stop=True)\n\n if downstream_found is False:\n self.log_error(\"Downstream point not found for junction \"+downstream_eid, stop=True)\n\n if NetworkUtils.points_overlap(upstream_pt, downstream_pt, self.params.tolerance) is not False:\n overlap_nodes.append(upstream_eid) #TODO: improve so that it doesnt append the same feature twice\n overlap_nodes.append(downstream_eid)\n\n if len(overlap_nodes) > 0:\n self.select_qgis_features(self.params.junctions_vlay, overlap_nodes)\n self.log_error(\"Overlapping features were found:\")\n self.log_error(str(overlap_nodes), stop=True)\n\n logger.progress(\"Processed \"+str(count)+\" out of \"+ str(len(C))+\" pipes.\")\n logger.progress(\"Errors found in the system geometry:\")\n logger.progress(\"\\tNumber of Pipes: \"+str(num_pipes))\n logger.progress(\"\\tNumber of Nodes: \"+str(num_nodes))\n logger.progress(\"Locating discontinuities...\")\n self.analyze_system_connectivity(Pipe_nodes, pipe_lst, node_lst)\n #raise Exception(\"ERROR: The system geometry is not co\n\n # - for each pipe:\n # - determine its location within the connection matrix (based on location within \"Pipe_Node\" Matrix)\n # - set the diagonal entry to 1\n # - find all pipes that share the same node as the upstream node within the pipe\n # - for each pipe:\n # - find the row location of the pipe within the \"Pipe_Node\" matrix\n # - set the corresponding column entry within the connection matrix for the current node in \"node_list\" equal to \"-1\".\n # - append the row location in \"Node_List\" to the \"pipe_list\" array.\n\n #df = 
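# Editor's toy example, independent of the XPSS classes: for a directed tree with
# nodes {0: reservoir, 1, 2} and edges e0=(0,1), e1=(1,2), the structures the
# docstring describes look like this (rows of In are nodes, columns are edges):
import numpy as np

In_demo = np.array([[1, 0],   # node 0 touches e0 only
                    [1, 1],   # node 1 touches e0 and e1
                    [0, 1]])  # node 2 touches e1 only
C_n_demo = np.array([[0, 1, 0],
                     [0, 0, 1],
                     [0, 0, 0]])   # strictly upper triangular for a directed tree
C_n_demo = C_n_demo + C_n_demo.T   # the same symmetrization step the code applies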
pd.DataFrame(Dstream_pipe, columns=\"Downstream Connected Pipe\")\n\n C_n = C_n + C_n.T #Get a symmetric matrix\n #logger.progress(\"Adjacency Matrix \\n\", C_n)\n #logger.progress(\"Pipe List: \", pipe_lst)\n #logger.progress(\"Node List: \", node_lst)\n #logger.progress(\"Pipe Nodes \\n\", Pipe_nodes)\n new_index = []\n\n for i in range(len(pipe_lst)):\n new_index.append(pipe_lst[i][0])\n\n\n Pipe_props_pd = pssvars.pipeProps.reindex(new_index) #reorder the rows in decending order from the reservior to the branches (same as connection matrix)\n\n Pipe_props_pd.insert(loc=1, column='Downstream Connected Pipe', value=Dstream_pipe)\n\n #logger.progress(str(Pipe_props_pd))\n\n #logger.progress(\"Connection matrix populated...\")\n\n for i in range(len(C)): #check to see that all diagonal elements of the connection matrix are = 1.\n #this also ensures that all of the pipe elements were lopped through in the while loop above.\n if np.sum(C[i]) == 0:\n self.log_error(\"The connection matrix was not populated completely.\", stop=True)\n\n sort_lst = [0 for row in range(len(pipe_lst))] #create list to map location of qepanet features to pandas index format\n for i in range(len(pipe_lst)):\n sort_lst[i] = pipe_lst[i][0]\n\n\n #logger.progress(\"node_lst: \", node_lst)\n\n node_srt_lst = [ 0 for i in range(len(node_lst)-1)]\n node_prop_np = pssvars.nodeProps['Node ID'].astype(str)\n\n #logger.progress(\"Node list:\\n\", node_lst)\n #logger.progress(\"Node props:\\n\", node_prop_np)\n\n for i in range(1,len(node_lst)):\n #index = Node_props_pd.index[Node_props_pd['Node ID'].astype(str)==node_lst[i]].tolist()\n for j in range(len(node_prop_np)):\n if node_prop_np[j] == node_lst[i]:\n # break\n #logger.progress(index)\n node_srt_lst[i-1] = j\n\n #logger.progress(\"Node sort list:\\n\", node_srt_lst)\n\n return [C, C_n, In, Pipe_props_pd, sort_lst, node_srt_lst]\n\ndef dh(pssvars, units):\n \"\"\"Calculates the static head for nodes give elevations.\"\"\"\n\n dh = pssvars.nodeProps['Elevation [ft]'].to_numpy() - \\\n pssvars.nodeProps['Elevation [ft]'].to_numpy()[0]\n\n dh *= LengthUnits[units]\n dh = dh.to_base_units()\n\n return dh\n\ndef dh_ps(nEDU, dh):\n \"\"\"Return an array of pump station depths for end nodes\"\"\"\n\n return dh*nEDU.astype(bool).astype(int)\n","repo_name":"mcgoldba/XPSS","sub_path":"pss/calc/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":19221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23122953502","text":"from django.test import TestCase\n\nfrom airport.models import Aircraft, StateChangeLog\nfrom airport.tasks import ground_crew_routine\n\n\ndef create_aircraft(call_sign, state='PARKED', type='AIRLINER', longitude=0,\n latitude=0, altitude=0, heading=0):\n return Aircraft.objects.create(\n call_sign=call_sign,\n state=state,\n type=type,\n longitude=longitude,\n latitude=latitude,\n altitude=altitude,\n heading=heading\n )\n\n\nclass GroundCrewRoutineTests(TestCase):\n\n def test_landed_aircraft_parked(self):\n aircraft = create_aircraft(\n call_sign='CS1',\n type='AIRLINER',\n state=Aircraft.LANDED\n )\n\n ground_crew_routine()\n\n aircraft.refresh_from_db()\n\n self.assertEqual(aircraft.state, Aircraft.PARKED)\n\n def test_parked_aircraft_creates_success_log(self):\n aircraft = create_aircraft(\n call_sign='CS1',\n type='AIRLINER',\n state=Aircraft.LANDED\n )\n\n ground_crew_routine()\n\n aircraft.refresh_from_db()\n\n logs = StateChangeLog.objects.all()\n self.assertEqual(len(logs), 
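# Editor's sketch (assumes the pint package; the registry name and values are
# invented): dh() above multiplies a plain numpy array by a length unit and then
# normalizes it, the same pattern as this standalone snippet.
import numpy as np
import pint

ureg = pint.UnitRegistry()
elev = np.array([0.0, 12.0, 30.5]) * ureg.foot
elev_si = elev.to_base_units()  # pint converts the feet to metres here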
1)\n self.assertLog(logs[0], aircraft, 'LANDED', 'PARKED', 'ACCEPTED')\n\n def assertLog(self, log, expected_aircraft, expected_from_state, expected_to_state, expected_outcome):\n self.assertEqual(log.aircraft, expected_aircraft)\n self.assertEqual(log.from_state, expected_from_state)\n self.assertEqual(log.to_state, expected_to_state)\n self.assertEqual(log.outcome, expected_outcome)\n","repo_name":"milorad-kukic/airport","sub_path":"app/airport/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10273103322","text":"import csv\nimport os\nimport sys\n\nfrom step_functions.metrics import get_lambda_metrics_executions, get_lambda_metrics_perslice,\\\n get_lambda_metrics_timestamps\nfrom step_functions.deployment import sfn_constants\n\n\ndef write_metrics(metrics_file, file_headers, obtained_metrics):\n if not os.path.exists(sfn_constants.METRICS_FOLDER):\n os.mkdir(sfn_constants.METRICS_FOLDER)\n\n with open(metrics_file, 'w', encoding='UTF8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(file_headers)\n writer.writerows(obtained_metrics)\n\n\ndef write_metrics_perslice(duration_list, memory_list):\n with open(sfn_constants.METRICS_FILE_PERSLICE, 'w', encoding='UTF8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(sfn_constants.CSV_HEADERS_PERSLICE)\n for i in range(len(duration_list)):\n for j in range(len(duration_list[i])):\n writer.writerow([i + 1, j + 1, duration_list[i][j], memory_list[i][j]])\n\n\nif __name__ == '__main__':\n error_message = \"Please enter a correct mode as argument: executions, timestamps, perslice\"\n if len(sys.argv) != 2:\n raise Exception(error_message)\n else:\n mode = sys.argv[1]\n print(\"MODE: \" + mode + \"\\n\")\n print(\"Saving metrics to CSV file...\")\n if mode == \"executions\":\n file = sfn_constants.METRICS_FILE\n metrics = get_lambda_metrics_executions.get_metrics()[0]\n write_metrics(file, sfn_constants.CSV_HEADERS_EXECUTIONS, metrics)\n elif mode == \"timestamps\":\n file = sfn_constants.METRICS_FILE_TIMESTAMPS\n metrics = get_lambda_metrics_timestamps.get_metrics()[0]\n write_metrics(file, sfn_constants.CSV_HEADERS_TIMESTAMPS, metrics)\n elif mode == \"perslice\":\n file = sfn_constants.METRICS_FILE_PERSLICE\n metrics = get_lambda_metrics_perslice.get_metrics()\n durations, memories = metrics[0], metrics[2]\n write_metrics_perslice(durations, memories)\n else:\n raise Exception(error_message)\n print(\"Saved metrics successfully to \" + file)\n\n","repo_name":"adrien-glg/onnx-decomposer_aws","sub_path":"step_functions/metrics/save_lambda_metrics.py","file_name":"save_lambda_metrics.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"32968961508","text":"import boto.ec2\nfrom resource_conf import *\nimport datetime\nfrom boto.ec2.cloudwatch import CloudWatchConnection\n\n\"\"\" Create the AWS EC2 connection\"\"\"\nconn = boto.ec2.connect_to_region(RG, aws_access_key_id=A_KEY, aws_secret_access_key=S_KEY)\nconn3 = boto.ec2.cloudwatch.connect_to_region(RG, aws_access_key_id=A_KEY, aws_secret_access_key=S_KEY)\n\ndef launch_instance(ami_image_id, mykey, instance_type, sec_group, data_script):\n \"\"\"Launch a new instance with input parameters\n \"\"\"\n instace_reservation = conn.run_instances(\n image_id=ami_image_id,\n key_name=mykey,\n instance_type=instance_type, \n 
security_groups=sec_group,\n        monitoring_enabled=True)\n    return instace_reservation\n\ndef list_intances():\n    \"\"\"List all instances created in your EC2 account\n    \"\"\"\n    return conn.get_all_instances()\n\ndef get_intance(inst):\n    \"\"\"Return the aws instance with the parameter id\n    \"\"\"\n    return conn.get_all_instances(instance_ids=[inst])\n\ndef delete_instance(instanceId):\n    \"\"\"Delete the aws instance with the parameter id\n    \"\"\"\n    conn.terminate_instances(instance_ids=[instanceId])\n\ndef create_SecGroup(groupName, groupDesc, httpPorts):\n    try:\n        web = conn.create_security_group(groupName, groupDesc)\n        for port in httpPorts:\n            web.authorize('tcp', int(port), int(port), '0.0.0.0/0')\n        return \"SecurityGroup created\"\n    except:\n        return \"Error: Group already exists\"\n    \ndef get_health_log(time_elapsed, ami):\n\thealth_log = conn3.get_metric_statistics(60,\n            datetime.datetime.utcnow() - datetime.timedelta(seconds=time_elapsed),\n            datetime.datetime.utcnow(), 'CPUUtilization', 'AWS/EC2',\n            'Average',\n            dimensions={'ImageId':[ami]})\n\treturn dict(health_log[-1])[u'Average']\n\t\n\n\ndef reload_list():\n\tlist_inst = [i.instances[0] for i in conn.get_all_instances() if i.instances[0].image_id == AMI_IMAGE and (i.instances[0].state == 'running')]\n\tsort_list = [(get_time(inst.launch_time), inst.id, inst.public_dns_name) for inst in list_inst]\t\t\n\tsort_list.sort() \n\t\n\tfile_ = open(\"instances.data\", 'w')\n\tfor inst in sort_list:\n\t\tfile_.write(inst[1]+ \"::\" + inst[2] +\"||\")\n\tfile_.close()\n\ndef get_time(str_time):\n\tdate_ = str_time.split('-')\n\tyear = int(date_[0])\n\tmonth = int(date_[1])\n\tday = int(date_[2][0:2])\n\t\n\ttime_ = date_[2][3:-1].split(':')\n\thour = int(time_[0])\n\tminutes = int(time_[1])\n\tsecs = int(float(time_[2]))  # the seconds field may carry a fractional part, e.g. '56.000'\n\t\n\treturn datetime.datetime(year, month, day, hour, minutes, secs)  # datetime takes (year, month, day, ...)\n\n","repo_name":"antoniorodrigues/redes-digitais","sub_path":"manual-autoscaling/aws_ec2.py","file_name":"aws_ec2.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"8815282033","text":"import drlse\nimport numpy as np\nfrom PIL import Image\nfrom shapely.geometry import LineString\n# import scipy.ndimage.filters as filters\nfrom skimage import draw, color\nfrom skimage.segmentation import slic, find_boundaries\n\nfrom utils_tools.drlse_tools import *\n\nimg_name='216802_216803-100050_100051-18.jpg'\n# from shapely.geometry import LineString\n# parameters google,mapbox,yahoo mu=0.02 lmda=6 alfa=-3\nflag = 1 # when flag is 1, display the evolution process dynamically\ntimestep = 2 # time step\nmu = 0.02 / timestep # coefficient of the distance regularization term R(phi)\nmax_iter = 600\n# iter_outer = 1\n# mapbox lmda=6\nlmda = 6 # coefficient of the weighted length term L(phi)\n# mapbox alfa=-3\nalfa = -3 # coefficient of the weighted area term A(phi)\nepsilon = 1.5 # parameter that specifies the width of the DiracDelta function\n\nc0 = 2\n\nimg_dir = r'C:\\python_pycharm_label_test\\compared_experiments\\segmentation\\experiment_sample\\mapbox'\nsave_dir=r'C:\\python_pycharm_label_test\\compared_experiments\\segmentation\\workflow_GA_images'\njson_dir = r'C:\\python_pycharm_label_test\\experiment_data\\label_json_512'\nphi_binary_result_dir = r'C:\\python_pycharm_label_test\\experiment_data\\phi_binary_results_512'\nphi_binary_result_dir_ori = r'C:\\python_pycharm_label_test\\experiment_data\\phi_binary_results_512_origin'\nroad_network_npy_result_dir = 
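# Editor's note, not from the original module: an alternative to hand-slicing
# launch_time strings is datetime.strptime; the format string below assumes boto's
# usual ISO-8601 form such as '2016-06-15T12:34:56.000Z'.
import datetime

def parse_launch_time(str_time):
    return datetime.datetime.strptime(str_time, '%Y-%m-%dT%H:%M:%S.%fZ')

assert parse_launch_time('2016-06-15T12:34:56.000Z') == datetime.datetime(2016, 6, 15, 12, 34, 56)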
r'C:\\python_pycharm_label_test\\experiment_data\\road_network_npy_result_512'\nroad_network_img_result_dir = r'C:\\python_pycharm_label_test\\experiment_data\\road_network_img_result_512'\nroad_network_preview_dir = r'C:\\python_pycharm_label_test\\experiment_data\\road_network_preview_result_512'\nexp_resluts_dir = r'C:\\python_pycharm_label_test\\experiment_data\\exp_results_512'\nimg_list = os.listdir(img_dir)\nphi_reference_512 = r'C:\\python_pycharm_label_test\\experiment_data\\phi_reference_results_512'\nphi_skeleton_result_dir = r'C:\\python_pycharm_label_test\\experiment_data\\phi_skeleton_results_512'\nphi_thinned_result_dir = r'C:\\python_pycharm_label_test\\experiment_data\\phi_thinned_results_512'\nreference_dir = r'C:\\python_pycharm_label_test\\ground_truth'\nlabel_single_dir=r'C:\\python_pycharm_label_test\\experiment_data\\lebel_single_line_512'\nimg_results_dir=r'C:\\python_pycharm_label_test\\experiment_data\\exp_results_512\\compared_experiment_resluts\\mapbox\\ours'\n\n\n\n\nprename = img_name.split('.')[0]\nimg=Image.open(os.path.join(img_dir, img_name))\nimg_gray = np.array(img.convert('L'))\nimg_color = np.array(img)\nlabel_json = np.load(os.path.join(json_dir, prename + '.npy'), allow_pickle=True)\nmask = np.zeros(img_gray.shape)\nW,H=img_gray.shape\n\nfilter = np.zeros(img_gray.shape)\nsavename = prename + '.png'\n[H, W] = img_gray.shape\n\n\n'''\n====================================================================================================\n initiate the initial area in the image\n=====================================================================================================\n'''\n\nfor _geo in label_json:\n if _geo[0]['type'] == 'LineString':\n # 设置道路中心线缓冲区作为初始区域\n\n roadpoints_coordinates = _geo[0]['coordinates']\n\n line = LineString(roadpoints_coordinates)\n if line.length > 50: # remove the small ones\n linebuffer = line.buffer(4)\n linebuffer1 = line.buffer(25)\n xs, ys = linebuffer.exterior.coords.xy\n rr, cc = draw.polygon(ys, xs, (W, H))\n mask[rr, cc] = 1\n\n xs, ys = linebuffer1.exterior.coords.xy\n rr, cc = draw.polygon(ys, xs, (W, H))\n filter[rr,cc]=1\n\n elif _geo[0]['type'] == 'MultiLineString':\n for coor in _geo[0]['coordinates']:\n\n roadpoints_coordinates = coor\n\n line = LineString(roadpoints_coordinates)\n if line.length > 50:\n linebuffer = line.buffer(4)\n linebuffer1 = line.buffer(25)\n xs, ys = linebuffer.exterior.coords.xy\n rr, cc = draw.polygon(ys, xs, (W, H))\n mask[rr, cc] = 1\n\n xs, ys = linebuffer1.exterior.coords.xy\n rr, cc = draw.polygon(ys, xs, (W, H))\n filter[rr, cc] = 1\n\n'''\n=======================================================================\n drlse\n=======================================================================\n'''\n\n# 设置道路中心线缓冲区作为初始区域\n\nphi = np.where(mask==0,2,-2)\n\n\n\n# start level set evolution\n\nphi = drlse.drlse(img_gray.astype(np.float32), phi.astype(np.float32), W, H, mu, timestep, lmda, alfa, epsilon, 1,\n max_iter,5,0.4)\n\n\n\n## save the end status image\n\nkernel = np.ones((15, 15), np.uint8)\nphi_mat_binary = np.where(phi < 0, 255, 0).astype(np.uint8)\n\n\n\n\n\nclosing = cv2.morphologyEx(phi_mat_binary, cv2.MORPH_CLOSE, kernel)\n\nfiltered_closing=closing*filter\n\n\n\n'''\n=======================================================================\n SLIC\n=======================================================================\n'''\nimg_lab = color.rgb2lab(img)\n[H, W, Ch] = img_color.shape\ndata = np.reshape(img_color, (H * W, Ch))\n\nslic_img = slic(img_color, 
n_segments=1000, compactness=15, sigma=0.5)\n\nmerge_location = np.where(filtered_closing != 0)\n\nmerge_label=slic_img[merge_location]\n\nmerge_class_list=np.unique(merge_label)\n\nslic_class_list=np.unique(slic_img)\n\n# '''\n# ====================================================================================\n# computhe the union part of slic results and the segmentation meraging result,\n# when the union is over half of the number of the slic superpixels\n# ==================================================================================\n# '''\n\n\n'''------- directly compute the union part----------'''\nravel_slic_img=np.ravel(slic_img)\nfinal_merge_img=ravel_slic_img.copy().astype(np.uint8)\nfor _idx,_val in enumerate (ravel_slic_img):\n if _val in merge_class_list:\n final_merge_img[_idx]=255\n else:\n final_merge_img[_idx]=0\nfinal_merge_img=final_merge_img.reshape(slic_img.shape)\n\nclosing_final = cv2.morphologyEx(final_merge_img, cv2.MORPH_CLOSE, kernel)\nclosing_final = cv2.medianBlur(closing_final, 9)\n\n# the segmentation results over the slic results\nours_maskarr=filtered_closing.astype(np.uint8)\ndpi = 300\nfig = plt.figure(figsize=(3.5, 3.5), dpi=dpi) # 按原图像尺寸输出保存���像(尺寸和dpi)\n\n#show image\naxes = fig.add_axes([0, 0, 1, 1])\naxes.set_axis_off()\naxes.imshow(img_color)\n\n#draw the outline\ncontours, hierarchy = cv2.findContours(final_merge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\ncontour_arr=contours[0].squeeze()\nplt.plot(contour_arr[:,0],contour_arr[:,1],'cyan',linewidth=0.3)\n\n\n#draw the mask\naxes2 = fig.add_axes([0, 0, 1, 1])\naxes2.set_axis_off()\nmask_rgba2 = np.ones(([H, W, 4]))\nmask_rgba2[:, :, 0] = final_merge_img / 255.*0.75 # R\nmask_rgba2[:, :, 1] = final_merge_img / 255.*0.75# G\nmask_rgba2[:, :, 2] = final_merge_img / 255.*0.75 # B\nalpha = np.where(final_merge_img != 0, 0.9, 0) # not transparent\nmask_rgba2[:, :, -1] = alpha\naxes2.imshow(mask_rgba2)\n\n#original area\naxes1 = fig.add_axes([0, 0, 1, 1])\naxes1.set_axis_off()\nmask_rgba2 = np.ones(([H, W, 4]))\nmask_rgba2[:, :, 0] = ours_maskarr / 255. # red\nmask_rgba2[:, :, 1:3] = 0\nalpha = np.where(ours_maskarr != 0, 0.4, 0) # not transparent\nmask_rgba2[:, :, -1] = alpha\naxes1.imshow(mask_rgba2)\n\n\n#draw the grid\n\naxes = fig.add_axes([0, 0, 1, 1])\naxes.set_axis_off()\nboundaries = find_boundaries(slic_img, mode='subpixel',\n background=0)\ncoors=np.where(boundaries==True)\nboundary_arr=np.zeros((H,W))\nboundary_arr[((coors[0])/2).astype(np.int),((coors[1])/2).astype(np.int)]=255\nmask_rgba2 = np.ones(([H, W, 4]))\nmask_rgba2[:, :, 0] = boundary_arr / 255.*255/255. # R\nmask_rgba2[:, :, 1] = boundary_arr / 255.*238/255.# G\nmask_rgba2[:, :, 2] = boundary_arr / 255.*88/255. 
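# Editor's sketch: the element-by-element membership loop above can be collapsed into
# one vectorized call (np.isin requires numpy >= 1.13); on the toy labels below the
# result keeps exactly the superpixels whose ids appear in the merge list.
import numpy as np

labels = np.array([[1, 1, 2],
                   [3, 2, 2]])
merge_ids = np.array([2, 3])
merged = np.where(np.isin(labels, merge_ids), 255, 0).astype(np.uint8)
# merged == [[0, 0, 255], [255, 255, 255]]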
# B\n\nalpha = np.where(boundary_arr != 0, 0.8, 0) # not transparent\nmask_rgba2[:, :, -1] = alpha\naxes1.imshow(mask_rgba2)\n\n# draw line\n\nfor _geo in label_json:\n if _geo[0]['type'] == 'LineString':\n # 设置道路中心线缓冲区作为初始区域\n\n roadpoints_coordinates = _geo[0]['coordinates']\n\n line = LineString(roadpoints_coordinates)\n road_x,road_y=line.xy\n plt.plot(road_x,road_y,'#424242',linewidth=1)\n\n\n\n\n\n# the final results\n\n\nfig.savefig(os.path.join(save_dir,prename+'_GA.tif'))\n\n","repo_name":"lebusini/RoadSegVGI","sub_path":"generate_GA.py","file_name":"generate_GA.py","file_ext":"py","file_size_in_byte":8371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29311295877","text":"\n__doc__ = '''\nSubpackage containing the modules that implement web stuff for projects\n'''\n\nfrom twisted.web.server import NOT_DONE_YET\n\nfrom page import Page\nfrom routing import Router, Route, RouteDispatcher\nfrom script import Script, ScriptManager, ScriptError\nfrom response import (\n Response, NotFound, NotImplemented, Ok, InternalServerError,\n BadRequest, Conflict, AlreadyExists, Found, Unauthorized\n)\nfrom stylesheet import (\n Stylesheet, StylesheetError, InvalidFile, InvalidFileExtension,\n FileDontExists\n)\n\nfrom websocket import WebSocketError, WebSocketProtocol, WebSocketFactory\n\n\n__all__ = [\n 'Page',\n 'Router', 'Route', 'RouteDispatcher',\n 'Response', 'NotFound', 'NotImplemented', 'Ok', 'InternalServerError',\n 'BadRequest', 'Conflict', 'AlreadyExists', 'Found', 'Unauthorized',\n 'Script', 'ScriptManager', 'ScriptError',\n 'Stylesheet', 'StylesheetError', 'InvalidFile', 'InvalidFileExtension',\n 'FileDontExists',\n 'WebSocketError', 'WebSocketProtocol', 'WebSocketFactory',\n 'NOT_DONE_YET'\n]\n","repo_name":"PyMamba/mamba-framework","sub_path":"mamba/web/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"11734913946","text":"from flask import render_template,request,redirect,url_for\nfrom flask_login import login_required\nfrom db import db\nfrom models.category import Category\nfrom models.dish import Dish\nfrom models.form import Form\nfrom utils import is_staff\n\n@login_required\ndef show_dishes(id):\n form = Form()\n category = Category.query.get(id)\n return render_template('dishes/show_dishes.html',category=category,form=form)\n\n@login_required\ndef add_dish():\n form = Form()\n categories = Category.query.all()\n if request.method == 'POST':\n new_dish = Dish(\n name = form.name.data,\n price = form.price.data,\n description = form.description.data,\n image = form.image.data, \n is_gluten_free = form.is_gluten_free.data,\n is_vegeterian = form.is_vegeterian.data,\n category_id = request.form['category']\n )\n db.session.add(new_dish)\n db.session.commit()\n return redirect(url_for('category.menu'))\n return is_staff('dishes/add_dish.html',form=form,category=categories)\n\n@login_required\ndef dish_management():\n categories = Category.query.all()\n return is_staff('dishes/dish_management.html',category=categories)\n\n@login_required\ndef edit_dish(id):\n form = Form()\n dish = Dish.query.get(id)\n categories = Category.query.all()\n if request.method == 'POST':\n dish.name = form.name.data\n dish.price = form.price.data\n dish.description = form.description.data\n dish.image = form.image.data \n dish.is_gluten_free = form.is_gluten_free.data\n dish.is_vegeterian = 
form.is_vegeterian.data\n dish.category_id = request.form['category']\n db.session.commit()\n return redirect(url_for('dish.dish_management'))\n return is_staff('dishes/edit_dish.html',dish=dish,category=categories,form=form)\n\n@login_required\ndef delete_dish(id):\n dish = Dish.query.get(id)\n if request.method == 'POST':\n db.session.delete(dish)\n db.session.commit()\n return redirect(url_for('dish.dish_management'))\n return is_staff('dishes/delete_dish.html',dish=dish)\n","repo_name":"Sh4Qy/Yammi-Website","sub_path":"controllers/dishes.py","file_name":"dishes.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42438170316","text":"#Author: Manuel Gonzalez-Rivero\r\n#Date: June 2016\r\n#Purpose: to reshape and cleaup data from viewshed analysis for the purpuse of this study\r\n\r\nimport sys, os.path\r\nsys.path.insert(0, 'PATH_TO_ARCHGIS')\r\nimport arcpy\r\nimport os\r\nimport arcgisscripting\r\nfrom arcpy import env\r\narcpy.ResetEnvironments()\r\nwks=\"PATH_TO WORKING_DIR\"\r\nenv.workspace = wks\r\ntableList=arcpy.ListTables()\r\n\r\nfor table in tableList:\r\n\tinTable = table\r\n\toutTable = \"%s\\\\vwshd\\\\%s\" % (wks, table)\r\n\ttempTableView = \"%s_TableView\" %(inTable)\r\n\texpression = arcpy.AddFieldDelimiters(tempTableView, \"Value\") + \" = 0\"\r\n\t# Execute CopyRows to make a new copy of the table\r\n\tarcpy.CopyRows_management(inTable, outTable)\r\n\r\n\t# Execute MakeTableView\r\n\tarcpy.MakeTableView_management(outTable, tempTableView)\r\n\r\n\t# Execute SelectLayerByAttribute to determine which rows to delete\r\n\tarcpy.SelectLayerByAttribute_management(tempTableView, \"NEW_SELECTION\", expression)\r\n\r\n\t# Execute GetCount and if some features have been selected, then execute\r\n\t# DeleteRows to remove the selected rows.\r\n\tif int(arcpy.GetCount_management(tempTableView).getOutput(0)) > 0:\r\n\t\tarcpy.DeleteRows_management(tempTableView)\r\n\t# Create ID field\r\n\tarcpy.AddField_management(tempTableView,\"ID\",\"SHORT\")\r\n\tfeatures = arcpy.UpdateCursor(tempTableView)\r\n\tfor feature in features:\r\n\t\tfeature.ID=feature.NAME\r\n\t\tfeatures.updateRow(feature)\r\ndel feature, features\r\n\t\r\n\r\n","repo_name":"mgonzalezrivero/fish-structural_complexity","sub_path":"protocols/Viewshed/cleanup_viewshed_table.py","file_name":"cleanup_viewshed_table.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"16783551613","text":"paper = list(map(int, input().split()))\nip = int(input())\nlst0 = [0, paper[1]]\nlst1 = [0, paper[0]]\nfor i in range(ip):\n code, n = map(int, input().split())\n if code == 0:\n lst0.append(n)\n\n elif code == 1:\n lst1.append(n)\n\nlst0.sort()\nlst1.sort()\nmax0 = 0\nmax1 = 0\nfor i in range(1, len(lst0)):\n dummy0 = lst0[i]-lst0[i-1]\n if dummy0 > max0:\n max0 = dummy0\n\nfor i in range(1, len(lst1)):\n dummy1 = lst1[i] - lst1[i - 1]\n if dummy1 > max1:\n max1 = dummy1\n\nprint(max0*max1)\n","repo_name":"yyytae0/algorithm-training","sub_path":"baekjoon/2628.py","file_name":"2628.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1004288686","text":"# Usage\n# python scripts/optical_flow_sparse_manual.py -u 'rtsp://admin:instar@192.168.2.19/livestream/13'\n# Click on video to select point to track\nimport sys\nimport numpy as 
np\nimport cv2\nimport argparse\nfrom imutils.video import VideoStream\n\n# Parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-u\", \"--url\", help=\"RTSP streaming URL\", default=\"rtsp://admin:instar@192.168.2.19/livestream/12\")\nargs = vars(ap.parse_args())\n\n# get video stream from IP camera\nprint(\"[INFO] starting video stream\")\nvs = VideoStream(args[\"url\"]).start()\n\n# first frame from stream\nframe = vs.read()\n\n# convert to grayscale\nframe_gray_init = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n# set min size of tracked object, e.g. 15x15px\nparameter_lucas_kanade = dict(winSize=(15, 15), maxLevel=4, criteria=(cv2.TERM_CRITERIA_EPS |\n cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n\n# define function to manually select object to track\ndef select_point(event, x, y, flags, params):\n global point, selected_point, old_points\n # record coordinates of mouse click\n if event == cv2.EVENT_LBUTTONDOWN:\n point = (x, y)\n selected_point = True\n old_points = np.array([[x, y]], dtype=np.float32)\n\n\n# associate select function with window Selector\ncv2.namedWindow('Optical Flow')\ncv2.setMouseCallback('Optical Flow', select_point)\n\n# initialize variables updated by function\nselected_point = False\npoint = ()\nold_points = ([[]])\n\n# create a black canvas the size of the initial frame\ncanvas = np.zeros_like(frame)\n\n# loop through the remaining frames of the video\n# and apply algorithm to track selected objects\nwhile True:\n # get next frame\n frame = vs.read()\n # covert to grayscale\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if selected_point is True:\n cv2.circle(frame, point, 5, (0, 0, 255), 2)\n # update object corners by comparing with found edges in initial frame\n new_points, status, errors = cv2.calcOpticalFlowPyrLK(frame_gray_init, frame_gray, old_points, None,\n **parameter_lucas_kanade)\n\n # overwrite initial frame with current before restarting the loop\n frame_gray_init = frame_gray.copy()\n # update to new edges before restarting the loop\n old_points = new_points\n\n x, y = new_points.ravel()\n j, k = old_points.ravel()\n\n # draw line between old and new corner point with random colour\n canvas = cv2.line(canvas, (int(x), int(y)), (int(j), int(k)), (0, 255, 0), 3)\n # draw circle around new position\n frame = cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 0), -1)\n\n result = cv2.add(frame, canvas)\n cv2.imshow('Optical Flow', result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\ncv2.destroyAllWindows()\nsys.exit()","repo_name":"mpolinowski/opencv2-tracking-algorithm","sub_path":"scripts/optical_flow_sparse_manual.py","file_name":"optical_flow_sparse_manual.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37620797269","text":"n, k = map(int,input().split())\r\n\r\nquantity = n//k\r\n\r\nif quantity > 9999:\r\n print(\"번호 초과 오류\")\r\nelse:\r\n i = 1\r\n while i <= quantity:\r\n print(\"F-{0:04d}\".format(i))\r\n i += 1\r\n","repo_name":"dyrnfmxm/python_practice","sub_path":"codeUp_1672.py","file_name":"codeUp_1672.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40516692626","text":"#!/usr/bin/python3.7\n\nfrom base64 import urlsafe_b64encode\n\n\n#Ref: https://pypi.org/project/virustotal-python/\nfrom virustotal_python import Virustotal\n\n\n\nvt_api_key = 
\"b4837f3abbd2ff89b9c28c6d463ff99c7a510e596c7b8936503b9a10a6583ac4\"\n\ndef analyze_url_vt(url):\n # v3 example\n vtotal = Virustotal(API_KEY=vt_api_key, API_VERSION=\"v3\")\n\n '''\n # v2 example\n try:\n # Send a URL to VirusTotal for analysis\n resp = vtotal.request(\"url/scan\", params={\"url\": url}, method=\"POST\")\n url_resp = resp.json()\n # Obtain scan_id\n scan_id = url_resp[\"scan_id\"]\n # Request report for URL analysis\n analysis_resp = vtotal.request(\"url/report\", params={\"resource\": scan_id})\n print(analysis_resp.response_code)\n pprint(analysis_resp.json())\n except Error as err:\n print(f\"An error occurred: {err}\\nCatching and continuing with program.\")\n '''\n # v3 example\n '''\n result = {}\n try:\n # Send URL to VirusTotal for analysis\n resp = vtotal.request(\"urls\", data={\"url\": url}, method=\"POST\")\n # URL safe encode URL in base64 format\n # https://developers.virustotal.com/v3.0/reference#url\n url_id = urlsafe_b64encode(url.encode()).decode().strip(\"=\")\n # Obtain the analysis results for the URL using the url_id\n analysis_resp = vtotal.request(f\"urls/{url_id}\")\n #print(json.dumps(analysis_resp.data))\n #pprint(analysis_resp.object_type)\n analysis_data = analysis_resp.data\n \n if 'attributes' in analysis_data and 'last_analysis_stats' in analysis_data['attributes']:\n # lbl_list = analysis_data['attributes']['last_analysis_stats']\n # for lbl in lbl_list:\n # if lbl_list[lbl] > 0 and lbl not in ['harmless', 'undetected', 'timeout']: result.append(lbl)\n engine_list = analysis_data['attributes']['last_analysis_results']\n #print(engine_list.values())\n #for engine in list(engine_list):\n # print(engine)\n engine_list_vals = list(engine_list.values())\n #print(engine_list_vals[3])\n for item in engine_list_vals:\n #print(item)\n item_res = item['result']\n item_cat = item['category']\n if item_cat not in ['harmless', 'undetected', 'timeout']:\n if item_res not in result:\n result[item_res] = 1\n else:\n result[item_res] += 1\n\n except Exception as err:\n #print(f\"An error occurred: {err}\\nCatching and continuing with program.\")\n #traceback.print_exc(file=sys.stdout)\n pass\n #return list(result.keys())\n return {'mal_status': result, 'result': analysis_data}\n '''\n\n\n result = {}\n analysis_data = None\n try:\n # Send URL to VirusTotal for analysis\n resp = vtotal.request(\"urls\", data={\"url\": url}, method=\"POST\")\n url_id = urlsafe_b64encode(url.encode()).decode().strip(\"=\")\n analysis_resp = vtotal.request(f\"urls/{url_id}\")\n analysis_data = analysis_resp.data\n\n if 'attributes' in analysis_data and 'last_analysis_stats' in analysis_data['attributes']:\n engine_list = analysis_data['attributes']['last_analysis_results']\n engine_list_vals = list(engine_list.values())\n for item in engine_list_vals:\n item_res = item['result']\n item_cat = item['category']\n if item_cat not in ['harmless', 'undetected', 'timeout']:\n if item_res not in result:\n result[item_res] = 1\n else:\n result[item_res] += 1\n\n except Exception as err:\n pass\n\n return {'mal_status': result, 'result': analysis_data}\n\n\n\n# Pretty print\ndef pprint(dictionary, indent=0):\n for key, value in dictionary.items():\n if isinstance(value, dict):\n print('\\t' * indent + str(key))\n pprint(value, indent+1)\n else:\n print('\\t' * indent + str(key) + ': ' + str(value))\n\n# Pretty write to file\ndef pwrite(file, dictionary, indent=0):\n for key, value in dictionary.items():\n if isinstance(value, dict):\n file.write('\\t' * indent + str(key) + '\\n')\n 
pwrite(file, value, indent+1)\n else:\n file.write('\\t' * indent + str(key) + ': ' + str(value) + '\\n')\n\n\n# f = open(\"/var/tmp/phishing/alexa_domains.txt\", \"r\")\n# output = open(\"/var/tmp/phishing/vt_alexa_domains_formatted.txt\", \"a\")\n# while True:\n# # Get next line from file\n# url = f.readline().strip()\n#\n# # if line is empty\n# # end of file is reached\n# if not url:\n# break\n# r = analyze_url_vt(url)\n# pwrite(output, r)\n# f.close()\n","repo_name":"ChelseaGuan/Improving-Phishing-Detection-Models","sub_path":"check_domain_with_vt.py","file_name":"check_domain_with_vt.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72159412642","text":"from abc import ABC, abstractmethod\nfrom agox.observer import Observer\nfrom agox.writer import Writer, agox_writer\n\nimport functools\n\nclass PostprocessBaseClass(ABC, Observer, Writer):\n\n def __init__(self, gets={'get_key':'candidates'}, sets={'set_key':'candidates'}, \n order=3, verbose=True, use_counter=True, prefix='', surname=''):\n Observer.__init__(self, gets=gets, sets=sets, order=order, surname=surname)\n Writer.__init__(self, verbose=verbose, use_counter=use_counter, prefix=prefix)\n\n self.add_observer_method(self.postprocess_candidates,\n sets=self.sets[0], gets=self.gets[0], order=self.order[0],\n handler_identifier='AGOX')\n\n def update(self):\n \"\"\"\n Used if the postprocessor needs to continously update, e.g. the training of a surrogate potential. \n \"\"\"\n pass\n\n @abstractmethod\n def postprocess(self, candidate):\n \"\"\"\n Method that actually do the post_processing\n \"\"\"\n return postprocessed_candidate\n\n def process_list(self, list_of_candidates):\n \"\"\"\n This allows all postproccesors to act on a list of candidates serially.\n This function can be overwritten by sub-class to implement parallelism. \n \"\"\"\n processed_candidates = []\n for candidate in list_of_candidates:\n processed_candidate = self.postprocess(candidate)\n processed_candidates.append(processed_candidate)\n return processed_candidates\n\n def immunity_decorator(func):\n @functools.wraps(func)\n def wrapper(self, candidate):\n if candidate is None: \n return None\n if candidate.get_postprocess_immunity():\n return candidate\n else:\n return func(self, candidate)\n return wrapper\n \n def immunity_decorator_list(func):\n @functools.wraps(func)\n def wrapper(self, candidates):\n non_immune_candidates = []\n immune_candidates = []\n for candidate in candidates:\n if not candidate.get_postprocess_immunity():\n non_immune_candidates.append(candidate)\n else:\n immune_candidates.append(candidate)\n\n if len(non_immune_candidates) > 0:\n return func(self, non_immune_candidates) + immune_candidates\n else:\n return immune_candidates\n return wrapper\n\n def __add__(self, other):\n return SequencePostprocess(processes=[self, other], order=self.order)\n\n @agox_writer\n @Observer.observer_method\n def postprocess_candidates(self, state): \n candidates = state.get_from_cache(self, self.get_key)\n \n if self.do_check():\n candidates = self.process_list(candidates)\n candidates = list(filter(None, candidates))\n\n # Add data in write mode - so overwrites! 
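(mode='w' replaces anything previously cached under set_key)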
\n state.add_to_cache(self, self.set_key, candidates, mode='w')\n \n\nclass SequencePostprocess(PostprocessBaseClass):\n\n name = 'PostprocessSequence'\n\n def __init__(self, processes=[], order=None):\n self.processes = processes\n self.order = order\n\n def postprocess(self, candidate):\n for process in self.processes:\n candidate = process.postprocess(candidate)\n\n return candidate\n\n def process_list(self, list_of_candidates):\n for process in self.processes:\n list_of_candidates = process.process_list(list_of_candidates)\n return list_of_candidates\n\n def __add__(self, other):\n self.processes.append(other)\n return self\n \n def attach(self, main):\n for j, process in enumerate(self.processes):\n process.update_order(process.postprocess_candidates, order=self.order[0]+j*0.1)\n process.attach(main)\n","repo_name":"kimrojas/agox","sub_path":"agox/postprocessors/ABC_postprocess.py","file_name":"ABC_postprocess.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43007522271","text":"##############################################################\n# demonstration script for resampling, stacking, mosaicking and subsetting SAR images\n# John Truckenbrodt 2017\n##############################################################\nimport sys\n#sys.path.insert(0, \"/geonfs01_vol1/qe89hep/spatialist\")\n#sys.path = ['/geonfs01_vol1/qe89hep/spatialist', '/geonfs02_vol2/software/local/lib/python2.7/site-packages/GDAL-2.2.1-py2.7-linux-x86_64.egg','/usr/local/lib/python2.7/site-packages/GDAL-2.0.0-py2.7-linux-x86_64.egg', '/geonfs02_vol2/software/local/lib/python2.7/site-packages', '/geonfs01_vol1/01_EMS_SALDI_KNP_S1/xx_2_mosaicing', '/usr/local/lib/python27.zip', '/usr/local/lib/python2.7', '/usr/local/lib/python2.7/plat-linux2', '/usr/local/lib/python2.7/lib-tk', '/usr/local/lib/python2.7/lib-old', '/usr/local/lib/python2.7/lib-dynload', '/homes2/geoinf/c3urma/.local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/site-packages']\n\n\nimport os\n\nfrom pyroSAR.ancillary import groupbyTime, seconds\nfrom spatialist import stack\nfrom spatialist.ancillary import finder\nfrom pyroSAR.drivers import identify\n\n# filename = \"F:/geodata/geo402/S2/xx_S2_indices/stack_evi/S2A_MSIL1C_20151229T075332_N0201_R135_T35JMH_mskd.vrt\"\n#\n# from pyroSAR import identify\n# id = identify(filename)\n# print(id.outname_base())\n\ndef main():\n\n dirs = [\"stack_savi\"] #, \"stack_ndvi\", \"stack_msavi\", \"stack_reip\", \"stack_rvi\", \"stack_dvi\"]\n\n resolution = [30, 30]\n\n # shapefile (for stack boundaries)\n shp = 'F:/geodata/geo402/02_features/LADYBRAND_final_enlarged_study_area.shp'\n\n # store results in separate files or one single stack file? 
If separate then dstfile is used as a directory.\n sep = True\n\n for dir in dirs:\n\n # define input directory containing files to be stacked\n dir_in = 'F:/geodata/geo402/S2/xx_S2_indices/' + dir\n print(dir_in)\n os.makedirs(dir_in, exist_ok=True)\n\n # define output file name\n dstfile = 'F:/geodata/geo402/S2/xx_S2_indices/mosaics/' + dir\n print(dstfile)\n\n # list files to be resampled; those not overlapping with the shapefile geometry will be excluded by function stack\n srcfiles = finder(dir_in, ['*'])\n\n # check whether dstfile is already a file\n if os.path.isfile(dstfile):\n raise IOError('dstfile already exists')\n \n # create groups of similar time stamps for mosaicking.\n # All images with a time stamp of less than 30s difference will be grouped\n groups = groupbyTime(srcfiles, seconds, 30)\n\n # final function call\n # groups will be mosaicked first\n # the resulting images will all have the same extent\n stack(srcfiles=groups, dstfile=dstfile, resampling='bilinear',\n targetres=resolution, srcnodata=-9999, dstnodata=-9999,\n shapefile=shp, sortfun=seconds, separate=sep, overwrite=True)\n # -tr 30 30 -te 463563.375 6739018 549706.4375 6791364\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"konstantinschellenberg/402slangbos","sub_path":"python/mosaic_S2.py","file_name":"mosaic_S2.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"28182041877","text":"from sys import stdin\n\nclass MyQueue:\n def __init__(self,size):\n self.size = size\n self.__en_stack = Mystack(size)\n self.__de_stack = Mystack(size)\n\n #0 : enqueue state, 1 : dequeue state\n self.__status = 0\n def is_empty(self):\n return self.__en_stack.is_empty() and self.__de_stack.is_empty()\n def peek(self):\n if self.__status == 0:\n self.change_status(self.__status)\n\n if self.__de_stack.is_empty():\n return -1\n\n return self.__de_stack.peek()\n\n def get_size(self):\n return self.__en_stack.top+1 if self.__status == 0 else self.__de_stack.top+1\n\n def back(self):\n if self.__status == 1:\n self.change_status(self.__status)\n\n if self.__en_stack.is_empty():\n return -1\n return self.__en_stack.peek()\n\n def en_queue(self,insert_data):\n if self.__status == 1:\n self.change_status(self.__status)\n\n if self.size == self.__en_stack.top + 1:\n raise Exception('queue is full')\n\n self.__en_stack.push(insert_data)\n def de_queue(self):\n if self.__status == 0:\n self.change_status(self.__status)\n\n if self.__de_stack.is_empty():\n return -1\n\n return self.__de_stack.pop()\n\n def change_status(self,status):\n if status == 0:\n while not self.__en_stack.is_empty():\n self.__de_stack.push(self.__en_stack.pop())\n self.__status = 1\n else:\n while not self.__de_stack.is_empty():\n self.__en_stack.push(self.__de_stack.pop())\n self.__status = 0\n\nclass Mystack:\n def __init__(self,size):\n self.data = [0]*size\n self.size = size\n self.top = -1\n\n def is_empty(self):\n return self.top == -1\n\n def peek(self):\n if self.is_empty():\n raise Exception('stack is empty')\n return self.data[self.top]\n def push(self,push_data):\n if self.size == self.top+1:\n raise Exception('stack is full')\n\n self.top += 1\n self.data[self.top] = push_data\n\n def pop(self):\n if self.is_empty():\n raise Exception('stack is empty')\n\n return_data = self.data[self.top]\n self.top -= 1\n return return_data\n\ninput = stdin.readline\nn = int(input())\n\ndef solv():\n q = MyQueue(n)\n for _ in range(n):\n command = input().strip().split()\n if command[0] == 
'push':\n q.en_queue(command[1])\n elif command[0] == 'pop':\n print(q.de_queue())\n elif command[0] == 'front':\n print(q.peek())\n elif command[0] == 'back':\n print(q.back())\n elif command[0] == 'size':\n print(q.get_size())\n elif command[0] == 'empty':\n print(1 if q.is_empty() else 0)\n\nsolv()","repo_name":"alsgh9948/Problem-Solving","sub_path":"baekjoon/10845.py","file_name":"10845.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14610580372","text":"# import numpy as np\nimport pandas as pd\nimport utils\nfrom crate_compile import crateCompile\nfrom song_corpus_analysis import analyseSongCorpus\nimport spotify_interactions as si\nfrom math import ceil,floor\nimport random\nimport datetime\n\ntoday=datetime.date.today()\n\nmodel_folder = \"pkl_vals\"\nplaylist_folder = \"playlist_csvs\"\nfid_sounds = \"/\".join((model_folder,\"sounds_compiled.pkl\"))\nfid_edge = \"/\".join((model_folder,\"edge_compiled.pkl\"))\nfid_pulse = \"/\".join((model_folder,\"pulse_compiled.pkl\"))\nfid_dw = \"/\".join((model_folder,\"dw_compiled.pkl\"))\nfid_crate = \"/\".join((model_folder,\"crates_compiled.pkl\"))\n\nsp = si.initSpotipy(\"playlist-read-private playlist-modify-private user-library-read\")#\nmode = \"dedupCrates\"#\"initEdgePulse\"\n\nif mode == \"dedupCrates\":\n si.dedupDF(fid_sounds)\n si.dedupDF(fid_edge)\n si.dedupDF(fid_pulse)\nelif mode == \"getAlbumsFromIds\":\n playID = si.getPlaylistID(sp,\"DJ Pull 01/22/2022 The Downselect, 2021\")\n idsAdjust = si.getTracksFromPlaylist(sp,playID,ret_track_info = False,ret_af = False,ret_pl_info=False)\n df_ret = si.addAlbumsToCrate(sp,idsAdjust,fid_crate)\n si.saveTrackDF(df_ret,\"crates_compiled.csv\")\n\nelif mode == \"modifyCrate\":\n trackDF = pd.read_pickle(fid_crate)\n idsAdjust = list(trackDF[\"Track ID\"])\n now = datetime.datetime.now()\n dtString = now.strftime(\"%m/%d/%Y\")\n trackDF[\"Date Added\"] = dtString\n trackDF.to_pickle(fid_crate)\n si.saveTrackDF(trackDF,'crates_compiled.csv')\n\nelif mode == \"modifySounds\":\n trackDF = pd.read_pickle(fid_sounds)\n idsAdjust = list(trackDF[\"Track ID\"])\n now = datetime.datetime.now()\n dtString = now.strftime(\"%m/%d/%Y\")\n trackDF[\"Date Added\"] = dtString\n trackDF.to_pickle(fid_crate)\n si.saveTrackDF(trackDF,'sounds_compiled.csv')\nelif mode == \"initSounds\":\n plCompile_sounds = \"The Sound of\"\n si.saveTracksFromPlaylists(sp,plCompile_sounds,fid_sounds)\n print(\"Sounds compiled!\")\nelif mode==\"modifyCrate\":\n trackDF = pd.read_pickle(fid_crate)\n idsAdjust = list(trackDF[\"Track ID\"])\n now = datetime.datetime.now()\n dtString = now.strftime(\"%m/%d/%Y\")\n trackDF[\"Date Added\"] = dtString\n trackDF.to_pickle(fid_crate)\n si.saveTrackDF(trackDF,'crates_compiled.csv')\nelif mode==\"initCrate\":\n plCompile_crate = \"/*\"\n si.saveTracksFromPlaylists(sp,plCompile_crate,fid_crate)\n print(\"Crates compiled!\")\n\nelif mode == \"manualSetup\":\n # idsAdjust = si.cyclePlaylist(sp,\"The Downselect\",nDaysCycle = 7,removeTracks=True,newPl= True)\n # if idsAdjust:\n # si.addToPlaylist(sp,\"downselect_downselect_listen\",idsAdjust)\n now = datetime.datetime.now()\n dtString=now.strftime(\"%m/%d/%Y\")\n\n # Monday, create discover weekly.\n # playlistTitle = \"Combined DW for the Week of \" + dtString\n # playlistSearch = \"Discover Weekly\"\n # playlistRemove = \"Discovery Avoid\"\n # si.compilePlaylists(sp,playlistSearch,playlistRemove,playlistTitle)\n playlistTitle = 
\"Combined RR for the Week of \" + dtString\n playlistSearch = \"Release Radar\"\n playlistRemove = \"Discovery Avoid\"\n si.compilePlaylists(sp,playlistSearch,playlistRemove,playlistTitle)\n\nelif mode == \"getGenresDownsel\":\n plGet = \"The Downselect, 2021\"\n playID = si.getPlaylistID(sp,plGet)\n\n trackDict,analysisDict = si.getTracksFromPlaylist(sp,playID,True,True)\n if analysisDict is None:\n trackDict,analysisDict = si.getTracksFromPlaylist(sp,playID,True,True)\n\n idxUse = [idx for idx,val in enumerate(analysisDict) if not (val is None)]\n trackDictUse = [trackDict[idx] for idx in idxUse]\n analysisDictUse = [analysisDict[idx]for idx in idxUse]\n trackDF = si.tracksToDF(trackDictUse,analysisDictUse,False)\n (genreVals,genreCount) = si.getTopGenres(sp,trackDF)\n DF_genre = pd.DataFrame({\"Genre\":genreVals,\"GenreCount\":genreCount})\n DF_genre.to_csv(\"end_of_year_genres.csv\")\n\nelif mode==\"initEdgePulse\":\n plCompile_edge = \"Combined Edge Playlists\"\n plCompile_pulse = \"Combined Pulse Playlists\"\n plCompile_dw = \"Combined DW Playlists \"\n si.saveTracksFromPlaylist(sp,plCompile_pulse,fid_pulse)\n print(\"Pulse compiled!\")\n si.saveTracksFromPlaylist(sp,plCompile_edge,fid_edge)\n print(\"Edge compiled!\")\nelif mode == \"removeEdgeLiked\":\n si.saveTrackDF(pd.read_pickle(fid_edge),'pre_removal.csv')\n si.removeSavedTracks_df(sp,fid_edge)\n si.saveTrackDF(pd.read_pickle(fid_edge),'post_removal.csv')\nelif mode == \"newEdgeCluster\":\n RECOMP_EDGE = False\n nExport_edge = 1\n #Compile crates weekly\n\n if RECOMP_EDGE:\n dateEarly=today-datetime.timedelta(days=7)\n dateLate = today\n dateIn = [dateEarly,dateLate]\n\n nTracks = si.getNewTracks_df(sp, fid_edge,\"The Edge of\",dateIn)\n print(\"Number of tracks in pool: \" + str(nTracks))\n # si.crateCompile(sp,fid_in = fid_edge,searchIDs=[\"The Edge of\"])\n # analyseSongCorpus(rangeClusterSearch=[0+int(np.floor(nTracks/30)),100+int(np.floor(nTracks/30))],poolSize=10e3,showPlot=False,fid_in=fid_edge,out_append=\"edge_\")\n\n si.clusterSinglePlaylist(sp,model_folder,fid_edge,\"Combined Edge Playlists\",1,analyzeCorpus=RECOMP_EDGE,out_append=\"edge\", pklIn=True)\n\nelif mode == \"newPulseCluster\":\n RECOMP_PULSE = True\n nExport_pulse = 1\n #Compile crates weekly\n\n if RECOMP_PULSE:\n dateEarly=today-datetime.timedelta(days=7)\n dateLate = today\n dateIn = [dateEarly,dateLate]\n\n nTracks = si.getNewTracks_df(sp, fid_pulse,\"The Pulse of\",dateIn)\n print(\"Number of tracks in pool: \" + str(nTracks))\n # si.crateCompile(sp,fid_in = fid_edge,searchIDs=[\"The Edge of\"])\n # analyseSongCorpus(rangeClusterSearch=[0+int(np.floor(nTracks/30)),100+int(np.floor(nTracks/30))],poolSize=10e3,showPlot=False,fid_in=fid_edge,out_append=\"edge_\")\n\n si.clusterSinglePlaylist(sp,model_folder,fid_pulse,\"Combined Pulse Playlists\",1,analyzeCorpus=RECOMP_PULSE,out_append=\"pulse\", pklIn=True)\n\nelif mode == \"pulseCluster\":\n calcClusters= False\n model_folder = \"pkl_vals\"\n\n fid_pulse = \"/\".join((model_folder,\"pulse_compiled.pkl\"))\n si.clusterSinglePlaylist(sp,model_folder,fid_pulse,\"Combined Pulse Playlists\",2,analyzeCorpus=calcClusters)\n\nelif mode == \"refreshEdgePulse\":\n today = datetime.date.today()\n dateEarly=today-datetime.timedelta(days=7)\n dateLate = today\n dateIn = [dateEarly,dateLate]\n si.getNewTracks(sp,\"The Edge of\",\"Combined Edge Playlists\",dateIn)\n si.getNewTracks(sp,\"The Pulse of\",\"Combined Pulse Playlists\",dateIn)\nelif mode == \"recDrawPlaylist\":\n today = datetime.date.today()\n djDate = 
today.strftime(\"%m/%d/%Y\")\n\n plSearch=\"Genre Selects: Lofi + beats \"#\"The Downselect, July 2021 Week 3\"#\"The Downselect\"\n print(\"here\")\n targetSampleSize = 100 #20\n ITER_MAX = 50#100\n tempoDelta = 5\n keyDelta = 3#3#6\n popRange = [0, 60]\n\n plName = \"DJ Pull \"+ djDate+\" \" + plSearch\n si.getDJrecs(sp,plSearch,plName,targetSampleSize,tempoDelta,keyDelta,popRange,ITER_MAX)\nelif mode == \"updateEdge\":\n dateEarly=today-datetime.timedelta(days=6)\n dateLate = today\n dateIn = [dateEarly,dateLate]\n si.getNewTracks(sp,\"The Edge of\",\"Combined Edge Playlists\",dateIn)\n # si.getNewTracks(sp,\"The Pulse of\",\"lol\",dateIn)\nelif mode == \"updatePulse\":\n dateEarly=today-datetime.timedelta(days=6)\n dateLate = today\n dateIn = [dateEarly,dateLate]\n si.getNewTracks(sp,\"The Pulse of\",\"Combined Pulse Playlists\",dateIn)\n # si.getNewTracks(sp,\"The Pulse of\",\"lol\",dateIn)\nelif mode == \"recDateUpdate\":\n\n # createNewPl = (today.day == 1)\n # idsAdjust = si.cyclePlaylist(sp,\"The Downselect\",nDaysCycle = 7,removeTracks=True,newPl= createNewPl)\n # if idsAdjust:\n # si.addToPlaylist(sp,\"downselect_downselect_listen\",idsAdjust)\n now = datetime.datetime.now()\n dtString=now.strftime(\"%m/%d/%Y\")\n\n playlistTitle = \"Combined RR for the Week of \" + dtString\n playlistSearch = \"Release Radar\"\n playlistRemove = \"Discovery Avoid\"\n si.compilePlaylists(sp,playlistSearch,playlistRemove,playlistTitle)\n ### TODO: Update this to also update the edge playlists with new additions.\n\n\nelif mode == \"djRadioTest\":\n today = datetime.date.today()\n djDate = today.strftime(\"%m/%d/%Y\")\n\n plSearch=\"wip | mode2\"#\"The Downselect, July 2021 Week 3\"#\"The Downselect\"\n\n targetSampleSize = 100 #20\n tempoDelta = 5\n keyDelta = 3#6\n popRange = [0, 100]\n\n sp = si.initSpotipy(\"playlist-read-private playlist-modify-private\")#\n\n pl_id = si.getPlaylistID(sp,plSearch)\n trackDict,analysisDict = si.getTracksFromPlaylist(sp,pl_id,True,True)\n trackDF = si.tracksToDF(trackDict,analysisDict)\n df_single = trackDF.sample(n=1)\n tempoRange = [78, 85]\n key_dj = int(df_single[\"DJ Key\"])\n keyRange = [key_dj ,keyDelta+key_dj] ### NOTE: this doesn't account for edce case of key <\n keyDiff = 12 - (keyDelta+key_dj)\n if keyDiff < 0:\n keyRange = [12-keyDelta,12]\n# seedDF = si.djSort(trackDF,tempoRange,keyRange)\n seedDF = si.djSort(trackDF,tempoRange,keyRange)\n\nelif mode == \"recsQuery\":\n ''' This set of code useful for testing the recommendation section'''\n # There's a lot of things that this can be used for (can limit tempo, key etc in rec search and use artists as target)\n #This will become a function when I decide what I want to do with it\n # Can also seed with artists and genres. 
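(the recommendations endpoint counts seed_tracks, seed_artists and seed_genres together.) 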
Max of 5 seeds total.\n plSearch=\"Genre Selects: Lofi + beats\"#\"The Downselect, July 2021 Week 3\"#\"The Downselect\"\n targetSampleSize = 100#5*2 #20\n pl_id = si.getPlaylistID(sp,plSearch)\n trackDict,analysisDict = si.getTracksFromPlaylist(sp,pl_id,True,True)\n trackDF = si.tracksToDF(trackDict,analysisDict)\n tempoRange = [80,87]#[78, 85]\n trackIDs = [item[\"id\"] for item in trackDict if item[\"id\"]]\n popRange = [0,60]\n recIDs = []\n recIDsUnique = [0]\n iterCount = 0\n N_REP = 100\n while len(recIDsUnique) < targetSampleSize and iterCount < N_REP:\n iterCount += 1\n recRet = sp.recommendations(seed_tracks=trackIDs[0:5],limit=targetSampleSize,min_tempo=tempoRange[0],max_tempo = tempoRange[1],market=\"US\",min_popularity=popRange[0],max_popularity=popRange[1])\n recTracks= recRet[\"tracks\"]\n recIDs = recIDs + [elt[\"id\"] for elt in recTracks if (\"US\" in elt[\"available_markets\"])]\n recIDsUnique = list(dict.fromkeys(recIDs))\n if len(recIDsUnique)> targetSampleSize:\n break\n print(str(iterCount) + \" : \" + str(len(recIDsUnique)))\n ### TODO: get recTracks into recIDsUnique format, get sp.audio_features() of the ids to djsort similar playlists.\n si.createPlaylist(sp,\"Similar to \"+plSearch,recIDsUnique,incAnalysis = False)\nelse:\n print(\"Invalid command string.\")\n","repo_name":"alexmbrun/spotify_playlist_experiments","sub_path":"playlist_viz/spotify_playlist_scratch.py","file_name":"spotify_playlist_scratch.py","file_ext":"py","file_size_in_byte":10804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"38400453421","text":"\"\"\"\n Diagnostic eddy classes\n =======================\n\n Classes defining eddy related fields diagnostics.\n\n Description of the classes\n --------------------------\n\n * :class:`MiddleAtmosphericEddyHeatFluxDiagnostic`: Diagnostic giving the middle atmospheric eddy heat flux field.\n * :class:`MiddleAtmosphericEddyHeatFluxProfileDiagnostic`: Diagnostic giving the middle atmospheric eddy heat flux zonally averaged profile.\n\n\"\"\"\n\nimport warnings\n\nimport numpy as np\nfrom scipy.integrate import simpson\nimport matplotlib.pyplot as plt\n\nfrom qgs.diagnostics.base import FieldDiagnostic, ProfileDiagnostic\nfrom qgs.diagnostics.temperatures import MiddleAtmosphericTemperatureAnomalyDiagnostic\nfrom qgs.diagnostics.wind import MiddleAtmosphericVWindDiagnostic\n\n\nclass MiddleAtmosphericEddyHeatFluxDiagnostic(FieldDiagnostic):\n \"\"\"Diagnostic giving the middle atmospheric eddy heat flux field.\n Computed as :math:`v'_{\\\\rm a} \\\\, T'_{\\\\rm a}` and scaled with the\n atmospheric specific heat capicity if available (through the `heat_capacity` argument or the\n :attr:`~.AtmosphericTemperatureParams.gamma` parameter).\n\n Parameters\n ----------\n\n model_params: QgParams\n An instance of the model parameters.\n delta_x: float, optional\n Spatial step in the zonal direction `x` for the gridded representation of the field.\n If not provided, take an optimal guess based on the provided model's parameters.\n delta_y: float, optional\n Spatial step in the meridional direction `y` for the gridded representation of the field.\n If not provided, take an optimal guess based on the provided model's parameters.\n dimensional: bool, optional\n Indicate if the output diagnostic must be dimensionalized or not.\n Default to `True`.\n temp_mean_state: MiddleAtmosphericTemperatureDiagnostic, optional\n A temperature diagnostic with a long trajectory as data to compute the mean temperature field.\n 
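Typically this is a time mean over a trajectory much longer than the one being diagnosed.\n 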
If not provided, compute the mean with the data stored in the object.\n vwind_mean_state: MiddleAtmosphericVWindDiagnostic, optional\n A :math:`v` wind diagnostic with a long trajectory as data to compute the mean wind field.\n If not provided, compute the mean with the data stored in the object.\n heat_capacity: float, optional\n The air specific heat capacity. If not provided, uses the one of :attr:`~.AtmosphericTemperatureParams.gamma` if\n available or or let the heat flux in K m s^{-1}.\n\n Attributes\n ----------\n\n dimensional: bool\n Indicate if the output diagnostic must be dimensionalized or not.\n\n \"\"\"\n\n def __init__(self, model_params, delta_x=None, delta_y=None, dimensional=True, temp_mean_state=None, vwind_mean_state=None, heat_capacity=None):\n\n FieldDiagnostic.__init__(self, model_params, dimensional)\n\n self._plot_title = r'Eddy heat flux in the middle of the atmosphere'\n if heat_capacity is not None or model_params.atemperature_params.gamma is not None:\n self._plot_title += r\" $\\gamma_{\\rm a} v'_{\\rm a} \\, T'_{\\rm a}$\"\n self._plot_units = r\" (in \" + r'W m$^{-1}$' + r\")\"\n else:\n self._plot_title += r\" $v'_{\\rm a} \\, T'_{\\rm a}$\"\n self._plot_units = r\" (in \" + r'K m s$^{-1}$' + r\")\"\n self._default_plot_kwargs['cmap'] = plt.get_cmap('magma')\n self._color_bar_format = False\n\n self._tdiag = MiddleAtmosphericTemperatureAnomalyDiagnostic(model_params, delta_x, delta_y, dimensional)\n self._vdiag = MiddleAtmosphericVWindDiagnostic(model_params, delta_x, delta_y, dimensional)\n\n self._X = self._tdiag._X\n self._Y = self._tdiag._Y\n\n self._temp_mean_state = temp_mean_state\n self._vwind_mean_state = vwind_mean_state\n\n self._heat_capacity = heat_capacity\n\n def _compute_grid(self, delta_x=None, delta_y=None):\n pass\n\n def _configure(self, delta_x=None, delta_y=None):\n pass\n\n def _get_diagnostic(self, dimensional):\n\n self._tdiag.set_data(self._time, self._data)\n self._vdiag.set_data(self._time, self._data)\n\n T = self._tdiag._get_diagnostic(dimensional)\n V = self._vdiag._get_diagnostic(dimensional)\n\n if self._temp_mean_state is not None:\n Tmean = self._temp_mean_state._get_diagnostic(dimensional).mean(axis=0)\n else:\n Tmean = np.mean(T, axis=0)\n if self._vwind_mean_state is not None:\n Vmean = self._vwind_mean_state._get_diagnostic(dimensional).mean(axis=0)\n else:\n Vmean = np.mean(V, axis=0)\n\n self._diagnostic_data = (T - Tmean) * (V - Vmean)\n if dimensional:\n if self._model_params.atemperature_params.gamma is not None:\n self._diagnostic_data = self._diagnostic_data * self._model_params.atemperature_params.gamma\n elif self._heat_capacity is not None:\n self._diagnostic_data = self._diagnostic_data * self._heat_capacity\n self._diagnostic_data_dimensional = True\n else:\n self._diagnostic_data_dimensional = False\n return self._diagnostic_data\n\n\nclass MiddleAtmosphericEddyHeatFluxProfileDiagnostic(ProfileDiagnostic):\n \"\"\"Diagnostic giving the middle atmospheric eddy heat flux zonally averaged profile.\n Computed as :math:`\\\\Phi_{\\\\rm e} = \\\\overline{v'_{\\\\rm a} \\\\, T'_{\\\\rm a}} = \\\\frac{n}{2\\\\pi} \\\\, \\\\int_0^{2\\\\pi/n} \\\\Phi_{\\\\rm e} \\\\, \\\\mathrm{d} x` where\n :math:`v'_{\\\\rm a} \\\\, T'_{\\\\rm a}` is the eddy heat flux scaled with the\n atmospheric specific heat capicity if available (through the `heat_capacity` argument or the\n :attr:`~.AtmosphericTemperatureParams.gamma` parameter).\n\n Parameters\n ----------\n\n model_params: QgParams\n An instance of the model parameters.\n 
delta_x: float, optional\n Spatial step in the zonal direction `x` for the gridded representation of the field.\n If not provided, take an optimal guess based on the provided model's parameters.\n delta_y: float, optional\n Spatial step in the meridional direction `y` for the gridded representation of the field.\n If not provided, take an optimal guess based on the provided model's parameters.\n dimensional: bool, optional\n Indicate if the output diagnostic must be dimensionalized or not.\n Default to `True`.\n temp_mean_state: MiddleAtmosphericTemperatureDiagnostic, optional\n A temperature diagnostic with a long trajectory as data to compute the mean temperature field.\n If not provided, compute the mean with the data stored in the object.\n vwind_mean_state: MiddleAtmosphericVWindDiagnostic, optional\n A :math:`v` wind diagnostic with a long trajectory as data to compute the mean wind field.\n If not provided, compute the mean with the data stored in the object.\n heat_capacity: float, optional\n The air specific heat capacity. If not provided, uses the one of :attr:`~.AtmosphericTemperatureParams.gamma` if\n available or or let the heat flux in K m s^{-1}.\n\n Attributes\n ----------\n\n dimensional: bool\n Indicate if the output diagnostic must be dimensionalized or not.\n\n \"\"\"\n\n def __init__(self, model_params, delta_x=None, delta_y=None, dimensional=True, temp_mean_state=None, vwind_mean_state=None, heat_capacity=None):\n\n ProfileDiagnostic.__init__(self, model_params, dimensional)\n\n self._flux = MiddleAtmosphericEddyHeatFluxDiagnostic(model_params, delta_x, delta_y, dimensional, temp_mean_state, vwind_mean_state, heat_capacity)\n self._plot_title = r'Zonally averaged profile'\n self._plot_label = r'Middle atmospheric eddy heat flux'\n if heat_capacity is not None or model_params.atemperature_params.gamma is not None:\n self._plot_label += r\" $\\gamma_{\\rm a} \\overline{v'_{\\rm a} \\, T'_{\\rm a}}$\"\n self._plot_units = r'W m$^{-1}$'\n else:\n self._plot_label += r\" $\\overline{v'_{\\rm a} \\, T'_{\\rm a}}$\"\n self._plot_units = r'K m s$^{-1}$'\n self._axis_label = r'$y$'\n self._configure()\n\n def _configure(self):\n self._points_coordinates = self._flux._Y[:, 0]\n\n def _get_diagnostic(self, dimensional):\n\n self._flux.set_data(self._time, self._data)\n\n flux = self._flux._get_diagnostic(dimensional)\n dX = self._flux._X[0, 1] - self._flux._X[0, 0]\n\n iflux = simpson(flux, dx=dX, axis=2) / (2*np.pi / self._model_params.scale_params.n)\n\n self._diagnostic_data = iflux\n if dimensional:\n self._diagnostic_data_dimensional = True\n else:\n self._diagnostic_data_dimensional = False\n return self._diagnostic_data\n\n\nif __name__ == '__main__':\n from qgs.params.params import QgParams\n from qgs.integrators.integrator import RungeKuttaIntegrator\n from qgs.functions.tendencies import create_tendencies\n\n pars = QgParams()\n pars.set_atmospheric_channel_fourier_modes(2, 2)\n f, Df = create_tendencies(pars)\n integrator = RungeKuttaIntegrator()\n integrator.set_func(f)\n ic = np.random.rand(pars.ndim) * 0.1\n integrator.integrate(0., 200000., 0.1, ic=ic, write_steps=5)\n time, traj = integrator.get_trajectories()\n integrator.terminate()\n\n flux = MiddleAtmosphericEddyHeatFluxDiagnostic(pars)\n flux.set_data(time, traj)\n\n iflux = MiddleAtmosphericEddyHeatFluxProfileDiagnostic(pars, delta_x=0.25, delta_y=0.15)\n iflux.set_data(time, 
traj)\n","repo_name":"Climdyn/qgs","sub_path":"qgs/diagnostics/eddy.py","file_name":"eddy.py","file_ext":"py","file_size_in_byte":9529,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"72782175521","text":"#1\ndef number_of_food_groups():\n return 5\nprint(number_of_food_groups())\n#Prediction: 5 (correct)\n\n#2\ndef number_of_military_branches():\n return 5\nprint(number_of_days_in_a_week_silicon_or_triangle_sides() + number_of_military_branches())\n# Prediction: Error, function requires arguments, was given none.\n# Correct Answer: NameError: name 'number_of_days_in_a_week_silicon_or_triangle_sides' is not defined\n# I thought the function could be seen in the rest of the file, but could not. Therefore it was not yet defined. \n\n#3\ndef number_of_books_on_hold():\n return 5\n return 10\nprint(number_of_books_on_hold())\n# Prediction: 5 (correct)\n\n\n#4\ndef number_of_fingers():\n return 5\n print(10)\nprint(number_of_fingers())\n# Prediction: 5 (correct)\n\n\n#5\ndef number_of_great_lakes():\n print(5)\nx = number_of_great_lakes()\nprint(x)\n# Prediction: 5 (correct) \\n Error, variable not defined (incorrect)\n# Correct Answer: \n # 5\n # None\n# Variable IS defined (function), it just doesn't return anything so it's technically = None. \n\n\n\n#6\ndef add(b,c):\n print(b+c)\nprint(add(1,2) + add(2,3))\n# Prediction: 3 (correct) \\n 5 (correct) \\n 8 (incorrect)\n# Correct Answer: \n # 3\n # 5\n # TypeError: unsupported operand type(s) for +: 'NoneType' and 'NoneType'\n# The function doesn't return a value, just prints. The function technically = None and cannot be added together or concatinated.\n\n\n#7\ndef concatenate(b,c):\n return str(b)+str(c)\nprint(concatenate(2,5))\n# Prediction: 25 (correct)\n\n\n#8\ndef number_of_oceans_or_fingers_or_continents():\n b = 100\n print(b)\n if b < 10:\n return 5\n else:\n return 10\n return 7\nprint(number_of_oceans_or_fingers_or_continents())\n# Prediction: 10 (incorrect)\n# Correct Answer: \n # 100\n # 10\n# I didn't see the first print statement\n\n\n#9\ndef number_of_days_in_a_week_silicon_or_triangle_sides(b,c):\n if b list[int]:\n\n # recursive solution is trivial as mentioned in description\n #\n # if not root:\n # return []\n # ans = [root.val]\n # for child in root.children:\n # ans += self.preorder(child)\n # return ans\n\n if not root:\n return []\n\n ans = []\n stack = [root]\n while stack:\n node = stack.pop()\n ans.append(node.val)\n for child in reversed(node.children):\n stack.append(child)\n return ans","repo_name":"Sadomtsevvs/Leetcode","sub_path":"589. N-ary Tree Preorder Traversal.py","file_name":"589. 
N-ary Tree Preorder Traversal.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31967611761","text":"import numpy as np\nfrom scipy.ndimage import imread\nimport matplotlib.pyplot as plt\nimport random\nfrom collections import deque\nfrom scipy.misc import imresize\nfrom collections import deque\nfrom math import pi, cos\nfrom transformtest import dct2, idct2\n\ndef shift(a, n):\n\tb = np.roll(a, n)\n\tif n > 0:\n\t\tb[:n] = 0\n\telse:\n\t\tb[n:] = 0\n\treturn b\n\ndef findStart(array):\n\tshape = array.shape\n\tindices = []\n\trandom.seed(8734)\n\tfor x in range(0, shape[0]):\n\t\tfor y in range(0, shape[1]):\n\t\t\tif array[x, y]:\n\t\t\t\tindices.append((x, y))\n\trandom.shuffle(indices)\n\tq = deque(indices)\n\t\n\tdef expand(rectangle, direction):\n\t\t((x0, y0),(x1, y1)) = rectangle\n\t\tif direction == 0: # up\n\t\t\tif np.all(array[x0:x1, y0-1:y0]):\n\t\t\t\tarray[x0:x1, y0-1:y0] = False\n\t\t\t\treturn ((x0, y0-1),(x1, y1)), False\n\t\tif direction == 1: # right\n\t\t\tif np.all(array[x1:x1+1, y0:y1]):\n\t\t\t\tarray[x1:x1+1, y0:y1] = False\n\t\t\t\treturn ((x0, y0),(x1+1, y1)), False\n\t\tif direction == 2: # down\n\t\t\tif np.all(array[x0:x1, y1:y1+1]):\n\t\t\t\tarray[x0:x1, y1:y1+1] = False\n\t\t\t\treturn ((x0, y0),(x1, y1+1)), False\n\t\tif direction == 3: # left\n\t\t\tif np.all(array[x0-1:x0, y0:y1]):\n\t\t\t\tarray[x0-1:x0, y0:y1] = False\n\t\t\t\treturn ((x0-1, y0),(x1, y1)), False\n\t\treturn rectangle, True\n\n\twhile q:\n\t\tindex = q.popleft()\n\t\tif not array[index[0], index[1]]:\n\t\t\tcontinue\n\t\trectangle = ((index[0], index[1]), (index[0] + 1, index[1] + 1))\n\t\tarray[index[0], index[1]] = True\n\t\tdirection = 0\n\t\tfailureCount = 0\n\t\twhile True:\n\t\t\trectangle, failed = expand(rectangle, direction)\n\t\t\tif failed:\n\t\t\t\tfailureCount += 1\n\t\t\t\tif failureCount >= 4:\n\t\t\t\t\t# done\n\t\t\t\t\tyield rectangle\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tfailureCount = 0\n\t\t\tdirection = (direction + 1) % 4\n\n\ndef loadDomain(path):\n\treturn imread(path).astype('bool')\n\ndef getPartitions(array, solver):\n\tp = []\n\tshape = array.shape\n\tarray2 = np.zeros((shape[0], shape[1]), dtype='int')\n\tfor rect in findStart(array):\n\t\t((x0, y0), (x1, y1)) = rect\n\t\tpartition = Partition((x0, y0), (x1, y1), solver)\n\t\tarray2[x0:x1,y0:y1] = partition.__hash__()\n\t\tp.append(partition)\n\treturn p\n\ndef visualize(partitions, shape):\n\tarray2 = np.zeros((shape[0], shape[1], 3), dtype='int')\n\tcolor = np.random.uniform(0, 255, 3)\n\tfor r in partitions:\n\t\tarray2[r.lowerBoundary[0]:r.upperBoundary[0], r.lowerBoundary[1]:r.upperBoundary[1]] = color\n\t\tcolor = np.random.uniform(0, 255, 3)\n\tplt.imshow(array2)\n\tplt.show()\n\nclass Partition:\n\tdef __init__(self, lowerBoundary, upperBoundary, parent):\n\n\t\tself.parent = parent\n\t\t\n\t\tself.lowerBoundary = lowerBoundary\n\t\tself.upperBoundary = upperBoundary\n\t\t\n\t\tself.shape = (self.upperBoundary[0] - self.lowerBoundary[0], self.upperBoundary[1] - self.lowerBoundary[1])\n\n\t\tself.topNeighbours = set()\n\t\tself.rightNeighbours = set()\n\t\tself.bottomNeighbours = set()\n\t\tself.leftNeighbours = set()\n\t\t\n\t\txs = np.arange(0, self.shape[0], 1) + 1\n\t\tys = np.arange(0, self.shape[1], 1) + 1\n\t\tscaling_x = float(self.shape[1]) / max(self.shape[0], self.shape[1])\n\t\tscaling_y = float(self.shape[0]) / max(self.shape[0], self.shape[1])\n\t\tself.kValues = pi * 
np.sqrt(np.array([pow(xs[ix] * scaling_x,2) + pow(ys[iy] * scaling_y,2) for ix,iy in np.ndindex(self.shape)]).reshape(self.shape))\n\t\t\n\t\tself.fValues = np.zeros(self.shape)\n\t\tself.fLastValues = np.zeros(self.shape)\n\n\tdef __hash__(self):\n\t\t# todo: replace 100000 with domain width...\n\t\treturn self.lowerBoundary[0] * 100000 + self.lowerBoundary[1]\n\n\tdef getShape(self):\n\t\treturn self.shape\n\n\tdef getValues(self):\n\t\treturn idct2(self.fValues)\n\n\t\n\tdef neighbours(self, positions, rhsq):\n\t\tconv = np.array([2, -27, 270, -490, 270, -27, 2]) / 180\n\t\tU = np.zeros(self.shape)\n\n\t\tfor y in range(self.lowerBoundary[1], self.upperBoundary[1]):\n\t\t\t\n\t\t\ty_t = y - self.lowerBoundary[1]\n\n\t\t\te1 = (shift(conv, 1) + conv) * np.array([1,1,1,1,0,0,0])\n\t\t\tr1 = conv - e1\n\t\t\tc1 = np.convolve(r1, positions[-4+self.upperBoundary[0]:3+self.upperBoundary[0],y], 'valid')\n\t\t\tU[-1,y_t] = c1[0]\n\t\t\n\t\t\te2 = (shift(conv, 2) + conv) * np.array([1,1,1,0,0,0,0])\n\t\t\tr2 = conv - e2\n\t\t\tc2 = np.convolve(r2, positions[-5+self.upperBoundary[0]:2+self.upperBoundary[0],y], 'valid')\n\t\t\tU[-2,y_t] = c2[0]\n\n\t\t\te3 = (shift(conv, 3) + conv) * np.array([1,1,0,0,0,0,0])\n\t\t\tr3 = conv - e3\n\t\t\tc3 = np.convolve(r3, positions[-6+self.upperBoundary[0]:1+self.upperBoundary[0],y], 'valid')\n\t\t\tU[-3,y_t] = c3[0]\n\n\t\tU = U / rhsq\n\n\t\treturn U\n\n\n\n\tdef step(self, externalForces, positions, delta_t = 1.0 / 1378.0, c = 340):\n\t\tw = c * self.kValues\n\t\tmult = np.cos(w*delta_t)\n\t\tforce = externalForces[self.lowerBoundary[0]:self.upperBoundary[0],\\\n\t\t\tself.lowerBoundary[1]:self.upperBoundary[1]]\n\t\tneighbouringForces = self.neighbours(positions, pow(1.0 / 64, 2))\n\t\t\n\t\tforceTerm = 2.0 * dct2(force + neighbouringForces * c * c) * (1.0 - mult) / (np.power(w, 2))\n\t\tfNew = 2 * self.fValues * mult - self.fLastValues + forceTerm\n\t\tself.fLastValues = self.fValues\n\t\tself.fValues = fNew\n\nclass Solver:\n\tdef __init__(self, file):\n\t\tdomain = loadDomain(file).T\n\t\tself.partitions = getPartitions(domain, self)\n\t\tself.shape = domain.shape\n\t\tself.neighbourForces = np.zeros(self.shape)\n\t\tself.externalForces = np.zeros(self.shape)\n\t\tself.speedOfSound = 340\n\t\tself.deltaT = 1.0 / 44100.0\n\n\tdef step(self):\n\t\tvalues = self.getValues()\n\t\tfor part in self.partitions:\n\t\t\tpart.step(self.externalForces, values, self.deltaT, self.speedOfSound)\n\n\tdef getValues(self):\n\t\toutput = np.zeros(self.shape)\n\t\tfor part in self.partitions:\n\t\t\toutput[part.lowerBoundary[0]:part.upperBoundary[0],\\\n\t\t\t\tpart.lowerBoundary[1]:part.upperBoundary[1]] = part.getValues()\n\t\treturn output\n","repo_name":"sondrehav/master","sub_path":"EchoSimPython/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4692249155","text":"#!/usr/bin/python3\n# this script predicts semantic tags using a trained neural model\n\nimport sys\nsys.path.append(sys.argv[1])\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n\nimport pickle\nimport numpy as np\n\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nimport keras\nkeras.backend.tensorflow_backend.set_session(tf.Session(config=config))\n\nfrom models.argparser import get_args\nfrom models.loader import load_conll_notags, make_char_seqs\nfrom models.nn import 
get_model\n\nfrom utils.input2feats import wordsents2sym, charsents2sym\n\n\n# parse input arguments\nargs = get_args()\n\n# load trained model parameters\nminfo = pickle.load(open(args.output_model_info, 'rb'))\nparams = minfo['params']\n\n# read and featurize unlabelled data\nword_inputs, word_sents = load_conll_notags(args.input_pred_file,\n minfo['max_slen'],\n vocab = minfo['word2idx'].keys(),\n oovs = minfo['oov_sym'],\n pads = minfo['pad_word'],\n lower = False,\n mwe = True,\n unk_case = True)\n\n# transform inputs to a symbolic representation\nif params.use_words:\n X_word, _ = wordsents2sym(word_sents,\n minfo['max_slen'],\n minfo['word2idx'],\n minfo['tag2idx'],\n minfo['oov_sym']['unknown'],\n minfo['DEFAULT_TAG'],\n minfo['pad_word']['pad'],\n minfo['PADDING_TAG'])\n\n# compute character-based inputs\nif params.use_chars:\n char_sents, _ = make_char_seqs(word_sents,\n vocab = set(minfo['char2idx'].keys()),\n oovs = minfo['oov_sym'],\n pads = minfo['pad_char'],\n len_perc = params.word_len_perc,\n lower = False)\n\n # map character sentences and their tags to a symbolic representation\n X_char = charsents2sym(char_sents,\n minfo['max_slen'],\n minfo['max_wlen'],\n minfo['char2idx'],\n minfo['oov_sym']['unknown'],\n minfo['pad_char']['begin'],\n minfo['pad_char']['end'],\n minfo['pad_char']['pad'])\n\n# build input for the model\nif params.use_words and params.use_chars:\n X = [X_word, X_char]\nelif params.use_words:\n X = X_word\nelif params.use_chars:\n X = X_char\n\n# use a trained model to predict the corresponding tags\nif params.use_words and params.use_chars:\n model = get_model(minfo['params'],\n num_tags = minfo['num_tags'],\n max_slen = minfo['max_slen'], num_words = minfo['num_words'],\n wemb_dim = minfo['wemb_dim'], wemb_matrix = minfo['wemb_matrix'],\n max_wlen = minfo['max_wlen'], num_chars = minfo['num_chars'],\n cemb_dim = minfo['cemb_dim'], cemb_matrix = minfo['cemb_matrix'])\nelif params.use_words:\n model = get_model(minfo['params'],\n num_tags = minfo['num_tags'],\n max_slen = minfo['max_slen'], num_words = minfo['num_words'],\n wemb_dim = minfo['wemb_dim'], wemb_matrix = minfo['wemb_matrix'])\n\nelif params.use_chars:\n model = get_model(minfo['params'],\n num_tags = minfo['num_tags'],\n max_slen = minfo['max_slen'],\n max_wlen = minfo['max_wlen'], num_chars = minfo['num_chars'],\n cemb_dim = minfo['cemb_dim'], cemb_matrix = minfo['cemb_matrix'])\n\nmodel.load_weights(args.output_model)\n#model.summary()\n\n# predict tags using the model\np = model.predict(X, verbose = min(1, params.verbose))\np = np.argmax(p, axis=-1) + 1\n\n# reconstruct the original file with tags\n# an input sentence can be split over multiple processed sentences\nidx_offset = 0\nwith open(args.output_pred_file, 'w') as ofile:\n for sidx in range(len(word_inputs)):\n # find the range of processed sentences that match the current input sentence\n old_offset = idx_offset\n while list(filter(lambda y: y[1] != -1, word_sents[sidx + idx_offset]))[-1][1] < len(word_inputs[sidx]) - 1:\n idx_offset += 1\n\n # generate the predicted mapping for each word in the input sentence\n wpos2tag = {}\n for off in range(old_offset, idx_offset + 1):\n for wpos, tag in zip([x[1] for x in word_sents[sidx + off]], p[sidx + off]):\n if wpos not in wpos2tag:\n wpos2tag[wpos] = []\n wpos2tag[wpos].append(tag)\n\n for widx in range(len(word_inputs[sidx])):\n tgt_word = word_inputs[sidx][widx]\n tgt_tag = minfo['tag2idx'][minfo['DEFAULT_TAG']]\n # multi-word expressions take the most common prediction for their 
individual components\n if widx in wpos2tag:\n tgt_tag = max(set(wpos2tag[widx]), key=wpos2tag[widx].count)\n # write out\n ofile.write(str(minfo['idx2tag'][tgt_tag]) + '\\t' + tgt_word + '\\n')\n ofile.write('\\n')\n\n","repo_name":"ginesiametlle/semtagger","sub_path":"models/semtagger_predict.py","file_name":"semtagger_predict.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37573394088","text":"# Method 1...topological sort\n\nf = open(\"Problem79_input\", \"r\")\ndata = []\nfor k in range(50):\n data.append(f.readline().strip())\nf.close()\n\nedges = {} # v\nnodes = set()\n\nfor datum in data:\n for c in datum:\n nodes.add(c)\n if datum[0] not in edges:\n edges[datum[0]] = {datum[1]}\n else:\n edges[datum[0]].add(datum[1])\n if datum[1] not in edges:\n edges[datum[1]] = {datum[2]}\n else:\n edges[datum[1]].add(datum[2])\n\nvisited = set()\nstack = []\n\ndef dfs(node):\n visited.add(node)\n if node in edges:\n for downstream in edges[node]:\n if downstream not in visited:\n dfs(downstream)\n stack.append(node)\n\nfor node in nodes:\n if not node in visited:\n dfs(node)\n\nprint(\"\".join(stack[::-1]))","repo_name":"hgparker/project_euler","sub_path":"Problem79a.py","file_name":"Problem79a.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20480359434","text":"import sys\nimport time\nimport threading\nfrom socket import *\n\ndef alive_chk():\n global alive\n global clist\n while True:\n tmp = list(alive.items())\n for key, val in tmp:\n if val + 30 < time.time():\n print(\"{} is off-line\\t{}:{}\".format(key.split(\" \")[0], key.split(\" \")[1], key.split(\" \")[2]))\n sys.stdout.flush()\n del alive[key]\n clist.remove(key)\n send = \"1\" + \",\".join(clist)\n for data in clist:\n ip = data.split(\" \")[1]\n port = int(data.split(\" \")[2])\n ServerSocket.sendto(send.encode(),(ip, port))\n break\n time.sleep(0.1)\n\n\nServerSocket = socket(AF_INET, SOCK_DGRAM)\nServerSocket.bind(('', 10080))\nclist = []\nalive = {}\nth = threading.Thread(target=alive_chk)\nth.daemon = True\nth.start()\nwhile True:\n msg, Add = ServerSocket.recvfrom(65565)\n op = msg.decode()[0] # 0:registration 1:list data 2:chat data 3:keep_alive data 4:unregistration\n if op == \"0\":\n clist.append(msg.decode()[1:33].strip() +\" \" + Add[0] + \" \" + str(Add[1]) + \" \" + msg.decode()[33:])\n alive[clist[-1]] = time.time()\n print(\"{}\\t{}:{}\".format(msg.decode()[1:33].strip(), Add[0], Add[1]))\n sys.stdout.flush()\n send = \"1\" + \",\".join(clist)\n for data in clist:\n ip = data.split(\" \")[1]\n port = int(data.split(\" \")[2])\n ServerSocket.sendto(send.encode(),(ip, port))\n elif op == \"3\":\n alive[msg.decode()[1:33].strip() +\" \" + Add[0] + \" \" + str(Add[1]) + \" \" + msg.decode()[33:]] = time.time()\n elif op == \"4\":\n for i in range(0, len(clist)):\n tmp = clist[i].split(\" \")\n if tmp[1] == Add[0] and tmp[2] == str(Add[1]):\n del alive[clist[i]]\n del clist[i]\n print(\"{} is unregistered\\t{}:{}\".format(tmp[0], tmp[1], tmp[2]))\n sys.stdout.flush()\n send = \"1\" + \",\".join(clist)\n for data in clist:\n ip = data.split(\" \")[1]\n port = int(data.split(\" \")[2])\n ServerSocket.sendto(send.encode(),(ip, port))\n break\nServerSocket.close()\n","repo_name":"KangInPark/Assignment","sub_path":"Computer 
Networks/A4/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"26771857802","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : minidata.\n# @File : http\n# @Time : 2022/4/11 6:29 PM\n# @Author : yuanjie\n# @WeChat : meutils\n# @Software : PyCharm\n# @Description :\n\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\ndata = {'result': 'this is a test'}\nhost = ('localhost', 8899)\n\n\nclass Resquest(BaseHTTPRequestHandler):\n timeout = 5\n server_version = \"Apache\" # set the Server header returned in responses\n\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\") # set the server response headers\n self.send_header(\"test1\", \"This is test!\") # set a custom response header\n self.end_headers()\n buf = '''<html>\n <head><title>Get page</title></head>\n <body>\n <form method=\"post\">\n username: <input type=\"text\" name=\"username\"/><br/>\n password: <input type=\"password\" name=\"password\"/><br/>\n <input type=\"submit\" value=\"submit\"/>\n </form>\n </body>\n </html>'''\n self.wfile.write(buf.encode()) # the body must be bytes, so the string is converted with encode() # this sets the response body, i.e. the page shown to the client\n\n def do_POST(self):\n path = self.path\n print(path)\n # read the data submitted via POST\n datas = self.rfile.read(int(self.headers['content-length'])) # standard pattern for reading the posted form data\n # datas = urllib.unquote(datas).decode(\"utf-8\", 'ignore')\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\") # set the response headers for the POST reply\n self.send_header(\"test\", \"This is post!\")\n self.end_headers()\n\n html = '''<html>\n <head><title>Post page</title></head>\n <body>\n Post Data:%s<br/>
\n Path:%s\n </body>\n </html>''' % (datas, self.path)\n self.wfile.write(html.encode()) # the page the server redirects to and displays after the POST data is submitted\n\n\nif __name__ == '__main__':\n server = HTTPServer(host, Resquest)\n print(\"Starting server, listen at: %s:%s\" % host)\n server.serve_forever()\n","repo_name":"yuanjie-ai/minidata","sub_path":"examples/http_demo.py","file_name":"http_demo.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"7876574190","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @FileName :23. Merge k Sorted Lists.py\n# @Time :3/27/22\n# @Author :Eason Tang\nfrom typing import List, Optional\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n head = ListNode()\n\n cur_node = [x for x in lists]\n res_node = head\n\n while any(cur_node):\n min_val = float('inf')\n min_idx = 0\n for i, node in enumerate(cur_node):\n if node and node.val < min_val:\n min_idx = i\n min_val = node.val\n res_node.next = cur_node[min_idx]\n res_node = res_node.next\n cur_node[min_idx] = cur_node[min_idx].next\n\n return head.next\n","repo_name":"tangyisheng2/leetcode-note","sub_path":"code/23. Merge k Sorted Lists.py","file_name":"23. Merge k Sorted Lists.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"7884786823","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef _prod(array):\n prod = 1.\n for e in array:\n prod *= e\n return prod\n\n\ndef variable_shapes(optimized_vars=None):\n lines = ['']\n if optimized_vars is not None:\n lines.append('Optimizing Variables:')\n lines.append('====================')\n total_params = 0\n for var in optimized_vars:\n n_param = _prod(var.get_shape().as_list())\n total_params += n_param\n lines.append('%20s %8d %s' % (var.get_shape().as_list(), n_param, var.name))\n lines.append('Total Optimizing parameters: %d' % total_params)\n\n lines.append('')\n\n train_vars = tf.trainable_variables()\n lines.append('Trainable Variables:')\n lines.append('====================')\n total_params = 0\n for var in train_vars:\n if optimized_vars is not None and var in optimized_vars: continue\n n_param = _prod(var.get_shape().as_list())\n total_params += n_param\n lines.append('%20s %8d %s' % (var.get_shape().as_list(), n_param, var.name))\n lines.append('Total trainable parameters: %d' % total_params)\n\n lines.append('')\n lines.append('Other Variables:')\n lines.append('================')\n total_params = 0\n\n for var in tf.global_variables():\n if var in train_vars: continue\n n_param = _prod(var.get_shape().as_list())\n total_params += n_param\n lines.append('%20s %8d %s' % (var.get_shape().as_list(), n_param, var.name))\n lines.append('Total non-trainable parameters: %d' % total_params)\n\n return '\\n'.join(lines)\n\n\ndef print_attention_weights(features, targets, mode, params, review_weights, predictions):\n if review_weights is None:\n tf.logging.info('no review_weights')\n return predictions\n\n if mode != tf.contrib.learn.ModeKeys.TRAIN:\n print_num = params['print_num_per_batch']\n max_weight_index = tf.argmax(review_weights, axis=1)\n max_weight_index = tf.squeeze(max_weight_index, 
axis=1)\n review_weights = tf.squeeze(review_weights, axis=2)\n for i in range(print_num):\n predictions = tf.Print(predictions, [features['placeid'][i]], summarize=1)\n predictions = tf.Print(predictions, [features['attribute_string'][i]], summarize=1)\n predictions = tf.Print(predictions, [max_weight_index[i]], summarize=1)\n predictions = tf.Print(predictions, [review_weights[i, max_weight_index[i]]], summarize=1)\n predictions = tf.Print(predictions, [features['token_list_string'][i, max_weight_index[i]]], summarize=params['max_len'])\n predictions = tf.Print(predictions, [predictions[i]], summarize=1)\n\n if mode == tf.contrib.learn.ModeKeys.EVAL:\n predictions = tf.Print(predictions, [targets[i]], summarize=1)\n\n return predictions\n\n\ndef to_categorical(y, nb_classes, miml=False):\n \"\"\" to_categorical.\n\n Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy.\n\n Arguments:\n y: `array`. Class vector to convert.\n nb_classes: `int`. Total number of classes.\n\n \"\"\"\n Y = np.zeros((len(y), nb_classes), dtype=np.float32)\n for i in range(len(y)):\n if miml:\n for label in y[i]:\n Y[i, label] = 1.\n else:\n Y[i, y[i]] = 1.\n return Y\n\n\n# =====================\n# SEQUENCES UTILS\n# =====================\n\n\ndef pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',\n truncating='post', value=0.):\n \"\"\" pad_sequences.\n\n Pad each sequence to the same length: the length of the longest sequence.\n If maxlen is provided, any sequence longer than maxlen is truncated to\n maxlen. Truncation happens off either the beginning or the end (default)\n of the sequence. Supports pre-padding and post-padding (default).\n\n Arguments:\n sequences: list of lists where each element is a sequence.\n maxlen: int, maximum length.\n dtype: type to cast the resulting sequence.\n padding: 'pre' or 'post', pad either before or after each sequence.\n truncating: 'pre' or 'post', remove values from sequences larger than\n maxlen either in the beginning or in the end of the sequence\n value: float, value to pad the sequences to the desired value.\n\n Returns:\n x: `numpy array` with dimensions (number_of_sequences, maxlen)\n\n Credits: From Keras `pad_sequences` function.\n \"\"\"\n lengths = [len(s) for s in sequences]\n\n nb_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if len(s) == 0:\n continue # empty list was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError(\"Truncating type '%s' not understood\" % padding)\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError(\"Padding type '%s' not understood\" % padding)\n return x\n\n\ndef load_word2vec(fname, vocab, lower_case=False):\n \"\"\"\n Loads 300x1 word vecs from Google (Mikolov) word2vec\n \"\"\"\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch) \n if lower_case:\n word = word.lower() \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n 
f.read(binary_len)\n return word_vecs\n","repo_name":"fufrank5/relatedness","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"2374324515","text":"\"\"\"\nThis is the 4th notebook I'm making using EfficientNet on TPUs. The full list:\n 1. https://www.kaggle.com/xhlulu/flowers-tpu-concise-efficientnet-b7\n 2. https://www.kaggle.com/xhlulu/plant-pathology-very-concise-tpu-efficientnet\n 3. https://www.kaggle.com/xhlulu/alaska2-efficientnet-on-tpus\nReferences:\n 1. https://www.kaggle.com/mgornergoogle/getting-started-with-100-flowers-on-tpu\n\"\"\"\nimport os\nos.system('pip install /kaggle/input/efficientnet-keras-source-code/ -q')\n\nimport efficientnet.tfkeras as efn\nimport pandas as pd\nfrom kaggle_datasets import KaggleDatasets\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\n\n# Helper functions\ndef auto_select_accelerator():\n try:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.experimental.TPUStrategy(tpu)\n print(\"Running on TPU:\", tpu.master())\n except ValueError:\n strategy = tf.distribute.get_strategy()\n print(f\"Running on {strategy.num_replicas_in_sync} replicas\")\n \n return strategy\n\ndef decode_image(path, label=None, target_size=(512, 512)):\n img = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)\n img = tf.cast(img, tf.float32) / 255.0\n img = tf.image.resize(img, target_size)\n \n return img if label is None else (img, label)\n\ndef data_augment(img, label=None):\n img = tf.image.random_flip_left_right(img)\n img = tf.image.random_flip_up_down(img)\n\n return img if label is None else (img, label)\n\ndef build_dataset(paths, bsize, labels=None, cache=True,\n decode_fn=decode_image, augment_fn=data_augment,\n augment=True, repeat=True, shuffle=1024):\n AUTO = tf.data.experimental.AUTOTUNE\n slices = paths if labels is None else (paths, labels)\n dset = tf.data.Dataset.from_tensor_slices(slices)\n dset = dset.map(decode_fn, num_parallel_calls=AUTO)\n dset = dset.cache() if cache else dset\n dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset\n dset = dset.repeat() if repeat else dset\n dset = dset.shuffle(shuffle) if shuffle else dset\n dset = dset.batch(bsize).prefetch(AUTO)\n \n return dset\n\n\n# ############### Variables and configurations ###############\nstrategy = auto_select_accelerator()\nBATCH_SIZE = strategy.num_replicas_in_sync * 16\nGCS_DS_PATH = KaggleDatasets().get_gcs_path('cassava-leaf-disease-classification')\n\n# ############### Loading and preprocess CSVs ###############\nload_dir = \"/kaggle/input/cassava-leaf-disease-classification/\"\ndf = pd.read_csv(load_dir + 'train.csv')\ndf['paths'] = GCS_DS_PATH + \"/train_images/\" + df.image_id\nsub_df = pd.read_csv(load_dir + 'sample_submission.csv')\n\n# ############### Splitting and defining the dataset ###############\ntrain_df, valid_df = train_test_split(df, test_size=0.2, random_state=42)\ntrain_dataset = build_dataset(\n train_df.paths, labels=train_df.label, bsize=BATCH_SIZE)\nvalid_dataset = build_dataset(\n valid_df.paths, labels=valid_df.label, bsize=BATCH_SIZE, \n repeat=False, shuffle=False, augment=False)\n\n# ############### Build and compile the model ###############\nwith strategy.scope():\n model = tf.keras.Sequential([\n efn.EfficientNetB7(\n 
input_shape=(512, 512, 3),\n weights='noisy-student',\n include_top=False),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense(5, activation='softmax')\n ])\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['sparse_categorical_accuracy'])\n model.summary()\n\n# ############### Train the model ###############\nsteps_per_epoch = train_df.shape[0] // BATCH_SIZE\ncheckpoint = tf.keras.callbacks.ModelCheckpoint(\n 'model.h5', save_best_only=True)\nlr_reducer = tf.keras.callbacks.ReduceLROnPlateau(\n monitor=\"val_loss\", patience=3, min_lr=1e-6)\n\nmodel.fit(\n train_dataset, \n epochs=20,\n verbose=2,\n callbacks=[checkpoint, lr_reducer],\n steps_per_epoch=steps_per_epoch,\n validation_data=valid_dataset)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/cassava-leaf-disease-classification/xhlulu/cassava-train-efficientnet-on-tpu-in-100-lines.py","file_name":"cassava-train-efficientnet-on-tpu-in-100-lines.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
+{"seq_id":"8787749887","text":"\ndef add(mat1, mat2):\n res = []\n for i in range(len(mat1)):\n row = []\n for j in range(len(mat1[0])):\n row.append(mat1[i][j] + mat2[i][j])\n res.append(row)\n return res\n\ndef sub(mat1, mat2):\n res = []\n for i in range(len(mat1)):\n row = []\n for j in range(len(mat1[0])):\n row.append(mat1[i][j] - mat2[i][j])\n res.append(row)\n return res\n\ndef mul(mat1, mat2):\n m = len(mat1); n=len(mat1[0]); p = len(mat2[0])\n res = [[0 for _ in range(p)] for _ in range(m)]\n for i in range(m):\n for j in range(p):\n for k in range(n):\n res[i][j] += mat1[i][k]* mat2[k][j]\n return res\n\ndef getCofactor(mat, cf, p, q, n):\n i = 0; j = 0\n for row in range(n):\n for col in range(n):\n if row != p and col != q:\n cf[i][j] = mat[row][col]\n j += 1\n if j == n - 1:\n j = 0\n i += 1\n return cf\n\ndef determinant(mat, n):\n d = 0\n if n == 1:\n return mat[0][0]\n cf = [[0 for _ in range(n)] for _ in range(n)]\n for i in range(n):\n cf = getCofactor(mat, cf, 0, i, n)\n d += ((-1)**i) * mat[0][i] * determinant(cf, n - 1)\n return d\ndef printSnake(mat):\n rows = len(mat); cols = len(mat[0])\n for i in range(rows):\n for j in range(cols):\n if i % 2 == 0:\n print(mat[i][j], end = \" \")\n else:\n print(mat[i][cols - 1 - j], end = \" \")\n print(\"\\n\")\n\ndef printBoundary(mat):\n rows = len(mat); cols = len(mat[0])\n\n for i in mat[0]:\n print(i, end =\" \")\n\n for i in range(rows):\n print(mat[i][cols-1], end=\" \")\n\n for i in mat[-1][::-1]:\n print(i, end=\" \")\n\n for i in reversed(range(1,rows)):\n print(mat[i][0], end=\" \")\n\ndef transpose(mat):\n n = len(mat)\n for i in range(n):\n for j in range(i+1, n):\n mat[i][j], mat[j][i] = (mat[j][i], mat[i][j])\n return mat\n\nanticlockwise90 = lambda mat: transpose(mat)[::-1]\nclockwise90 = lambda mat: [row[::-1] for row in transpose(mat)]\n\ndef search(mat, x):\n rows = len(mat); cols = len(mat[0])\n r_idx = -1; c_idx = -1\n\n for i in range(rows):\n if x < mat[i][-1]:\n r_idx = i\n break\n elif x == mat[i][-1]:\n r_idx = i\n c_idx = cols - 1\n break\n else:\n pass\n if r_idx == -1:\n return -1\n else:\n if c_idx == -1:\n for i in range(cols):\n if mat[r_idx][i] == x:\n c_idx = i\n\n if r_idx != -1 and c_idx != -1:\n return (r_idx, c_idx)\n else:\n return -1\n\ndef median(mat):\n rows = len(mat); cols = len(mat[0])\n flat = []\n for i in range(rows):\n for j in range(cols):\n flat.append(mat[i][j])\n flat = 
sorted(flat)\n return flat[rows*cols//2]\n\n\ndef printSpiral(a):\n k = 0; l = 0; m = len(a); n = len(a[0])\n while (k < m and l < n):\n for i in range(l, n):\n print(a[k][i], end=\" \")\n k += 1\n\n for i in range(k, m):\n print(a[i][n - 1], end=\" \")\n n -= 1\n\n if (k < m):\n for i in range(n - 1, (l - 1), -1):\n print(a[m - 1][i], end=\" \")\n m -= 1\n\n if (l < n):\n for i in range(m - 1, k - 1, -1):\n print(a[i][l], end=\" \")\n l += 1\n \n \n","repo_name":"vatsalcode/Codeshow-100days_of_code","sub_path":"Abhishek Tyagi/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"27208127446","text":"import os\nfrom itertools import product\n\nimport numpy as np\nimport plotly.graph_objs as go\nfrom scipy.stats import wilcoxon\nfrom tqdm import tqdm\n\nfrom libs import compute_lib\nfrom libs.experiments import load, filtering, compute, paths, organize\nfrom libs.experiments.config import QUANTIFICATION_WINDOW_LENGTH_IN_CELL_DIAMETER, \\\n QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER, QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER, all_experiments, \\\n DERIVATIVE\nfrom plotting import save\n\nOFFSET_X = 0\nOFFSET_Y = 0.5\nALIGNMENT_OFFSET_Y = 0\nOFFSET_Z = 0\n\nPAIR_DISTANCE_RANGE = [4, 10]\n\nSTART_TIME_FRAME_Z_SCORE = 5\nBLOCK_ALREADY_DENSE = False\n\n\ndef align_by_z_score(_experiments, _experiments_fiber_densities):\n _experiments_fiber_densities_aligned = {}\n for _tuple in tqdm(_experiments, desc='Temporal aligning experiments'):\n _experiment, _series_id, _group = _tuple\n\n _left_cell_alignment_fiber_densities = \\\n _experiments_fiber_densities[(_experiment, _series_id, _group, 'left_cell', ALIGNMENT_OFFSET_Y)]\n _right_cell_alignment_fiber_densities = \\\n _experiments_fiber_densities[(_experiment, _series_id, _group, 'right_cell', ALIGNMENT_OFFSET_Y)]\n\n _left_cell_fiber_densities = \\\n _experiments_fiber_densities[(_experiment, _series_id, _group, 'left_cell', OFFSET_Y)]\n _right_cell_fiber_densities = \\\n _experiments_fiber_densities[(_experiment, _series_id, _group, 'right_cell', OFFSET_Y)]\n\n _normalization = load.normalization_series_file_data(_experiment, _series_id)\n _left_cell_value_aligned, _right_cell_value_aligned = None, None\n for _time_frame, (_left_cell_fiber_density, _right_cell_fiber_density) in \\\n enumerate(zip(_left_cell_alignment_fiber_densities, _right_cell_alignment_fiber_densities)):\n _normalized_left_cell_fiber_density = compute_lib.z_score(\n _x=_left_cell_fiber_density[0],\n _average=_normalization['average'],\n _std=_normalization['std']\n )\n _normalized_right_cell_fiber_density = compute_lib.z_score(\n _x=_right_cell_fiber_density[0],\n _average=_normalization['average'],\n _std=_normalization['std']\n )\n _mean_z_score = (_normalized_left_cell_fiber_density + _normalized_right_cell_fiber_density) / 2\n if _mean_z_score > START_TIME_FRAME_Z_SCORE:\n if _time_frame == 0 and BLOCK_ALREADY_DENSE:\n _left_cell_value_aligned = []\n _right_cell_value_aligned = []\n else:\n _left_cell_value_aligned = _left_cell_fiber_densities[_time_frame:]\n _right_cell_value_aligned = _right_cell_fiber_densities[_time_frame:]\n break\n\n # in case z-score not reached\n if _left_cell_value_aligned is None:\n _left_cell_value_aligned = []\n _right_cell_value_aligned = []\n\n _experiments_fiber_densities_aligned[(_experiment, _series_id, _group, 'left_cell')] = \\\n _left_cell_value_aligned\n 
_experiments_fiber_densities_aligned[(_experiment, _series_id, _group, 'right_cell')] = \\\n _right_cell_value_aligned\n\n return _experiments_fiber_densities_aligned\n\n\ndef main(_high_temporal_resolution=True):\n _experiments = all_experiments()\n _experiments = filtering.by_categories(\n _experiments=_experiments,\n _is_single_cell=False,\n _is_high_temporal_resolution=_high_temporal_resolution,\n _is_bleb=False,\n _is_dead_dead=False,\n _is_live_dead=False,\n _is_bead=False,\n _is_metastasis=False\n )\n\n _tuples = load.experiments_groups_as_tuples(_experiments)\n _tuples = filtering.by_pair_distance_range(_tuples, PAIR_DISTANCE_RANGE)\n _tuples = filtering.by_real_pairs(_tuples)\n _tuples = filtering.by_band(_tuples)\n print('Total tuples:', len(_tuples))\n\n _arguments = []\n for _tuple in _tuples:\n _experiment, _series_id, _group = _tuple\n for _cell_id in ['left_cell', 'right_cell']:\n _latest_time_frame = compute.latest_time_frame_before_overlapping(_experiment, _series_id, _group, OFFSET_X)\n _arguments.append({\n 'experiment': _experiment,\n 'series_id': _series_id,\n 'group': _group,\n 'length_x': QUANTIFICATION_WINDOW_LENGTH_IN_CELL_DIAMETER,\n 'length_y': QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER,\n 'length_z': QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER,\n 'offset_x': OFFSET_X,\n 'offset_y': OFFSET_Y,\n 'offset_z': OFFSET_Z,\n 'cell_id': _cell_id,\n 'direction': 'inside',\n 'time_points': _latest_time_frame\n })\n if ALIGNMENT_OFFSET_Y != OFFSET_Y:\n _arguments.append({\n 'experiment': _experiment,\n 'series_id': _series_id,\n 'group': _group,\n 'length_x': QUANTIFICATION_WINDOW_LENGTH_IN_CELL_DIAMETER,\n 'length_y': QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER,\n 'length_z': QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER,\n 'offset_x': OFFSET_X,\n 'offset_y': ALIGNMENT_OFFSET_Y,\n 'offset_z': OFFSET_Z,\n 'cell_id': _cell_id,\n 'direction': 'inside',\n 'time_points': _latest_time_frame\n })\n\n _windows_dictionary, _windows_to_compute = \\\n compute.windows(_arguments, _keys=['experiment', 'series_id', 'group', 'cell_id', 'offset_y'])\n _fiber_densities = compute.fiber_densities(_windows_to_compute)\n\n _experiments_fiber_densities = {\n _key: [_fiber_densities[_tuple] for _tuple in _windows_dictionary[_key]]\n for _key in _windows_dictionary\n }\n\n _experiments_fiber_densities_aligned = align_by_z_score(_tuples, _experiments_fiber_densities)\n _tuples_by_experiment = organize.by_experiment(_tuples)\n\n _same_correlations_array = []\n _different_correlations_array = []\n _valid_tuples = []\n for _experiment in _tuples_by_experiment:\n print('Experiment:', _experiment)\n _experiment_tuples = _tuples_by_experiment[_experiment]\n\n for _same_index in tqdm(range(len(_experiment_tuples)), desc='Main loop'):\n _same_tuple = _experiment_tuples[_same_index]\n _same_experiment, _same_series, _same_group = _same_tuple\n\n _same_left_cell_fiber_densities = \\\n _experiments_fiber_densities_aligned[\n (_same_experiment, _same_series, _same_group, 'left_cell')\n ]\n _same_right_cell_fiber_densities = \\\n _experiments_fiber_densities_aligned[\n (_same_experiment, _same_series, _same_group, 'right_cell')\n ]\n\n _same_properties = \\\n load.group_properties(_same_experiment, _same_series, _same_group)\n _same_left_cell_fiber_densities = compute.remove_blacklist(\n _same_experiment,\n _same_series,\n _same_properties['cells_ids']['left_cell'],\n _same_left_cell_fiber_densities\n )\n _same_right_cell_fiber_densities = compute.remove_blacklist(\n _same_experiment,\n _same_series,\n 
_same_properties['cells_ids']['right_cell'],\n _same_right_cell_fiber_densities\n )\n\n _same_left_cell_fiber_densities_filtered, _same_right_cell_fiber_densities_filtered = \\\n compute.longest_same_indices_shared_in_borders_sub_array(\n _same_left_cell_fiber_densities, _same_right_cell_fiber_densities\n )\n\n # ignore small arrays\n if len(_same_left_cell_fiber_densities_filtered) < compute.minimum_time_frames_for_correlation(_same_experiment):\n continue\n\n _same_correlation = compute_lib.correlation(\n compute_lib.derivative(_same_left_cell_fiber_densities_filtered, _n=DERIVATIVE),\n compute_lib.derivative(_same_right_cell_fiber_densities_filtered, _n=DERIVATIVE)\n )\n for _different_index in range(len(_experiment_tuples)):\n if _same_index != _different_index:\n _different_tuple = _experiment_tuples[_different_index]\n _different_experiment, _different_series, _different_group = \\\n _different_tuple\n for _same_cell_id, _different_cell_id in product(['left_cell', 'right_cell'],\n ['left_cell', 'right_cell']):\n _same_fiber_densities = _experiments_fiber_densities_aligned[(\n _same_experiment,\n _same_series,\n _same_group,\n _same_cell_id\n )]\n _different_fiber_densities = _experiments_fiber_densities_aligned[(\n _different_experiment,\n _different_series,\n _different_group,\n _different_cell_id\n )]\n\n _different_properties = load.group_properties(\n _different_experiment, _different_series, _different_group\n )\n _same_fiber_densities = compute.remove_blacklist(\n _same_experiment,\n _same_series,\n _same_properties['cells_ids'][_same_cell_id],\n _same_fiber_densities\n )\n _different_fiber_densities = compute.remove_blacklist(\n _different_experiment,\n _different_series,\n _different_properties['cells_ids'][_different_cell_id],\n _different_fiber_densities\n )\n\n _same_fiber_densities_filtered, _different_fiber_densities_filtered = \\\n compute.longest_same_indices_shared_in_borders_sub_array(\n _same_fiber_densities, _different_fiber_densities\n )\n\n # ignore small arrays\n if len(_same_fiber_densities_filtered) < compute.minimum_time_frames_for_correlation(_different_experiment):\n continue\n\n _different_correlation = compute_lib.correlation(\n compute_lib.derivative(_same_fiber_densities_filtered, _n=DERIVATIVE),\n compute_lib.derivative(_different_fiber_densities_filtered, _n=DERIVATIVE)\n )\n\n _same_correlations_array.append(_same_correlation)\n _different_correlations_array.append(_different_correlation)\n\n if _same_tuple not in _valid_tuples:\n _valid_tuples.append(_same_tuple)\n\n print('Total tuples:', len(_valid_tuples))\n print('Total points:', len(_same_correlations_array))\n _same_minus_different = \\\n np.array(_same_correlations_array) - np.array(_different_correlations_array)\n print('Wilcoxon of same minus different around the zero:')\n print(wilcoxon(_same_minus_different))\n print('Higher same amount:', (_same_minus_different > 0).sum() /\n len(_same_minus_different))\n\n # plot\n _fig = go.Figure(\n data=go.Scatter(\n x=_same_correlations_array,\n y=_different_correlations_array,\n mode='markers',\n marker={\n 'size': 5,\n 'color': '#ea8500'\n },\n showlegend=False\n ),\n layout={\n 'xaxis': {\n 'title': 'Same network correlation',\n 'zeroline': False,\n 'range': [-1.1, 1.2],\n 'tickmode': 'array',\n 'tickvals': [-1, -0.5, 0, 0.5, 1]\n },\n 'yaxis': {\n 'title': 'Different network correlation',\n 'zeroline': False,\n 'range': [-1.1, 1.2],\n 'tickmode': 'array',\n 'tickvals': [-1, -0.5, 0, 0.5, 1]\n },\n 'shapes': [\n {\n 'type': 'line',\n 'x0': -1,\n 
'y0': -1,\n 'x1': -1,\n 'y1': 1,\n 'line': {\n 'color': 'black',\n 'width': 2\n }\n },\n {\n 'type': 'line',\n 'x0': -1,\n 'y0': -1,\n 'x1': 1,\n 'y1': -1,\n 'line': {\n 'color': 'black',\n 'width': 2\n }\n },\n {\n 'type': 'line',\n 'x0': -1,\n 'y0': -1,\n 'x1': 1,\n 'y1': 1,\n 'line': {\n 'color': 'red',\n 'width': 2\n }\n }\n ]\n }\n )\n\n save.to_html(\n _fig=_fig,\n _path=os.path.join(paths.PLOTS, save.get_module_name()),\n _filename='plot_high_time_' + str(_high_temporal_resolution)\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"assafna/cell-ecm-project","sub_path":"fiber_density/experiments/same_inner_correlation_vs_different_inner_correlation_temporal_alignment.py","file_name":"same_inner_correlation_vs_different_inner_correlation_temporal_alignment.py","file_ext":"py","file_size_in_byte":13960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"24521126792","text":"'''\nUse Euler's method in a Python code to solve the initial value problem\nt^2y\"-2ty'+2y=t^3ln(t) with 1=heights[ii]:\n ii += 1\n \n if ii res[j] + abs(heights[i]-heights[j]) + prices[j] + (j-i):\n min_res = res[j] + abs(heights[i]-heights[j]) + prices[j] + (j-i)\n res[i] = min_res\n\n return res[0] \n\nif __name__ == \"__main__\":\n n = int(input().strip())\n mason_height = int(input().strip())\n heights = list(map(int, input().strip().split(' ')))\n prices = list(map(int, input().strip().split(' ')))\n result = raceAgainstTime(n, mason_height, heights, prices)\n print(result)\n","repo_name":"mekan-allaberdi/hackerrank-solutions","sub_path":"a-race-against-time.py","file_name":"a-race-against-time.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"13190556278","text":"from datetime import datetime, timedelta\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom Questionaire.models import Inquiry, Inquirer\nfrom PageDisplay.models import Page, TextModule\nfrom questionaire_mailing.renderers import MailHTMLRenderer\n\nfrom questionaire_mailing.mailing import construct_and_send_mail\n\n\nclass MailPage(Page):\n title = models.CharField(max_length=100)\n renderer = MailHTMLRenderer\n\n\nclass MailTask(models.Model):\n name = models.CharField(max_length=56)\n description = models.CharField(max_length=512)\n active = models.BooleanField(default=False, verbose_name=\"Trigger is active\")\n layout = models.ForeignKey(Page, on_delete=models.CASCADE, blank=True, editable=False)\n\n def save(self, *args, **kwargs):\n if not hasattr(self, 'layout'):\n self.layout = MailPage.objects.create(name=self.name, title=self.name)\n self.layout.add_basic_module(TextModule, text=\"Hallo,\")\n self.layout.add_basic_module(TextModule, text=\"Met vriendelijke groet,\\n\\nDe klimaat menukaart\")\n\n return super(MailTask, self).save(*args, **kwargs)\n\n @property\n def type(self):\n subclass = self.get_as_child()\n return subclass.type\n\n def get_as_child(self):\n \"\"\" Returns the child object of this class\"\"\"\n # Loop over all children\n for child in self.__class__.__subclasses__():\n # If the child object exists\n if child.objects.filter(id=self.id).exists():\n return child.objects.get(id=self.id).get_as_child()\n return self\n\n def activate(self):\n \"\"\" Activates the task and assures that already applicable inquiries don't suddenly get spammed.\"\"\"\n if not self.active:\n 
self.active = True\n self.save()\n # Switch states by pretending to send mail, without sending mail\n self.generate_mail(send_mail=False)\n\n def deactivate(self):\n if self.active:\n self.active = False\n self.save()\n\n\nclass ProcessedMail(models.Model):\n \"\"\" A tracker to track which mails have been sent and which ones have not.\"\"\"\n mail = models.ForeignKey(to=MailTask, on_delete=models.CASCADE)\n inquiry = models.ForeignKey(to=Inquiry, on_delete=models.CASCADE, blank=True, null=True)\n inquirer = models.ForeignKey(to=Inquirer, on_delete=models.CASCADE, blank=True, null=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n was_applicable = models.BooleanField(verbose_name=\"Whether the mail has been sent\")\n\n class Meta:\n unique_together = ['mail', 'inquiry']\n\n def clean(self):\n if not (self.inquirer or self.inquiry):\n raise ValidationError(\"Either inquirer or inquiry needs to have a value\")\n\n\nclass TimedMailTask(MailTask):\n days_after = models.IntegerField(default=7)\n\n AFTER_COMPLETION = 'TC'\n AFTER_CREATION_INCOMPLETE = 'TI'\n AFTER_LAST_LOGIN_COMPLETED = 'LC'\n AFTER_LAST_LOGIN_INCOMPLETE = 'LI'\n TRIGGER_CHOICES = [\n (AFTER_COMPLETION, 'After completion'),\n (AFTER_CREATION_INCOMPLETE, 'After creation (incomplete)'),\n (AFTER_LAST_LOGIN_COMPLETED, 'After Last Login (Completed)'),\n (AFTER_LAST_LOGIN_INCOMPLETE, 'After Last Login (Incomplete)'),\n ]\n trigger = models.CharField(max_length=2, choices=TRIGGER_CHOICES)\n\n @property\n def type(self):\n return \"Timed mail\"\n\n @property\n def display_general_info(self):\n return f'{self.days_after} days after {self.get_trigger_display()}'\n\n def get_all_sendable_inquiries(self, datetime=None):\n if datetime is None:\n datetime = timezone.now()\n\n # Adjust the time to be at the edge\n datetime = datetime - timedelta(days=self.days_after)\n\n queryset = Inquiry.objects.all()\n\n # Exclude existing and processed entries\n queryset = queryset.exclude(processedmail__in=ProcessedMail.objects.filter(mail=self))\n\n if self.trigger == TimedMailTask.AFTER_COMPLETION:\n queryset = queryset.filter(completed_on__lte=datetime)\n elif self.trigger == TimedMailTask.AFTER_CREATION_INCOMPLETE:\n queryset = queryset.filter(created_on__lte=datetime, is_complete=False)\n elif self.trigger == TimedMailTask.AFTER_LAST_LOGIN_COMPLETED:\n queryset = queryset.filter(last_visited__lte=datetime, is_complete=True)\n elif self.trigger == TimedMailTask.AFTER_LAST_LOGIN_INCOMPLETE:\n queryset = queryset.filter(last_visited__lte=datetime, is_complete=False)\n\n return queryset\n\n def generate_mail(self, send_mail=True):\n \"\"\"\n Generates mail for all applicable inquiries that apply for this task\n :param send_mail: Whether actual mails should be sent.\n If False, mails won't be sent, but ProcessedMail instances will still be created. 
Defaults to True.\n :return: The number of instances processed\n \"\"\"\n # Get all inquiries that need to be mailed\n inquiries = self.get_all_sendable_inquiries()\n\n processed = 0\n\n page_obj = self.layout.get_as_child()\n\n for inquiry in inquiries:\n mail_send = send_mail\n\n if send_mail:\n # For each inquiry, construct the mail\n # Send the mail\n email = inquiry.inquirer.email\n if email:\n construct_and_send_mail(page_obj, {}, email)\n else:\n mail_send = False\n\n # For each inquiry send the mail, and record it as sent (or not)\n ProcessedMail.objects.create(mail=self, inquiry=inquiry, was_applicable=mail_send)\n processed += 1\n return processed\n\n\nclass TriggeredMailTask(MailTask):\n TRIGGER_MAIL_REGISTERED = \"MR\"\n TRIGGER_MAIL_CHANGED = \"MRN\"\n TRIGGER_INQUIRY_COMPLETE = \"IC\"\n EVENT_CHOICES = [\n (TRIGGER_MAIL_REGISTERED, 'After mail registration in inquiry'),\n (TRIGGER_MAIL_CHANGED, 'After mail change in user-settings'),\n (TRIGGER_INQUIRY_COMPLETE, 'After inquiry completion'),\n ]\n event = models.CharField(max_length=3, choices=EVENT_CHOICES)\n\n @property\n def type(self):\n return \"Triggered mail\"\n\n @classmethod\n def trigger(cls, event_type, inquiry=None, inquirer=None, email=None):\n if not (inquirer or inquiry):\n raise AssertionError(\"Either an inquiry or inquirer should be given\")\n\n active_mail_task = cls.objects.filter(event=event_type, active=True).first()\n if active_mail_task is not None:\n active_mail_task.generate_mail(inquiry=inquiry, inquirer=inquirer, send_mail=True, email=email)\n\n @property\n def display_general_info(self):\n return f'{self.get_event_display()}'\n\n def activate(self):\n # Disable any other active mail triggers of the same type.\n if not self.active:\n TriggeredMailTask.objects.filter(event=self.event, active=True).update(active=False)\n # Call the super\n return super(TriggeredMailTask, self).activate()\n\n def generate_mail(self, inquiry=None, inquirer=None, send_mail=False, email=None):\n \"\"\"\n Creates the email from the given inquiry\n :param inquiry:\n :param inquirer:\n :param send_mail:\n :return:\n \"\"\"\n if not send_mail:\n # Mail should not be sent, for triggers we do not track if triggers should have been triggered in the past\n # Because it can not activate again by any other means than the actual event trigger.\n return\n\n # Get the email\n if email is None:\n if inquirer:\n email = inquirer.email\n elif inquiry:\n email = inquiry.inquirer.email\n\n if email:\n context = {\n 'inquiry': inquiry,\n 'inquirer': inquirer,\n }\n\n construct_and_send_mail(self.layout.get_as_child(), context, email)\n ProcessedMail.objects.create(mail=self, inquiry=inquiry, inquirer=inquirer, was_applicable=True)\n\n\n# Import the modules\nfrom questionaire_mailing.modules.modules import *\n","repo_name":"DutcherNL/Shakespear","sub_path":"questionaire_mailing/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"73690450722","text":"from utils import *\n\nclass State:\n def __init__(self, elevator, objects):\n self.elevator = elevator\n\n # Keeping the list of objects sorted ensures that states which\n # are equivalent will always be equal. 
This hugely cuts down\n # the runtime of the program.\n self.objects = tuple(sorted(objects))\n\n def __eq__(self, other):\n return self.elevator == other.elevator and self.objects == other.objects\n\n def __hash__(self):\n return hash((self.elevator, self.objects))\n\n def __lt__(self, other):\n return self.elevator < other.elevator and self.objects < other.objects\n\n def __repr__(self):\n return f\"Floor {self.elevator}: {self.objects}\"\n\ndef generator(element): return element[0]\ndef microchip(element): return element[1]\n\ndef heuristic(state):\n distances = 0\n for generator, microchip in state.objects:\n distances += 3 - generator\n distances += 3 - microchip\n\n return distances\n\ndef pair_is_inverse(p1, p2):\n (p2x, p2y) = p2\n return p1 == (p2y, p2x)\n\n@check(31)\ndef part1(_):\n initial_state = State(elevator=0, objects=((0, 0), (1, 2), (1, 2), (1, 2), (1, 2)))\n path_to_goal = astar_search(initial_state, heuristic, get_next_states)\n return len(path_to_goal) - 1\n\n@check(55)\ndef part2(a):\n initial_state = State(elevator=0, objects=((0, 0), (0, 0), (0, 0), (1, 2), (1, 2), (1, 2), (1, 2)))\n path_to_goal = astar_search(initial_state, heuristic, get_next_states)\n return len(path_to_goal) - 1\n\n\ndef get_next_states(state):\n current_floor = state.elevator\n objects = state.objects\n\n # List of deltas (+1/-1) which the elevator can travel\n available_moves = []\n if current_floor < 3: available_moves.append(+1)\n if current_floor > 0: available_moves.append(-1)\n\n # Lists of all generators/microchips on the current floor\n objects_on_current_floor = lambda by_fn: [o for o in objects if by_fn(o) == current_floor]\n generators_on_current_floor = objects_on_current_floor(generator)\n microchips_on_current_floor = objects_on_current_floor(microchip)\n\n # Combined two lists of all states reachable from moving either up or down\n return flatten([\n get_states_in_direction(state, generators_on_current_floor, microchips_on_current_floor, delta)\n for delta in available_moves\n ])\n\ndef get_states_in_direction(state, generators, microchips, delta):\n # A list of pairs of the form (object, delta)\n # e.g. If the elevator is on floor 0 and floor 0 contains a generator\n # and a microchip, possible_moves is:\n # [\n # ((0, 0), (1, 0)), # < The generator can be moved up one floor\n # ((0, 0), (0, 1)), # < The microchip can be moved up one floor\n # ]\n #\n possible_moves = list(chain(\n [(o, (delta, 0)) for o in generators],\n [(o, (0, delta)) for o in microchips],\n ))\n\n # All possible combinations of moves\n # Lists of length 1 and 2 represent the ability to carry either 1 or 2\n # items in the elevator\n possible_move_combinations = chain(\n combinations(possible_moves, 1),\n combinations(possible_moves, 2),\n )\n\n # If moves are to be applied to the same object (i.e. deltas are\n # (1, 0) and (0, 1)), merge these into a single move - (1, 1)\n #\n # Any moves to equivalent objects (e.g. 
two moves to two paired microchips\n # and generators) are also equivalent, so the result is a set to ensure\n # we don't consider more moves than are necessary.\n all_possible_moves = set(merge_moves_to_same_object(possible_move_combinations))\n\n # Apply deltas, moving from a list of the form (objects_before, delta) to a list\n # of the form (objects_before, objects_after).\n all_possible_moves = [\n [(before, tuple_add(before, delta)) for (before, delta) in move_components]\n for move_components in all_possible_moves\n ]\n\n # For every possible move in the 'delta' direction, apply this move to the current\n # state, yielding a new state with objects on new floors.\n all_possible_states = [\n make_move(state, move, delta)\n for move in all_possible_moves\n ]\n\n # Filter out states in which we fry any microchips\n all_possible_states = filter(is_valid_state, all_possible_states)\n\n return (State(*s) for s in all_possible_states)\n\ndef make_move(old_state, move, delta):\n old_elevator = old_state.elevator\n old_objects = old_state.objects\n\n elevator = old_elevator + delta\n\n objects = list(old_objects)\n\n for moved_object in move:\n (before, after) = moved_object\n objects.remove(before)\n objects.append(after)\n\n new_state = (elevator, objects)\n return (elevator, tuple(objects))\n\ndef is_valid_state(state):\n (elevator, objects) = state\n\n for (generator, microchip) in objects:\n # If a generator and microchip are paired, they're safe\n if generator == microchip:\n continue\n\n # If any other generator is on the same floor as an unpaired\n # microchip, the microchip is fried.\n for (other_generator, _) in objects:\n if other_generator == microchip:\n return False\n\n return True\n\ndef merge_moves_to_same_object(moves):\n moves_after_merging = []\n\n # Iterate through each possible move\n for move in moves:\n # List the items which will be taken in that move\n move_actions = list(move)\n\n # Get pairs of items\n move_pairs = list(combinations(move_actions, r=2))\n\n # For all pairs of moves\n for move_pair in move_pairs:\n (move1, move2) = move_pair\n (before1, delta1) = move1\n (before2, delta2) = move2\n\n # If they are the same item, merge them\n if before1 == before2 and pair_is_inverse(delta1, delta2):\n move_actions.remove(move1)\n move_actions.remove(move2)\n move_actions.append((before1, tuple_add(delta1, delta2)))\n\n moves_after_merging.append(tuple(move_actions))\n\n return moves_after_merging\n","repo_name":"jgilchrist/advent-of-code","sub_path":"python/2016/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34683180076","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\nfrom heapq import heappop, heappush\n\nfrom celery.beat import event_t\nfrom celery.schedules import schedstate\nfrom django_celery_beat.schedulers import DatabaseScheduler\n\nfrom soukoapi.celery import app\n\n\ndef is_task_in_queue(task, queue_name=None):\n queues = [queue_name] if queue_name else app.amqp.queues.keys()\n\n for queue in queues:\n if task in get_celery_queue_tasks(queue):\n return True\n return False\n\n\ndef get_celery_queue_tasks(queue_name):\n with app.pool.acquire(block=True) as conn:\n tasks = conn.default_channel.client.lrange(queue_name, 0, -1)\n decoded_tasks = []\n\n for task in tasks:\n j = json.loads(task)\n task = j[\"headers\"][\"task\"]\n if task not in decoded_tasks:\n decoded_tasks.append(task)\n 
return decoded_tasks\n\n\nclass SmartScheduler(DatabaseScheduler):\n \"\"\"\n Smart means that it prevents duplication of tasks in queues.\n\n The aim is to execute tasks only once.\n \"\"\"\n\n def is_due(self, entry):\n is_due, next_time_to_run = entry.is_due()\n\n if not is_due or not is_task_in_queue( # duplicate wouldn't be created\n entry.task\n ): # not in queue so let it run\n return schedstate(is_due, next_time_to_run)\n\n # Task should be run (is_due) and it is present in queue (is_task_in_queue)\n H = self._heap\n\n if not H:\n return schedstate(False, self.max_interval)\n\n event = H[0]\n verify = heappop(H)\n if verify is event:\n next_entry = self.reserve(entry)\n heappush(\n H,\n event_t(self._when(next_entry, next_time_to_run), event[1], next_entry),\n )\n else:\n heappush(H, verify)\n next_time_to_run = min(verify[0], next_time_to_run)\n return schedstate(False, min(next_time_to_run, self.max_interval))\n","repo_name":"GuitooStephan/soukoapi","sub_path":"main/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"37091792698","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport math\nimport time\n\ntry:\n\n url = \"http://suninjuly.github.io/math.html\"\n browser = webdriver.Chrome()\n browser.get(url)\n\n\n def calc(a):\n return str(math.log(abs(12 * math.sin(int(a)))))\n\n x_element = browser.find_element(By.CSS_SELECTOR, 'span[id=\"input_value\"]')\n x = x_element.text\n y = calc(x)\n time.sleep(2)\n \n input_text = browser.find_element(By.CSS_SELECTOR, 'input[id=\"answer\"]')\n input_text.send_keys(y)\n time.sleep(2)\n\n checkbox = browser.find_element(By.CSS_SELECTOR, '.form-check-custom .form-check-input')\n checkbox.click()\n time.sleep(2)\n\n radio_robot = browser.find_element(By.CSS_SELECTOR, '.form-radio-custom [id=\"robotsRule\"]')\n radio_robot.click()\n time.sleep(2)\n\n button_submit = browser.find_element(By.CSS_SELECTOR, '.btn.btn-default')\n button_submit.click()\n\nfinally:\n time.sleep(10)\n browser.quit()\n","repo_name":"zokm/stepik-auto-tests-course","sub_path":"part2_lesson1_step5.py","file_name":"part2_lesson1_step5.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"14075724924","text":"from datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass Customer:\n\n student_name = 'Matt Jacobi'\n\n def __init__(self, id, fname, lname,dob,city,state,zip):\n self.id = id\n self.first_name = fname\n self.last_name = lname\n self.dob = dob\n self.city = city\n self.state = state\n self.zip = zip\n\n def full_name(self):\n return self.first_name + ' ' + self.last_name\n\n def age(self):\n dob = datetime.strptime(self.dob, '%Y-%m-%d')\n today = datetime.today()\n age = relativedelta(today, dob)\n return age.years\n\n def adult(self):\n return self.age() >= 18\n\n def to_json(self):\n j = {}\n j.update(self.__dict__)\n j ['id'] = self.id\n j ['city'] = self.city\n j ['state'] = self.state\n j ['zip'] = self.zip\n j ['age'] = self.age()\n j ['full_name'] = self.full_name()\n j ['adult'] = self.adult()\n\n return j\n","repo_name":"mljacobi/isds","sub_path":"mjaco32_cust_class.py","file_name":"mjaco32_cust_class.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"23995990313","text":"from gtts import gTTS\n\nimport requests\nimport argparse\nimport base64\nimport json\nimport re\n\n\nparser = argparse.ArgumentParser(description='Convert a JSON file and upload it to Anki. Required Anki Connect in the Desktop App')\nparser.add_argument('output_path', help='Path to place generated files')\nparser.add_argument('target_deck', help='Anki deck to generate voices for')\nargs = parser.parse_args()\n\n\n# print the existing Takoboto->Anki deck to print the IDs that we will scrape\ndeck_name = args.target_deck\n#deck_name = \"adjectives\"\n\n# AnkiConnect API URL\nurl = \"http://localhost:8765\"\n\n# Request parameters\nparams = {\n \"action\": \"findNotes\",\n \"version\": 6,\n \"params\": {\n \"query\": f'deck:\"{deck_name}\"'\n }\n}\n\nprint(f\"Scanning deck {deck_name} to generate a sythesized file for the word.\")\n\n# Send the request to AnkiConnect\nresponse = requests.post(url, json=params)\ndata = response.json()\n\nif \"result\" in data:\n note_ids = data[\"result\"]\n print(f\"Found {len(note_ids)} cards in the '{deck_name}' deck.\")\n\n # gather all notes without a sound\n no_sound_list = []\n for note_index, note_id in enumerate(note_ids):\n \n print(f\"\\rScanning Notes: {(100 * (note_index + 1) / len(note_ids)):.0f}%\", end=\"\")\n\n params = {\n \"action\": \"notesInfo\",\n \"version\": 6,\n \"params\": {\n \"notes\": [note_id]\n }\n }\n response = requests.post(url, json=params)\n data = response.json()\n\n data_fields = data['result'][0]['fields']\n\n # check if sound is used directly on a field\n kanji = \"\"\n used_field = \"\"\n if data_fields.get('kanjis'):\n kanji = data_fields['kanjis']['value']\n used_field = \"kanjis\"\n if data_fields.get('Japanese'):\n kanji = data_fields['Japanese']['value']\n used_field = \"Japanese\"\n\n # check for sound embedded in name\n if kanji != \"\" and used_field != \"\":\n pattern = r'\\[sound:(.*?)\\]'\n matches = re.findall(pattern, kanji)\n if len(matches) == 0: # nothing embedded\n # check for a sound param\n sound_value = data_fields.get('sound')\n if sound_value is None or sound_value['value'] == \"\":\n no_sound_list.append(data)\n if len(matches) > 1:\n # we have more than 1 so regenerate to fix it\n data_fields[used_field]['value'] = re.sub(r'\\[.*?\\]', '', kanji)\n\n \n print(f\"\\n{len(no_sound_list)}/{len(note_ids)} notes dont have sound\")\n total_sounds_to_generate = len(no_sound_list)\n for index, data in enumerate(no_sound_list):\n deck_data = data['result'][0]\n data_fields = deck_data['fields']\n note_id = deck_data['noteId']\n\n # get the kani from our deck types\n kanji = \"\"\n used_field = \"\"\n if data_fields.get('kanjis'):\n kanji = data_fields['kanjis']['value']\n used_field = \"kanjis\"\n if data_fields.get('Japanese'):\n kanji = data_fields['Japanese']['value']\n used_field = \"Japanese\"\n \n if kanji == \"\" or used_field == \"\":\n continue\n print(f\"\\rGenerating Sound: {(100 * (index + 1) / total_sounds_to_generate):.0f}% - {note_id} - {kanji} \", end=\"\")\n\n # Create gTTS object and specify the language\n tts = gTTS(text=kanji, lang='ja', slow=False)\n\n # Save the speech as an audio file\n audio_file = f\"{args.output_path}/{note_id}.wav\"\n #audio_file = f\"/home/retrozelda/Development/projects/takoboto_scraper_for_anki/data/.tmp/{note_id}.wav\"\n tts.save(audio_file)\n\n # grab the sound from disk\n sound_data = base64.b64encode(open(audio_file, \"rb\").read()).decode('utf-8')\n sound_field = {\"filename\": f\"{note_id}.wav\", \"data\": sound_data, 
\"fields\":[used_field], \"deleteExisting\":True}\n\n # create our note update\n updated_note = {\n \"id\" : note_id,\n \"fields\" : {},\n \"audio\" : sound_field\n }\n\n # convert our existing fields over\n for key, value in data_fields.items():\n updated_note['fields'][key] = value['value']\n\n # clean the existing \"used\" field\n updated_note['fields'][used_field] = re.sub(r'\\[.*?\\]', '', updated_note['fields'][used_field])\n \n # insert the sound in our deck\n payload = json.dumps({\n \"action\": \"updateNote\",\n \"version\": 6,\n \"params\": {\n \"note\": updated_note\n }\n })\n response = requests.post(url, data=payload)\n result = response.json()\n if result['error'] is not None:\n print(f\" ERROR: {result['error'] }\")\n\n print(f\"\\rGenerated {total_sounds_to_generate} sounds. \")\n\nelse:\n print(\"An error occurred while fetching notes.\")\n if \"error\" in data:\n print(data[\"error\"])\n\n","repo_name":"RetroZelda/takoboto_scraper_for_anki","sub_path":"python/generate_voice.py","file_name":"generate_voice.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14610806399","text":"class Solution:\n def checkIfPangram(self, sentence: str) -> bool:\n dict = {}\n\n # String.ascii_lowercase で 'abcdefghijklmnopqrstuvwxyz' を生成できる\n for a in 'abcdefghijklmnopqrstuvwxyz':\n dict[a] = False\n\n for s in sentence:\n dict[s] = True\n\n for key, value in dict.items():\n if not value:\n return False\n\n return True\n\n# 模範解答\n# https://leetcode.com/problems/check-if-the-sentence-is-pangram/discuss/1175554/Pangram-Solution-in-Python-3-(96-fast)\n\n\n'''\nimport string\nclass Solution:\n def checkIfPangram(self, sentence: str) -> bool:\n # 集合型を作成\n checker = set(String.ascii_lowercase)\n \n # in 句で真偽値を取得\n for i in checker:\n if i not in sentence.lower():\n return False\n \n return True\n'''\n","repo_name":"Takuma-Ikeda/other-LeetCode","sub_path":"src/easy/answer/check_if_the_sentence_is_pangram.py","file_name":"check_if_the_sentence_is_pangram.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37750850826","text":"from openapi_typed_2 import convert_to_Schema\n\nfrom meeshkan.nlp.schema_merger import SchemaMerger\n\n\ndef test_merge():\n schema1 = {\n \"type\": \"object\",\n \"required\": [\"foo\", \"baz\"],\n \"properties\": {\n \"foo\": {\"type\": \"integer\"},\n \"bar\": {\"type\": \"string\"},\n \"baz\": {\"type\": \"string\"},\n \"faz\": {\n \"type\": \"object\",\n \"required\": [\"field1\"],\n \"properties\": {\n \"field1\": {\"type\": \"integer\"},\n \"field2\": {\"type\": \"string\"},\n \"field3\": {\"type\": \"string\"},\n },\n },\n },\n }\n\n schema2 = {\n \"type\": \"object\",\n \"required\": [\"foo\", \"bar\"],\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n \"bar\": {\"type\": \"string\"},\n \"zaz\": {\"type\": \"string\"},\n \"faz\": {\n \"type\": \"object\",\n \"required\": [\"field1\", \"field4\"],\n \"properties\": {\n \"field1\": {\"type\": \"integer\"},\n \"field2\": {\"type\": \"string\"},\n \"field3\": {\"type\": \"string\"},\n \"field4\": {\"type\": \"integer\"},\n },\n },\n },\n }\n\n schema3 = {\n \"type\": \"object\",\n \"required\": [\"foo\", \"bar\"],\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n \"bar\": {\"type\": \"string\"},\n \"zaz\": {\"type\": \"string\"},\n \"faz\": {\"type\": \"string\"},\n },\n }\n\n schema_merger = 
SchemaMerger()\n\n actual = schema_merger.merge((schema1, schema2, schema3))\n\n expected = {\n \"type\": \"object\",\n \"required\": [\"foo\"],\n \"properties\": {\n \"foo\": {\"anyOf\": [{\"type\": \"integer\"}, {\"type\": \"string\"}]},\n \"bar\": {\"type\": \"string\"},\n \"baz\": {\"type\": \"string\"},\n \"zaz\": {\"type\": \"string\"},\n \"faz\": {\n \"anyOf\": [\n {\"type\": \"string\"},\n {\n \"type\": \"object\",\n \"required\": [\"field1\"],\n \"properties\": {\n \"field1\": {\"type\": \"integer\"},\n \"field2\": {\"type\": \"string\"},\n \"field3\": {\"type\": \"string\"},\n \"field4\": {\"type\": \"integer\"},\n },\n },\n ]\n },\n },\n }\n\n schema = convert_to_Schema(actual)\n assert schema is not None\n\n assert len(actual[\"properties\"]) == len(expected[\"properties\"])\n assert len(actual[\"required\"]) == 1\n assert \"foo\" in actual[\"required\"]\n assert actual[\"properties\"][\"bar\"] == {\"type\": \"string\"}\n assert actual[\"properties\"][\"baz\"] == {\"type\": \"string\"}\n assert actual[\"properties\"][\"zaz\"] == {\"type\": \"string\"}\n assert len(actual[\"properties\"][\"foo\"][\"anyOf\"]) == 2\n assert {\"type\": \"integer\"} in actual[\"properties\"][\"foo\"][\"anyOf\"]\n assert {\"type\": \"string\"} in actual[\"properties\"][\"foo\"][\"anyOf\"]\n assert {\n \"type\": \"object\",\n \"required\": [\"field1\"],\n \"properties\": {\n \"field1\": {\"type\": \"integer\"},\n \"field2\": {\"type\": \"string\"},\n \"field3\": {\"type\": \"string\"},\n \"field4\": {\"type\": \"integer\"},\n },\n } in actual[\"properties\"][\"faz\"][\"anyOf\"]\n","repo_name":"meeshkan/meeshkan-nlp","sub_path":"tests/meeshkan/nlp/test_schema_merger.py","file_name":"test_schema_merger.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"13756755606","text":"from typing import List\n\n\nclass Solution:\n def numberOfGoodSubarraySplits(self, nums: List[int]) -> int:\n ans, m = 1, 10 ** 9 + 7\n cnt = i = 0\n\n while i < len(nums) and nums[i] == 0:\n i += 1\n\n if i == len(nums):\n return 0\n\n while i < len(nums):\n if nums[i] == 1:\n ans = (ans * (cnt + 1)) % m\n cnt = 0\n else:\n cnt += 1\n i += 1\n return ans\n\n\nobj = Solution()\nprint(obj.numberOfGoodSubarraySplits([0, 0]))\n","repo_name":"yashVisavadia/Leetcode-Questions","sub_path":"Practice/Ways to Split Array Into Good Subarrays.py","file_name":"Ways to Split Array Into Good Subarrays.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"72301025762","text":"# Write a function that takes in an array of strings and groups anagrams together.\n\n# Anagrams are strings made up of exactly the same letters, where order doesn't matter. For example, \"cinema\" and \"iceman\" are anagrams; similarly, \"foo\" and \"ofo\" are anagrams. 
Your function should return a list of anagram groups in no particular order.\n\n# sample = [\"yo\", \"act\", \"flop\", \"tac\", \"foo\", \"cat\", \"oy\", \"olfp\"]\n\n# Output = [\n # [\"yo\", \"oy\"],\n # [\"act\", \"tac\", \"cat\"],\n # [\"flop\", \"olfp\"],\n # [\"foo\"]\n# ]\n# \n\ndef groupAnagrams(words):\n dict = {}\n for word in words:\n sortedWords = tuple(sorted(word))\n if sortedWords not in dict:\n dict[sortedWords] = []\n dict[sortedWords].append(word)\n return list(dict.values())\n\nsample = [\"yo\", \"act\", \"flop\", \"tac\", \"foo\", \"cat\", \"oy\", \"olfp\"]\nprint(groupAnagrams(sample))","repo_name":"AsherThomasBabu/AlgoExpert","sub_path":"Strings/Group-Anagram/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"16524485850","text":"\"\"\"Util for testing correct handling of shapes\"\"\"\n\nfrom itertools import chain, product\nfrom typing import Iterator, Sequence\n\nfrom torch import Tensor\n\nfrom util.dimension_order import broadcast_to_by_leading_dims\n\n\nclass BroadcastShapeTestingUtil:\n \"\"\"Namespace for testing utilities for tensors being correctly broadcasted\"\"\"\n BATCH_SHAPES = [(1,), (3,), (5,)]\n SPATIAL_SHAPES = [tuple(), (2,), (2, 3)]\n\n @classmethod\n def expand_tensor_shapes_for_testing(\n cls,\n *tensors: Tensor\n ) -> Iterator[Sequence[Tensor]]:\n \"\"\"Expand tensor shapes from batch and spatial size\n\n E.g: Input tensors with shapes (3, 2) and (3, 3), yield:\n (1, 3, 2), (1, 3, 3)\n (5, 3, 2), (5, 3, 3),\n (1, 3, 2, 2), (1, 3, 3, 2)\n ...\n \"\"\"\n shape_iterator = chain(\n product(\n cls.BATCH_SHAPES,\n cls.SPATIAL_SHAPES),\n [(tuple(), tuple())]\n )\n for batch_shape, spatial_shape in shape_iterator:\n reshaped_tensors = [\n broadcast_to_by_leading_dims(\n tensor,\n batch_shape + tuple(tensor.shape) + spatial_shape,\n tensor.ndim)\n for tensor in tensors\n ]\n yield reshaped_tensors\n","repo_name":"honkamj/SITReg","sub_path":"src/tests/shape_test_util.py","file_name":"shape_test_util.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"17979689638","text":"# Author(s): Marius Weber (ETHZ, HSLU T&A)\r\n\r\nimport rhinoscriptsyntax as rs\r\nimport math\r\n\r\ndef calc_loc_coor(layer,PORIG,PXAXS):\r\n    \"\"\"\r\n    Calculates the directions of the semi-local coordinate systems\r\n\r\n    Parameters\r\n    ----------\r\n    layer : str\r\n        Name of the layer whose mesh defines the plane.\r\n    PORIG : list\r\n        Origin of the semi-local coordinate system.\r\n    PXAXS : list\r\n        Direction of the semi-local x-axis.\r\n    Returns\r\n    -------\r\n    ORxyz : \r\n        Origin of the semi-local coordinate system\r\n\r\n    unit_vector_XA:\r\n        Unit vector of the semi-local x-axis\r\n    \r\n    unit_vector_YA:\r\n        Unit vector of the semi-local y-axis\r\n\r\n    unit_vector_ZA:\r\n        Unit vector of the semi-local z-axis\r\n    \"\"\"\r\n\r\n    # Determine the real global coordinates of the x-axis from PXAXS\r\n    \r\n    X_pxaxs=PORIG[0]+PXAXS[0]\r\n    Y_pxaxs=PORIG[1]+PXAXS[1]\r\n    Z_pxaxs=PORIG[2]+PXAXS[2] \r\n\r\n    # Determine the unit vector (direction) of the z-axis \r\n    obj = rs.ObjectsByLayer(layer)\r\n    normals = rs.MeshFaceNormals(obj)\r\n    if normals:\r\n        for vector in normals: pass\r\n\r\n    # Coordinates (real coordinates in the plane of the corresponding layer) PORIG (KP1)\r\n    X_porig=PORIG[0] \r\n    Y_porig=PORIG[1] \r\n    Z_porig=PORIG[2] \r\n    \r\n    # Coordinates (unit vectors) PZAXS (KP4) incl. determination of the direction\r\n\r\n    
VZ_X=vector[0]\r\n    VZ_Y=vector[1]\r\n    VZ_Z=vector[2]\r\n\r\n    X_pzaxs=X_porig+VZ_X\r\n    Y_pzaxs=Y_porig+VZ_Y\r\n    Z_pzaxs=Z_porig+VZ_Z\r\n\r\n    # Compute the cross product (this is actually the unit vector from the z- and x-axes) \r\n    X_pyaxs=(Y_pzaxs*Z_pxaxs-Z_pzaxs*Y_pxaxs)+X_porig\r\n    Y_pyaxs=(Z_pzaxs*X_pxaxs-X_pzaxs*Z_pxaxs)+Y_porig\r\n    Z_pyaxs=(X_pzaxs*Y_pxaxs-Y_pzaxs*X_pxaxs)+Z_porig\r\n    \r\n    # Summary\r\n    ORxyz=[X_porig,Y_porig,Z_porig] # Origin of the semi-local coordinate system\r\n    XAxyz=[X_pxaxs,Y_pxaxs,Z_pxaxs] # End point of the x-axis of the semi-local coordinate system\r\n    YAxyz=[X_pyaxs,Y_pyaxs,Z_pyaxs] # End point of the y-axis of the semi-local coordinate system\r\n    ZAxyz=[X_pzaxs,Y_pzaxs,Z_pzaxs] # End point of the z-axis of the semi-local coordinate system\r\n\r\n    # Direction vectors\r\n    RV_XA=[XAxyz[0]-ORxyz[0],XAxyz[1]-ORxyz[1],XAxyz[2]-ORxyz[2]]\r\n    RV_YA=[YAxyz[0]-ORxyz[0],YAxyz[1]-ORxyz[1],YAxyz[2]-ORxyz[2]]\r\n    RV_ZA=[ZAxyz[0]-ORxyz[0],ZAxyz[1]-ORxyz[1],ZAxyz[2]-ORxyz[2]]\r\n\r\n    # Compute the unit vectors of the direction vectors (these give the directions of the local coordinate axes in the global system)\r\n    unit_vector_XA_0=(1/(math.sqrt(RV_XA[0]**2+RV_XA[1]**2+RV_XA[2]**2)))*RV_XA[0]\r\n    unit_vector_XA_1=(1/(math.sqrt(RV_XA[0]**2+RV_XA[1]**2+RV_XA[2]**2)))*RV_XA[1]\r\n    unit_vector_XA_2=(1/(math.sqrt(RV_XA[0]**2+RV_XA[1]**2+RV_XA[2]**2)))*RV_XA[2]\r\n    unit_vector_XA=[unit_vector_XA_0,unit_vector_XA_1,unit_vector_XA_2]\r\n\r\n    unit_vector_YA_0=(1/(math.sqrt(RV_YA[0]**2+RV_YA[1]**2+RV_YA[2]**2)))*RV_YA[0]\r\n    unit_vector_YA_1=(1/(math.sqrt(RV_YA[0]**2+RV_YA[1]**2+RV_YA[2]**2)))*RV_YA[1]\r\n    unit_vector_YA_2=(1/(math.sqrt(RV_YA[0]**2+RV_YA[1]**2+RV_YA[2]**2)))*RV_YA[2]\r\n    unit_vector_YA=[unit_vector_YA_0,unit_vector_YA_1,unit_vector_YA_2] \r\n\r\n    unit_vector_ZA_0=(1/(math.sqrt(RV_ZA[0]**2+RV_ZA[1]**2+RV_ZA[2]**2)))*RV_ZA[0]\r\n    unit_vector_ZA_1=(1/(math.sqrt(RV_ZA[0]**2+RV_ZA[1]**2+RV_ZA[2]**2)))*RV_ZA[1]\r\n    unit_vector_ZA_2=(1/(math.sqrt(RV_ZA[0]**2+RV_ZA[1]**2+RV_ZA[2]**2)))*RV_ZA[2]\r\n    unit_vector_ZA=[unit_vector_ZA_0,unit_vector_ZA_1,unit_vector_ZA_2] \r\n    \r\n    return ORxyz, XAxyz, YAxyz, ZAxyz, unit_vector_XA, unit_vector_YA, unit_vector_ZA\r\n","repo_name":"StrucEng-Library-kfmresearch/strucenglib-snippets","sub_path":"strucenglib/prepost_functions/calc_loc_coor.py","file_name":"calc_loc_coor.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"2987526218","text":"import math\n\ncups_to_pound = 10.0 / 3.0\nsugar_cups_to_pound = 1.0 / 2.0\ntsp_to_ounces = 1.0 / 6.0\n\n\ndef bags_of_flour(cookie_count, loaf_count):\n cups_of_flour_cookie = 2.25 / 48.0\n cups_of_flour_loaf = 1.5\n bags = 1.0 / 5.0\n\n flour_for_cookies = cookie_count * cups_of_flour_cookie\n flour_for_loaf = loaf_count * cups_of_flour_loaf\n pounds_of_flour = convert_flour_pound(flour_for_cookies, flour_for_loaf)\n\n return math.ceil(pounds_of_flour * bags)\n\n\ndef containers_of_salt(cookie_count, loaf_count):\n tsp_of_salt_cookie = 1.0 / 48.0\n tsp_of_salt_loaf = 1.0 / 8.0\n container_per_ounce = 1.0 / 26.0\n\n salt_for_cookie = cookie_count * tsp_of_salt_cookie\n salt_for_loaf = loaf_count * tsp_of_salt_loaf\n ounces_of_salt = convert_tsp_ounces(salt_for_cookie, salt_for_loaf)\n\n return math.ceil(ounces_of_salt * container_per_ounce)\n\n\ndef convert_flour_pound(cookie_cup, loaf_cup):\n total_flour = cookie_cup + loaf_cup\n return total_flour / cups_to_pound\n\n\ndef convert_tsp_ounces(cookie_tsp, loaf_tsp):\n total = cookie_tsp + loaf_tsp\n return total * 
tsp_to_ounces\n\n\nprint(containers_of_salt(4, 4))\n","repo_name":"kev-odin/java-where-i-started","sub_path":"cs_142_java/module1/pythonCode/BakingCalculator.py","file_name":"BakingCalculator.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6266227273","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# >>\n# async-redis-cache, 2021\n# LiveViewTech\n# <<\n\nimport os\nimport asyncio\nimport random\nimport string\n\nimport pytest\n\nfrom aiocacher.cache import Cache\nfrom aiocacher.backends.redis import RedisBackend\n\n\nCHARS = string.ascii_letters + string.digits\nREDIS_DB = int(os.getenv('REDIS_TEST_DB', '4'))\n\n\n@pytest.fixture(scope='session')\ndef redis_port() -> int:\n return 16379\n\n\n@pytest.fixture(scope='function')\n@pytest.mark.asyncio\nasync def redis_backend(event_loop, redis_port):\n o = RedisBackend(\n client_name='unittests',\n port=redis_port,\n db=REDIS_DB,\n pool_maxsize=3,\n loop=event_loop,\n )\n yield o\n await o.close()\n\n\n@pytest.fixture(scope='function')\n@pytest.mark.asyncio\nasync def cache(redis_backend):\n o = Cache(\n redis_backend,\n namespace='unittests',\n global_timeout=5.0,\n )\n yield o\n await o.close()\n\n\n@pytest.fixture(scope='function')\ndef random_string(length: int = 16):\n return ''.join(random.choice(CHARS) for _ in range(length))\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef cleanup(request, redis_port):\n \"\"\"Cleanup a testing directory once we are finished.\"\"\"\n def clear_cache():\n loop = asyncio.get_event_loop()\n backend = RedisBackend(port=redis_port, db=REDIS_DB, loop=loop)\n loop.run_until_complete(backend.purge())\n loop.run_until_complete(backend.close())\n loop.run_until_complete(loop.shutdown_asyncgens())\n for task in asyncio.all_tasks(loop):\n task.cancel()\n loop.close()\n request.addfinalizer(clear_cache)\n","repo_name":"blakev/python-aiocacher","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"323363665","text":"#Löst das Sudoku, indem eindeutige Lösungen gefunden werden\ndef solveEasy(sudoku):\n foundSolution = True\n while foundSolution:\n possibilityMap = createPosibilityMap(sudoku)\n foundSolution = False\n for rowBlock in range(3):\n for colBlock in range(3):\n for number in range(1,10):\n times = 0\n for checkRow in range(int(rowBlock * 3), int(rowBlock * 3 + 3)):\n for checkCol in range(int(colBlock * 3), int(colBlock * 3 + 3)):\n if possibilityMap[checkRow][checkCol][number] == True:\n times = times + 1\n if times == 1:\n for checkRow in range(int(rowBlock * 3), int(rowBlock * 3 + 3)):\n for checkCol in range(int(colBlock * 3), int(colBlock * 3 + 3)):\n if possibilityMap[checkRow][checkCol][number] == True:\n print(\"[\" + str(checkRow) + \":\" + str(checkCol) + \"] --> \" + str(number))\n sudoku[checkRow][checkCol] = number\n foundSolution = True\n return sudoku\n\n#Erzeugt ein Array, dass für jedes Zahlenfeld angibt, welche Zahlen möglich sind\ndef createPosibilityMap(sudoku):\n possibilityMap = [[[]]]\n for row in range(len(sudoku)):\n possibilityMap.append([[]])\n for col in range(len(sudoku[row])):\n possibilityMap[row].append([])\n possibilityMap[row][col].append(0)\n #Berechne, in welchem Block sich das aktuelle Feld befindet\n rowBlock = int((row - row % 3) / 3) \n colBlock = int((col - col % 3) / 3)\n for number in 
range(1,10):\n checkFailed = False\n #check occupation\n if sudoku[row][col] != 0:\n checkFailed = True\n #check row\n for checkCol in range(9):\n if sudoku[row][checkCol] == number:\n checkFailed = True\n break\n #check col\n for checkRow in range(9):\n if sudoku[checkRow][col] == number:\n checkFailed = True\n break\n #check box\n for checkRow in range(int(rowBlock * 3), int(rowBlock * 3 + 3)):\n for checkCol in range(int(colBlock * 3), int(colBlock * 3 + 3)):\n if sudoku[checkRow][checkCol] == number:\n checkFailed = True\n break\n if checkFailed:\n break\n if checkFailed == True:\n possibilityMap[row][col].append(False)\n else:\n possibilityMap[row][col].append(True)\n return possibilityMap\n\n#Gibt das Sudoku in der Konsole aus\ndef printSudoku(sudoku, *zeroReplacer):\n if zeroReplacer:\n newRow = []\n rowString = \"\"\n for row in range(0,9):\n rowString = \"\"\n for col in range(0,9):\n rowString = rowString + str(sudoku[row][col])\n rowString = rowString.replace(\"0\",str(zeroReplacer[0])[:1])\n newRow = list(rowString)\n if row % 3 == 0:\n print(\"┼─────────┼─────────┼─────────┼\")\n print(\"│ \" + str(newRow[0]) + \" \" + str(newRow[1]) + \" \" + str(newRow[2]) + \" │ \" + str(newRow[3]) + \" \" + str(newRow[4]) + \" \" + str(newRow[5]) + \" │ \" + str(newRow[6]) + \" \" + str(newRow[7]) + \" \" + str(newRow[8]) + \" │\")\n print(\"┼─────────┼─────────┼─────────┼\")\n else:\n actRow = 0\n for row in sudoku:\n if actRow % 3 == 0:\n print(\"┼─────────┼─────────┼─────────┼\")\n print(\"│ \" + str(row[0]) + \" \" + str(row[1]) + \" \" + str(row[2]) + \" │ \" + str(row[3]) + \" \" + str(row[4]) + \" \" + str(row[5]) + \" │ \" + str(row[6]) + \" \" + str(row[7]) + \" \" + str(row[8]) + \" │\")\n actRow = actRow + 1\n print(\"┼─────────┼─────────┼─────────┼\")\n\ndef inputByRow():\n sudoku = [[]]\n for row in range(9):\n sudoku.append([])\n for col in range(9):\n sudoku[row].append(0)\n\n print(\"To insert the Sudoku write each Line without any additional spaces. 
Use any character for empty fields.\")\n for row in range(9):\n incorretInput = True\n while incorretInput:\n rowText = input(\"Insert row \" + str(row + 1) + \": \")\n if len(rowText) == 9:\n incorretInput = False\n rowArray = list(rowText)\n for col in range(len(rowArray)):\n if rowArray[col].isnumeric():\n sudoku[row][col] = int(rowArray[col])\n else:\n sudoku[row][col] = 0\n printSudoku(sudoku, \" \")\n return sudoku\n\n\n\n#easy: sudoku = [[6,0,0,4,1,0,3,0,8],[8,0,5,0,6,3,4,0,0],[7,3,0,0,2,0,0,0,1],[0,0,6,1,5,7,0,0,2],[5,7,0,0,0,4,1,0,6],[1,2,0,0,9,6,0,4,0],[3,0,0,0,0,0,0,8,0],[0,6,9,0,3,0,0,5,0],[0,0,7,0,4,0,0,1,0]]\n#hard: sudoku = [[0,0,0,0,0,1,0,4,0],[0,3,0,5,0,0,0,2,0],[8,7,1,0,0,0,0,0,0],[0,0,9,0,6,7,2,0,0],[6,0,3,0,9,0,7,0,0],[0,4,7,0,2,0,3,0,0],[9,1,0,0,0,0,0,0,6],[0,6,0,0,0,0,8,0,5],[0,0,0,0,7,4,0,0,0]]\nsudoku = inputByRow()\nsudoku = solveEasy(sudoku)\nprintSudoku(sudoku, \" \")","repo_name":"TillmanOttBA/Sudoku_Solver","sub_path":"sudokuSolver.py","file_name":"sudokuSolver.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10002309096","text":"from django.shortcuts import render\nimport requests\nfrom bs4 import BeautifulSoup\nfrom rest_framework.response import Response\nfrom .serial import JobSerializer\n# Create your views here.\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass ScrapeAPIView(APIView):\n authentication_classes = []\n\n def get(self, request):\n job_lists = []\n\n # Check the page requested\n i = request.query_params.get('page')\n if i is None:\n i = 1\n\n url = f\"https://www.adzuna.com/search?p={i}\"\n response = requests.get(url)\n b = BeautifulSoup(response.content, \"html.parser\")\n results = b.find(class_='ui-search-results')\n jobs = results.select('.ui-search-results > div')\n\n jobs_list = []\n for job in jobs:\n # Your existing job data extraction logic goes here\n logo_find = job.find('div', class_ = 'ui-logo-col')\n if logo_find:\n # getting job info\n job_link = logo_find.find('a')\n img = job_link.find('img')\n info = job.find('div', class_ = 'w-full')\n company = info.find('div', class_ = 'ui-job-card-info').find('a')\n salary = info.find('div', class_ = 'ui-salary')\n salary_to_text = ' '.join(salary.stripped_strings) if salary else 'Not available'\n job_desc = info.find('div', class_ = 'hidden sm:block md:w-auto mt-1')\n job_description = job_desc.find('span') if job_desc else 'Not available'\n \n job_title = info.find('h2').find('a')\n job_title = ' '.join(job_title.stripped_strings) if job_title else \"Failed to Get title\"\n link = job_link.get('href')\n company_link = company.get('href')\n company_name = company.text\n location = info.find('div', class_ = 'ui-location').text\n img_link = img.get('src')\n cleaned_salary = salary_to_text.replace('?', '')\n job_description = ' '.join(job_description.stripped_strings)\n\n job_data = {'title': job_title,\n 'link': link,\n 'company_link': company_link,\n 'company_name': company_name,\n 'location': location,\n 'img': img_link,\n 'salary': cleaned_salary,\n 'job_desc': job_description\n }\n \n jobs_list.append(job_data)\n\n serializer = JobSerializer(jobs_list, many=True)\n return Response(serializer.data, 
status=status.HTTP_200_OK)\n","repo_name":"Easy-creator/job-portal-be","sub_path":"scrape/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14119130821","text":"import argparse\nimport json\nimport logging\nimport os\nimport sys\n\n#import sagemaker_containers\nimport torch\nimport torch.distributed as dist\nimport pandas as pd\nimport numpy as np\n\nfrom torch import nn\nfrom torch.optim import Adam\nfrom transformers import GPT2Tokenizer, GPT2Model\nfrom tqdm import tqdm\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n# set up GPT2Tokenizer\nlogger.info('Loading GPT2Tokenizer.')\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2')\ntokenizer.padding_side = \"left\"\ntokenizer.pad_token = tokenizer.eos_token\n\n# labels\nlabels = {\n \"business\": 0,\n \"entertainment\": 1,\n \"sport\": 2,\n \"tech\": 3,\n \"politics\": 4\n }\n\n# Dataset class\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, df):\n self.labels = [labels[label] for label in df['category']]\n self.texts = [tokenizer(text,\n padding='max_length',\n max_length=128,\n truncation=True,\n return_tensors=\"pt\") for text in df['text']]\n \n def classes(self):\n return self.labels\n \n def __len__(self):\n return len(self.labels)\n \n def get_batch_labels(self, idx):\n # Get a batch of labels\n return np.array(self.labels[idx])\n \n def get_batch_texts(self, idx):\n # Get a batch of inputs\n return self.texts[idx]\n \n def __getitem__(self, idx):\n batch_texts = self.get_batch_texts(idx)\n batch_y = self.get_batch_labels(idx)\n return batch_texts, batch_y\n\n# train data loader\ndef _get_train_data_loader(batch_size, train_dir, **kwargs):\n logger.info(\"Get train data loader\")\n train_df = pd.read_csv(os.path.join(train_dir, \"train.csv\"))\n train_dataset = Dataset(train_df)\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, \n batch_size=batch_size, \n shuffle=True,\n **kwargs\n )\n return train_dataloader\n\n# val data loader\ndef _get_val_data_loader(batch_size, val_dir, **kwargs):\n logger.info(\"Get val data loader\")\n val_df = pd.read_csv(os.path.join(val_dir, \"val.csv\"))\n val_dataset = Dataset(val_df)\n val_dataloader = torch.utils.data.DataLoader(\n val_dataset, \n batch_size=batch_size, \n shuffle=False,\n **kwargs\n )\n return val_dataloader\n\n# test data loader\ndef _get_test_data_loader(batch_size, test_dir, **kwargs):\n logger.info(\"Get test data loader\")\n test_df = pd.read_csv(os.path.join(test_dir, \"test.csv\"))\n test_dataset = Dataset(test_df)\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, \n batch_size=batch_size,\n shuffle=False,\n **kwargs\n )\n return test_dataloader\n\n\n# Classifier model\nclass SimpleGPT2SequenceClassifier(nn.Module):\n def __init__(self, hidden_size: int, num_classes:int ,max_seq_len:int, gpt_model_name:str):\n super(SimpleGPT2SequenceClassifier,self).__init__()\n self.gpt2model = GPT2Model.from_pretrained(gpt_model_name)\n self.fc1 = nn.Linear(hidden_size*max_seq_len, num_classes)\n \n def forward(self, input_id, mask):\n \"\"\"\n Args:\n input_id: encoded inputs ids of sent.\n \"\"\"\n gpt_out, _ = self.gpt2model(input_ids=input_id, attention_mask=mask, return_dict=False)\n batch_size = gpt_out.shape[0]\n linear_output = self.fc1(gpt_out.view(batch_size,-1))\n return linear_output\n\n# train\ndef train(args):\n # set 
up GPU training (if using GPU)\n use_cuda = args.num_gpus > 0\n logger.debug(\"Number of gpus available - {}\".format(args.num_gpus))\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # set the seed for generating random numbers\n torch.manual_seed(args.seed)\n if use_cuda:\n torch.cuda.manual_seed(args.seed)\n\n # load train, validation and test data\n train_loader = _get_train_data_loader(args.batch_size, args.train_dir, **kwargs)\n val_loader = _get_val_data_loader(args.batch_size, args.val_dir, **kwargs)\n test_loader = _get_test_data_loader(args.batch_size, args.test_dir, **kwargs)\n\n # print logging info\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of train data\".format(\n len(train_loader.sampler),\n len(train_loader.dataset),\n 100.0 * len(train_loader.sampler) / len(train_loader.dataset),\n )\n )\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of val data\".format(\n len(val_loader.sampler),\n len(val_loader.dataset),\n 100.0 * len(val_loader.sampler) / len(val_loader.dataset),\n )\n )\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of test data\".format(\n len(test_loader.sampler),\n len(test_loader.dataset),\n 100.0 * len(test_loader.sampler) / len(test_loader.dataset),\n )\n )\n\n # initialize model and parameters\n model = SimpleGPT2SequenceClassifier(hidden_size=args.hidden_size, num_classes=5, max_seq_len=args.max_seq_len, gpt_model_name=\"gpt2\").to(device)\n EPOCHS = args.epochs\n LR = args.lr\n\n # use cross-entropy as the loss function\n criterion = nn.CrossEntropyLoss()\n\n # use Adam as the optimizer\n optimizer = Adam(model.parameters(), lr=LR)\n\n # enable GPU training (if using GPU)\n if use_cuda:\n model = model.cuda()\n criterion = criterion.cuda()\n\n # training loop\n for epoch_num in range(EPOCHS):\n total_acc_train = 0\n total_loss_train = 0\n \n for train_input, train_label in tqdm(train_loader):\n train_label = train_label.to(device)\n mask = train_input['attention_mask'].to(device)\n input_id = train_input[\"input_ids\"].squeeze(1).to(device)\n \n model.zero_grad()\n\n output = model(input_id, mask)\n \n batch_loss = criterion(output, train_label)\n total_loss_train += batch_loss.item()\n \n acc = (output.argmax(dim=1)==train_label).sum().item()\n total_acc_train += acc\n\n batch_loss.backward()\n optimizer.step()\n \n total_acc_val = 0\n total_loss_val = 0\n \n # validate model on validation data\n with torch.no_grad():\n for val_input, val_label in val_loader:\n val_label = val_label.to(device)\n mask = val_input['attention_mask'].to(device)\n input_id = val_input['input_ids'].squeeze(1).to(device)\n \n output = model(input_id, mask)\n \n batch_loss = criterion(output, val_label)\n total_loss_val += batch_loss.item()\n \n acc = (output.argmax(dim=1)==val_label).sum().item()\n total_acc_val += acc\n \n logger.info(\n f\"Epochs: {epoch_num + 1} | Train Loss: {total_loss_train/len(train_loader): .3f} \\\n | Train Accuracy: {total_acc_train / len(train_loader.dataset): .3f} \\\n | Val Loss: {total_loss_val / len(val_loader.dataset): .3f} \\\n | Val Accuracy: {total_acc_val / len(val_loader.dataset): .3f}\")\n \n # evaluate model performance on unseen data\n test(model, test_loader, device)\n \n # save model\n save_model(model, args.model_dir)\n\n# test\ndef test(model, test_loader, device):\n model.eval()\n \n # Tracking variables\n predictions_labels = []\n true_labels = []\n \n total_acc_test = 0\n with torch.no_grad():\n\n for test_input, test_label in 
test_loader:\n\n test_label = test_label.to(device)\n mask = test_input['attention_mask'].to(device)\n input_id = test_input['input_ids'].squeeze(1).to(device)\n\n output = model(input_id, mask)\n\n acc = (output.argmax(dim=1) == test_label).sum().item()\n total_acc_test += acc\n \n # add original labels\n true_labels += test_label.cpu().numpy().flatten().tolist()\n # get predicitons to list\n predictions_labels += output.argmax(dim=1).cpu().numpy().flatten().tolist()\n \n logging.info(f'Test Accuracy: {total_acc_test / len(test_loader.dataset): .3f}')\n\n# save model\ndef save_model(model, model_dir):\n logger.info(\"Saving the model.\")\n path = os.path.join(model_dir, \"model.pth\")\n torch.save(model.cpu().state_dict(), path)\n\n# Loads the model parameters from a model.pth file in the SageMaker model directory model_dir.\ndef model_fn(model_dir):\n logger.info('Loading the model.')\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = SimpleGPT2SequenceClassifier(hidden_size=768, num_classes=5, max_seq_len=128, gpt_model_name=\"gpt2\")\n with open(os.path.join(model_dir, \"model.pth\"), \"rb\") as f:\n model.load_state_dict(torch.load(f, map_location=torch.device('cpu')))\n # model.to(device).eval()\n logger.info('Done loading model')\n return model.to(device)\n\n# Deserialize the Invoke request body into an object we can perform prediction on\ndef input_fn(request_body, request_content_type='application/json'):\n # Deserializing input data\n logger.info('Deserializing the input data.')\n if request_content_type == 'application/json':\n input_data = json.loads(request_body)\n text = input_data[\"text\"]\n logger.info(f'Input text: {text}')\n\n logger.info('Tokenizing input text.')\n fixed_text = \" \".join(text.lower().split())\n model_input = tokenizer(fixed_text, padding='max_length', max_length=128, truncation=True, return_tensors=\"pt\")\n mask = model_input['attention_mask'].cpu()\n input_id = model_input[\"input_ids\"].squeeze(1).cpu()\n return (input_id, mask)\n raise Exception(f'Requested unsupported ContentType in content_type {request_content_type}')\n\n# Perform prediction on the deserialized object, with the loaded model\ndef predict_fn(input_data, model):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n logger.info('Generating prediction based on input parameters.')\n classes = [\"business\", \"entertainment\", \"sport\", \"tech\", \"politics\"]\n input_id, mask = input_data[0].to(device), input_data[1].to(device)\n model = model.to(device)\n output = model(input_id, mask)\n prob = torch.nn.functional.softmax(output, dim=1)[0]\n _, indices = torch.sort(output, descending=True)\n return {classes[idx]: prob[idx].item() for idx in indices[0][:5]}\n\n# Serialize the prediction result into the desired response content type\ndef output_fn(prediction, response_content_type='application/json'):\n logger.info('Serializing the generated output.')\n result = prediction\n if response_content_type == 'application/json':\n response_body_str = json.dumps(result)\n return response_body_str\n raise Exception(f'Requested unsupported ContentType in Accept:{response_content_type}')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Data and model checkpoints directories\n parser.add_argument(\n \"--batch-size\",\n type=int,\n default=2,\n metavar=\"N\",\n help=\"input batch size for training (default: 2)\",\n )\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=1,\n metavar=\"N\",\n help=\"number 
of epochs to train (default: 1)\",\n )\n parser.add_argument(\n \"--lr\", type=float, default=1e-5, metavar=\"LR\", help=\"learning rate (default: 1e-5)\"\n )\n parser.add_argument(\"--seed\", type=int, default=1, metavar=\"S\", help=\"random seed (default: 1)\")\n\n parser.add_argument(\"--hidden-size\", type=int, default=768, metavar=\"HS\", help=\"hidden size (default: 768)\")\n parser.add_argument(\"--max-seq-len\", type=int, default=128, metavar=\"MSL\", help=\"max sequence length (default: 128)\")\n\n # Container environment\n # parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\n # parser.add_argument(\"--current-host\", type=str, default=os.environ[\"SM_CURRENT_HOST\"])\n\n parser.add_argument(\"--model-dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\n parser.add_argument(\"--train-dir\", type=str, default=os.environ[\"SM_CHANNEL_TRAIN\"])\n parser.add_argument(\"--val-dir\", type=str, default=os.environ[\"SM_CHANNEL_VAL\"])\n parser.add_argument(\"--test-dir\", type=str, default=os.environ[\"SM_CHANNEL_TEST\"])\n parser.add_argument(\"--num-gpus\", type=int, default=os.environ[\"SM_NUM_GPUS\"])\n\n train(parser.parse_args())\n","repo_name":"haocai1992/GPT2-News-Classifier","sub_path":"sagemaker-train-deploy/code/train_deploy.py","file_name":"train_deploy.py","file_ext":"py","file_size_in_byte":12743,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"13327396510","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\n\nfrom coupon import models\nfrom cart.views import create_cart\nfrom cart import models as models_cart\n\nUser = get_user_model()\n\nclass TestViews(TestCase):\n\n def setUp(self):\n group = Group.objects.get(name='Customers')\n permissions = Permission.objects.all()\n group.permissions.set(permissions)\n group.save()\n self.user = User.objects.create_user('unittest', password='unittest')\n self.client.login(username=\"unittest\", password=\"unittest\")\n self.user.is_superuser = True\n self.user.is_staff = True\n self.user.save()\n self.session = self.client.session\n self.cart = create_cart(self.user, self.session)\n self.coupon = models.Coupon.objects.create(name='unittest', percent=10, active=True)\n self.list_coupon_url = reverse('coupon:list_coupon')\n self.add_to_cart_url = reverse('coupon:add_to_cart')\n self.create_coupon_url = reverse('coupon:create_coupon')\n self.update_coupon_url = reverse('coupon:update_coupon', kwargs={'pk': 1})\n self.delete_coupon_url = reverse('coupon:delete_coupon', kwargs={'pk': 1})\n self.send_to_email_url = reverse('coupon:send_to_email')\n\n def test_list_coupon_view(self):\n \"\"\"test coupon list\"\"\"\n response = self.client.get(self.list_coupon_url)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'coupon/coupon_list.html')\n\n def test_add_to_cart_view(self):\n \"\"\"test add coupon to card view\"\"\"\n cart = create_cart(self.user, self.session)\n response = self.client.post(self.add_to_cart_url, {'promo': 'unittest', 'cart_id': 2})\n self.assertEquals(response.status_code, 302)\n self.assertEqual(models_cart.Cart.objects.last().coupon_percent, self.coupon.percent)\n\n def test_create_coupon_view(self):\n \"\"\"test coupon create\"\"\"\n response = self.client.post(self.create_coupon_url, {\n 'name': 'test',\n 'percent': 
10,\n 'active': True\n })\n self.assertEquals(response.status_code, 302)\n self.assertEqual(models.Coupon.objects.last().name, 'test')\n self.assertEqual(models.Coupon.objects.last().percent, 10)\n\n def test_update_coupon_view(self):\n \"\"\"test update coupon\"\"\"\n response = self.client.post(self.update_coupon_url, {\n 'name': 'testU',\n 'percent': 10,\n 'active': True\n })\n self.assertEquals(response.status_code, 302)\n self.assertEqual(models.Coupon.objects.last().name, 'testU')\n\n def test_delete_coupon_view(self):\n \"\"\"test delete coupon\"\"\"\n response = self.client.post(self.delete_coupon_url)\n self.assertEquals(response.status_code, 302)\n self.assertEqual(models.Coupon.objects.all().count(), 0)\n\n def test_send_to_email_view(self):\n \"\"\"test send to email coupon\"\"\"\n response = self.client.post(self.send_to_email_url, {'email': 'test@gmail.com'})\n self.assertEquals(response.status_code, 302)\n","repo_name":"xistadi/BookStore","sub_path":"src/coupon/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"32587554413","text":"# participant = [\"leo\", \"kiki\", \"eden\"] #input().split(',')\n# completion = [\"eden\", \"kiki\"]#input().split(',')\nimport copy\n\nparticipant = [\"mislav\", \"stanko\", \"mislav\", \"ana\"]\ncompletion = [\"stanko\", \"ana\", \"mislav\"]\n#\"leo\", \"kiki\", \"eden\"\n#\"eden\", \"kiki\"\ndef solution(participant, completion):\n dit = {}\n hashValue = 0\n for p in participant:\n dit[hash(p)] = p\n hashValue += hash(p)\n for c in completion:\n hashValue -= hash(c)\n return dit[hashValue]\nif __name__ == \"__main__\":\n participant = [\"mislav\", \"stanko\", \"mislav\", \"ana\"]\n completion = [\"stanko\", \"ana\", \"mislav\"]\n solution(participant,completion)\n print(solution(participant,completion))\n # there can be runners with the same name, and both of them may finish ㅠㅠ\n # res = list(set(participant)-set(completion))\n # if len(res)>0:\n # return res[0]\n # else:\n # names = {}\n # for name in participant:\n # if name in names.keys():\n # return name\n # else:\n # names[name]=1\n","repo_name":"joniekwon/cote_study","sub_path":"프로그래머스_Level1/완주하지못한선수.py","file_name":"완주하지못한선수.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"13333855268","text":"import wntr\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport wn_util\nfrom time_constants import SECONDS_PER_DAY, SECONDS_PER_HOUR\nfrom LeakageDetectors import BetweenSensorInterpolator, SingleSensorForecaster\nfrom LeakProperties import LeakProperties\n\ndef residual_plot(alarms, leak_properties, ax=None, title=None):\n\t'''\n\tPlot the residuals of an Alarms object and mark the actual leak time.\n\t\n\tThis method can be used to diagnose the performance of a leakage detector.\n\n\tNote: This method is only usable if all alarm DataFrames have the same\n\ttime index, i.e. if the threshold of the leakage detector was 0.\n\n\tParameters\n\t-----------\n\n\talarms: alarms.Alarms object\n\tfor each DataFrame in alarms.values() the content of the 'time' column is\n\tassumed to be the same.\n\n\tleak_properties: LeakProperties.LeakProperties\n\tthese are used to mark the leak time in the plot by vertical lines\n\n\tax: matplotlib.axes._subplots.Axes, optional, default=None\n\tan Axes object to plot to. 
If none is given, a new Axes object is created.\n\n\ttitle: str, optional\n\ta title to add to the Axes object (not to the whole figure)\n\n\tReturns\n\t-------\n\tan Axes object with the plot\n\t'''\n\tall_residuals = alarms.residual_matrix()\n\n\tax = all_residuals.plot(ax=ax)\n\tleak_start = leak_properties.start_time\n\tleak_end = leak_start + leak_properties.duration\n\tkwargs = dict(alpha=.1, color='k')\n\n\tax.axvline(leak_start, **kwargs)\n\tax.axvline(leak_end, **kwargs)\n\tif title is not None:\n\t\tax.set_title(title)\n\treturn ax\n\ndef plot_ssf(k):\n\t'''\n\tCreate a residual plot for a SingleSensorForecaster with given k value.\n\n\tNote: This was used experimentally to create plots and requires the\n\texistance of global variables.\n\t'''\n\tfig, ax = plt.subplots()\n\tssf = SingleSensorForecaster(\n\t\tnodes_with_sensors = nodes_with_sensors,\n\t\ttrain_days=train_days,\n\t\tk=k,\n\t\tthreshold=0\n\t)\n\talarms_ssf = ssf.train_and_detect(pressures)\n\tresidual_plot(alarms_ssf, leak_properties, ax, title=f'$k={k}$')\n\tfig.suptitle('Single Sensor Forecaster')\n\tplt.savefig(f'../Leakage_Detector_Plots/ssf_{k}.png')\n\ndef plot_bsi(k):\n\t'''\n\tCreate a residual plot for a BetweenSensorInterpolator with given k value.\n\n\tNote: This was used experimentally to create plots and requires the\n\texistance of global variables.\n\t'''\n\tfig, ax = plt.subplots()\n\tbsi = BetweenSensorInterpolator(\n\t\tnodes_with_sensors = nodes_with_sensors,\n\t\ttrain_days=train_days,\n\t\tk=k,\n\t\tthreshold=0\n\t)\n\talarms_bsi = bsi.train_and_detect(pressures)\n\tresidual_plot(alarms_bsi, leak_properties, ax, title=f'$k={k}$')\n\tfig.suptitle('Between Sensor Interpolator')\n\tplt.savefig(f'../Leakage_Detector_Plots/bsi_{k}.png')\n\nif __name__=='__main__':\n\twn = wntr.network.WaterNetworkModel(\n\t\t'../Data/Hanoi_Leakage_Detector_Comparison.inp'\n\t)\n\tnodes_with_sensors = ['4', '13', '16', '31', 'Flow_1', 'Flow_2']\n\ttrain_days = 5\n\tleak_properties = LeakProperties(\n\t\tjunction_name='23',\n\t\tarea=0.005,\n\t\tstart_time=6*SECONDS_PER_DAY+12*SECONDS_PER_HOUR,\n\t\tduration=3*SECONDS_PER_HOUR\n\t)\n\tpressures = wn_util.compute_pressures(\n\t\twn, leak_properties, flow_links=['1', '2']\n\t)\n\tfor k in [1,2,4,6,8]:\n\t\tplot_ssf(k)\n\n","repo_name":"pstahlhofen/submission_master_thesis","sub_path":"src/test_detectors.py","file_name":"test_detectors.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9374839760","text":"import sys\n\n# python tools/filter_dump.py ref_dump.txt\n\nskip = False\nfor line in sys.stdin:\n if line.startswith('=> '):\n #print(line, end='')\n skip = True\n if line.startswith('ra'):\n skip = False\n if not skip:\n parts = line.split()\n if parts[0] in ['ra', 'sp', 'gp', 'tp', 't0', 't1', 't2',\n 'fp', 's1', 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7',\n 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11',\n 't3', 't4', 't5', 't6', 'pc']:\n print(parts[0], parts[1])\n else:\n print(line, end='')\n","repo_name":"eecsmap/riscvm","sub_path":"tools/filter_dump.py","file_name":"filter_dump.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"74293002402","text":"import mysql.connector\n\nfrom utils import *\nfrom datetime import *\n\ndef cadastro(n, cp):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n 
database=DATABASE\n )\n\n # dados da pessoa:\n while True:\n validar = valida_cpf(cp)\n if validar == True:\n cpf = pontuar_cpf(cp)\n break\n else:\n print('cpf invalido digite novamente.')\n\n comando = f'INSERT INTO pessoas (nome, cpf) VALUES (\"{n}\", \"{cpf}\");'\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n conexao.close()\n\n\ndef cadastro_funcionario_dao(nome, cpf, senha):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n while True:\n v = valida_cpf(cpf)\n if v:\n cpf = pontuar_cpf(cpf)\n break\n else:\n print('cpf invalido digite novamente.')\n\n comando = f'INSERT INTO funcionarios (nome, cpf, senha) VALUES (\"{nome}\", \"{cpf}\", \"{senha}\");'\n\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n conexao.close()\n\n\n# dados do veiculo:\ndef cadastro_veiculo(mar, mod, cor, pla, cpf):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n idproprietario = get_id_by_cpf(cpf)\n\n while True:\n v = validar_placa(pla)\n\n if v:\n placa = pontuar_placa(pla)\n\n break\n\n else:\n print('Placa invalida. Digite nomvamente.')\n\n comando = f\"\"\"INSERT INTO veiculos (marca, modelo, cor, placa, idproprietario) VALUES\n ('{mar}', '{mod}', '{cor}', '{placa}', '{idproprietario}')\"\"\"\n\n cursor.execute(comando)\n\n conexao.commit() # edita o banco de dados\n\n conexao.close()\n\n\ndef criavaga():\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n seçao = 'amarelo'\n numero = 1\n andar = 1\n tipo = 'carro'\n bloco = ''\n status = 'vazio'\n idveiculo = 'vazio'\n\n for bl in range(1, 5):\n if bl == 1:\n bloco = 'A'\n\n elif bl == 2:\n bloco = 'B'\n\n elif bl == 3:\n bloco = 'C'\n\n elif bl == 4:\n bloco = 'D'\n for ad in range(1, 4):\n if ad == 1:\n andar = 1\n if ad == 2:\n andar = 2\n if ad == 3:\n andar = 3\n for c in range(0, 10):\n if c == 0:\n tipo = 'carro'\n if c == 4:\n tipo = 'moto'\n if c == 8:\n tipo = 'PCD'\n # sistema de seçao do andar 1\n if ad == 1 and c == 5:\n seçao = 'verde'\n if ad == 1 and c == 0:\n seçao = 'amarelo'\n # sistema de seçao do andar 2\n if ad == 2 and c == 5:\n seçao = 'azul'\n if ad == 2 and c == 0:\n seçao = 'vermelho'\n # sistema de seçao do andar 3\n if ad == 3 and c == 5:\n seçao = 'preto'\n if ad == 3 and c == 0:\n seçao = 'roxo'\n comando = f'INSERT INTO vagas (seçao, numero, andar, tipo, bloco, status) VALUES ' \\\n f' (\"{seçao}\", \"{numero}\", \"{andar}\", \"{tipo}\", \"{bloco}\", \"{status}\")'\n\n numero += 1\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n conexao.close()\n\n\ndef mostravaga(tipo):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n\n\n comando = f'select id, seçao, numero, andar, tipo, bloco, status from vagas ' \\\n f'where status = \"vazio\" and tipo = \"{tipo}\";'\n\n cursor.execute(comando)\n\n resultado = cursor.fetchall() # ler o banco de dados\n\n conexao.close()\n\n return resultado\n\n\n\ndef preenche_vaga(id_vaga, id_funcionario):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n idveiculo = ultimo_veiculo()\n hora = hora_atual()\n data = data_atual()\n comando = f'UPDATE `estacionamento`.`vagas` SET `idveiculo` = \"{idveiculo}\", `idfuncionario` = \"{id_funcionario}\",'\\\n f' `status` = \"ocupado\", `data_entrada` = \"{data}\", `hora` = \"{hora}\" 
WHERE(`id` = \"{id_vaga}\");'\n\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n conexao.close()\n\n\ndef retirar_veiculo(idveiculo):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n\n\n comando = f'select veiculos.marca, veiculos.modelo, veiculos.cor, vagas.bloco, vagas.andar,' \\\n f' vagas.seçao, vagas.idveiculo, vagas.numero, vagas.tipo from vagas ' \\\n f'join veiculos on vagas.idveiculo = veiculos.id where veiculos.id = {idveiculo};'\n cursor.execute(comando)\n resultado1 = cursor.fetchall() # ler o banco de dados\n\n comando = f'select pessoas.id, pessoas.nome, pessoas.cpf from pessoas ' \\\n f'join veiculos on pessoas.id = veiculos.idproprietario where veiculos.id = {idveiculo};'\n cursor.execute(comando)\n resultado2 = cursor.fetchall() # ler o banco de dados\n conexao.close()\n\n resultado3 = valor_estadia(idveiculo)\n\n return resultado1, resultado2, resultado3\n\n\n\ndef deletar_dados(id_proprietario):\n\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n\n comando = 'SET FOREIGN_KEY_CHECKS = 0;'\n cursor.execute(comando)\n conexao.commit()\n\n\n comando = f'select id from veiculos where idproprietario = {id_proprietario}'\n cursor.execute(comando)\n id_veiculo = cursor.fetchall()[0]['id']\n\n\n comando = f'delete pessoas.* from pessoas where id = \"{id_proprietario}\";'\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n\n\n comando = f'delete veiculos.* from veiculos where idproprietario = \"{id_proprietario}\";'\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n\n\n comando = f'update estacionamento.vagas set idveiculo = null, data_entrada = null, hora = null,' \\\n f' idfuncionario = null, status = \"vazio\" where idveiculo = \"{id_veiculo}\";'\n cursor.execute(comando)\n conexao.commit() # edita o banco de dados\n\n\n comando = 'SET FOREIGN_KEY_CHECKS = 1;'\n cursor.execute(comando)\n conexao.commit()\n conexao.close()\n\n\n\ndef validar_login_funcionario(cpf, senha):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n comando = f'''select id from funcionarios where cpf='{cpf}' and senha='{senha}';'''\n\n cursor.execute(comando)\n result = cursor.fetchall()\n conexao.close()\n\n if result == []:\n return False\n else:\n return True, result[-1]['id']\n\n\n\ndef get_user_data_by_id(id_user):\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n comando = f'select nome from funcionarios where id = {id_user};'\n cursor.execute(comando)\n name = cursor.fetchone()['nome']\n conexao.close()\n\n return name\n\n\n\ndef mostravaga_retirada():\n conexao, cursor = conectar_banco(\n host=HOST,\n user=USER,\n password=PASSWORD,\n database=DATABASE\n )\n\n comando = 'select vagas.id, vagas.idveiculo, vagas.seçao, vagas.numero, vagas.andar, vagas.bloco, veiculos.marca, veiculos.modelo,' \\\n ' veiculos.placa from vagas join veiculos on vagas.idveiculo = veiculos.id;'\n\n\n cursor.execute(comando)\n\n resultado = cursor.fetchall() # ler o banco de dados\n print(resultado)\n conexao.close()\n\n return resultado\n\n\n\n\n","repo_name":"luizhmg/Estacionamento","sub_path":"ShoppingBoulevard/estacionamento/app/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"14194700992","text":"__author__ = \"Eric Magliarditi\"\n__copyright__ = \"Copyright 2019, Massachusetts Institute of Technology\"\n__credits__ = [\"Eric Magliarditi\"]\n\n__license__ = \"unlicensed\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Eric Magliarditi\"\n__email__ = \"ericmags@mit.edu\"\n__status__ = \"Prototype\"\n\nfrom .my_data_loader import MyDataLoader\nfrom .unet import UNet\nfrom .util import *\n","repo_name":"ericmagliarditi/public_coding_examples","sub_path":"land_classification/landpy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38998133892","text":"#Exercício 47 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n'''Em uma competição de ginástica, cada atleta recebe votos de sete jurados. A melhor e a pior nota são eliminadas. A sua nota fica\nsendo a média dos votos restantes. Você deve fazer um programa que receba o nome do ginasta e as notas dos sete jurados alcançadas \npelo atleta em sua apresentação e depois informe a sua média, conforme a descrição acima informada (retirar o melhor e o pior salto \ne depois calcular a média com as notas restantes). As notas não são informados ordenadas. Um exemplo de saída do programa deve ser \nconforme o exemplo abaixo:\nAtleta: Aparecido Parente\nNota: 9.9\nNota: 7.5\nNota: 9.5\nNota: 8.5\nNota: 9.0\nNota: 8.5\nNota: 9.7\n---------------------------\nResultado final:\nAtleta: Aparecido Parente\nMelhor nota: 9.9\nPior nota: 7.5\nMédia: 9,04'''\nlista = []\nnome = input('Nome do Atleta: ')\nif nome != \"\":\n for i in range(0,7):\n nota = float(input('Nota: '))\n lista.append(nota)\n lista_ordenada = sorted(lista, reverse=True)\n\n melhor_nota = lista_ordenada[0]\n pior_nota = lista_ordenada[6]\n soma = lista_ordenada[1] + lista_ordenada[2] + lista_ordenada[3] + lista_ordenada[4] + lista_ordenada[5]\n media = soma / 5\n print('\\n---------------------------')\n print('Resultado final:')\n print('Atleta: ', nome)\n print('Melhor nota: ', melhor_nota)\n print('Pior Notas: ', pior_nota)\n print('Média: %2.2f' %media)\nelse:\n print('Programa encerrado!')","repo_name":"rafaelpederiva/Resposta_Python_Brasil","sub_path":"Exercícios de Estrutura de Repetição/Exercício 47 - Competição de Ginástica.py","file_name":"Exercício 47 - Competição de Ginástica.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8691770621","text":"import pandas as p\ndef check(inputPath):\n try:\n dataSet_path = inputPath\n dataSet = p.read_csv(dataSet_path)\n print(\"DataSet loaded\")\n return dataSet_path\n except FileNotFoundError:\n print(\"DataSet not found\")\n except Exception as e:\n print(f\"An unexpected error occurred: {e}\")","repo_name":"freiNbasya/OPItask3","sub_path":"main/check_dataSet.py","file_name":"check_dataSet.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26404740117","text":"import pygame, os, sys\n\nfrom pygame.locals import *\n\nclass Text(pygame.sprite.Sprite):\n \n def __init__(self, x, y, message, RGBtupple, font):\n pygame.sprite.Sprite.__init__(self)\n \n self.message = message\n self.font = font\n self.anti = True\n self.RGBtupple = RGBtupple\n self.image = self.font.render(self.message, self.anti, 
self.RGBtupple).convert_alpha()\n \n self.rect = self.image.get_rect()\n \n self.rect.left = x\n self.rect.top = y\n \n def update(self):\n temp_x = self.rect.left\n temp_y = self.rect.top\n \n self.image = self.font.render(self.message, self.anti, self.RGBtupple).convert_alpha()\n \n self.rect = self.image.get_rect()\n \n self.rect.left = temp_x\n self.rect.top = temp_y","repo_name":"jgatt/pybreaker","sub_path":"Text.py","file_name":"Text.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15310044870","text":"import cv2\nimport numpy as np\nimport matplotlib\nfrom matplotlib.pyplot import imshow\nfrom matplotlib import pyplot as plt\nimport os\nimport bresenham\n\n\ndef pixel_is_white(image,x,y) :\n return np.array_equal(image[y][x],np.array([255,255,255]))\n\ndef pixel_is_black(image,x,y) :\n return np.array_equal(image[y][x],np.array([0,0,0]))\n\n\ndef signature_denoise_on_image(img):\n if len(img.shape) > 2:\n raise ValueError('Please pass black and white image')\n \n h,w = img.shape\n\n image = np.copy(img)\n\n image[img == 0] = 255\n image[img == 255] = 0\n\n\n threshold = 50\n minLineLength = 20\n maxLineGap = 2\n\n\n #determine every lines into the document regarding some parameters \n\n lines = cv2.HoughLinesP(image, 1, np.pi/180, threshold, 0, minLineLength, maxLineGap)\n \n image = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n somme1 = 0\n\n for line in lines : \n line = line[0]\n length = np.linalg.norm(np.array([line[0], line[1]])- np.array([line[2], line[3]]))\n somme1 += length\n \n somme1 = somme1 / len(lines)\n\n\n for x in range(0, len(lines)):\n \n for x1,y1,x2,y2 in lines[x]:\n \n length = np.linalg.norm(np.array([x1, y1])- np.array([x2, y2]))\n \n if length > 20 :\n \n if x2 != x1 :\n \n if 0.3 < abs((y2-y1)/(x2-x1)) < 5:\n\n \n #determine lines between two extremal points\n coordinates = list(bresenham.bresenham(x1,y1,x2,y2)) \n \n \n coordinates = np.array(coordinates)\n \n \n #List with unique element for y axis \n unique = np.unique(coordinates[:,0])\n \n \n \n\n L1 = []\n\n for elmt in unique : \n L1 += [coordinates[ coordinates[:,0] == elmt]]\n #List of group having the same y-axis value\n \n for elmt in L1 :\n #Order each group of this list regarding x-axis\n elmt = np.sort(elmt, axis=0)\n \n lim_r = elmt[-1][0] + 1\n lim_l = elmt[0][0] - 1\n ordo = elmt[0][1]\n\n if (lim_r < w and pixel_is_white(image, lim_r, ordo)) or (lim_l >= 0 and pixel_is_white(image, lim_l, ordo)):\n for pixel in elmt :\n \n image[ordo][pixel[0]] = [255,255,255]\n \n if lim_r < w and pixel_is_white(image, lim_r, ordo) :\n if lim_l >= 0 and pixel_is_white(image, lim_l, ordo) :\n for pixel in elmt : \n image[ordo][pixel[0]] = [255,255,255] \n elif lim_l >= 0 and pixel_is_black(image, lim_r, ordo): \n for pixel in elmt :\n if pixel[0] >= elmt[int(len(elmt)/2)][0] :\n image[ordo][pixel[0]] = [255,255,255]\n \n if lim_l >= 0 and pixel_is_white(image, lim_l, ordo) : \n if lim_r < w and pixel_is_black(image, lim_r, ordo): \n for pixel in elmt : \n if pixel[0] <= elmt[int(len(elmt)/2)][0] :\n image[ordo][pixel[0]] = [255,255,255] \n\n return image ","repo_name":"GeoffroyDeMeyer/cv_mtl_die","sub_path":"noise_removal/signature_denoise.py","file_name":"signature_denoise.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70499559202","text":"\"\"\" \nModified from: 
https://github.com/daveredrum/ScanRefer/blob/master/lib/dataset.py\n\"\"\"\n\nimport re\nimport os\nimport sys\nimport time\nimport h5py\nimport json\nimport pickle\nimport numpy as np\nimport multiprocessing as mp\n\n#from sklearn import preprocessing\nfrom torch.utils.data import Dataset\nfrom data.scannet.model_util_scannet import ScannetDatasetConfig\n\nsys.path.append(os.path.join(os.getcwd(), 'lib')) # HACK add the lib folder\nfrom lib.config import CONF\nfrom utils.pc_utils import random_sampling, rotx, roty, rotz\nfrom data.scannet.model_util_scannet import ScannetDatasetConfig, rotate_aligned_boxes_along_axis\n\n# data setting\nDC = ScannetDatasetConfig()\nMAX_NUM_OBJ = 128\nMEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])\n\n# data path\nSCANNET_V2_TSV = os.path.join(CONF.PATH.SCANNET_META, 'scannetv2-labels.combined.tsv')\nMULTIVIEW_DATA = CONF.MULTIVIEW\nGLOVE_PICKLE = os.path.join(CONF.PATH.DATA, 'glove.p')\n\n\ndef get_answer_score(freq):\n if freq == 0:\n return .0\n elif freq == 1:\n return .3\n elif freq == 2:\n return .6\n elif freq == 3:\n return .9\n else:\n return 1.\n\nclass ScannetQADatasetConfig(ScannetDatasetConfig):\n def __init__(self):\n super().__init__()\n self.num_answers = -1\n\nclass Answer(object):\n def __init__(self, answers=None, unk_token='', ignore_idx=-100):\n if answers is None:\n answers = []\n self.unk_token = unk_token\n self.ignore_idx = ignore_idx\n self.vocab = {x: i for i, x in enumerate(answers)}\n self.rev_vocab = dict((v, k) for k, v in self.vocab.items())\n\n def itos(self, i):\n if i == self.ignore_idx:\n return self.unk_token\n return self.rev_vocab[i]\n\n def stoi(self, v):\n if v not in self.vocab:\n #return self.vocab[self.unk_token]\n return self.ignore_idx\n return self.vocab[v]\n\n def __len__(self):\n return len(self.vocab) \n\n\nclass ScannetQADataset(Dataset):\n def __init__(self, scanqa, scanqa_all_scene, \n use_unanswerable=False,\n answer_cands=None,\n answer_counter=None,\n answer_cls_loss='ce',\n split='train', \n num_points=40000,\n use_height=False, \n use_color=False, \n use_normal=False, \n use_multiview=False, \n tokenizer=None,\n augment=False,\n debug=False,\n ):\n\n self.debug = debug\n self.all_data_size = -1\n self.answerable_data_size = -1\n\n self.answer_features = None\n self.use_unanswerable = use_unanswerable\n\n if split == 'train':\n # remove unanswerble qa samples for training\n self.all_data_size = len(scanqa)\n if use_unanswerable: \n self.scanqa = scanqa\n else:\n self.scanqa = [data for data in scanqa if len(set(data['answers']) & set(answer_cands)) > 0]\n self.answerable_data_size = len(self.scanqa)\n print('all train:', self.all_data_size)\n print('answerable train', self.answerable_data_size)\n elif split == 'val':\n self.all_data_size = len(scanqa)\n if use_unanswerable:\n self.scanqa = scanqa\n else:\n self.scanqa = [data for data in scanqa if len(set(data['answers']) & set(answer_cands)) > 0]\n \n self.answerable_data_size = len(self.scanqa)\n print('all val:', self.all_data_size)\n print('answerable val', self.answerable_data_size)\n elif split == 'test':\n self.scanqa = scanqa\n\n self.scanqa_all_scene = scanqa_all_scene # all scene_ids in scanqa\n self.answer_cls_loss = answer_cls_loss\n self.answer_cands = answer_cands\n self.answer_counter = answer_counter\n self.answer_vocab = Answer(answer_cands)\n self.num_answers = 0 if answer_cands is None else len(answer_cands) \n\n self.split = split\n self.num_points = num_points\n self.use_color = use_color \n self.use_height = use_height\n 
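# These flags choose the extra per-point channels appended to XYZ in __getitem__\n # (colour, height above floor, normals, multiview features); judging by the inline\n # shape comments there, the multiview features contribute 128 dims per point\n # ((50000, 7) -> (50000, 135)).\n 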
self.use_normal = use_normal \n self.use_multiview = use_multiview\n self.augment = augment\n\n # tokenize a question to tokens\n scene_ids = sorted(set(record['scene_id'] for record in self.scanqa))\n self.scene_id_to_number = {scene_id:int(''.join(re.sub('scene', '', scene_id).split('_'))) for scene_id in scene_ids}\n self.scene_number_to_id = {v: k for k, v in self.scene_id_to_number.items()}\n\n self.use_bert_embeds = False\n if tokenizer is None:\n from spacy.tokenizer import Tokenizer\n from spacy.lang.en import English\n nlp = English()\n # Create a blank Tokenizer with just the English vocab\n spacy_tokenizer = Tokenizer(nlp.vocab)\n \n def tokenize(sent):\n sent = sent.replace('?', ' ?')\n return [token.text for token in spacy_tokenizer(sent)]\n\n for record in self.scanqa:\n record.update(token=tokenize(record['question'])) \n else:\n self.use_bert_embeds = True\n for record in self.scanqa:\n record.update(token=tokenizer(record['question'], return_tensors='np'))\n \n # load data\n self._load_data()\n self.multiview_data = {}\n\n\n def __len__(self):\n return len(self.scanqa)\n\n def __getitem__(self, idx):\n start = time.time()\n scene_id = self.scanqa[idx]['scene_id']\n if self.split != 'test':\n object_ids = self.scanqa[idx]['object_ids']\n object_names = [' '.join(object_name.split('_')) for object_name in self.scanqa[idx]['object_names']]\n else: \n object_ids = None\n object_names = None \n\n question_id = self.scanqa[idx]['question_id']\n answers = self.scanqa[idx].get('answers', [])\n\n answer_cats = np.zeros(self.num_answers) \n answer_inds = [self.answer_vocab.stoi(answer) for answer in answers]\n\n if self.answer_counter is not None: \n answer_cat_scores = np.zeros(self.num_answers)\n for answer, answer_ind in zip(answers, answer_inds):\n if answer_ind < 0:\n continue \n answer_cats[answer_ind] = 1\n answer_cat_score = get_answer_score(self.answer_counter.get(answer, 0))\n answer_cat_scores[answer_ind] = answer_cat_score\n\n if not self.use_unanswerable:\n assert answer_cats.sum() > 0\n assert answer_cat_scores.sum() > 0\n else:\n raise NotImplementedError\n\n answer_cat = answer_cats.argmax()\n\n #\n # get language features\n #\n if self.use_bert_embeds:\n lang_feat = self.lang[scene_id][question_id]\n lang_feat['input_ids'] = lang_feat['input_ids'].astype(np.int64)\n lang_feat['attention_mask'] = lang_feat['attention_mask'].astype(np.float32)\n if 'token_type_ids' in lang_feat:\n lang_feat['token_type_ids'] = lang_feat['token_type_ids'].astype(np.int64)\n lang_len = self.scanqa[idx]['token']['input_ids'].shape[1]\n else:\n lang_feat = self.lang[scene_id][question_id]\n lang_len = len(self.scanqa[idx]['token'])\n\n lang_len = lang_len if lang_len <= CONF.TRAIN.MAX_TEXT_LEN else CONF.TRAIN.MAX_TEXT_LEN\n #\n # get point cloud features\n #\n mesh_vertices = self.scene_data[scene_id]['mesh_vertices']\n instance_labels = self.scene_data[scene_id]['instance_labels']\n semantic_labels = self.scene_data[scene_id]['semantic_labels']\n instance_bboxes = self.scene_data[scene_id]['instance_bboxes']\n\n if not self.use_color:\n point_cloud = mesh_vertices[:,0:3]\n pcl_color = mesh_vertices[:,3:6]\n else:\n point_cloud = mesh_vertices[:,0:6] \n point_cloud[:,3:6] = (point_cloud[:,3:6]-MEAN_COLOR_RGB)/256.0\n pcl_color = point_cloud[:,3:6]\n \n if self.use_normal:\n normals = mesh_vertices[:,6:9]\n point_cloud = np.concatenate([point_cloud, normals],1) # p (50000, 7)\n\n if self.use_height:\n floor_height = np.percentile(point_cloud[:,2],0.99)\n height = point_cloud[:,2] - 
floor_height\n point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1)\n\n '''\n if self.use_multiview:\n # load multiview database\n pid = mp.current_process().pid\n if pid not in self.multiview_data:\n self.multiview_data[pid] = h5py.File(MULTIVIEW_DATA + '.hdf5', 'r', libver='latest')\n multiview = self.multiview_data[pid][scene_id]\n point_cloud = np.concatenate([point_cloud, multiview],1)\n '''\n\n #'''\n if self.use_multiview:\n # load multiview database\n enet_feats_file = os.path.join(MULTIVIEW_DATA, scene_id) + '.pkl'\n multiview = pickle.load(open(enet_feats_file, 'rb'))\n point_cloud = np.concatenate([point_cloud, multiview],1) # p (50000, 135)\n #'''\n\n point_cloud, choices = random_sampling(point_cloud, self.num_points, return_choices=True) \n instance_labels = instance_labels[choices]\n semantic_labels = semantic_labels[choices]\n pcl_color = pcl_color[choices]\n \n # ------------------------------- LABELS ------------------------------ \n target_bboxes = np.zeros((MAX_NUM_OBJ, 6))\n target_bboxes_mask = np.zeros((MAX_NUM_OBJ)) \n angle_classes = np.zeros((MAX_NUM_OBJ,))\n angle_residuals = np.zeros((MAX_NUM_OBJ,))\n size_classes = np.zeros((MAX_NUM_OBJ,))\n size_residuals = np.zeros((MAX_NUM_OBJ, 3))\n ref_box_label = np.zeros(MAX_NUM_OBJ) # bbox label for reference target\n\n ref_center_label = np.zeros(3) # bbox center for reference target\n ref_heading_class_label = 0\n ref_heading_residual_label = 0\n ref_size_class_label = 0\n ref_size_residual_label = np.zeros(3) # bbox size residual for reference target\n\n if self.split != 'test':\n num_bbox = instance_bboxes.shape[0] if instance_bboxes.shape[0] < MAX_NUM_OBJ else MAX_NUM_OBJ\n target_bboxes_mask[0:num_bbox] = 1\n target_bboxes[0:num_bbox,:] = instance_bboxes[:MAX_NUM_OBJ,0:6]\n\n point_votes = np.zeros([self.num_points, 3])\n point_votes_mask = np.zeros(self.num_points)\n\n # ------------------------------- DATA AUGMENTATION ------------------------------ \n if self.augment and not self.debug:\n if np.random.random() > 0.5:\n # Flipping along the YZ plane\n point_cloud[:,0] = -1 * point_cloud[:,0]\n target_bboxes[:,0] = -1 * target_bboxes[:,0] \n \n if np.random.random() > 0.5:\n # Flipping along the XZ plane\n point_cloud[:,1] = -1 * point_cloud[:,1]\n target_bboxes[:,1] = -1 * target_bboxes[:,1] \n\n # Rotation along X-axis\n rot_angle = (np.random.random()*np.pi/18) - np.pi/36 # -5 ~ +5 degree\n rot_mat = rotx(rot_angle)\n point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))\n target_bboxes = rotate_aligned_boxes_along_axis(target_bboxes, rot_mat, 'x')\n\n # Rotation along Y-axis\n rot_angle = (np.random.random()*np.pi/18) - np.pi/36 # -5 ~ +5 degree\n rot_mat = roty(rot_angle)\n point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))\n target_bboxes = rotate_aligned_boxes_along_axis(target_bboxes, rot_mat, 'y')\n\n # Rotation along up-axis/Z-axis\n rot_angle = (np.random.random()*np.pi/18) - np.pi/36 # -5 ~ +5 degree\n rot_mat = rotz(rot_angle)\n point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))\n target_bboxes = rotate_aligned_boxes_along_axis(target_bboxes, rot_mat, 'z')\n\n # Translation\n point_cloud, target_bboxes = self._translate(point_cloud, target_bboxes)\n\n # compute votes *AFTER* augmentation\n # generate votes\n # Note: since there's no map between bbox instance labels and\n # pc instance_labels (it had been filtered \n # in the data preparation step) we'll compute the instance bbox\n # from the points sharing the same instance 
label. \n for i_instance in np.unique(instance_labels): \n # find all points belong to that instance\n ind = np.where(instance_labels == i_instance)[0]\n # find the semantic label \n if semantic_labels[ind[0]] in DC.nyu40ids:\n x = point_cloud[ind,:3]\n center = 0.5*(x.min(0) + x.max(0))\n point_votes[ind, :] = center - x\n point_votes_mask[ind] = 1.0\n point_votes = np.tile(point_votes, (1, 3)) # make 3 votes identical \n \n class_ind = [DC.nyu40id2class[int(x)] for x in instance_bboxes[:num_bbox,-2]]\n # NOTE: set size class as semantic class. Consider use size2class.\n size_classes[0:num_bbox] = class_ind\n size_residuals[0:num_bbox, :] = target_bboxes[0:num_bbox, 3:6] - DC.mean_size_arr[class_ind,:]\n\n # construct the reference target label for each bbox\n ref_box_label = np.zeros(MAX_NUM_OBJ)\n\n for i, gt_id in enumerate(instance_bboxes[:num_bbox,-1]): \n if gt_id == object_ids[0]:\n ref_box_label[i] = 1\n ref_center_label = target_bboxes[i, 0:3]\n ref_heading_class_label = angle_classes[i]\n ref_heading_residual_label = angle_residuals[i]\n ref_size_class_label = size_classes[i]\n ref_size_residual_label = size_residuals[i]\n\n \n assert ref_box_label.sum() > 0\n else:\n num_bbox = 1\n point_votes = np.zeros([self.num_points, 9]) # make 3 votes identical \n point_votes_mask = np.zeros(self.num_points)\n\n target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))\n try:\n target_bboxes_semcls[0:num_bbox] = [DC.nyu40id2class[int(x)] for x in instance_bboxes[:,-2][0:num_bbox]]\n except KeyError:\n pass\n\n object_name = None if object_names is None else object_names[0]\n object_cat = self.raw2label[object_name] if object_name in self.raw2label else 17\n\n data_dict = {}\n if self.use_bert_embeds:\n data_dict['lang_feat'] = lang_feat\n else:\n data_dict['lang_feat'] = lang_feat.astype(np.float32) # language feature vectors\n data_dict['point_clouds'] = point_cloud.astype(np.float32) # point cloud data including features\n data_dict['lang_len'] = np.array(lang_len).astype(np.int64) # length of each description\n data_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3] # (MAX_NUM_OBJ, 3) for GT box center XYZ\n data_dict['heading_class_label'] = angle_classes.astype(np.int64) # (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1\n data_dict['heading_residual_label'] = angle_residuals.astype(np.float32) # (MAX_NUM_OBJ,)\n data_dict['size_class_label'] = size_classes.astype(np.int64) # (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER\n data_dict['size_residual_label'] = size_residuals.astype(np.float32) # (MAX_NUM_OBJ, 3)\n data_dict['num_bbox'] = np.array(num_bbox).astype(np.int64)\n data_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64) # (MAX_NUM_OBJ,) semantic class index\n data_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32) # (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box\n data_dict['vote_label'] = point_votes.astype(np.float32) # \n data_dict['vote_label_mask'] = point_votes_mask.astype(np.int64) # point_obj_mask (gf3d)\n data_dict['scan_idx'] = np.array(idx).astype(np.int64)\n data_dict['pcl_color'] = pcl_color\n data_dict['ref_box_label'] = ref_box_label.astype(np.int64) # (MAX_NUM_OBJ,) # 0/1 reference labels for each object bbox\n\n data_dict['ref_center_label'] = ref_center_label.astype(np.float32) # (3,)\n data_dict['ref_heading_class_label'] = np.array(int(ref_heading_class_label)).astype(np.int64) # (MAX_NUM_OBJ,)\n data_dict['ref_heading_residual_label'] = np.array(int(ref_heading_residual_label)).astype(np.int64) # 
(MAX_NUM_OBJ,)\n data_dict['ref_size_class_label'] = np.array(int(ref_size_class_label)).astype(np.int64) # (MAX_NUM_OBJ,)\n data_dict['ref_size_residual_label'] = ref_size_residual_label.astype(np.float32) \n data_dict['object_cat'] = np.array(object_cat).astype(np.int64)\n\n data_dict['scene_id'] = np.array(int(self.scene_id_to_number[scene_id])).astype(np.int64)\n if type(question_id) == str:\n data_dict['question_id'] = np.array(int(question_id.split('-')[-1])).astype(np.int64)\n else:\n data_dict['question_id'] = np.array(int(question_id)).astype(np.int64)\n data_dict['pcl_color'] = pcl_color\n data_dict['load_time'] = time.time() - start\n data_dict['answer_cat'] = np.array(int(answer_cat)).astype(np.int64) # 1\n data_dict['answer_cats'] = answer_cats.astype(np.int64) # num_answers\n if self.answer_cls_loss == 'bce' and self.answer_counter is not None:\n data_dict['answer_cat_scores'] = answer_cat_scores.astype(np.float32) # num_answers\n return data_dict\n\n \n def _get_raw2label(self):\n # mapping\n scannet_labels = DC.type2class.keys()\n scannet2label = {label: i for i, label in enumerate(scannet_labels)}\n\n lines = [line.rstrip() for line in open(SCANNET_V2_TSV)]\n lines = lines[1:]\n raw2label = {}\n for i in range(len(lines)):\n label_classes_set = set(scannet_labels)\n elements = lines[i].split('\\t')\n raw_name = elements[1]\n nyu40_name = elements[7]\n if nyu40_name not in label_classes_set:\n raw2label[raw_name] = scannet2label['others']\n else:\n raw2label[raw_name] = scannet2label[nyu40_name]\n\n return raw2label\n\n def _get_unique_multiple_lookup(self):\n all_sem_labels = {}\n cache = {}\n for data in self.scanqa:\n scene_id = data['scene_id']\n\n for object_id, object_name in zip(data['object_ids'], data['object_names']):\n object_id = data['object_ids'][0]\n object_name = ' '.join(object_name.split('_'))\n\n if scene_id not in all_sem_labels:\n all_sem_labels[scene_id] = []\n\n if scene_id not in cache:\n cache[scene_id] = {}\n\n if object_id not in cache[scene_id]:\n cache[scene_id][object_id] = {}\n try:\n all_sem_labels[scene_id].append(self.raw2label[object_name])\n except KeyError:\n all_sem_labels[scene_id].append(17)\n\n all_sem_labels = {scene_id: np.array(all_sem_labels[scene_id]) for scene_id in all_sem_labels.keys()}\n\n unique_multiple_lookup = {}\n for data in self.scanqa:\n scene_id = data['scene_id']\n question_id = data['question_id']\n\n unique_multiples = []\n for object_id, object_name in zip(data['object_ids'], data['object_names']):\n object_id = data['object_ids'][0]\n object_name = ' '.join(object_name.split('_'))\n try:\n sem_label = self.raw2label[object_name]\n except KeyError:\n sem_label = 17\n\n unique_multiple_ = 0 if (all_sem_labels[scene_id] == sem_label).sum() == 1 else 1\n unique_multiples.append(unique_multiple_)\n\n unique_multiple = max(unique_multiples)\n\n # store\n if scene_id not in unique_multiple_lookup:\n unique_multiple_lookup[scene_id] = {}\n\n unique_multiple_lookup[scene_id][question_id] = unique_multiple\n\n return unique_multiple_lookup\n\n def _tranform_text_glove(self, token_type='token'):\n with open(GLOVE_PICKLE, 'rb') as f:\n glove = pickle.load(f)\n\n lang = {}\n for data in self.scanqa:\n scene_id = data['scene_id']\n question_id = data['question_id']\n\n if scene_id not in lang:\n lang[scene_id] = {}\n\n if question_id in lang[scene_id]:\n continue\n\n # tokenize the description\n tokens = data[token_type]\n embeddings = np.zeros((CONF.TRAIN.MAX_TEXT_LEN, 300))\n # tokens = ['sos'] + tokens + ['eos']\n # 
embeddings = np.zeros((CONF.TRAIN.MAX_TEXT_LEN + 2, 300))\n for token_id in range(CONF.TRAIN.MAX_TEXT_LEN):\n if token_id < len(tokens):\n token = tokens[token_id]\n if token in glove:\n embeddings[token_id] = glove[token]\n else:\n embeddings[token_id] = glove['unk']\n\n # store\n lang[scene_id][question_id] = embeddings\n\n return lang\n\n def _tranform_text_bert(self, token_type='token'):\n lang = {}\n\n def pad_tokens(tokens):\n N = CONF.TRAIN.MAX_TEXT_LEN - 2 \n if tokens.ndim == 2:\n tokens = tokens[0]\n padded_tokens = np.zeros(CONF.TRAIN.MAX_TEXT_LEN)\n tokens = np.append(tokens[:-1][:N+1], tokens[-1:])\n padded_tokens[:len(tokens)] = tokens\n return padded_tokens\n\n for data in self.scanqa:\n scene_id = data['scene_id']\n question_id = data['question_id']\n\n if scene_id not in lang:\n lang[scene_id] = {}\n\n if question_id in lang[scene_id]:\n continue\n\n # for BERT\n if 'token_type_ids' in data[token_type]:\n padded_input_ids = pad_tokens(data[token_type]['input_ids'])\n padded_token_type_ids = pad_tokens(data[token_type]['token_type_ids'])\n padded_attention_mask = pad_tokens(data[token_type]['attention_mask'])\n # store\n lang[scene_id][question_id] = {\n 'input_ids': padded_input_ids, \n 'token_type_ids': padded_token_type_ids,\n 'attention_mask': padded_attention_mask,\n }\n else: # for DistillBERT\n padded_input_ids = pad_tokens(data[token_type]['input_ids'])\n padded_attention_mask = pad_tokens(data[token_type]['attention_mask'])\n lang[scene_id][question_id] = {\n 'input_ids': padded_input_ids, \n 'attention_mask': padded_attention_mask,\n }\n\n return lang\n\n\n def _load_data(self):\n print('loading data...')\n # load language features\n if self.use_bert_embeds:\n self.lang = self._tranform_text_bert('token')\n else:\n self.lang = self._tranform_text_glove('token')\n\n # add scannet data\n self.scene_list = sorted(list(set([data['scene_id'] for data in self.scanqa])))\n\n # load scene data\n self.scene_data = {}\n for scene_id in self.scene_list:\n self.scene_data[scene_id] = {}\n self.scene_data[scene_id]['mesh_vertices'] = np.load(os.path.join(CONF.PATH.SCANNET_DATA, scene_id)+'_aligned_vert.npy') # axis-aligned\n self.scene_data[scene_id]['instance_labels'] = np.load(os.path.join(CONF.PATH.SCANNET_DATA, scene_id)+'_ins_label.npy')\n self.scene_data[scene_id]['semantic_labels'] = np.load(os.path.join(CONF.PATH.SCANNET_DATA, scene_id)+'_sem_label.npy')\n self.scene_data[scene_id]['instance_bboxes'] = np.load(os.path.join(CONF.PATH.SCANNET_DATA, scene_id)+'_aligned_bbox.npy')\n\n # prepare class mapping\n lines = [line.rstrip() for line in open(SCANNET_V2_TSV)]\n lines = lines[1:]\n raw2nyuid = {}\n for i in range(len(lines)):\n elements = lines[i].split('\\t')\n raw_name = elements[1]\n nyu40_name = int(elements[4])\n raw2nyuid[raw_name] = nyu40_name\n\n # store\n self.raw2nyuid = raw2nyuid\n self.raw2label = self._get_raw2label()\n self.label2raw = {v: k for k, v in self.raw2label.items()}\n if self.split != 'test':\n self.unique_multiple_lookup = self._get_unique_multiple_lookup()\n\n def _translate(self, point_set, bbox):\n # unpack\n coords = point_set[:, :3]\n # translation factors\n x_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]\n y_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]\n z_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]\n factor = [x_factor, y_factor, z_factor]\n # dump\n coords += factor\n point_set[:, :3] = coords\n bbox[:, :3] += factor\n\n return point_set, 
bbox\n","repo_name":"ATR-DBI/ScanQA","sub_path":"lib/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":25870,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"54"} +{"seq_id":"15143081951","text":"\"\"\"\n@Author : Lord_Bao\n@Date : 2021/3/7\n\n\"\"\"\n\n\"\"\"\n 忽略大小写,空格,标点符号。检查是否是回文\n\"\"\"\n\n\ndef is_palindrome(s):\n def to_chars(s):\n s = s.lower() # 忽略大小写\n letters = \"\"\n for char in s:\n if char in \"abcdefghijklmnopqrstuvwxyz\": # 忽略符号,空格符等\n letters += char\n\n return letters\n\n def is_pal(s):\n if len(s) <= 1:\n return True\n return s[0] == s[-1] and is_pal(s[1:-1])\n\n return is_pal(to_chars(s))\n\n\nif __name__ == '__main__':\n print(is_palindrome(\"dogGOD\"))\n print(is_palindrome(\"dogOOD\"))\n print(is_palindrome(\"Able\twas\tI,\tere\tI\tsaw\tElba\"))\n print(is_palindrome(\"Are\twe\tnot\tdrawn\tonward,\twe\tfew,\tdrawn\tonward\tto\tnew\tera?\"))\n","repo_name":"LordBao666/MITLecture6.0001_Introduction_To_CS_Programing_In_Python","sub_path":"practice/4functions_scoping_and_abstraction/is_palindrome(chap4.3.2).py","file_name":"is_palindrome(chap4.3.2).py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74671853280","text":"\"\"\"\nLC509 - Fibonacci Number\n\nThe Fibonacci numbers, commonly denoted F(n) form a sequence, called the Fibonacci sequence, such that each number is\nthe sum of the two preceding ones, starting from 0 and 1. That is,\n\nF(0) = 0, F(1) = 1\nF(N) = F(N - 1) + F(N - 2), for N > 1.\n\nGiven N, calculate F(N).\n\nExample 1:\n\nInput: 2\nOutput: 1\nExplanation: F(2) = F(1) + F(0) = 1 + 0 = 1.\n\nExample 2:\n\nInput: 3\nOutput: 2\nExplanation: F(3) = F(2) + F(1) = 1 + 1 = 2.\n\nExample 3:\n\nInput: 4\nOutput: 3\nExplanation: F(4) = F(3) + F(2) = 2 + 1 = 3.\n\"\"\"\n\n\nfrom math import sqrt\n\n\ndef fib(n: int) -> int:\n return int((((1+sqrt(5))/2)**n - ((1-sqrt(5))/2)**n)/sqrt(5))\n\n\ndef fib_(n: int) -> int:\n f_0 = 0\n f_1 = 1\n\n if n == 0:\n return f_0\n elif n == 1:\n return f_1\n else:\n for i in range(1, n):\n temp = f_1\n f_1 = temp + f_0\n f_0 = temp\n\n return f_1\n\n\nif __name__ == '__main__':\n for i in range(10):\n print(fib(i))\n\n print('')\n\n for i in range(10):\n print(fib_(i))","repo_name":"daveboat/interview_prep","sub_path":"coding_practice/general/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20984564963","text":"import socket\nimport sys\nimport time\nimport errno\nimport math\nfrom multiprocessing import Process\n\nlogmsg = 'You have chosen Logarithm\\n'\nsqrtmsg = 'You have chosen Square Root\\n'\nexpmsg = 'You have chosen Exponential\\n'\n\ndef process_start(s_sock):\n s_sock.send(str.encode('Calculator'))\n while True:\n data = s_sock.recv(2048).decode()\n if data == '1':\n s_sock.sendall(str.encode(logmsg))\n number = s_sock.recv(2048).decode()\n num = int(number)\n calculate = math.log10(num)\n result = str(calculate)\n s_sock.sendall(str.encode(result))\n elif data == '2':\n s_sock.sendall(str.encode(sqrtmsg))\n number = s_sock.recv(2048).decode()\n num = int(number)\n calculate = math.sqrt(num)\n result = str(calculate)\n s_sock.sendall(str.encode(result))\n elif data == '3':\n s_sock.sendall(str.encode(expmsg))\n number = s_sock.recv(2048).decode()\n num = int(number)\n calculate = math.exp(num)\n result = 
str(calculate)\n s_sock.sendall(str.encode(result))\n else:\n break\n s_sock.close()\n\nif __name__=='__main__':\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.bind((\"\",8888))\n print(\"listening...\")\n s.listen(3)\n try:\n while True:\n try:\n s_sock,s_addr = s.accept()\n p = Process(target=process_start, args=(s_sock,))\n p.start()\n\n\n\n except socket.error:\n print('got a socket error')\n\n except Exception as e:\n print('an exception occured!')\n print(e)\n sys.exit(1)\n finally:\n s.close()\n\n\n","repo_name":"itcatmeow/ITT440-March2022-LabAssignment","sub_path":"6.3server.py","file_name":"6.3server.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14430917961","text":"from backend import creat_app\nfrom flask_socketio import SocketIO, emit\nfrom backend.corelib.hardwarelib.digitest import Digitest\nimport time\n\napp = creat_app()\nsocketio = SocketIO(app, cors_allowed_origins=['http://localhost:9031', 'https://localhost:9031'])\n\nclass DigiTestTester:\n def mear(self):\n ba = Digitest()\n ba.open(\"COM5\")\n def mearsure(ba):\n ba.start_mear()\n while True:\n statusCode, value = ba.get_single_value()\n print(f'statusCode {statusCode} value {value}')\n if value != '\"DEVICE BUSY\"':\n return value\n elif statusCode < 0:\n print('distance too big when measuring')\n return None\n else:\n time.sleep(1)\n ret = ba.get_ms_method()\n print(ret)\n ba.config(debug=False,wait_cmd = True)\n ba.set_remote(True)\n ret = mearsure(ba)\n print('Hardness Result: {}'.format(ret))\n ba.set_remote(False)\n ba.close()\n return ret\n\nclass MainTask:\n def __init__(self) -> None:\n self.batchName = ''\n self.historyData = []\n \n def reset_batch(self, batchName):\n self.batchName = batchName\n self.historyData = []\n \n def mear(self):\n value = DigiTestTester().mear()\n self.historyData.append(value) \n return value \n \n def show_records(self):\n return self.historyData\n\nmainTask = MainTask()\n\n@socketio.on('client_event')\ndef echo(msg):\n print(msg)\n emit('server_response', msg)\n\n@socketio.on('connect')\ndef connect():\n print('client connected')\n emit('server_sent_connect_ok', 'Hi from Server')\n \n@socketio.on('init_batch')\ndef init_batch(batchName):\n mainTask.reset_batch(batchName)\n\n@socketio.on('mear')\ndef mear():\n data = mainTask.mear()\n emit('send_mear_data', data, broadcast=True)\n data = mainTask.show_records()\n emit('show_records', data, broadcast=True)\n\n@socketio.on('show_records')\ndef show_records():\n data = mainTask.show_records()\n emit('show_records', data, broadcast=True)\n\ndef test():\n data = DigiTestTester().mear()\n print(data)\n\nif __name__ == \"__main__\":\n socketio.run(app, debug=True, host='0.0.0.0', port=9031, keyfile='key.pem', certfile='cert.pem')\n # test()","repo_name":"superfk/digiWeb","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24838237446","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# importing whole module\nfrom tkinter import *\nfrom tkinter.ttk import *\n \n# importing strftime function to\n# retrieve system's time\nfrom time import strftime\n \n# creating tkinter window\nroot = Tk()\nroot.title('Clock')\n \n# This function is used to\n# display time on the label\ndef time():\n string = strftime('%I:%M:%S %p')\n digi_clock.config(text = string)\n 
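A minimal client sketch for the calculator socket server above. The address is an assumption (the server binds `("", 8888)`); the exchanges follow the server's menu protocol:

```python
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
    client.connect(("127.0.0.1", 8888))        # assumed host; port from the server
    print(client.recv(2048).decode())          # banner: "Calculator"
    client.sendall(b"2")                       # menu option 2 = Square Root
    print(client.recv(2048).decode())          # "You have chosen Square Root"
    client.sendall(b"16")
    print(client.recv(2048).decode())          # "4.0"
    client.sendall(b"q")                       # any non-menu input ends the session
```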
digi_clock.after(1000, time)\n \n# Styling the label widget so that clock\n# will look more attractive\ndigi_clock = Label(root, font = ('calibri', 40, 'bold'),\n background = 'pink',\n foreground = 'white')\n \n# Placing clock at the centre\n# of the tkinter window\ndigi_clock.pack(anchor = 'center')\ntime()\n \nmainloop()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Anchalsaini28/Digital-Clock-Using-tkinter","sub_path":"digital clock using tkinter.py","file_name":"digital clock using tkinter.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38596871900","text":"## mit Hilfe einer for loop (Schleife) ueber die Listen\r\n## iterieren und den laengsten Namen ermitteln und ausgeben\r\n\r\n\r\nteilnehmer = ['Michael',\r\n 'Herbert', \r\n 'Adrian', \r\n 'Daniel', \r\n 'Johannes',\r\n 'Martin', \r\n 'Stefan', \r\n ]\r\n \r\ndef getLongestElement(liste):\r\n maxLength = 0\r\n maxLengthElement = ''\r\n for element in liste:\r\n if len(element) > maxLength:\r\n maxLength = len(element) \r\n maxLengthElement = element\r\n return (maxLength, maxLengthElement)\r\n\r\n# Loesung fuer mehrere gleichlange kuerzeste Namen\r\ndef getShortestElements(liste):\r\n minLength = float('inf')\r\n minLengthElements = []\r\n for element in liste:\r\n if len(element) < minLength:\r\n minLengthElements = [element]\r\n minLength = len(element)\r\n elif len(element) == minLength:\r\n minLengthElements.append(element)\r\n return (minLength, minLengthElements) \r\n \r\n## main \r\n \r\n(maxLength, maxLengthName) = getLongestElement(teilnehmer)\r\n(minLength, minLengthNames) = getShortestElements(teilnehmer)\r\n\r\nprint(\"Der laengste Name lautet %s mit einer Laenge von %d\" \r\n % (maxLengthName, maxLength) )\r\n \r\nprint(\"Die kuerzesten Namen lauten %s mit einer Laenge von %d\" \r\n % (minLengthNames, minLength) ) \r\n \r\n ","repo_name":"MarkHofstetter/python-kurs","sub_path":"laengster_namen.py","file_name":"laengster_namen.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71642702880","text":"from itertools import groupby\n\nimport pandas as pd\n\nfrom poe.ninja import retrieve_prices\n\n\ndef scarab_orb_of_horizon(prices):\n orb_of_horizon_price = prices[\"Orb of Horizons\"][0][\"chaosValue\"]\n prices = {k: v for k, v in prices.items() if \" Scarab\" in k}\n groups = list(\n (k, list(v))\n for k, v in groupby(sorted(prices.items()), key=lambda x: x[0].split()[0])\n )\n values = {\n key: {x[0].split()[1]: x[1][0][\"chaosValue\"] for x in values}\n for key, values in groups\n }\n df = pd.DataFrame(values).T\n df[\"mean\"] = df.mean(axis=1)\n analysis = pd.concat(\n [\n profit_analysis(df, key, orb_of_horizon_price=orb_of_horizon_price)\n for key in df.keys()\n ]\n ).reset_index()\n analysis.columns = [\"tier\", \"profitability\", \"profit\", \"kind\", \"price\", \"value\"]\n analysis = analysis.query('kind != \"mean\"').set_index([\"tier\", \"kind\"])\n return analysis\n\n\ndef profit(df, key, orb_of_horizon_price):\n temp = df.drop(key, axis=1)\n return temp.mean(axis=1) / 2 - df[key] - orb_of_horizon_price / 2\n\n\ndef profitability(df, key, orb_of_horizon_price):\n temp = df.drop(key, axis=1)\n return (temp.mean(axis=1) / 2 - df[key] - orb_of_horizon_price / 2) / df[key]\n\n\ndef value(df, key):\n temp = df.drop(key, axis=1)\n return temp.mean(axis=1) / 2\n\n\ndef profit_analysis(df, key, 
orb_of_horizon_price):\n analysis = pd.concat(\n [\n profitability(df, key, orb_of_horizon_price),\n profit(df, key, orb_of_horizon_price),\n ],\n axis=1,\n keys=[\"profitability\", \"profit\"],\n )\n analysis[\"kind\"] = key\n analysis[\"price\"] = df[key]\n analysis[\"value\"] = value(df, key)\n return analysis\n\n\nif __name__ == \"__main__\":\n prices = retrieve_prices(['Scarab','Currency'])\n analysis = scarab_orb_of_horizon(prices).sort_values(by=\"profit\", ascending=False)\n print(analysis)\n","repo_name":"arpheno/poe","sub_path":"poe/valuation/scarabs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8893496539","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport copy\nimport time\nimport argparse\n\nimport cv2 as cv\nfrom pupil_apriltags import Detector\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--width\", help='cap width', type=int, default=640)\n parser.add_argument(\"--height\", help='cap height', type=int, default=480)\n\n parser.add_argument(\"--families\", type=str, default='tag36h11')\n parser.add_argument(\"--nthreads\", type=int, default=1)\n parser.add_argument(\"--quad_decimate\", type=float, default=2.0)\n parser.add_argument(\"--quad_sigma\", type=float, default=0.0)\n parser.add_argument(\"--refine_edges\", type=int, default=1)\n parser.add_argument(\"--decode_sharpening\", type=float, default=0.25)\n parser.add_argument(\"--debug\", type=int, default=0)\n\n args = parser.parse_args()\n\n return args\n\n\ndef apriltag_center_area(image):\n # 引数解析 #################################################################\n args = get_args()\n\n families = args.families\n nthreads = args.nthreads\n quad_decimate = args.quad_decimate\n quad_sigma = args.quad_sigma\n refine_edges = args.refine_edges\n decode_sharpening = args.decode_sharpening\n debug = args.debug\n\n # Detector準備 #############################################################\n at_detector = Detector(\n families=families,\n nthreads=nthreads,\n quad_decimate=quad_decimate,\n quad_sigma=quad_sigma,\n refine_edges=refine_edges,\n decode_sharpening=decode_sharpening,\n debug=debug,\n )\n\n elapsed_time = 0\n\n start_time = time.time()\n\n # カメラキャプチャ #####################################################\n debug_image = copy.deepcopy(image)\n\n # 検出実施 #############################################################\n image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n tags = at_detector.detect(\n image,\n estimate_tag_pose=False,\n camera_params=None,\n tag_size=None,\n )\n\n # 描画 ################################################################\n debug_image = draw_tags(debug_image, tags, elapsed_time)\n\n elapsed_time = time.time() - start_time\n\n # キー処理(ESC:終了) #################################################\n key = cv.waitKey(1)\n if key == 27: # ESC\n pass\n\n return debug_image\n\n\n\ndef draw_tags(\n image,\n tags,\n elapsed_time,\n):\n for tag in tags:\n tag_family = tag.tag_family\n tag_id = tag.tag_id\n center = tag.center\n corners = tag.corners\n\n center = (int(center[0]), int(center[1]))\n corner_01 = (int(corners[0][0]), int(corners[0][1]))\n corner_02 = (int(corners[1][0]), int(corners[1][1]))\n corner_03 = (int(corners[2][0]), int(corners[2][1]))\n corner_04 = (int(corners[3][0]), int(corners[3][1]))\n\n # 中心\n cv.circle(image, (center[0], center[1]), 5, (0, 0, 255), 
2)\n\n # 各辺\n cv.line(image, (corner_01[0], corner_01[1]),\n (corner_02[0], corner_02[1]), (255, 0, 0), 2)\n cv.line(image, (corner_02[0], corner_02[1]),\n (corner_03[0], corner_03[1]), (255, 0, 0), 2)\n cv.line(image, (corner_03[0], corner_03[1]),\n (corner_04[0], corner_04[1]), (0, 255, 0), 2)\n cv.line(image, (corner_04[0], corner_04[1]),\n (corner_01[0], corner_01[1]), (0, 255, 0), 2)\n\n # タグファミリー、タグID\n # cv.putText(image,\n # str(tag_family) + ':' + str(tag_id),\n # (corner_01[0], corner_01[1] - 10), cv.FONT_HERSHEY_SIMPLEX,\n # 0.6, (0, 255, 0), 1, cv.LINE_AA)\n cv.putText(image, str(tag_id), (center[0] - 10, center[1] - 10),\n cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv.LINE_AA)\n\n # 処理時間\n cv.putText(image,\n \"Elapsed Time:\" + '{:.1f}'.format(elapsed_time * 1000) + \"ms\",\n (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2,\n cv.LINE_AA)\n\n return image\n\n\nif __name__ == '__main__':\n main()","repo_name":"realslimwedy/you_only_land_once","sub_path":"backups/apriltag_module.py","file_name":"apriltag_module.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3808780528","text":"from djitellopy import Tello\nimport time\nimport cv2\n\ntello = Tello()\n\n# print(\"test\")\n\ntello.connect()\n# tello.streamon()\n\n# frame_read = tello.get_frame_read()\n\ntello.takeoff()\n\n# cv2.imwrite(\"test.png\",frame_read.frame)\n\ntello.move_forward(200)\ntello.rotate_counter_clockwise(90)\ntello.move_left(100)\n\n# time.sleep(10)\n\ntello.flip(\"f\")\n\n# print()\ntello.query_battery()\n# tello.get_flight_time()\ntello.get_battery()\n\n\n# tello.enable_mission_pads()\n# tello.set_mission_pad_detection_direction(1)\n\ntello.land();","repo_name":"jarvisN/non2023","sub_path":"python/10_NonDrone/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19868933196","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom .models import Registration\n\nclass RegistrationAPITest(TestCase):\n def setUp(self):\n self.client = APIClient()\n self.register_url = reverse('registration:register')\n self.cancel_url = reverse('registration:cancel_registration')\n\n def test_registration(self):\n data = {'first_name': 'John', 'last_name': 'Doe', 'national_id': '1234567890'}\n response = self.client.post(self.register_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Registration.objects.count(), 1)\n\n def test_cancel_registration(self):\n registration = Registration.objects.create(first_name='John', last_name='Doe', national_id='1234567890')\n data = {'national_id': '1234567890'}\n response = self.client.post(self.cancel_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Registration.objects.count(), 1)\n\n def test_get_registration_list(self):\n response = self.client.get(reverse('registration:registration_list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_get_canceled_registration_list(self):\n # Create a canceled registration\n canceled_registration = Registration.objects.create(\n first_name='Jane', last_name='Doe', national_id='0987654321', canceled=True\n )\n response = 
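The AprilTag record's `__main__` guard calls a `main()` that is never defined, so the module crashes if run directly; a small webcam driver along these lines (capture index and window name are assumptions) is presumably what was intended for `apriltag_center_area`:

```python
import cv2 as cv

cap = cv.VideoCapture(0)                       # assumed capture device
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    annotated = apriltag_center_area(frame)    # function from the record above
    cv.imshow('AprilTag', annotated)
    if cv.waitKey(1) == 27:                    # ESC quits, matching the record
        break
cap.release()
cv.destroyAllWindows()
```

Since `apriltag_center_area` rebuilds the `Detector` on every call, a long-running driver would want that construction hoisted out of the per-frame path.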
self.client.get(reverse('registration:registration_list') + '?filter=canceled')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, canceled_registration.first_name, status_code=status.HTTP_200_OK)\n self.assertContains(response, canceled_registration.last_name, status_code=status.HTTP_200_OK)\n self.assertContains(response, canceled_registration.national_id, status_code=status.HTTP_200_OK)\n self.assertContains(response, 'Canceled', status_code=status.HTTP_200_OK)","repo_name":"Hassanprogramming/register_API","sub_path":"registration/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14102971997","text":"# -*- coding: utf-8 -*-\nimport qrcode\nfrom threading import Thread\nimport time\nimport requests\nfrom io import BytesIO\nimport http.cookiejar as cookielib\nfrom PIL import Image\nimport os\nimport threading\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QApplication, QLabel\nfrom PyQt5.QtGui import QPalette, QBrush, QPixmap\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom chatMain import *\nimport configparser\n#待筛选\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nimport img\nclass login(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.retranslateUi(self)\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(407, 334)\n MainWindow.setWindowIcon(QIcon(\":/img/addlogo.png\"))\n #MainWindow.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n MainWindow.setAttribute(Qt.WA_TranslucentBackground)\n MainWindow.setWindowFlags(Qt.FramelessWindowHint | Qt.Window)\n MainWindow.setStyleSheet(\n\n\" background-color: gray;\\n\"\n\n\n\n\n)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.centralwidget.setStyleSheet( \n \"border-radius: 10px;\")\n # pat2 = QPainter(self.centralwidget)\n # pat2.setRenderHint(pat2.Antialiasing) # 抗锯齿\n # pat2.setBrush(Qt.white)\n # pat2.setPen(Qt.transparent)\n # rect = self.rect()\n # rect.setLeft(19)\n # rect.setTop(19)\n # rect.setWidth(rect.width()-9)\n # rect.setHeight(rect.height()-9)\n # pat2.drawRoundedRect(rect, 4, 4)\n\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(365, 10, 25, 22))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton.setText(\"\")\n #self.pushButton.setStyleSheet('''QPushButton{background:#F76677;border-radius:10px;}QPushButton:hover{background:red;}''')\n self.pushButton.setStyleSheet(\"QPushButton{\\n\"\n\" max-width: 20px;\\n\"\n\"min-width: 20px;\\n\"\n\" max-height: 20px;\\n\"\n\" min-height: 20px;\\n\"\n\" background-color: #F76677;\\n\"\n\" border-radius: 10px;\\n\"\n\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\n\" background-color: red;\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\n\" background:#7e7e7e\\n\"\n\"}\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(60, 40, 280, 22))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(200, 230, 71, 31))\n self.label_2.setObjectName(\"label_2\")\n self.frame = QtWidgets.QFrame(self.centralwidget)\n self.frame.setGeometry(QtCore.QRect(130, 70, 151, 141))\n self.frame.setStyleSheet(\"border-image: url(./img/login.png);\")\n 
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.frame_2 = QtWidgets.QFrame(self.centralwidget)\n self.frame_2.setGeometry(QtCore.QRect(160, 230, 31, 31))\n self.frame_2.setStyleSheet(\"border-image: url(:/img/shua.png);\")\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 407, 23))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\n #self.main_layout.setSpacing(0)\n # 无边框的拖动\n def mouseMoveEvent(self, e: QtGui.QMouseEvent): # 重写移动事件\n self._endPos = e.pos() - self._startPos\n self.move(self.pos() + self._endPos)\n \n def mousePressEvent(self, e: QtGui.QMouseEvent):\n if e.button() == QtCore.Qt.LeftButton:\n self._isTracking = True\n self._startPos = QtCore.QPoint(e.x(), e.y())\n \n def mouseReleaseEvent(self, e: QtGui.QMouseEvent):\n if e.button() == QtCore.Qt.LeftButton:\n self._isTracking = False\n self._startPos = None\n self._endPos = None\n\n \n\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"登录账号\"))\n self.label.setText(_translate(\"MainWindow\", \"

    请使用哔哩哔哩客户端扫码登录

    \"))\n self.label_2.setText(_translate(\"MainWindow\", \"

    刷新二维码

    \"))\n\n# requests.packages.urllib3.disable_warnings()\n\n# headers = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36\", 'Referer': \"https://www.bilibili.com/\"}\n# headerss = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36\", 'Host': 'passport.bilibili.com','Referer': \"https://passport.bilibili.com/login\"}\n\n\n# def chatshow(): \n# config = ConfigParser.ConfigParser()\n# config.read('config.ini', encoding=\"utf-8\")\n# if not config.has_option(\"login\", \"uname\"): # 检查是否存在该option\n# chatshow()\n# else:\n# chatMain.show()\n# MainWindow.close()\n\n# def islogin(session):\n# try:\n# session.cookies.load(ignore_discard=True)\n# except Exception:\n# pass\n# loginurl = session.get(\"https://api.bilibili.com/x/web-interface/nav\", verify=False, headers=headers).json()\n# if loginurl['code'] == 0:\n# print('Cookies值有效,',loginurl['data']['uname'],',已登录!')\n# #获取用户名\n# config = configparser.ConfigParser() # 类实例化\n# config.add_section('login') # 首先添加一个新的section\n# config.set('login','username',loginurl['data']['uname']) # 写入数据\n# config.write(open('config.ini','w')) #保存数据\n# # 定义文件路径\n \n# #保存用户头像\n# r = requests.get(loginurl['data']['face'])\n# with open('img/logo.jpg', 'wb') as f:\n# f.write(r.content) \n# uname = loginurl['data']['uname']\n# print(uname)\n# # print(loginurl)\n# # MainWindow.close()\n# print(\"ini由第一个创建\")\n \n\n# return session, True\n# else:\n# print('Cookies值已经失效,请重新扫码登录!')\n# return session, False\n\n\n# def bzlogin():\n# if not os.path.exists('bzcookies.txt'):\n# with open(\"bzcookies.txt\", 'w') as f:\n# f.write(\"\")\n# session = requests.session()\n# session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')\n# session, status = islogin(session)\n# if not status:\n# getlogin = session.get('https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers).json()\n# loginurl = requests.get(getlogin['data']['url'], headers=headers).url\n# oauthKey = getlogin['data']['oauthKey']\n# qr = qrcode.QRCode()\n# qr.add_data(loginurl)\n# img = qr.make_image()\n# a = BytesIO()\n# img.save(\"./img/login.png\")\n# # img.save(a, 'png')\n# # png = a.getvalue()\n# # a.close()\n# # t = showpng(png)\n# # t.start()\n# tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'\n \n# while True:\n# qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()\n# print(qrcodedata)\n# if '-4' in str(qrcodedata['data']):\n# print('二维码未失效,请扫码!')\n# elif '-5' in str(qrcodedata['data']):\n# print('已扫码,请确认!')\n# elif '-2' in str(qrcodedata['data']):\n# print('二维码已失效,请重新运行!')\n# elif 'True' in str(qrcodedata['status']):\n# print('已确认,登入成功!')\n# session.get(qrcodedata['data']['url'], headers=headers)\n# try:\n# session.cookies.load(ignore_discard=True)\n# except Exception:\n# pass\n# loginurl = session.get(\"https://api.bilibili.com/x/web-interface/nav\", verify=False, headers=headers).json()\n# if loginurl['code'] == 0:\n# print('Cookies值有效,',loginurl['data']['uname'],',已登录!')\n# #获取用户名\n# config = configparser.ConfigParser() # 类实例化\n# config.add_section('login') # 首先添加一个新的section\n# config.set('login','username',loginurl['data']['uname']) # 写入数据\n# config.write(open('config.ini','w')) #保存数据\n# # 定义文件路径\n# print(\"ini由第二个创建\")\n \n# #保存用户头像\n# r = requests.get(loginurl['data']['face'])\n# with open('/img/logo.jpg', 'wb') as f:\n# f.write(r.content) 
\n# uname = loginurl['data']['uname']\n\n# # print(loginurl)\n\n# break\n# else:\n# print('其他:', qrcodedata)\n# time.sleep(2)\n \n\n# session.cookies.save()\n# MainWindow.close()\n# return session\n\n\n# if __name__ == \"__main__\":\n# t = threading.Thread(target=bzlogin)\n# t.start()\n# time.sleep(1)\n# app = QtWidgets.QApplication(sys.argv)\n# MainWindow = QtWidgets.QMainWindow()\n# ui = login()\n# ui.setupUi(MainWindow)\n# ui2 = chatMain()\n# MainWindow.show()\n# # def chat():\n# # while 1:\n# # config = configparser.ConfigParser()\n# # config.read('config.ini') \n# # if not config.has_section(\"login\"):\n# # pass\n# # else:\n# # ui2.show()\n# # break\n# # t2 = threading.Thread(target = chat)\n# # t2.start()\n\n\n# sys.exit(app.exec_())\n\n","repo_name":"daiguoxi/BilibiliMassInformation","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2266514629","text":"# adapted from https://github.com/open-mmlab/mmcv or\n# https://github.com/open-mmlab/mmdetection\nimport numpy as np\nfrom numpy import random\n\nimport vedacore.image as image\nfrom vedacore.misc import registry\nfrom vedatad.misc.segment import segment_overlaps\n\n\n@registry.register_module('pipeline')\nclass SpatialRandomFlip(object):\n \"\"\"Spatially flip images.\n\n If the input dict contains the key \"flip\", then the flag will be used,\n otherwise it will be randomly decided by a ratio specified in the init\n method.\n\n Args:\n flip_ratio (float, optional): The flipping probability. Default: None.\n direction(str, optional): The flipping direction. Options are\n 'horizontal' and 'vertical'. Default: 'horizontal'.\n \"\"\"\n\n def __init__(self, flip_ratio=None, direction='horizontal'):\n self.flip_ratio = flip_ratio\n self.direction = direction\n if flip_ratio is not None:\n assert 0 <= flip_ratio <= 1\n assert direction in ['horizontal', 'vertical']\n\n def __call__(self, results):\n \"\"\"Call function to flip images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Flipped results, 'flip', 'flip_direction' keys are added into\n result dict.\n \"\"\"\n\n if np.random.rand() < self.flip_ratio:\n for key in results.get('img_fields', ['imgs']):\n if self.direction == 'horizontal':\n results[key] = np.flip(results[key], axis=2)\n else:\n results[key] = np.flip(results[key], axis=1)\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'\n\n\n@registry.register_module('pipeline')\nclass Pad(object):\n \"\"\"Pad images.\n\n There are two padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number.\n Added keys are \"pad_shape\", \"pad_fixed_size\", \"pad_size_divisor\",\n\n Args:\n size (tuple, optional): Fixed padding size.\n size_divisor (int, optional): The divisor of padded size.\n pad_val (float, optional): Padding value, 0 by default.\n \"\"\"\n\n def __init__(self, size=None, size_divisor=None, pad_val=0):\n self.size = size\n self.size_divisor = size_divisor\n self.pad_val = pad_val\n # only one of size and size_divisor should be valid\n assert size is not None or size_divisor is not None\n assert size is None or size_divisor is None\n\n def _pad_imgs(self, results):\n \"\"\"Pad images according to ``self.size``.\"\"\"\n for key in results.get('img_fields', ['imgs']):\n if self.size is not None:\n padded_imgs = image.impad(\n results[key], 
shape=self.size, pad_val=self.pad_val)\n elif self.size_divisor is not None:\n padded_imgs = image.impad_to_multiple(\n results[key], self.size_divisor, pad_val=self.pad_val)\n results[key] = padded_imgs\n results['pad_tsize'] = padded_imgs.shape[0]\n\n def __call__(self, results):\n \"\"\"Call function to pad images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_imgs(results)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(size={self.size}, '\n repr_str += f'size_divisor={self.size_divisor}, '\n repr_str += f'pad_val={self.pad_val})'\n return repr_str\n\n\n@registry.register_module('pipeline')\nclass Normalize(object):\n \"\"\"Normalize images.\n\n Added key is \"img_norm_cfg\".\n\n Args:\n mean (sequence): Mean values of 3 channels.\n std (sequence): Std values of 3 channels.\n to_rgb (bool): Whether to convert images from BGR to RGB,\n default is true.\n \"\"\"\n\n def __init__(self, mean, std, to_rgb=True):\n self.mean = np.array(mean, dtype=np.float32)\n self.std = np.array(std, dtype=np.float32)\n self.to_rgb = to_rgb\n\n def __call__(self, results):\n \"\"\"Call function to normalize images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Normalized results, 'img_norm_cfg' key is added into\n result dict.\n \"\"\"\n for key in results.get('img_fields', ['imgs']):\n results[key] = image.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'\n return repr_str\n\n\n@registry.register_module('pipeline')\nclass SpatialRandomCrop(object):\n \"\"\"Spatially random crop images.\n\n Args:\n crop_size (tuple): Expected size after cropping, (h, w).\n\n Notes:\n - If the image is smaller than the crop size, return the original image\n \"\"\"\n\n def __init__(self, crop_size):\n assert crop_size[0] > 0 and crop_size[1] > 0\n self.crop_size = crop_size\n\n def __call__(self, results):\n \"\"\"Call function to randomly crop images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Randomly cropped results, 'imgs_shape' key in result dict\n is updated according to crop size.\n \"\"\"\n\n for key in results.get('img_fields', ['imgs']):\n imgs = results[key]\n margin_h = max(imgs.shape[1] - self.crop_size[0], 0)\n margin_w = max(imgs.shape[2] - self.crop_size[1], 0)\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]\n\n # crop images\n imgs = imgs[:, crop_y1:crop_y2, crop_x1:crop_x2, ...]\n results[key] = imgs\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(crop_size={self.crop_size})'\n\n\n@registry.register_module('pipeline')\nclass SpatialCenterCrop(object):\n \"\"\"Spatially center crop images.\n\n Args:\n crop_size (tuple): Expected size after cropping, (h, w).\n\n Notes:\n - If the image is smaller than the crop size, return the original image\n \"\"\"\n\n def __init__(self, crop_size):\n assert crop_size[0] > 0 and crop_size[1] > 0\n self.crop_size = crop_size\n\n def __call__(self, results):\n \"\"\"Call function to center crop images.\n\n Args:\n results (dict): 
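Per frame, `Normalize` above amounts to an optional BGR-to-RGB flip followed by channel-wise standardization. A plain-numpy sketch (illustration only; the real code delegates to vedacore's `image.imnormalize`, and the mean/std here are the usual ImageNet statistics, not values taken from this file):

```python
import numpy as np

mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)  # assumed stats
std = np.array([58.395, 57.12, 57.375], dtype=np.float32)

img = np.random.randint(0, 256, (4, 4, 3)).astype(np.float32)  # one BGR frame
normed = (img[..., ::-1] - mean) / std  # flip BGR->RGB, then standardize
```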
Result dict from loading pipeline.\n\n Returns:\n dict: Randomly cropped results, 'imgs_shape' key in result dict\n is updated according to crop size.\n \"\"\"\n\n for key in results.get('img_fields', ['imgs']):\n imgs = results[key]\n margin_h = max(imgs.shape[1] - self.crop_size[0], 0)\n margin_w = max(imgs.shape[2] - self.crop_size[1], 0)\n offset_h = int(margin_h / 2)\n offset_w = int(margin_w / 2)\n crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]\n\n # crop images\n imgs = imgs[:, crop_y1:crop_y2, crop_x1:crop_x2, ...]\n results[key] = imgs\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(crop_size={self.crop_size})'\n\n\n@registry.register_module('pipeline')\nclass PhotoMetricDistortion(object):\n \"\"\"Apply photometric distortion to images sequentially, every\n transformation is applied with a probability of 0.5. The position of random\n contrast is in second or second to last.\n\n 1. random brightness\n 2. random contrast (mode 0)\n 3. convert color from BGR to HSV\n 4. random saturation\n 5. random hue\n 6. convert color from HSV to BGR\n 7. random contrast (mode 1)\n 8. randomly swap channels\n\n Args:\n brightness_delta (int): delta of brightness.\n contrast_range (tuple): range of contrast.\n saturation_range (tuple): range of saturation.\n hue_delta (int): delta of hue.\n \"\"\"\n\n def __init__(self,\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18,\n p=0.5):\n self.brightness_delta = brightness_delta\n self.contrast_lower, self.contrast_upper = contrast_range\n self.saturation_lower, self.saturation_upper = saturation_range\n self.hue_delta = hue_delta\n self.p = p\n\n def __call__(self, results):\n \"\"\"Call function to perform photometric distortion on images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with images distorted.\n \"\"\"\n\n if 'img_fields' in results:\n assert results['img_fields'] == [\n 'imgs'\n ], ('Only single img_fields is allowed')\n imgs = results['imgs']\n assert imgs.dtype == np.float32, (\n 'PhotoMetricDistortion needs the input imgs of dtype np.float32'\n ', please set \"to_float32=True\" in \"LoadFrames\" pipeline')\n\n def _filter(img):\n img[img < 0] = 0\n img[img > 255] = 255\n return img\n\n if random.uniform(0, 1) <= self.p:\n\n # random brightness\n if random.randint(2):\n delta = random.uniform(-self.brightness_delta,\n self.brightness_delta)\n imgs += delta\n imgs = _filter(imgs)\n\n # mode == 0 --> do random contrast first\n # mode == 1 --> do random contrast last\n mode = random.randint(2)\n if mode == 1:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n imgs *= alpha\n imgs = _filter(imgs)\n\n # convert color from BGR to HSV\n imgs = np.array([image.bgr2hsv(img) for img in imgs])\n\n # random saturation\n if random.randint(2):\n imgs[..., 1] *= random.uniform(self.saturation_lower,\n self.saturation_upper)\n\n # random hue\n # if random.randint(2):\n if True:\n imgs[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)\n imgs[..., 0][imgs[..., 0] > 360] -= 360\n imgs[..., 0][imgs[..., 0] < 0] += 360\n\n # convert color from HSV to BGR\n imgs = np.array([image.hsv2bgr(img) for img in imgs])\n imgs = _filter(imgs)\n\n # random contrast\n if mode == 0:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n imgs *= alpha\n imgs = _filter(imgs)\n\n # 
randomly swap channels\n if random.randint(2):\n imgs = imgs[..., random.permutation(3)]\n\n results['imgs'] = imgs\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(\\nbrightness_delta={self.brightness_delta},\\n'\n repr_str += 'contrast_range='\n repr_str += f'{(self.contrast_lower, self.contrast_upper)},\\n'\n repr_str += 'saturation_range='\n repr_str += f'{(self.saturation_lower, self.saturation_upper)},\\n'\n repr_str += f'hue_delta={self.hue_delta})'\n return repr_str\n\n\n@registry.register_module('pipeline')\nclass TemporalRandomCrop(object):\n \"\"\"Temporally crop.\n\n Args:\n num_frames (int, optional): The cropped frame num. Default: 768.\n iof_th(float, optional): The minimal iof threshold to crop. Default: 0\n \"\"\"\n\n def __init__(self, num_frames=768, iof_th=0):\n self.num_frames = num_frames\n self.iof_th = iof_th\n self.segment2label = dict(\n gt_segments='gt_labels', gt_segments_ignore='gt_labels_ignore')\n\n def get_valid_mask(self, segments, patch, iof_th):\n gt_iofs = segment_overlaps(segments, patch, mode='iof')[:, 0]\n patch_iofs = segment_overlaps(patch, segments, mode='iof')[0, :]\n iofs = np.maximum(gt_iofs, patch_iofs)\n mask = iofs > iof_th\n\n return mask\n\n def __call__(self, results):\n \"\"\"Call function to random temporally crop video frame.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Temporally cropped results, 'img_ids' is updated in\n result dict.\n \"\"\"\n\n total_frames = results['tsize']\n patch_num_frames = min(self.num_frames, total_frames)\n while True:\n start = np.random.randint(0, total_frames - patch_num_frames + 1)\n end = start + patch_num_frames\n patch = np.array([[start, end]], dtype=np.float32)\n\n mask = self.get_valid_mask(results['gt_segments'], patch,\n self.iof_th)\n if np.count_nonzero(mask) == 0:\n continue\n\n for key in results.get('segment_fields', []):\n segments = results[key]\n mask = self.get_valid_mask(segments, patch, self.iof_th)\n segments = segments[mask]\n segments[:, 0] = segments[:, 0].clip(min=start)\n segments[:, 1] = segments[:, 1].clip(max=end)\n segments -= start\n results[key] = segments\n\n label_key = self.segment2label[key]\n if label_key in results:\n results[label_key] = results[label_key][mask]\n results['img_ids'] = results['img_ids'][start:end]\n results['tsize'] = end - start\n results['tshift'] = start\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(num_frames={self.num_frames},'\n repr_str += f'iof_th={self.iof_th})'\n\n return repr_str\n\n\n@registry.register_module('pipeline')\nclass Rotate(object):\n \"\"\"Spatially rotate images.\n\n Args:\n limit (int, list or tuple): Angle range, (min_angle, max_angle).\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\".\n Default: bilinear\n border_mode (str): Border mode, accepted values are \"constant\",\n \"isolated\", \"reflect\", \"reflect101\", \"replicate\", \"transparent\",\n \"wrap\". Default: constant\n border_value (int): Border value. 
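`TemporalRandomCrop.get_valid_mask` above keeps a segment when its overlap with the sampled window, measured against either the segment's length or the window's, exceeds `iof_th`. A standalone sketch of that test (re-implemented for illustration; the real code calls `segment_overlaps` with `mode='iof'` in both directions):

```python
def iof(a, b):
    """Intersection length divided by the length of interval a."""
    inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
    return inter / (a[1] - a[0])

segments = [(10, 50), (90, 130), (200, 240)]
window = (0, 100)                      # a sampled crop of 100 frames
iof_th = 0.0
mask = [max(iof(s, window), iof(window, s)) > iof_th for s in segments]
print(mask)  # [True, True, False]: the last segment lies outside the window
```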
Default: 0\n \"\"\"\n\n def __init__(self,\n limit,\n interpolation='bilinear',\n border_mode='constant',\n border_value=0,\n p=0.5):\n if isinstance(limit, int):\n limit = (-limit, limit)\n self.limit = limit\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.border_value = border_value\n self.p = p\n\n def __call__(self, results):\n \"\"\"Call function to random rotate images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Spatially rotated results.\n \"\"\"\n\n if random.uniform(0, 1) <= self.p:\n angle = random.uniform(*self.limit)\n for key in results.get('img_fields', ['imgs']):\n imgs = [\n image.imrotate(\n img,\n angle=angle,\n interpolation=self.interpolation,\n border_mode=self.border_mode,\n border_value=self.border_value) for img in results[key]\n ]\n results[key] = np.array(imgs)\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(limit={self.limit},'\n repr_str += f'interpolation={self.interpolation},'\n repr_str += f'border_mode={self.border_mode},'\n repr_str += f'border_value={self.border_value},'\n repr_str += f'p={self.p})'\n\n return repr_str\n\n\n@registry.register_module('pipeline')\nclass TemporalCrop(object):\n \"\"\"Temporally crop.\"\"\"\n\n def __init__(self):\n self.segment2label = dict(\n gt_segments='gt_labels', gt_segments_ignore='gt_labels_ignore')\n\n def __call__(self, results):\n \"\"\"Call function to temporally crop video frame.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Temporally cropped results, 'img_ids', 'tsize', 'tshift' is\n updated in result dict.\n \"\"\"\n\n start, end = results['patch']\n patch = np.array([start, end], dtype=np.float32)\n for key in results.get('segment_fields', []):\n segments = results[key]\n iofs = segment_overlaps(segments, patch[None, :], mode='iof')[:, 0]\n mask = iofs > 0\n segments = segments[mask]\n segments[:, 0] = segments[:, 0].clip(min=start)\n segments[:, 1] = segments[:, 1].clip(max=end)\n segments -= start\n results[key] = segments\n\n label_key = self.segment2label[key]\n if label_key in results:\n labels = results[label_key]\n labels = labels[mask]\n results[label_key] = labels\n results['img_ids'] = results['img_ids'][start:end]\n results['tsize'] = end - start\n results['tshift'] = start\n\n return results\n","repo_name":"Media-Smart/vedatad","sub_path":"vedatad/datasets/pipelines/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":18240,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"54"} +{"seq_id":"10012657525","text":"import re\nfrom BeautifulSoup import BeautifulSoup\nfrom extract_engine import TallTableExtractor\nfrom extract_engine import WideTableExtractor\nfrom extract_engine import StringExtractor\n\nclass CIDBEntry(object):\n def __init__(self,page):\n self.page = open(page)\n self.reference = page.split('.')[0].split('/')[-1]\n self.soup = BeautifulSoup(self.page)\n self.target = self.soup.find('div',{'id':'todaysoftware'})\n self.result = []\n self.url = \"http://202.190.73.10/directory/local_contractor_details.php?cont_id=%s\"\n\n def process(self):\n tables = self.target.findAll('table')\n extend_table = ['A','B','F','G']\n append_table = ['C','D','E','H','I','J']\n for table in tables:\n check = table.find('tr')\n if not check:\n continue\n check = check.text\n if re.match('\\S\\.',check):\n if check[0] in extend_table:\n self.result.extend(self.process_table(table))\n 
elif check[0] in append_table:\n self.result.append(self.process_table(table))\n elif re.match('^Status',check):\n self.result.extend(self.process_table(table))\n \n\n def process_table(self,table):\n tall_table = ['A','B']\n wide_table = ['C','D','E','H','I','J']\n string_table = ['F','G']\n \n check = table.find('tr').text\n if check[0] in tall_table:\n extractor = TallTableExtractor(table)\n\n elif check[0] in wide_table:\n extractor = WideTableExtractor(table)\n\n elif check[0] in string_table:\n extractor = StringExtractor(table)\n\n elif re.match('^Status',check):\n extractor = TallTableExtractor(table)\n else:\n return []\n\n extractor.extract_value()\n return extractor.result\n \n def get_keys(self):\n keys = []\n for item in self.result:\n if type(item) == list:\n if not item:\n continue\n obj = item[0]\n else:\n obj = item\n temp = obj.to_dict()\n if not temp:\n continue\n keys.append([k.lower() for k in obj.keys])\n for k in keys:\n k.append('reference')\n k.append('source')\n keys.pop(4)\n return keys\n \n def get_worksheet(self):\n sheet_list = []\n for item in self.result:\n if type(item) == list:\n if not item:\n continue\n obj = item[0]\n else:\n obj = item\n if not obj.to_dict():\n continue\n temp = obj.title\n sheet_list.append(temp)\n sheet_list = [normalize_value(i) for i in sheet_list]\n sheet_list.pop(4)\n return sheet_list\n\n def is_good_record(self):\n company_info = self.result[0]\n status = False\n company_dict = company_info.to_dict()\n for key in company_dict:\n if company_dict[key]:\n if re.match('^\\S+$',company_dict[key]):\n status = True\n return status\n\n def get_data(self):\n datas = self.result\n result = []\n for data in datas:\n if type(data) == list:\n if not data:\n continue\n temp = []\n for d in data:\n tdata = d.to_dict()\n if not tdata:\n continue\n t = {}\n for td in tdata:\n t[td.lower()] = tdata[td]\n temp.append(t)\n temp[-1]['reference'] = self.reference\n temp[-1]['source'] = self.url % self.reference\n else:\n temp = {}\n tdata = data.to_dict()\n if not tdata:\n continue\n for t in tdata:\n temp[t.lower()] = tdata[t]\n temp['reference'] = self.reference\n temp['source'] = self.url % self.reference\n result.append(temp)\n result[0].update(result[4])\n result.pop(4)\n return result\n\ndef normalize_value(value):\n value = value.replace('\\r\\n','')\n value = value.replace(' ','')\n value = value.replace('/','')\n\n pattern = re.compile(':$')\n value = pattern.sub('',value)\n pattern = re.compile('^\\s+')\n value = pattern.sub('',value)\n pattern = re.compile('\\s+$')\n value = pattern.sub('',value)\n pattern = re.compile('\\s\\s+')\n value = pattern.sub(' ',value)\n pattern = re.compile('\\.\\s')\n value = pattern.sub('.',value)\n pattern = re.compile('\\S\\.')\n value = pattern.sub('',value)\n pattern = re.compile(':$')\n value = pattern.sub('',value)\n value = value.replace(' ','_')\n return value.lower() \n\n\n","repo_name":"Sinar/CIDBScraper","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"25051169839","text":"#!/usr/bin/env python3\n\nimport argparse, sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument('tr_src', type=str)\nparser.add_argument('te_src', type=str)\nparser.add_argument('tr_dst', type=str)\nparser.add_argument('te_dst', type=str)\nARGS = vars(parser.parse_args())\n\nmap = {}\ndef cvt(src_path, dst_path):\n with open(dst_path, 'w') as f_dst:\n for line in 
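A worked pass through `normalize_value` above, on a made-up section caption of the kind this scraper sees. One assumption: the bare `replace(' ', '')` near the top reads like a mangled non-breaking-space strip (`replace('\xa0', '')`), since the later steps still expect plain spaces to survive; the trace below assumes that reading.

```python
# normalize_value("B. Registered Address :"), step by step:
#   strip the trailing ':'            -> "B. Registered Address "
#   trim / collapse whitespace        -> "B. Registered Address"
#   '\.\s' -> '.'                     -> "B.Registered Address"
#   drop the '\S\.' section prefix    -> "Registered Address"
#   spaces -> underscores, lowercase  -> "registered_address"
```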
open(src_path):\n tokens = line.strip().split()\n label = tokens[0]\n if label == '1':\n output = '1'\n else:\n output = '-1'\n for field, token in enumerate(tokens[1:]):\n dim, val = token.split(':')\n feat = round(float(val), 3)\n feat = \"{0}:{1}\".format(field,feat)\n if feat not in map:\n map[feat] = len(map) + 1\n output += ' {0}:{1}:1'.format(field, map[feat])\n f_dst.write(output + '\\n')\n\ncvt(ARGS['tr_src'], ARGS['tr_dst'])\ncvt(ARGS['te_src'], ARGS['te_dst'])\nprint(len(map))\n","repo_name":"MrCoderKai/Paper-of-Recommendation-System","sub_path":"src/FFM_Field_aware_Factorization_Machine/exp_code_ffm/moredata/rna/cvt.rna.cat.py","file_name":"cvt.rna.cat.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"28411466598","text":"\"\"\"deleted column\n\nRevision ID: 28034937cc5a\nRevises: bbe5aae23218\nCreate Date: 2023-01-15 22:54:00.354284\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '28034937cc5a'\ndown_revision = 'bbe5aae23218'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('books', schema=None) as batch_op:\n batch_op.drop_column('img_name')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('books', schema=None) as batch_op:\n batch_op.add_column(sa.Column('img_name', sa.VARCHAR(), nullable=True))\n\n # ### end Alembic commands ###\n","repo_name":"UnilabEdu/UnilabPythonInternship","sub_path":"Chapter09_Structuring/Projects/2023/Kristine_Dzneladze/migrations/versions/28034937cc5a_deleted_column.py","file_name":"28034937cc5a_deleted_column.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"3365172564","text":"from amiyabot.database import select_for_paginate\nfrom core import app\nfrom core.database.user import User as UserTable, UserInfo, UserGachaInfo\n\nfrom .__model__ import QueryData, BaseModel\n\n\nclass UserModel(BaseModel):\n user_id: str\n black: int\n coupon: int = 0\n jade_point: int = 0\n\n\n@app.controller\nclass User:\n @app.route()\n async def get_user(self, data: QueryData):\n select = (\n UserTable.select(UserTable, UserInfo, UserGachaInfo)\n .join(UserInfo, 'left join', on=(UserInfo.user_id == UserTable.user_id))\n .join(\n UserGachaInfo,\n 'left join',\n on=(UserGachaInfo.user_id == UserTable.user_id),\n )\n )\n\n if data.search:\n select = select.where(UserTable.user_id.contains(data.search) | UserTable.nickname.contains(data.search))\n\n return app.response(select_for_paginate(select, page=data.currentPage, page_size=data.pageSize))\n\n @app.route()\n async def edit_user(self, data: UserModel):\n UserTable.update(black=data.black).where(UserTable.user_id == data.user_id).execute()\n UserInfo.update(jade_point=data.jade_point).where(UserInfo.user_id == data.user_id).execute()\n UserGachaInfo.update(coupon=data.coupon).where(UserGachaInfo.user_id == data.user_id).execute()\n\n return app.response(message='修改成功')\n","repo_name":"AmiyaBot/Amiya-Bot","sub_path":"core/server/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":440,"dataset":"github-code","pt":"54"} +{"seq_id":"33191047907","text":"import 
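The converter above assigns each distinct `(field, value rounded to 3 decimals)` pair a fresh index in `map` and emits libffm-style `field:index:1` triples; note the field is the token's position in the line, and the original `dim` before the colon is discarded. A worked line (hypothetical input, `map` empty at the start):

```python
# input : "1 5:0.12345 8:0.5"
#   label "1"                          -> "1"
#   token 0 -> field 0, feat "0:0.123" -> unseen, index 1 -> "0:1:1"
#   token 1 -> field 1, feat "1:0.5"   -> unseen, index 2 -> "1:2:1"
# output: "1 0:1:1 1:2:1"
```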
argparse\nimport json\n\nimport os\nimport sys\n\n\nclass Mixtape_Editor:\n # When refactoring for optimal code, I would initialize my class with the changes and mixtape data, and objects needed\n # for logging. I would also use way more helper functions, for typechecking and to simplify\n # adding, deleting, and updating items. I would also use more helper funcs to break up duties within a single func.\n # I would also be more specific about the structure of each input object for the class functions\n\n # ex.\n\n # def __init__(self, mixtape_data, changes_data):\n # self.logger = logger\n # self.mixtape_data = mixtape_data\n # self.changes_data = changes_data\n\n def run_mixtape_object_edits(self, mixtape_data: dict, change_object: dict):\n '''\n This function consumes a mixtape data object and a changes object. \n The data is parsed from the changes, and depending on the action being taken (add or delete),\n the function will determine which item should be acted upon, and from which object (users, playlists, songs)\n it should be acted upon within the mixtape.\n\n params - dict: mixtape_data\n dict: change_object\n '''\n\n for key, value in mixtape_data.items():\n # Delete mixtape object\n if change_object['action'] == \"delete\":\n for item in value:\n # making sure they are the same key, and same id.\n if change_object['id'] == item['id'] and change_object[\n 'type'] == key:\n value.remove(item)\n \n # Add mixtape object\n elif change_object['action'] == \"add\":\n if change_object['type'] == 'playlists' and key == 'playlists':\n value.append({\n \"id\": change_object['id'],\n \"user_id\": change_object['user_id'],\n \"song_ids\": change_object['song_ids']\n })\n # Adds a song to an existing playlist\n if change_object['type'] == 'songs' and key == 'playlists':\n for item in value:\n if change_object['playlist_id'] == item['id']:\n item['song_ids'].append(change_object['id'])\n\n\n def get_data(self, input_file :str):\n '''\n This function takes in a file, and returns the data as a dict.\n\n params - str: input file\n '''\n \n data = {}\n if not os.path.exists(input_file):\n sys.exit(f'ERROR: unable to locate file {input_file}')\n\n with open(input_file) as infile:\n input_object = json.load(infile)\n\n data.update(input_object)\n\n return data\n \n def output_data(self, data, output_file):\n '''\n This function takes in data, and an output file. The data is dumped \n to the output file, output.json\n\n params - dict: data \n str: output file\n '''\n\n with open(output_file, 'w') as outfile:\n # Keeping the format EXACTLY the same\n json.dump(data, outfile, indent=2, separators=(',', ' : '))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\n 'Ingest mixtape.json and a changes file, output those changes in output.json file')\n\n parser.add_argument('mixtape', help=\"the mixtape file to ingest ex. 
mixtape.json\")\n parser.add_argument('changes', help=\"the changes file to ingest\")\n\n\n args = parser.parse_args()\n\n mixtape_editor = Mixtape_Editor()\n\n mixtape_data = mixtape_editor.get_data(args.mixtape)\n changes_data = mixtape_editor.get_data(args.changes)\n\n for key, value in changes_data.items():\n for change_item in value:\n mixtape_editor.run_mixtape_object_edits(mixtape_data, change_item)\n\n mixtape_editor.output_data(mixtape_data, 'output.json') \n","repo_name":"brittanyrjones/mixtape-editor","sub_path":"mixtape_editor.py","file_name":"mixtape_editor.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"17713498061","text":"from collections import defaultdict, Counter\nimport math\n\ndef minCoins(coins, amount):\n \n memo = {}\n \n # Instead, we could just use @cache (from functools import cache)\n def helper(a):\n \n if a in memo: return memo[a]\n if a < 0: return math.inf \n if a == 0: return 0\n memo[a] = 1 + min(helper(a - c) for c in coins)\n return memo[a]\n \n res = helper(amount)\n return -1 if math.isinf(res) else res \n \nfrom functools import lru_cache\n\n\n# Prefer no return version below \ndef listCombinationsReturn(amount, coins):\n\n @lru_cache(maxsize=None) # or @cache\n def helper(a, start):\n \n if a < 0: return []\n if a == 0: return [[]]\n \n ret = []\n for i in range(start, len(coins)): \n c = coins[i]\n ret += [[c] + l for l in helper(a-c, i)] \n # won't treat different orderings as unique because if we add a later number then we can only add that number or ones after (canonical ordering)\n # change i -> i+1 if there's no reuse \n \n return ret\n \n return helper(amount, 0)\n\n\ndef listCombinations(amount, coins): \n \n ret = []\n \n def helper(a, start, path):\n \n if a < 0: return\n \n if a == 0:\n ret.append(path)\n return\n \n for i in range(start, len(coins)):\n c = coins[i]\n helper(a-c, i, path + [c])\n \n helper(amount, 0, [])\n return ret\n\n\ndef listCombinationsDup(amount, coins): \n\n ret = []\n coins.sort() ## For dup handling\n \n def helper(a, start, path):\n \n if a < 0: return\n \n if a == 0:\n ret.append(path)\n return\n \n for i in range(start, len(coins)):\n if i > start and coins[i] == coins[i-1]: continue # For dup handling, beware: continue\n c = coins[i]\n helper(a-c, i + 1, path + [c]) ## j+1 because of no-reuse\n \n helper(amount, 0, [])\n return ret\n","repo_name":"rajkar86/python_coding_interviews","sub_path":"basics/variations/backtracking.py","file_name":"backtracking.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"7226005762","text":"'''\nScript that performs signal preprocessing on audio files so that they are ready to be used by the model\nProcessed data for each test set is written to the appropriate .csv file\n'''\n\nimport os\nimport numpy as np\nimport pandas as pd # for the output DataFrame written to csv below\nfrom tqdm import tqdm\nimport read_wave as rw\nimport spectrogram as sp\n\n# Stored wave file locations\nTRAIN_DIR = 'train'\nTEST_DIR = 'test'\nVALIDATION_DIR = 'validation'\n\n# Feature outputs\nTRAIN_OUT = 'train_data.csv'\nTEST_OUT = 'test_data.csv'\nVALIDATION_OUT = 'validation_data.csv'\n\nCLASSES = ['Claps',\n 'Crashes',\n 'HiHats',\n 'Kicks',\n 'Snares']\n\ndirectories = [TRAIN_DIR, TEST_DIR, VALIDATION_DIR]\nout_files = [TRAIN_OUT, TEST_OUT, VALIDATION_OUT]\n\n# spectrogram 
parameters\nframe_length=0.025\nframe_offset=0.01\nlowFreq=300\nhiFreq=10000\nnumFilters=26\nnumFrames=75\n\nnumDataPoints = numFilters * numFrames\n\nfor dir, out in zip(directories, out_files):\n print(\"Writing \" + dir + \" data to: \" + out)\n # Get filepaths of samples for each class\n class_files = {}\n for c in CLASSES:\n files = os.listdir(dir + '/' + c)\n class_files[c] = files\n\n # Read, process, and write data of each sample to a csv file\n data = []\n for c in class_files:\n print(\"Class being processed: \" + c)\n files = class_files[c]\n paths = []\n for file in files:\n paths.append(dir + '/' + c + '/' + file)\n\n for path in tqdm(paths):\n signal, sr = rw.read_wave(path, normalize=True, length=1, threshold=0.001)\n spec = sp.get_spectrogram(signal, sr, frame_length=frame_length, frame_offset=frame_offset, lowFreq=lowFreq, hiFreq=hiFreq, numFilters=numFilters, numFrames=numFrames)\n dataToWrite = np.append(np.array([c]), spec.flatten())\n data.append(dataToWrite)\n\n header = ['Class']\n header.extend(['D' + str(i) for i in range(numDataPoints)])\n df = pd.DataFrame(columns=header, data=data)\n df.to_csv(out)\n","repo_name":"davidliii/Sound-Sample-Classifier-AI","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31036140078","text":"from wega.db.models import (\n Fiber, Product, Composition, User, Order, OrderItem,\n)\n\n\nEXAMPLE_FIBERS = [\n Fiber(**{\n 'name': 'Other',\n 'description': 'Other fibers is used with low quantity of fibers.',\n }),\n Fiber(**{\n 'name': 'Polyester',\n 'description': 'Polyethylene terephthalate (PET / PETE) is the thermoplastic polymer resin of the polyester family used in synthetic fibers.',\n }),\n Fiber(**{\n 'name': 'Wool',\n 'description': 'Textile fiber obtained from sheep and certain other animals, including cashmere from goats, mohair from goats, qiviut from muskoxen, angora from rabbits, and other types of wool from camelids.',\n }),\n Fiber(**{\n 'name': 'Acrylic',\n 'description': 'Acrylic fibers are synthetic fibers made from a polymer (polyacrylonitrile) with an average molecular weight of ~100,000, about 1900 monomer units. Acrylic is lightweight, soft, and warm, with a wool-like feel.',\n }),\n Fiber(**{\n 'name': 'Cotton',\n 'description': 'Cotton is a soft, fluffy staple fiber that grows in a boll, or protective capsule, around the seeds of cotton plants of the genus Gossypium in the family of Malvaceae. The fiber is almost pure cellulose. The fiber is most often spun into yarn or thread and used to make a soft, breathable textile.',\n }),\n Fiber(**{\n 'name': 'Nylon',\n 'description': 'Nylon is a thermoplastic, silky material. It is a generic designation for a family of synthetic polymers known generically as aliphatic polyamides.',\n }),\n]\n\n\ndef _fill_fibers(db):\n for fiber in EXAMPLE_FIBERS:\n db.session.add(fiber)\n db.session.commit()\n\n\nEXAMPLE_PRODUCTS = [\n Product(**{\n 'name': 'Khaki Coat',\n 'description': '''\n Italian fabric. Slim fit.\n\n Main: 61% wool 28% polyester 6% acrylic 5% other fibres.\n Lining: 100% polyester.\n ''',\n 'image_name': 'coat_uno_300x300.png',\n 'price': 257.24,\n }),\n\n Product(**{\n 'name': 'Waistcoat jacket',\n 'description': '''\n Traditional herringbone jacket with two-button fastening, centre\n vent, notch lapel, flap pockets, ticket pocket, under-collar\n contrast fabric and removable cotton pocket hanky. 
Finished off\n with a printed lining, complemented by a 100% cotton pocket square.\n\n Dry clean only.\n 55% wool 45% polyester. Lining 100% polyester.\n Maximum 99% wool is used in these products and a minimum of 50%\n wool.\n ''',\n 'image_name': 'sako_duo_300x300.png',\n 'price': 189.90,\n }),\n\n Product(**{\n 'name': 'Compact Nylon Jacket',\n 'description': '''\n Essential jacket, designed in high-shine compact nylon to give a\n two-tone effect. Finished with black branded trim.\n\n This jacket is designed in high-shine compact nylon to give a\n two-tone effect. Finished with black branded trim.\n\n Main, lining and wadding 100% polyester.\n ''',\n 'image_name': 'trojka_300x300.png',\n 'price': 298.70,\n }),\n\n Product(**{\n 'name': 'Wadded Jacket',\n 'description': '''\n Essential navy wadded jacket with contrast white zip detailing.\n\n 100% polyester.\n ''',\n 'image_name': 'wadded_300x300.png',\n 'price': 152.00,\n }),\n\n Product(**{\n 'name': 'Pea Coat',\n 'description': '''\n Contemporary pea coat that's a true outerwear staple. Complete with\n a double-breasted front panel and broad lapels, this slim-fit\n jacket will stick around for years to come. Designed with anchor\n brand buttons, versatile slashed pockets and enriched internals.\n\n Main: 60% wool 31% polyester 9% other fibres. Lining: 100%\n polyester.\n\n Maximum 99% wool is used in these products and a minimum of 50%\n wool.\n ''',\n 'image_name': 'pea_coat_300x300.png',\n 'price': 137.30,\n }),\n\n Product(**{\n 'name': 'Single Breasted Mac',\n 'description': '''\n Versatile slim-fit mac that's a true outerwear staple. Complete\n with water-repellent outer fabric and featuring a single-breasted\n front panel and premium Corozo buttons, this jacket is enhanced\n with a wadded lining for extra warmth and will stick around for\n years to come.\n\n 63% cotton 28% polyester 9% nylon. Lining 100% polyester.\n ''',\n 'image_name': 'single_breasted_mac_300x300.png',\n 'price': 210.00,\n }),\n\n Product(**{\n 'name': 'Tapue Covert Coat',\n 'description': '''100% British wool.''',\n 'image_name': 'tapue_covert_300x300.png',\n 'price': 215.15,\n }),\n\n Product(**{\n 'name': 'Fleeced Lined Anorak',\n 'description': '''\n Stylish red anorak with contrast white trim detailing and snug\n fleece lining.\n\n 65% polyester 35% cotton. Upper lining 100% cotton. 
Sleeve and\n lower lining 100% polyester.''',\n 'image_name': 'fleeced_anorak_300x300.png',\n 'price': 89.00,\n }),\n\n Product(**{\n 'name': 'Coat',\n 'description': '',\n 'image_name': '',\n 'price': 19.99,\n }),\n]\n\n\ndef _fill_products(db):\n for product in EXAMPLE_PRODUCTS:\n db.session.add(product)\n db.session.commit()\n\n\nEXAMPLE_COMPOSITIONS = [\n {\n 'product_name': 'Khaki Coat',\n 'fibers': [{\n 'fiber_name': 'Wool',\n 'percentage': 61,\n }, {\n 'fiber_name': 'Polyester',\n 'percentage': 28,\n }, {\n 'fiber_name': 'Acrylic',\n 'percentage': 6,\n }, {\n 'fiber_name': 'Other',\n 'percentage': 5,\n },\n ],\n }, {\n 'product_name': 'Waistcoat jacket',\n 'fibers': [{\n 'fiber_name': 'Wool',\n 'percentage': 55,\n }, {\n 'fiber_name': 'Polyester',\n 'percentage': 45,\n },\n ],\n }, {\n 'product_name': 'Compact Nylon Jacket',\n 'fibers': [{\n 'fiber_name': 'Polyester',\n 'percentage': 100,\n },\n ],\n }, {\n 'product_name': 'Wadded Jacket',\n 'fibers': [{\n 'fiber_name': 'Polyester',\n 'percentage': 100,\n },\n ],\n }, {\n 'product_name': 'Pea Coat',\n 'fibers': [{\n 'fiber_name': 'Wool',\n 'percentage': 60,\n }, {\n 'fiber_name': 'Polyester',\n 'percentage': 31,\n }, {\n 'fiber_name': 'Other',\n 'percentage': 9,\n },\n ],\n }, {\n 'product_name': 'Single Breasted Mac',\n 'fibers': [{\n 'fiber_name': 'Cotton',\n 'percentage': 63,\n }, {\n 'fiber_name': 'Polyester',\n 'percentage': 28,\n }, {\n 'fiber_name': 'Nylon',\n 'percentage': 9,\n },\n ],\n }, {\n 'product_name': 'Tapue Covert Coat',\n 'fibers': [{\n 'fiber_name': 'Wool',\n 'percentage': 100,\n },\n ],\n }, {\n 'product_name': 'Fleeced Lined Anorak',\n 'fibers': [{\n 'fiber_name': 'Polyester',\n 'percentage': 65,\n }, {\n 'fiber_name': 'Cotton',\n 'percentage': 35,\n },\n ],\n }, {\n 'product_name': 'Coat',\n 'fibers': [{\n 'fiber_name': 'Wool',\n 'percentage': 42,\n }, {\n 'fiber_name': 'Cotton',\n 'percentage': 25,\n }, {\n 'fiber_name': 'Polyester',\n 'percentage': 12,\n }, {\n 'fiber_name': 'Acrylic',\n 'percentage': 9,\n }, {\n 'fiber_name': 'Nylon',\n 'percentage': 7,\n }, {\n 'fiber_name': 'Other',\n 'percentage': 5,\n },\n ],\n },\n]\n\n\ndef _fill_compositions(db):\n for item in EXAMPLE_COMPOSITIONS:\n product = Product.query.filter_by(name=item['product_name']).first()\n for data in item['fibers']:\n fiber = Fiber.query.filter_by(name=data['fiber_name']).first()\n composition = Composition(**{\n 'product_id': product.id,\n 'fiber_id': fiber.id,\n 'percentage': data['percentage'],\n })\n db.session.add(composition)\n db.session.commit()\n\n\nEXAMPLE_USERS = [\n User(**{\n 'username': 'admin',\n 'password': 'admin',\n 'admin': True,\n }),\n User(**{\n 'username': 'user',\n 'password': 'pass',\n 'admin': False,\n }),\n]\n\n\ndef _fill_users(db):\n for user in EXAMPLE_USERS:\n db.session.add(user)\n db.session.commit()\n\n\nEXAMPLE_ORDERS = [\n {'username': 'admin',},\n {'username': 'user',},\n]\n\n\ndef _fill_orders(db):\n for item in EXAMPLE_ORDERS:\n user = User.query.filter_by(username=item['username']).first()\n order = Order(**{'user_id': user.id,})\n db.session.add(order)\n db.session.commit()\n\n\nEXAMPLE_ORDER_ITEMS = [\n {\n 'username': 'admin',\n 'items': [\n {'product_name': 'Pea Coat', 'number': 2},\n {'product_name': 'Khaki Coat', 'number': 1},\n {'product_name': 'Waistcoat jacket', 'number': 1},\n {'product_name': 'Single Breasted Mac', 'number': 3},\n ],\n }, {\n 'username': 'user',\n 'items': [\n {'product_name': 'Khaki Coat', 'number': 1},\n {'product_name': 'Single Breasted Mac', 'number': 4},\n ],\n 
},\n]\n\n\ndef _fill_order_items(db):\n for data in EXAMPLE_ORDER_ITEMS:\n order = Order.query.join(User, aliased=True).\\\n filter_by(username=data['username']).first()\n for item in data['items']:\n product = Product.query.filter_by(name=item['product_name']).first()\n order_item = OrderItem(**{\n 'number': item['number'],\n 'order_id': order.id,\n 'product_id': product.id,\n })\n db.session.add(order_item)\n db.session.commit()\n\n\ndef fill_db(db):\n _fill_products(db)\n _fill_fibers(db)\n _fill_compositions(db)\n _fill_users(db)\n _fill_orders(db)\n _fill_order_items(db)\n","repo_name":"mrkvost/wega","sub_path":"example/fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":10766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3587024872","text":"import asyncio\n\n# [START send_metrics_query_async]\nfrom datetime import timedelta\nimport os\n\nfrom azure.core.exceptions import HttpResponseError\nfrom azure.identity.aio import DefaultAzureCredential\nfrom azure.monitor.query.aio import MetricsQueryClient\nfrom azure.monitor.query import MetricAggregationType\n\n\nasync def query_metrics():\n credential = DefaultAzureCredential()\n client = MetricsQueryClient(credential)\n\n metrics_uri = os.environ[\"METRICS_RESOURCE_URI\"]\n async with client:\n try:\n response = await client.query_resource(\n metrics_uri,\n metric_names=[\"Ingress\"],\n timespan=timedelta(hours=2),\n granularity=timedelta(minutes=15),\n aggregations=[MetricAggregationType.AVERAGE],\n )\n\n for metric in response.metrics:\n print(metric.name)\n for time_series_element in metric.timeseries:\n for metric_value in time_series_element.data:\n print(metric_value.timestamp)\n except HttpResponseError as err:\n print(\"something fatal happened\")\n print(err)\n await credential.close()\n\n# [END send_metrics_query_async]\n\nif __name__ == \"__main__\":\n asyncio.run(query_metrics())\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_async.py","file_name":"sample_metrics_query_async.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"46401425391","text":"# Backend API providing all the required functions to interface with the system\n\nimport paho.mqtt.client as mqtt\nimport pymongo\nimport datetime\n\n\ndef connect_to_database(connection_string: str, database_name: str) -> pymongo.database.Database:\n mongo = pymongo.MongoClient(connection_string, serverSelectionTimeoutMS=2000)\n db_names = mongo.list_database_names()\n if database_name in db_names:\n print(\"Connected to MongoDB Server, returning \", database_name, \" database\")\n return mongo[database_name]\n else:\n raise NameError(\"database_name doesn't exist on server\")\n\n\ndef get_collection(select_database: pymongo.database.Database, collection_name: str) -> pymongo.collection.Collection:\n collection_names = select_database.list_collection_names()\n if collection_name in collection_names:\n print(\"returning \", collection_name, \" collection from database\")\n return select_database[collection_name]\n else:\n raise NameError(\"collection_name doesn't exist on selected database\")\n\n\ndef insert_into_collection(select_collection: pymongo.collection.Collection, node_name: str, device_name: str,\n data: str) -> bool:\n if type(select_collection) != pymongo.collection.Collection:\n raise 
TypeError(\"select_collection MUST be of type pymongo.collection.Collection\")\n elif type(node_name) != str:\n raise TypeError(\"node_name MUST be a string\")\n elif type(device_name) != str:\n raise TypeError(\"device_name MUST be a string\")\n\n try:\n data = float(data)\n except:\n print(\"data string cannot be converted into a float!\")\n return False\n\n result = select_collection.insert_one({\n \"node_name\": node_name,\n \"device_name\": device_name,\n \"data\": data,\n \"date\": datetime.datetime.now()\n })\n return result.acknowledged\n\n\ndef connect_to_broker(address: str, client_name: str, message_function, timeout=30) -> mqtt.Client:\n print(\"Connecting to MQTT Server on \", address)\n mqtt_client = mqtt.Client(client_name)\n mqtt_client.CONNECTION_TIMEOUT_DEFAULT = timeout\n mqtt_client.on_message = message_function # attach function to callback\n mqtt_client.connect(address)\n print(\"Connected to MQTT Server, Client: \", client_name)\n return mqtt_client\n\n\ndef subscribe(mqtt_client: mqtt.Client, topic: str, qos: int) -> tuple:\n print(\"Subscribing to \", topic)\n return mqtt_client.subscribe(topic, qos)\n\n\ndef unsubscribe(mqtt_client: mqtt.Client, topic: str) -> tuple:\n print(\"Unsubscribing to \", topic)\n return mqtt_client.unsubscribe(topic)\n\n\ndef publish(mqtt_client: mqtt.Client, topic: str, message: str, qos: int) -> bool:\n result = mqtt_client.publish(topic, message, qos)\n # result.wait_for_publish()\n print(\"Publishing \", message, \" on \", topic, \" QoS: \", qos)\n return result.is_published()\n\n\ndef disconnect(mqtt_client: mqtt.Client):\n print(\"Disconnected from MQTT Server.\")\n return mqtt_client.disconnect()\n\n\ndef start_mqtt_thread(mqtt_client: mqtt.Client):\n print(\"Listening for messages...\")\n mqtt_client.loop_start()\n\n\ndef stop_mqtt_thread(mqtt_client: mqtt.Client):\n print(\"Stopped listening for messages.\")\n mqtt_client.loop_stop()\n\n\ndef forever_mqtt_thread(mqtt_client: mqtt.Client):\n print(\"Listening for messages indefinitely...\")\n mqtt_client.loop_forever()\n\n\n# Splits a topic into a list\ndef parse_topic(topic: str) -> list:\n return topic.split(\"/\")\n\n\n# Splits a message into a list\ndef parse_msg(msg: str) -> list:\n return msg.split(\":\")\n\n\n# Assemble topic back into a string\ndef construct_topic(topic_list: list) -> str:\n return \"/\".join(topic_list)\n\n\n# Check if the user exists in the selected user data collection\ndef check_user_data(database, collection_name, api_key):\n user_doc = database[collection_name].find_one({\"api_key\": api_key})\n if user_doc is None:\n return False\n return True if (user_doc[\"api_key\"]) == api_key else False","repo_name":"robotevan/3010_IO_SPACE","sub_path":"TeamProject/MQTTBackend/backendAPI.py","file_name":"backendAPI.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
+{"seq_id":"2024551824","text":"from __future__ import annotations\n\nimport hashlib\nimport json\nimport re\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union\n\nimport tomlkit\nfrom tomlkit.items import Comment, Whitespace\n\nfrom pdm._types import Source\nfrom pdm.exceptions import ProjectError\nfrom pdm.iostream import stream\nfrom pdm.models import pip_shims\nfrom pdm.models.caches import CandidateInfoCache, HashCache\nfrom pdm.models.candidates import Candidate\nfrom pdm.models.environment import Environment, GlobalEnvironment\nfrom 
pdm.models.repositories import BaseRepository, PyPIRepository\nfrom pdm.models.requirements import Requirement, parse_requirement\nfrom pdm.models.specifiers import PySpecSet\nfrom pdm.project.config import Config\nfrom pdm.project.metadata import MutableMetadata as Metadata\nfrom pdm.utils import (\n atomic_open_for_write,\n cached_property,\n find_project_root,\n get_python_version,\n get_venv_python,\n setdefault,\n)\n\nif TYPE_CHECKING:\n from resolvelib.reporters import BaseReporter\n from tomlkit.container import Container\n\n from pdm._vendor import halo\n from pdm.resolver.providers import BaseProvider\n\n\nclass Project:\n \"\"\"Core project class\"\"\"\n\n PYPROJECT_FILENAME = \"pyproject.toml\"\n DEPENDENCIES_RE = re.compile(r\"(?:(.+?)-)?dependencies\")\n PYPROJECT_VERSION = \"2\"\n GLOBAL_PROJECT = Path.home() / \".pdm\" / \"global-project\"\n\n @classmethod\n def create_global(cls, root_path: Optional[str] = None) -> \"Project\":\n if root_path is None:\n root_path = cls.GLOBAL_PROJECT.as_posix()\n project = cls(root_path)\n project.is_global = True\n project.init_global_project()\n return project\n\n def __init__(self, root_path: Optional[str] = None) -> None:\n self.is_global = False\n self._pyproject = None # type: Optional[Container]\n self._lockfile = None # type: Optional[Container]\n self.core = None\n\n if root_path is None:\n root_path = find_project_root()\n if root_path is None and self.global_config[\"auto_global\"]:\n self.root = self.GLOBAL_PROJECT\n self.is_global = True\n self.init_global_project()\n else:\n self.root = Path(root_path or \"\").absolute()\n\n def __repr__(self) -> str:\n return f\"\"\n\n @property\n def pyproject_file(self) -> Path:\n return self.root / self.PYPROJECT_FILENAME\n\n @property\n def lockfile_file(self) -> Path:\n return self.root / \"pdm.lock\"\n\n @property\n def pyproject(self):\n # type: () -> Container\n if not self._pyproject and self.pyproject_file.exists():\n data = tomlkit.parse(self.pyproject_file.read_text(\"utf-8\"))\n self._pyproject = data\n return self._pyproject\n\n @pyproject.setter\n def pyproject(self, data):\n self._pyproject = data\n\n @property\n def tool_settings(self):\n # type: () -> Union[Container, Dict]\n data = self.pyproject\n if not data:\n return {}\n return setdefault(setdefault(data, \"tool\", {}), \"pdm\", {})\n\n @property\n def lockfile(self):\n # type: () -> Container\n if not self.lockfile_file.is_file():\n raise ProjectError(\"Lock file does not exist.\")\n if not self._lockfile:\n data = tomlkit.parse(self.lockfile_file.read_text(\"utf-8\"))\n self._lockfile = data\n return self._lockfile\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"A read-only dict configuration, any modifications won't land in the file.\"\"\"\n result = dict(self.global_config)\n result.update(self.project_config)\n return result\n\n @property\n def scripts(self) -> Dict[str, Union[str, Dict[str, str]]]:\n return self.tool_settings.get(\"scripts\")\n\n @cached_property\n def global_config(self) -> Config:\n \"\"\"Read-and-writable configuration dict for global settings\"\"\"\n return Config(Path.home() / \".pdm\" / \"config.toml\", is_global=True)\n\n @cached_property\n def project_config(self) -> Config:\n \"\"\"Read-and-writable configuration dict for project settings\"\"\"\n return Config(self.root / \".pdm.toml\")\n\n @cached_property\n def environment(self) -> Environment:\n if self.is_global:\n env = GlobalEnvironment(self)\n # Rewrite global project's python requires to be\n # compatible with the 
exact version\n env.python_requires = PySpecSet(\n \"==\" + get_python_version(env.python_executable, True)\n )\n return env\n if self.config[\"use_venv\"]:\n venv_python = get_venv_python(self.root)\n if venv_python:\n self.project_config[\"python.path\"] = venv_python\n return GlobalEnvironment(self)\n return Environment(self)\n\n @property\n def python_requires(self) -> PySpecSet:\n return PySpecSet(self.meta.requires_python)\n\n def get_dependencies(self, section: Optional[str] = None) -> Dict[str, Requirement]:\n metadata = self.meta\n if section in (None, \"default\"):\n deps = metadata.get(\"dependencies\", [])\n elif section == \"dev\":\n deps = metadata.get(\"dev-dependencies\", [])\n else:\n deps = metadata.get(\"optional-dependencies\", {}).get(section, [])\n result = {}\n for line in deps:\n req = parse_requirement(line)\n req.from_section = section or \"default\"\n # make editable packages behind normal ones to override correctly.\n result[req.identify()] = req\n return result\n\n @property\n def dependencies(self) -> Dict[str, Requirement]:\n return self.get_dependencies()\n\n @property\n def dev_dependencies(self) -> Dict[str, Requirement]:\n return self.get_dependencies(\"dev\")\n\n def iter_sections(self) -> Iterable[str]:\n yield \"default\"\n yield \"dev\"\n if self.meta.optional_dependencies:\n yield from self.meta.optional_dependencies.keys()\n\n @property\n def all_dependencies(self) -> Dict[str, Dict[str, Requirement]]:\n return {\n section: self.get_dependencies(section) for section in self.iter_sections()\n }\n\n @property\n def allow_prereleases(self) -> Optional[bool]:\n return self.tool_settings.get(\"allow_prereleases\")\n\n @property\n def sources(self) -> List[Source]:\n sources = self.tool_settings.get(\"source\", [])\n if not any(source.get(\"name\") == \"pypi\" for source in sources):\n sources.insert(\n 0,\n {\n \"url\": self.config[\"pypi.url\"],\n \"verify_ssl\": self.config[\"pypi.verify_ssl\"],\n \"name\": \"pypi\",\n },\n )\n return sources\n\n def get_repository(\n self, cls: Optional[Type[BaseRepository]] = None\n ) -> BaseRepository:\n \"\"\"Get the repository object\"\"\"\n if cls is None:\n cls = PyPIRepository\n sources = self.sources or []\n return cls(sources, self.environment)\n\n def get_provider(\n self,\n strategy: str = \"all\",\n tracked_names: Optional[Iterable[str]] = None,\n ) -> BaseProvider:\n \"\"\"Build a provider class for resolver.\n\n :param strategy: the resolve strategy\n :param tracked_names: the names of packages that needs to update\n :returns: The provider object\n \"\"\"\n from pdm.resolver.providers import (\n BaseProvider,\n EagerUpdateProvider,\n ReusePinProvider,\n )\n\n repository = self.get_repository(cls=self.core.repository_class)\n allow_prereleases = self.allow_prereleases\n requires_python = self.environment.python_requires\n if strategy == \"all\":\n provider = BaseProvider(repository, requires_python, allow_prereleases)\n else:\n provider_class = (\n ReusePinProvider if strategy == \"reuse\" else EagerUpdateProvider\n )\n preferred_pins = self.get_locked_candidates(\"__all__\")\n provider = provider_class(\n preferred_pins,\n tracked_names or (),\n repository,\n requires_python,\n allow_prereleases,\n )\n return provider\n\n def get_reporter(\n self,\n requirements: List[Requirement],\n tracked_names: Optional[Iterable[str]] = None,\n spinner: Optional[halo.Halo] = None,\n ) -> BaseReporter:\n \"\"\"Return the reporter object to construct a resolver.\n\n :param requirements: requirements to resolve\n :param 
tracked_names: the names of packages that needs to update\n :param spinner: optional spinner object\n :returns: a reporter\n \"\"\"\n from pdm.resolver.reporters import SpinnerReporter\n\n return SpinnerReporter(spinner, requirements)\n\n def get_lock_metadata(self) -> Dict[str, Any]:\n content_hash = tomlkit.string(\"sha256:\" + self.get_content_hash(\"sha256\"))\n content_hash.trivia.trail = \"\\n\\n\"\n data = {\"lock_version\": self.PYPROJECT_VERSION, \"content_hash\": content_hash}\n return data\n\n def write_lockfile(self, toml_data: Container, show_message: bool = True) -> None:\n toml_data[\"metadata\"].update(self.get_lock_metadata())\n\n with atomic_open_for_write(self.lockfile_file) as fp:\n fp.write(tomlkit.dumps(toml_data))\n if show_message:\n stream.echo(f\"Changes are written to {stream.green('pdm.lock')}.\")\n self._lockfile = None\n\n def make_self_candidate(self, editable: bool = True) -> Candidate:\n req = parse_requirement(pip_shims.path_to_url(self.root.as_posix()), editable)\n req.name = self.meta.name\n return Candidate(\n req, self.environment, name=self.meta.name, version=self.meta.version\n )\n\n def get_locked_candidates(\n self, section: Optional[str] = None\n ) -> Dict[str, Candidate]:\n if not self.lockfile_file.is_file():\n return {}\n section = section or \"default\"\n result = {}\n for package in [dict(p) for p in self.lockfile.get(\"package\", [])]:\n if section != \"__all__\" and section not in package[\"sections\"]:\n continue\n version = package.get(\"version\")\n if version:\n package[\"version\"] = f\"=={version}\"\n package_name = package.pop(\"name\")\n req = Requirement.from_req_dict(package_name, dict(package))\n can = Candidate(req, self.environment, name=package_name, version=version)\n can.marker = req.marker\n can.hashes = {\n item[\"file\"]: item[\"hash\"]\n for item in self.lockfile[\"metadata\"]\n .get(\"files\", {})\n .get(f\"{req.key} {version}\", [])\n } or None\n result[req.identify()] = can\n if section in (\"default\", \"__all__\") and self.meta.name and self.meta.version:\n result[self.meta.project_name.lower()] = self.make_self_candidate(True)\n return result\n\n def get_content_hash(self, algo: str = \"md5\") -> str:\n # Only calculate sources and dependencies sections. 
Otherwise lock file is\n # considered as unchanged.\n dump_data = {\n \"sources\": self.tool_settings.get(\"source\", []),\n \"dependencies\": self.meta.get(\"dependencies\", []),\n \"dev-dependencies\": self.meta.get(\"dev-dependencies\", []),\n \"optional-dependencies\": self.meta.get(\"optional-dependencies\", {}),\n \"requires-python\": self.meta.get(\"requires-python\", \"\"),\n }\n pyproject_content = json.dumps(dump_data, sort_keys=True)\n hasher = hashlib.new(algo)\n hasher.update(pyproject_content.encode(\"utf-8\"))\n return hasher.hexdigest()\n\n def is_lockfile_hash_match(self) -> bool:\n if not self.lockfile_file.exists():\n return False\n hash_in_lockfile = str(\n self.lockfile.get(\"metadata\", {}).get(\"content_hash\", \"\")\n )\n if not hash_in_lockfile:\n return False\n algo, hash_value = hash_in_lockfile.split(\":\")\n content_hash = self.get_content_hash(algo)\n return content_hash == hash_value\n\n def get_pyproject_dependencies(self, section: str) -> List[str]:\n \"\"\"Get the dependencies array in the pyproject.toml\"\"\"\n if section == \"default\":\n return setdefault(self.meta, \"dependencies\", [])\n elif section == \"dev\":\n return setdefault(self.meta, \"dev-dependencies\", [])\n else:\n return setdefault(\n setdefault(self.meta, \"optional-dependencies\", {}), section, []\n )\n\n def add_dependencies(\n self, requirements: Dict[str, Requirement], show_message: bool = True\n ) -> None:\n for _, dep in requirements.items():\n deps = self.get_pyproject_dependencies(dep.from_section)\n matched_index = next(\n (i for i, r in enumerate(deps) if dep.matches(r)), None\n )\n if matched_index is None:\n deps.append(dep.as_line())\n else:\n req = dep.as_line()\n deps[matched_index] = req\n # XXX: This dirty part is for tomlkit.Array.__setitem__()\n j = 0\n for i in range(len(deps._value)):\n if isinstance(deps._value[i], (Comment, Whitespace)):\n continue\n if j == matched_index:\n deps._value[i] = tomlkit.item(req)\n break\n j += 1\n deps.multiline(True)\n self.write_pyproject(show_message)\n\n def write_pyproject(self, show_message: bool = True) -> None:\n with atomic_open_for_write(\n self.pyproject_file.as_posix(), encoding=\"utf-8\"\n ) as f:\n f.write(tomlkit.dumps(self.pyproject))\n if show_message:\n stream.echo(f\"Changes are written to {stream.green('pyproject.toml')}.\")\n self._pyproject = None\n\n @property\n def meta(self) -> Optional[Metadata]:\n if not self.pyproject:\n self.pyproject = {\"project\": tomlkit.table()}\n return Metadata(self.pyproject_file, self.pyproject[\"project\"])\n\n def init_global_project(self) -> None:\n if not self.is_global:\n return\n if not self.pyproject_file.exists():\n self.root.mkdir(parents=True, exist_ok=True)\n self.pyproject_file.write_text(\n \"\"\"\\\n[project]\ndependencies = [\"pip\", \"setuptools\", \"wheel\"]\n\"\"\"\n )\n self._pyproject = None\n\n @property\n def cache_dir(self) -> Path:\n return Path(self.config.get(\"cache_dir\"))\n\n def cache(self, name: str) -> Path:\n path = self.cache_dir / name\n path.mkdir(parents=True, exist_ok=True)\n return path\n\n def make_wheel_cache(self) -> pip_shims.WheelCache:\n return pip_shims.WheelCache(\n self.cache_dir.as_posix(), pip_shims.FormatControl(set(), set())\n )\n\n def make_candidate_info_cache(self) -> CandidateInfoCache:\n\n python_hash = hashlib.sha1(\n str(self.environment.python_requires).encode()\n ).hexdigest()\n file_name = f\"package_meta_{python_hash}.json\"\n return CandidateInfoCache(self.cache_dir / file_name)\n\n def make_hash_cache(self) -> 
HashCache:\n return HashCache(directory=self.cache(\"hashes\").as_posix())\n","repo_name":"orions-stardom/pdm","sub_path":"pdm/project/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":15739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"}
+{"seq_id":"38994935349","text":"def find_prime(num):\n for i in range(2, num):\n if num % i == 0:\n return False\n return True\n\nN = int(input())\narr = list(map(int, input().split()))\ncount = 0\nfor a in arr:\n if a < 2:\n continue\n elif a == 2:\n count += 1\n else: \n if find_prime(a):\n count += 1\nprint(count)","repo_name":"soominnn/Algorithms","sub_path":"Boj/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"9882999526","text":"import aiohttp\nimport asyncio\n\nasync def fetch(session, url):\n with aiohttp.Timeout(10, loop=session.loop):\n async with session.get(url) as response:\n return await response.text()\n\nURLs = ['http://python.org', 'http://www.youtube.com', 'http://svt.se', 'http://travis-ci.org', 'http://google.com',\n 'http://github.com', 'http://www.kickstarter.com', 'http://www.meetup.com', 'http://gp.se', 'http://dn.se']\n\nasync def main(loop):\n async with aiohttp.ClientSession(loop=loop) as session:\n for url in URLs:\n html = await fetch(session, url)\n print(url, len(html))\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n t0 = loop.time()\n loop.run_until_complete(main(loop))\n t1 = loop.time()\n print(t1 - t0)\n","repo_name":"magnus-lycka/gothpy-asyncio","sub_path":"fetchurl1.py","file_name":"fetchurl1.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"5120403012","text":"\"\"\"\r\nImage Similarity by Reverse image search and retrieval with Keras\r\nA new image uploaded is only to be compared with a number of \"n_hashes\" recently uploaded images.\r\n\r\nNethika Suraweera\r\nTinMan Kinetics\r\n02/05/2018\r\n\r\nNew Addition:\r\ncomparing with a number of \"n_hashes\" recently uploaded images\r\n==============================================================\r\n\r\nThe `image_hashes.json` file saves the hashes calculated for each uploaded image. \r\nIf the new image is similar to a previously uploaded image, a mean hash will be calculated and \r\n`image_hashes.json` will be updated accordingly.\r\nA new image uploaded will only be compared with a number of \"n_hashes\" recently uploaded images.\r\n\r\nResNet50 Model: \r\n===============\r\n\r\nInspired by: https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-search.ipynb\r\n\r\nThis script uses the previously-trained neural network ResNet50 \r\nfrom Keras to search through a large collection of images. \r\nSpecifically, it will show you how you can retrieve a set \r\nof images which are similar to a query image, \r\nreturning you its n nearest neighbors in terms of image content.\r\nIt removes the last classification layer from the network, \r\nleaving the last fully-connected layer as the new output layer. \r\nThe way we do this is by instantiating a new model called \r\nfeature_extractor which takes a reference to the desired \r\ninput and output layers in our ResNet50 model. 
\r\nThus, feature_extractor's output is the layer just before the classification, \r\nthe last 2048-neuron fully connected layer.\r\nWith ResNet50 model, a distance of 20.0 seems to be a good threshold to filter similar images.\r\n\"\"\"\r\n\r\n\r\n\r\nimport os\r\nimport sys\r\nimport glob\r\nimport random\r\nimport numpy as np\r\nimport keras\r\nfrom keras.preprocessing import image\r\nfrom keras.applications.imagenet_utils import decode_predictions, preprocess_input\r\nfrom keras.models import Model\r\nfrom sklearn.decomposition import PCA\r\nfrom scipy.spatial import distance\r\nfrom keras.applications.resnet50 import ResNet50\r\nfrom PIL import ExifTags\r\nfrom PIL import Image\r\nimport json\r\nimport string\r\n\r\n\r\ndef get_image(path,input_shape):\r\n \"\"\"\r\n get_image will return a handle to the image itself, and a numpy array of its pixels to input the network.\r\n This function preprocesses the images to be in the correct orientation using exif tags data. \r\n \"\"\"\r\n img = image.load_img(path)\r\n exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\r\n #print (exif)\r\n if 'Orientation' in exif:\r\n if exif['Orientation'] == 6:\r\n img=img.rotate(-90, expand=True)\r\n if exif['Orientation'] == 8:\r\n img=img.rotate(90, expand=True) \r\n if exif['Orientation'] == 3:\r\n img=img.rotate(180, expand=True) \r\n img=img.resize(input_shape)\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n return img, x\r\n\r\ndef get_closest_images(query_image_idx, num_results=5):\r\n \"\"\"\r\n returns the indexes and the distances of the similar images for the given queary image.\r\n \"\"\"\r\n distances = [ distance.euclidean(pca_features[query_image_idx], feat) for feat in pca_features ]\r\n idx_closest = sorted(range(len(distances)), key=lambda k: distances[k])[1:num_results+1]\r\n dis_closest = sorted(distances)[1:num_results+1]\r\n return (idx_closest, dis_closest)\r\n\r\n\r\n#load the model\r\nmodel = ResNet50(weights='imagenet') #threshold =20 get_layer(\"flatten_1\")\r\n\r\n# input shape\r\ninput_shape = model.input_shape[1:3]\r\n\r\n#remove the last layer\r\nfeat_extractor = Model(inputs=model.input, outputs=model.get_layer(\"flatten_1\").output) #ResNet50\r\n\r\n\r\n# Image Location\r\nnew_images_path = \"dupes\"\r\n\r\n# Read new Images\r\nnew_images = glob.glob(os.path.join(new_images_path, \"*.jpg\"))\r\n\r\n# write/read to/from json\r\njson_file= 'image_hashes.json'\r\n\r\n# number of hashes to save\r\nn_hashes = 5\r\n\r\n# Match with new image\r\n## In a loop:\r\n#for new_image_path in new_images:\r\n ######################## \r\n\r\n## One at a time:\r\n#new_image_path=os.path.join(user, 'maroon_bells.jpg')\r\nnew_image_path=new_images[0]\r\n\r\nprint(\"Processing file: {}\".format(new_image_path))\r\n\r\nimg, x = get_image(new_image_path,input_shape)\r\nfeat = feat_extractor.predict(x)[0]\r\n\r\n# Read from json file\r\nif os.path.exists(json_file):\r\n image_data_all = json.load(open(json_file))\r\nelse:\r\n image_data_all = []\r\n\r\n# Threshold set to identify different images\r\nthreshold = 20.0\r\n\r\n#Comapre the image with the images in json file\r\n\r\nmatch_dict = {}\r\n\r\n\r\nif len(image_data_all) > n_hashes:\r\n image_data = image_data_all[-n_hashes:]\r\nelse:\r\n image_data = image_data_all\r\n\r\nfor j in range(len(image_data)):\r\n image_id = image_data[j]['image_id']\r\n image_freq = image_data[j]['frequency']\r\n image_hash = json.loads(image_data[j]['hash'])\r\n dist = 
distance.euclidean(feat,image_hash)\r\n print(image_id , image_freq, dist)\r\n if dist < threshold:\r\n match_dict[j] = dist\r\nif match_dict:\r\n indx = min(match_dict, key=match_dict.get)\r\n min_hash = json.loads(image_data[indx]['hash'])\r\n # find New Mean for hash\r\n new_mean = np.mean([feat,min_hash],axis=0)\r\n #update hash\r\n image_data[indx]['hash'] = str(new_mean.tolist())\r\n #update frequency\r\n image_data[indx]['frequency'] += 1 \r\n print(\"Matched with:\")\r\n print(image_data[indx]['image_id'])\r\nelse: #new image\r\n print(\"No match! -> New Image:\")\r\n image_id = str(len(image_data_all)+1).zfill(4)\r\n print(image_id)\r\n tempt_dict={'image_id': image_id, 'frequency': 1,'hash':str(feat.tolist())}\r\n image_data_all.append(tempt_dict)\r\n \r\n#update json file\r\nwith open(json_file, 'w') as imagefile:\r\n json.dump(image_data_all, imagefile)\r\n\r\n\r\n####################################################################################\r\n\r\n","repo_name":"Nethika/image-similarity","sub_path":"image_similarity_save_hash.py","file_name":"image_similarity_save_hash.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13009963187","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFile name: WiFi_API_File_Demo.py\nCreated on Thu Aug 13 16:53:23 2020\nDescription: MoonRanger Central Computer WiFi API File Transfer Usage\nOS: Windows or Linux\n@author: Tejas Anilkumar P. \nCarnegie Mellon University\n\"\"\"\n\n\nfrom WiFi_API_Central import WiFi_API\nimport argparse\nimport socket\nimport threading\n\ndef initParser():\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\",\"--mode\",required=True,help=\"MODE: Server or Client\")\n parser.add_argument(\"-ip\",\"--server_ip\",required=True,help=\"SERVER_IP: e.g. 192.168.1.10\")\n parser.add_argument(\"-rp\",\"--recv_port\",default = 5051,help=\"RECEIVE_PORT: e.g. 5051\")\n parser.add_argument(\"-sp\",\"--send_port\",default = 5050,help=\"SEND_PORT: e.g. 5050\")\n parser.add_argument(\"-bs\",\"--buffer_size\",default = 4096,help=\"BUFFER_SIZE: e.g. 1024\")\n args = parser.parse_args() \n\n \n#Main\ninitParser()\nserver_flag = False\nif args.mode == \"Server\":\n server_flag = True\nobj = WiFi_API(args.server_ip,args.recv_port,args.send_port,args.buffer_size,server_flag)\nthreading.Thread(target=obj.startFileChannel(),args=()).start()\n\ntry:\n n = 0\n while n!=3:\n n = int(input(\"Select Option:\\n 1. Send Files \\n 2. Receive File \\n 3. 
Exit\\n Enter your choice: \"))\n if n == 1:\n file_loc = str(input(\"Enter File Location: \"))\n threading.Thread(target=obj.sendFile(file_loc),args=()).start()\n elif n == 2: \n threading.Thread(target=obj.recvFile(),args=()).start()\n obj.stop()\nexcept KeyboardInterrupt:\n obj.stop()\n \n \n","repo_name":"TejasAnilkumar12/MR-WiFi-API-Central","sub_path":"WiFi_API_File_Demo.py","file_name":"WiFi_API_File_Demo.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"43398758690","text":"# Problem: continuous positive-integer sequences whose sum is s\n# Given a positive integer target, output all sequences of consecutive positive integers (each containing at least two numbers) whose sum equals target.\n# Numbers inside a sequence are in ascending order, and the sequences are ordered by their first number.\n# Input: target = 9\n# Output: [[2,3,4],[4,5]]\nfrom typing import List\n\n\nclass Solution:\n def findContinuousSequence(self, target: int) -> List[List[int]]:\n if target < 3:\n return None\n res = []\n small, big = 1, 2\n curSum = small + big\n mid = target // 2\n while small <= mid:\n while curSum > target and small <= mid:\n curSum -= small\n small += 1\n if curSum == target:\n res.append([i for i in range(small, big + 1)])\n big += 1\n curSum += big\n return res\n\nprint(Solution().findContinuousSequence(4))\n","repo_name":"CharmSun/FuckingOffer","sub_path":"python/57_02_ContinuousSequenceWithSum.py","file_name":"57_02_ContinuousSequenceWithSum.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"17762738511","text":"\"\"\"\nFloat numbers should not be compared with the == operator. Use the approach shown below to compare floats instead.\n\"\"\"\nnum = 0.1 + 0.1 + 0.1\neps = 0.000000001 # comparison precision\n\nif abs(num - 0.3) < eps: # num differs from 0.3 by less than 0.000000001\n print('YES')\nelse:\n print('NO')","repo_name":"DAlferova/stepik_advanced","sub_path":"P8_decimal/compare_float.py","file_name":"compare_float.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"72171938403","text":"\"\"\"\nPandas Data Visualization Exercise\n\"\"\"\nfrom pandas import read_csv, DataFrame\nfrom matplotlib.pyplot import show, figure, style, legend\n\n# Data\ndf3 = DataFrame(read_csv(\"df3\"))\nprint(df3.info())\nprint(df3.head())\n\n# Diagram\ndf3.plot.scatter(x=\"a\", y=\"b\", c=\"red\", s=50, figsize=(12, 3))\nfigure()\ndf3[\"a\"].plot.hist()\nfigure()\nstyle.use(\"ggplot\")\ndf3[\"a\"].plot.hist(alpha=0.5, bins=25)\ndf3[[\"a\", \"b\"]].plot.box()\nfigure()\ndf3[\"d\"].plot.kde()\nfigure()\ndf3[\"d\"].plot.density(lw=5, ls=\"--\")\ndf3.iloc[0:30].plot.area(alpha=0.4)\n\n# Bonus\nf = figure()\ndf3.iloc[0:30].plot.area(alpha=0.4, ax=f.gca())\nlegend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5))\n\n# LAST STEP\nshow()\n","repo_name":"jgyy/py-dsml-jose","sub_path":"02_python_for_data_visualization/10/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"40542868946","text":"from dash_extensions.enrich import DashBlueprint, Output, Input\nfrom dash import ctx, dcc\nimport dash_mantine_components as dmc\nfrom dash_iconify import DashIconify\nfrom dash.exceptions import PreventUpdate\n\nfrom ..utils.functions import (\n make_indice_summary,\n get_stock_list,\n make_plot,\n create_stocks_table,\n)\nfrom ...API.external_API import yahoo_finance\n\nindex_bp = 
DashBlueprint()\n\nindex_values = [\"^DJI\", \"^GSPC\", \"^NDX\", \"^GDAXI\"]\n\nindex_bp.layout = dmc.Grid(\n [\n dmc.ChipGroup(\n make_indice_summary(index_values),\n id=\"chip-index\",\n style={\n \"display\": \"flex\",\n \"flex-grow\": \"0\",\n \"flex-basis\": \"100%\",\n \"flex-direction\": \"row\",\n \"width\": \"100%\",\n \"justify-content\": \"space-between\",\n \"flex-wrap\": \"wrap\",\n \"height\": \"auto\",\n \"min-height\": \"2em\",\n \"margin-bottom\": \"2em\",\n },\n ),\n dmc.Col(\n dmc.Paper(\n [\n dmc.Select(\n id=\"select-index\",\n label=\"Select a index\",\n data=yahoo_finance.get_indices(index_values),\n ),\n dmc.Space(h=20),\n dmc.TransferList(\n id=\"transfer-list\",\n showTransferAll=False,\n value=[[], []],\n ),\n ],\n radius=\"lg\",\n p=\"xs\",\n style={\"min-height\": \"470px\"},\n ),\n style={\n \"padding\": \"0px\",\n \"margin\": \"0px\",\n },\n span=2,\n ),\n dmc.Col(\n dmc.Paper(\n [\n dmc.Table(\n verticalSpacing=\"xs\", horizontalSpacing=\"xs\", id=\"company-table\"\n )\n ],\n radius=\"lg\",\n p=\"xs\",\n style={\"min-height\": \"470px\"},\n ),\n style={\n \"padding\": \"0px\",\n \"margin\": \"0px\",\n },\n span=5,\n ),\n dmc.Col(\n dmc.Paper(\n [dcc.Graph(id=\"stocks-graph\")],\n radius=\"lg\",\n p=\"xs\",\n style={\"min-height\": \"455px\"},\n ),\n style={\n \"padding\": \"0px\",\n \"margin\": \"0px\",\n },\n span=5,\n ),\n ],\n gutter=\"sm\",\n grow=True,\n align=\"stretch\",\n style={\n \"gap\": \"10px\",\n },\n)\n\n\n@index_bp.callback(\n Output(\"select-index\", \"value\"),\n Output(\"chip-index\", \"value\"),\n Input(\"select-index\", \"value\"),\n Input(\"chip-index\", \"value\"),\n prevent_initial_call=True,\n)\ndef syncro_chip_select(select_value, chip_value):\n if ctx.triggered_id is None:\n raise PreventUpdate\n if ctx.triggered_id == \"chip-index\":\n return chip_value, chip_value\n elif ctx.triggered_id == \"select-index\":\n return select_value, select_value\n\n\n@index_bp.callback(\n Output(\"transfer-list\", \"value\"),\n Input(\"select-index\", \"value\"),\n prevent_initial_call=True,\n)\ndef populate_list(selected_index):\n if selected_index is None:\n raise PreventUpdate\n else:\n return get_stock_list(selected_index)\n\n\n@index_bp.callback(\n Output(\"stocks-graph\", \"figure\"),\n Output(\"company-table\", \"children\", allow_duplicate=True),\n Output(\"notifications-container\", \"children\"),\n Input(\"transfer-list\", \"value\"),\n prevent_initial_call=True,\n)\ndef update_graph(stocks_list):\n stock_list = stocks_list[1]\n\n if ctx.triggered_id is None:\n raise PreventUpdate\n\n elif stock_list == []:\n return [], [], []\n else:\n message = dmc.Notification(\n id=\"my-notification\",\n title=\"Data loaded\",\n message=\"Thank you for waiting\",\n color=\"green\",\n action=\"update\",\n icon=DashIconify(icon=\"akar-icons:circle-check\"),\n )\n\n history, company_data = yahoo_finance.get_history_data(stock_list)\n history = history.reset_index()\n return ((make_plot(history)), create_stocks_table(company_data), message)\n\n\n@index_bp.callback(\n Output(\"notifications-container\", \"children\", allow_duplicate=True),\n Input(\"transfer-list\", \"value\"),\n)\ndef update_graph(stocks_list):\n stock_list = stocks_list[1]\n\n if ctx.triggered_id is None:\n raise PreventUpdate\n\n elif stock_list == []:\n raise PreventUpdate\n\n return dmc.Notification(\n id=\"my-notification\",\n title=\"Preparing Data\",\n message=\"The process has started.\",\n loading=True,\n color=\"orange\",\n action=\"show\",\n autoClose=False,\n 
disallowClose=True,\n )\n","repo_name":"Simon-U/dash_flask","sub_path":"application/dashboard/pages/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18613163252","text":"def phrase_search(words: list[tuple[int, str]]) -> int:\n \"\"\"\n Searches given sentence for known phrases containing a comma\n and returns number of commas that should be added to expected comma count.\n :param words: a list of tuples of word order in sentence and a word\n :return: number of commas to be added to expected comma count\n \"\"\"\n words_only = [word for (_, word) in sorted(words)]\n\n phrases = two_conjunctions(words_only)\n\n return phrases\n\n\ndef two_conjunctions(words: list[str]) -> int:\n \"\"\"\n Searches for instances of two conjunctions in a row, e.g.\n \"že když\", \"protože když\", \"že kdyby\", etc.\n :param words: list of words\n :return: number of instances of two conjunctions in a row\n \"\"\"\n instances = 0\n\n # We don't care about the first word (no comma in front of it),\n # and the last word (no another word after it, so no two conjunctions in a row),\n # so the range is from the 2nd word to the second to last.\n for i in range(1, len(words) - 2):\n if words[i].lower() == 'že':\n if words[i + 1].lower() in ['když', 'kdyby', 'kdybych', 'kdybys', 'kdybychom', 'kdybyste', 'přestože']:\n instances += 1\n if words[i].lower() in ['protože', 'přestože']:\n if words[i + 1].lower() in ['když', 'kdyby', 'kdybych', 'kdybys', 'kdybychom', 'kdybyste']:\n instances += 1\n\n return instances\n","repo_name":"klimarichard/nprg045","sub_path":"src/tagger/phrase_searcher.py","file_name":"phrase_searcher.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74945529760","text":"from turtle import Turtle\nSTARTING_POSITION = (0, -280)\nMOVE_DISTANCE = 10\nFINISH_LINE_Y = 280\n\n\nclass Player(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape(\"turtle\")\n self.penup()\n self.goto(STARTING_POSITION)\n self.setheading(90)\n\n def move_forward(self):\n self.forward(10)\n","repo_name":"GokulBakkiyarasu/Dash_car","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"1175913437","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nfrom typing import Optional\n\nfrom user import User\nfrom iou import IOU\n\n\nclass RestAPI:\n def __init__(self, database: dict[str, list[dict]]=None):\n self.database = database\n\n def get(self, url: str, payload: str=None):\n if payload:\n payload_dict = json.loads(payload)\n\n # match url:\n # case '/users':\n # return {'users': self.get_all_users_sorted_by_name()}\n # case _:\n # pass\n if url == '/users':\n if payload is None:\n return json.dumps({'users': self.get_all_users_sorted_by_name()})\n\n user_name = payload_dict['users'][0]\n return json.dumps(\n {\n 'users': [self.get_user_dict_by_name(name=user_name)]\n }\n )\n \n\n def post(self, url: str, payload: str=None):\n if payload:\n json_payload = json.loads(payload)\n\n # match url:\n # case '/add':\n # return self.add_user(user=user)\n # case '/iou':\n # iou = IOU(\n # lender = json_payload['lender'],\n # borrower = json_payload['borrower'],\n # amount = json_payload['amount']\n # )\n # return 
self.add_iou(iou=iou)\n # case _:\n # pass\n\n if url == '/add':\n new_user = User(json_payload['user'])\n return json.dumps(self.add_user(user=new_user).__dict__ | {'balance': 0.0})\n\n if url == '/iou':\n iou = IOU(\n lender = json_payload['lender'],\n borrower = json_payload['borrower'],\n amount = json_payload['amount']\n )\n return json.dumps(\n {\n 'users': self.add_iou(iou)\n }\n )\n\n def get_all_users_sorted_by_name(self) -> list[User]:\n return sorted(self.database['users'], key=lambda user: user['name'])\n\n def get_user_dict_by_name(self, name) -> dict:\n for user_dict in self.database['users']:\n if user_dict['name'] == name:\n return user_dict\n\n return None\n\n def add_user(self, user: User) -> User:\n \"\"\"\n Add a user to `self.database` and returns it.\n \"\"\"\n self.database['users'].append(user)\n\n return user\n\n def add_iou(self, iou: IOU) -> dict:\n \"\"\"\n Create an IOU and returns the updated database of users.\n \"\"\"\n lender_user_dict = {}\n borrower_user_dict = {}\n\n # Update database.\n for user_dict in self.database['users']:\n if iou.lender == user_dict['name']:\n lender_user_dict = user_dict\n # If lender owes borrower.\n if iou.borrower in user_dict['owes']:\n # If lender owes borrower less than new loan.\n if iou.amount > user_dict['owes'][iou.borrower]:\n user_dict['owed_by'][iou.borrower] = iou.amount - user_dict['owes'][iou.borrower]\n del user_dict['owes'][iou.borrower]\n # If lender owes borrower same as new loan.\n elif iou.amount == user_dict['owes'][iou.borrower]:\n del user_dict['owes'][iou.borrower]\n else:\n user_dict['owes'][iou.borrower] -= iou.amount\n else:\n if iou.borrower in user_dict['owed_by']:\n user_dict['owed_by'][iou.borrower] += iou.amount\n else:\n user_dict['owed_by'][iou.borrower] = iou.amount\n\n user_dict['balance'] = User.get_balance_of_user_dict(\n user_dict=user_dict)\n\n if iou.borrower == user_dict['name']:\n borrower_user_dict = user_dict\n # If lender owes borrower.\n if iou.lender in user_dict['owed_by']:\n # If lender owes borrower less than new loan.\n if iou.amount > user_dict['owed_by'][iou.lender]:\n user_dict['owes'][iou.lender] = iou.amount - user_dict['owed_by'][iou.lender]\n del user_dict['owed_by'][iou.lender]\n # If lender owes borrower same as new loan.\n elif iou.amount == user_dict['owed_by'][iou.lender]:\n del user_dict['owed_by'][iou.lender]\n else:\n user_dict['owed_by'][iou.lender] -= iou.amount\n else:\n if iou.lender in user_dict['owes']:\n user_dict['owes'][iou.lender] += iou.amount\n else:\n user_dict['owes'][iou.lender] = iou.amount\n\n user_dict['balance'] = User.get_balance_of_user_dict(\n user_dict=user_dict)\n\n output = sorted([lender_user_dict, borrower_user_dict], key=lambda user: user['name'])\n return output\n\n","repo_name":"karlosdaniel451/python-exercises","sub_path":"exercism/classes/rest_api/rest_api.py","file_name":"rest_api.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26109948456","text":"import torch\nfrom torchvision import models, transforms\nfrom PIL import Image\nimport torch.nn.functional as F\nfrom glob import glob\nimport openslide\nfrom openslide import deepzoom\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nfrom scipy import io\n\n# open the whole slide image and reach information\ndef get_wsi_info(slidepath):\n slide = openslide.open_slide(slidepath)\n print('Numbers of level in this WSI: ', slide.level_count)\n print('Dimensions of 
    print('Number of levels in this WSI: ', slide.level_count)\n    print('Dimensions of all levels in this WSI (width, height):\\n ', slide.level_dimensions)\n    return slide\n\n\n# get tiles from one slide by the deepzoom method\n# input: '../data/slides/11-04622 CD.mrxs'\ndef slide2tile_dz(slidepath, tilesize=512, readlevel=0):\n    slide = get_wsi_info(slidepath)\n    dzi = deepzoom.DeepZoomGenerator(slide, tile_size=tilesize, overlap=0, limit_bounds=False)\n    # return the deepzoom level that corresponds to the requested read level\n    level = dzi.level_count - readlevel - 1\n    tile_num_x, tile_num_y = dzi.level_tiles[level]\n    return level, tile_num_x, tile_num_y, dzi\n\n\n# image pre-processing\npreprocess = transforms.Compose([transforms.ToTensor(),  # preprocess operates on the original image\n                                 transforms.Normalize([0.787, 0.5723, 0.769], [0.1193, 0.1878, 0.0974])])\n\n\n# load the network\ndef load_net(modelpath, numclasses=2):\n    net = models.resnet34(pretrained=False, num_classes=numclasses)\n    net.load_state_dict(torch.load(modelpath))\n    net.cuda()\n    return net.eval()\n\n\n# network and image for prediction\ndef net_prediction(img, net):\n    torch_img = preprocess(img).unsqueeze(0)\n    with torch.no_grad():\n        torch_img = torch_img.cuda()\n        predProb = F.softmax(net(torch_img), dim=1)\n        predBina = torch.argmax(predProb, dim=1)\n    return predProb.squeeze().cpu().detach().numpy(), predBina.cpu().numpy()\n\n\n# main() by deepzoom method of openslide\ndef main_dz(slidepath, modelpath, predlevel=0, tilesize=512, numclass=2):\n    level, num_x, num_y, sample = slide2tile_dz(slidepath, tilesize=tilesize, readlevel=predlevel)\n    net = load_net(modelpath)  # load the model once instead of once per tile\n\n    vis = np.empty((num_x, num_y, numclass))\n    print('Visualization results shape: ', vis.shape)\n    for i in range(num_x):\n        for j in range(num_y):\n            image = np.array(sample.get_tile(level, (i, j)))\n\n            # tissue tiles have a mean per-pixel channel std of roughly 12, background far less,\n            # so skipping tiles below the threshold keeps (mostly) tissue\n            threshold = np.mean(np.std(image, axis=2))\n            # print(threshold)\n            if threshold < 6:\n                continue\n\n            Prob, Bina = net_prediction(image, net)\n            print('row:', i, 'column: ', j, 'pred: ', Bina)\n            vis[i, j, :] = Prob\n\n    return vis\n\n\n# main() by read_region method of openslide\ndef main_rr(slidepath, modelpath, predlevel=0, windowstep=100, tilesize=512, numclass=2):\n    slide = get_wsi_info(slidepath)\n    width, height = slide.level_dimensions[predlevel]\n    net = load_net(modelpath)  # load the model once instead of once per window\n\n    vis = np.empty(((width-tilesize)//windowstep+1, (height-tilesize)//windowstep+1, numclass))\n    print('Visualization results shape: ', vis.shape)\n    for i in range(0, width-tilesize, windowstep):\n        for j in range(0, height-tilesize, windowstep):\n            rgba = slide.read_region((i, j), level=predlevel, size=(tilesize, tilesize))\n            image = np.array(rgba)[:, :, 0:3]\n            # plt.imshow(image)\n            # plt.show()\n\n            # same tissue filter as in main_dz\n            threshold = np.mean(np.std(image, axis=2))\n            # print(threshold)\n            if threshold < 6:\n                continue\n\n            Prob, Bina = net_prediction(image, net)\n            print('column:', i, 'row: ', j, 'pred: ', Bina)\n            vis[i//windowstep, j//windowstep, :] = Prob\n    return vis\n\nif __name__ == \"__main__\":\n    modelpth = '../model/epoch_45.pkl'\n    slidepth = '../data/slides/11-04622 CD.mrxs'\n    start = time.time()\n    visual = main_dz(slidepth, modelpth, predlevel=0, tilesize=512, numclass=2)\n    # visual = main_rr(slidepth, modelpth, predlevel=0, windowstep=100, tilesize=512, numclass=2)\n    #\n    np.save('../results/vis.npy', visual)\n    io.savemat('../results/vis.mat', {'prob': visual})\n    print('Finished! Time consuming (sec): ', time.time() - start)
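# A quick standalone look at the background filter used by main_dz and main_rr
# above: a near-uniform (glass/background) tile has almost zero per-pixel
# channel std, so the mean-std threshold of 6 skips it. The helper name
# _is_tissue and the sample array are illustrative only.
import numpy as np

def _is_tissue(tile, min_std=6.0):
    return float(np.mean(np.std(tile, axis=2))) >= min_std

background = np.full((64, 64, 3), 240, dtype=np.uint8)   # near-white glass
assert not _is_tissue(background)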
    #\n    # visual = np.load('../results/vis100.npy')\n    # plt.imshow(visual[:,:,1])\n    # plt.show()\n\n","repo_name":"gatsby2016/DLforWSI","sub_path":"codes/9_WSI_vis.py","file_name":"9_WSI_vis.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"41804909414","text":"from CommonCode.List.List import List\nfrom Protobuff.mobileUiPb_pb2 import MobileUiPb\n\n\nclass MobilePbConvertor:\n\n    def convert(self, uipb, pb):\n        uipb.code = pb.code\n        uipb.number = pb.number\n\n    def getMobileListtUiPb(self, pb):\n        mobileList = List()\n        for mobile in pb.mobile:\n            mobileUipb = MobileUiPb()\n            # copy each source entry into a fresh UI protobuf and collect it\n            self.convert(uipb=mobileUipb, pb=mobile)\n            mobileList.__append__(mobileUipb)\n        return mobileList\n","repo_name":"Studence/studenceServer","sub_path":"Convertor/MobilePbConvertor.py","file_name":"MobilePbConvertor.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"8358716406","text":"import html\n\nfrom unearth.diagram_element import DiagramElement\nfrom unearth.common.embedded_html import EmbeddedHtml\nfrom unearth.common.event_broker import EventBroker\n\nstart = \"\"\"
<fieldset>
    <legend>%(title)s</legend>
    <table>
\"\"\"\n\ncontrol_template = \"\"\"\n<tr>\n    <td><input type='checkbox' id='%(control_id)s'></td>\n    <td>%(control_label)s</td>\n    <td><input type='range' id='%(control_id)s_range' min='0' max='1' step='0.01' value='%(opacity)s'></td>\n</tr>\n\"\"\"\n\njs_template = \"\"\"\ndocument.getElementById(\"%(control_id)s\").checked = %(visible)s;\nvar cb = function(evt) {\n    var control_id = \"%(control_id)s\";\n    var control_ele = document.getElementById(control_id);\n    var range_ele = document.getElementById(control_id+\"_range\");\n    var opacity = Number.parseFloat(range_ele.value);\n    if (!control_ele.checked) {\n        var payload = {\"layer\":\"%(layer_id)s\",\"value\":false};\n    } else {\n        var payload = {\"layer\":\"%(layer_id)s\",\"value\":opacity};\n    }\n    pubsubs_publish(\"%(id)s\",payload,\"manage_layers\");\n};\n\nvar control_ele = document.getElementById(\"%(control_id)s\");\ncontrol_ele.onclick = cb;\nif (%(visible)s) {\n    control_ele.checked = 'checked';\n} else {\n    control_ele.removeAttribute('checked');\n}\n\nvar control_ele_range = document.getElementById(\"%(control_id)s_range\");\ncontrol_ele_range.onchange = cb;\ncontrol_ele_range.oninput = cb;\n// document.addEventListener(\"load\",\n//    function(e) {\n//        control_ele_range.value = %(opacity)s;\n//    });\n\"\"\"\n\nend = \"\"\"
\n    </table>\n</fieldset>\n
    \"\"\"\n\ncss = \"\"\"\nfieldset {\n border-width: 2px;\n border-radius: 5px;\n}\"\"\"\n\nclass LayerController(EmbeddedHtml,EventBroker):\n \"\"\"\n Create an embedded HTML map layer manager\n\n Arguments:\n chart(unearth.charts.chart.Chart): a chart object\n\n Keyword Arguments:\n layers(list): a list of unearth.charts.chart_layer.ChartLayer objects to be controlled by this layer manager.\n if None, control all layers in the chart\n title: a title to display above the layer controls\n width(int): width of the embedded HTML\n height(int): height of the embedded HTML\n \"\"\"\n\n def __init__(self,chart,layers=None,title=\"Layer Controls\",width=512,height=512):\n EmbeddedHtml.__init__(self,\"\",css,width,height)\n EventBroker.__init__(self)\n self.chart = chart\n if layers is not None:\n self.layers = layers\n else:\n self.layers = []\n for layer in self.chart.get_layers():\n metadata = layer.get_metadata()\n label = metadata.get_name()\n self.layers.append({\"layer\":layer,\"label\":label})\n\n html_content = start%({\"id\":self.get_id(),\"title\":html.escape(title,True)})\n js_content = \"\"\n layer_nr = 0\n for layer_dict in self.layers:\n layer_nr += 1\n layer = layer_dict[\"layer\"]\n label = layer_dict[\"label\"]\n layer_id = layer.get_id()\n\n visible = layer.get_visible()\n opacity = layer.get_opacity()\n control_id = DiagramElement.get_next_id()\n html_content += control_template%({\n \"id\":self.get_id(),\n \"control_label\":label,\n \"control_id\":control_id,\n \"visible\":\"true\" if visible else \"false\",\n \"opacity\":str(opacity),\n \"layer_id\":layer_id})\n js_content += js_template%({\n \"id\":self.get_id(),\n \"control_label\":label,\n \"control_id\":control_id,\n \"visible\":\"true\" if visible else \"false\",\n \"opacity\":str(opacity),\n \"layer_id\":layer_id})\n\n html_content += end\n self.set_html(html_content)\n self.set_js(js_content)\n self.add_event_producer(self,\"manage_layers\")\n self.add_event_consumer(self.chart,\"manage_layers\")\n\n def draw(self, d, cx, cy):\n super().draw(d, cx, cy)\n d.get_diagram().add_event_broker(self)\n","repo_name":"visualtopology/unearth","sub_path":"src/unearth/controllers/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71586142881","text":"\"\"\"Pose utils\"\"\"\nimport time\nimport logging\nimport datetime\n\nimport numpy as np\nimport torch\n\nfrom .. 
import comm\nfrom ..utils import AverageMeter\nfrom ..model_utils import save_model\nfrom ..optimizer import maybe_add_gradient_clipping\nfrom ..eval_utils.coco_eval import COCOEvaluator\n\nlogger = logging.getLogger(__name__)\n\ndef _detect_anomaly(losses, loss_dict, iteration):\n if not torch.isfinite(losses).all():\n raise FloatingPointError(\n \"Loss became infinite or NaN at iteration={}!\\nloss_dict = {}\".format(\n iteration, loss_dict\n )\n )\n\n\nclass DirectposePipeline:\n \"\"\"Train/eval directpose\"\"\"\n def __init__(self, base_iter, max_iter, model, dataloader, optimizer, scheduler,\n cfg, writer=None):\n logger.info('Configuration:\\n' + str(cfg))\n self.base_iter = base_iter\n self.max_iter = max_iter\n self.model = model\n self.dataloader = iter(dataloader)\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.cfg = cfg\n self.writer = writer\n self.iter_timer = AverageMeter()\n val_datasets = cfg.CONFIG.DATA.DATASET.VAL\n self.evaluators = [COCOEvaluator(\n val_dataset, cfg, distributed=cfg.DDP_CONFIG.DISTRIBUTED,\n output_dir=cfg.CONFIG.LOG.EVAL_DIR) for val_dataset in val_datasets]\n\n def train_step(self):\n cfg = self.cfg\n writer = self.writer\n self.model.train()\n end = time.perf_counter()\n data = next(self.dataloader)\n self.base_iter += 1\n if self.base_iter >= self.max_iter:\n return\n\n data_time = time.perf_counter() - end\n\n loss_dict = self.model(data)\n losses = sum(loss_dict.values())\n _detect_anomaly(losses, loss_dict, self.base_iter)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n\n self.optimizer.zero_grad()\n losses.backward()\n self.optimizer.step()\n\n batch_time = time.perf_counter() - end\n end = time.perf_counter()\n metrics_dict[\"batch_time\"] = batch_time\n\n # gather all metrics\n metrics_dict = {\n k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)\n for k, v in metrics_dict.items()\n }\n all_metrics_dict = comm.gather(metrics_dict)\n if self.base_iter % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n eta_str = None\n if \"data_time\" in all_metrics_dict[0]:\n # data_time among workers can have high variance. The actual latency\n # caused by data_time is the maximum among workers.\n data_time = np.max([x.pop(\"data_time\") for x in all_metrics_dict])\n if \"batch_time\" in all_metrics_dict[0]:\n # batch_time among workers can have high variance. 
The actual latency\n                # caused by batch_time is the maximum among workers.\n                batch_time = np.max([x.pop(\"batch_time\") for x in all_metrics_dict])\n                self.iter_timer.update(batch_time)\n                eta = (self.max_iter - self.base_iter) * self.iter_timer.avg\n                eta_str = str(datetime.timedelta(seconds=int(eta)))\n            # average the remaining metrics\n            metrics_dict = {\n                k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()\n            }\n            total_losses_reduced = sum(loss for loss in metrics_dict.values())\n            for param in self.optimizer.param_groups:\n                lr = param['lr']\n            print_string = 'Iter: [{0}/{1}]'.format(\n                self.base_iter, self.max_iter)\n            if eta_str is not None:\n                print_string += f' ETA: {eta_str} '\n            print_string += ' data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(\n                data_time=data_time, batch_time=batch_time)\n            print_string += ' loss: {loss:.5f}'.format(loss=total_losses_reduced)\n            print_string += ' lr: {lr:.6f}'.format(lr=lr)\n            iteration = self.base_iter\n            writer.add_scalar('total_loss', total_losses_reduced, iteration)\n            writer.add_scalar('learning_rate', lr, iteration)\n            if len(metrics_dict) > 1:\n                for km, kv in metrics_dict.items():\n                    writer.add_scalar(km, kv, iteration)\n                    print_string += f' {km}: {kv:.2f}'\n            logger.info(print_string)\n        self.scheduler.step()\n\n    def validate(self, val_loader):\n        if not self.evaluators:\n            logger.info('No evaluation data specified, skip...')\n            return\n        # save model before evaluation so we may recover from bugs\n        self.save_model()\n        is_training = self.model.training\n        self.model.eval()\n        for evaluator in self.evaluators:\n            evaluator.reset()\n\n        # inference\n        num_devices = comm.get_world_size()\n        total = len(val_loader)\n        logger.info(f\"Start evaluation on {total} images.\")\n        warmup_meter = min(5, total - 1)\n        start_time = time.perf_counter()\n        total_compute_time = 0\n        with torch.no_grad():\n            for idx, inputs in enumerate(val_loader):\n                if idx <= warmup_meter:\n                    # no timing during gpu warm up\n                    start_time = time.perf_counter()\n                    total_compute_time = 0\n\n                start_compute_time = time.perf_counter()\n                outputs = self.model(inputs)\n                if torch.cuda.is_available():\n                    torch.cuda.synchronize()\n                total_compute_time += time.perf_counter() - start_compute_time\n                for evaluator in self.evaluators:\n                    evaluator.process(inputs, outputs)\n                iters_after_start = idx + 1 - warmup_meter * int(idx >= warmup_meter)\n                seconds_per_img = total_compute_time / iters_after_start\n                if idx >= warmup_meter * 2 or seconds_per_img > 5:\n                    total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start\n                    eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))\n                    logger.info(f\"Inference done {idx + 1}/{total}. {seconds_per_img:.4f} sec/img. ETA={str(eta)}\")
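# The ETA strings logged above are plain timedelta arithmetic over the running
# average iteration time; a hedged standalone equivalent (constants invented):
import datetime
iters_left, sec_per_iter = 1234, 0.5
eta_str = str(datetime.timedelta(seconds=int(iters_left * sec_per_iter)))
assert eta_str == "0:10:17"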
\n\n        # Measure the time only for this worker (before the synchronization barrier)\n        total_time = time.perf_counter() - start_time\n        total_time_str = str(datetime.timedelta(seconds=total_time))\n        # NOTE this format is parsed by grep\n        logger.info(\n            \"Total inference time: {} ({:.6f} s / img per device, on {} devices)\".format(\n                total_time_str, total_time / (total - warmup_meter), num_devices\n            )\n        )\n        total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))\n        logger.info(\n            \"Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)\".format(\n                total_compute_time_str, total_compute_time / (total - warmup_meter), num_devices\n            )\n        )\n        for evaluator in self.evaluators:\n            results = evaluator.evaluate()\n            if self.cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n                assert isinstance(results, dict)\n                for task, res in results.items():\n                    # Don't print \"AP-category\" metrics since they are usually not tracked.\n                    important_res = [(k, v) for k, v in res.items() if \"-\" not in k]\n                    logger.info(\"copypaste: Task: {}\".format(task))\n                    logger.info(\"copypaste: \" + \",\".join([k[0] for k in important_res]))\n                    logger.info(\"copypaste: \" + \",\".join([\"{0:.4f}\".format(k[1]) for k in important_res]))\n                    for k in important_res:\n                        self.writer.add_scalar(':'.join((task, k[0])), k[1], self.base_iter)\n\n        # resume training state if applicable\n        self.model.train(is_training)\n\n    def save_model(self):\n        \"\"\"Save checkpoint\"\"\"\n        if self.cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n            save_model(self.model, self.optimizer, self.base_iter, self.cfg)\n\n\ndef build_pose_optimizer(cfg, model) -> torch.optim.Optimizer:\n    \"\"\"\n    Build an optimizer from config.\n    \"\"\"\n    norm_module_types = (\n        torch.nn.BatchNorm1d,\n        torch.nn.BatchNorm2d,\n        torch.nn.BatchNorm3d,\n        torch.nn.SyncBatchNorm,\n        # NaiveSyncBatchNorm inherits from BatchNorm2d\n        torch.nn.GroupNorm,\n        torch.nn.InstanceNorm1d,\n        torch.nn.InstanceNorm2d,\n        torch.nn.InstanceNorm3d,\n        torch.nn.LayerNorm,\n        torch.nn.LocalResponseNorm,\n    )\n    params: list[dict] = []\n    memo: set[torch.nn.parameter.Parameter] = set()\n    override: set[torch.nn.parameter.Parameter] = set()\n\n    for module in model.modules():\n        for key, value in module.named_parameters(recurse=False):\n            if not value.requires_grad:\n                continue\n            # Avoid duplicating parameters\n            if value in memo:\n                continue\n            memo.add(value)\n            lr = cfg.CONFIG.TRAIN.LR\n            weight_decay = cfg.CONFIG.TRAIN.W_DECAY\n            if isinstance(module, norm_module_types):\n                weight_decay = cfg.CONFIG.TRAIN.W_DECAY_NORM\n            elif key == \"bias\":\n                lr = cfg.CONFIG.TRAIN.LR\n                weight_decay = cfg.CONFIG.TRAIN.W_DECAY\n            if value in override:\n                raise NotImplementedError('KPS_GRAD_MULT not found')\n\n            params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n    optimizer = torch.optim.SGD(\n        params, cfg.CONFIG.TRAIN.LR, momentum=cfg.CONFIG.TRAIN.MOMENTUM)\n    optimizer = maybe_add_gradient_clipping(cfg, optimizer)\n    return optimizer\n","repo_name":"dmlc/gluon-cv","sub_path":"gluoncv/torch/utils/task_utils/pose.py","file_name":"pose.py","file_ext":"py","file_size_in_byte":9957,"program_lang":"python","lang":"en","doc_type":"code","stars":5662,"dataset":"github-code","pt":"54"}
{"seq_id":"42139580380","text":"numbers = []\ntotals = []\neveryone = []\nonlyone=[]\nprint(\"enter 5 numbers from 1 to 10\")\ni = 0\nwhile i < 5:\n    x = int(input(\"input a number: \"))\n    numbers.append(x)\n    i += 1\nprint(numbers)\nx = 0
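# A minimal sketch of the per-parameter grouping that build_pose_optimizer
# above performs: each tensor gets its own group, so norm layers and biases
# can take a different weight decay. Model and hyperparameters are invented.
import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.BatchNorm1d(4))
groups = []
for module in model.modules():
    for name, p in module.named_parameters(recurse=False):
        wd = 0.0 if isinstance(module, torch.nn.BatchNorm1d) or name == "bias" else 1e-4
        groups.append({"params": [p], "lr": 0.1, "weight_decay": wd})
opt = torch.optim.SGD(groups, lr=0.1, momentum=0.9)
assert len(opt.param_groups) == 4   # Linear weight/bias + BatchNorm weight/bias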
while x <= len(numbers) - 3:\n    i = x + 1\n    while i <= len(numbers) - 2:\n        j = i + 1\n        while j <= len(numbers) - 1:\n            # print(x,i,j)\n            total = numbers[x] + numbers[i] + numbers[j]\n            if total % 10 == 0:\n                ijx = [numbers[x], numbers[i], numbers[j]]\n                print(numbers[x], \"+\", numbers[i], \"+\", numbers[j], \"=\", total)\n                # print(ijx)\n                sumq = sum(numbers) - sum(ijx)\n                print(\"sum of the remaining two numbers:!!!\", sumq)\n                totals.append(ijx)\n\n            j += 1\n        i += 1\n    x += 1\nprint(totals)\nfor i in totals:\n    everyone.append(sorted(i))\nfor j in everyone:\n    if j not in onlyone:\n        onlyone.append(j)\nprint(\"unique values:\", onlyone)\nif len(totals) == 0:\n    print(\"this list has no match!!! you can check the sorted list\")\n    print(sorted(numbers))","repo_name":"linzhongxiazhi/student_python","sub_path":"学习/作业/斗牛7.0.py","file_name":"斗牛7.0.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"37691380180","text":"# this program will implement the merge sort\n# take care: written for python 3.5\n\ndef arraycopy(to, start, end, source):\n    \"copy the array\"\n    for index in range(start, end):\n        to[index] = source[index]\n\ndef mergeparts(v, start, middle, end, work):\n    \"merge the two parts into one in array v\"\n    ipart1 = start\n    ipart2 = middle\n    for index in range(start, end):\n        if ipart1 < middle and (ipart2 >= end or v[ipart1] <= v[ipart2]):\n            work[index] = v[ipart1]\n            ipart1 += 1\n        else:\n            work[index] = v[ipart2]\n            ipart2 += 1\n\ndef top2bottom(v, start, end, work):\n    \"top to bottom mode recursive\"\n    if(end - start >= 2):\n        imiddle = (start + end)//2\n        # split the array\n        top2bottom(v, start, imiddle, work)\n        top2bottom(v, imiddle, end, work)\n        # merge the two parts\n        mergeparts(v, start, imiddle, end, work)\n        # copy the sorted array\n        arraycopy(v, start, end, work)\n\ndef mergesort(v, n, work):\n    \"merge sort\"\n    top2bottom(v, 0, n, work)\n\n\nprint(\"\"\"\n    Merge sort\n    implement using python 3.5\n    top-bottom mode.\n\"\"\")\n\n# sort the list\narray = [1, 4, 89, 67, 90, 34, 56, 23, 15, 48]\nwork = list(range(len(array)))\n\nprint(\"source array: \", array)\n\n# use the merge sort\nmergesort(array, len(array), work)\n\nprint(\"after using merge sort: \", array)\n\ninput(\"\\n\\nPress enter to exit.\")\n","repo_name":"smileboywtu/Algorithms-Python3","sub_path":"sort/merge-sort/merge-sort.py","file_name":"merge-sort.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"40655948482","text":"from core.actions.BaseAction import BaseAction\nfrom common.utilities.utilities import create_dir, get_dir_path, does_dir_exist\nfrom common.error import *\nimport subprocess\nimport logging\nimport time\n\n\n_PWD = get_dir_path()\n_TEMP_PATH = \"{}/action_templates/c\".format(_PWD)\n_CODE_PATH = \"{}/virts/c\".format(_PWD)\n_EXEC_PATH = \"{}/virts/c\".format(_PWD)\n\nVIRTINE_PROC_IDENTIFIER = \"virtine\"\n\n\nclass CAction(BaseAction):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.runtime = 'c'\n\n\tdef update_parameters(self, vcode):\n\t\tinp_params = vcode.split('(')[1].split(')')[0]\n\t\tfor param in inp_params.split(','):\n\t\t\tparam = param.strip()\n\t\t\ttyp, nam = param.split(' ')\n\t\t\tself.parameters[nam] = typ\n\n\tdef preprocess_action(self, vname, vcode):\n\t\tself.update_parameters(vcode)\n\n\tdef cli_arg_convert(self, arg, typ):\n\t\tif(typ == \"int\"):\n\t\t\treturn \"atoi(\" + arg + \")\"\n\n\tdef insert_code(self, vname, vcode):\n\t\tself.preprocess_action(vname, vcode)\n\n\t\tplistdef = 
\"\t\"\n\t\tplistargs = \"\"\n\n\t\targc = 1\n\t\tfor key in self.parameters:\n\t\t\tplistdef += self.parameters[key] + \" \" + key + \" = \" + self.cli_arg_convert(\"argv[\" + str(argc) + \"]\", self.parameters[key]) + \";\\n\\t\"\n\t\t\tplistargs += key + \", \"\n\t\t\targc += 1\n\n\t\t# remove the trailing comma and space\n\t\tplistargs = plistargs[:-2]\n\n\t\tself.action_code = vcode\n\t\tfunc_code = None\n\t\twith open(\"{}/func.c\".format(_TEMP_PATH), 'r') as f:\n\t\t\tfunc_code = f.read()\n\t\tfunc_code = func_code.replace(\"####vcode####\", vcode)\n\t\tfunc_code = func_code.replace(\"####vname####\", self.action_name)\n\t\tfunc_code = func_code.replace(\"####plistdef####\", plistdef)\n\t\tfunc_code = func_code.replace(\"####plistargs####\", plistargs)\n\n\t\tcreate_dir(\"{}/{}/\".format(_CODE_PATH, self.action_name))\n\t\twith open(\"{}/{}/{}.c\".format(_CODE_PATH, self.action_name, self.action_name), 'w') as f:\n\t\t\tf.write(func_code)\n\n\tdef compile_code(self):\n\t\tcreate_dir(\"{}/{}/\".format(_EXEC_PATH, self.action_name))\n\t\top_file = \"{}/{}/{}_{}\".format(_CODE_PATH, self.action_name, VIRTINE_PROC_IDENTIFIER, self.action_name)\n\t\tp = subprocess.Popen(\n\t\t\t[\"vcc\",\n\t\t\t\"{}/{}/{}.c\".format(_CODE_PATH, self.action_name, self.action_name),\n\t\t\t\"-o\", op_file],\n\t\t\tstdout=subprocess.PIPE)\n\t\tout, err = p.communicate()\n\t\tout = out.decode()\n\n\t\tif err or \"warning:\" in out or \"error\" in out or not does_dir_exist(op_file):\n\t\t\traise ActionCompileError(self.action_name)\n\t\treturn RC_OK\n\t\t\n\tdef execute_code(self, vargs):\n\t\tstart = time.time()\n\t\targs = [str(_) for _ in list(vargs.values())]\n\t\tp = subprocess.Popen(\n\t\t\t[\"{}/{}/{}_{}\".format(_EXEC_PATH, self.action_name, VIRTINE_PROC_IDENTIFIER, self.action_name)]+args, \n\t\t\tstdout=subprocess.PIPE)\n\t\tout, err = p.communicate()\n\t\tend = time.time()\n\t\tlogging.log(logging.CRITICAL, \"exec_c: \"+str((end-start)*1000))\n\t\tif not err:\n\t\t\treturn {\n\t\t\t\t\"result\": out.decode(),\n\t\t\t\t\"runTime\": (end-start)*1000\n\t\t\t}\n\t\telse:\n\t\t\traise ActionInvokeError(self.action_name, err.decode())\n","repo_name":"virtines/vespid","sub_path":"core/actions/CAction.py","file_name":"CAction.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"18297757927","text":"import urllib.request,urllib.parse\nimport json\n\nserviceurl=\"http://maps.googleapis.com/maps/api/geocode/json?\"\n\nwhile True:\n    address=input(\"Enter the location\")\n    url=serviceurl+urllib.parse.urlencode({'address':address})\n\n    print('Retrieving',url)\n\n    uh=urllib.request.urlopen(url)\n    data=uh.read().decode()\n    print(\"Retrieved\",len(data),'Data')\n\n    try:\n        js=json.loads(data)\n    except:\n        js=None\n\n    if not js or 'status' not in js or js['status']!='OK':\n        print(\"===Failure in retrieving Data===\")\n        print(data)\n        continue\n    \n    print(json.dumps(js,indent=4))\n\n    lat=js['results'][0]['geometry']['location']['lat']\n    long = js['results'][0]['geometry']['location']['lng']\n\n    print(\"Latitude :\",lat)\n    print(\"Longitude :\",long)\n\n    location=js['results'][0]['formatted_address']\n\n    print(\"Location :\",location)\n","repo_name":"urguru/python-practice-py","sub_path":"geoloc.py","file_name":"geoloc.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"74228910882","text":"from 
__future__ import print_function, division\nimport unittest\nfrom pyscf.nao.m_thrj import thrj, thrj_nobuf\n\nclass KnowValues(unittest.TestCase):\n\n def test_thrj(self):\n \"\"\" \"\"\"\n from sympy.physics.wigner import wigner_3j\n for l1 in range(0,3):\n for l2 in range(0,3):\n for l3 in range(0,3):\n for m1 in range(-4,4+1):\n for m2 in range(-4,4+1):\n for m3 in range(-4,4+1):\n w3j1 = thrj(l1, l2, l3, m1, m2, m3)\n w3j2 = thrj_nobuf(l1, l2, l3, m1, m2, m3)\n w3j3 = float(wigner_3j(l1, l2, l3, m1, m2, m3))\n #print(w3j1, w3j2, w3j3, l1, l2, l3)\n self.assertAlmostEqual(w3j1, w3j2)\n self.assertAlmostEqual(w3j2, w3j3)\n \nif __name__ == \"__main__\": unittest.main()\n","repo_name":"pyscf/nao","sub_path":"pyscf/nao/test/test_0009_thrj.py","file_name":"test_0009_thrj.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"26735871111","text":"\"\"\"\nCode Wars tests:\n\nTest.assert_equals(prefill(3,1), [1,1,1])\nTest.assert_equals(prefill(2,'abc'), ['abc','abc'])\nTest.assert_equals(prefill('1',1), [1])\nTest.assert_equals(prefill(3, prefill(2,'2d')), [['2d','2d'],['2d','2d'],['2d','2d']])\n\"\"\"\nimport pytest\nfrom prefill_array import prefill\n\n\ncode_wars_tests = [\n ((3, 1), [1, 1, 1]),\n ((2, 'abc'), ['abc', 'abc']),\n (('1', 1), [1]),\n ((3, prefill(2, '2d')), [['2d', '2d'], ['2d', '2d'], ['2d', '2d']])\n]\n\nmy_tests = [\n ((4, 'carlos'), ['carlos', 'carlos', 'carlos', 'carlos']),\n ((3, None), [None, None, None]),\n]\n\n\n@pytest.mark.parametrize('entered, result', code_wars_tests)\ndef test_code_wars_prefill_array(entered, result):\n assert prefill(entered[0], entered[1]) == result\n\n\n@pytest.mark.parametrize('entered, result', my_tests)\ndef test_prefill_array_my_tests(entered, result):\n assert prefill(entered[0], entered[1]) == result\n","repo_name":"carloscadena/code-challenges","sub_path":"src/test_prefill_array.py","file_name":"test_prefill_array.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18632122372","text":"#IC272LAB3\r\n#Name :Srishti Ginjala\r\n#Rollno :B19084\r\n#Mobile Number:9440000900\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nimport math\r\n\r\ndf=pd.read_csv(\"seismic_bumps1.csv\")\r\ndf.drop(['nbumps','nbumps2','nbumps3','nbumps4','nbumps5','nbumps6','nbumps7','nbumps89'],axis=1,inplace=True)\r\ngrouped=df.groupby('class')\r\ndf_1=grouped.get_group(1)\r\ndf_0=grouped.get_group(0)\r\ny1=df_1['class']\r\ny0=df_0['class']\r\ndf_1.drop(['class'],axis=1,inplace=True)\r\ndf_0.drop(['class'],axis=1,inplace=True)\r\n\r\n#Question1\r\n[df_train1,df_test1,df_label_train1,df_label_test1] =train_test_split(df_1,y1,test_size=0.3,random_state=42,shuffle=True)\r\n[df_train0,df_test0,df_label_train0,df_label_test0] =train_test_split(df_0,y0,test_size=0.3,random_state=42,shuffle=True)\r\ndf_train=pd.concat([df_train0,df_train1])\r\ndf_label_train=pd.concat([df_label_train0,df_label_train1])\r\ndf_test=pd.concat([df_test0,df_test1])\r\ndf_label_test=pd.concat([df_label_test0,df_label_test1])\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n#k=1\r\nneigh = KNeighborsClassifier(n_neighbors=1)\r\nneigh.fit(df_train,df_label_train)\r\npred=neigh.predict(df_test)\r\nfrom sklearn.metrics import confusion_matrix\r\narr1= confusion_matrix(df_label_test,pred)\r\nprint(\"\\nFor k=1:\")\r\nprint(\"The 
confusion matrix is:\")\r\nprint(arr1)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,pred),3))\r\n\r\n#k=3\r\nneigh = KNeighborsClassifier(n_neighbors=3)\r\nneigh.fit(df_train,df_label_train)\r\npred=neigh.predict(df_test)\r\nfrom sklearn.metrics import confusion_matrix\r\narr3 = confusion_matrix(df_label_test,pred)\r\nprint(\"\\nFor k=3:\")\r\nprint(\"The confusion matrix is:\")\r\nprint(arr3)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,pred),3))\r\n\r\n#k=5\r\nneigh = KNeighborsClassifier(n_neighbors=5)\r\nneigh.fit(df_train,df_label_train)\r\npred=neigh.predict(df_test)\r\nfrom sklearn.metrics import confusion_matrix\r\narr5 = confusion_matrix(df_label_test,pred)\r\nprint(\"\\nFor k=5:\")\r\nprint(\"The confusion matrix is:\")\r\nprint(arr5)\r\nfrom sklearn.metrics import accuracy_score \r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,pred),3))\r\n\r\nprint(\"\\nThe value of accuracy is highest for k=5\")\r\n\r\n#Question2\r\natt=['seismic','seismoacoustic','shift','genergy','gpuls','gdenergy','gdpuls','ghazard','energy','maxenergy']\r\ndf_train_norm=df_train.copy()\r\ndf_test_norm=df_test.copy()\r\n\r\nfor i in att:\r\n df_test_norm[i]=(df_test_norm[i]-df_train_norm[i].min())/(df_train_norm[i].max()-df_train_norm[i].min())\r\n df_train_norm[i]=(df_train_norm[i]-df_train_norm[i].min())/(df_train_norm[i].max()-df_train_norm[i].min())\r\n\r\n#k=1\r\nneigh1 = KNeighborsClassifier(n_neighbors=1)\r\nneigh1.fit(df_train_norm,df_label_train)\r\npred1=neigh1.predict(df_test_norm)\r\nfrom sklearn.metrics import confusion_matrix\r\narr1=confusion_matrix(df_label_test,pred1)\r\nprint(\"\\nFor k=1:\")\r\nprint(\"The confusion matrix is:\")\r\nprint(arr1)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,pred1),3))\r\n\r\n#k=3\r\nneigh = KNeighborsClassifier(n_neighbors=3)\r\nneigh.fit(df_train_norm,df_label_train)\r\npred2=neigh.predict(df_test_norm)\r\nfrom sklearn.metrics import confusion_matrix\r\narr2 = confusion_matrix(df_label_test,pred2)\r\nprint(\"\\nFor k=3:\")\r\nprint(\"The confusion matrix is:\")\r\nprint(arr2)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,pred2),3))\r\n\r\n#k=5\r\nneigh = KNeighborsClassifier(n_neighbors=5)\r\nneigh.fit(df_train_norm,df_label_train)\r\npred3=neigh.predict(df_test_norm)\r\nfrom sklearn.metrics import confusion_matrix\r\narr3 = confusion_matrix(df_label_test,pred3)\r\nprint(\"\\nFor k=5:\")\r\nprint(\"The confusion matrix is:\")\r\nprint(arr3)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,pred3),3))\r\nprint(\"\\nThe value of accuracy is highest for k=5\")\r\n\r\n#Question3\r\ntest=df_test.to_numpy()\r\nm1=np.array(df_train1.mean())\r\nm0=np.array(df_train0.mean())\r\nc1=df_train1.cov().to_numpy()\r\nc0=df_train0.cov().to_numpy()\r\nprior1=len(df_train1)/(len(df_train0)+len(df_train1))\r\nprior0=len(df_train0)/(len(df_train0)+len(df_train1))\r\n\r\ndef prob(x,m,c):\r\n x_mu=x-m\r\n mahalanobis=np.linalg.multi_dot([x_mu.T,np.linalg.inv(c),x_mu])\r\n expo=math.exp(-mahalanobis/2)\r\n return(expo/((np.abs(np.linalg.det(c)))**0.5)*((2*math.pi)**5)) \r\ntest_pred=[]\r\nfor i in range(len(test)):\r\n x=test[i]\r\n 
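# Hedged note on the min-max scaling in Question2 above: the test column is
# deliberately scaled with the *training* min and max, and before the train
# column is overwritten. A tiny standalone illustration (made-up numbers):
import pandas as pd
train = pd.Series([0.0, 5.0, 10.0])
test = pd.Series([2.0, 12.0])
scaled_test = (test - train.min()) / (train.max() - train.min())
assert list(scaled_test) == [0.2, 1.2]   # values outside [0, 1] can occur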
plike1=prob(x,m1,c1)\r\n    plike0=prob(x,m0,c0)\r\n    pc0=(plike0*prior0)/(plike0*prior0+plike1*prior1)\r\n    pc1=(plike1*prior1)/(plike0*prior0+plike1*prior1)\r\n    if(pc0>pc1):\r\n        test_pred.append(0)\r\n    else:\r\n        test_pred.append(1)\r\nfrom sklearn.metrics import confusion_matrix\r\narr3 = confusion_matrix(df_label_test,test_pred)\r\nprint(\"\\nThe confusion matrix for Bayes Classifier is:\")\r\nprint(arr3)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"The accuracy_score is:\")\r\nprint(round(accuracy_score(df_label_test,test_pred),3))\r\n","repo_name":"Srishtiginjala/Programming_Assignments","sub_path":"Datascience3/Assignment 4/b19084_Assignment4/IC272lab4.py","file_name":"IC272lab4.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"27306949909","text":"#!/usr/bin/env python\n\nfrom .fm import manager, logger\n\nimport argparse\nimport logging\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-a', '--allocation-rule', help='back-allocation rule', default=0, type=int)\nparser.add_argument('-n', '--net-loss', help='output the net value instead of the gross value', action='store_true')\nparser.add_argument('-p', '--static-path', help='path to the folder containing the static files', default='input')\nparser.add_argument('-i', '--files-in', help='names of the input files', nargs='+')\nparser.add_argument('-o', '--files-out', help='names of the output files', nargs='+')\nparser.add_argument('-l', '--low-memory', help='in low memory mode, loss arrays are stored in memory map', action='store_true')\nparser.add_argument('--sort-output', help='sort the output stream by item_id', action='store_true')\nparser.add_argument('--storage-method', help='store data as \"dense\" or \"sparse\"', default='sparse')\nparser.add_argument('--create-financial-structure-files', help='create financial structure', action='store_true')\nparser.add_argument('-v', '--logging-level', help='logging level (debug:10, info:20, warning:30, error:40, critical:50)',\n                    default=30, type=int)\nparser.add_argument('-S', '--step-policies', help='not used, kept for backward compatibility with fmcalc', action='store_true')\n\n\ndef main():\n    kwargs = vars(parser.parse_args())\n\n    # add handler to fm logger\n    ch = logging.StreamHandler()\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    logging_level = kwargs.pop('logging_level')\n    logger.setLevel(logging_level)\n\n    manager.run(**kwargs)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"OasisLMF/OasisLMF","sub_path":"oasislmf/pytools/fmpy.py","file_name":"fmpy.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"54"}
{"seq_id":"4748488991","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import linear_model, metrics, preprocessing, decomposition\n\n## LOAD THE DATA\ndata = np.load('dane/pure_landmarks_gender.npy')\nX, y = data[:, :-1], data[:, -1]\ny_labels = ('Male', 'Female')\n\n## PREPROCESSING\n## remove the one bad face\nX = np.delete(X, (8656), axis=0)\ny = np.delete(y, (8656), axis=0)\n## zero the chin point\nX[:, ::2] -= X[:, 0].reshape((X.shape[0], 1))\nX[:, 1::2] -= X[:, 1].reshape((X.shape[0], 1))\n## rotation\nfor row in range(len(y)):\n    xx, yy = X[row, ::2], X[row, 1::2]\n    xa, xb = xx[72], xx[105]\n    ya, yb = yy[72], yy[105]
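# Standalone check of the 2-D rotation this loop applies next: R(theta)
# rotates the landmarks so the eye line (points 72 and 105 above) becomes
# horizontal. The sample angle below is arbitrary.
import numpy as np
theta = np.pi / 6
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
rx, ry = np.dot(R, [[1.0], [0.0]])
assert np.allclose([rx[0], ry[0]], [np.cos(theta), np.sin(theta)])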
\n\n    theta = -np.arctan((ya-yb)/(xa-xb))\n    if (ya-yb)/(xa-xb) < -8:\n        theta -= np.pi\n    \n    R = np.array([[np.cos(theta), -np.sin(theta)],\n                  [np.sin(theta), np.cos(theta)]])\n    \n    xx, yy = np.dot(R, [xx, yy])\n## scaling\n    xx /= max(np.absolute(xx))\n    yy /= max(np.absolute(yy))\n## write back into X\n    X[row, ::2] = xx\n    X[row, 1::2] = yy\nX = decomposition.PCA().fit_transform(X)\n\n## CREATE THE CLASSIFIER OBJECT WITH CROSS-VALIDATION\nCs = np.linspace(10, 12, 60)\nclf = linear_model.LogisticRegressionCV(Cs = Cs, fit_intercept=True, max_iter=10000, n_jobs=-1).fit(X, y)\nprint(clf.score(X, y))\nprint(clf.scores_)\nprint(clf.C_)\n\n## CREATE THE CONFUSION MATRICES\nfig, (ax1, ax2) = plt.subplots(2)\nfig.suptitle('Confusion matrices (not)normalized')\n\nax1.set_title('Not normalized')\nconf_mat_disp = metrics.plot_confusion_matrix(clf, X, y, display_labels=y_labels, cmap=plt.cm.Blues, ax=ax1)\nax2.set_title('Normalized over the true values')\nconf_mat_disp_normalized = metrics.plot_confusion_matrix(clf, X, y, display_labels=y_labels, normalize='true', cmap=plt.cm.Blues, ax=ax2)\n\nplt.show()\n","repo_name":"rafjar/Gender_recognition","sub_path":"grid/reg_log.py","file_name":"reg_log.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33042562658","text":"import os\nimport re\nimport shutil\n\nfrom rest_framework import serializers\nfrom django.contrib.auth.models import User, Group\n\nfrom cvat.apps.engine import models\nfrom cvat.apps.engine.log import slogger\n\n\nclass AttributeSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.AttributeSpec\n        fields = ('id', 'name', 'mutable', 'input_type', 'default_value',\n            'values')\n\n    # pylint: disable=no-self-use\n    def to_internal_value(self, data):\n        attribute = data.copy()\n        attribute['values'] = '\\n'.join(map(lambda x: x.strip(), data.get('values', [])))\n        return attribute\n\n    def to_representation(self, instance):\n        if instance:\n            attribute = super().to_representation(instance)\n            attribute['values'] = attribute['values'].split('\\n')\n        else:\n            attribute = instance\n\n        return attribute\n\nclass LabelSerializer(serializers.ModelSerializer):\n    attributes = AttributeSerializer(many=True, source='attributespec_set',\n        default=[])\n    class Meta:\n        model = models.Label\n        fields = ('id', 'name', 'attributes')\n\nclass JobCommitSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.JobCommit\n        fields = ('id', 'version', 'author', 'message', 'timestamp')\n\nclass JobSerializer(serializers.ModelSerializer):\n    task_id = serializers.ReadOnlyField(source=\"segment.task.id\")\n    start_frame = serializers.ReadOnlyField(source=\"segment.start_frame\")\n    stop_frame = serializers.ReadOnlyField(source=\"segment.stop_frame\")\n\n    class Meta:\n        model = models.Job\n        fields = ('url', 'id', 'assignee', 'status', 'start_frame',\n            'stop_frame', 'task_id')\n\nclass SimpleJobSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.Job\n        fields = ('url', 'id', 'assignee', 'status')\n\nclass SegmentSerializer(serializers.ModelSerializer):\n    jobs = SimpleJobSerializer(many=True, source='job_set')\n\n    class Meta:\n        model = models.Segment\n        fields = ('start_frame', 'stop_frame', 'jobs')\n\nclass ClientFileSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = models.ClientFile\n        fields = ('file', )\n\n    # pylint: disable=no-self-use\n    def to_internal_value(self, data):\n        return {'file': 
data}\n\n # pylint: disable=no-self-use\n def to_representation(self, instance):\n if instance:\n upload_dir = instance.data.get_upload_dirname()\n return instance.file.path[len(upload_dir) + 1:]\n else:\n return instance\n\nclass ServerFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.ServerFile\n fields = ('file', )\n\n # pylint: disable=no-self-use\n def to_internal_value(self, data):\n return {'file': data}\n\n # pylint: disable=no-self-use\n def to_representation(self, instance):\n return instance.file if instance else instance\n\nclass RemoteFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RemoteFile\n fields = ('file', )\n\n # pylint: disable=no-self-use\n def to_internal_value(self, data):\n return {'file': data}\n\n # pylint: disable=no-self-use\n def to_representation(self, instance):\n return instance.file if instance else instance\n\nclass RqStatusSerializer(serializers.Serializer):\n state = serializers.ChoiceField(choices=[\n \"Queued\", \"Started\", \"Finished\", \"Failed\"])\n message = serializers.CharField(allow_blank=True, default=\"\")\n\nclass WriteOnceMixin:\n \"\"\"Adds support for write once fields to serializers.\n\n To use it, specify a list of fields as `write_once_fields` on the\n serializer's Meta:\n ```\n class Meta:\n model = SomeModel\n fields = '__all__'\n write_once_fields = ('collection', )\n ```\n\n Now the fields in `write_once_fields` can be set during POST (create),\n but cannot be changed afterwards via PUT or PATCH (update).\n Inspired by http://stackoverflow.com/a/37487134/627411.\n \"\"\"\n\n def get_extra_kwargs(self):\n extra_kwargs = super().get_extra_kwargs()\n\n # We're only interested in PATCH/PUT.\n if 'update' in getattr(self.context.get('view'), 'action', ''):\n return self._set_write_once_fields(extra_kwargs)\n\n return extra_kwargs\n\n def _set_write_once_fields(self, extra_kwargs):\n \"\"\"Set all fields in `Meta.write_once_fields` to read_only.\"\"\"\n write_once_fields = getattr(self.Meta, 'write_once_fields', None)\n if not write_once_fields:\n return extra_kwargs\n\n if not isinstance(write_once_fields, (list, tuple)):\n raise TypeError(\n 'The `write_once_fields` option must be a list or tuple. 
'\n 'Got {}.'.format(type(write_once_fields).__name__)\n )\n\n for field_name in write_once_fields:\n kwargs = extra_kwargs.get(field_name, {})\n kwargs['read_only'] = True\n extra_kwargs[field_name] = kwargs\n\n return extra_kwargs\n\nclass DataSerializer(serializers.ModelSerializer):\n image_quality = serializers.IntegerField(min_value=0, max_value=100)\n use_zip_chunks = serializers.BooleanField(default=False)\n client_files = ClientFileSerializer(many=True, default=[])\n server_files = ServerFileSerializer(many=True, default=[])\n remote_files = RemoteFileSerializer(many=True, default=[])\n\n class Meta:\n model = models.Data\n fields = ('chunk_size', 'size', 'image_quality', 'start_frame', 'stop_frame', 'frame_filter',\n 'compressed_chunk_type', 'original_chunk_type', 'client_files', 'server_files', 'remote_files', 'use_zip_chunks')\n\n # pylint: disable=no-self-use\n def validate_frame_filter(self, value):\n match = re.search(\"step\\s*=\\s*([1-9]\\d*)\", value)\n if not match:\n raise serializers.ValidationError(\"Invalid frame filter expression\")\n return value\n\n # pylint: disable=no-self-use\n def validate_chunk_size(self, value):\n if not value > 0:\n raise serializers.ValidationError('Chunk size must be a positive integer')\n return value\n\n # pylint: disable=no-self-use\n def validate(self, data):\n if 'start_frame' in data and 'stop_frame' in data \\\n and data['start_frame'] > data['stop_frame']:\n raise serializers.ValidationError('Stop frame must be more or equal start frame')\n return data\n\n # pylint: disable=no-self-use\n def create(self, validated_data):\n client_files = validated_data.pop('client_files')\n server_files = validated_data.pop('server_files')\n remote_files = validated_data.pop('remote_files')\n validated_data.pop('use_zip_chunks')\n db_data = models.Data.objects.create(**validated_data)\n\n data_path = db_data.get_data_dirname()\n if os.path.isdir(data_path):\n shutil.rmtree(data_path)\n\n os.makedirs(db_data.get_compressed_cache_dirname())\n os.makedirs(db_data.get_original_cache_dirname())\n os.makedirs(db_data.get_upload_dirname())\n\n for f in client_files:\n client_file = models.ClientFile(data=db_data, **f)\n client_file.save()\n\n for f in server_files:\n server_file = models.ServerFile(data=db_data, **f)\n server_file.save()\n\n for f in remote_files:\n remote_file = models.RemoteFile(data=db_data, **f)\n remote_file.save()\n\n db_data.save()\n return db_data\n\nclass TaskSerializer(WriteOnceMixin, serializers.ModelSerializer):\n labels = LabelSerializer(many=True, source='label_set', partial=True)\n segments = SegmentSerializer(many=True, source='segment_set', read_only=True)\n data_chunk_size = serializers.ReadOnlyField(source='data.chunk_size')\n data_compressed_chunk_type = serializers.ReadOnlyField(source='data.compressed_chunk_type')\n data_original_chunk_type = serializers.ReadOnlyField(source='data.original_chunk_type')\n size = serializers.ReadOnlyField(source='data.size')\n image_quality = serializers.ReadOnlyField(source='data.image_quality')\n data = serializers.ReadOnlyField(source='data.id')\n\n class Meta:\n model = models.Task\n fields = ('url', 'id', 'name', 'mode', 'owner', 'assignee',\n 'bug_tracker', 'created_date', 'updated_date', 'overlap',\n 'segment_size', 'z_order', 'status', 'labels', 'segments',\n 'project', 'data_chunk_size', 'data_compressed_chunk_type', 'data_original_chunk_type', 'size', 'image_quality', 'data')\n read_only_fields = ('mode', 'created_date', 'updated_date', 'status', 'data_chunk_size',\n 
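# The frame filter validated above must contain "step=<positive integer>";
# a standalone look at the same pattern (raw-string form of the regex):
import re
pattern = r"step\s*=\s*([1-9]\d*)"
assert re.search(pattern, "step=5").group(1) == "5"
assert re.search(pattern, "step = 0") is None   # a zero step is rejected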
'data_compressed_chunk_type', 'data_original_chunk_type', 'size', 'image_quality', 'data')\n write_once_fields = ('overlap', 'segment_size')\n ordering = ['-id']\n\n # pylint: disable=no-self-use\n def create(self, validated_data):\n labels = validated_data.pop('label_set')\n db_task = models.Task.objects.create(**validated_data)\n for label in labels:\n attributes = label.pop('attributespec_set')\n db_label = models.Label.objects.create(task=db_task, **label)\n for attr in attributes:\n models.AttributeSpec.objects.create(label=db_label, **attr)\n\n task_path = db_task.get_task_dirname()\n if os.path.isdir(task_path):\n shutil.rmtree(task_path)\n\n os.makedirs(db_task.get_task_logs_dirname())\n os.makedirs(db_task.get_task_artifacts_dirname())\n\n db_task.save()\n return db_task\n\n # pylint: disable=no-self-use\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.owner = validated_data.get('owner', instance.owner)\n instance.assignee = validated_data.get('assignee', instance.assignee)\n instance.bug_tracker = validated_data.get('bug_tracker',\n instance.bug_tracker)\n instance.z_order = validated_data.get('z_order', instance.z_order)\n instance.project = validated_data.get('project', instance.project)\n labels = validated_data.get('label_set', [])\n for label in labels:\n attributes = label.pop('attributespec_set', [])\n (db_label, created) = models.Label.objects.get_or_create(task=instance,\n name=label['name'])\n if created:\n slogger.task[instance.id].info(\"New {} label was created\"\n .format(db_label.name))\n else:\n slogger.task[instance.id].info(\"{} label was updated\"\n .format(db_label.name))\n for attr in attributes:\n (db_attr, created) = models.AttributeSpec.objects.get_or_create(\n label=db_label, name=attr['name'], defaults=attr)\n if created:\n slogger.task[instance.id].info(\"New {} attribute for {} label was created\"\n .format(db_attr.name, db_label.name))\n else:\n slogger.task[instance.id].info(\"{} attribute for {} label was updated\"\n .format(db_attr.name, db_label.name))\n\n # FIXME: need to update only \"safe\" fields\n db_attr.default_value = attr.get('default_value', db_attr.default_value)\n db_attr.mutable = attr.get('mutable', db_attr.mutable)\n db_attr.input_type = attr.get('input_type', db_attr.input_type)\n db_attr.values = attr.get('values', db_attr.values)\n db_attr.save()\n\n instance.save()\n return instance\n\nclass ProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Project\n fields = ('url', 'id', 'name', 'owner', 'assignee', 'bug_tracker',\n 'created_date', 'updated_date', 'status')\n read_only_fields = ('created_date', 'updated_date', 'status')\n ordering = ['-id']\n\nclass BasicUserSerializer(serializers.ModelSerializer):\n def validate(self, data):\n if hasattr(self, 'initial_data'):\n unknown_keys = set(self.initial_data.keys()) - set(self.fields.keys())\n if unknown_keys:\n if set(['is_staff', 'is_superuser', 'groups']) & unknown_keys:\n message = 'You do not have permissions to access some of' + \\\n ' these fields: {}'.format(unknown_keys)\n else:\n message = 'Got unknown fields: {}'.format(unknown_keys)\n raise serializers.ValidationError(message)\n return data\n\n class Meta:\n model = User\n fields = ('url', 'id', 'username', 'first_name', 'last_name')\n ordering = ['-id']\n\nclass UserSerializer(serializers.ModelSerializer):\n groups = serializers.SlugRelatedField(many=True,\n slug_field='name', queryset=Group.objects.all())\n\n class Meta:\n 
model = User\n fields = ('url', 'id', 'username', 'first_name', 'last_name', 'email',\n 'groups', 'is_staff', 'is_superuser', 'is_active', 'last_login',\n 'date_joined')\n read_only_fields = ('last_login', 'date_joined')\n write_only_fields = ('password', )\n ordering = ['-id']\n\nclass ExceptionSerializer(serializers.Serializer):\n system = serializers.CharField(max_length=255)\n client = serializers.CharField(max_length=255)\n time = serializers.DateTimeField()\n\n job_id = serializers.IntegerField(required=False)\n task_id = serializers.IntegerField(required=False)\n proj_id = serializers.IntegerField(required=False)\n client_id = serializers.IntegerField()\n\n message = serializers.CharField(max_length=4096)\n filename = serializers.URLField()\n line = serializers.IntegerField()\n column = serializers.IntegerField()\n stack = serializers.CharField(max_length=8192,\n style={'base_template': 'textarea.html'}, allow_blank=True)\n\nclass AboutSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=128)\n description = serializers.CharField(max_length=2048)\n version = serializers.CharField(max_length=64)\n\nclass FrameMetaSerializer(serializers.Serializer):\n width = serializers.IntegerField()\n height = serializers.IntegerField()\n name = serializers.CharField(max_length=1024)\n\nclass DataMetaSerializer(serializers.ModelSerializer):\n frames = FrameMetaSerializer(many=True, allow_null=True)\n image_quality = serializers.IntegerField(min_value=0, max_value=100)\n\n class Meta:\n model = models.Data\n fields = (\n 'chunk_size',\n 'size',\n 'image_quality',\n 'start_frame',\n 'stop_frame',\n 'frame_filter',\n 'frames',\n )\n read_only_fields = (\n 'chunk_size',\n 'size',\n 'image_quality',\n 'start_frame',\n 'stop_frame',\n 'frame_filter',\n 'frames',\n )\n\nclass AttributeValSerializer(serializers.Serializer):\n spec_id = serializers.IntegerField()\n value = serializers.CharField(max_length=4096, allow_blank=True)\n\n def to_internal_value(self, data):\n data['value'] = str(data['value'])\n return super().to_internal_value(data)\n\nclass AnnotationSerializer(serializers.Serializer):\n id = serializers.IntegerField(default=None, allow_null=True)\n frame = serializers.IntegerField(min_value=0)\n label_id = serializers.IntegerField(min_value=0)\n group = serializers.IntegerField(min_value=0, allow_null=True)\n\nclass LabeledImageSerializer(AnnotationSerializer):\n attributes = AttributeValSerializer(many=True,\n source=\"labeledimageattributeval_set\")\n\nclass ShapeSerializer(serializers.Serializer):\n type = serializers.ChoiceField(choices=models.ShapeType.choices())\n occluded = serializers.BooleanField()\n z_order = serializers.IntegerField(default=0)\n points = serializers.ListField(\n child=serializers.FloatField(),\n allow_empty=False,\n )\n\nclass LabeledShapeSerializer(ShapeSerializer, AnnotationSerializer):\n attributes = AttributeValSerializer(many=True,\n source=\"labeledshapeattributeval_set\")\n\nclass TrackedShapeSerializer(ShapeSerializer):\n id = serializers.IntegerField(default=None, allow_null=True)\n frame = serializers.IntegerField(min_value=0)\n outside = serializers.BooleanField()\n attributes = AttributeValSerializer(many=True,\n source=\"trackedshapeattributeval_set\")\n\nclass LabeledTrackSerializer(AnnotationSerializer):\n shapes = TrackedShapeSerializer(many=True, allow_empty=False,\n source=\"trackedshape_set\")\n attributes = AttributeValSerializer(many=True,\n source=\"labeledtrackattributeval_set\")\n\nclass 
LabeledDataSerializer(serializers.Serializer):\n version = serializers.IntegerField()\n tags = LabeledImageSerializer(many=True)\n shapes = LabeledShapeSerializer(many=True)\n tracks = LabeledTrackSerializer(many=True)\n\nclass FileInfoSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=1024)\n type = serializers.ChoiceField(choices=[\"REG\", \"DIR\"])\n\nclass PluginSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Plugin\n fields = ('name', 'description', 'maintainer', 'created_at',\n 'updated_at')\n\nclass LogEventSerializer(serializers.Serializer):\n job_id = serializers.IntegerField(required=False)\n task_id = serializers.IntegerField(required=False)\n proj_id = serializers.IntegerField(required=False)\n client_id = serializers.IntegerField()\n\n name = serializers.CharField(max_length=64)\n time = serializers.DateTimeField()\n message = serializers.CharField(max_length=4096, required=False)\n payload = serializers.DictField(required=False)\n is_active = serializers.BooleanField()\n\nclass AnnotationFileSerializer(serializers.Serializer):\n annotation_file = serializers.FileField()","repo_name":"TaSeeMba/cvat","sub_path":"cvat/apps/engine/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":18084,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"8291756859","text":"\"\"\"\n228. 汇总区间\n数组\n简单\n\n\n给定一个  无重复元素 的 有序 整数数组 nums 。\n\n返回 恰好覆盖数组中所有数字 的 最小有序 区间范围列表 。也就是说,nums 的每个元素都恰好被某个区间范围所覆盖,并且不存在属于某个范围但不属于 nums 的数字 x 。\n\n列表中的每个区间范围 [a,b] 应该按如下格式输出:\n\n\"a->b\" ,如果 a != b\n\"a\" ,如果 a == b\n \n\n示例 1:\n\n输入:nums = [0,1,2,4,5,7]\n输出:[\"0->2\",\"4->5\",\"7\"]\n解释:区间范围是:\n[0,2] --> \"0->2\"\n[4,5] --> \"4->5\"\n[7,7] --> \"7\"\n示例 2:\n\n输入:nums = [0,2,3,4,6,8,9]\n输出:[\"0\",\"2->4\",\"6\",\"8->9\"]\n解释:区间范围是:\n[0,0] --> \"0\"\n[2,4] --> \"2->4\"\n[6,6] --> \"6\"\n[8,9] --> \"8->9\"\n \n\n提示:\n\n0 <= nums.length <= 20\n-2^31 <= nums[i] <= 2^31 - 1\nnums 中的所有值都 互不相同\nnums 按升序排列\n\n来源:力扣(LeetCode)\n链接:https://leetcode.cn/problems/summary-ranges\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def summaryRanges(self, nums: List[int]) -> List[str]:\n if not nums:\n return []\n start = nums[0]\n end = nums[0]\n ret = []\n for i in nums[1:]:\n if end + 1 == i:\n end = i\n else:\n ret.append('{}'.format(start) if start == end else '{}->{}'.format(start, end))\n start = i\n end = i\n ret.append('{}'.format(start) if start == end else '{}->{}'.format(start, end))\n return ret\n\n\nif __name__ == '__main__':\n solution = Solution()\n\n result = solution.summaryRanges([0, 1, 2, 4, 5, 7])\n print(result)\n assert result == [\"0->2\", \"4->5\", \"7\"]\n\n result = solution.summaryRanges([0, 2, 3, 4, 6, 8, 9])\n print(result)\n assert result == [\"0\", \"2->4\", \"6\", \"8->9\"]\n","repo_name":"geeknonerd/leetcode","sub_path":"search/summary_ranges.py","file_name":"summary_ranges.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"zh","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"73953437922","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pickle\nimport os\n\nimport ray\nfrom ray.rllib.agent import Agent\nfrom ray.rllib.a3c.envs import create_and_wrap\nfrom ray.rllib.a3c.runner import RemoteRunner\nfrom ray.rllib.a3c.shared_model import SharedModel\nfrom ray.rllib.a3c.shared_model_lstm import SharedModelLSTM\nfrom 
ray.tune.result import TrainingResult\n\n\nDEFAULT_CONFIG = {\n \"num_workers\": 4,\n \"num_batches_per_iteration\": 100,\n \"batch_size\": 10,\n \"use_lstm\": True,\n \"use_pytorch\": False,\n \"model\": {\"grayscale\": True,\n \"zero_mean\": False,\n \"dim\": 42,\n \"channel_major\": False}\n}\n\n\nclass A3CAgent(Agent):\n _agent_name = \"A3C\"\n _default_config = DEFAULT_CONFIG\n\n def _init(self):\n self.env = create_and_wrap(self.env_creator, self.config[\"model\"])\n if self.config[\"use_lstm\"]:\n policy_cls = SharedModelLSTM\n elif self.config[\"use_pytorch\"]:\n from ray.rllib.a3c.shared_torch_policy import SharedTorchPolicy\n policy_cls = SharedTorchPolicy\n else:\n policy_cls = SharedModel\n self.policy = policy_cls(\n self.env.observation_space.shape, self.env.action_space)\n self.agents = [\n RemoteRunner.remote(self.env_creator, policy_cls, i,\n self.config[\"batch_size\"],\n self.config[\"model\"], self.logdir)\n for i in range(self.config[\"num_workers\"])]\n self.parameters = self.policy.get_weights()\n\n def _train(self):\n gradient_list = [\n agent.compute_gradient.remote(self.parameters)\n for agent in self.agents]\n max_batches = self.config[\"num_batches_per_iteration\"]\n batches_so_far = len(gradient_list)\n while gradient_list:\n done_id, gradient_list = ray.wait(gradient_list)\n gradient, info = ray.get(done_id)[0]\n self.policy.apply_gradients(gradient)\n self.parameters = self.policy.get_weights()\n if batches_so_far < max_batches:\n batches_so_far += 1\n gradient_list.extend(\n [self.agents[info[\"id\"]].compute_gradient.remote(\n self.parameters)])\n res = self._fetch_metrics_from_workers()\n return res\n\n def _fetch_metrics_from_workers(self):\n episode_rewards = []\n episode_lengths = []\n metric_lists = [\n a.get_completed_rollout_metrics.remote() for a in self.agents]\n for metrics in metric_lists:\n for episode in ray.get(metrics):\n episode_lengths.append(episode.episode_length)\n episode_rewards.append(episode.episode_reward)\n avg_reward = (\n np.mean(episode_rewards) if episode_rewards else float('nan'))\n avg_length = (\n np.mean(episode_lengths) if episode_lengths else float('nan'))\n timesteps = np.sum(episode_lengths) if episode_lengths else 0\n\n result = TrainingResult(\n episode_reward_mean=avg_reward,\n episode_len_mean=avg_length,\n timesteps_this_iter=timesteps,\n info={})\n\n return result\n\n def _save(self):\n checkpoint_path = os.path.join(\n self.logdir, \"checkpoint-{}\".format(self.iteration))\n objects = [self.parameters]\n pickle.dump(objects, open(checkpoint_path, \"wb\"))\n return checkpoint_path\n\n def _restore(self, checkpoint_path):\n objects = pickle.load(open(checkpoint_path, \"rb\"))\n self.parameters = objects[0]\n self.policy.set_weights(self.parameters)\n\n def compute_action(self, observation):\n actions = self.policy.compute_action(observation)\n return actions[0]\n","repo_name":"Eashurox/CPDP_ML","sub_path":"Dataset/ML Projects/ray_versions/ray-ray-0.3.0/python/ray/rllib/a3c/a3c.py","file_name":"a3c.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5956145659","text":"from enum import Enum\n\n# Third Party\nimport numpy as np\nimport torch\nimport torch.autograd.profiler as profiler\n\n# CuRobo\nfrom curobo.types.base import TensorDeviceType\n\n\nclass SquashType(Enum):\n CLAMP = 0\n CLAMP_RESCALE = 1\n TANH = 2\n IDENTITY = 3\n\n\ndef scale_ctrl(ctrl, action_lows, action_highs, squash_fn: SquashType = 
SquashType.CLAMP):\n if len(ctrl.shape) == 1:\n ctrl = ctrl.unsqueeze(0).unsqueeze(-1)\n # ctrl = ctrl[np.newaxis, :, np.newaxis] # TODO: does this work with gpu pytorch?\n act_half_range = (action_highs - action_lows) / 2.0\n act_mid_range = (action_highs + action_lows) / 2.0\n if squash_fn == SquashType.CLAMP:\n # ctrl = torch.clamp(ctrl, action_lows[0], action_highs[0])\n ctrl = torch.max(torch.min(ctrl, action_highs), action_lows)\n return ctrl\n elif squash_fn == SquashType.CLAMP_RESCALE:\n ctrl = torch.clamp(ctrl, -1.0, 1.0)\n elif squash_fn == SquashType.TANH:\n ctrl = torch.tanh(ctrl)\n elif squash_fn == SquashType.IDENTITY:\n return ctrl\n return act_mid_range.unsqueeze(0) + ctrl * act_half_range.unsqueeze(0)\n\n\n#######################\n## STOMP Covariance ##\n#######################\n\n\n@profiler.record_function(\"particle_opt_utils/get_stomp_cov\")\ndef get_stomp_cov(\n horizon: int,\n d_action: int,\n tensor_args=TensorDeviceType(),\n cov_mode=\"acc\",\n RETURN_M=False,\n):\n \"\"\"Computes the covariance matrix following STOMP motion planner\n\n Coefficients from here: https://en.wikipedia.org/wiki/Finite_difference_coefficient\n More info here: https://github.com/ros-industrial/stomp_ros/blob/7fe40fbe6ad446459d8d4889916c64e276dbf882/stomp_core/src/utils.cpp#L36\n \"\"\"\n cov, scale_tril, scaled_M = get_stomp_cov_jit(\n horizon, d_action, cov_mode, device=tensor_args.device\n )\n cov = tensor_args.to_device(cov)\n scale_tril = tensor_args.to_device(scale_tril)\n if RETURN_M:\n return cov, scale_tril, tensor_args.to_device(scaled_M)\n return cov, scale_tril\n\n\n@torch.jit.script\ndef get_stomp_cov_jit(\n horizon: int,\n d_action: int,\n cov_mode: str = \"acc\",\n device: torch.device = torch.device(\"cuda:0\"),\n):\n # This function can lead to nans. 
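# (editor's sketch, not part of the original file) a quick sanity check of the\n# second-difference stencil [1, -2, 1] that vel_fd_array encodes below: for\n# f(t) = t**2 sampled on an integer grid it recovers f'' = 2 exactly, e.g.\n#\n#   import torch\n#   f = torch.arange(6.0) ** 2            # 0, 1, 4, 9, 16, 25\n#   acc = f[:-2] - 2.0 * f[1:-1] + f[2:]  # centered [1, -2, 1] stencil\n#   assert torch.all(acc == 2.0)\n#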
There are checks to raise an error when nan occurs.\n vel_fd_array = [0.0, 0.0, 1.0, -2.0, 1.0, 0.0, 0.0]\n\n fd_array = vel_fd_array\n A = torch.zeros(\n (d_action * horizon, d_action * horizon),\n dtype=torch.float32,\n device=device,\n )\n\n if cov_mode == \"vel\":\n for k in range(d_action):\n for i in range(0, horizon):\n for j in range(-3, 4):\n # print(j)\n index = i + j\n if index < 0:\n index = 0\n continue\n if index >= horizon:\n index = horizon - 1\n continue\n A[k * horizon + i, k * horizon + index] = fd_array[j + 3]\n elif cov_mode == \"acc\":\n for k in range(d_action):\n for i in range(0, horizon):\n for j in range(-3, 4):\n index = i + j\n if index < 0:\n index = 0\n continue\n if index >= horizon:\n index = horizon - 1\n continue\n\n if index >= horizon / 2:\n A[k * horizon + i, k * horizon - index - horizon // 2 - 1] = fd_array[j + 3]\n else:\n A[k * horizon + i, k * horizon + index] = fd_array[j + 3]\n\n R = torch.matmul(A.transpose(-2, -1), A)\n M = torch.inverse(R)\n scaled_M = (1 / horizon) * M / (torch.max(torch.abs(M), dim=1)[0].unsqueeze(0))\n cov = M / torch.max(torch.abs(M))\n\n # also compute the cholesky decomposition:\n # scale_tril = torch.zeros((d_action * horizon, d_action * horizon), **tensor_args)\n if (cov == cov.T).all() and (torch.linalg.eigvals(cov).real >= 0).all():\n scale_tril = torch.linalg.cholesky(cov)\n else:\n scale_tril = cov\n\n \"\"\"\n k = 0\n act_cov_matrix = cov[k * horizon:k * horizon + horizon, k * horizon:k * horizon + horizon]\n print(act_cov_matrix.shape)\n print(torch.det(act_cov_matrix))\n local_cholesky = matrix_cholesky(act_cov_matrix)\n for k in range(d_action):\n \n scale_tril[k * horizon:k * horizon + horizon,k * horizon:k * horizon + horizon] = local_cholesky\n \"\"\"\n\n return cov, scale_tril, scaled_M\n\n\n########################\n## Gaussian Utilities ##\n########################\n\n\ndef gaussian_logprob(mean, cov, x, cov_type=\"full\"):\n \"\"\"\n Calculate gaussian log prob for given input batch x\n Parameters\n ----------\n mean (np.ndarray): [N x num_samples] batch of means\n cov (np.ndarray): [N x N] covariance matrix\n x (np.ndarray): [N x num_samples] batch of sample values\n\n Returns\n --------\n log_prob (np.ndarray): [num_sampls] log probability of each sample\n \"\"\"\n N = cov.shape[0]\n if cov_type == \"diagonal\":\n cov_diag = cov.diagonal()\n cov_inv = np.diag(1.0 / cov_diag)\n cov_logdet = np.sum(np.log(cov_diag))\n else:\n cov_logdet = np.log(np.linalg.det(cov))\n cov_inv = np.linalg.inv(cov)\n diff = (x - mean).T\n mahalanobis_dist = -0.5 * np.sum((diff @ cov_inv) * diff, axis=1)\n const1 = -0.5 * N * np.log(2.0 * np.pi)\n const2 = -0.5 * cov_logdet\n log_prob = mahalanobis_dist + const1 + const2\n return log_prob\n\n\ndef gaussian_logprobgrad(mean, cov, x, cov_type=\"full\"):\n if cov_type == \"diagonal\":\n cov_inv = np.diag(1.0 / cov.diagonal())\n else:\n cov_inv = np.linalg.inv(cov)\n diff = (x - mean).T\n grad = diff @ cov_inv\n return grad\n\n\ndef gaussian_entropy(cov=None, L=None): # , cov_type=\"full\"):\n \"\"\"\n Entropy of multivariate gaussian given either covariance\n or cholesky decomposition of covariance\n\n \"\"\"\n if cov is not None:\n inp_device = cov.device\n cov_logdet = torch.log(torch.det(cov))\n # print(np.linalg.det(cov.cpu().numpy()))\n # print(torch.det(cov))\n N = cov.shape[0]\n\n else:\n inp_device = L.device\n cov_logdet = 2.0 * torch.sum(torch.log(torch.diagonal(L)))\n N = L.shape[0]\n # if cov_type == \"diagonal\":\n # cov_logdet = np.sum(np.log(cov.diagonal()))\n # 
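    # (editor's worked check, not in the original file) for cov = I in 2-D,\n    # det(cov) = 1 so term1 = 0 and the entropy reduces to the constant\n    # 0.5 * N * (1 + log(2*pi)) = 0.5 * 2 * 2.837877066 ~= 2.8379, i.e.\n    #   gaussian_entropy(cov=torch.eye(2))   # -> tensor(2.8379)\n    # which is where the pre-computed 2.837877066 below comes from.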
else:\n # cov_logdet = np.log(np.linalg.det(cov))\n\n term1 = 0.5 * cov_logdet\n # pi = torch.tensor([math.pi], device=inp_device)\n # pre-calculate 1.0 + torch.log(2.0*pi) = 2.837877066\n term2 = 0.5 * N * 2.837877066\n\n ent = term1 + term2\n return ent.to(inp_device)\n\n\ndef gaussian_kl(mean0, cov0, mean1, cov1, cov_type=\"full\"):\n \"\"\"\n KL-divergence between Gaussians given mean and covariance\n KL(p||q) = E_{p}[log(p) - log(q)]\n\n \"\"\"\n N = cov0.shape[0]\n if cov_type == \"diagonal\":\n cov1_diag = cov1.diagonal()\n cov1_inv = np.diag(1.0 / cov1_diag)\n cov0_logdet = np.sum(np.log(cov0.diagonal()))\n cov1_logdet = np.sum(np.log(cov1_diag))\n else:\n cov1_inv = np.linalg.inv(cov1)\n cov0_logdet = np.log(np.linalg.det(cov0))\n cov1_logdet = np.log(np.linalg.det(cov1))\n\n term1 = 0.5 * np.trace(cov1_inv @ cov0)\n diff = (mean1 - mean0).T\n mahalanobis_dist = 0.5 * np.sum((diff @ cov1_inv) * diff, axis=1)\n term3 = 0.5 * (-1.0 * N + cov1_logdet - cov0_logdet)\n return term1 + mahalanobis_dist + term3\n\n\n# @torch.jit.script\ndef cost_to_go(cost_seq, gamma_seq, only_first=False):\n # type: (Tensor, Tensor, bool) -> Tensor\n\n \"\"\"\n Calculate (discounted) cost to go for given cost sequence\n \"\"\"\n # if torch.any(gamma_seq == 0):\n # return cost_seq\n cost_seq = gamma_seq * cost_seq # discounted cost sequence\n if only_first:\n cost_seq = torch.sum(cost_seq, dim=-1, keepdim=True) / gamma_seq[..., 0]\n else:\n # cost_seq = torch.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq = torch.fliplr(\n torch.cumsum(torch.fliplr(cost_seq), dim=-1)\n ) # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq /= gamma_seq # un-scale it to get true discounted cost to go\n return cost_seq\n\n\ndef cost_to_go_np(cost_seq, gamma_seq):\n \"\"\"\n Calculate (discounted) cost to go for given cost sequence\n \"\"\"\n # if np.any(gamma_seq == 0):\n # return cost_seq\n cost_seq = gamma_seq * cost_seq # discounted reward sequence\n cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[\n :, ::-1\n ] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])\n cost_seq /= gamma_seq # un-scale it to get true discounted cost to go\n return cost_seq\n\n\n############\n##Cholesky##\n############\ndef matrix_cholesky(A):\n L = torch.zeros_like(A)\n for i in range(A.shape[-1]):\n for j in range(i + 1):\n s = 0.0\n for k in range(j):\n s = s + L[i, k] * L[j, k]\n\n L[i, j] = torch.sqrt(A[i, i] - s) if (i == j) else (1.0 / L[j, j] * (A[i, j] - s))\n return L\n\n\n# Batched Cholesky decomp\ndef batch_cholesky(A):\n L = torch.zeros_like(A)\n\n for i in range(A.shape[-1]):\n for j in range(i + 1):\n s = 0.0\n for k in range(j):\n s = s + L[..., i, k] * L[..., j, k]\n\n L[..., i, j] = (\n torch.sqrt(A[..., i, i] - s)\n if (i == j)\n else (1.0 / L[..., j, j] * (A[..., i, j] - s))\n )\n return L\n","repo_name":"NVlabs/curobo","sub_path":"src/curobo/opt/particle/particle_opt_utils.py","file_name":"particle_opt_utils.py","file_ext":"py","file_size_in_byte":9622,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"54"} +{"seq_id":"73598971041","text":"\"\"\" Home task 08:\nExpand previous Homework 5/6/7 with additional class, which allow to provide records by JSON file:\n1.Define your input format (one or many records)\n2.Default folder or user provided file path\n3.Remove file if it was successfully processed\"\"\"\n\n\nfrom hw_04.home_task_04_part_2 import text_normalization\nfrom 
hw_06.home_task_06 import News, PrivateAd, Greetings, get_expiration_date\nimport os\nimport logging\nimport json\n\n\nclass JSONParser:\n def __init__(self, filepath='../input/input_08.json'):\n self.filepath = filepath\n self.read_json()\n self.write_feed_from_json()\n\n def read_json(self) -> None:\n with open(self.filepath, 'r') as source_file:\n self.content = json.load(source_file)\n\n def write_feed_from_json(self) -> None:\n feed_path = f'{os.getcwd()}/feed_writer_result.txt'\n with open(feed_path, 'a') as feed:\n for record in self.content:\n record_type = record['record_type'].lower()\n record_text = text_normalization(record['record_text'])\n if record_type == 'news':\n city = text_normalization(record['city'])\n news = News(record_text, city)\n feed.write(news.create_news_publication())\n elif record_type == \"ad\":\n expiration_date = get_expiration_date(record['expiration_date'])\n ad = PrivateAd(record_text, expiration_date)\n feed.write(ad.create_ad_publication())\n elif record_type == \"greeting\":\n greeting = Greetings()\n feed.write(greeting.create_greeting())\n else:\n logging.info(f\"Wrong type of record: {record_type}/{record_text}.\")\n os.remove(self.filepath)  # task step 3: remove the file once it was successfully processed\n\n\nif __name__ == '__main__':\n alternative_filepath = input(\"Specify alternative filepath or press Enter:\") # Alternative filepath: '../hw_08/alt_input_08.json'\n if alternative_filepath:\n JSONParser(alternative_filepath)\n else:\n JSONParser()\n","repo_name":"isken24/python_for_dqe","sub_path":"hw_08/home_task_08.py","file_name":"home_task_08.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"2662657605","text":"from random import randint\nimport os\nfrom clint.textui import colored, puts\nfrom lang import Languages\n\nclear = lambda: os.system('cls')\nclear()\n\ncolorDict = {\n\t1: \"R\",\n\t2: \"G\",\n\t3: \"B\",\n\t4: \"Y\",\n\t5: \"P\",\n\t6: \"C\"\n}\n\nacceptedColor = {\n\t\"R\",\n\t\"G\",\n\t\"B\",\n\t\"Y\",\n\t\"P\",\n\t\"C\"\n}\n\nfirstPlay = True\n\ndef chooseLang():\n\tglobal language\n\n\tprint(\"ENG/NL?\")\n\tlangChoice = input().upper()\n\n\tlanguage = Languages(langChoice)\n\tstart()\n\n\ndef start():\n\tglobal guesses, totalGuesses, winVar, totalResults, firstPlay\n\n\tguesses = 12\n\ttotalGuesses = list()\n\ttotalResults = list()\n\twinVar = False\n\n\n\tif firstPlay:\n\t\tdisplayMenu()\n\n\n\t\tprint()\n\t\tprint(language.firstPlay)\n\t\tfirstPlay = False\n\n\telif not firstPlay:\n\t\tprint()\n\t\tprint(language.notFirstPlay)\n\n\n\tuserInput = input().lower()\n\n\tif userInput == \"yes\" or userInput == \"ja\":\n\t\tprint()\n\t\tcode = createCode()\n\t\tmainFunc(code)\n\n\tclear()\n\texit()\n\n\n\"\"\"Create code\"\"\"\ndef createCode():\n\tcode = list()\n\tfor i in range(4):\n\t\trandomNumber = randint(1, 6)\n\t\t\n\t\trandomColor = colorDict[randomNumber]\n\t\tcode.append(randomColor)\n\n\treturn code\n\n\n\"\"\"Request a guess from the user and make sure it's within the parameters.\"\"\"\ndef userCodeGuess():\n\tprint()\n\tprint(language.userCodeGuess, end = \" \")\n\n\tfor a in acceptedColor:\n\t\tprintCorrectColor(a)\n\tprint(language.amountColors)\n\n\tuserInput = input().upper()\n\tuserInput = list(userInput)\n\n\t#Here it makes sure the guess is 4 long\n\tif len(userInput) != 4:\n\t\tprint(language.incorrectLength)\n\t\treturn False\n\n\t#Here it checks if the userInput is in the accepted color dictionary\n\tfor char in userInput:\n\t\tif not char in 
acceptedColor:\n\t\t\tprint(language.incorrectColor)\n\t\t\tprint()\n\t\t\treturn False\n\n\t#if that's all true it returns userInput\n\treturn userInput\n\t\n\n\n\"\"\"Compare the guess to the code, return how many are in the right place and the right color.\"\"\"\ndef compareGuessToCode(userGuess, code):\n\tglobal winVar\n\n\tcompareResult = [0, 0]\n\toldString = list(userGuess.copy())\n\tnewString = list(userGuess.copy())\n\tnewCode = list(code.copy())\n\n\t#If the userGuess is the same as the code the user wins.\n\tif userGuess == code:\n\t\twinVar = True\n\t\treturn [4, 0]\n\n\t#Here it checks if a color is in the same place as code, if so it removes it from newString/Code\n\tfor index in range(0, len(oldString)):\n\t\tif oldString[index] == code[index]:\n\t\t\tnewString.remove(oldString[index])\n\t\t\tnewCode.remove(oldString[index])\n\n\t\t\tcompareResult[0] += 1\n\n\t#Here it checks the remaining colors to see if they're in the remaining code,\n\t#removing each match from newCode so duplicate colors aren't counted twice\n\tfor color in newString:\n\t\tif color in newCode:\n\t\t\tnewCode.remove(color)\n\t\t\tcompareResult[1] += 1\n\n\treturn compareResult\n\n\n\n\"\"\"Main function calls the necessary functions while the user still has guesses\"\"\"\ndef mainFunc(code):\n\tglobal guesses\n\n\tuserGuess = userCodeGuess()\n\tif userGuess == False:\n\t\treturn mainFunc(code)\n\n\n\tresults = compareGuessToCode(userGuess, code)\n\n\n\tif winVar:\n\t\tdisplayFunc(code, results)\n\t\tprint()\n\t\tprint(language.correctCode)\n\t\tprint()\n\t\tdisplayCode(code)\n\t\tstart()\n\n\n\tif guesses < 1:\n\t\tdisplayFunc(code, results)\n\t\tprint()\n\t\tprint(language.outOfGuesses)\n\t\tprint(language.codeReveal)\n\t\tprint()\n\t\tdisplayCode(code)\n\t\tstart()\n\n\n\tif guesses >= 1:\n\t\ttotalGuesses.append(userGuess)\n\t\ttotalResults.append(results)\n\n\t\tdisplayFunc(code, results)\n\t\tprint(language.guessesLeft1 + str(guesses) + language.guessesLeft2)\n\t\tguesses -= 1\n\t\tmainFunc(code)\n\n\n\ndef displayFunc(code, results):\n\tclear()\n\tfor guess in range(len(totalGuesses)):\n\n\t\tfor index in range(len(totalGuesses[guess])):\n\t\t\tprint(\"| \", end =\"\")\n\t\t\tprintCorrectColor(totalGuesses[guess][index])\n\n\t\tprint(\"|\", end = \" \")\n\n\t\tfor index in range(len(totalResults[guess])):\n\t\t\tprint(\"| \" + str(totalResults[guess][index]), end =\" \")\n\n\t\tprint()\n\ndef displayCode(code):\n\tfor index in range(len(code)):\n\t\tprint(\"| \", end =\"\")\n\t\tprintCorrectColor(code[index])\n\n\tprint(\"|\")\n\ndef displayMenu():\n\tif language.arg == \"ENG\":\n\t\tlanguage.displayMenuEng()\n\n\telif language.arg == \"NL\":\n\t\tlanguage.displayMenuNl()\n\n\ndef printCorrectColor(color):\n\tif color == \"R\":\n\t\tprint(colored.red('R'), end =\" \")\n\n\telif color == \"G\":\n\t\tprint(colored.green('G'), end =\" \")\n\n\telif color == \"B\":\n\t\tprint(colored.blue('B'), end =\" \")\n\n\telif color == \"Y\":\n\t\tprint(colored.yellow('Y'), end =\" \")\n\n\telif color == \"P\":\n\t\tprint(colored.magenta('P'), end =\" \")\n\n\telif color == \"C\":\n\t\tprint(colored.cyan('C'), end =\" \")\n\nchooseLang()","repo_name":"SuchLuukie/Mastermind","sub_path":"Mastermind.py","file_name":"Mastermind.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"37780758250","text":"\nclass SocketInfo(object):\n def __init__(self, type, id, display_shape=None, idx=None):\n self.type = type\n self.id = id\n self.idx = idx\n self.display_shape = 
display_shape\n\nclass SvDynamicSocketsHandler(object):\n def __init__(self):\n self.inputs_registry = dict()\n self.outputs_registry = dict()\n\n def register_inputs(self, *sockets):\n for idx, socket in enumerate(sockets):\n socket.idx = idx\n self.inputs_registry[socket.id] = socket\n return sockets\n\n def register_outputs(self, *sockets):\n for idx, socket in enumerate(sockets):\n socket.idx = idx\n self.outputs_registry[socket.id] = socket\n return sockets\n\n def get_input_by_idx(self, idx):\n for socket in self.inputs_registry.values():\n if socket.idx == idx:\n return socket\n raise Exception(\"unsupported input idx\")\n \n def get_output_by_idx(self, idx):\n for socket in self.outputs_registry.values():\n if socket.idx == idx:\n return socket\n raise Exception(\"unsupported output idx\")\n\n def init_sockets(self, node):\n for socket in self.inputs_registry.values():\n s = node.inputs.new(socket.type, socket.id)\n# if socket.display_shape is not None:\n# s.display_shape = socket.display_shape\n for socket in self.outputs_registry.values():\n s = node.outputs.new(socket.type, socket.id)\n# if socket.display_shape is not None:\n# s.display_shape = socket.display_shape\n\n","repo_name":"nortikin/sverchok","sub_path":"utils/modules/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"72987367843","text":"from datetime import datetime\nfrom helper.baselApi import baselApi\nfrom dataStorage.dataAccessBasel import dataAccess\nfrom helper.bot import bot\nimport json\n\nwith open('config.json') as data_file: \n data = json.load(data_file)\n token = data[\"bebbiToken\"]\n \nlogf = open(\"error.log\", \"w\")\nbot = bot(token)\n\nuserDataSets = dataAccess.getUserData()\n\ntomorrowDisposalAreaCodes = baselApi.GetTomorrowDisposalAreaCodes()\ntodayDisposalAreaCodes = baselApi.GetTodayDisposalAreaCodes()\n\nfor userDataSet in userDataSets:\n try: \n if userDataSet.areaCode in tomorrowDisposalAreaCodes and datetime.now().hour > 12:\n bot.SendMessage(userDataSet.chatId, \"Morn isch Karton- und Papierentsorgig!\")\n if userDataSet.areaCode in todayDisposalAreaCodes and datetime.now().hour < 12:\n bot.SendMessage(userDataSet.chatId, text=\"Hüt isch Karton- und Papierentsorgig!\") \n except:\n logf.write(\"An exception occurred for basel area code: \" + str(userDataSet.areaCode) + \"\\r\\n\")\n","repo_name":"dgnaegi/altpapierBot","sub_path":"bebbiBotNotifier.py","file_name":"bebbiBotNotifier.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"42020235905","text":"#Python test Xogito - Cristian Vladu\r\n#This code uses PEP 8 Style and Naming convetion\r\n\r\n#Import used libraries\r\nimport openpyxl\r\nfrom openpyxl.styles import PatternFill\r\nfrom openpyxl.styles.borders import Border, Side\r\nimport re\r\nimport requests\r\nimport os\r\n\r\n#Create folder for Assets to download if it does not exist\r\nif not os.path.exists('Assets'):\r\n os.mkdir('Assets')\r\n\r\n#Set styles for formatting\r\nheader_fill = PatternFill(start_color='AABBCC',\r\n end_color='AABBCC',\r\n fill_type='solid')\r\n\r\nthin_border = Border(left=Side(style='thin'),\r\n right=Side(style='thin'),\r\n top=Side(style='thin'),\r\n bottom=Side(style='thin'))\r\n\r\n#Open provided file\r\nwbook = openpyxl.load_workbook(filename=\"python practice test.xlsx\")\r\nwsheet = 
wbook.active\r\n\r\n#Initialize the final vector\r\nfinal = []\r\nfinal.append([\"Product Code\", \"Danger column\", \"Warning column\"])\r\n\r\n#Count the number of rows\r\nnr_rows = len([row for row in wsheet if not all([cell.value == None for cell in row])])\r\n\r\n#Loop through all the rows of the provided file\r\nfor i in range(2, nr_rows+1):\r\n #Check if Danger or Warning keywords found.\r\n val = wsheet.cell(row=i, column=7).value\r\n #Convert to lowercase for a better match\r\n val = val.lower()\r\n d = 'danger' in val\r\n w = 'warning' in val\r\n \r\n #If values 'danger' or 'warning' found, store the desired values in the final vector\r\n if d:\r\n final.append([wsheet.cell(row=i+1, column=6).value, wsheet.cell(row=i, column=7).value, ''])\r\n\r\n elif w:\r\n final.append([wsheet.cell(row=i+1, column=6).value, '', wsheet.cell(row=i, column=7).value])\r\n \r\n #Download resources if 'danger' or 'warning' found\r\n if d or w :\r\n response = requests.get(wsheet.cell(row=i, column=16).value)\r\n name = (str(wsheet.cell(row=i+1, column=6).value) + '.jpg') if os.path.splitext(wsheet.cell(row=i, column=16).value)[1] == '.jpg' else (str(wsheet.cell(row=i+1, column=6).value) + '.html')\r\n with open(os.path.join('Assets', name), 'wb') as f:\r\n f.write(response.content)\r\n\r\n#Open new excel workbook\r\nwbook_result = openpyxl.Workbook()\r\nwsheet_result = wbook_result.active\r\n\r\n#Initialize current row, for formating\r\ncurr_row = 1\r\n\r\n#For each value in the final vector, store it line by line in the new excel file\r\nfor row in final:\r\n wsheet_result.append(row)\r\n wsheet_result.cell(row=curr_row,column=1).border= thin_border\r\n wsheet_result.cell(row=curr_row,column=2).border= thin_border\r\n wsheet_result.cell(row=curr_row,column=3).border= thin_border\r\n curr_row += 1 #Increment current row\r\n\r\n#Set fill color in the export excel file\r\nwsheet_result['A1'].fill = header_fill\r\nwsheet_result['B1'].fill = header_fill\r\nwsheet_result['C1'].fill = header_fill\r\n\r\n#Save and close workbook\r\nwbook_result.save('final.xlsx')","repo_name":"xdkrs/PythonTest","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9851569630","text":"#!/usr/bin/env python3\n''' task 8: list all docs in Python '''\n\n\ndef list_all(mongo_collection):\n ''' list all documents in a collection. 
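    A quick usage sketch (editor's addition; it assumes a local MongoDB and a\n    hypothetical `school` collection, neither of which is part of this task):\n\n        from pymongo import MongoClient\n\n        client = MongoClient('mongodb://127.0.0.1:27017')\n        school = client.my_db.school\n        for student in list_all(school):\n            print(student)\n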
'''\n docs = []\n for doc in mongo_collection.find():\n docs.append(doc)\n return docs\n","repo_name":"sekaycee/alx-backend-storage","sub_path":"0x01-NoSQL/8-all.py","file_name":"8-all.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"22921980912","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0006_profile_karma_point'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='profile',\n old_name='karma_point',\n new_name='karma',\n ),\n ]\n","repo_name":"arguman/arguman.org","sub_path":"web/profiles/migrations/0007_auto_20151028_2252.py","file_name":"0007_auto_20151028_2252.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1374,"dataset":"github-code","pt":"54"}{"seq_id":"73592700963","text":"\"\"\"\n @author: JiaGuo\n @email: 1520047927@qq.com\n @date: Created in 2022/4/23 10:35\n @description: Majority Element\n @modified By:\n @version: 1.0\n\"\"\"\n\n\nclass Solution:\n def majorityElement(self, nums) -> int:\n \"\"\"\n Not advisable: brute-force counting\n :param nums:\n :return:\n \"\"\"\n nums_len = len(nums)\n res_len = nums_len / 2\n res_dict = dict()\n for i in nums:\n if res_dict.get(i):\n res_dict[i] += 1\n else:\n res_dict[i] = 1\n for k, v in res_dict.items():\n if v >= res_len:\n return k\n\n def majorityElement1(self, nums) -> int:\n \"\"\"\n Boyer-Moore majority vote (see the accompanying md for details); the element must appear more than half the time\n :param nums:\n :return:\n \"\"\"\n major, count = 0, 0\n for ii in nums:\n if count == 0:\n major = ii\n if ii == major:\n count += 1\n else:\n count -= 1\n return major\n\n\nif __name__ == '__main__':\n a = Solution().majorityElement1([3, 3, 4, 4, 5, 5, 4, 5, 5, 5])\n\n print(a)\n pass\n","repo_name":"gj-hat/Leetcode","sub_path":"169-多数元素/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"40397619771","text":"import cv2\r\nfrom firebase import firebase\r\n#connecting to the firebase server using its id\r\nfirebase = firebase.FirebaseApplication('https://parkpak-c41e3.firebaseio.com/')\r\n# https://github.com/Itseez/opencv/blob/master\r\n# /data/haarcascades/haarcascade_frontalface_default.xml\r\n# XML file trained with data from faces\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\n# capture frames from a camera \r\nliveFeed = cv2.VideoCapture(0)\r\n\r\n# loop runs if capturing has been initialized.\r\nfacePresent = 0\r\nwhile 1:\r\n ret, img = liveFeed.read()\r\n\r\n # convert to gray scale of each frames\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Detects faces of different sizes in the input image\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n\r\n for (x, y, w, h) in faces:\r\n # To draw a rectangle in a face\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 0), 2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = img[y:y+h, x:x+w]\r\n facePresent += 1\r\n # Display an image in a window\r\n cv2.imshow('img', img)\r\n result = firebase.put(\r\n '',\r\n '/camera',\r\n {\r\n \"faceCount\":str(facePresent)\r\n }\r\n )\r\n print(result)\r\n # Wait for Esc key to stop\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27:\r\n break\r\n facePresent = 0\r\n\r\n# Close the window \r\nliveFeed.release()\r\n\r\n# De-allocate any associated memory 
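# (editor's aside, not in the original file) for reference, the positional\r\n# arguments passed to detectMultiScale above are its standard tuning knobs:\r\n#\r\n#   faces = face_cascade.detectMultiScale(\r\n#       gray,\r\n#       scaleFactor=1.3,   # step between image-pyramid scales\r\n#       minNeighbors=5,    # higher -> fewer but more confident detections\r\n#   )\r\n#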
usage \r\ncv2.destroyAllWindows() \r\n","repo_name":"chunklin/ParkPak","sub_path":"numFaces.py","file_name":"numFaces.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}{"seq_id":"71106241762","text":"from copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport multiprocessing as mp\n\nfrom torch.distributions import MultivariateNormal\n\nfrom optim.base_optim import BaseOptim\nfrom utils.policy_dict import agent_policy\nfrom utils.torch_util import get_flatten_params, set_flatten_params, xavier_init, compute_centered_ranks\nfrom assembly.assemble_rl import worker_func\n\n\nclass NESDeepMind(BaseOptim):\n def __init__(self, config):\n super(NESDeepMind, self).__init__()\n\n self.name = config[\"name\"]\n self.sigma_init = config[\"sigma_init\"]\n self.sigma_curr = self.sigma_init\n self.sigma_decay = config[\"sigma_decay\"]\n self.learning_rate = config[\"learning_rate\"]\n self.population_size = config[\"population_size\"]\n self.weight_decay = config[\"weight_decay\"]\n self.reward_shaping = config[\"reward_shaping\"]\n self.reward_norm = config[\"reward_norm\"]\n\n self.truncation_selection = config[\"truncation_selection\"]\n self.top_T = config[\"top_T\"] # top T individuals become the parents of the next generation\n self.elite_candidate_size = config[\"elite_candidate_size\"]\n self.addition_eval_elite_candidate = config[\"addition_eval_elite_candidate\"]\n self.elitism = config[\"elitism\"]\n\n self.agent_ids = None\n self.mean = None\n self.cov = None\n self.mvn = None # MultivariateNormal model\n self.mu_model = None\n self.optimizer = None\n self.perturbation_param = None\n\n def init_population(self, policy: torch.nn.Module, env):\n self.agent_ids = env.get_agent_ids()\n # keep a reference so next_population can clone fresh policies from it\n self.policy_template = policy\n perturbation = []\n perturbation_param = []\n\n for _num in range(self.population_size):\n perturbed_policy = 
deepcopy(policy)\n perturbed_policy.set_policy_id(_num)\n perturbed_policy.norm_init()\n perturbation_param.append(get_flatten_params(perturbed_policy)['params'])\n perturbation.append(agent_policy(self.agent_ids, perturbed_policy))\n\n # Calculate the init mean, covariance matrix, and multivariate normal model;\n # requires_grad is needed so the Adam update in next_population can touch them\n self.perturbation_param = np.array(perturbation_param)\n self.mean = torch.from_numpy(np.mean(a=self.perturbation_param, axis=0)).requires_grad_(True)\n # rows are individuals and columns are parameters, hence rowvar=False\n self.cov = torch.from_numpy(np.cov(self.perturbation_param, rowvar=False)).requires_grad_(True)\n self.mvn = MultivariateNormal(loc=self.mean, covariance_matrix=self.cov)\n\n # Init optimizer\n self.optimizer = torch.optim.Adam([self.mean, self.cov], lr=self.learning_rate)\n\n return perturbation\n\n def next_population(self, assemble, results):\n\n rewards = results['rewards'].tolist()\n best_reward_this_generation = max(rewards)\n rewards = np.array(rewards)\n\n # fitness shaping\n if self.reward_shaping:\n rewards = compute_centered_ranks(rewards)\n\n # normalization\n if self.reward_norm:\n r_std = rewards.std()\n rewards = (rewards - rewards.mean()) / r_std\n\n # update mean and cov based on gradient; the distribution is rebuilt first\n # because the in-place Adam updates would otherwise leave the cached\n # MultivariateNormal stale\n self.mvn = MultivariateNormal(loc=self.mean, covariance_matrix=self.cov)\n self.optimizer.zero_grad()\n loss = -torch.mean(self.mvn.log_prob(torch.from_numpy(self.perturbation_param)) * torch.from_numpy(rewards))\n loss.backward()\n self.optimizer.step()\n\n # sample new perturbation based on the updated mean and cov\n with torch.no_grad():\n perturbation_next = MultivariateNormal(loc=self.mean, covariance_matrix=self.cov).sample((self.population_size,))\n\n # create new population based on sampled individuals as NN params;\n # set_flatten_params is assumed to mirror get_flatten_params and load a\n # flat parameter vector back into a module\n self.perturbation_param = perturbation_next.numpy()\n perturbation = []\n for _num in range(self.population_size):\n perturbed_policy = deepcopy(self.policy_template)\n perturbed_policy.set_policy_id(_num)\n set_flatten_params(perturbed_policy, self.perturbation_param[_num])\n perturbation.append(agent_policy(self.agent_ids, perturbed_policy))\n\n if self.sigma_curr >= 0.01:\n self.sigma_curr *= self.sigma_decay\n\n return perturbation, self.sigma_curr, best_reward_this_generation\n\n def get_elite_model(self):\n return self.elite_model['0']\n","repo_name":"gbymb4/SnakeES","sub_path":"optim/es/nes_deepmind.py","file_name":"nes_deepmind.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"34294243808","text":"from django.http.response import JsonResponse\nfrom django.views.decorators.http import require_http_methods\n\nfrom customer.decorators import check_permission_api\nfrom customer.models import UserAddress\n\nfrom ratelimit.decorators import ratelimit\n\n\n@ratelimit(key='ip', rate='500/h', method=ratelimit.ALL, block=True)\n@require_http_methods(['POST'])\n@check_permission_api(['user'])\ndef delete_address(request):\n \"\"\"\n user can delete each of his addresses\n :param request: user, address_id\n :return: error or success message\n \"\"\"\n user = request.user\n address_id = request.POST.get('address_id')\n\n if not address_id:\n res_body = {\n \"error\": \"address_id not provided\"\n }\n return JsonResponse(res_body, status=400)\n\n res_body, status = user.delete_address(address_id=address_id)\n\n return JsonResponse(res_body, status=status)\n","repo_name":"SaEED-ABB/Hinata","sub_path":"customer/api/views/delete_address.py","file_name":"delete_address.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"8184439439","text":"import torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch\r\nclass se_res_block(nn.Module):# everything here is 2D; there is no pair of fully-connected layers\r\n def __init__(self, channel, kernel_size = 3, stride = 1, padding = 1, enable = True):\r\n super(se_res_block, self).__init__()\r\n self.enable = enable\r\n\r\n self.conv1 = nn.Conv2d(channel, channel, kernel_size, stride, padding)\r\n self.conv1_norm = nn.InstanceNorm2d(channel)\r\n self.conv2 = nn.Conv2d(channel, channel, kernel_size, stride, padding)\r\n self.conv2_norm = nn.InstanceNorm2d(channel)\r\n\r\n if self.enable:\r\n self.se_conv1 = nn.Conv2d(channel, channel // 16, kernel_size=1)\r\n self.se_conv2 = nn.Conv2d(channel // 16, channel, kernel_size=1)\r\n\r\n def forward(self, x):\r\n output = F.relu(self.conv1_norm(self.conv1(x)))\r\n output = self.conv2_norm(self.conv2(output))\r\n\r\n if self.enable:\r\n se = F.avg_pool2d(output, output.size(2))# output.size(2) is 512, i.e. pool over the full spatial extent; after pooling the result is batch*channel*1*1\r\n se = F.relu(self.se_conv1(se))\r\n se = torch.sigmoid(self.se_conv2(se))\r\n output = output * se\r\n\r\n output += x\r\n output = F.relu(output)\r\n return output\r\n\r\n# dd = torch.randn(2,64,512,128)\r\n# dd = dd.cuda()\r\n# djgnet = se_res_block(64)\r\n# djgnet = djgnet.cuda()\r\n# yy = djgnet(dd)\r\n# print(yy.shape)","repo_name":"ddyss/Res-Unet","sub_path":"my_code_for_PAT/se_res_block.py","file_name":"se_res_block.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"30796582129","text":"\"\"\"\"\"\" \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n\"\"\" \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nA simple 
wrapper for linear regression. (c) 2015 Tucker Balch \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nNote, this is NOT a correct DTLearner; Replace with your own implementation. \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nCopyright 2018, Georgia Institute of Technology (Georgia Tech) \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nAtlanta, Georgia 30332 \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nAll Rights Reserved \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nTemplate code for CS 4646/7646 \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nGeorgia Tech asserts copyright ownership of this template and all derivative \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nworks, including solutions to the projects assigned in this course. Students \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nand other users of this template code are advised not to share it with others \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nor to make it available on publicly viewable websites including repositories \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nsuch as github and gitlab. This copyright statement should not be removed \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nor edited. \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nWe do grant permission to share solutions privately with non-students such \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nas potential employers. However, sharing with other current or future \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nstudents of CS 7646 is prohibited and subject to being investigated as a \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nGT honor code violation. \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n-----do not edit anything above this line--- \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nStudent Name: Tucker Balch (replace with your name) \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nGT User ID: twu411 (replace with your User ID) \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nGT ID: 596568372 (replace with your GT ID) \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n\"\"\" \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \nimport numpy as np\n\n\nclass DTLearner(object):\n \"\"\"\n This is a DTLearner. It is implemented correctly.\n\n :param verbose: If “verbose” is True, your code can print out information for debugging.\n If verbose = False your code should not generate ANY output. 
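    A minimal usage sketch (editor's addition; synthetic data, not part of\n    the assignment):\n\n        import numpy as np\n        learner = DTLearner(leaf_size=1, verbose=False)\n        x = np.random.random((100, 4))\n        y = 2.0 * x[:, 0] + 1.0          # target correlated with feature 0\n        learner.add_evidence(x, y)       # build the tree\n        pred = learner.query(x[:5])      # estimate the first five rows\n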
When we test your code, verbose will be False.\n :type verbose: bool\n \"\"\"\n\n def __init__(self, leaf_size = 1, verbose=False):\n \"\"\"\n Constructor method\n \"\"\"\n self.leaf_size = leaf_size\n self.verbose = verbose\n pass # move along, these aren't the drones you're looking for\n\n def author(self):\n \"\"\"\n :return: The GT username of the student\n :rtype: str\n \"\"\"\n return \"twu411\" # replace tb34 with your Georgia Tech username\n def optfactor(self, data_x, data_y):\n c = 0\n factor = 0\n for i in range(data_x.shape[1]):\n if np.std(data_x[:, i]) ==0 or np.std(data_y) == 0:\n factor = 0\n else:\n ci = abs(np.corrcoef(data_x[:, i], y=data_y))\n if ci[0, 1] >= c:\n c = ci[0, 1]\n factor = i\n return (factor)\n\n def build_tree(self, data):\n #termination condition: leaf size limitation or all y are equal\n if data.shape[0] <= self.leaf_size or np.std(data[:, -1]) == 0:\n c = np.empty((data.shape[0], 4))\n c[:, 0] = -1\n c[:, 1] = np.mean(data[:, -1])\n c[:, 2:] = -100\n return c\n\n facter = self.optfactor(data[:, 0:-1], data[:, -1])\n splitval = np.median(data[:, facter])\n #judge if median could not divide tree any more\n if np.all(data[:, facter] <= splitval):\n c = np.array([[-1, np.mean(data[:, -1]), -100, -100]])\n return c\n\n #recursion starts here:\n lefttree = self.build_tree(data[data[:, facter] <= splitval])\n righttree = self.build_tree(data[data[:, facter] > splitval])\n root = np.array(([facter, splitval, 1, lefttree.shape[0] + 1],))\n c = np.concatenate((root, lefttree, righttree), axis=0)\n return c\n\n\n\n\n\n\n def add_evidence(self, data_x, data_y):\n \"\"\"\n Add training data to learner\n\n :param data_x: A set of feature values used to train the learner\n :type data_x: numpy.ndarray\n :param data_y: The value we are attempting to predict given the X data\n :type data_y: numpy.ndarray\n \"\"\"\n\n # combine x and y to be introduced into trees\n new_y = np.array(data_y, copy=False, subok=True, ndmin=2).T\n data = np.column_stack([data_x, data_y]) #column stack is used because concatenate or append somehow doesn't work\n self.tree = self.build_tree(data)\n if self.verbose == True:\n print(\"tree:\\n\", self.build_tree(data))\n print(\"tree shape:\\n\", self.build_tree(data).shape)\n\n\n # build and save the model\n\n def query(self, points):\n \"\"\"\n Estimate a set of test points given the model we built.\n\n :param points: A numpy array with each row corresponding to a specific query.\n :type points: numpy.ndarray\n :return: The predicted result of the input data according to the trained model\n :rtype: numpy.ndarray\n \"\"\"\n tree = self.tree\n\n testy = np.array(())\n for test in points:\n treenode = 0\n while int(tree[treenode, 0]) != -1:\n facter = int(tree[treenode, 0])\n\n if test[facter] <= tree[treenode, 1]:\n treenode = treenode + int(tree[treenode, 2])\n else:\n treenode = treenode + int(tree[treenode, 3])\n # I have to write the following three-line shit because we are not allowed to use lists.\n thisy = tree[treenode, 1]\n thisy = np.array((thisy))\n testy = np.append(testy, thisy)\n return testy\n\nif __name__ == \"__main__\":\n print(\"the secret clue is 'zzyzx'\")\n\n","repo_name":"Erhushenshou/cs7646-ml4t-2022","sub_path":"project4/DTLearner.py","file_name":"DTLearner.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"12393719092","text":"from django.conf import settings\n\n\ndef standard(request):\n '''\n Define a dictionary of 
context variables to pass to every template.\n '''\n context = {\n 'page_title': settings.SITE_TITLE,\n 'site_title': settings.SITE_TITLE,\n 'site_acronym': settings.SITE_ACRONYM,\n 'application_version_no': settings.APPLICATION_VERSION_NO,\n 'user_navbar': 'ibms/user_navbar_li.html',\n 'confluence_url': settings.CONFLUENCE_URL,\n }\n return context\n","repo_name":"ScottEvansDBCA/ibms","sub_path":"ibms_project/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"5374051051","text":"import sys\nfrom dataclasses import dataclass\nfrom typing import Type\n\nfrom src.proto_file import ProtoFile\nfrom src.proto_message import ProtoMessageAdded\nfrom src.proto_node import ProtoNodeDiff\nfrom src.util.parser import Parser\n\n\n@dataclass\nclass CompatibilityChecker:\n allowed_diff_types: list[Type[ProtoNodeDiff]]\n\n def check_compatibility(self, before: ProtoFile, after: ProtoFile):\n for diff in before.diff(after):\n if diff.__class__ in self.allowed_diff_types:\n continue\n yield diff\n\n\ndef main() -> int:\n with open(sys.argv[1], \"r\") as proto_file:\n before = Parser.loads(proto_file.read())\n\n with open(sys.argv[2], \"r\") as proto_file:\n after = Parser.loads(proto_file.read())\n\n violations = list(\n CompatibilityChecker([ProtoMessageAdded]).check_compatibility(before, after)\n )\n if violations:\n print(f\"Violations: {violations}\")\n return 1\n\n return 0\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n","repo_name":"shaldengeki/py_proto","sub_path":"src/util/compatibility_checker.py","file_name":"compatibility_checker.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13190109738","text":"from django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\n\n\nclass BasePageURL(models.Model):\n \"\"\" Links a Page object with a given url \"\"\"\n\n name = models.CharField(max_length=50, unique=True)\n slug = models.SlugField(max_length=60, default=\"\", unique=True, blank=True,\n help_text=\"De naam in de url, laat leeg om automatisch te genereren ut de titel\")\n description = models.CharField(default=\"\", max_length=128)\n in_footer = models.BooleanField(default=True, verbose_name=\"Whether the page is linked in the footer\")\n footer_order = models.IntegerField(default=0, verbose_name=\"Order of appearance in the footer\")\n\n page = models.ForeignKey('PageDisplay.Page', on_delete=models.CASCADE, blank=True, editable=False)\n\n def save(self, **kwargs):\n if self.slug == \"\":\n self.slug = slugify(self.name)\n\n if self.id is None:\n from PageDisplay.models import Page\n self.page = Page.objects.create(name=self.name)\n\n return super(BasePageURL, self).save(**kwargs)\n\n def get_absolute_url(self):\n return reverse(\"general:general_pages:view_page\", kwargs={'slug': self.slug})\n\n def __str__(self):\n return self.name\n\n def delete(self, **kwargs):\n page = self.page\n result = super(BasePageURL, self).delete(**kwargs)\n page.delete()\n return result","repo_name":"DutcherNL/Shakespear","sub_path":"general/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41154470044","text":"from __future__ import print_function\r\nimport torch\r\nimport 
torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nimport argparse\r\nimport random\r\nimport h5py\r\nimport pandas as pd\r\nimport eelm  # project-local module that defines EELM (assumed)\r\nSEED = 1\r\n\r\nrandom.seed(SEED)\r\ntorch.manual_seed(SEED)\r\ntorch.backends.cudnn.deterministic = True\r\n\r\ndef adjust_learning_rate(optimizer, epoch, init_lr=0.1, decay=0.1, per_epoch=10):\r\n \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\"\r\n print(\"[adjust_learning_rate] optimizer's learning rate is going to be decayed\")\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] *= 1 / (1 + decay)\r\n return optimizer, float(param_group['lr'])\r\n\r\n\r\ndef marginal_loss(normal_ee, error_ee,device='cuda:0'):\r\n margin = 1 - normal_ee + error_ee\r\n zero_t = torch.zeros(margin.shape).to(device)\r\n loss = torch.max(zero_t, margin)\r\n return torch.mean(loss)\r\n\r\n\r\ndef train(model, train_X, epochs=500, lr=0.01, batch_size=100,lemma_dict=None,device='cuda:0'):\r\n \"\"\"\r\n [update_note] train_X's dimension has changed. It is now [# of structure , # of data , length of each data point]\r\n \"\"\"\r\n optimizer = torch.optim.Adam(model.parameters(), lr)\r\n input_length = train_X.shape[1]\r\n\r\n for epoch in range(1, epochs + 1):\r\n if epoch % 10 == 0 and epoch != 0 :\r\n optimizer, lr_int = \\\r\n adjust_learning_rate(optimizer, epoch, init_lr=lr, decay=0.05, per_epoch=10)\r\n model.train(); batch_count = 0\r\n\r\n corrupted_X = train_X.clone()\r\n corrupted_X[0] = torch.tensor(np.random.randint(0,len(lemma_dict),size=(input_length,3)))\r\n\r\n for idx in range(0, input_length, batch_size):\r\n start_idx = idx\r\n end_idx = idx + batch_size\r\n if end_idx > input_length:\r\n end_idx = input_length  # clamp to the end instead of -1, which would drop the last item\r\n\r\n batch_inputs = train_X[:, start_idx:end_idx, :]\r\n corrupt_inputs = corrupted_X[:, start_idx:end_idx, :]\r\n\r\n batch_count += 1\r\n if batch_count % 1000 == 0 :\r\n print(\"{} epochs - batch {} is in progress. fyi, loss is {}\"\\\r\n .format(epoch, batch_count,torch.mean(normal_ee + error_ee)))\r\n\r\n batch_inputs = batch_inputs.to(device)\r\n corrupt_inputs = corrupt_inputs.to(device)\r\n\r\n normal_ee = model(batch_inputs)\r\n error_ee = model(corrupt_inputs)\r\n\r\n loss = marginal_loss(normal_ee, error_ee,device)\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n print(\"epoch : {} | loss : {}\".format(epoch, loss))\r\n\r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser(description='hello')\r\n parser.add_argument('--train_X_path', type=str, required=True, help='train_X path. it should be h5py file format')\r\n parser.add_argument('--lemma_dict_path', type=str, required=True, help='lemma_dict path. 
it should be pickle file format')\r\n args = parser.parse_args()\r\n\r\n with h5py.File(args.train_X_path, 'r') as f:\r\n train_X = f['train_X'][:]\r\n train_X = torch.tensor(train_X)\r\n lemma_dict = pd.read_pickle(args.lemma_dict_path)\r\n train_X = train_X[:, :(train_X.shape[1] // 50) * 50]  # integer division so the slice index stays an int\r\n\r\n params = \\\r\n {'VOCAB_SIZE': len(lemma_dict),\r\n 'EMBED_SIZE': 100,\r\n 'HID_SIZE': 75,\r\n 'BATCH_SIZE': 50,\r\n 'DEVICE': 'cuda:0', }\r\n\r\n model = eelm.EELM(**params)\r\n model = model.cuda()\r\n\r\n train(model, train_X, batch_size=params['BATCH_SIZE'], lemma_dict=lemma_dict)","repo_name":"lisatiny/Deep-Learning-for-Event-Driven-Stock-Prediction","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"12833800401","text":"import pandas as pd\nimport numpy as np\n\n\ndef category_courses(category, df):\n catIwant = df[df[\"course_category\"] == category]\n grouped_cat = pd.DataFrame(catIwant.groupby('published_title')['rating'].mean())\n grouped_cat['num of ratings'] = pd.DataFrame(catIwant.groupby('published_title')['rating'].count())\n grouped_cat.sort_values('num of ratings',ascending=False, inplace=True)\n grouped_cat.reset_index(inplace=True)\n grouped_cat = grouped_cat[\"published_title\"].head(10)\n cat_courses = pd.DataFrame(grouped_cat)\n cat_courses = pd.merge(cat_courses, df[[\"published_title\", \"price\"]], on = \"published_title\")\n cat_courses.drop_duplicates(inplace=True)\n return cat_courses[[\"published_title\", \"price\"]]\n","repo_name":"anushamohan/udemy-recommender","sub_path":"myapp/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}{"seq_id":"73602265122","text":"\"\"\"\nImport required modules\n\"\"\"\n# Import os for clearing screen to help with user experience\nimport os\n# Import time to add pauses at certain points during the quiz\nimport time\n# Import random to get the random question from the data dictionary\nimport random\n# Import math to get 10 random questions out of 20\nimport math\n# Import gspread for tracking users names and scores\nimport gspread\nfrom google.oauth2.service_account import Credentials\n# Import pyfiglet for converting regular text into different forms\n# of ASCII art\nimport pyfiglet\n# Import colorama for adding a colour to the different parts of the quiz\nimport colorama\nfrom colorama import Fore\ncolorama.init(autoreset=True)\n\n\nSCOPE = [\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\"\n ]\n\n\"\"\"\nGoogle sheets access credentials variables\n\"\"\"\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nSHEET = GSPREAD_CLIENT.open('space_quiz')\n\nhigh_scores = SHEET.worksheet(\"scores\")\nscores = high_scores.get_all_values()\n\nNAME = \"\"\nSCORE = 0\n\n\"\"\"\nDictionary of quiz questions for space quiz\n\"\"\"\nquiz_data = [\n {\"question\": \"Which planet is closest to the sun?\",\n \"answers\": {\"a\": \"Mercury\",\n \"b\": \"Mars\",\n \"c\": \"Venus\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"What shape is the Milky Way?\",\n \"answers\": {\"a\": \"Spiral\",\n \"b\": \"Circle\",\n \"c\": \"Square\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"Which planet is named after the Roman god of war?\",\n 
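    # (editor's note, not in the original file) every quiz_data entry keeps the\n    # same shape, which run_quiz() relies on when it checks\n    # `user_answer in entry['answers']` and compares against\n    # entry['correct_answer']:\n    #   {\"question\": str,\n    #    \"answers\": {\"a\": str, \"b\": str, \"c\": str},\n    #    \"correct_answer\": \"a\" | \"b\" | \"c\"}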
\"answers\": {\"a\": \"Earth\",\n \"b\": \"Mars\",\n \"c\": \"Jupiter\"},\n \"correct_answer\": \"b\"},\n {\"question\": \"What is the name of the force which keeps the planets \"\n \"in orbit around the sun?\",\n \"answers\": {\"a\": \"Magnetic Force\",\n \"b\": \"Electric Force\",\n \"c\": \"Gravity Force\"},\n \"correct_answer\": \"c\"},\n {\"question\": \"What would you find if you travelled to the centre \"\n \"of the solar system?\",\n \"answers\": {\"a\": \"The Black Hole\",\n \"b\": \"The Moon\",\n \"c\": \"The Sun\"},\n \"correct_answer\": \"c\"},\n {\"question\": \"Which planet has a day which lasts eight months?\",\n \"answers\": {\"a\": \"Mars\",\n \"b\": \"Venus\",\n \"c\": \"Earth\"},\n \"correct_answer\": \"b\"},\n {\"question\": \"How many planets are there in the solar system?\",\n \"answers\": {\"a\": \"Eight\",\n \"b\": \"Seven\",\n \"c\": \"Ten\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"How long is one year on Jupiter?\",\n \"answers\": {\"a\": \"3 Earth years\",\n \"b\": \"8 Earth years\",\n \"c\": \"12 Earth years\"},\n \"correct_answer\": \"b\"},\n {\"question\": \"How many moons does Earth have?\",\n \"answers\": {\"a\": \"Just One\",\n \"b\": \"Two\",\n \"c\": \"None\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"Who invented the telescope?\",\n \"answers\": {\"a\": \"Galileo\",\n \"b\": \"Hans Lippershey\",\n \"c\": \"Johannes Kepler\"},\n \"correct_answer\": \"b\"},\n {\"question\": \"How old is the sun?\",\n \"answers\": {\"a\": \"Roughly 4.6 billion years old\",\n \"b\": \"Roughly 1 billion years old\",\n \"c\": \"Roughly 9 billion years old\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"What color is the sun?\",\n \"answers\": {\"a\": \"White\",\n \"b\": \"Bright yellow\",\n \"c\": \"A mixture of all colors\"},\n \"correct_answer\": \"c\"},\n {\"question\": \"What color is Mars sunset?\",\n \"answers\": {\"a\": \"Red\",\n \"b\": \"Yellow\",\n \"c\": \"Blue\"},\n \"correct_answer\": \"c\"},\n {\"question\": \"Which planet has the most moons?\",\n \"answers\": {\"a\": \"Earth\",\n \"b\": \"Saturn\",\n \"c\": \"Mars\"},\n \"correct_answer\": \"b\"},\n {\"question\": \"Which planet is known as the Morning Star?\",\n \"answers\": {\"a\": \"The Sun\",\n \"b\": \"Earth\",\n \"c\": \"Venus\"},\n \"correct_answer\": \"c\"},\n {\"question\": \"How much of the universe is composed of dark matter?\",\n \"answers\": {\"a\": \"27 percent\",\n \"b\": \"80 percent\",\n \"c\": \"2 percent\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"Where can you go to see projections of the night sky?\",\n \"answers\": {\"a\": \"A Museum\",\n \"b\": \"An Aquarium\",\n \"c\": \"A Planetarium\"},\n \"correct_answer\": \"c\"},\n {\"question\": \"What are the storms produced by the sun called?\",\n \"answers\": {\"a\": \"Solar storms\",\n \"b\": \"Sun storms\",\n \"c\": \"Cosmic storms\"},\n \"correct_answer\": \"a\"},\n {\"question\": \"What is the study of the stars, planets, and galaxies?\",\n \"answers\": {\"a\": \"Geography\",\n \"b\": \"Astronomy\",\n \"c\": \"Galaxology\"},\n \"correct_answer\": \"b\"},\n {\"question\": \"How long does a solar eclipse last?\",\n \"answers\": {\"a\": \"About seven and a half minutes\",\n \"b\": \"About three miutes\",\n \"c\": \"About 5 minutes\"},\n \"correct_answer\": \"a\"},\n]\n\n\ndef clear():\n \"\"\"\n Function to clear the terminal on windows, mac and\n linux for a better user experience.\n \"\"\"\n # for windows\n if os.name == 'nt':\n os.system('cls')\n # for mac and linux(here, os.name is 'posix')\n else:\n 
os.system('clear')\n\n\ndef quiz_intro():\n \"\"\"\n Quiz intro displays ASCII title text. Gets\n user name, shows instructions and asks user if they are ready to begin.\n \"\"\"\n\n ascii_banner = pyfiglet.figlet_format(\"Space Quiz\")\n print(Fore.CYAN + ascii_banner)\n\n print(\"WOULD YOU LIKE TO TEST YOUR KNOWLEDGE ABOUT OUTER SPACE?\")\n time.sleep(1)\n global NAME\n NAME = input(\"Please type your name and hit the enter key to start:\\n\")\n clear()\n\n # Relaunches quiz intro if no name is entered and user only clicks Enter\n if NAME == \"\":\n print(f\"{Fore.RED}A name is required to take the quiz!\")\n quiz_intro()\n else:\n print(f\"{Fore.CYAN}Welcome to the Space Quiz {NAME}.\\n\")\n print(\"The quiz consists of ten questions to test your knowledge \"\n \"about outer space and the solar system.\\n\")\n time.sleep(1)\n print(\"The questions are in multiple choice format.\\n\")\n time.sleep(1)\n print(\"Options are a, b or c for all questions.\\n\")\n time.sleep(1)\n print(\"When prompted, please enter your answer a, b or c and hit the \"\n \"enter key.\\n\")\n time.sleep(1)\n\n # Asks user if they'd like to begin the quiz pulling in the name they have\n # entered above\n begin_quiz = input(f\"Are you ready to begin, {NAME}? (y/n): \")\n\n while begin_quiz.lower() != \"y\":\n begin_quiz = input(f\"{Fore.RED}Please enter 'y' to begin {NAME}, else \"\n \"complete the quiz another time: \")\n\n if begin_quiz.lower() == \"y\":\n print(f\"{Fore.CYAN}\\nOkay, let's start. Good luck!\\n\")\n time.sleep(1.5)\n clear()\n\n\n# run_quiz function based on project by Leah Fisher\n# https://github.com/cornishcoder1/Food_of_Japan_Quiz\n\n\ndef run_quiz(data):\n \"\"\"\n Creates a custom list of 10 questions randomly selected from the data\n and loops through the questions and answers in the quiz data dictionary\n \"\"\"\n score = 0\n # Initialize an array that will contain all question numbers\n questions_numbers = []\n # Initialize an array that will contain all question objects from data\n questions_toshow = []\n\n for newquestion in range(10):\n newquestion = math.floor(random.random()*len(quiz_data))\n while newquestion in questions_numbers:\n newquestion = math.floor(random.random()*len(quiz_data))\n questions_numbers.append(newquestion)\n questions_toshow.append(quiz_data[newquestion])\n\n for entry in questions_toshow:\n user_answer = \"\"\n correct_answer = entry['correct_answer']\n\n # this loop repeats the question until the user enters\n # either a, b or c\n while user_answer not in ['a', 'b', 'c']:\n print(f\"{Fore.CYAN}{entry['question']}\")\n\n # this code prints the 3 options for each question\n for key, value in entry['answers'].items():\n print(f\"{key}: {value}\")\n\n user_answer = input(\"answer(a, b or c): \\n\")\n user_answer = user_answer.lower()\n # to check if the user picks an accepted option a, b or c\n if user_answer not in entry['answers']:\n print(f\"{Fore.RED}Only a, b or c are valid options!\\n\")\n\n # this code checks if the answer is correct and adds\n # a point to the score\n if user_answer == entry['correct_answer']:\n print(f\"{Fore.GREEN}That's correct {NAME}! 
Well done!\\n\")\n print(\"🥳..🥳..🥳..🥳..🥳..🥳..🥳..🥳..🥳..🥳..🥳\")\n score = score + 1\n print(f\"Your score: {score}\")\n input(f\"{Fore.CYAN}PLEASE PRESS ENTER TO MOVE ON TO THE NEXT \"\n \"QUESTION.\\n\")\n clear()\n # this code displays the correct answer if the user enters\n # the wrong answer\n elif user_answer != entry['correct_answer']:\n print(f\"{Fore.RED}Sorry {NAME}, that's incorrect.\\n\")\n print(\"😕..😕..😕..😕..😕..😕..😕..😕..😕..😕..😕\")\n print(f\"The correct answer was {correct_answer}.\")\n input(f\"\\n{Fore.CYAN}PLEASE PRESS ENTER TO MOVE ON TO THE NEXT \"\n \"QUESTION.\\n\")\n clear()\n\n # the final screen congratulates the user and tells\n # the final score\n print(f\"\\nWell done for completing the Space Quiz, {NAME}.\\n\")\n print(f\"\\n{Fore.CYAN}Your total score was {score} points out of 10.\\n\")\n print(\"Hope you had fun!\")\n data = NAME, score\n export_results(data)\n\n# Export results based on Love Sandwiches project by\n# Code Institute\n\n\ndef export_results(data):\n \"\"\"\n This function will export the results of the quiz including\n the user name and final score to the scores worksheet\n \"\"\"\n print(\"Updating results worksheet...\\n\")\n scores_worksheet = SHEET.worksheet(\"scores\")\n scores_worksheet.append_row(data)\n print(\"Results exported to worksheet successfully\")\n time.sleep(1)\n\n input(f\"{Fore.CYAN}PRESS ENTER TO SEE THE HIGH SCORE TABLE.\\n\")\n clear()\n\n\ngame_high_scores = [\n\n # High Scores table display\n \"\"\"\n =====✴=====✴=====✴=====✴=====✴=====✴=====✴=====\n H I G H S C O R E S\n ===============================================\n \\tPOS\\tNAME\\tSCORE\\t\n ===============================================\n \"\"\"\n]\n\n\ndef display_high_scores():\n \"\"\"\n Displays five highest scores in the High Score table\n \"\"\"\n score_sheet = SHEET.worksheet(\"scores\").get_all_values()[1:]\n\n update_data = sorted(score_sheet, key=lambda x: int(x[1]), reverse=True)\n\n print(f\"{Fore.CYAN}{game_high_scores[0]}\")\n if len(update_data) < 5:\n count = len(update_data)\n else:\n count = 5\n\n for i in range(0, count):\n print(f\"\"\"\n {i+1}\\t{update_data[i][0]}\\t {update_data[i][1]}\"\"\")\n print(f\"\"\"{Fore.CYAN}\\n\n =====✴=====✴=====✴=====✴=====✴=====✴=====\"\"\")\n\n\ndef try_again():\n \"\"\"\n At the end of the quiz, offers the user the chance to try again\n \"\"\"\n\n restart = input(\"Would you like to try again and beat your score? (Y/N)\\n\")\n if restart.upper() == \"Y\":\n clear()\n print(f\"{Fore.CYAN}Okay, let's try again! Good luck!\")\n time.sleep(1.5)\n clear()\n run_quiz(quiz_data)\n elif restart.upper() == \"N\":\n print(\"Thank you for taking the quiz. Hope to see you again soon.\\n\")\n else:\n print(f\"{Fore.RED}I did not understand. 
else:\n        print(f\"{Fore.RED}I did not understand. Only Y/N are valid options!\")\n        try_again()\n\n\ndef main_function():\n    '''Main function that controls the flow of the quiz'''\n    clear()\n    quiz_intro()\n    run_quiz(quiz_data)\n    display_high_scores()\n    try_again()\n\n\nmain_function()\n","repo_name":"AstaJoks/space-quiz","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":12332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27771566706","text":"import os\r\nimport re\r\n\r\n#input = os.open('C:\\\\Users\\\\vhdl\\\\Desktop\\\\pp_adv\\\\input_1.txt',os.O_RDONLY)\r\n\r\nwith open('input_1.txt') as f:\r\n    lines = f.readlines()\r\n\r\nlines_added = []\r\nadded = 0\r\nfor i in range(0,len(lines)):\r\n    lines[i] = re.sub('\\\\n','', lines[i])\r\n    if lines[i] == '':\r\n        lines_added.append(added)\r\n        added = 0\r\n    else:\r\n        lines[i] = int(lines[i])\r\n        added = added + lines[i]\r\nlines_added.append(added)  # include the final block (input may not end with a blank line)\r\n\r\nprint(max(lines_added)) # result 1\r\n\r\n## 2\r\nlines_added.sort()\r\nwynik2 = sum(lines_added[-3:])  # previously hard-coded: 65240 + 68996 + 70374\r\nprint(wynik2) # result 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"wejengin2/aoc","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73685979042","text":"# encoding= utf-8 \n# @Time : 2020/6/1 15:00 \n# @Author : Yao\n# @File : reduce.py \n# @Software: PyCharm\nfrom functools import reduce\nfrom operator import xor\n\n# Three ways to compute the cumulative XOR of the integers 0..5\nn = 0\nfor i in range(6):\n    n ^= i\nprint(n)\nprint(reduce(lambda a, b: a ^ b, range(6)))\nprint(reduce(xor, range(6)))\n\n\n\n","repo_name":"yaoshunqing/Python-fluent","sub_path":"10/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"558079494","text":"import ibm_db\nimport ibm_db_dbi\nimport sys\nimport configparser\nimport logging\nimport jsonify\nimport traceback\nimport classes.iDb as db\nimport classes.user as u\nimport classes.Book as b\n\n# Set Logging Level\nlogging.basicConfig(level=logging.INFO)\n\n# Class representing a user's review of a book\n\n\nclass Review:\n\n    def __init__(self, user_id, isbn, rating, comment):\n\n        # Log the creation of the instance\n        logging.debug('Created Review Class Instance')\n\n        # Keep list of instances created\n        instances = []\n        instances.append(self)\n\n        self.rating = rating\n        self.comment = comment\n        self.user_id = user_id\n        self.isbn = isbn\n\n    # Class method to retrieve reviews based on user_id\n    @staticmethod\n    def getReviewsByUser(user_id):\n        try:\n            sql = \"SELECT * FROM REVIEWS WHERE USER_ID = \" + str(user_id)\n\n            # Calls database with constructed SQL from imported db class\n            query_object = db.dbQuery(sql)\n            reviews = db.dbQuery.callDbFetch(query_object)\n\n            # Log Results of DB call and return results\n            logging.debug(\"successful connect to db2\")\n            logging.info(\"Reviews response: \" + str(reviews))\n            return reviews\n\n            
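# NOTE (illustrative): string-concatenated SQL like the query above is\n            # injection-prone; with ibm_db a parameterized form would be (conn being\n            # an ibm_db connection handle):\n            #   stmt = ibm_db.prepare(conn, \"SELECT * FROM REVIEWS WHERE USER_ID = ?\")\n            #   ibm_db.execute(stmt, (user_id,))\n        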
except:\n            logging.error(\"Oops!\" + str(sys.exc_info()) + \"occurred. \")\n            return {\n                \"statusCode\": 400,\n                \"headers\": {\"Content-Type\": \"application/json\"},\n                \"body\": {\"error\": str(sql) + str(sys.exc_info())}\n            }\n\n    @staticmethod\n    def getReviewsByISBN(isbn):\n        try:\n            sql = \"SELECT * FROM KXJ28592.REVIEWS WHERE ISBN = \" + str(isbn)\n            db_obj = db.dbQuery(sql)\n            reviews = db.dbQuery.callDbFetch(db_obj)\n\n            # Log Results of DB call and return results\n            logging.debug(\"successful connect to db2\")\n            logging.info(\"SQL: \" + str(sql))\n            logging.info(\"Reviews response: \" + str(reviews))\n            return {\n                \"statusCode\": 200,\n                \"headers\": {\"Content-Type\": \"application/json\"},\n                \"body\": {\"reviews\": reviews}}\n\n        except:\n            logging.error(\"Oops!\" + str(sys.exc_info()) + \"occurred. \")\n            logging.error(traceback.format_exc())\n            return {\n                \"statusCode\": 400,\n                \"headers\": {\"Content-Type\": \"application/json\"},\n                \"body\": {\"error\": str(sql) + str(sys.exc_info()) + traceback.format_exc()}}\n\n\n    # Method which adds the review to the user's reviews (persists it to the database)\n    def addReview(self):\n\n        user_id = self.user_id\n        rating = self.rating\n        comment = self.comment\n        isbn = self.isbn\n\n        sql = \"INSERT INTO REVIEWS (USER_ID,RATING,COMMENT,ISBN) VALUES (\\'\" + str(user_id) + '\\',\\'' + str(rating) + '\\',\\'' + str(comment) + '\\',\\'' + str(isbn) + '\\');'\n\n        logging.info(sql)\n        # The only line of code that really does things (calls out to add the review to the database)\n        query_object = db.dbQuery(sql)\n        results = db.dbQuery.callDbInsert(query_object)\n\n        # Log details of the DB call\n        logging.debug(sql)\n        logging.debug('Result 113: ' + str(results))\n\n        # Handle successful response\n        if results['statusCode'] == 200:\n            logging.debug(results)\n            logging.info('Successfully added Review: ' + str(results))\n            return results\n\n        # Handle unsuccessful responses\n        elif results['statusCode'] == 400:\n            return results\n\n        else:\n            logging.warning('Unexpected Response received from DB callout')\n            logging.error(results)\n            return results\n\n\n    @staticmethod\n    def deleteReview(user_id, isbn):\n        try:\n            sql = \"DELETE FROM REVIEWS WHERE USER_ID = \" + str(user_id) + \" AND ISBN = \" + str(isbn)\n\n            # Calls database with constructed SQL from imported db class\n            query_object = db.dbQuery(sql)\n            result = db.dbQuery.callDbFetch(query_object)\n\n            # Log Results of DB call and return results\n            logging.debug(\"successful connect to db2\")\n            logging.info(\"response: \" + str(result))\n            return {\n                \"statusCode\": 200,\n                \"headers\": {\"Content-Type\": \"application/json\"},\n                \"body\": \"Success\"\n            }\n\n        except:\n            logging.error(\"Oops!\" + str(sys.exc_info()) + \"occurred. 
\")\n return {\n \"statusCode\": 400,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": {\"error\": str(sql) + str(sys.exc_info())}\n }\n","repo_name":"brandonwarech/book-tracker-capstone","sub_path":"classes/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6136214198","text":"import functools as fn\r\n\r\n\r\ndef sum(array):\r\n return f\"{fn.reduce(lambda a, b: a + b, array):.1f}\"\r\n\r\n\r\ndef media(array):\r\n return f\"{fn.reduce(lambda a, b: a + b, array)/len(array):.1f}\"\r\n\r\n\r\ndef coluna_input():\r\n try:\r\n result = int(input())\r\n if result > 11 or result < 0:\r\n raise ValueError\r\n except ValueError:\r\n print(f\"Linha inexistente de M[12][12]\")\r\n else:\r\n return result\r\n\r\n\r\ndef operacao(inp, array):\r\n if inp == \"S\":\r\n return sum(array)\r\n if inp == \"M\":\r\n return media(array)\r\n\r\n\r\ndef matriz():\r\n array = []\r\n inp = coluna_input()\r\n op = input()\r\n nest_list = []\r\n for i in range(0, 12):\r\n for j in range(0, 12):\r\n nest_list.append(float(input()))\r\n if j == 11:\r\n array.append(nest_list)\r\n nest_list = []\r\n coluna = list(map(lambda x: x[inp], array))\r\n return operacao(op, coluna)\r\n\r\n\r\ntry:\r\n print(matriz())\r\nexcept:\r\n pass\r\n","repo_name":"HeyLucasLeao/URI","sub_path":"1182.py","file_name":"1182.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1223790061","text":"from __future__ import annotations\n\nimport asyncio\nfrom asyncio import AbstractEventLoop\nimport datetime\nimport logging\nimport random\nfrom ssl import SSLContext\nimport time\nfrom typing import (\n Any,\n ClassVar,\n Optional,\n Tuple,\n Type,\n Union,\n)\n\nfrom aiogoogle import (\n Aiogoogle,\n GoogleAPI,\n HTTPError,\n)\nfrom aiogoogle.auth import (\n ApiKey,\n UserCreds,\n)\nfrom aiogoogle.auth.creds import ClientCreds\nimport aiogoogle.excs\nimport aiogoogle.models\nfrom aiogoogle.models import Response\nfrom aiogoogle.sessions.aiohttp_session import AiohttpSession\nfrom aiohttp import (\n ClientResponse,\n ClientTimeout,\n Fingerprint,\n)\nfrom aiohttp.typedefs import StrOrURL\nimport attr\nfrom yarl import URL\n\nfrom dl_constants.enums import UserDataType\nfrom dl_core.aio.web_app_services.gsheets import (\n Cell,\n GSheetsSettings,\n NumberFormatType,\n Range,\n Sheet,\n Spreadsheet,\n)\nfrom dl_file_uploader_lib import exc as file_upl_exc\nfrom dl_utils.aio import ContextVarExecutor\n\n\nLOGGER = logging.getLogger(__name__)\n\n\n# this is the dt from which all DATE and DATE_TIME values are calculated in gsheets\n# https://developers.google.com/sheets/api/guides/formats#about_date_time_values\nGSHEETS_EPOCH = datetime.datetime(\n year=1899,\n month=12,\n day=30,\n)\nURL_SAFE_CHARS = \"\" # we want to encode all symbols with no exceptions\n\n\ndef google_api_error_to_file_uploader_exception(err: HTTPError) -> file_upl_exc.DLFileUploaderBaseError:\n if not isinstance(err.res, Response):\n LOGGER.warning(f\"Unknown aiogoogle http error: {err}\")\n return file_upl_exc.DLFileUploaderBaseError(orig=err)\n\n orig_error_obj: dict[str, Any] = err.res.json.get(\"error\", {})\n if isinstance(orig_error_obj, dict):\n orig_status_code = orig_error_obj.get(\"code\", -1)\n orig_reason = orig_error_obj.get(\"status\")\n else:\n orig_status_code = err.res.status_code\n orig_reason = None\n\n 
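# For reference, the mapping applied below: 403 -> PermissionDenied,\n    # 404 -> DocumentNotFound, 400 + FAILED_PRECONDITION -> UnsupportedDocument,\n    # 5xx -> RemoteServerError; anything else falls back to DLFileUploaderBaseError.\n    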
err_cls: Type[file_upl_exc.DLFileUploaderBaseError]\n if orig_status_code == 403:\n err_cls = file_upl_exc.PermissionDenied\n elif orig_status_code == 404:\n err_cls = file_upl_exc.DocumentNotFound\n elif orig_status_code == 400 and orig_reason == \"FAILED_PRECONDITION\":\n err_cls = file_upl_exc.UnsupportedDocument\n elif orig_status_code >= 500:\n err_cls = file_upl_exc.RemoteServerError\n else:\n LOGGER.warning(f\"Unknown aiogoogle http error: {err}\")\n err_cls = file_upl_exc.DLFileUploaderBaseError\n\n return err_cls(\n orig=err,\n debug_info=err.res.json,\n details=dict(\n orig_error=orig_error_obj,\n ),\n )\n\n\n@attr.s\nclass GSheetsOAuth2:\n access_token: Optional[str] = attr.ib(repr=False)\n refresh_token: str = attr.ib(repr=False)\n\n\nclass AiohttpGSheetsSession(AiohttpSession):\n def __init__(\n self,\n *args: Any,\n proxy: Optional[StrOrURL] = None,\n proxy_headers: Optional[dict[str, str]] = None,\n ssl: Optional[Union[SSLContext, bool, Fingerprint]] = True,\n **kwargs: Any,\n ) -> None:\n super().__init__(*args, **kwargs)\n self.proxy = proxy\n self.proxy_headers = proxy_headers\n self.ssl = ssl\n self.last_response_size_bytes = -1\n\n async def _request(\n self,\n method: str,\n str_or_url: StrOrURL,\n **kwargs: Any,\n ) -> ClientResponse:\n url = URL(str_or_url)\n headers = kwargs.pop(\"headers\", {})\n if \"key\" in url.query:\n query_params = {**url.query}\n key = query_params.pop(\"key\")\n url = url.with_query(query_params)\n headers[\"X-goog-api-key\"] = key\n\n LOGGER.info(f\"Sending request: {method} {url}\")\n start_t = time.monotonic()\n resp = await super()._request(\n method,\n url,\n allow_redirects=False,\n proxy=self.proxy,\n proxy_headers=self.proxy_headers,\n ssl=self.ssl,\n headers=headers,\n **kwargs,\n )\n elapsed = time.monotonic() - start_t\n resp_text = await resp.read()\n self.last_response_size_bytes = len(resp_text)\n LOGGER.info(\n f\"Received response: {method} {url},\"\n f\" status_code: {resp.status},\"\n f\" elapsed: {elapsed:.6f} s,\"\n f\" payload_size: {self.last_response_size_bytes} bytes\"\n )\n return resp\n\n\ndef make_type(value: Any, user_type: UserDataType | str) -> Any:\n if value is None or value == \"\":\n return None\n if user_type == UserDataType.integer:\n if isinstance(value, str): # overflow\n return None\n return int(value)\n if user_type == UserDataType.float:\n if isinstance(value, str): # overflow\n return None\n return float(value)\n if user_type == UserDataType.boolean:\n return bool(value)\n if user_type in (UserDataType.date, UserDataType.genericdatetime, UserDataType.datetime, UserDataType.datetimetz):\n actual_dt = GSHEETS_EPOCH + datetime.timedelta(days=value)\n if user_type == UserDataType.date:\n dt_str = actual_dt.strftime(\"%Y-%m-%d\")\n else:\n dt_str = actual_dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n year, *rest = dt_str.split(\"-\")\n return f\"{year:>04}-\" + \"-\".join(rest) # years <1000 are not padded properly in strftime\n if user_type == \"time\":\n time_value = datetime.timedelta(days=value)\n hours, remainder = divmod(time_value.total_seconds(), 3600)\n minutes, seconds = divmod(remainder, 60)\n return f\"{int(hours):02}:{int(minutes):02}:{int(seconds):02}\"\n if user_type == UserDataType.string:\n return str(value)\n\n raise ValueError(f\"Type {user_type} is not supported here\")\n\n\n@attr.s\nclass GSheetsClient:\n settings: GSheetsSettings = attr.ib()\n _tpe: ContextVarExecutor = attr.ib()\n _loop: AbstractEventLoop = attr.ib(init=False, factory=asyncio.get_event_loop)\n auth: Optional[GSheetsOAuth2] 
= attr.ib(default=None)\n\n session_timeout: ClassVar[ClientTimeout] = ClientTimeout(total=180.0)\n\n last_request_size_bytes: int = attr.ib(init=False, default=-1)\n _aiogoogle: Optional[Aiogoogle] = attr.ib(init=False, default=None)\n _sheets_api: Optional[GoogleAPI] = attr.ib(init=False, default=None)\n\n async def __aenter__(self: GSheetsClient) -> GSheetsClient:\n self._aiogoogle = Aiogoogle(\n session_factory=lambda: AiohttpGSheetsSession(\n timeout=self.session_timeout,\n headers={\n \"user-agent\": \"DataLens\",\n \"Accept\": \"application/json\",\n },\n ),\n api_key=ApiKey(self.settings.api_key),\n client_creds=ClientCreds(\n client_id=self.settings.client_id,\n client_secret=self.settings.client_secret,\n ),\n )\n await self._aiogoogle.__aenter__()\n return self\n\n async def __aexit__(self, *args: Any) -> None:\n assert self._aiogoogle, \"Aiogoogle is not initialized for the client\"\n await self._aiogoogle.__aexit__(*args)\n\n def _process_values(\n self, raw_values: list[list[Any]], user_types: list[UserDataType | str]\n ) -> Tuple[list[list[Any]], list[UserDataType | str]]:\n \"\"\"\n Tries to convert values to passed BITypes and falls back to string when fails to do so\n But the fallback happens only on return, i.e. it tries to convert all values to the original passed type\n Values are processed inplace, types are copied\n \"\"\"\n\n new_user_types = user_types.copy()\n for row_idx, row in enumerate(raw_values):\n width_diff = len(user_types) - len(row)\n if width_diff > 0:\n row.extend([None] * width_diff)\n else:\n row = row[: len(user_types)]\n for col_idx, (value, user_type) in enumerate(zip(row, user_types)):\n try:\n raw_values[row_idx][col_idx] = make_type(value, user_type)\n except (ValueError, TypeError):\n new_user_types[col_idx] = UserDataType.string\n raw_values[row_idx][col_idx] = str(value)\n except OverflowError:\n raw_values[row_idx][col_idx] = None\n return raw_values, new_user_types\n\n def _get_cell_data(self, value: dict[str, Any]) -> Cell:\n top_level_value_keys = (\"userEnteredValue\", \"effectiveValue\", \"formattedValue\")\n if not value or not any(value_key in value for value_key in top_level_value_keys):\n return Cell(\n value=None,\n number_format_type=NumberFormatType.NUMBER_FORMAT_TYPE_UNSPECIFIED,\n empty=True,\n )\n number_format = NumberFormatType(\n value.get(\"effectiveFormat\", {}).get(\n \"numberFormat\", {\"type\": NumberFormatType.NUMBER_FORMAT_TYPE_UNSPECIFIED}\n )[\"type\"]\n )\n\n extended_value_key = (\n \"formattedValue\"\n if number_format\n in (\n NumberFormatType.TIME,\n NumberFormatType.NUMBER_FORMAT_TYPE_UNSPECIFIED,\n NumberFormatType.TEXT,\n )\n else \"effectiveValue\"\n )\n if \"effectiveValue\" in value and \"boolValue\" in value[\"effectiveValue\"]:\n # bool values may be marked with any number format\n extended_value = value[\"effectiveValue\"]\n else:\n extended_value = value.get(extended_value_key, None)\n\n if isinstance(extended_value, dict): # formattedValue is a string\n value_key = list(extended_value.keys())[0]\n actual_value = extended_value[value_key]\n if value_key == \"boolValue\":\n number_format = NumberFormatType.BOOLEAN\n else:\n actual_value = extended_value\n\n NUMBER_FORMATS = (\n NumberFormatType.NUMBER,\n NumberFormatType.PERCENT,\n NumberFormatType.CURRENCY,\n NumberFormatType.SCIENTIFIC,\n )\n\n if number_format in NUMBER_FORMATS:\n if isinstance(actual_value, float) or actual_value is None:\n number_format = NumberFormatType.FLOAT\n elif isinstance(actual_value, int):\n number_format = 
NumberFormatType.INTEGER\n elif isinstance(actual_value, str):\n try:\n _ = float(actual_value)\n except ValueError:\n # not a number\n number_format = NumberFormatType.TEXT\n else:\n # if the conversion was successful, this means that the number is too big to be encoded in json\n actual_value = None\n number_format = NumberFormatType.FLOAT\n else:\n actual_value = str(actual_value)\n number_format = NumberFormatType.TEXT\n\n if number_format in (NumberFormatType.DATE, NumberFormatType.DATE_TIME):\n if isinstance(actual_value, (int, float)):\n gsheets_internal_value = actual_value\n\n try:\n actual_dt = GSHEETS_EPOCH + datetime.timedelta(days=gsheets_internal_value)\n if number_format == NumberFormatType.DATE:\n actual_value = actual_dt.strftime(\"%Y-%m-%d\")\n elif number_format == NumberFormatType.DATE_TIME:\n actual_value = actual_dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n except OverflowError:\n actual_value = None\n else:\n number_format = NumberFormatType.TEXT\n\n return Cell(\n value=actual_value,\n number_format_type=number_format,\n )\n\n def _get_sheet_data(self, sheet: dict[str, Any], num_rows: Optional[int] = None) -> list[list[Cell]]:\n data: list[list[Cell]] = []\n rows_read = 0\n for rowdata in sheet[\"data\"][0].get(\"rowData\", []):\n if num_rows is not None and rows_read >= num_rows:\n break\n processed_rowdata = [self._get_cell_data(value) for value in rowdata.get(\"values\", [])]\n actual_data_ends_at = len(processed_rowdata) - 1\n while actual_data_ends_at > 0 and processed_rowdata[actual_data_ends_at].empty:\n actual_data_ends_at -= 1\n processed_rowdata = processed_rowdata[: actual_data_ends_at + 1]\n data.append(processed_rowdata)\n rows_read += 1\n if data:\n n_cols = max(len(row) for row in data)\n for idx, row in enumerate(data): # filling incomplete rows with NULLs and trimming too wide rows\n diff = n_cols - len(row)\n if diff > 0:\n data[idx].extend([Cell(value=None, empty=True)] * diff)\n elif diff < 0:\n data[idx] = row[:n_cols]\n return data\n\n async def _require_init(self) -> None:\n if self._aiogoogle is None:\n raise ValueError(\"Aiogoogle is not set up for the client. 
It must be used as an async context manager.\")\n if self._sheets_api is None:\n self._sheets_api = await self._aiogoogle.discover(\"sheets\", \"v4\")\n\n def _is_retryable_status(self, status_code: int) -> bool:\n return status_code in (408, 429) or status_code >= 500\n\n def _raise_retryable(self, err: aiogoogle.excs.HTTPError) -> None:\n \"\"\"Makes an exception based on the passed error response and raises\"\"\"\n\n err_resp_json: dict[str, Any] = err.res.json or {}\n details: list[dict] = err_resp_json[\"error\"].get(\"details\", [])\n\n if err.res.status_code == 429:\n err_info: dict[str, Any] = next((item for item in details if item[\"reason\"] == \"RATE_LIMIT_EXCEEDED\"), {})\n quota_err_cls = {\n \"ReadRequestsPerMinutePerProject\": file_upl_exc.QuotaExceededReadRequestsPerMinutePerProject,\n \"ReadRequestsPerMinutePerUser\": file_upl_exc.QuotaExceededReadRequestsPerMinutePerUser,\n }.get(\n err_info.get(\"metadata\", {}).get(\"quota_limit\"),\n file_upl_exc.QuotaExceeded,\n )\n raise quota_err_cls(debug_info=err_resp_json)\n if err.res.status_code >= 500:\n raise file_upl_exc.RemoteServerError(debug_info=err_resp_json)\n raise err\n\n async def _make_request(self, request: aiogoogle.models.Request) -> dict:\n await self._require_init()\n assert self._aiogoogle is not None\n\n # exponential backoff\n maximum_backoff = 64 # maximum number of seconds to wait between retries\n deadline = 220 # maximum number of seconds to keep sending requests\n current_backoff_sum = 0\n current_try = 0\n resp_json: Optional[dict] = None\n while True:\n try:\n if self.auth is not None:\n # Aiogoogle saves user creds on refresh and reuses them\n # But if we pass incomplete creds smth will go wrong, so it is better not to interfere\n if self._aiogoogle.user_creds is None:\n user_creds = UserCreds(\n access_token=self.auth.access_token, refresh_token=self.auth.refresh_token\n )\n else:\n user_creds = None\n resp_json = await self._aiogoogle.as_user(request, user_creds=user_creds) # type: ignore\n else:\n resp_json = await self._aiogoogle.as_api_key(request) # type: ignore\n break\n except aiogoogle.excs.HTTPError as err:\n if self._is_retryable_status(err.res.status_code):\n LOGGER.error(err)\n if current_backoff_sum > deadline:\n self._raise_retryable(err)\n backoff_seconds = min(2**current_try + random.uniform(0, 1), maximum_backoff)\n current_backoff_sum += backoff_seconds\n LOGGER.info(\n f\"Got status {err.res.status_code} on try {current_try},\"\n f\" going to back off for {backoff_seconds:.3f} seconds\"\n )\n current_try += 1\n await asyncio.sleep(backoff_seconds)\n continue\n raise\n\n assert isinstance(resp_json, dict), resp_json\n session = self._aiogoogle.session_context.get()\n assert isinstance(session, AiohttpGSheetsSession)\n self.last_response_size_bytes = session.last_response_size_bytes\n return resp_json\n\n async def _request_spreadsheet(self, spreadsheet_id: str, include_data: bool, range: Optional[str] = None) -> dict:\n await self._require_init()\n assert self._sheets_api is not None\n\n req_params = dict(\n spreadsheetId=spreadsheet_id,\n includeGridData=include_data,\n )\n if range is not None:\n req_params[\"ranges\"] = range\n req_params[\"path_params_safe_chars\"] = {\"ranges\": URL_SAFE_CHARS}\n\n return await self._make_request(self._sheets_api.spreadsheets.get(**req_params))\n\n async def _request_values(self, spreadsheet_id: str, range: str) -> dict:\n await self._require_init()\n assert self._sheets_api is not None\n\n req_params = dict(\n spreadsheetId=spreadsheet_id,\n 
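# range is an A1-notation string, e.g. \"Sheet1!A1:C50\" (illustrative value)\n            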
range=range,\n dateTimeRenderOption=\"SERIAL_NUMBER\",\n majorDimension=\"ROWS\",\n valueRenderOption=\"UNFORMATTED_VALUE\",\n path_params_safe_chars={\"range\": URL_SAFE_CHARS},\n )\n\n return await self._make_request(self._sheets_api.spreadsheets.values.get(**req_params))\n\n async def get_spreadsheet(\n self,\n spreadsheet_id: str,\n include_data: bool = True,\n num_rows: Optional[int] = None,\n ) -> Spreadsheet:\n \"\"\"\n :param spreadsheet_id: ID of the spreadsheet to get, e.g. 1rnUFa7AiSKD5O80IKCvMy2cSZvLU1kRw9dxbtZbDMWc\n :param include_data: whether to include actual data or just save spreadsheet and sheet properties\n :param num_rows: number of rows to get, all rows if None (default), ignored when included_data is False\n :return: Spreadsheet object with properties and data if it is requested\n \"\"\"\n\n resp_json = await self._request_spreadsheet(spreadsheet_id=spreadsheet_id, include_data=include_data)\n\n sheets = []\n for sheet in resp_json.get(\"sheets\", []):\n sheet_properties = sheet[\"properties\"]\n data: Optional[list[list[Cell]]]\n if include_data:\n data = await self._loop.run_in_executor(self._tpe, self._get_sheet_data, sheet, num_rows)\n else:\n data = None\n sheets.append(\n Sheet(\n id=sheet_properties[\"sheetId\"],\n index=sheet_properties[\"index\"],\n title=sheet_properties[\"title\"],\n row_count=sheet_properties[\"gridProperties\"][\"rowCount\"],\n column_count=sheet_properties[\"gridProperties\"][\"columnCount\"],\n data=data,\n )\n )\n return Spreadsheet(\n id=resp_json[\"spreadsheetId\"],\n url=resp_json[\"spreadsheetUrl\"],\n title=resp_json[\"properties\"][\"title\"],\n sheets=sheets,\n )\n\n async def get_spreadsheet_sample(self, spreadsheet_id: str, sample_rows: Optional[int] = None) -> Spreadsheet:\n \"\"\"\n :param spreadsheet_id: spreadsheetId\n :param sample_rows: rows to request from every sheet (for values), default is `2600 / column count * 50` (~4 MB in each response, Google recommends 2 MB)\n :return: Spreadsheet instance\n \"\"\"\n\n resp_json = await self._request_spreadsheet(spreadsheet_id=spreadsheet_id, include_data=False)\n\n sheets = []\n for sheet in resp_json.get(\"sheets\", []):\n sheet_properties = sheet[\"properties\"]\n sheets.append(\n Sheet(\n id=sheet_properties[\"sheetId\"],\n index=sheet_properties[\"index\"],\n title=sheet_properties[\"title\"],\n row_count=sheet_properties[\"gridProperties\"][\"rowCount\"],\n column_count=sheet_properties[\"gridProperties\"][\"columnCount\"],\n data=None,\n )\n )\n spreadsheet_meta = Spreadsheet(\n id=resp_json[\"spreadsheetId\"],\n url=resp_json[\"spreadsheetUrl\"],\n title=resp_json[\"properties\"][\"title\"],\n sheets=sheets,\n )\n\n for idx, sheet in enumerate(spreadsheet_meta.sheets):\n sample_rows_to_request = 2600 // sheet.column_count if sample_rows is None else sample_rows\n sample_range = Range(\n sheet_title=sheet.title,\n row_from=1,\n col_from=1,\n row_to=sample_rows_to_request,\n col_to=sheet.column_count,\n )\n sheet_sample = await self.get_single_range(\n spreadsheet_id=spreadsheet_id,\n range=sample_range,\n )\n\n sheet_sample.batch_size_rows = sample_rows_to_request * 50 # for values\n sheet_sample.column_count = len(sheet_sample.data[0]) if sheet_sample.data else 0\n\n spreadsheet_meta.sheets[idx] = sheet_sample\n\n return spreadsheet_meta\n\n async def get_single_range(self, spreadsheet_id: str, range: Range) -> Sheet:\n resp_json = await self._request_spreadsheet(spreadsheet_id=spreadsheet_id, include_data=True, range=str(range))\n\n sheet_json = 
resp_json[\"sheets\"][0]\n sheet_properties = sheet_json[\"properties\"]\n sheet_data = await self._loop.run_in_executor(self._tpe, self._get_sheet_data, sheet_json)\n sheet = Sheet(\n id=sheet_properties[\"sheetId\"],\n index=sheet_properties[\"index\"],\n title=sheet_properties[\"title\"],\n row_count=sheet_properties[\"gridProperties\"][\"rowCount\"],\n column_count=sheet_properties[\"gridProperties\"][\"columnCount\"],\n data=sheet_data,\n )\n return sheet\n\n async def get_single_values_range(\n self, spreadsheet_id: str, range: Range, user_types: list[UserDataType | str]\n ) -> Tuple[list[list[Any]], list[UserDataType | str]]:\n resp_json = await self._request_values(spreadsheet_id=spreadsheet_id, range=str(range))\n\n raw_values = resp_json.get(\"values\", [])\n values, updated_user_types = await self._loop.run_in_executor(\n self._tpe,\n self._process_values,\n raw_values,\n user_types,\n )\n\n return values, updated_user_types\n","repo_name":"datalens-tech/datalens-backend","sub_path":"lib/dl_file_uploader_lib/dl_file_uploader_lib/gsheets_client.py","file_name":"gsheets_client.py","file_ext":"py","file_size_in_byte":23051,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"9858539350","text":"import gzip\nimport sys\nimport os\nimport binascii\nimport datetime\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nimport numpy as np\n\nclass Utils:\n\n def DT(year, month, day, hour=0, minute=0, second=0):\n return datetime.datetime(year, month, day, hour, minute, second, 0)\n\n def getDT(d, t, micros=False):\n if \"/\" in d:\n dv = d.split(\"/\")\n year, month, day = int(dv[2]), int(dv[0]), int(dv[1])\n else:\n dv = d.split(\"-\")\n year, month, day = int(dv[0]), int(dv[1]), int(dv[2])\n tv = t.split(\":\")\n try:\n h, m, s = int(tv[0]), int(tv[1]), int(tv[2].split(\".\")[0])\n us = 0 if micros == False else int(tv[2].split(\".\")[-1])*1000\n except:\n print(\"Error parsing datetime time=%s, date=%s\" % (t, d))\n return datetime.datetime(2018,1,1)\n return datetime.datetime(year, month, day, h,m,s,us)\n\n def parse(line):\n # trades data : date, time, price, qty, flag -> datetime, price , qty\n if type(line) != str: line = line.decode(\"ascii\")\n dv = line.strip().split(\",\")\n return OHLC(Utils.getDT(dv[0].split(\" \")[0],dv[0].split(\" \")[1]),\n float(dv[1]), float(dv[2]), float(dv[3]),float(dv[4]),int(dv[5]),float(dv[6]))\n\n\nclass OHLC:\n\n def __init__(self, ts0, O, H, L, C, T, V, ts=None):\n self.ts0, self.O, self.H, self.L, self.C, self.T, self.V, self.ts = ts0, O, H, L, C, T, V, ts\n if self.ts is None : self.ts = self.ts0\n\n def __str__(self):\n return (\"%s,%10.6f,%10.6f,%10.6f,%10.6f,%6d,%6d\") % (str(self.ts0),self.O, self.H, self.L, self.C, self.T, int(self.V))\n\nclass ReadOHLC:\n\n def read(infile):\n records=[]\n with gzip.open(infile) as rdr:\n for line in rdr:\n records.append(Utils.parse(line))\n return records\n\n\nclass PlotEvent:\n\n def __init__(self, data, lowerBound, upperBound, eventTime, contract, event):\n l , u , e = PlotEvent.getIx(data, lowerBound), \\\n PlotEvent.getIx(data,upperBound), \\\n PlotEvent.getIx(data, eventTime)\n dates = [data[i].ts0 for i in range(l, u)]\n Ov = [data[i].O for i in range(l, u)]\n Cv = [data[i].C for i in range(l, u)]\n minO, minC = np.min(Ov), np.min(Cv)\n maxO, maxC = np.max(Ov), np.max(Cv)\n minV = np.min([minO, minC])\n maxV = np.max([maxO, maxC])\n\n eventInData = data[e].ts0\n eventDate = 
eventTime.strftime(\"%m/%d/%Y\")\n        title = \"%s %s %s\" % (contract, event, eventDate)\n        \n        fig, ax = plt.subplots()\n        ax.set(title=title)\n        ax.plot(dates, Ov, 'g')\n        ax.plot(dates, Cv, 'b')\n        ax.plot([eventInData, eventInData], [minV, maxV], 'r')\n        timeFmt = DateFormatter(\"%H:%M:%S\")\n        ax.xaxis.set_major_formatter(timeFmt)\n        plt.show()\n\n    def getIx(data, date):\n        # binary search for the index of `date` in the time-sorted records\n        b, e = 0, len(data)\n        while True:\n            m = int((b + e)/2)\n            d = data[m].ts0\n            if d == date:\n                return m\n            if date < d: e = m\n            else: b = m\n            if e - b <= 1:\n                return b\n\ndef main():\n    if len(sys.argv) < 5:\n        print(\"usage : plotEvent.py <ohlc_csv_gz> <event> <YYYYMMDD> <HH:MM>\")\n        quit()\n    infile = sys.argv[1]\n    event = sys.argv[2]\n    eventDate = sys.argv[3]\n    eventHour = int(sys.argv[4].split(':')[0])\n    eventMinute = int(sys.argv[4].split(':')[1])\n    lb = Utils.DT(int(eventDate[0:4]), int(eventDate[4:6]), int(eventDate[6:8]))\n    ub = lb + datetime.timedelta(days=1)\n    eventTime = Utils.DT(lb.year, lb.month, lb.day, eventHour, eventMinute)\n    data = ReadOHLC.read(infile)\n    contract = os.path.splitext(os.path.basename(infile))[0].split(\"_\")[0]\n    PlotEvent(data, lb, ub, eventTime, contract, event)\n\n\nif __name__ == '__main__':\n    # example usage\n    # python ./plotEvent.py ../data/UB_OHLC_30.csv.gz non-farm-payrolls 20190201 7:30\n    main()\n\n\n","repo_name":"jrrpanix/SocialNetworks","sub_path":"src/attic/plotEvent.py","file_name":"plotEvent.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3595560242","text":"from copy import deepcopy\nfrom typing import Any, TYPE_CHECKING\n\nfrom azure.core.rest import HttpRequest, HttpResponse\nfrom azure.mgmt.core import ARMPipelineClient\n\nfrom . import models as _models\nfrom ._configuration import PolicyInsightsClientConfiguration\nfrom ._serialization import Deserializer, Serializer\nfrom .operations import (\n    AttestationsOperations,\n    ComponentPolicyStatesOperations,\n    Operations,\n    PolicyEventsOperations,\n    PolicyMetadataOperations,\n    PolicyRestrictionsOperations,\n    PolicyStatesOperations,\n    PolicyTrackedResourcesOperations,\n    RemediationsOperations,\n)\n\nif TYPE_CHECKING:\n    # pylint: disable=unused-import,ungrouped-imports\n    from azure.core.credentials import TokenCredential\n\n\nclass PolicyInsightsClient:  # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes\n    \"\"\"Query component policy states at varying resource scopes for Resource Provider mode policies.\n\n    :ivar policy_tracked_resources: PolicyTrackedResourcesOperations operations\n    :vartype policy_tracked_resources:\n     azure.mgmt.policyinsights.operations.PolicyTrackedResourcesOperations\n    :ivar remediations: RemediationsOperations operations\n    :vartype remediations: azure.mgmt.policyinsights.operations.RemediationsOperations\n    :ivar policy_events: PolicyEventsOperations operations\n    :vartype policy_events: azure.mgmt.policyinsights.operations.PolicyEventsOperations\n    :ivar policy_states: PolicyStatesOperations operations\n    :vartype policy_states: azure.mgmt.policyinsights.operations.PolicyStatesOperations\n    :ivar policy_metadata: PolicyMetadataOperations operations\n    :vartype policy_metadata: azure.mgmt.policyinsights.operations.PolicyMetadataOperations\n    :ivar policy_restrictions: PolicyRestrictionsOperations operations\n    :vartype policy_restrictions: azure.mgmt.policyinsights.operations.PolicyRestrictionsOperations\n    :ivar component_policy_states: ComponentPolicyStatesOperations operations\n    :vartype component_policy_states:\n     
azure.mgmt.policyinsights.operations.ComponentPolicyStatesOperations\n :ivar operations: Operations operations\n :vartype operations: azure.mgmt.policyinsights.operations.Operations\n :ivar attestations: AttestationsOperations operations\n :vartype attestations: azure.mgmt.policyinsights.operations.AttestationsOperations\n :param credential: Credential needed for the client to connect to Azure. Required.\n :type credential: ~azure.core.credentials.TokenCredential\n :param subscription_id: Microsoft Azure subscription ID. Required.\n :type subscription_id: str\n :param base_url: Service URL. Default value is \"https://management.azure.com\".\n :type base_url: str\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no\n Retry-After header is present.\n \"\"\"\n\n def __init__(\n self,\n credential: \"TokenCredential\",\n subscription_id: str,\n base_url: str = \"https://management.azure.com\",\n **kwargs: Any\n ) -> None:\n self._config = PolicyInsightsClientConfiguration(\n credential=credential, subscription_id=subscription_id, **kwargs\n )\n self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._deserialize = Deserializer(client_models)\n self._serialize.client_side_validation = False\n self.policy_tracked_resources = PolicyTrackedResourcesOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.remediations = RemediationsOperations(self._client, self._config, self._serialize, self._deserialize)\n self.policy_events = PolicyEventsOperations(self._client, self._config, self._serialize, self._deserialize)\n self.policy_states = PolicyStatesOperations(self._client, self._config, self._serialize, self._deserialize)\n self.policy_metadata = PolicyMetadataOperations(self._client, self._config, self._serialize, self._deserialize)\n self.policy_restrictions = PolicyRestrictionsOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.component_policy_states = ComponentPolicyStatesOperations(\n self._client, self._config, self._serialize, self._deserialize\n )\n self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)\n self.attestations = AttestationsOperations(self._client, self._config, self._serialize, self._deserialize)\n\n def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:\n \"\"\"Runs the network request through the client's chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest(\"GET\", \"https://www.example.org/\")\n \n >>> response = client._send_request(request)\n \n\n For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. 
Does not do error handling on your response.\n :rtype: ~azure.core.rest.HttpResponse\n \"\"\"\n\n request_copy = deepcopy(request)\n request_copy.url = self._client.format_url(request_copy.url)\n return self._client.send_request(request_copy, **kwargs)\n\n def close(self) -> None:\n self._client.close()\n\n def __enter__(self) -> \"PolicyInsightsClient\":\n self._client.__enter__()\n return self\n\n def __exit__(self, *exc_details) -> None:\n self._client.__exit__(*exc_details)\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/_policy_insights_client.py","file_name":"_policy_insights_client.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"30218963685","text":"\"\"\"\nModule: feature_views.py\nDescription: This module contains Django views for managing features.\n\nDependencies:\n- Django (django.contrib.gis.geos, django.http)\n- drf_yasg (openapi, utils)\n- rest_framework (permissions, generics, response)\n- geoservice (features.models, features.serializers)\n- utils (logger.Error, pagination.paginator, response.wrapper)\n\nClasses:\n- FeatureBaseAPIView: Base class for feature views. Provides common attributes and settings for feature views.\n- FeatureListAPIView: View for listing features. Supports filtering by bounding box coordinates.\n- FeatureRetrieveUpdateDestroyAPIView: View for retrieving, updating, and deleting a single feature.\n\n\"\"\"\n\nfrom django.contrib.gis.geos import Polygon\nfrom django.http import Http404\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import permissions, generics\nfrom rest_framework.response import Response\n\nfrom geoservice.features.models import Feature\nfrom geoservice.features.serializers import FeatureGeneralSerializer, FeatureUpdateSerializer\nfrom utils.logger.Error import ErrorLogger\nfrom utils.pagination.paginator import FeaturePagination\nfrom utils.response.wrapper import ResponseWrapper\nfrom utils.constants import *\n\n\nclass FeatureBaseAPIView:\n \"\"\"\n Base class for feature views. Provides common attributes and settings for feature views.\n \"\"\"\n permission_classes = (permissions.IsAuthenticated,)\n response_wrapper = ResponseWrapper\n error_logger = ErrorLogger\n http_method_names = [\"get\", \"post\", \"patch\", \"delete\"]\n queryset = Feature.objects.all()\n serializer_class = FeatureGeneralSerializer\n response_serializer_class = FeatureGeneralSerializer\n pagination_class = FeaturePagination\n lookup_field = 'id'\n\n\nclass FeatureListAPIView(FeatureBaseAPIView, generics.ListAPIView):\n \"\"\"\n View for listing features. 
Supports filtering by bounding box coordinates.\n \"\"\"\n\n @swagger_auto_schema(manual_parameters=[\n openapi.Parameter('xmin', openapi.IN_QUERY, type=openapi.TYPE_NUMBER),\n openapi.Parameter('ymin', openapi.IN_QUERY, type=openapi.TYPE_NUMBER),\n openapi.Parameter('xmax', openapi.IN_QUERY, type=openapi.TYPE_NUMBER),\n openapi.Parameter('ymax', openapi.IN_QUERY, type=openapi.TYPE_NUMBER)\n ])\n def get(self, request, *args, **kwargs):\n \"\"\"\n Handle GET requests to retrieve a list of features.\n\n Parameters:\n - request: The HTTP request object.\n - args: Additional positional arguments.\n - kwargs: Additional keyword arguments.\n\n Returns:\n - Response: The HTTP response containing the list of features.\n\n \"\"\"\n try:\n self.get_serializer()\n response = super().get(request, *args, **kwargs)\n return Response(\n **self.response_wrapper(\n SUCCESS_CODES.get(\n 'FEATURE_LIST_SUCCESS', DEFAULT_SUCCESS_CODE\n ), response.data\n ).formatted_output_success()\n )\n except Exception as err:\n self.error_logger.log_unexpected_error(err, dict(), 'E500', request.get_full_path())\n return Response(\n **self.response_wrapper(\n ERROR_CODES.get('UNKNOWN_ERROR', DEFAULT_ERROR_CODE)\n ).formatted_output_error()\n )\n\n def get_queryset(self):\n \"\"\"\n Get the queryset for retrieving the list of features.\n\n Returns:\n - queryset: The queryset for retrieving the list of features.\n \"\"\"\n queryset = super().get_queryset()\n xmin = self.request.query_params.get('xmin')\n ymin = self.request.query_params.get('ymin')\n xmax = self.request.query_params.get('xmax')\n ymax = self.request.query_params.get('ymax')\n\n if xmin and ymin and xmax and ymax:\n bbox_polygon = Polygon.from_bbox((xmin, ymin, xmax, ymax))\n queryset = queryset.filter(features__intersects=bbox_polygon)\n return queryset\n\n\nclass FeatureRetrieveUpdateDestroyAPIView(FeatureBaseAPIView, generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n View for retrieving, updating, and deleting a single feature.\n \"\"\"\n\n serializer_class = FeatureUpdateSerializer\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Handle GET requests to retrieve a single feature.\n\n Parameters:\n - request: The HTTP request object.\n - args: Additional positional arguments.\n - kwargs: Additional keyword arguments.\n\n Returns:\n - Response: The HTTP response containing the retrieved feature.\n\n \"\"\"\n try:\n response = self.retrieve(request, *args, **kwargs)\n return Response(\n **self.response_wrapper(\n SUCCESS_CODES.get(\n 'FEATURE_RETRIEVE_SUCCESS', DEFAULT_SUCCESS_CODE\n ),\n response.data).formatted_output_success()\n )\n except Http404:\n return Response(\n **self.response_wrapper(\n ERROR_CODES.get('FEATURE_INVALID_ID', DEFAULT_ERROR_CODE)\n ).formatted_output_error()\n )\n except Exception as err:\n self.error_logger.log_unexpected_error(err, dict(), 'E500', request.get_full_path())\n return Response(\n **self.response_wrapper(\n ERROR_CODES.get('UNKNOWN_ERROR', DEFAULT_ERROR_CODE)\n ).formatted_output_error()\n )\n\n def patch(self, request, *args, **kwargs):\n \"\"\"\n Handle PATCH requests to partially update a single feature.\n\n Parameters:\n - request: The HTTP request object.\n - args: Additional positional arguments.\n - kwargs: Additional keyword arguments.\n\n Returns:\n - Response: The HTTP response indicating the success of the partial update.\n\n \"\"\"\n try:\n response = self.partial_update(request, *args, **kwargs)\n return Response(\n **self.response_wrapper(\n SUCCESS_CODES.get(\n 'FEATURE_UPDATE_SUCCESS', 
DEFAULT_SUCCESS_CODE\n                    ), response.data\n                ).formatted_output_success()\n            )\n        except Http404:\n            return Response(\n                **self.response_wrapper(\n                    ERROR_CODES.get('FEATURE_INVALID_ID', DEFAULT_ERROR_CODE)\n                ).formatted_output_error()\n            )\n        except Exception as err:\n            self.error_logger.log_unexpected_error(err, dict(), 'E500', request.get_full_path())\n            return Response(\n                **self.response_wrapper(\n                    ERROR_CODES.get('UNKNOWN_ERROR', DEFAULT_ERROR_CODE)\n                ).formatted_output_error()\n            )\n\n    def delete(self, request, *args, **kwargs):\n        \"\"\"\n        Handle DELETE requests to delete a single feature.\n\n        Parameters:\n        - request: The HTTP request object.\n        - args: Additional positional arguments.\n        - kwargs: Additional keyword arguments.\n\n        Returns:\n        - Response: The HTTP response indicating the success of the deletion.\n\n        \"\"\"\n        try:\n            response = self.destroy(request, *args, **kwargs)\n            return Response(\n                **self.response_wrapper(\n                    SUCCESS_CODES.get('FEATURE_DELETE_SUCCESS', DEFAULT_SUCCESS_CODE),\n                    response.data).formatted_output_success()\n            )\n        except Http404:\n            return Response(\n                **self.response_wrapper(\n                    ERROR_CODES.get('FEATURE_INVALID_ID', DEFAULT_ERROR_CODE)\n                ).formatted_output_error()\n            )\n        except Exception as err:\n            self.error_logger.log_unexpected_error(err, dict(), 'E500', request.get_full_path())\n            return Response(\n                **self.response_wrapper(\n                    ERROR_CODES.get('UNKNOWN_ERROR', DEFAULT_ERROR_CODE)\n                ).formatted_output_error()\n            )\n","repo_name":"Alvi-Rahman/AI_Infrasolutions_Assesment","sub_path":"geoservice/features/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35297923219","text":"def count_substring(str,substr) :\n\tcount = 0\n\tfor i in range(0,len(str) - len(substr) + 1) :\n\t\tif str[i] == substr[0] :\n\t\t\tflag = 1\n\t\t\tfor j in range(0,len(substr)) :\n\t\t\t\tif str[i+j] != substr[j] :\n\t\t\t\t\tflag = 0\n\t\t\t\t\tbreak\n\t\t\tif flag == 1 :\n\t\t\t\tcount += 1\n\t\n\treturn count\n\t\t\t\nif __name__ == '__main__' :\n\tstring = input().strip()\n\tsub_string = input().strip()\n\tcount = count_substring(string,sub_string)\n\tprint(count)\n","repo_name":"Vishal-raj-1/Language-Proficiency-In-Python3","sub_path":"String/Find_a_string.py","file_name":"Find_a_string.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6721679244","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/10/22 20:37\nimport logging\n\nimport pytest\n\nlogging.basicConfig(\n    level=logging.INFO,\n    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n    datefmt='%a, %d %b %Y %H:%M:%S',\n    filename='report.log',\n    filemode='w'\n)\n\nlogger = logging.getLogger(__name__)\n\n\n# Purpose: re-encode test case ids so that test cases can be given Chinese names;\n# hook functions do not need to be registered as fixtures.\ndef pytest_collection_modifyitems(session, config, items):\n    for item in items:\n        # item.name: the name of the test case\n        item.name = item.name.encode('utf-8').decode('unicode-escape')\n        # item._nodeid: the path of the test case\n        item._nodeid = item.nodeid.encode('utf-8').decode('unicode-escape')\n\n        if 'login' in item.nodeid:\n            item.add_marker(pytest.mark.login)\n\n    items.reverse()\n\n\n\ndef pytest_addoption(parser, pluginmanager):\n    # Create an option group; all options below are shown under this group name.\n    mygroup = parser.getgroup('school')\n    mygroup.addoption('--env',  # register a command-line option\n                      default = 'test',  # the option's default value\n                      dest = 'env',  # the variable the value is stored in\n                      help='set your run env 2021.')  # help text describing the option\n
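# usage (illustrative): pytest --env dev\n# the cmdoption fixture below reads the chosen value via request.config.getoption\n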
\n@pytest.fixture(scope='session')\ndef cmdoption(request):\n    envValue = request.config.getoption('--env', default='test')\n    if envValue == 'test':\n        print('env is test!')\n    elif envValue == 'dev':\n        print('env is develop')\n    return envValue","repo_name":"hanzway/py-learn","sub_path":"com/xch01/tasks/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28396972877","text":"def dfs(current):\n    seen[current] = True\n    for i in al[current]:\n        if seen[i] and not finished[i]:\n            return False\n        res = dfs(i)\n        if not res:\n            path.append(i)\n            return res\n    finished[current] = True\n    return True\n\n\nn, m = map(int, input().split())\nal = [list() for _ in range(n)]\nfor _ in range(m):\n    a, b = map(int, input().split())\n    al[a].append(b) \nflg = True\npath = list()\nfor i in range(n):\n    seen = [False]*n\n    finished = [False]*n\n    flg = dfs(i)\n    if not flg:\n        path.append(i)\n        break\nif flg:\n    print('no cycle found')\nelse:\n    print('cycle')\n    path.reverse()\n    print(path)\n","repo_name":"blueletter123456789/book-algo-data-structure","sub_path":"sec13/p1306.py","file_name":"p1306.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26775564622","text":"from collections import Counter\nfrom typing import Tuple, Dict\n\n\nclass TextToolsMixin:\n    \"\"\"Helper functions used in NLP for DA and DAM\"\"\"\n\n    def get_vocabulary(\n        self, min_freq: int = 1, text_attrs: Tuple[str, ...] = ('text',)\n    ) -> Dict[str, int]:\n        \"\"\"Get the text vocabulary in a dict that maps from the word to the index from all Documents.\n\n        :param text_attrs: the textual attributes where vocabulary will be derived from\n        :param min_freq: the minimum word frequency to be considered into the vocabulary.\n        :return: a vocabulary in dictionary where key is the word, value is the index. 
The value is 2-index, where\n `0` is reserved for padding, `1` is reserved for unknown token.\n \"\"\"\n\n all_tokens = Counter()\n for d in self:\n all_tokens.update(d.get_vocabulary(text_attrs=text_attrs))\n\n # 0 for padding, 1 for unknown\n return {\n k: idx\n for idx, k in enumerate(\n (k for k, v in all_tokens.items() if v >= min_freq), start=2\n )\n }\n","repo_name":"yuanjie-ai/MeUtils","sub_path":"meutils/other/docarray/array/mixins/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"37681865655","text":"import re\n\nskip_types = ['declaration']\n\ndef upper(s): return f'{s[0].upper()}{s[1:]}'\n\ndef get_function(function, content):\n if function == 'print': return f'Console.WriteLine({content})'\n elif function == 'alen' or function == 'slen': return f'{content}.Length'\n else: return f'{upper(function)}({content})'\n\ndef style_function(match_obj):\n function = match_obj.group(1)\n content = match_obj.group(2)\n return get_function(function, content)\n\ndef get_statement(statement):\n if statement == 'elif': return 'else if'\n else: return statement\n\ndef compile_cs(outpath, commands):\n\n program = ''\n spaces = 0\n\n # get import statements\n for command in commands:\n if command.type == 'function-call' and command.args[0] == 'print':\n program += 'using System;\\n\\n'\n break\n\n # open class\n program += 'class Program\\n'\n program += '{\\n'\n spaces += 1\n\n # for each command\n for command in commands:\n\n # get command data\n type = command.type\n args = command.args\n\n # update function names\n args = [\n re.sub('([a-zA-Z][a-zA-Z0-9]*)\\s*\\((.*)\\)', style_function, arg)\n for arg in args\n ]\n\n # append spacing\n if type == 'bracket-end': program += ' ' * (spaces - 1)\n elif type not in skip_types: program += ' ' * spaces\n\n if type == 'function-def':\n program += f'static {args[0]} {upper(args[1])}({\", \".join(args[2:])})'\n elif type == 'function-call':\n function = args[0]\n content = \", \".join(args[1:])\n program += f'{get_function(function, content)};'\n elif type == 'var-create': program += f'{args[0]} {args[1]};'\n elif type == 'var-set': program += f'{args[0]} {args[1]} = {args[2]};'\n elif type == 'var-update': program += f'{\" \".join(args)};'\n elif type == 'array-create': program += f'{args[0]}[] {args[1]};'\n elif type == 'array-set':\n program += f'{args[0]}[] {args[1]} = {{ {\", \".join(args[2:])} }};'\n elif type == 'array-update':\n program += f'{args[0]} = new {args[1]}[] {{ {\", \".join(args[2:])} }};'\n elif type == 'array-index-update': program += f'{args[0]}[{args[1]}] = {args[2]};'\n elif type == 'comment': program += f'// {args[0]}'\n elif type == 'declaration':\n if args[0] == 'MAIN':\n program += ' static void Main()\\n'\n program += ' {\\n'\n spaces += 1\n elif type == 'statement-args':\n statement = get_statement(args[0])\n program += f'{statement} ({args[1]})'\n elif type == 'statement-for':\n program += f'for (int {args[0]} = {args[1]}; {args[0]} < {args[2]}; {args[0]}++)'\n elif type == 'statement-foreach':\n program += f'foreach ({args[0]} {args[1]} in {args[2]})'\n elif type == 'statement-raw': program += args[0]\n elif type == 'statement-return': program += f'return{args[0]};'\n elif type == 'bracket-start':\n program += '{'\n spaces += 1\n elif type == 'bracket-end':\n program += '}'\n spaces -= 1\n\n if type not in skip_types: program += '\\n' # newline\n\n # close class\n if spaces > 1: program += 
' }\\n'\n program += '}\\n'\n\n # write program to file\n file = open(outpath, 'w')\n file.write(program)\n file.close()\n\n print(f'compiled successfully into {outpath}')\n","repo_name":"csaye/flexscript","sub_path":"src/compile_cs.py","file_name":"compile_cs.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2508590485","text":"from __future__ import print_function\nimport numpy as np\nimport datetime\nimport csv\nfrom sklearn import metrics\nfrom sklearn.utils import shuffle\nfrom sklearn import ensemble\nfrom sklearn import cross_validation\n\nspecies_map = {'CULEX RESTUANS' : \"100000\",\n 'CULEX TERRITANS' : \"010000\", \n 'CULEX PIPIENS' : \"001000\", \n 'CULEX PIPIENS/RESTUANS' : \"101000\", \n 'CULEX ERRATICUS' : \"000100\", \n 'CULEX SALINARIUS': \"000010\", \n 'CULEX TARSALIS' : \"000001\",\n 'UNSPECIFIED CULEX': \"001100\"} # hack!\ndef date(text):\n return datetime.datetime.strptime(text, \"%Y-%m-%d\").date()\n \ndef precip(text):\n TRACE = 1e-3\n text = text.strip()\n if text == \"M\":\n return None\n if text == \"-\":\n return None\n if text == \"T\":\n return TRACE\n return float(text)\n \ndef ll(text):\n return int(float(text)*100)/100\n\ndef impute_missing_weather_station_values(weather):\n # Stupid simple\n for k, v in weather.items():\n if v[0] is None:\n v[0] = v[1]\n elif v[1] is None:\n v[1] = v[0]\n for k1 in v[0]:\n if v[0][k1] is None:\n v[0][k1] = v[1][k1]\n for k1 in v[1]:\n if v[1][k1] is None:\n v[1][k1] = v[0][k1]\n \ndef load_weather():\n weather = {}\n for line in csv.DictReader(open(\"../input/weather.csv\")):\n for name, converter in {\"Date\" : date,\n \"Tmax\" : float,\"Tmin\" : float,\"Tavg\" : float,\n \"DewPoint\" : float, \"WetBulb\" : float,\n \"PrecipTotal\" : precip,\"Sunrise\" : precip,\"Sunset\" : precip,\n \"Depart\" : float, \"Heat\" : precip,\"Cool\" : precip,\n \"ResultSpeed\" : float,\"ResultDir\" : float,\"AvgSpeed\" : float,\n \"StnPressure\" : float, \"SeaLevel\" : float}.items():\n x = line[name].strip()\n line[name] = converter(x) if (x != \"M\") else None\n station = int(line[\"Station\"]) - 1\n assert station in [0,1]\n dt = line[\"Date\"]\n if dt not in weather:\n weather[dt] = [None, None]\n assert weather[dt][station] is None, \"duplicate weather reading {0}:{1}\".format(dt, station)\n weather[dt][station] = line\n impute_missing_weather_station_values(weather) \n return weather\n \n \ndef load_training():\n training = []\n for line in csv.DictReader(open(\"../input/train.csv\")):\n for name, converter in {\"Date\" : date, \n \"Latitude\" : ll, \"Longitude\" : ll,\n \"NumMosquitos\" : int, \"WnvPresent\" : int}.items():\n line[name] = converter(line[name])\n training.append(line)\n return training\n \ndef load_testing():\n training = []\n for line in csv.DictReader(open(\"../input/test.csv\")):\n for name, converter in {\"Date\" : date, \n \"Latitude\" : ll, \"Longitude\" : ll}.items():\n line[name] = converter(line[name])\n training.append(line)\n return training\n \n \ndef closest_station(lat, longi):\n # Chicago is small enough that we can treat coordinates as rectangular.\n stations = np.array([[41.995, -87.933],\n [41.786, -87.752]])\n loc = np.array([lat, longi])\n deltas = stations - loc[None, :]\n dist2 = (deltas**2).sum(1)\n return np.argmin(dist2)\n \ndef normalize(X, mean=None, std=None):\n count = X.shape[1]\n if mean is None:\n mean = np.nanmean(X, axis=0)\n for i in range(count):\n X[np.isnan(X[:,i]), i] = 
mean[i]\n if std is None:\n std = np.std(X, axis=0)\n for i in range(count):\n X[:,i] = (X[:,i] - mean[i]) / std[i]\n return mean, std\n \ndef scaled_count(record):\n SCALE = 10.0\n if \"NumMosquitos\" not in record:\n # This is test data\n return 1\n return int(np.ceil(record[\"NumMosquitos\"] / SCALE))\n \n \ndef assemble_X(base, weather):\n X = []\n for b in base:\n date = b[\"Date\"]\n lat, longi = b[\"Latitude\"], b[\"Longitude\"]\n case = [date.year, date.month, date.day,date.weekday(), lat, longi]\n # Look at a selection of past weather values\n for days_ago in [1,2,3,5,8,12]:\n day = date - datetime.timedelta(days=days_ago)\n for obs in [\"Tmax\",\"Tmin\",\"Tavg\",\"DewPoint\",\"WetBulb\",\"PrecipTotal\",\"Depart\",\"Sunrise\",\"Sunset\",\"Heat\",\"Cool\",\"ResultSpeed\",\"ResultDir\"]:\n station = closest_station(lat, longi)\n case.append(weather[day][station][obs])\n # Specify which mosquitos are present\n species_vector = [float(x) for x in species_map[b[\"Species\"]]]\n case.extend(species_vector)\n # Weight each observation by the number of mosquitos seen. Test data\n # Doesn't have this column, so in that case use 1. This accidentally\n # Takes into account multiple entries that result from >50 mosquitos\n # on one day. \n for repeat in range(scaled_count(b)):\n X.append(case) \n X = np.asarray(X, dtype=np.float32)\n return X\n \ndef assemble_y(base):\n y = []\n for b in base:\n present = b[\"WnvPresent\"]\n for repeat in range(scaled_count(b)):\n y.append(present) \n return np.asarray(y, dtype=np.int32).reshape(-1,1)\n\n\nclass AdjustVariable(object):\n def __init__(self, variable, target, half_life=20):\n self.variable = variable\n self.target = target\n self.half_life = half_life\n def __call__(self, nn, train_history):\n delta = self.variable.get_value() - self.target\n delta /= 2**(1.0/self.half_life)\n self.variable.set_value(np.float32(self.target + delta))\n\ndef cross_validate(estimator, X_train, y_train):\n \"\"\"\n StratifiedKFold cross validation.\n \"\"\"\n\n # cross validation\n cv = cross_validation.StratifiedKFold(y_train, n_folds=3, shuffle=True)\n scores = cross_validation.cross_val_score(estimator, X_train, y_train, scoring='roc_auc', cv=cv, n_jobs=-1)\n print('Cross Validation - scores:', ', '.join(str(round(score, 3)) for score in scores))\n print('Average score:', round(scores.mean(), 3))\n \n # Fit the model\n estimator\n estimator.fit(X_train, y_train)\n return estimator, scores\n \ndef train(classifier):\n weather = load_weather()\n training = load_training()\n \n X = assemble_X(training, weather)\n y = assemble_y(training).ravel()\n \n X, y = shuffle(X, y)\n\n classifier, scores = cross_validate(classifier, X, y)\n #classifier.fit(X, y)\n \n return classifier \n \n\ndef submit(estimator):\n weather = load_weather()\n testing = load_testing()\n X = assemble_X(testing, weather) \n predictions = estimator.predict_proba(X)[:, 1] \n #\n out = csv.writer(open(\"submission_final_opt_extrarandomtrees.csv\", \"w\"))\n out.writerow([\"Id\",\"WnvPresent\"])\n for row, p in zip(testing, predictions):\n out.writerow([row[\"Id\"], p])\n\n\nif __name__ == \"__main__\":\n forest = ensemble.ExtraTreesClassifier(n_estimators=5000, class_weight='auto')\n forest = train(forest)\n 
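# (optional, illustrative) the fitted model could be persisted before submitting:\n    #   import joblib; joblib.dump(forest, 'wnv_forest.pkl')\n    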
+{"seq_id":"4378199725","text":"import tkinter as tk\nfrom tkinter import filedialog as fd\nfrom functions import (\n deep_folders,\n rename_and_relocation,\n rename_and_relocation_without_arch,\n del_empty_dirs,\n result_sorting_with_arch,\n result_sorting_without_arch,\n)\n\nroot = tk.Tk()\nroot.title('Smart file assistant')\nroot.geometry('+700+300')\n\n\ndef main() -> None:\n try:\n adress = str(way_for_sort.get())\n if unpack_var.get() == 1 and deep_var.get() == 1:\n deep_folders(adress)\n rename_and_relocation(adress)\n result_sorting_with_arch(adress)\n if unpack_var.get() == 1 and deep_var.get() != 1:\n rename_and_relocation(adress)\n result_sorting_with_arch(adress)\n if unpack_var.get() != 1 and deep_var.get() == 1:\n deep_folders(adress)\n rename_and_relocation_without_arch(adress)\n result_sorting_without_arch(adress)\n if unpack_var.get() != 1 and deep_var.get() != 1:\n rename_and_relocation_without_arch(adress)\n del_empty_dirs(adress)\n result_sorting_without_arch(adress)\n except Exception:\n err_win = tk.Tk()\n err_win.title(\"warning!\")\n tk.Label(err_win, text='Specify the path to the folder first!').grid(pady=5, padx=5)\n err_win.geometry('+1100+350')\n\n def exit_err() -> None:\n err_win.destroy()\n\n tk.Button(err_win, text='Ok', command=exit_err).grid(pady=5, padx=5)\n\n\ntk.Label(root, text='Sort folder path:').grid(row=0, column=0, pady=5, padx=5)\ntk.Label(\n root,\n text='Warning! All names of files will be translated!',\n font=('Arial', 10, 'bold'),\n).grid(row=4, column=1, columnspan=2, pady=5, padx=5, sticky='e')\n\nunpack_var = tk.IntVar()\nunpack = tk.Checkbutton(root, text='Unpack archives', variable=unpack_var)\nunpack.grid(row=3, column=0, columnspan=2, sticky='w')\n\ndeep_var = tk.IntVar()\ndeppest = tk.Checkbutton(root, text='Subfolders sorting', variable=deep_var)\ndeppest.grid(row=4, column=0, columnspan=2, sticky='w')\n\nway_for_sort = tk.Entry(root, width=100)\nway_for_sort.grid(row=0, column=1)\n\n\ndef callback() -> str:\n name = fd.askdirectory()\n way_for_sort.insert(0, name)\n return str(name)\n\n\ntk.Button(text='Find folder', command=callback).grid(row=1, column=1)\n\ntk.Button(root, text='Sort!', command=main).grid(\n row=1, column=1, pady=5, padx=5, sticky='e'\n)\n\nif __name__ == '__main__':\n root.mainloop()\n","repo_name":"ViktorKos/Python","sub_path":"sorter_mac_os/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
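The `run.py` GUI above wires `filedialog.askdirectory` into an Entry through a button callback. A self-contained sketch of that browse-button pattern (widget names here are illustrative); unlike the callback above, it clears the Entry first so repeated browsing does not prepend onto a stale path:

```python
# Minimal sketch: a button opens a directory picker and fills an Entry.
import tkinter as tk
from tkinter import filedialog as fd

root = tk.Tk()
path_entry = tk.Entry(root, width=60)
path_entry.grid(row=0, column=0, padx=5, pady=5)

def browse() -> None:
    name = fd.askdirectory()
    if name:  # the dialog returns '' when the user cancels
        path_entry.delete(0, tk.END)  # clear any previously chosen path
        path_entry.insert(0, name)

tk.Button(root, text="Find folder", command=browse).grid(row=0, column=1)
root.mainloop()
```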
+{"seq_id":"73475724323","text":"import sys, heapq\ninput = sys.stdin.readline\nINF = int(1e9)\n\nN,M,X = map(int,input().split())\nG = [[] for _ in range(N+1)]\nrG = [[] for _ in range(N+1)]\n\nfor _ in range(M):\n s,e,t = map(int,input().split()) \n G[s].append((e,t))\n rG[e].append((s,t))\n\ndist = [INF for _ in range(N+1)]\nrdist = [INF for _ in range(N+1)]\n\ndef dijkstra(start, G, dist):\n pq = []\n heapq.heappush(pq,(0,start))\n dist[start] = 0\n\n while pq:\n cost, cur = heapq.heappop(pq)\n if dist[cur] < cost: continue\n\n for i in G[cur]:\n target = cost + i[1]\n if target < dist[i[0]]:\n dist[i[0]] = target\n heapq.heappush(pq, (dist[i[0]],i[0]))\n\ndijkstra(X,G,dist)\ndijkstra(X,rG,rdist)\n\nans = 0\nfor i,j in zip(dist[1:],rdist[1:]): ans = max(ans,i+j)\nprint(ans)\n","repo_name":"kangjunseo/Algorithm","sub_path":"Algorithm - BOJ/Graph Theory/1238.py","file_name":"1238.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
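The BOJ 1238 solution above avoids running Dijkstra from every node: one run on the graph gives distances from X, one run on the reversed graph gives distances to X, and the answer is the maximal round trip `dist[i] + rdist[i]`. The same trick restated as a standalone sketch over a tiny hand-made graph:

```python
import heapq

def dijkstra(start, adj, n):
    # adj[u] = list of (v, w) edges; returns shortest distances from start
    dist = [float("inf")] * (n + 1)
    dist[start] = 0
    pq = [(0, start)]
    while pq:
        cost, cur = heapq.heappop(pq)
        if cost > dist[cur]:
            continue  # stale queue entry
        for nxt, w in adj[cur]:
            if cost + w < dist[nxt]:
                dist[nxt] = cost + w
                heapq.heappush(pq, (cost + w, nxt))
    return dist

# toy graph: 1->2 (4), 1->3 (2), 2->1 (1), 2->3 (5), 3->2 (1); target node X = 2
n, X = 3, 2
G  = {1: [(2, 4), (3, 2)], 2: [(1, 1), (3, 5)], 3: [(2, 1)]}
rG = {1: [(2, 1)], 2: [(1, 4), (3, 1)], 3: [(1, 2), (2, 5)]}  # every edge flipped

to_x   = dijkstra(X, rG, n)  # distance from every node TO X, via the reverse graph
from_x = dijkstra(X, G, n)   # distance from X to every node
print(max(to_x[i] + from_x[i] for i in range(1, n + 1)))  # -> 4
```

Building `rG` by flipping every edge is what makes a single backward search equivalent to N forward searches toward X.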
+{"seq_id":"24486460763","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n===========================================\n @Project : vpp-learning \n @Author : Xiangyu Zeng\n @Date : 2/8/22 8:15 PM \n @Description :\n \n===========================================\n\"\"\"\nfrom torch.nn import Sequential\n\nimport pfrl\nimport torch\nimport torch.nn.functional as F\nfrom pfrl.nn import Recurrent\nfrom pfrl.q_functions import DiscreteActionValueHead\nfrom torch import nn\n\n\nclass NetworkObsVisit(torch.nn.Module):\n def __init__(self, action_size):\n super().__init__()\n\n self.recurrent_obs = Sequential(\n nn.Conv2d(15, 24, kernel_size=4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1),\n nn.Linear(3888, 512),\n nn.ReLU(),\n # nn.LSTM(input_size=512, hidden_size=128),\n nn.Linear(512, 64),\n nn.ReLU(),\n nn.Linear(64, action_size),\n DiscreteActionValueHead())\n\n # pfrl.nn.RecurrentSequential\n self.recurrent_visit = Sequential(\n nn.Conv3d(1, 8, kernel_size=4, stride=2, padding=1),\n nn.ReLU(),\n nn.Flatten(start_dim=1),\n nn.Linear(216, 108),\n nn.ReLU(),\n # nn.LSTM(input_size=108, hidden_size=64),\n nn.Linear(108, 32),\n nn.ReLU(),\n nn.Linear(32, action_size),\n DiscreteActionValueHead())\n\n def forward(self, state):\n obs = state[0].float()\n visit = state[1].float()\n\n out = self.recurrent_obs(obs)\n out = self.recurrent_visit(visit)\n # recurrent_obs = recurrent_state[0].float()\n # recurrent_visit = recurrent_state[1].float()\n # out_obs = self.recurrent_obs(obs, recurrent_obs)\n # out_visit = self.recurrent_visit(visit, recurrent_visit)\n return out\n","repo_name":"zengxyu/vpp-learning","sub_path":"agents/network/network_obs_visit.py","file_name":"network_obs_visit.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"2368384930","text":"from ....util.ServiceConnection import serviceConnection\nimport pandas as pd\nimport numpy as np\nimport math\n\nclass FormulaTarifas(object):\n def __init__(self):\n connection = serviceConnection()\n self.connMDB = connection.get_connectionMDB()\n self.meses = ['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre']\n\n def calcular_tarifas(self, dataframe, ano, mes):\n tarifas = dataframe\n\n # tarifas['juank'] = 123456\n\n # Query MongoDB GESTOR IDANE (fetches only the IPC for the previous month) - still needs adjusting for December\n tarifas['ipc_mes_anterior'] = self.__getIPC(ano, mes, 2)\n tarifas['ipc_mes_consultado'] = self.__getIPC(ano, mes, 1)\n\n # print('IF VALUE > ', (1 - (tarifas[18] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])) / tarifas[21])))\n # print('IF BELOW 0.6 > ', (tarifas[18] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8]))))\n # print('IF ABOVE 0.6 > ', (tarifas[21] * (1 - 0.6)))\n\n # TARIFF CALCULATION - STRATUM 1\n tarifas.loc[(1 - (tarifas[18] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])) / tarifas[21])) > 0.6, 'CT_E1'] = (tarifas[21] * (1 - 0.6))\n tarifas.loc[(1 - (tarifas[18] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])) / tarifas[21])) <= 0.6, 'CT_E1'] = (tarifas[18] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])))\n # print('---------------------------------tarifas -> ', tarifas['CT_E1'].isnull().values.any(), '-------------------------')\n\n if tarifas['CT_E1'].isnull().values.any(): # Check for NaN\n tarifas['CT_E1'] = 0\n\n # TARIFF CALCULATION - STRATUM 2\n tarifas.loc[(1 - (tarifas[19] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])) / tarifas[21])) > 0.5, 'CT_E2'] = (tarifas[21] * (1 - 0.5))\n tarifas.loc[(1 - (tarifas[19] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])) / tarifas[21])) <= 0.5, 'CT_E2'] = (tarifas[19] * np.minimum((tarifas['ipc_mes_consultado'] / tarifas['ipc_mes_anterior']), (tarifas[21] / tarifas[8])))\n # tarifas.loc[np.isnan(tarifas['CT_E2']), 'CT_E2'] = 0 # Check for NaN\n if tarifas['CT_E2'].isnull().values.any(): # Check for NaN\n tarifas['CT_E2'] = 0\n\n # TARIFF CALCULATION - STRATUM 3\n tarifas['CT_E3'] = tarifas[21] * (1 - 0.15)\n # tarifas.loc[np.isnan(tarifas['CT_E3']), 'CT_E3'] = 0 # Check for NaN\n if tarifas['CT_E3'].isnull().values.any(): # Check for NaN\n tarifas['CT_E3'] = 0\n \n # TARIFF CALCULATION - STRATUM 5\n tarifas['CT_E5'] = tarifas[21] * (1.2)\n # tarifas.loc[np.isnan(tarifas['CT_E5']), 'CT_E5'] = 0 # Check for NaN\n if tarifas['CT_E5'].isnull().values.any(): # Check for NaN\n tarifas['CT_E5'] = 0\n \n # TARIFF CALCULATION - STRATUM 6\n tarifas['CT_E6'] = tarifas[21] * (1.2)\n # tarifas.loc[np.isnan(tarifas['CT_E6']), 'CT_E6'] = 0 # Check for NaN\n if tarifas['CT_E6'].isnull().values.any(): # Check for NaN\n tarifas['CT_E6'] = 0\n \n # TARIFF CALCULATION - INDUSTRIAL\n tarifas['CT_INDUSTRIAL'] = tarifas[21] * (1.2)\n # tarifas.loc[np.isnan(tarifas['CT_INDUSTRIAL']), 'CT_INDUSTRIAL'] = 0 # Check for NaN\n if tarifas['CT_INDUSTRIAL'].isnull().values.any(): # Check for NaN\n tarifas['CT_INDUSTRIAL'] = 0\n \n # TARIFF CALCULATION - COMMERCIAL\n tarifas['CT_COMERCIAL'] = tarifas[21] * (1.2)\n # tarifas.loc[np.isnan(tarifas['CT_COMERCIAL']), 'CT_COMERCIAL'] = 0 # Check for NaN\n if tarifas['CT_COMERCIAL'].isnull().values.any(): # Check for NaN\n tarifas['CT_COMERCIAL'] = 0\n\n # SUBSIDY PERCENTAGE CALCULATION - STRATUM 1\n tarifas['CPS_E1'] = (1 - tarifas['CT_E1'] / tarifas[21]) * 100\n # tarifas.loc[np.isnan(tarifas['CPS_E1']), 'CPS_E1'] = 0 # Check for NaN\n if tarifas['CPS_E1'].isnull().values.any(): # Check for NaN\n tarifas['CPS_E1'] = 0\n \n # SUBSIDY PERCENTAGE CALCULATION - STRATUM 2\n tarifas['CPS_E2'] = (1 - tarifas['CT_E2'] / tarifas[21]) * 100\n # tarifas.loc[np.isnan(tarifas['CPS_E2']), 'CPS_E2'] = 0 # Check for NaN\n if tarifas['CPS_E2'].isnull().values.any(): # Check for NaN\n tarifas['CPS_E2'] = 0\n \n # SUBSIDY PERCENTAGE CALCULATION - STRATUM 3\n tarifas['CPS_E3'] = (1 - tarifas['CT_E3'] / tarifas[21]) * 100\n # tarifas.loc[np.isnan(tarifas['CPS_E3']), 'CPS_E3'] = 0 # Check for NaN\n if tarifas['CPS_E3'].isnull().values.any(): # Check for NaN\n tarifas['CPS_E3'] = 0\n\n # SUBSIDY PERCENTAGE CALCULATION - STRATUM 4\n tarifas['CPS_E4'] = 0\n\n # SUBSIDY PERCENTAGE CALCULATION - STRATUM 5\n tarifas['CPS_E5'] = 0\n\n # SUBSIDY PERCENTAGE CALCULATION - STRATUM 6\n tarifas['CPS_E6'] = 0\n\n # SUBSIDY PERCENTAGE CALCULATION - INDUSTRIAL\n tarifas['CPS_INDUSTRIAL'] = 0\n\n # SUBSIDY PERCENTAGE CALCULATION - COMMERCIAL\n tarifas['CPS_COMERCIAL'] = 0\n \n print('data TARIFAS -> ', tarifas)\n\n return tarifas\n\n def __getIPC(self, ano, periodo, mes):\n MES_ARG = self.meses[int(periodo) - mes] # m-1 or the queried month\n result = list(self.connMDB.indicesDANE.find({\"anio\": ano}, {'meses.' + MES_ARG: 1 }))\n key_mes = []\n for x in result:\n for key, value in x['meses'].items():\n key_mes.append(key)\n\n obj = []\n\n for m in key_mes:\n result_mes = result[0]['meses'][m]\n ipc = result_mes[len(result_mes)-1]['ipc']\n return ipc","repo_name":"juankmiloh/ciad-platform","sub_path":"Backend/concret_sources/models/revisor/formulas/FormulaTarifas.py","file_name":"FormulaTarifas.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
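The tariff routine above repeats one pattern many times: a boolean condition drives `tarifas.loc[mask, col] = ...` to cap the subsidised tariff at a stratum's maximum subsidy. The same capped rule written once with `np.where`; the column names below are illustrative stand-ins for the numeric labels (18, 21, 8) used above, and `fillna(0)` zeroes only the missing entries rather than wiping the whole column:

```python
import numpy as np
import pandas as pd

# stand-ins: 'base' ~ tarifas[18], 'full' ~ tarifas[21],
# 'ratio' ~ min(IPC ratio, tarifas[21] / tarifas[8]); cap = 0.6 for stratum 1
df = pd.DataFrame({"base": [100.0, 80.0], "full": [150.0, 90.0],
                   "ratio": [1.02, 1.10]})
cap = 0.6

candidate = df["base"] * df["ratio"]     # indexed tariff
subsidy = 1 - candidate / df["full"]     # implied subsidy share
df["CT_E1"] = np.where(subsidy > cap,
                       df["full"] * (1 - cap),  # clamp the subsidy at the cap
                       candidate)
df["CT_E1"] = df["CT_E1"].fillna(0)      # NaN guard, per-row rather than per-column
print(df)
```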
+{"seq_id":"38604750847","text":"import sys\nsys.path.append('../')\nimport common\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\n\nif __name__ == '__main__':\n # links = ['scp-' + str(num).zfill(3) + '-jp' if num < 100 else 'scp-' + str(num) + '-jp'\n # for num in range(2, 3000)]\n # common.iterate_and_save_link_list(links, '../../yaml/jp/scp_jp', base_url=common.jp_url, start=1160)\n\n filename = '../../yaml/jp/metatitle_jp'\n if os.path.exists(filename + '.yaml'):\n os.remove(filename + '.yaml')\n metatitle_url = 'http://scp-jp.wikidot.com/scp-series-jp'\n suffix_list = ['', '-2', '-3']\n for suffix in suffix_list:\n print(suffix)\n obj = {}\n res = requests.get(metatitle_url + suffix)\n soup = BeautifulSoup(res.text, 'html.parser')\n content_panel = soup.find(\n 'div', {'class': 'content-panel standalone series scp'})\n lists = content_panel.find_all('li')\n print(lists)\n links = common.process_hrefs([li.find('a')['href'] for li in lists])\n texts = [li.text for li in lists]\n common.get_and_save_metatitle(\n links, texts, filename, mode='a')\n","repo_name":"ternbusty/scp_network_wiki","sub_path":"src/jp/get_scp_jp.py","file_name":"get_scp_jp.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"72274242722","text":"import sys\n\n\ndef main():\n if len(sys.argv) > 1 :\n try :\n funcs = str(dir(__import__(sys.argv[1])))\n except ModuleNotFoundError:\n print(\"You don't have that module.\")\n return;\n funcs = funcs.replace(\"\\'\", '\\\"').replace(',', ',\\n')\n funcs = '{\"'+sys.argv[1]+'\": '+funcs+\"}\"\n file = open(\"function_lists_\"+sys.argv[1]+\".json\", 'w+')\n file.write(funcs)\n file.close()\n else :\n print(\"Type the module that you want to get the functions list by an argument.\")\n\nmain()\n","repo_name":"code-yeongyu/phone_code_pc","sub_path":"Documents/get_modules_in_py.py","file_name":"get_modules_in_py.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
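`get_modules_in_py.py` above builds its JSON by string surgery on `str(dir(...))`. Serialising through the `json` module gets quoting and escaping right for free; a sketch of the same tool in that style (file naming kept from the original, everything else is one possible rewrite, not the author's code):

```python
import importlib
import json
import sys

def dump_module_names(module_name: str) -> None:
    # dir() already returns a list of str, so json.dump can emit it directly
    try:
        module = importlib.import_module(module_name)
    except ModuleNotFoundError:
        print("You don't have that module.")
        return
    with open(f"function_lists_{module_name}.json", "w") as fh:
        json.dump({module_name: dir(module)}, fh, indent=1)

if __name__ == "__main__":
    if len(sys.argv) > 1:
        dump_module_names(sys.argv[1])
    else:
        print("Pass the module whose attribute list you want as an argument.")
```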
+{"seq_id":"184064709","text":"\nimport pandas as pd\ndf = pd.read_csv(\"Province.csv\",encoding=\"gbk\")\nprovince = df['Unnamed: 1'].tolist()\ncode = [i for i in range(1,len(province)+1)]\ncode = dict(zip(province,code))\nprint(code)\n\n# name of the folder holding the json files\njsonpath = './json'\n# name of the folder the csv files are written to\ncsvpath = './json_csv/'\n\nimport os\ndef scan_files(directory,prefix=None,postfix=None):\n files_list=[]\n for root, sub_dirs, files in os.walk(directory):\n for special_file in files:\n if postfix:\n if special_file.endswith(postfix):\n files_list.append(os.path.join(root,special_file))\n elif prefix:\n if special_file.startswith(prefix):\n files_list.append(os.path.join(root,special_file))\n else:\n files_list.append(os.path.join(root,special_file)) \n return files_list\n# resolve the json folder to an absolute path\ntemp = os.path.abspath(jsonpath)\nall_files = scan_files(temp)\n\nimport json\nwith open(all_files[0],'r',encoding='utf8') as fp:\n json_data = json.load(fp)\nprint(json_data)\nfor a in json_data.items():\n for b in a[1].items():\n for c in b[1].items():\n print(c[1])\nvalues = [i for i in c[1].values()]\nkeys = [i for i in c[1].keys()]\nprint(values)\nprint(keys)\n\npro_num = 0\nfor file in all_files:\n file_name = file.split('\\\\')[-1].split('.')[0]\n year = file_name[0:4]\n for pro in province:\n if pro in file:\n pro_num = code[pro]\n break\n if \"理科\" in file:\n subject = \"理科\"\n elif \"文科\" in file:\n subject = \"文科\"\n else:\n subject = \"综合\"\n try:\n try:\n with open(file,'r',encoding='gbk') as fp:\n json_data = json.load(fp)\n except Exception:\n with open(file,'r',encoding='utf-8-sig') as fp:\n json_data = json.load(fp)\n except Exception:\n print(file_name,\"failed!\")\n continue\n for a in json_data.items():\n for b in a[1].items():\n for c in b[1].items():\n print(file,\" open successfully!\",\" \",subject)\n values = [i for i in c[1].values()]\n keys = [i for i in c[1].keys()]\n df = pd.DataFrame([keys,values,[pro_num] * len(keys),[year] * len(keys),[subject] * len(keys)])\n df = df.T\n df = df.set_axis(['分数', '累计人数', '省份','年份','科类'], axis='columns')\n \n new_path = csvpath + file_name + '.csv'\n df.to_csv(new_path,encoding='utf-8-sig')\n\n","repo_name":"destinyvoilet/REC2020","sub_path":"第一组实验报告/json_to_scv 尹行健.py","file_name":"json_to_scv 尹行健.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
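The converter above builds each frame by transposing a list of rows and renaming columns afterwards with `set_axis` (which in modern pandas returns a new frame unless reassigned, as fixed above). Constructing the frame from a column dict avoids both steps; the values here are stand-ins for the parsed json:

```python
import pandas as pd

# stand-in values; in the script above these come from the parsed json records
keys, values = ["600", "650"], [1200, 300]
pro_num, year, subject = 1, "2020", "理科"

df = pd.DataFrame({
    "分数": keys,                   # score
    "累计人数": values,             # cumulative count of candidates
    "省份": [pro_num] * len(keys),  # province code
    "年份": [year] * len(keys),     # year
    "科类": [subject] * len(keys),  # subject track
})
df.to_csv("example.csv", encoding="utf-8-sig", index=False)
```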
+{"seq_id":"6290181722","text":"\"\"\"\nTests brewblox_devcon_spark.commander\n\"\"\"\n\nimport asyncio\n\nimport pytest\nfrom brewblox_service import scheduler\nfrom brewblox_service.testing import matching\n\nfrom brewblox_devcon_spark import (codec, commander, connection,\n service_status, service_store)\nfrom brewblox_devcon_spark.models import (ErrorCode, IntermediateResponse,\n ServiceConfig)\n\nTESTED = commander.__name__\n\n\n@pytest.fixture\nasync def setup(app):\n config: ServiceConfig = app['config']\n config.command_timeout = 1\n\n service_status.setup(app)\n scheduler.setup(app)\n service_store.setup(app)\n codec.setup(app)\n connection.setup(app)\n commander.setup(app)\n\n\nasync def test_acknowledge(app, client):\n welcome = ','.join([\n '!BREWBLOX',\n 'ed70d66f0',\n '3f2243a',\n '2019-06-18',\n '2019-06-18',\n '1.2.1-rc.2',\n 'p1',\n '78',\n '0A',\n '1234567F0CASE'\n ])\n service_status.set_enabled(app, True)\n await asyncio.wait_for(service_status.wait_connected(app), timeout=5)\n assert not service_status.is_acknowledged(app)\n\n with pytest.warns(UserWarning, match='incompatible device ID'):\n await connection.fget(app).on_event(welcome)\n\n assert service_status.is_acknowledged(app)\n assert service_status.desc(app).controller.device.device_id == '1234567f0case'\n\n\nasync def test_unexpected_event(app, client, mocker):\n m_log_info = mocker.patch(TESTED + '.LOGGER.info', autospec=True)\n await connection.fget(app).on_event('hello world!')\n m_log_info.assert_called_with(matching(r'.*hello world!'))\n\n\nasync def test_unexpected_response(app, client, mocker):\n m_log_error = mocker.patch(TESTED + '.LOGGER.error', autospec=True)\n response = IntermediateResponse(\n msgId=123,\n error=ErrorCode.OK,\n payload=[]\n )\n message = codec.fget(app).encode_response(response)\n await connection.fget(app).on_response(message)\n m_log_error.assert_called_with(matching(r'.*Unexpected message'))\n\n\nasync def test_firmware_update_call(app, client, mocker):\n # We don't unit test OTA update logic because it makes very in-depth assumptions\n # about how particle devices respond to YMODEM calls\n # We'll check now whether the basic call works\n service_status.set_enabled(app, True)\n await asyncio.wait_for(service_status.wait_connected(app), timeout=5)\n await commander.fget(app).firmware_update()\n","repo_name":"BrewBlox/brewblox-devcon-spark","sub_path":"test/test_commander.py","file_name":"test_commander.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"2369251237","text":"from aocd import get_data\n\nmy_list = get_data(day=8, year=2015).split(\"\\n\")\n\ntotal_chars = 0\ntotal_updated_chars = 0\n\nfor i in range(0, len(my_list)):\n string = my_list[i]\n total_chars += len(string)\n replaced_string_backlash = string.replace(\"\\\\\", \"\\\\\\\\\")\n replaced_string_quote = string.replace(\"\\\"\", \"\\\\\\\"\")\n total_updated_chars += 2 + len(string) + len(replaced_string_backlash) + len(replaced_string_quote) - (2*len(string))\n\nprint(total_updated_chars - total_chars)\n","repo_name":"mihirbpi/Advent-of-Code-2015","sub_path":"adv08/adv8-2.py","file_name":"adv8-2.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45372928169","text":"\"\"\"Run a command on the target system over SSH.\"\"\"\n\nimport logging\nimport os\nfrom typing import Final\n\nfrom ..internal.base_task import BaseTask\n\n_log = logging.getLogger(__name__)\n_log.setLevel(logging.DEBUG)\n\n\nCMD: Final[str] = \"ssh -t {user}@{host} 'cd {folder} && {command}'\"\n\n\nclass RemoteCommand(BaseTask):\n \"\"\"Run a command on the target system over SSH.\"\"\"\n\n def __init__(\n self,\n desc: str,\n command: str,\n need_confirm: bool = True,\n remote_user: str = \"root\",\n remote_host: str = \"target\",\n remote_folder: str = \"/home/code\",\n ) -> None:\n \"\"\"Run a command on the target system over SSH.\"\"\"\n super().__init__(desc, need_confirm)\n self.__command = command\n self.__remote_user = remote_user\n self.__remote_host = remote_host\n self.__remote_folder = remote_folder\n\n def _execute(self) -> None:\n cmd = CMD.format(\n user=self.__remote_user,\n host=self.__remote_host,\n folder=self.__remote_folder,\n command=self.__command,\n )\n _log.info(\"Running command:\\n{0}\".format(cmd))\n os.system(cmd)\n","repo_name":"Konstantin-Dudersky/smarthome","sub_path":"setup/setup/tasks/remote_command.py","file_name":"remote_command.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
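`RemoteCommand` above interpolates user, host, and command straight into an `os.system` string. A sketch of the same call through `subprocess.run` with an argument list, which keeps the local shell out of the picture entirely; quoting on the remote side still needs care, hence `shlex.quote` on the folder (function name and defaults here mirror the original but are otherwise illustrative):

```python
import shlex
import subprocess

def run_remote(command: str, user: str = "root", host: str = "target",
               folder: str = "/home/code") -> int:
    # local side: an argument list, so no local shell parsing is involved;
    # remote side: one string for the remote shell, with the folder quoted
    remote = "cd {0} && {1}".format(shlex.quote(folder), command)
    result = subprocess.run(["ssh", "-t", "{0}@{1}".format(user, host), remote])
    return result.returncode

# e.g. run_remote("ls -la")  # assumes ssh access to the 'target' host
```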
+{"seq_id":"21840953301","text":"from levenshtein_numerics import levenshtein_c, levenshtein_cython\nimport numpy as np\nimport pickle, os, gzip, string\n\ndef levenshtein_phrase_distance(phr_s, phr_t, CYTHON=False):\n s, t = phr_s.split(), phr_t.split()\n # Create dictionary and assign each word to a unique int\n d = {}\n idx = 1\n # Translate phr_s and phr_t into int-language\n s1, s2 = [], []\n for w1 in s:\n if w1 not in d.keys():\n d[w1]=idx\n idx+=1\n s1.append(d[w1])\n for w2 in t:\n if w2 not in d.keys():\n d[w2]=idx\n idx+=1\n s2.append(d[w2])\n # avoid calculation?\n avoid_calc = True\n for word in s:\n if word in t:\n avoid_calc = False\n # for completely different phrases: distance = num of words of longer phrase\n if avoid_calc:\n return max(len(s), len(t))\n else:\n if CYTHON:\n return levenshtein_cython(s1, len(s), s2, len(t))\n else:\n return levenshtein_c(s1, len(s), s2, len(t))\n","repo_name":"harmening/phrase-frequency-counter","sub_path":"levenshtein_distance.py","file_name":"levenshtein_distance.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35346668642","text":"r\"\"\"Expected format\n123 234 345|12 3221 22\n\"\"\"\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser(description='Removes entries where one side is empty, outputs left side')\nparser.add_argument('-l','--left', help='Left sentences', required=True)\nparser.add_argument('-r','--right', help='Right sentences', required=True)\nargs = parser.parse_args()\n\nwith open(args.left) as left_reader:\n with open(args.right) as right_reader:\n for left_line, right_line in zip(left_reader, right_reader):\n if left_line.strip() and right_line.strip():\n sys.stdout.write(left_line)\n\n","repo_name":"fstahlberg/ucam-scripts","sub_path":"t2t/remove_empty_in_parallel.py","file_name":"remove_empty_in_parallel.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3328903290","text":"#!/usr/bin/env python3\n\nimport numpy as _np\nimport matplotlib.pyplot as _plt\n\n\nclass EchoObj(_np.ndarray):\n\n def __new__(cls, *args):\n return _np.ndarray.__new__(cls, 10, dtype=float)\n\n def __init__(self, array=None):\n if array is not None:\n if isinstance(array, (_np.ndarray, list, tuple)) and\\\n len(array) == 10:\n for i in range(len(array)):\n self[i] = array[i]\n else:\n raise Exception('error in array parameter: ' +\n 'type={0:s}, len={1:d}'.format(\n type(array), len(array)))\n else:\n for i in range(len(self)):\n self[i] = 0.0\n\n\ndef line(x1, y1, x2, y2):\n t = EchoObj()\n t[0] = x1\n t[1] = y1\n t[2] = x2\n t[3] = y2\n return t\n\n\ndef circle_out(x, y, R, theta):\n \"(x,y) is the position where the tangent lines cross.\"\n t = EchoObj()\n t[0] = x - R*_np.tan(theta/2)\n t[1] = y\n t[2] = x + R*_np.tan(theta/2)*_np.cos(theta)\n t[3] = y - R*_np.tan(theta/2)*_np.sin(theta)\n t[4] = t[0] - R\n t[5] = y\n t[6] = t[0] + R\n t[7] = y - 2*R\n t[8] = 0\n return t\n\n\ndef circle_inn(x, y, R, theta):\n \"(x,y) is the position where the tangent lines cross.\"\n t = EchoObj()\n t[0] = x - R*_np.tan(theta/2)*_np.cos(theta)\n t[1] = y + R*_np.tan(theta/2)*_np.sin(theta)\n t[2] = x + R*_np.tan(theta/2)\n t[3] = y\n t[4] = t[2] - R\n t[5] = y + 2*R\n t[6] = t[2] + R\n t[7] = y\n t[8] = 1\n return t\n\n\ndef reflect(t_in):\n if isinstance(t_in, (list, tuple)) and isinstance(t_in[0], EchoObj):\n t_out = t_in.copy()\n for i in range(len(t_out)):\n t_out[-i-1] = reflect(t_in[i])\n elif isinstance(t_in, EchoObj):\n t_out = t_in.copy()\n t_out[0] = -t_in[2]\n t_out[1] = t_in[3]\n t_out[2] = -t_in[0]\n t_out[3] = t_in[1]\n t_out[4] = -t_in[6]\n t_out[6] = -t_in[4]\n else:\n raise Exception('error')\n return t_out\n\n\ndef translate(t_in, delta):\n if isinstance(t_in, 
(list, tuple)) and isinstance(t_in[0], EchoObj):\n t_out = t_in.copy()\n for i in range(len(t_in)):\n t_out[i] = translate(t_in[i], delta)\n elif isinstance(t_in, EchoObj):\n t_out = t_in.copy()\n t_out[[0, 2]] += delta\n if _np.any(t_out[[4, 6]] != 0):\n t_out[[4, 6]] += delta\n else:\n raise Exception('error')\n return t_out\n\n\ndef invert(t_in):\n if isinstance(t_in, (list, tuple)) and isinstance(t_in[0], EchoObj):\n t_out = t_in.copy()\n for i in range(len(t_out)):\n t_out[-i-1] = invert(t_in[i])\n elif isinstance(t_in, EchoObj):\n t_out = t_in.copy()\n t_out[1] = t_in[3]\n t_out[3] = t_in[1]\n else:\n raise Exception('error')\n return t_out\n\n\ndef translate_radius(t_in, delta):\n if isinstance(t_in, (list, tuple)) and isinstance(t_in[0], EchoObj):\n t_out = t_in.copy()\n for i in range(len(t_in)):\n t_out[i] = translate_radius(t_in[i], delta)\n elif isinstance(t_in, EchoObj):\n t_out = t_in.copy()\n t_out[[1, 3]] += delta\n if _np.any(t_out[[5, 7]] != 0):\n t_out[[5, 7]] += delta\n else:\n raise Exception('error')\n return t_out\n\n\ndef concatenate(geometry):\n geo = []\n delta = None\n el2 = EchoObj()\n for el in geometry:\n delta = 0 if delta is None else el2[2] - el[0]\n el2 = translate(el, delta)\n geo.append(el2)\n\n return geo\n\n\ndef read_geometry_file(fname):\n array = _np.loadtxt(fname=fname, skiprows=1)\n points = []\n for a in array:\n b = list(a)\n if len(b) == 9:\n b.append(0)\n points.append(EchoObj(b))\n return points\n\n\ndef convert_units(points, unit=1e-2):\n for p in points:\n p *= unit\n return points\n\n\ndef create_linear_taper(fname=None, r_in=0.012, r_out=0.004, t=20,\n s_in=0.0025, s_out=0.0025, C=None):\n p2 = _np.array([0.0, r_in])\n p3 = _np.array([t*_np.abs(r_in-r_out), r_out])\n\n if C is None:\n t1 = line(p2[0], p2[1], p3[0], p3[1])\n points = [t1, ]\n else:\n theta = _np.arctan(1/t)\n if r_in > r_out:\n t1 = circle_out(p2[0], p2[1], r_out, theta)\n t3 = circle_inn(p3[0], p3[1], r_in, theta)\n else:\n t1 = circle_inn(p2[0], p2[1], r_out, theta)\n t3 = circle_out(p3[0], p3[1], r_in, theta)\n t2 = line(t1[2], t1[3], t3[0], t3[1])\n points = [t1, t2, t3]\n\n if s_in > 0.0:\n t0 = line(t1[0]-s_in, t1[1], t1[0], t1[1])\n points = [t0, ] + points\n if s_out > 0.0:\n t_end = points[-1]\n t_end = line(t_end[2], t_end[3], t_end[2]+s_out, t_end[3])\n points += [t_end,]\n\n if fname is not None:\n create_geometry_file(fname, points)\n\n return points\n\n\ndef create_collimator(fname=None, R_in=0.012, R_out=None, r=0.004,\n t_in=20, t_out=None, g=0.02, C_in=None, C_out=None):\n\n t_out = t_out or t_in\n R_out = R_out or R_in\n init = 0.0025\n\n theta_in = _np.arctan(1/t_in)\n theta_out = _np.arctan(1/t_out)\n\n points = create_linear_taper(r_in=R_in, r_out=r, t=t_in,\n s_in=init, s_out=0.0, C=C_in)\n t_m = line(points[-1][2], points[-1][3], points[-1][2]+g, points[-1][3])\n points2 = create_linear_taper(r_in=r, r_out=R_out, t=t_out,\n s_in=0.0, s_out=init, C=C_out)\n points2 = translate(points2, t_m[2]-points2[0][0])\n points += [t_m, ] + points2\n\n if fname is not None:\n create_geometry_file(fname, points)\n\n return points\n\n\ndef create_geometry_file(fname, points):\n if not fname.endswith('.txt'):\n fname += '.txt'\n with open(fname, 'w') as f:\n f.write('{0:d}'.format(len(points)) + '\\n')\n for p in points:\n f.write(''.join(['{0:12.5f}'.format(x) for x in p]) + '\\n')\n\n\ndef plot_geometry(points):\n fig = _plt.figure()\n for p in points:\n color = 'b' if _np.all(p[[4, 5, 6, 7]] == 0) else 'r'\n _plt.plot(p[[0, 2]], p[[1, 3]]*1e3, color)\n 
_plt.xlabel('s [m]')\n _plt.ylabel('R [mm]')\n _plt.grid('on')\n _plt.show()\n return fig\n\n\ndef create_input_file(\n fname, geo_fname,\n geo_unit='m', geo_type='recta',\n geo_width=0.024, geo_bound='magn', geo_conv=True,\n beam_sigma=5e-4, beam_offset=-1, modes=(0, 1),\n mesh_leng=10001, mesh_step=None):\n\n if mesh_step is None:\n mesh_step = beam_sigma/5.0\n\n with open(fname, 'w') as f:\n f.write('%%%% Generated automatically\\n')\n f.write('''\n%%%%%%%%%%%%%%%%%%%%% geometry %%%%%%%%%%%%%%%%%%%%%%%\n\nGeometryFile= {0:<21s} % geometry in \"Units\"\nUnits= {1:<28s} % m/cm/mm - only for GeometryFile!!!\nGeometryType= {2:<21s} % recta/round\nWidth= {3:<28.5g} % in m\nSymmetryCondition= {4:<16s} % magn/elec - boundary condition on axis\nConvex= {5:<27d} % 1(convex)/0(no)\n'''.format(geo_fname, geo_unit, geo_type,\n geo_width, geo_bound, 1 if geo_conv else 0))\n\n f.write('''\n%%%%%%%%%%%%%%% input beam and field %%%%%%%%%%%%%%%%%%\n\nInPartFile= - % -(Gaussian pencil beam)\nBunchSigma= {0:<23.5g} % in m\nOffset= {1:<27d} % in mesh steps/-1(default, near wall)\nInFieldFile= - % -(no output)\n'''.format(beam_sigma, beam_offset))\n\n f.write('''\n%%%%%%%%%%%%%%%%%%%%%% model %%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nWakeIntMethod= ind % ind/dir/-\nModes= {0:s}\nParticleMotion= - % 1d/2d/3d/-\n'''.format(' '.join(['{0:d}'.format(x) for x in modes])))\n\n f.write('''\n%%%%%%%%%%%%%%%%%%%%%% mesh %%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nMeshLength= {0:<23d} % in mesh steps\nStepY= {1:<28.5g} %\nStepZ= {1:<28.5g} %\nNStepsInConductive=0 % 0(default)\nTimeSteps=0 % 0(default)\n'''.format(mesh_leng, mesh_step))\n\n f.write('''\n%%%%%%%%%%%%%%%%%%%% monitors %%%%%%%%%%%%%%%%%%%%%%%%%%\n\nOutPartFile= - % -(no output)\nOutFieldFile= - % -(no output)\n''')\n\n\n# def parse_from_input_file(fname):\n#\n# if mesh_step is None: mesh_step = beam_sigma/5.0\n#\n# geo_fname,\n# geo_unit='m', geo_type='recta',geo_width=0.024,geo_bound='magn',geo_conv=True,\n# beam_sigma=5e-4, beam_offset=-1,\n# modes = (0,1),\n# mesh_leng = 10001, mesh_step=None\n#\n# with open(fname, 'r') as f:\n# f.write('%%%% Generated automatically\\n')\n# f.write('''\n# %%%%%%%%%%%%%%%%%%%%% geometry %%%%%%%%%%%%%%%%%%%%%%%\n#\n# GeometryFile= {0:<21s} % geometry in \"Units\"\n# Units= {1:<28s} % m/cm/mm - only for GeometryFile!!!\n# GeometryType= {2:<21s} % recta/round\n# Width= {3:<28.5g} % in m\n# SymmetryCondition= {4:<16s} % magn/elec - boundary condition on axis\n# Convex= {5:<27d} % 1(convex)/0(no)\n# '''.format(geo_fname,geo_unit,geo_type,geo_width,geo_bound,1 if geo_conv else 0))\n#\n#\n# f.write('''\n# %%%%%%%%%%%%%%% input beam and field %%%%%%%%%%%%%%%%%%\n#\n# InPartFile= - % -(Gaussian pencil beam)\n# BunchSigma= {0:<23.5g} % in m\n# Offset= {1:<27d} % in mesh steps/-1(default, near wall)\n# InFieldFile= - % -(no output)\n# '''.format(beam_sigma,beam_offset))\n#\n#\n# f.write('''\n# %%%%%%%%%%%%%%%%%%%%%% model %%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# WakeIntMethod= ind % ind/dir/-\n# Modes= {0:s}\n# ParticleMotion= - % 1d/2d/3d/-\n# '''.format(' '.join(['{0:d}'.format(x) for x in modes])))\n#\n#\n# f.write('''\n# %%%%%%%%%%%%%%%%%%%%%% mesh %%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# MeshLength= {0:<23d} % in mesh steps\n# StepY= {1:<28.5g} %\n# StepZ= {1:<28.5g} %\n# NStepsInConductive=0 % 0(default)\n# TimeSteps=0 % 0(default)\n# '''.format(mesh_leng,mesh_step))\n#\n# f.write('''\n# %%%%%%%%%%%%%%%%%%%% monitors %%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# OutPartFile= - % -(no output)\n# OutFieldFile= - % -(no output)\n# 
''')\n","repo_name":"lnls-fac/collective_effects","sub_path":"pycolleff/pycolleff/echo2d_util.py","file_name":"echo2d_util.py","file_ext":"py","file_size_in_byte":10100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8346484914","text":"import pygame, math\nfrom . import scene, playscene, view, pview, ptext, settings, graphics, snek, geometry, sound, progress\nfrom .pview import T\n\n\nclass self:\n\tpass\n\ndef init():\n\tself.t = 0\n\tself.jopt = 0\n\tself.done = False\n\tself.tdone = 0\n\tself.you = snek.You((0, 0))\n\ttilt = -0.5\n\tself.you.ps = []\n\tps = [(4 * S / (1 + C ** 2), 4 * S * C / (1 + C ** 2)) for C, S in math.CSround(120, jtheta0 = 1)]\n\tps = [math.R(tilt, p) for p in ps]\n\tthetas = [math.atan2(x1 - x0, y1 - y0) for (x0, y0), (x1, y1) in geometry.traversepoly(ps)]\n\tthetas = thetas[:-1] + thetas[-1:]\n\td, p0 = 0, (0, 0)\n\tfor p, theta in zip(ps, thetas):\n\t\tself.you.ps.append((d, p, theta))\n\t\td += math.distance(p, p0)\n\t\tp0 = p\n\tself.you.length = d\n\tself.you.d = d\n\tself.you.pos = ps[-1]\n\tself.you.theta = thetas[-1]\n\tself.you.speed = 1\n\tself.you.chomp(quiet = True)\n\tself.you.tchomp = 100\n\tself.you.menu = True\n\tself.you.fixmovement = False\n\tfor _ in range(120):\n\t\tself.you.think(1 / 120, 0, 0)\n\tsound.playmusic(\"brittle\")\n\ndef reset():\n\tself.t = 0\n\tself.done = False\n\tself.tdone = 0\n\ndef think(dt, kpressed, kdowns):\n\tself.t += dt\n\tself.you.think(dt, 0, 0)\n\t\n\tif \"quit\" in kdowns:\n\t\tscene.current = None\n\tif self.done:\n\t\tself.tdone += dt\n\t\tif self.tdone > 0.5:\n\t\t\tfinish()\n\telse:\n\t\tif \"up\" in kdowns:\n\t\t\tself.jopt -= 1\n\t\t\tsound.playsound(\"blip0\")\n\t\tif \"down\" in kdowns:\n\t\t\tself.jopt += 1\n\t\t\tsound.playsound(\"blip0\")\n\t\tself.jopt = math.clamp(self.jopt, 0, 2)\n\t\tif \"act\" in kdowns:\n\t\t\tself.done = True\n\t\t\tsound.playsound(\"blip1\")\n\ndef finish():\n\tif self.jopt == 0:\n\t\tscene.setcurrent(\"adventure\")\n\tif self.jopt == 1:\n\t\tscene.setcurrent(\"endless\")\n\tif self.jopt == 2:\n\t\tscene.setcurrent(\"settings_menu\")\n\t\n\ndef draw():\n\tpview.fill((0, 20, 50))\n\tview.x0, view.y0, view.scale = 0, 0, 150\n\tself.you.draw()\n\tpview.fill((0, 20, 50, 200))\n\tptext.draw(settings.gamename, center = T(640, 180), fontsize = T(160), owidth = 0.5, shade = 1)\n\toptnames = [\n\t\t\"Adventure/Tutorial\",\n\t\t\"Endless\",\n\t\t\"Settings\",\n\t]\n\tangle = math.mix(-0.2, 0.2, math.cycle(0.4 * self.t))\n\tfor j, optname in enumerate(optnames):\n\t\ty = 400 + 90 * j\n\t\tptext.draw(optname, midleft = T(250, y), fontsize = T(80), owidth = 0.5, shade = 1)\n\t\tif j == self.jopt:\n\t\t\timgtop = \"head-top-0\" if not progress.adventuredone else \"head-top-3\"\n\t\t\tgraphics.drawimgscreen(T(160, y + 15), \"head-bottom\", pview.f * 10, angle)\n\t\t\tgraphics.drawimgscreen(T(160, y + 15), imgtop, pview.f * 10, angle)\n\tptext.draw(\"by Christopher Night\\nmusic by Kevin MacLeod\",\n\t\tmidtop = T(960, 540), fontsize = T(40), owidth = 0.5, shade = 1)\n\n\tbtext = None\n\tif self.jopt == 0:\n\t\tbtext = \"Adventure complete. 
Thank you for playing!\" if progress.adventuredone else \"Adventure progress %d%%\" % (4 * progress.adventure)\n\tif self.jopt == 1:\n\t\tbtext = \"Endless stages completed: %d\" % progress.endless\n\tif not progress.adventuredone and progress.adventure == 0 and progress.endless == 0:\n\t\tbtext = \"Controls: Arrows or WASD, Space or Enter\"\n\tif btext:\n\t\tptext.draw(btext, midbottom = T(640, 700), fontsize = T(34), owidth = 0.5, shade = 1)\n\n\talpha = math.imix(0, 255, math.fadebetween(self.tdone, 0, 0, 0.5, 1))\n\tif alpha > 0:\n\t\tpview.fill((20, 60, 120, alpha))\n\n\n","repo_name":"cosmologicon/pyjam","sub_path":"woundabout/src/menuscene.py","file_name":"menuscene.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"43197506269","text":"\"\"\"\nFlattens twitter newline json such that it can be read by the huggingface dataset loader.\n\nDependent on:\n isn't dependent\n\nAuthors:\n Kenneth Enevoldsen\n\"\"\"\nimport glob\nimport os\n\nimport ndjson\n\n\ndef flatten_post(\n post: dict,\n keys_to_keep=None,\n):\n if keys_to_keep is None:\n keys_to_keep = [\n \"text\",\n \"id\",\n \"possibly_sensitive\",\n \"author_id\",\n \"source\",\n \"lang\",\n ]\n return {k: post[k] for k in keys_to_keep}\n\n\ndef flatten_ndjson(path: str, write_folder: str):\n \"\"\"flatten a json and write to disk\"\"\"\n fname = os.path.split(path)[-1]\n path_, ext = os.path.splitext(fname)\n write_path = os.path.join(write_folder, path_ + \"_flatten\" + ext)\n\n print(f\"Flattening: {path} to {write_path}\")\n\n # stream in json from orgin to write_path\n with open(path) as f:\n reader = ndjson.reader(f)\n\n with open(write_path, \"w\") as f:\n writer = ndjson.writer(f, ensure_ascii=False)\n\n for post in reader:\n # write flattened post to new json\n writer.writerow(flatten_post(post))\n\n\ndef main(read_path, write_path):\n path = os.path.join(read_path, \"**\", \"*.ndjson\")\n json_files = glob.glob(path, recursive=True)\n\n for j_file in json_files:\n flatten_ndjson(j_file, write_path)\n\n\nif __name__ == \"__main__\":\n read_path = os.path.join(\"/work\", \"twitter\")\n write_path = os.path.join(\"/work\", \"twitter_cleaned\")\n main(read_path, write_path)\n","repo_name":"centre-for-humanities-computing/danish-foundation-models","sub_path":"archive_v1/src/applications/hopetwitter/flatten_ndjson.py","file_name":"flatten_ndjson.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"54"} +{"seq_id":"38966254697","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom array import array\r\n# loading the file containing training data\r\ndata_file = input('enter file CSV filename : ')\r\ndata = pd.read_csv(data_file, header=None)\r\nn1 = (len(data.columns))-1 # to determine the target value column from data file\r\nY = data.iloc[:, n1].values # storing the target value\r\nn = (len(data.columns)) - 1\r\nX = data.iloc[:, 0:n].values # storing all values of x in data file in one array\r\n\r\nlearning_rate = float(input('Enter the learning rate : '))\r\nthreshold = float(input('Enter the threshold rate : '))\r\n\r\nlength = len(X) # length of data set\r\nweights = np.zeros([n+1]) # initializing weights\r\n# inserting x0 in the x array\r\nx0 = np.ones(length)\r\nX = np.insert(X, 0, x0, axis=1) # inserting X0 values in X array as X0 is always 1\r\n\r\niters = 0 # to count 
iterations\r\nsum_sq_error = 0\r\nsqe_old = 1\r\nsqe_new = 0\r\nwhile (abs(sqe_new - sqe_old)) > threshold:\r\n learnt_func = [] # to store the learnt function after every weight update\r\n\r\n for i in range(length):\r\n a = 0\r\n for w, x in list(zip(weights, X[i])):\r\n a += w * x\r\n learnt_func.append(a)\r\n\r\n # calculating error between predicted and actual target\r\n for lf, y in zip(learnt_func, Y):\r\n error = (Y - learnt_func)\r\n\r\n # gradient descent to update weights based on errors obtained\r\n temp = []\r\n for w in range(len(weights)):\r\n gradient = sum(error * X[:, w])\r\n temp.append(weights[w] + learning_rate * gradient)\r\n\r\n # saving sum of squared errors\r\n sum_sq_error = 0\r\n\r\n for l, y in zip(learnt_func, Y):\r\n sum_sq_error += ((y-l) ** 2)\r\n\r\n sqe_new = sum_sq_error # to check difference between new squared error and squared error in previous iterations\r\n print(iters, weights, sum_sq_error)\r\n\r\n weights = temp\r\n temp = []\r\n error = []\r\n if (abs(sqe_new - sqe_old)) < threshold:\r\n iters = length\r\n\r\n else:\r\n iters = iters + 1\r\n sqe_old = sqe_new\r\n sqe_new = 0\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Patrali27/Linear-regression","sub_path":"progass1.py","file_name":"progass1.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29148787532","text":"from random import *\r\n\r\nwin = 0\r\nloss = 0\r\ntied = 0\r\n\r\nwhile(True):\r\n items = [\"Rock\", \"Paper\",\"Scissors\", \"Lizard\", \"Spock\"]\r\n\r\n for i in range(5):\r\n print(i+1, items[i])\r\n\r\n playerchoice = -1\r\n while(playerchoice > 5 or playerchoice < 1):\r\n playerchoice = int(input(\"Make your choice: \"))\r\n\r\n print(\"You chose:\", items[playerchoice - 1])\r\n\r\n # pick the CPU move once, up front, so every branch below has a value to test\r\n cpuchoice = randint(1, 5)\r\n print(\"CPU chose:\", items[cpuchoice - 1])\r\n\r\n # each option beats exactly two others (rock crushes scissors and lizard, etc.)\r\n beats = {1: (3, 4), 2: (1, 5), 3: (2, 4), 4: (2, 5), 5: (1, 3)}\r\n\r\n if playerchoice == cpuchoice:\r\n print(\"Tied!\")\r\n tied += 1\r\n elif cpuchoice in beats[playerchoice]:\r\n print(\"You won!\")\r\n win += 1\r\n else:\r\n print(\"You Lose!\")\r\n loss += 1\r\n\r\n print()\r\n print(\"win = \", win, \"loss=\", loss, \"ties=\", tied)\r\n print(\"Lez Go Again\")\r\n print()","repo_name":"SadiqurS/Rock_Paper_Scissors_Spock","sub_path":"RPSS.py","file_name":"RPSS.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36233206625","text":"import unittest\r\nimport os\r\nfrom unittest.mock import patch\r\n\r\nimport my_rbfs as rbfs\r\nimport bfs_node as bfs\r\nimport common as c\r\n\r\nclass TestChessboardFunctions(unittest.TestCase):\r\n def test_is_safe(self):\r\n # Test case 1: Safe board configuration\r\n board1 = [1, 3, 0, 2]\r\n self.assertTrue(c.is_safe(board1))\r\n\r\n # Test case 2: Unsafe board configuration\r\n board2 = [1, 3, 2, 
0]\r\n self.assertFalse(c.is_safe(board2))\r\n\r\n # Test case 3: Empty board should always be safe\r\n board3 = []\r\n self.assertTrue(c.is_safe(board3))\r\n\r\n def test_get_answer(self):\r\n # Test case 1: User enters one of the valid options\r\n with unittest.mock.patch('builtins.input', return_value='y'):\r\n self.assertEqual(c.get_answer(\"Choose 'y' or 'n': \", 'y', 'n'), 'y')\r\n\r\n # Test case 2: User enters an invalid option first, then a valid one\r\n with unittest.mock.patch('builtins.input', side_effect=['invalid', 'y']):\r\n self.assertEqual(c.get_answer(\"Choose 'y' or 'n': \", 'y', 'n'), 'y')\r\n\r\n def test_get_number_of_range(self):\r\n # Test case 1: User enters a valid number within the specified range\r\n with unittest.mock.patch('builtins.input', return_value='5'):\r\n self.assertEqual(c.get_number_of_range(\"Enter a number between 1 and 10: \", 1, 10), 5)\r\n\r\n # Test case 2: User enters a number below the minimum\r\n with unittest.mock.patch('builtins.input', side_effect=[0, 5]):\r\n self.assertEqual(c.get_number_of_range(\"Enter a number between 1 and 10: \", 1, 10), 5)\r\n\r\n # Test case 3: User enters a number above the maximum\r\n with unittest.mock.patch('builtins.input', side_effect=[15, 5]):\r\n self.assertEqual(c.get_number_of_range(\"Enter a number between 1 and 10: \", 1, 10), 5)\r\n\r\n def test_get_board(self):\r\n # Test case 1: User chooses to input queen positions\r\n with unittest.mock.patch('builtins.input', side_effect=[3, 2, 1, 4]):\r\n self.assertEqual(c.get_board(4, 'n'), [2, 1, 0, 3])\r\n\r\nclass TestNQueensRBFSSolver(unittest.TestCase):\r\n def test_node_creation(self):\r\n state = [0, 1, 2, 3] # Example state\r\n node = rbfs.Node(state)\r\n self.assertEqual(node.state, state)\r\n self.assertIsNone(node.parent)\r\n self.assertEqual(node.g, 0)\r\n self.assertEqual(node.h, 0)\r\n self.assertEqual(node.f, 0)\r\n\r\n def test_generate_successors(self):\r\n state = [0, 1, 2,3] # Example state\r\n node = rbfs.Node(state)\r\n successors = rbfs.generate_successors(node)\r\n self.assertTrue(isinstance(successors, list))\r\n self.assertEqual(len(successors), 12) # There are 12 possible successor states for a 4x4 board\r\n\r\n def test_heuristic_f2(self):\r\n state = [1, 3, 0, 2] # Example state with no conflicts\r\n self.assertEqual(rbfs.heuristic_f2(state), 0)\r\n\r\n state = [1, 0, 3, 3] # Example state with conflicts\r\n self.assertEqual(rbfs.heuristic_f2(state), 3)\r\n\r\nclass TestNQueensBFSSolver(unittest.TestCase):\r\n def test_node_bfs_creation(self):\r\n state = [0, 1, 2] # Example state\r\n node = bfs.NodeBFS(state)\r\n self.assertEqual(node.state, state)\r\n self.assertIsNone(node.parent)\r\n\r\n def test_solve_n_queens_bfs(self):\r\n # Test case 1: Solve a small board with a known solution\r\n initial_board = [1, 3, 0, 2] # A 4x4 board with a known solution\r\n result = bfs.solve_n_queens_bfs(initial_board)\r\n self.assertIsNotNone(result)\r\n self.assertTrue(c.is_safe(result.state))\r\n\r\n # Test case 2: Try to solve an unsolvable board\r\n initial_board = [0, 0, 0] # An unsolvable 3x3 board\r\n result = bfs.solve_n_queens_bfs(initial_board)\r\n self.assertIsNone(result)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","repo_name":"kovalsofiia1/8-queens","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71938280801","text":"from app.util import BaseDAO\nfrom .model import 
Ticket\nfrom .sql import SqlTicket\n\n\nclass TicketDao(BaseDAO):\n\n def save(self, ticket: Ticket):\n cursor = self._save(SqlTicket._INSERT,\n (\n ticket.trip_id,\n ticket.origin_id,\n ticket.destination_id,\n ticket.passenger_id,\n ticket.seat_id,\n ticket.route_price\n ))\n ticket.id = cursor.fetchone()[0]\n cursor.close()\n return ticket\n\n def get_all(self):\n return self._get_all(SqlTicket._SELECT_ALL, Ticket)\n\n def get_by_id(self, id: int):\n return self._get_by(SqlTicket._SELECT_BY_ID.format(SqlTicket.TABLE_NAME, id), Ticket)\n\n def get_join(self, id: int) -> dict:\n cursor = self.connection.cursor()\n cursor.execute(SqlTicket._SELECT_JOIN.format(id))\n row = cursor.fetchone()\n cursor.close()\n if row:\n columns_name = [desc[0] for desc in cursor.description]\n return dict(zip(columns_name, row))\n\n def update(self, current_ticket: Ticket, new_ticket: Ticket):\n self._update(SqlTicket._UPDATE.format(SqlTicket.TABLE_NAME),\n (\n new_ticket.trip_id,\n new_ticket.origin_id,\n new_ticket.destination_id,\n new_ticket.passenger_id,\n new_ticket.trip_id,\n new_ticket.route_price,\n str(current_ticket.id)\n ))\n\n def delete(self, id: int):\n self._delete(SqlTicket._DELETE.format(SqlTicket.TABLE_NAME, id))\n","repo_name":"MarkAntonio/bus_ticket_system","sub_path":"app/modules/ticket/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41416258209","text":"import numpy as np\n\nimport sys\nsys.path.append('../src')\nfrom hmm import HiddenMarkovModel\n\n\ndef show_params(hmm, truehmm):\n A = hmm.A\n B = hmm.B\n ini_prob = hmm.ini_prob\n \n tA = truehmm.A\n tB = truehmm.B\n tini_prob = truehmm.ini_prob\n \n nstate, nobs = hmm.B.shape\n \n for i in range(nstate):\n for j in range(nstate):\n print(f' A[{i},{j}] = {A[i,j]:.4f} ({tA[i,j]}) ', end='')\n print()\n print()\n \n for i in range(nstate):\n for v in range(nobs):\n print(f' B[{i},{v}] = {B[i,v]:.4f} ({tB[i,v]}) ', end='')\n print()\n print()\n \n for i in range(nstate):\n print(f' ini_prob[{i}] = {ini_prob[i]:.4f} ({tini_prob[i]}) ', end='')\n print()\n\n\nif __name__ == '__main__':\n # create true HMM and generate observations\n truehmm = HiddenMarkovModel(nstate=2, nobs=3)\n truehmm.ini_prob = np.array([0.8, 0.2])\n truehmm.A = np.array([\n [0.65, 0.35],\n [0.25, 0.75]\n ])\n truehmm.B = np.array([\n [0.2, 0.4, 0.4],\n [0.5, 0.4, 0.1]\n ])\n\n nseq = 1000\n obs, _ = truehmm.simulate(nseq)\n\n # fit HMM\n hmm = HiddenMarkovModel(nstate=2, nobs=3)\n \n print(' Parameter = initial value (true value)')\n show_params(hmm, truehmm)\n \n iterinfo = hmm.learning(obs, maxiter=10, tol=0.01, verbose=False)\n \n print()\n print()\n print(' Iteration information:')\n for k, v in iterinfo.items():\n print(f' {k} : {v}')\n \n print()\n print()\n print(' Parameter = estimated value (true value)')\n show_params(hmm, truehmm)\n \n \n\n","repo_name":"CYehLu/HiddenMarkovModel","sub_path":"python/examples/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30807820883","text":"import sqlite3\r\n\r\nclass NotesApp:\r\n def __init__(self, db_name='notes.db'):\r\n self.conn = sqlite3.connect(db_name)\r\n self.create_table()\r\n\r\n def create_table(self):\r\n cursor = self.conn.cursor()\r\n cursor.execute('''\r\n CREATE TABLE IF NOT EXISTS notes (\r\n id INTEGER PRIMARY KEY,\r\n title 
TEXT,\r\n content TEXT\r\n )\r\n ''')\r\n self.conn.commit()\r\n\r\n def add_note(self, title, content):\r\n cursor = self.conn.cursor()\r\n cursor.execute('INSERT INTO notes (title, content) VALUES (?, ?)', (title, content))\r\n self.conn.commit()\r\n\r\n def view_notes(self):\r\n cursor = self.conn.cursor()\r\n cursor.execute('SELECT * FROM notes')\r\n notes = cursor.fetchall()\r\n for note in notes:\r\n print(f\"ID: {note[0]}, Title: {note[1]}, Content: {note[2]}\")\r\n\r\n def delete_note(self, note_id):\r\n cursor = self.conn.cursor()\r\n cursor.execute('DELETE FROM notes WHERE id = ?', (note_id,))\r\n self.conn.commit()\r\n\r\n def close_connection(self):\r\n self.conn.close()\r\n\r\n# Example usage of the application\r\nif __name__ == \"__main__\":\r\n app = NotesApp()\r\n\r\n while True:\r\n print(\"\\nMenu:\")\r\n print(\"1. Add a note\")\r\n print(\"2. View notes\")\r\n print(\"3. Delete a note\")\r\n print(\"4. Quit\")\r\n\r\n choice = input(\"Pick a menu option (1/2/3/4): \")\r\n\r\n if choice == '1':\r\n title = input(\"Enter the note title: \")\r\n content = input(\"Enter the note content: \")\r\n app.add_note(title, content)\r\n print(\"Note added!\")\r\n\r\n elif choice == '2':\r\n print(\"\\nNote list:\")\r\n app.view_notes()\r\n\r\n elif choice == '3':\r\n note_id = input(\"Enter the ID of the note to delete: \")\r\n app.delete_note(note_id)\r\n print(\"Note deleted!\")\r\n\r\n elif choice == '4':\r\n app.close_connection()\r\n print(\"Application closed.\")\r\n break\r\n\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n","repo_name":"dugipandu/UTS-PBO","sub_path":"jawaban 7.py","file_name":"jawaban 7.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
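The notes app above opens a cursor and calls `commit()` by hand in every method. `sqlite3` connections also work as transaction context managers, which makes commit-on-success and rollback-on-error implicit; a sketch of `add_note` in that style, reusing the `notes` table from above (the in-memory database is for illustration):

```python
import sqlite3

def add_note(conn: sqlite3.Connection, title: str, content: str) -> int:
    # 'with conn' commits on success and rolls back if the block raises
    with conn:
        cur = conn.execute(
            "INSERT INTO notes (title, content) VALUES (?, ?)",
            (title, content),
        )
    return cur.lastrowid  # id of the freshly inserted row

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE notes (id INTEGER PRIMARY KEY, title TEXT, content TEXT)")
print(add_note(conn, "demo", "hello"))  # -> 1
```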
\"./configs/yolov4-helmet-detection.weights\"\r\nconfig_path = \"./configs/yolov4-helmet-detection.cfg\"\r\nmodel = cv2.dnn.readNetFromDarknet(config_path, weights_path)\r\nlayer_name = model.getLayerNames()\r\nlayer_name = [layer_name[i[0] - 1] for i in model.getUnconnectedOutLayers()]\r\n\r\nmodel.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\r\nmodel.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\r\n\r\n\r\n# 針對筆電視訊影像進行偵測,也可改為其他影像來源\r\ncap = cv2.VideoCapture(0)\r\n\r\nstart_time = time.time()\r\ncounter = 0\r\n\r\nwhile True:\r\n (grabbed, image) = cap.read()\r\n\r\n if not grabbed:\r\n break\r\n \r\n image = imutils.resize(image, width=700)\r\n results = pedestrian_detection(image, model, layer_name,\r\n\t\tpersonidz=LABELS.index(\"helmet\"))\r\n\r\n # 畫出偵測到的每個方框\r\n for res in results:\r\n cv2.rectangle(image, (res[1][0],res[1][1]), (res[1][2],res[1][3]), label_colors[res[3]], 2)\r\n\r\n\r\n counter += 1 # 計算幀數\r\n if (time.time() - start_time) != 0: # 實時顯示幀數\r\n cv2.putText(image, \"FPS {0}\".format(float('%.1f' % (counter / (time.time() - start_time)))), (550, 50),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),\r\n 2)\r\n # src = cv2.resize(image, (416 // 2, 416 // 2), interpolation=cv2.INTER_CUBIC) # 窗口大小\r\n cv2.imshow(\"Detection\",image)\r\n # cv2.imshow('frame', src)\r\n print(\"FPS: \", counter / (time.time() - start_time))\r\n counter = 0\r\n start_time = time.time()\r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n# https://ithelp.ithome.com.tw/articles/10270068","repo_name":"KW-Baker/2022_Meichu_Hackathon_OSCAR","sub_path":"3_Jetson_Nano/helmet_detector/halmet_detector.py","file_name":"halmet_detector.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37776150280","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# This code originates from \"Polyhedra Wireframe\" feature of Alessandro\n# Zomparelli's Tissue addon,\n# https://github.com/alessandro-zomparelli/tissue/blob/master/polyhedra.py\n# adapted to work as Sverchok's scripted node.\n\n\"\"\"\nin verts_in v\nin edges_in s\nin faces_in s\nin thickness_in s\nin subdivisions_in s\nout verts_out v\nout edges_out s\nout faces_out s\n\"\"\"\n\ndissolve_inners = False\n\nfrom math import pi, sin, cos\n\nimport bmesh\nfrom mathutils import Vector\n\nfrom sverchok.data_structure import zip_long_repeat\nfrom sverchok.utils.sv_bmesh_utils import bmesh_from_pydata\n\ndef flatten_vector(vec, x, y):\n \"\"\"\n Find planar vector according to two axis.\n :arg vec: Input vector.\n :type vec: :class:'mathutils.Vector'\n :arg x: First axis.\n :type x: :class:'mathutils.Vector'\n :arg y: Second axis.\n :type y: :class:'mathutils.Vector'\n :return: Projected 2D Vector.\n :rtype: :class:'mathutils.Vector'\n \"\"\"\n vx = vec.project(x)\n vy = vec.project(y)\n mult = 1 if vx.dot(x) > 0 else -1\n vx = mult*vx.length\n mult = 1 if vy.dot(y) > 0 else -1\n vy = mult*vy.length\n return Vector((vx, vy))\n\ndef vector_rotation(vec):\n \"\"\"\n Find vector rotation according to X axis.\n :arg vec: Input vector.\n :type vec: :class:'mathutils.Vector'\n :return: Angle in radians.\n :rtype: float\n \"\"\"\n v0 = Vector((1,0))\n ang = Vector.angle_signed(vec, v0)\n if ang < 0: ang = 2*pi + ang\n return ang\n\nverts_out = []\nedges_out = []\nfaces_out = []\nfor verts, edges, faces, thickness, subdivisions in zip_long_repeat(verts_in, edges_in, faces_in, thickness_in, subdivisions_in):\n if isinstance(thickness, (list, tuple)):\n thickness = thickness[0]\n if isinstance(subdivisions, (list, tuple)):\n subdivisions = subdivisions[0]\n #faces = list(set([tuple(f) for f in faces]))\n uniq_faces = []\n for face in faces:\n if set(face) in [set(f) for f in uniq_faces]:\n pass\n #print(face)\n else:\n uniq_faces.append(face)\n bm = bmesh_from_pydata(verts, [], uniq_faces, normal_update=True)\n \n merge_dist = thickness*0.001\n\n subs = subdivisions\n\n# bm.verts.ensure_lookup_table()\n# bm.edges.ensure_lookup_table()\n# bm.faces.ensure_lookup_table()\n\n # Subdivide edges\n proportional_subs = True\n if subs > 1 and proportional_subs:\n wire_length = [e.calc_length() for e in bm.edges]\n all_edges = list(bm.edges)\n max_segment = max(wire_length)/subs\n split_edges = [[] for i in range(subs+1)]\n for e, l in zip(all_edges, wire_length):\n split_edges[int(l//max_segment)].append(e)\n for i in range(2,subs):\n perc = {}\n for e in split_edges[i]:\n perc[e]=0.1\n bmesh.ops.bisect_edges(bm, edges=split_edges[i], cuts=i, edge_percents=perc)\n\n ### Create double faces\n double_faces = []\n double_layer_edge = []\n double_layer_piece = []\n for f in bm.faces:\n verts0 = [v.co for v in f.verts]\n verts1 = [v.co for v in f.verts]\n verts1.reverse()\n double_faces.append(verts0)\n double_faces.append(verts1)\n\n # Create new bmesh object and data layers\n bm1 = bmesh.new()\n\n # Create faces and assign Edge Layers\n for verts in double_faces:\n new_verts = []\n for v in verts:\n vert = bm1.verts.new(v)\n new_verts.append(vert)\n bm1.faces.new(new_verts)\n\n 
bm1.verts.ensure_lookup_table()\n bm1.edges.ensure_lookup_table()\n bm1.faces.ensure_lookup_table()\n\n n_faces = len(bm.faces)\n n_doubles = len(bm1.faces)\n\n polyhedra = []\n\n for e in bm.edges:\n done = []\n\n # ERROR: Naked edges\n e_faces = len(e.link_faces)\n if e_faces < 2:\n bm.free()\n bm1.free()\n message = \"Naked edges are not allowed\"\n raise Exception(message)\n\n edge_vec = e.verts[1].co - e.verts[0].co\n\n # run first face\n for i1 in range(e_faces-1):\n f1 = e.link_faces[i1]\n #edge_verts1 = [v.index for v in f1.verts if v in e.verts]\n verts1 = [v.index for v in f1.verts]\n va1 = verts1.index(e.verts[0].index)\n vb1 = verts1.index(e.verts[1].index)\n # check if order of the edge matches the order of the face\n dir1 = va1 == (vb1+1)%len(verts1)\n edge_vec1 = edge_vec if dir1 else -edge_vec\n\n # run second face\n faces2 = []\n normals2 = []\n for i2 in range(i1+1,e_faces):\n #for i2 in range(n_faces):\n if i1 == i2: continue\n f2 = e.link_faces[i2]\n f2.normal_update()\n #edge_verts2 = [v.index for v in f2.verts if v in e.verts]\n verts2 = [v.index for v in f2.verts]\n va2 = verts2.index(e.verts[0].index)\n vb2 = verts2.index(e.verts[1].index)\n # check if order of the edge matches the order of the face\n dir2 = va2 == (vb2+1)%len(verts2)\n # check for normal consistency\n if dir1 != dir2:\n # add face\n faces2.append(f2.index+1)\n normals2.append(f2.normal)\n else:\n # add flipped face\n faces2.append(-(f2.index+1))\n normals2.append(-f2.normal)\n\n # find first polyhedra (positive)\n plane_x = f1.normal # normal\n plane_y = plane_x.cross(edge_vec1).normalized() # tangent face perp edge\n #print(f\"PX: {plane_x.length}, PY: {plane_y.length}\")\n id1 = (f1.index+1)\n\n min_angle0 = 10\n\n # check consistent faces\n if id1 not in done:\n id2 = None\n min_angle = min_angle0\n for i2, n2 in zip(faces2,normals2):\n v2 = flatten_vector(-n2, plane_x, plane_y)\n angle = vector_rotation(v2)\n if angle < min_angle:\n id2 = i2\n min_angle = angle\n if id2 is None:\n continue\n done.append(id2)\n add = True\n for p in polyhedra:\n if id1 in p or id2 in p:\n add = False\n if id2 not in p:\n p.append(id2)\n if id1 not in p:\n p.append(id1)\n break\n if add:\n polyhedra.append([id1, id2])\n\n # find second polyhedra (negative)\n plane_x = -f1.normal # normal\n plane_y = plane_x.cross(-edge_vec1) # tangent face perp edge\n id1 = -(f1.index+1)\n\n if id1 not in done:\n id2 = None\n min_angle = min_angle0\n for i2, n2 in zip(faces2, normals2):\n v2 = flatten_vector(n2, plane_x, plane_y)\n angle = vector_rotation(v2)\n if angle < min_angle:\n id2 = -i2\n min_angle = angle\n done.append(id2)\n add = True\n for p in polyhedra:\n if id1 in p or id2 in p:\n add = False\n if id2 not in p: p.append(id2)\n if id1 not in p: p.append(id1)\n break\n if add: polyhedra.append([id1, id2])\n\n print(\"Polyhedra\", polyhedra)\n for i in range(len(bm1.faces)):\n for j in (False,True):\n if j: id = i+1\n else: id = -(i+1)\n join = []\n keep = []\n for p in polyhedra:\n if id in p: join += p\n else: keep.append(p)\n if len(join) > 0:\n keep.append(list(dict.fromkeys(join)))\n polyhedra = keep\n\n for i, p in enumerate(polyhedra):\n for j in p:\n if j is None:\n continue\n bm1.faces[j].material_index = i\n\n delete_faces = []\n wireframe_faces = []\n not_wireframe_faces = []\n flat_faces = []\n\n bm.free()\n\n #bmesh.ops.bisect_edges(bm1, edges=bm1.edges, cuts=3)\n\n bm1.faces.index_update()\n #merge_verts = []\n for p in polyhedra:\n delete_faces_poly = []\n wireframe_faces_poly = []\n faces_id = [(f-1)*2 
if f > 0 else (-f-1)*2+1 for f in p]\n faces_id_neg = [(-f-1)*2 if -f > 0 else (f-1)*2+1 for f in p]\n merge_verts = []\n faces = [bm1.faces[f_id] for f_id in faces_id]\n for f in faces:\n delete = False\n if f.index in delete_faces: continue\n \n cen = f.calc_center_median()\n for e in f.edges:\n mid = (e.verts[0].co + e.verts[1].co)/2\n vec1 = e.verts[0].co - e.verts[1].co\n vec2 = mid - cen\n ang = Vector.angle(vec1,vec2)\n length = vec2.length\n length = sin(ang)*length\n if length < thickness/2:\n delete = True\n \n if False:\n sides = len(f.verts)\n for i in range(sides):\n v = f.verts[i].co\n v0 = f.verts[(i-1)%sides].co\n v1 = f.verts[(i+1)%sides].co\n vec0 = v0 - v\n vec1 = v1 - v\n ang = (pi - vec0.angle(vec1))/2\n length = min(vec0.length, vec1.length)*sin(ang)\n if length < thickness/2:\n delete = True\n break\n\n if delete:\n delete_faces_poly.append(f.index)\n else:\n wireframe_faces_poly.append(f.index)\n merge_verts += [v for v in f.verts]\n if len(wireframe_faces_poly) < 2:\n delete_faces += faces_id\n not_wireframe_faces += faces_id_neg\n else:\n wireframe_faces += wireframe_faces_poly\n flat_faces += delete_faces_poly\n\n #wireframe_faces = list(dict.fromkeys(wireframe_faces))\n bmesh.ops.remove_doubles(bm1, verts=merge_verts, dist=merge_dist)\n bm1.edges.ensure_lookup_table()\n bm1.faces.ensure_lookup_table()\n bm1.faces.index_update()\n\n\n wireframe_faces = [i for i in wireframe_faces if i not in not_wireframe_faces]\n wireframe_faces = list(dict.fromkeys(wireframe_faces))\n\n flat_faces = list(dict.fromkeys(flat_faces))\n\n ############# FRAME #############\n bm1.faces.index_update()\n wireframe_faces = [bm1.faces[i] for i in wireframe_faces]\n original_faces = wireframe_faces\n #bmesh.ops.remove_doubles(bm1, verts=merge_verts, dist=0.001)\n\n # detect edge loops\n\n loops = []\n boundaries_mat = []\n neigh_face_center = []\n face_normals = []\n\n # compute boundary frames\n new_faces = []\n wire_length = []\n vert_ids = []\n\n # append regular faces\n\n for f in original_faces:\n loop = list(f.verts)\n loops.append(loop)\n boundaries_mat.append([f.material_index for v in loop])\n f.normal_update()\n face_normals.append([f.normal for v in loop])\n\n push_verts = []\n inner_loops = []\n\n for loop_index, loop in enumerate(loops):\n is_boundary = loop_index < len(neigh_face_center)\n materials = boundaries_mat[loop_index]\n new_loop = []\n loop_ext = [loop[-1]] + loop + [loop[0]]\n\n # calc tangents\n tangents = []\n for i in range(len(loop)):\n # vertices\n vert0 = loop_ext[i]\n vert = loop_ext[i+1]\n vert1 = loop_ext[i+2]\n # edge vectors\n vec0 = (vert0.co - vert.co).normalized()\n vec1 = (vert.co - vert1.co).normalized()\n # tangent\n _vec1 = -vec1\n _vec0 = -vec0\n ang = (pi - vec0.angle(vec1))/2\n normal = face_normals[loop_index][i]\n tan0 = normal.cross(vec0)\n tan1 = normal.cross(vec1)\n tangent = (tan0 + tan1).normalized()/sin(ang)*thickness/2\n #if tangent.length > 0.04:\n # print(\"T\", tangent.length)\n tangents.append(tangent)\n\n # calc correct direction for boundaries\n mult = -1\n if is_boundary:\n dir_val = 0\n for i in range(len(loop)):\n surf_point = neigh_face_center[loop_index][i]\n tangent = tangents[i]\n vert = loop_ext[i+1]\n dir_val += tangent.dot(vert.co - surf_point)\n if dir_val > 0: mult = 1\n\n # add vertices\n for i in range(len(loop)):\n vert = loop_ext[i+1]\n area = 1\n new_co = vert.co + tangents[i] * mult * area\n # add vertex\n new_vert = bm1.verts.new(new_co)\n new_loop.append(new_vert)\n vert_ids.append(vert.index)\n 
new_loop.append(new_loop[0])\n\n # add faces\n #materials += [materials[0]]\n for i in range(len(loop)):\n v0 = loop_ext[i+1]\n v1 = loop_ext[i+2]\n v2 = new_loop[i+1]\n v3 = new_loop[i]\n face_verts = [v1,v0,v3,v2]\n if mult == -1: face_verts = [v0,v1,v2,v3]\n new_face = bm1.faces.new(face_verts)\n # Material by original edges\n piece_id = 0\n new_face.select = True\n new_faces.append(new_face)\n wire_length.append((v0.co - v1.co).length)\n max_segment = max(wire_length)/subdivisions\n #for f,l in zip(new_faces,wire_length):\n # f.material_index = min(int(l/max_segment), subdivisions-1)\n bm1.verts.ensure_lookup_table()\n push_verts += [v.index for v in loop_ext]\n\n # At this point the topology has been built, but not yet thickened\n\n bm1.verts.ensure_lookup_table()\n bm1.edges.ensure_lookup_table()\n bm1.faces.ensure_lookup_table()\n bm1.verts.index_update()\n\n ### Displace vertices ###\n\n circle_center = [0]*len(bm1.verts)\n circle_normal = [0]*len(bm1.verts)\n\n smooth_corners = [True] * len(bm1.verts)\n corners = [[] for i in range(len(bm1.verts))]\n normals = [0]*len(bm1.verts)\n vertices = [0]*len(bm1.verts)\n # Define vectors direction\n for f in new_faces:\n v0 = f.verts[0]\n v1 = f.verts[1]\n id = v0.index\n corners[id].append((v1.co - v0.co).normalized())\n normals[id] = v0.normal.copy()\n vertices[id] = v0\n smooth_corners[id] = False\n # Displace vertices\n for i, vecs in enumerate(corners):\n if len(vecs) > 0:\n v = vertices[i]\n nor = normals[i]\n ang = 0\n for vec in vecs:\n ang += nor.angle(vec)\n ang /= len(vecs)\n div = sin(ang)\n if div == 0: div = 1\n dv = nor*thickness/2/div\n #dv *= v.calc_shell_factor()\n if dv.length > 0.2:\n print(dv.length)\n v.co += dv\n\n # Removing original flat faces\n\n flat_faces = [bm1.faces[i] for i in flat_faces]\n for f in flat_faces:\n f.material_index = subdivisions+1\n for v in f.verts:\n if smooth_corners[v.index]:\n v.co += (1.0/v.calc_shell_factor()) * v.normal*thickness/2\n smooth_corners[v.index] = False\n delete_faces = delete_faces + [f.index for f in original_faces]\n delete_faces = list(dict.fromkeys(delete_faces))\n delete_faces = [bm1.faces[i] for i in delete_faces]\n bmesh.ops.delete(bm1, geom=delete_faces, context='FACES')\n\n bmesh.ops.remove_doubles(bm1, verts=bm1.verts, dist=merge_dist)\n bm1.faces.ensure_lookup_table()\n bm1.edges.ensure_lookup_table()\n bm1.verts.ensure_lookup_table()\n\n if dissolve_inners:\n bm1.edges.index_update()\n dissolve_edges = []\n for f in bm1.faces:\n e = f.edges[2]\n if e not in dissolve_edges:\n dissolve_edges.append(e)\n bmesh.ops.dissolve_edges(bm1, edges=dissolve_edges, use_verts=True, use_face_split=True)\n \n # pydata_from_bmesh is required to convert the bmesh back to lists (only bmesh_from_pydata is imported at the top)\n from sverchok.utils.sv_bmesh_utils import pydata_from_bmesh\n new_verts, new_edges, new_faces = pydata_from_bmesh(bm1)\n verts_out.append(new_verts)\n edges_out.append(new_edges)\n faces_out.append(new_faces)\n\n","repo_name":"nortikin/sverchok","sub_path":"node_scripts/SNLite_templates/bmesh/polyhedra_wireframe.py","file_name":"polyhedra_wireframe.py","file_ext":"py","file_size_in_byte":17302,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"16283461846","text":"'''\nled-button.py\n-------------\nControl a buzzer and an LED using a push-button.\n\nRequired:\n- LED\n- Resistor (> ~220 Ohm)\n- Push-button\n- Buzzer\n'''\n\n# Import necessary modules\nfrom gpiozero import LED, Button, Buzzer\n\n# Choose pin\nled_pin = 17\nbutton_pin = 2\nbuzzer_pin = 18\n\n# GPIO pin\nled = LED(led_pin)\n\n# Button\nbutton = Button(button_pin)\n\n# 
Buzzer\nbuzzer = Buzzer(buzzer_pin)\n\nwhile True:\n\n # Wait for press\n if button.is_pressed:\n print(\"On\")\n\n # Switch the LED on\n led.on()\n\n # Buzzer on\n buzzer.on()\n else:\n # Switch the LED off\n led.off()\n\n # Buzzer off\n buzzer.off()\n\n print(\"Off\")\n","repo_name":"kaymal/raspberry-pi-projects","sub_path":"led-projects/button-buzzer.py","file_name":"button-buzzer.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37350264030","text":"from flask import Flask, request\nfrom flask_cors import CORS\n\nfrom src.modules import process_data as pr\nfrom src.modules import web_scraper as ws\n\napp = Flask(__name__)\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n\n@app.route('/process', methods=['POST', 'OPTIONS'])\ndef process():\n if request.method == 'OPTIONS':\n return {}, 200\n try:\n \n try:\n data = request.get_json()\n strike = int(data['strike'])\n tmax_trigger = int(data['triggerTmax'])\n start_year = int(data['startYear'])\n except Exception as e:\n print(f\"Bad request: {e}\")\n return {'success': False}, 400\n\n annual_payout, daily_payout = pr.calculate_payout(strike, tmax_trigger, start_year)\n\n print('backend hit successfully')\n\n return {\n 'success': True,\n 'annualChartData': annual_payout,\n 'dailyChartData': daily_payout\n }, 200\n except Exception as e:\n print(f\"Exception: {e}\")\n return {'success': False}, 500\n \n\n@app.route('/future', methods=['GET'])\ndef future():\n try:\n future = ws.get_future(timeout=20)\n return {\n 'success': True,\n 'future': future\n }, 200\n except Exception as e:\n print(f'Unable to get future: {e}')\n return {\n 'success': False\n }, 500\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"kvenk96/option-dashboard","sub_path":"backend/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35754467327","text":"# Written by Noah Coleman\n# 11/12/2020\n\n# This is an intro to \"Action Chains\"\n# This program will play cookie clicker and perform automatic upgrades.\n# We can store a list of actions in a queue, and perform them in a sequence\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nPATH = \"C:\\\\Program Files (x86)\\\\Mine\\\\Apps\\\\chromedriver.exe\"\ndriver = webdriver.Chrome(PATH)\ndriver.get(\"https://orteil.dashnet.org/cookieclicker/\")\n\n# When we get to the page, the elements won't be there right away, so we have to wait for the page to load.\n# This is called an implicit wait\ndriver.implicitly_wait(5)\n\n# Cookie to click and cookie count elements\ncookie = driver.find_element_by_id(\"bigCookie\")\nnumberOfCookies = driver.find_element_by_id(\"cookies\")\n\n# List of upgrade elements\nitems = [driver.find_element_by_id(\"productPrice\" + str(i)) for i in range(1, -1, -1)] #(start, stop, step)\n\n# Creating an action chain object called actions using our driver\nactions = ActionChains(driver)\n\n# The first thing to add to the action chain is clicking the cookie constantly\nactions.click(cookie)\nactions.perform()\n\nfor i in range(5000):\n # perform all the actions we described above (click)\n actions.perform()\n # take the first value before the split in numberOfCookies.text, strip the thousands separators, and convert it to an integer.\n count = int(numberOfCookies.text.split(\" \")[0].replace(\",\", \"\"))\n # for each item in the 
upgrades section, check if we can afford the upgrade from \"best\" to \"worst\"\n # if we can, move cursor to the upgrade, and click.\n for item in items:\n cost = int(item.text)\n if cost <= count:\n upgrade_actions = ActionChains(driver)\n upgrade_actions.move_to_element(item)\n upgrade_actions.double_click()\n upgrade_actions.perform()\n","repo_name":"noahcoleman42/CookieClicker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30153655987","text":"# Summer/Winter Coding(~2018)\n# Making primes\n# Return the number of ways to pick 3 different numbers from nums whose sum is a prime \n\nfrom itertools import combinations \n\ndef solution(nums):\n max_sum = sum(nums)+1\n is_prime = [True]*max_sum\n is_prime[0] = False\n is_prime[1] = False\n \n for i in range(2, max_sum):\n if is_prime[i]:\n for j in range(2*i, max_sum, i):\n is_prime[j] = False\n \n comb = combinations(nums, 3)\n \n answer = 0\n for c in comb:\n if is_prime[sum(c)]:\n answer += 1\n \n return answer\n\n# =================================================================","repo_name":"eagerithm/algorithms","sub_path":"bugoverdose/math/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19469842103","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.views.generic import TemplateView\n\nfrom dark.models.tournament import Tournament\n\n\nclass UserAllTournamentsView(LoginRequiredMixin, TemplateView):\n template_name = 'dark/tournament/user_all.html'\n login_url = reverse_lazy('user:login')\n redirect_field_name = 'redirect_to'\n\n def get_context_data(self, **kwargs):\n current_user = self.request.user\n now = timezone.now()\n user_tournaments = Tournament.objects.filter(participants__in=[current_user])\n return {\n \"upcoming_tournaments\": user_tournaments.filter(start_date__gt=now),\n \"in_progress_tournaments\": user_tournaments.filter(start_date__lte=now).filter(end_date__gt=now),\n \"archived_tournaments\": user_tournaments.filter(end_date__lte=now)\n }\n","repo_name":"DARK-development-team/DARK","sub_path":"dark/views/tournament/user_all.py","file_name":"user_all.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38943201267","text":"import tweepy\n# replace these prompts with nothing later\napiKey = input(\"Enter your consumer API key: \")\napiSecret = input(\"Enter your consumer API secret: \")\nauth = tweepy.OAuthHandler(apiKey, apiSecret)\nredirect_url = auth.get_authorization_url()\nprint(redirect_url)\nverifier = input('Pin:')\nauth.get_access_token(verifier)\nprint(f\"Key:{auth.access_token}\\nSecret:{auth.access_token_secret}\")\nuserName = input(\"Insert your @: \")\nuserKey = auth.access_token\nuserSecret = auth.access_token_secret\ndata = open('data.txt', 'w')\ndata.write(f'=======================CONSUMER=======================\\n{apiKey}\\n{apiSecret}\\n=======================USER=======================\\n{userKey}\\n{userSecret}\\n=======================USER 
@=====================\\n{userName}')","repo_name":"Sigolirous/twitterBot","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587403361","text":"\"\"\"MXNet Module for NNConv layer\"\"\"\n# pylint: disable= no-member, arguments-differ, invalid-name\nimport mxnet as mx\nfrom mxnet.gluon import nn\nfrom mxnet.gluon.contrib.nn import Identity\n\nfrom .... import function as fn\nfrom ....utils import expand_as_pair\n\n\nclass NNConv(nn.Block):\n r\"\"\"Graph Convolution layer from `Neural Message Passing\n for Quantum Chemistry `__\n\n .. math::\n h_{i}^{l+1} = h_{i}^{l} + \\mathrm{aggregate}\\left(\\left\\{\n f_\\Theta (e_{ij}) \\cdot h_j^{l}, j\\in \\mathcal{N}(i) \\right\\}\\right)\n\n where :math:`e_{ij}` is the edge feature, :math:`f_\\Theta` is a function\n with learnable parameters.\n\n Parameters\n ----------\n in_feats : int\n Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.\n NN can be applied on homogeneous graph and unidirectional\n `bipartite graph `__.\n If the layer is to be applied on a unidirectional bipartite graph, ``in_feats``\n specifies the input feature size on both the source and destination nodes. If\n a scalar is given, the source and destination node feature size would take the\n same value.\n out_feats : int\n Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.\n edge_func : callable activation function/layer\n Maps each edge feature to a vector of shape\n ``(in_feats * out_feats)`` as weight to compute\n messages.\n Also is the :math:`f_\\Theta` in the formula.\n aggregator_type : str\n Aggregator type to use (``sum``, ``mean`` or ``max``).\n residual : bool, optional\n If True, use residual connection. Default: ``False``.\n bias : bool, optional\n If True, adds a learnable bias to the output. 
Default: ``True``.\n\n Examples\n --------\n >>> import dgl\n >>> import numpy as np\n >>> import mxnet as mx\n >>> from mxnet import gluon\n >>> from dgl.nn import NNConv\n >>>\n >>> # Case 1: Homogeneous graph\n >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))\n >>> g = dgl.add_self_loop(g)\n >>> feat = mx.nd.ones((6, 10))\n >>> lin = gluon.nn.Dense(20)\n >>> lin.initialize(ctx=mx.cpu(0))\n >>> def edge_func(efeat):\n >>> return lin(efeat)\n >>> efeat = mx.nd.ones((12, 5))\n >>> conv = NNConv(10, 2, edge_func, 'mean')\n >>> conv.initialize(ctx=mx.cpu(0))\n >>> res = conv(g, feat, efeat)\n >>> res\n [[0.39946803 0.32098457]\n [0.39946803 0.32098457]\n [0.39946803 0.32098457]\n [0.39946803 0.32098457]\n [0.39946803 0.32098457]\n [0.39946803 0.32098457]]\n \n\n >>> # Case 2: Unidirectional bipartite graph\n >>> u = [0, 1, 0, 0, 1]\n >>> v = [0, 1, 2, 3, 2]\n >>> g = dgl.heterograph({('_N', '_E', '_N'):(u, v)})\n >>> u_feat = mx.nd.random.randn(2, 10)\n >>> v_feat = mx.nd.random.randn(4, 10)\n >>> conv = NNConv(10, 2, edge_func, 'mean')\n >>> conv.initialize(ctx=mx.cpu(0))\n >>> efeat = mx.nd.ones((5, 5))\n >>> res = conv(g, (u_feat, v_feat), efeat)\n >>> res\n [[ 0.24425688 0.3238042 ]\n [-0.11651017 -0.01738572]\n [ 0.06387337 0.15320925]\n [ 0.24425688 0.3238042 ]]\n \n \"\"\"\n\n def __init__(\n self,\n in_feats,\n out_feats,\n edge_func,\n aggregator_type,\n residual=False,\n bias=True,\n ):\n super(NNConv, self).__init__()\n self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)\n self._out_feats = out_feats\n if aggregator_type == \"sum\":\n self.reducer = fn.sum\n elif aggregator_type == \"mean\":\n self.reducer = fn.mean\n elif aggregator_type == \"max\":\n self.reducer = fn.max\n else:\n raise KeyError(\n \"Aggregator type {} not recognized: \".format(aggregator_type)\n )\n self._aggre_type = aggregator_type\n\n with self.name_scope():\n self.edge_nn = edge_func\n if residual:\n if self._in_dst_feats != out_feats:\n self.res_fc = nn.Dense(\n out_feats,\n in_units=self._in_dst_feats,\n use_bias=False,\n weight_initializer=mx.init.Xavier(),\n )\n else:\n self.res_fc = Identity()\n else:\n self.res_fc = None\n\n if bias:\n self.bias = self.params.get(\n \"bias\", shape=(out_feats,), init=mx.init.Zero()\n )\n else:\n self.bias = None\n\n def forward(self, graph, feat, efeat):\n r\"\"\"Compute MPNN Graph Convolution layer.\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n feat : mxnet.NDArray or pair of mxnet.NDArray\n The input feature of shape :math:`(N, D_{in})` where :math:`N`\n is the number of nodes of the graph and :math:`D_{in}` is the\n input feature size.\n efeat : mxnet.NDArray\n The edge feature of shape :math:`(N, *)`, should fit the input\n shape requirement of ``edge_nn``.\n\n Returns\n -------\n mxnet.NDArray\n The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`\n is the output feature size.\n \"\"\"\n with graph.local_scope():\n feat_src, feat_dst = expand_as_pair(feat, graph)\n\n # (n, d_in, 1)\n graph.srcdata[\"h\"] = feat_src.expand_dims(-1)\n # (n, d_in, d_out)\n graph.edata[\"w\"] = self.edge_nn(efeat).reshape(\n -1, self._in_src_feats, self._out_feats\n )\n # (n, d_in, d_out)\n graph.update_all(\n fn.u_mul_e(\"h\", \"w\", \"m\"), self.reducer(\"m\", \"neigh\")\n )\n rst = graph.dstdata.pop(\"neigh\").sum(axis=1) # (n, d_out)\n # residual connection\n if self.res_fc is not None:\n rst = rst + self.res_fc(feat_dst)\n # bias\n if self.bias is not None:\n rst = rst + self.bias.data(feat_dst.context)\n return 
rst\n","repo_name":"dmlc/dgl","sub_path":"python/dgl/nn/mxnet/conv/nnconv.py","file_name":"nnconv.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"15080552900","text":"from itertools import combinations\n\ndef getPrimes():\n primes = [ True for i in range(3001)]\n primes[0] = False\n primes[1] = False\n i = 2 \n while i**2 < 3000:\n if primes[i] is True:\n j = 2\n while j*i < 3000:\n primes[j*i] = False\n j+=1\n i+=1\n\n return primes\n\n\n\ndef solution(nums):\n answer = 0\n primes = getPrimes()\n combs = combinations(nums,3)\n for c in combs :\n if primes[sum(c)] is True:\n answer+=1\n return answer\n\nprint(solution([1,2,7,6,4]\t))","repo_name":"ohtaehyun/algo_study","sub_path":"programmers/lv2/make_prime_number_lv2.py","file_name":"make_prime_number_lv2.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71118856802","text":"import math\nfrom typing import List, Dict, Optional\n\nimport torch\nfrom fairseq.sequence_generator import SequenceGenerator as FairGenerator\nfrom torch import Tensor\n\n\nclass SequenceGenerator(FairGenerator):\n def _generate(self, sample, *args, **kwargs):\n incremental_states = [{} for _ in self.model.models]\n net_input = sample[\"net_input\"]\n src_tokens = net_input[\"src_tokens\"]\n batch_size = src_tokens.size(0)\n beam_size = self.beam_size\n max_len = self.model.max_decoder_positions() - 1\n encoder_outs = self.model.forward_encoder(net_input)\n\n new_order = torch.arange(batch_size).repeat_interleave(beam_size).to(src_tokens.device).long()\n encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)\n\n scores = torch.zeros(batch_size * beam_size, max_len + 2).to(src_tokens).float()\n tokens = torch.zeros(batch_size * beam_size, max_len + 2).to(src_tokens).long().fill_(self.pad)\n tokens[:, 0] = self.eos\n\n cands_to_ignore = torch.zeros(batch_size, beam_size).to(src_tokens).eq(-1)\n\n finalized = [[] for _ in range(batch_size)]\n finished = [False for _ in range(batch_size)]\n\n num_remaining_sent = batch_size\n bbsz_offsets = (torch.arange(0, batch_size) * beam_size).unsqueeze(1).type_as(tokens)\n cand_offsets = torch.arange(0, 2 * beam_size).type_as(tokens)\n\n for step in range(max_len + 1):\n lprobs, _ = self.model.forward_decoder(tokens[:, :step + 1], encoder_outs, incremental_states)\n lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)\n\n if step == max_len:\n lprobs[:, :self.eos] = -math.inf\n lprobs[:, self.eos+1:] = -math.inf\n\n lprobs = lprobs + scores[:, step: step + 1]\n\n if step == 0:\n lprobs = lprobs[::beam_size]\n\n cand_scores, cand_indices = torch.topk(lprobs.view(batch_size, -1), k=beam_size * 2)\n cand_beams = torch.div(cand_indices, self.vocab_size, rounding_mode='floor')\n cand_indices = cand_indices.fmod(self.vocab_size)\n\n cand_bbsz_idx = cand_beams + bbsz_offsets\n\n eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)\n eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)\n\n eos_bbsz_idx = torch.masked_select(cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size])\n\n batch_idxs = None\n if eos_bbsz_idx.numel() > 0:\n eos_scores = torch.masked_select(cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size])\n finalized_sents = self.finalize_hypos(step, eos_bbsz_idx, eos_scores, tokens, scores, finalized, finished, beam_size, max_len)\n 
num_remaining_sent -= len(finalized_sents)\n\n if num_remaining_sent == 0:\n break\n\n if len(finalized_sents) > 0:\n new_bsz = batch_size - len(finalized_sents)\n\n batch_mask = torch.ones(batch_size, dtype=torch.bool, device=cand_indices.device)\n batch_mask[finalized_sents] = False\n batch_idxs = torch.arange(batch_size, device=cand_indices.device).masked_select(batch_mask)\n\n eos_mask = eos_mask[batch_idxs]\n cand_beams = cand_beams[batch_idxs]\n bbsz_offsets.resize_(new_bsz, 1)\n cand_bbsz_idx = cand_beams + bbsz_offsets\n cand_scores = cand_scores[batch_idxs]\n cand_indices = cand_indices[batch_idxs]\n\n cands_to_ignore = cands_to_ignore[batch_idxs]\n\n scores = scores.view(batch_size, -1)[batch_idxs].view(new_bsz * beam_size, -1)\n tokens = tokens.view(batch_size, -1)[batch_idxs].view(new_bsz * beam_size, -1)\n batch_size = new_bsz\n\n eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))\n active_mask = eos_mask.type_as(cand_offsets) * beam_size * 2 + cand_offsets[: eos_mask.size(1)]\n new_cands_to_ignore, active_hypos = torch.topk(active_mask, k=beam_size, dim=1, largest=False)\n cands_to_ignore = new_cands_to_ignore.ge(2 * beam_size)[:, :beam_size]\n active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos).view(-1)\n\n tokens[:, :step+1] = torch.index_select(tokens[:, :step+1], dim=0, index=active_bbsz_idx)\n tokens.view(batch_size, beam_size, -1)[:, :, step+1] = torch.gather(cand_indices, dim=1, index=active_hypos)\n\n if step > 0:\n scores[:, :step+1] = torch.index_select(scores[:, :step+1], dim=0, index=active_bbsz_idx)\n\n scores.view(batch_size, beam_size, -1)[:, :, step+1] = torch.gather(cand_scores, dim=1, index=active_hypos)\n\n if active_bbsz_idx is not None:\n if batch_idxs is not None:\n corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)\n active_bbsz_idx.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)\n self.model.reorder_incremental_state(incremental_states, active_bbsz_idx)\n encoder_outs = self.model.reorder_encoder_out(encoder_outs, active_bbsz_idx)\n\n for sent in range(len(finalized)):\n scores = torch.tensor([float(elem[\"score\"].item()) for elem in finalized[sent]])\n _, sorted_scores_indices = torch.sort(scores, descending=True)\n finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]\n return finalized\n\n\n def finalize_hypos(\n self,\n step: int,\n bbsz_idx: torch.Tensor,\n eos_scores,\n tokens,\n scores,\n finalized: List[List[Dict[str, Tensor]]],\n finished: List[bool],\n beam_size: int,\n max_len: int,\n *args, **kwargs\n ):\n # clone relevant token and attention tensors.\n # tokens is (batch * beam, max_len). 
So the index_select\n # gets the newly EOS rows, then selects cols 1..{step + 2}\n tokens_clone = tokens.index_select(0, bbsz_idx)[:, 1: step + 2] # skip the first index, which is EOS\n tokens_clone[:, step] = self.eos\n\n # normalize sentence-level scores\n eos_scores = eos_scores / (step + 1) ** self.len_penalty\n\n cum_unfin: List[int] = []\n prev = 0\n for f in finished:\n if f:\n prev += 1\n else:\n cum_unfin.append(prev)\n\n sents_seen: Dict[str, Optional[Tensor]] = {}\n\n # For every finished beam item\n for i in range(bbsz_idx.size()[0]):\n idx = bbsz_idx[i]\n score = eos_scores[i]\n # sentence index in the current (possibly reduced) batch\n unfin_idx = torch.div(idx, beam_size, rounding_mode='floor')\n # sentence index in the original (unreduced) batch\n sent = unfin_idx + cum_unfin[unfin_idx]\n # print(f\"{step} FINISHED {idx} {score} {sent}={unfin_idx} {cum_unfin}\")\n seen = str(int(sent)) + \"_\" + str(int(unfin_idx))\n if seen not in sents_seen:\n sents_seen[seen] = None\n\n # An input sentence (among those in a batch) is finished when\n # beam_size hypotheses have been collected for it\n if len(finalized[sent]) < beam_size:\n finalized[sent].append({\"tokens\": tokens_clone[i], \"score\": score, \"alignment\": None})\n\n newly_finished: List[int] = []\n\n for seen in sents_seen.keys():\n # check termination conditions for this sentence\n sent: int = int(float(seen.split(\"_\")[0]))\n unfin_idx: int = int(float(seen.split(\"_\")[1]))\n\n if not finished[sent] and self.is_finished(step, unfin_idx, max_len, len(finalized[sent]), beam_size):\n finished[sent] = True\n newly_finished.append(unfin_idx)\n\n return newly_finished\n\n","repo_name":"voidmagic/chaos","sub_path":"modules/basics/translation/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10260196643","text":"def maximumProfit(prices):\n ans = 0\n min_so_far = prices[0]\n n = len(prices)\n\n for i in range(1, n):\n min_so_far = min(min_so_far, prices[i])\n ans = max(ans, prices[i] - min_so_far)\n return ans\n\n\n# prices = [1, 2, 3, 4]\n# prices = [98, 101, 66, 72]\nprices = [2, 2, 2, 2]\nprint(maximumProfit(prices))\n","repo_name":"shankar7042/Strivers-SDE-Sheet-Challenge","sub_path":"Arrays/buy_and_sell_stock.py","file_name":"buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2378111968","text":"\nimport xml.etree.ElementTree as ET\n\ndef levenshtein_distance(first, second):\n \"\"\"Find the Levenshtein distance between two strings.\"\"\"\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [[0] * second_length for x in range(first_length)]\n for i in range(first_length):\n distance_matrix[i][0] = i\n for j in range(second_length):\n distance_matrix[0][j] = j\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n return 
distance_matrix[first_length-1][second_length-1]\n\ntree=ET.parse(\"nodedetails1.xml\")\nroot=tree.getroot()\n\nlist1=['208.16.7.9','80','55']\nbiglist=[]\n#list2=[['201.15.7.9','8080','80'],['241.15.63.7','1234','90'],['208.16.7.8','5454','100'],['201.15.7.9','6443','75'],['176.34.54.70','5432','65'],['123.73.65.92','4432','78'],['208.16.8.10','8032','90'],['192.23.71.165','4423','85'],['192.23.72.165','447','85']]\nfor child in root:\n\tsmalllist=[]\n\tsmalllist.append(child[0].text)\n\tsmalllist.append(child[1].text)\n\tsmalllist.append(child[2].text)\n\tbiglist.append(smalllist)\n\nstr1=\"\"\nstr2=\"\"\n\nfor elem in list1:\n\tstr1+=str(elem)\ni=0\nwhile (i<10):\n\tfor elem in biglist[i]:\n\t\tstr2+=str(elem)\n\tprint(\"Levenshtein distance between seed node and node\",(i+1),\":\",levenshtein_distance(str1,str2))\n\ti+=1\n\tstr2=\"\"\n\n","repo_name":"shikhafadnavis/sybilhunter","sub_path":"bwuptime.py","file_name":"bwuptime.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6676807415","text":"from bs4 import BeautifulSoup\nimport urllib.request as req\n\nf = open(\"./알라딘중고샵.txt\",\"w\")\n\npage_num = 1\nwhile True :\n url=\"https://www.aladin.co.kr/search/wsearchresult.aspx?SearchTarget=Used&KeyWord=%ED%8C%8C%EC%9D%B4%EC%8D%AC&KeyRecentPublish=0&OutStock=0&ViewType=Detail&SortOrder=11&CustReviewCount=0&CustReviewRank=0&KeyFullWord=%ED%8C%8C%EC%9D%B4%EC%8D%AC&KeyLastWord=%ED%8C%8C%EC%9D%B4%EC%8D%AC&CategorySearch=&chkKeyTitle=&chkKeyAuthor=&chkKeyPublisher=&ViewRowCount=25&page={}\".format(page_num)\n code = req.urlopen(url)\n soup = BeautifulSoup(code,\"html.parser\")\n title = soup.select(\"a.bo3 > b\")\n price = soup.select(\"a.bo_used > b\")\n if len(title) == 0: # crawling finished: we are past the last page\n break # once the last page has been crawled, the next page has nothing, so title holds 0 elements\n\n for i in range(len(title)): # passing a list to len() -> tells you the number of elements in the list\n print(title[i].string, price[i].string) # range(n) : builds the sequence 0 to n-1 in one go: 0 1 2 3 4 ... 
n-1\n f.write(title[i].string + \", \" + price[i].string + \"\\n\") # -> so in the end, i acts as the index into the lists\n page_num += 1\nf.close()\n\n# \\n : prints each entry on its own line","repo_name":"sue4869/python_web","sub_path":"26_알라딘_온라인중고샵.py","file_name":"26_알라딘_온라인중고샵.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1373996967","text":"from PyQt5 import QtGui, QtCore\nfrom PyQt5.QtWidgets import QWidget, QListWidget, QListWidgetItem, QApplication,QMessageBox, \\\n QScrollArea,QTreeView ,QPushButton,QGridLayout,QLabel,QLineEdit, QSpinBox\nimport sys\nimport datetime\nimport pandas as pd\nfrom PIL import Image, ImageDraw, ImageFont\nfrom PIL import ImageEnhance\nimport shutil\nimport os\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\nclass mainwindow(QWidget):\n\n def __init__(self, fileName, parent=None):\n super(mainwindow, self).__init__(parent)\n\n self.zoom_count = 0\n self.zoom_flag = False\n\n self.CSV_fileName = fileName\n self.zoom_file_path = \"photo_edit/zoom/temp.jpg\"\n\n widget = QWidget()\n self.mainLayout = QGridLayout(widget)\n self.btn_layout = QGridLayout(self)\n self.push_layout = QGridLayout()\n\n\n\n\n self.Qtree = QTreeView()\n # self.Qtree.setStyleSheet('QTreeView {background-color: purple; color: white; border:5px;'\n # 'border-style:outset;border-color: white;selection-color: yellow}')\n self.model = QtGui.QStandardItemModel(self)\n\n self.setWindowTitle(\"Main\")\n self.setFixedSize(700,550)\n\n self.Qtree.doubleClicked.connect(self.show_details)\n\n self.add_patient_pushbotton = QPushButton(\"Add New Patient\")\n # self.add_patient_pushbotton.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:5px;border-color: white}')\n self.add_patient_pushbotton.clicked.connect(self.add_patient)\n self.add_patient_pushbotton.setFixedSize(200, 30)\n\n\n\n self.zoom_in_push = QPushButton(\"Zoom In\")\n # self.zoom_in_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.zoom_in_push.clicked.connect(self.zoom)\n\n\n\n self.zoom_out_push = QPushButton(\"Zoom Out\")\n # self.zoom_out_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.zoom_out_push.clicked.connect(self.zoom)\n\n\n\n self.brightness_push = QPushButton(\"Brightness\")\n # self.brightness_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.brightness_push.clicked.connect(self.brightness)\n\n\n self.sharpness_push = QPushButton(\"Sharpness\")\n # self.sharpness_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.sharpness_push.clicked.connect(self.sharpness)\n\n\n self.color_push = QPushButton(\"Color\")\n # self.color_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.color_push.clicked.connect(self.color)\n\n self.reset_push = QPushButton(\"Reset\")\n # self.reset_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.reset_push.clicked.connect(self.reset)\n\n\n self.close_push = QPushButton(\"Close\")\n # self.close_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 
'border-width:2px;border-color: white}')\n self.close_push.clicked.connect(self.end)\n\n self.save_push = QPushButton(\"Add\")\n # self.save_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.save_push.clicked.connect(self.write_to_csv)\n self.save_push.setFixedSize(200, 30)\n\n self.back_push = QPushButton(\"Cancel\")\n # self.back_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.back_push.clicked.connect(self.loadCsv)\n self.back_push.setFixedSize(200, 30)\n\n self.back1_push = QPushButton(\"Back\")\n # self.back1_push.setStyleSheet('QPushButton {background-color:yellow;border-style:outset;'\n # 'border-width:2px;border-color: white}')\n self.back1_push.clicked.connect(self.loadCsv)\n self.back1_push.setFixedSize(200, 30)\n\n self.save_push.hide()\n self.back_push.hide()\n self.back1_push.hide()\n\n\n self.name_edit_line = QLineEdit()\n self.name_edit_line.setFixedHeight(50)\n self.age_edit_line = QSpinBox()\n self.age_edit_line.setFixedHeight(50)\n self.date_edit_line = QLineEdit()\n self.date_edit_line.setFixedHeight(50)\n self.photo_edit_line = QLineEdit()\n self.photo_edit_line.setFixedHeight(50)\n self.diagnose_edit_line = QLineEdit()\n self.diagnose_edit_line.setFixedHeight(100)\n # self.diagnose_edit_line.setFixedSize(360, 100)\n\n self.Name_label = QLabel(\"Name:\")\n # self.Name_label.setFixedHeight(20)\n self.Age_label = QLabel(\"Age:\")\n self.Date_label = QLabel(\"visit Date:\")\n self.photo_label = QLabel(\"Upload Photo:\")\n self.diagnose_label = QLabel(\"Diagnose:\")\n\n self.name_edit_line.hide()\n self.Name_label.hide()\n self.Age_label.hide()\n self.age_edit_line.hide()\n self.date_edit_line.hide()\n self.Date_label.hide()\n self.photo_edit_line.hide()\n self.photo_label.hide()\n self.diagnose_label.hide()\n self.diagnose_edit_line.hide()\n\n self.scroll = QScrollArea()\n self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n self.scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n self.scroll.setWidgetResizable(True)\n self.scroll.setWidget(widget)\n\n self.imageLabel = QLabel()\n\n\n self.listWidget = QListWidget(self)\n # self.listWidget.setStyleSheet('QListWidget{background-color: purple; color: white; border:5px;'\n # 'border-style:outset;border-color: white;selection-color: yellow}')\n self.listWidget.itemDoubleClicked.connect(self.image)\n\n\n self.listWidget.hide()\n self.Qtree.setModel(self.model)\n self.Qtree.hide()\n\n self.push_layout.addWidget(self.add_patient_pushbotton,0,0,0,1)\n self.push_layout.addWidget(self.zoom_in_push,0,0)\n self.push_layout.addWidget(self.zoom_out_push,0,1)\n self.push_layout.addWidget(self.brightness_push,0,2)\n self.push_layout.addWidget(self.sharpness_push,0,3)\n self.push_layout.addWidget(self.color_push,0,4)\n self.push_layout.addWidget(self.reset_push,0,5)\n self.push_layout.addWidget(self.close_push,0,6)\n self.push_layout.addWidget(self.save_push, 0, 0)\n self.push_layout.addWidget(self.back_push, 0, 1)\n self.push_layout.addWidget(self.back1_push,0,0)\n self.push_layout.addWidget(self.listWidget,1,0)\n\n\n self.btn_layout.addLayout(self.push_layout, 5, 0)\n self.btn_layout.addWidget(self.Name_label,0,0)\n self.btn_layout.addWidget(self.name_edit_line, 0, 1)\n self.btn_layout.addWidget(self.Age_label, 1, 0)\n self.btn_layout.addWidget(self.age_edit_line, 1, 1)\n self.btn_layout.addWidget(self.Date_label, 2, 0)\n 
self.btn_layout.addWidget(self.date_edit_line, 2, 1)\n self.btn_layout.addWidget(self.photo_label, 3, 0)\n self.btn_layout.addWidget(self.photo_edit_line, 3, 1)\n self.btn_layout.addWidget(self.diagnose_label, 4, 0)\n self.btn_layout.addWidget(self.diagnose_edit_line, 4, 1)\n self.btn_layout.addWidget(self.scroll)\n self.btn_layout.addWidget(self.Qtree, 0, 0)\n\n\n self.mainLayout.addWidget(self.imageLabel, 0, 0)\n\n self.scroll.hide()\n\n def loadCsv(self):\n self.model.clear()\n self.add_patient_pushbotton.show()\n self.back1_push.hide()\n self.listWidget.hide()\n\n self.name_edit_line.hide()\n self.Name_label.hide()\n self.Age_label.hide()\n self.age_edit_line.hide()\n self.date_edit_line.hide()\n self.Date_label.hide()\n self.photo_edit_line.hide()\n self.photo_label.hide()\n self.diagnose_label.hide()\n self.diagnose_edit_line.hide()\n self.save_push.hide()\n self.back_push.hide()\n\n self.zoom_out_push.hide()\n self.zoom_in_push.hide()\n self.close_push.hide()\n self.reset_push.hide()\n self.sharpness_push.hide()\n self.brightness_push.hide()\n self.color_push.hide()\n\n\n data = pd.read_csv(self.CSV_fileName)\n header = list(data)\n data = data.values.tolist()\n self.next_id = len(data)\n items = [QtGui.QStandardItem(str(l)) for l in header]\n self.model.appendRow(items)\n for lst in data:\n items = [QtGui.QStandardItem(str(l)) for l in lst]\n self.model.appendRow(items)\n\n self.Qtree.show()\n\n\n def show_details(self,signal):\n\n\n r = signal.row() - 1\n\n if r >= 0:\n self.back1_push.show()\n\n self.listWidget.clear()\n data = pd.read_csv(self.CSV_fileName)\n data = data.values.tolist()\n self.d = data[r]\n\n txt = \"id is: \" + str(self.d[0]) + \"\\nname is: \" + str(self.d[1]) + \"\\nage is: \" + str(\n self.d[2]) + \"\\nDiagnoses: \" + str(\n self.d[5]) \\\n + \"\\nPress to show the image\"\n\n QListWidgetItem(txt, self.listWidget)\n\n self.listWidget.setGeometry(1,1,500,200)\n\n self.Qtree.close()\n self.add_patient_pushbotton.hide()\n self.listWidget.show()\n\n else:\n pass\n\n\n def add_patient(self):\n self.Qtree.close()\n self.add_patient_pushbotton.hide()\n\n self.name_edit_line.clear()\n self.age_edit_line.clear()\n self.date_edit_line.clear()\n self.photo_edit_line.clear()\n self.diagnose_edit_line.clear()\n\n self.name_edit_line.show()\n self.Name_label.show()\n self.Age_label.show()\n self.age_edit_line.show()\n self.date_edit_line.show()\n self.Date_label.show()\n self.photo_edit_line.show()\n self.photo_label.show()\n self.diagnose_label.show()\n self.diagnose_edit_line.show()\n self.save_push.show()\n self.back_push.show()\n\n currentDT = datetime.datetime.now()\n self.date_edit_line.setText(str(currentDT))\n\n\n def write_to_csv(self):\n name = self.name_edit_line.text()\n age = str(self.age_edit_line.value())\n date = self.date_edit_line.text()\n image_name = \"data/\" + name + \"_\" + date + \".jpg\"\n diag = self.diagnose_edit_line.text()\n\n l2 = [self.next_id, name, age, date, image_name, diag]\n\n data = pd.read_csv(self.CSV_fileName)\n header = list(data)\n\n l1 = data.values.tolist()\n dict = {}\n for i, key in enumerate(header):\n lst = []\n for l in l1:\n lst.append(l[i])\n lst.append(l2[i])\n dict[str(key)] = lst\n\n df = pd.DataFrame(dict)\n df.to_csv(self.CSV_fileName, index=False)\n\n self.loadCsv()\n\n\n def image(self):\n self.scroll.show()\n self.listWidget.close()\n self.back1_push.hide()\n self.fileName_edit = \"photo_edit/temp.jpg\"\n try:\n image = Image.open(self.d[4])\n\n image.save(\"photo_edit/temp.jpg\")\n\n\n # self.scaleFactor = 0.0\n 
self.scaleFactor = 1.0\n\n fileName = self.d[4]\n if fileName:\n image = QtGui.QImage(fileName)\n # print(type(image))\n\n if image.isNull():\n QMessageBox.information(self, \"Image Viewer\",\n \"Cannot load %s.\" % fileName)\n return\n self.qpixmap = QtGui.QPixmap.fromImage(image)\n self.imageLabel.setPixmap(self.qpixmap)\n\n self.imageLabel.show()\n\n except:\n # img = np.zeros((500,500,3))\n # image = QtGui.QImage(img, img.shape[1],img.shape[0], img.shape[1] * 3, QtGui.QImage.Format_RGB888)\n\n img = Image.new('RGB', (700, 600), (0, 0, 0))\n\n draw = ImageDraw.Draw(img)\n draw.text((200, 200), \"No Photo To Display!\", fill='rgb(255, 255, 255)',\n font=ImageFont.truetype(\"/usr/share/fonts/dejavu/DejaVuSans.ttf\", 25))\n\n img.save(self.fileName_edit)\n img.save(self.d[4])\n img = img.convert(\"RGBA\")\n data = img.tobytes(\"raw\", \"RGBA\")\n\n\n qim = QtGui.QImage(data, img.size[0], img.size[1], QtGui.QImage.Format_ARGB32)\n pix = QtGui.QPixmap.fromImage(qim)\n\n self.imageLabel.setPixmap(pix)\n self.imageLabel.show()\n\n\n self.add_patient_pushbotton.hide()\n self.zoom_out_push.show()\n self.zoom_in_push.show()\n self.close_push.show()\n self.reset_push.show()\n self.sharpness_push.show()\n self.brightness_push.show()\n self.color_push.show()\n\n\n def adjustScrollBar(self, scrollBar, scale):\n scrollBar.setValue(int(scale * scrollBar.value()\n + ((scale - 1) * scrollBar.pageStep() / 2)))\n\n\n def zoom(self):\n if \"zoom\" not in os.listdir(\"photo_edit\"):\n os.mkdir(\"photo_edit/zoom\")\n sender = self.sender()\n self.zoom_flag = True\n image = Image.open(self.fileName_edit)\n\n if sender == self.zoom_in_push:\n self.zoom_count += 1\n else:\n self.zoom_count += -1\n scale = 1.2\n scale = (scale)**self.zoom_count\n self.adjustScrollBar(self.scroll.horizontalScrollBar(),scale)\n self.adjustScrollBar(self.scroll.verticalScrollBar(),scale)\n\n h = int(image.size[1]*scale)\n w = int(image.size[0]*scale)\n image = image.resize((w,h),Image.BICUBIC)\n image.save(self.zoom_file_path)\n self.imageLabel.clear()\n image = QtGui.QImage(self.zoom_file_path)\n self.qpixmap = QtGui.QPixmap.fromImage(image)\n self.imageLabel.setPixmap(self.qpixmap)\n\n\n\n\n def brightness(self):\n if self.zoom_flag:\n shutil.copy(self.zoom_file_path,self.fileName_edit)\n self.zoom_flag = False\n self.zoom_count = 0\n\n image = Image.open(self.fileName_edit)\n enhancer = ImageEnhance.Contrast(image)\n out = enhancer.enhance(1.7)\n out.save(self.fileName_edit)\n out = QtGui.QImage(self.fileName_edit)\n\n self.imageLabel.clear()\n qpixmap = QtGui.QPixmap.fromImage(out)\n self.imageLabel.setPixmap(qpixmap)\n self.scaleFactor = 1.0\n self.imageLabel.adjustSize()\n\n def sharpness(self):\n if self.zoom_flag:\n shutil.copy(self.zoom_file_path,self.fileName_edit)\n self.zoom_flag = False\n self.zoom_count = 0\n\n self.image = Image.open(self.fileName_edit)\n enhancer = ImageEnhance.Sharpness(self.image)\n out = enhancer.enhance(1.7)\n out.save(self.fileName_edit)\n out = QtGui.QImage(self.fileName_edit)\n\n self.imageLabel.clear()\n qpixmap = QtGui.QPixmap.fromImage(out)\n self.imageLabel.setPixmap(qpixmap)\n self.scaleFactor = 1.0\n self.imageLabel.adjustSize()\n\n def color(self):\n if self.zoom_flag:\n shutil.copy(self.zoom_file_path,self.fileName_edit)\n self.zoom_flag = False\n self.zoom_count = 0\n\n self.image = Image.open(self.fileName_edit)\n enhancer = ImageEnhance.Color(self.image)\n out = enhancer.enhance(1.7)\n out.save(self.fileName_edit)\n out = QtGui.QImage(self.fileName_edit)\n\n 
self.imageLabel.clear()\n qpixmap = QtGui.QPixmap.fromImage(out)\n self.imageLabel.setPixmap(qpixmap)\n self.scaleFactor = 1.0\n self.imageLabel.adjustSize()\n\n def reset(self):\n self.zoom_flag = False\n self.zoom_count = 0\n self.imageLabel.clear()\n o_i = QtGui.QImage(self.d[4])\n qpixmap = QtGui.QPixmap.fromImage(o_i)\n self.imageLabel.setPixmap(qpixmap)\n self.imageLabel.adjustSize()\n self.image = Image.open(self.d[4])\n self.image.save(\"photo_edit/temp.jpg\")\n\n def end(self):\n self.scroll.hide()\n self.imageLabel.close()\n\n self.loadCsv()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n exp=mainwindow(\"database/patient.csv\")\n exp.loadCsv()\n exp.show()\n sys.exit(app.exec_())\n","repo_name":"helmogey/PS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13955422741","text":"#!/usr/bin/env python\n\"\"\"This is a simple script to move or copy deeply nested files to the highest level directory\nof operation or another specified directory.\n \"\"\"\nimport argparse\nfrom hashlib import new\nfrom operator import ne, truediv\nimport os\nimport shutil\nfrom os.path import join, exists, abspath, isdir\n\n\ndef move_or_copy(oldpath, newpath, copy, v: bool = False):\n if exists(newpath):\n if v:\n print(f\"{newpath} already exists\")\n return\n if copy:\n if v:\n print(f\"Copy: {oldpath} --> {newpath}\")\n try:\n shutil.copy2(oldpath, newpath, follow_symlinks=True)\n except IsADirectoryError:\n shutil.copytree(oldpath, newpath, symlinks=True)\n return\n if v:\n print(f\"Move: {oldpath} --> {newpath}\")\n shutil.move(oldpath, newpath)\n return\n\n\ndef move_dirs(source, output, copy, v):\n for working_dir, dirs, files in os.walk(source, topdown=False):\n for d in dirs:\n end_dir = d.split(\"/\")[-1]\n oldpath = join(working_dir, d)\n newpath = join(output, end_dir)\n move_or_copy(oldpath, newpath, copy, v)\n\n\ndef move_files(source, output, copy, v):\n for working_dir, dirs, files in os.walk(source, topdown=False):\n for f in files:\n oldpath = join(working_dir, f)\n newpath = join(output, f)\n move_or_copy(oldpath, newpath, copy, v)\n\n\ndef clean_empty_folders(root_path, remove_root=False, new_path=None):\n path = new_path if new_path is not None else root_path\n if not isdir(path):\n print(\"not dir\")\n return\n files = os.listdir(path)\n if len(files):\n for f in files:\n fullpath = join(path, f)\n if isdir(fullpath):\n clean_empty_folders(root_path, remove_root, new_path=fullpath)\n if exists(path):\n files = os.listdir(path)\n if len(files) == 0:\n os.rmdir(path)\n if remove_root and len(os.listdir(root_path)) == 0:\n os.rmdir(root_path)\n\n\ndef uproot(\n source: str = None,\n output: str = None,\n clean: bool = False,\n clean_all: bool = False,\n copy: bool = False,\n dirs: bool = False,\n v: bool = False,\n):\n \"\"\"Moves files from deeply nested dirs to work or output dir\n\n Args:\n source (str, optional): The working directory. Defaults to None.\n output (str, optional): The output directory. 
Defaults to the source directory.\n clean (bool, optional): If empty directories should be removed.\n copy (bool, optional): If files/folders should be copied instead of moved.\n dirs (bool, optional): If folders should be operated on instead of files.\n v (bool, optional): If operation steps should be shown on screen.\n \"\"\"\n if source is None:\n print(\"No source directory specified\")\n return\n if type(source) == list:\n source = source[0]\n if source == \".\":\n source = os.getcwd()\n source = abspath(source) if source else source\n output = abspath(output) if output else source\n if not isdir(source):\n print(f\"{source} is not a directory\")\n return 1\n if output and not isdir(output):\n print(f\"{output} is not a directory\")\n return 1\n\n if source and output:\n if (len(source) < len(output)) and (source in output):\n print(\"Output folder cannot be inside source path\")\n return 1\n if dirs:\n move_dirs(source, output, copy, v)\n else:\n move_files(source, output, copy, v)\n if clean:\n if v:\n print(\"Cleaning empty folders..\")\n clean_empty_folders(source)\n if clean_all:\n if v:\n print(\"Cleaning and removing empty source..\")\n clean_empty_folders(source, True)\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description=\"\"\"This is a simple program for recursively moving files from \n subdirectories to the current or specified directory\"\"\"\n )\n parser.add_argument(\n \"-s\",\n \"--source\",\n help=\"The folder to perform moving operations on. Defaults to the current directory if not specified\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"The folder to move files to. Defaults to the root of the starting directory or current directory. This has to be outside the source directory\",\n )\n\n parser.add_argument(\n \"-O\",\n \"--make_output\",\n help=\"Same as --output but creates the output directory if the specified one does not exist.\",\n )\n parser.add_argument(\n \"-r\",\n \"--remove_empty\",\n action=\"store_true\",\n help=\"Specifies if the empty directories should be cleared after moving. Defaults to false if this flag is not set.\",\n )\n parser.add_argument(\n \"-R\",\n \"--remove_empty_source\",\n action=\"store_true\",\n help=\"Same as --remove_empty but also removes the source directory itself.\",\n )\n parser.add_argument(\n \"-c\",\n \"--copy\",\n action=\"store_true\",\n help=\"Copy files to destination instead of moving\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Prints the current operations.\",\n )\n parser.add_argument(\n \"-d\",\n \"--directories\",\n action=\"store_true\",\n help=\"Operates on the directories at the bottom of the file tree instead of files\",\n )\n\n args, unknown = parser.parse_known_args()\n\n if args.make_output:\n if not exists(abspath(args.make_output)):\n if args.verbose:\n print(f\"{args.make_output} does not exist. 
Creating...\")\n os.mkdir(abspath(args.make_output))\n uproot(\n source=args.source or unknown or None,\n output=args.output or args.make_output,\n clean=args.remove_empty,\n clean_all=args.remove_empty_source,\n copy=args.copy,\n dirs=args.directories,\n v=args.verbose,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"keystroke3/uproot","sub_path":"uproot.py","file_name":"uproot.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"3237273759","text":"from itertools import chain, combinations\n\nimport networkx as nx\nimport pandas as pd\n\n\ndef make_topic_coocc(topic_mix, thres):\n\n co_occ = (\n topic_mix.reset_index(drop=False)\n .melt(id_vars=\"index\")\n .query(f\"value>{thres}\")\n .reset_index(drop=False)\n .groupby(\"index\")[\"variable\"]\n .apply(lambda x: list(x))\n )\n return co_occ\n\n\ndef make_network_from_coocc(\n co_occ: list, thres: float = 0.1, extra_links: int = 200, spanning: bool = True\n) -> nx.Graph:\n \"\"\"Create a network from a list of co-occurring terms\n Args\n co_occ: each element is a list of co-occurring entities\n thres: maximum occurrence rate\n weight_thres: extra edges to add\n spanning: filter the network with a maximum spanning tree\n \"\"\"\n\n # Make weighted edge list\n pairs = list(chain(*[sorted(list(combinations(x, 2))) for x in co_occ]))\n pairs = [x for x in pairs if len(x) > 0]\n\n edge_list = pd.DataFrame(pairs, columns=[\"source\", \"target\"])\n\n edge_list[\"weight\"] = 1\n\n edge_list_weighted = (\n edge_list.groupby([\"source\", \"target\"])[\"weight\"].sum().reset_index(drop=False)\n )\n\n # Make and post-process network\n net = nx.from_pandas_edgelist(edge_list_weighted, edge_attr=True)\n\n if spanning is True:\n msp = nx.maximum_spanning_tree(net)\n msp_plus = make_msp_plus(net, msp, thres=extra_links)\n return msp_plus\n\n else:\n return net\n\n\ndef make_msp_plus(net: nx.Graph, msp: nx.Graph, thres: int = 200) -> nx.Graph:\n \"\"\"Create a network combining maximum spanning tree and top edges\n Args:\n net: original network\n msp: maximum spanning tree of the original network\n thres: extra edges to aadd\n Returns:\n A network\n \"\"\"\n\n msp_ed = set(msp.edges())\n\n top_edges_net = nx.Graph(\n [\n x\n for x in sorted(\n net.edges(data=True),\n key=lambda x: x[2][\"weight\"],\n reverse=True,\n )\n if (x[0], x[1]) not in msp_ed\n ][:thres]\n )\n\n # Combines them\n united_graph = nx.Graph(\n list(msp.edges(data=True)) + list(top_edges_net.edges(data=True))\n )\n return united_graph\n","repo_name":"nestauk/createch","sub_path":"createch/pipeline/network_analysis.py","file_name":"network_analysis.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73077170080","text":"import sys\n\nsys.stdin = open('input.txt')\n\n\n\ntestNum = int(input())\nfor test in range(1, testNum+1):\n lst = [list(map(int, input().split())) for i in range(9)]\n\n ###가로확인\n res = 1\n for row in lst:\n sorted(row)\n cnt = 0\n for a in row:\n cnt +=a\n\n if cnt != 45:\n res =0\n ##세로확인\n\n for i in range(9):\n cnt = 0\n for j in range(9):\n cnt += lst[j][i]\n\n if cnt != 45:\n res =0\n\n # ##좌하향 대각선 확인.\n # cnt = 0\n # for i in range(9):\n # cnt += lst[i][i]\n # print(cnt)\n # if cnt != 45:\n # res =0\n #\n # ##우상향 대각선 확인.\n #\n # cnt =0\n # for i in range(9):\n # cnt += lst[8-i][i]\n # if cnt != 45:\n # res =0\n\n ## 3*3 격자 확인\n #for문 3개\n for 
i in range(0,7,3):\n for j in range(0,7,3):\n cnt = 0\n for a in range(3):\n for b in range(3):\n cnt += lst[i+a][j+b]\n\n if cnt != 45:\n res =0\n\n\n\n print(f'#{test} {res}')","repo_name":"cmkds/algo","sub_path":"SWEA/0819_1974_스도쿠 검증/1974.py","file_name":"1974.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10045876271","text":"import os\nfrom unittest import TestCase\n\nfrom pyfibre.addons.shg_pl_trans.shg_pl_trans_parser import (\n SHGPLTransParser)\n\n\nclass TestSHGPLTransParser(TestCase):\n\n def setUp(self):\n self.parser = SHGPLTransParser()\n self.input_files = [\n os.path.join('not', 'a', 'file.tif'),\n os.path.join('a', 'file-shg.tif'),\n os.path.join('a', 'file-pl.tif'),\n os.path.join('a', 'full_file-pl-shg.tif')]\n\n def test_cache_file_sets(self):\n\n self.parser._cache_file_sets(self.input_files, 'SHG')\n self.assertIn(\n os.path.join('a', 'file'), self.parser._file_set_cache)\n self.assertEqual(1, len(self.parser._file_set_cache))\n\n self.parser._cache_file_sets(self.input_files, 'PL-Trans')\n self.assertIn(\n os.path.join('a', 'file'), self.parser._file_set_cache)\n self.assertEqual(1, len(self.parser._file_set_cache))\n\n self.parser._cache_file_sets(self.input_files, 'SHG-PL-Trans')\n self.assertIn(\n os.path.join('a', 'full_file'), self.parser._file_set_cache)\n self.assertEqual(2, len(self.parser._file_set_cache))\n\n def test_get_file_sets(self):\n\n file_sets = self.parser.get_file_sets(self.input_files)\n self.assertEqual(2, len(file_sets))\n\n self.assertIn('SHG-PL-Trans', file_sets[0].registry)\n self.assertIn('SHG', file_sets[1].registry)\n self.assertIn('PL-Trans', file_sets[1].registry)\n","repo_name":"franklongford/PyFibre","sub_path":"pyfibre/addons/shg_pl_trans/tests/test_shg_pl_trans_parser.py","file_name":"test_shg_pl_trans_parser.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"fa","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"30778521407","text":"import pymysql.cursors\nimport sql\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='xjj520520ljf',\n db='duplicate',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\ndef output_music():\n\twith connection.cursor() as cursor:\n\t\tsql1 = 'select distinct * from musics order by COMMENTS desc'\n\t\tcursor.execute(sql1)\n\t\tcomments = cursor.fetchmany(100)\n\twith open('T100_music.txt','w') as f:\n\t\tfor i in comments:\n\t\t\tf.write('%s\\n'%str(i))\n\t\t\tprint(i)\n\t\n\n\nif __name__ == '__main__':\n\n\toutput_music()\n\tconnection.close()","repo_name":"cap-ljf/music163_TOP100","sub_path":"music163_TOP100/top1000_cmusic.py","file_name":"top1000_cmusic.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31019198335","text":"from bs4 import BeautifulSoup\nimport requests\n\nclass Page():\n def __init__(self):\n self.url =''\n self.html =''\n self.Ps = []\n self.Spans = []\n self.H1s = []\n self.H2s = []\n self.H3s = []\n self.H4s = []\n self.H5s = []\n self.H6s = []\n self.getalldone = False\n self.links = []\n self.textlist = []\n\n #run all the main methods in the required order\n def process(self, url):\n self.seturl(url)\n self.extract_html_page()\n self.getAllText()\n self.getLinks()\n self.cleantext()\n\n #allow to set the url to parse\n def seturl(self, url):\n #add url analysis to pre check\n 
self.url = url\n\n #extract HTML content using request and BeautifulSoup\n def extract_html_page(self):\n response = requests.get(self.url)\n self.html = BeautifulSoup(response.content, \"html.parser\")\n\n #extract all the

    paragraphe\n def getP(self):\n AllP = self.html.findAll('p')\n for P in AllP:\n self.Ps.append(P.get_text())\n\n #extract all the paragraphe\n def getSpan(self):\n Allspan = self.html.findAll('span')\n for span in Allspan:\n self.Spans.append(span.get_text())\n\n #extract all the titles\n def getH(self):\n for n in [n + 1 for n in range(6)]:\n AllH = self.html.findAll('h'+str(n))\n for H in AllH:\n exec(f\"self.H{str(n)}s.append(H.get_text())\")\n\n #extract all P, span and Hn\n def getAllText(self):\n if self.getalldone == False:\n self.getP()\n self.getSpan()\n self.getH()\n self.getLinks()\n self.getalldone = True\n\n #combine all the text in one list\n def getAllTextCombined(self):\n self.getAllText()\n Alltext=[]\n\n for n in [n + 1 for n in range(6)]:\n AllH = []\n exec(f\"AllH = self.H{str(n)}s\")\n for H in AllH:\n Alltext.append(H)\n\n for span in self.Spans:\n Alltext.append(span)\n for P in self.Ps:\n Alltext.append(P)\n return Alltext\n\n #extract all the links in the page\n def getLinks(self):\n for link in self.html.findAll('a'): #attrs={'href': re.compile(\"^http://\")}):\n #print(link.get('href'))\n self.links.append(link.get('href'))\n\n #put the text into a list of sentence\n def cleantext(self):\n self.textlist = [' '.join(x.split()) for x in self.getAllTextCombined()]\n","repo_name":"VincentMalara/Understand_website_topic_with_NLP","sub_path":"HTML_parsing.py","file_name":"HTML_parsing.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12650172938","text":"from . import blueprint\nimport flask\nfrom flask import request, jsonify\nfrom gtts import gTTS\n\n@blueprint.route(\"/get_audio/\")\ndef streamwav():\n date=str(request.args[\"date\"])\n param=str(request.args[\"params\"])\n val=str(request.args[\"val\"])\n mytext = 'The ' + param +' on '+ date + \" is : \" + val\n print(mytext)\n language = 'en'\n myobj = gTTS(text=mytext, lang=language, slow=False)\n myobj.save(\"./static/audio/welcome.mp3\")\n def generate():\n with open(\"./static/audio/welcome.mp3\", \"rb\") as fwav:\n data = fwav.read(1024)\n while data:\n yield data\n data = fwav.read(1024)\n return flask.Response(generate(), mimetype=\"audio/mp3\")","repo_name":"risha-daz/swasti-framework","sub_path":"blueprint_module/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25660947044","text":"matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n#FOR DE ALIMENTAÇÃO\nfor l in range(0, 3):\n for c in range(0, 3):\n matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: '))\nprint('-=' * 30)\n#FOR PARA PRINTAR NA TELA\nfor l in range(0, 3):\n for c in range(0, 3):\n print(f'[{matriz[l][c]:^5}]', end='')\n print() # quebrar a linha\n","repo_name":"RonalddMatias/Curso-Completo-Python","sub_path":"Curso de Python/Python-Exercícios/ex086.py","file_name":"ex086.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18769928248","text":"#!/usr/bin/python3\n\"\"\"\nQueries the Reddit API and returns the\nnumber of subscribers (not active users, total subscribers)\nfor a given subreddit.\n\"\"\"\nimport requests\n\n\ndef number_of_subscribers(subreddit):\n \"\"\"\n returns the number of subscribers for subreddit\n return 0 if invalid subreddit is given\n \"\"\"\n url = 
f\"https://www.reddit.com/r/{subreddit}/about.json\"\n\n headers = {'User-Agent': 'Harploid/10.2'}\n\n resp = requests.get(url, headers=headers).json()\n subs = resp.get('data', {}).get('subscribers')\n if subs:\n return subs\n return 0\n","repo_name":"HarpII/alx-system_engineering-devops","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"41233265584","text":"import shutil\n\nfrom .. import plugin\n\nTEMPLATE = \"\"\"Disk {disk} :: Free: {disk_free:,.3f} GB ({disk_free_pct:.2f}%)\nDisk Total: {disk_total:,.3f} GB\nDisk Used: {disk_used:,.3f} GB ({disk_used_pct:.2f}%)\n\"\"\"\n\n\nclass Check(plugin.Plugin):\n \"\"\"Nagios plugin to perform Disk checks.\"\"\"\n\n def cli(self):\n \"\"\"Add command line arguments specific to the plugin.\"\"\"\n group = self.parser.add_mutually_exclusive_group()\n group.add_argument(\n \"-p\",\n \"--percent\",\n dest=\"percent\",\n action=\"store_true\",\n default=False,\n help=\"Warning/Critical values are a percentage (default)\",\n )\n group.add_argument(\n \"-m\",\n \"--mega-bytes\",\n dest=\"mb\",\n action=\"store_true\",\n default=False,\n help=\"Warning/Critical values are in Mega-Bytes\",\n )\n group.add_argument(\n \"-g\",\n \"--giga-bytes\",\n dest=\"gb\",\n action=\"store_true\",\n default=False,\n help=\"Warning/Critical values are in Giga-Bytes\",\n )\n self.parser.add_argument(\n \"-w\",\n \"--warn\",\n dest=\"warn\",\n type=float,\n default=20.0,\n help=\"Amount of disk free to warn at\",\n )\n self.parser.add_argument(\n \"-c\",\n \"--critical\",\n dest=\"critical\",\n type=float,\n default=10.0,\n help=\"Amount of disk free to mark critical \"\n \"[Default: %(default)0.2f]\",\n )\n self.parser.add_argument(\n \"disk\",\n help=\"Directory path for disk to check\",\n )\n\n def execute(self):\n \"\"\"Execute the actual working parts of the plugin.\"\"\"\n try:\n result = shutil.disk_usage(self.opts.disk)\n except OSError as err:\n self.message = f\"Error gathering disk usage: {err}\"\n self.status = plugin.Status.UNKNOWN\n return\n\n # Stats and stuff\n stats = {\n \"disk\": self.opts.disk,\n \"disk_total\": result.total / (1024 * 1024 * 1024),\n \"disk_used\": result.used / (1024 * 1024 * 1024),\n \"disk_free\": result.free / (1024 * 1024 * 1024),\n \"disk_free_pct\": (result.free / result.total) * 100.0,\n \"disk_used_pct\": (result.used / result.total) * 100.0,\n }\n\n if self.opts.mb or self.opts.gb:\n divisor = 1024 * 1024 if self.opts.mb else 1024 * 1024 * 1024\n free = result.free / divisor\n else:\n # Fallback to percentage\n free = (result.free / result.total) * 100.0\n\n if free < self.opts.critical:\n self.status = plugin.Status.CRITICAL\n elif free < self.opts.warn:\n self.status = plugin.Status.WARN\n else:\n self.status = plugin.Status.OK\n\n self.message = TEMPLATE.strip().format(**stats)\n self.add_perf_multi(stats)\n\n\ndef run():\n \"\"\"Entry point from setup.py for installation of wrapper.\"\"\"\n instance = Check()\n instance.main()\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"HopliteInd/cp-nagios-plugins","sub_path":"libnagios/checks/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"29340573770","text":"# -*- coding: utf-8 -*-\n\"\"\"maintain a dictionary of parameters\"\"\"\n\nimport os\nimport six\nimport json\nimport logging\n\n\ndef _get_dict_from_environ_or_json_or_file(args, env_name):\n if args == '':\n return None\n if args is None:\n s = os.environ.get(env_name)\n else:\n s = args\n if os.path.exists(s):\n s = open(s).read()\n if isinstance(s, six.string_types):\n try:\n r = eval(s)\n except SyntaxError as e:\n raise ValueError('json parse error: %s \\n>Got json: %s' %\n (repr(e), s))\n return r\n else:\n return s\n\n\ndef parse_file(filename):\n \"\"\"parse_file\"\"\"\n d = _get_dict_from_environ_or_json_or_file(filename, None)\n if d is None:\n raise ValueError('file(%s) not found' % filename)\n return d\n\n\ndef evaluate_file(filename):\n \"\"\"evaluate_file\"\"\"\n # logging.info(\n # f\"error loading _jsonnet (this is expected on Windows), treating {filename} as plain json\"\n # )\n logging.info(filename)\n with open(filename, \"r\") as evaluation_file:\n return evaluation_file.read()\n\n\ndef from_file(filename, log_info=True):\n \"\"\"from_file\"\"\"\n json_file = json.loads(evaluate_file(filename), strict=False)\n if log_info:\n logging.info(json.dumps(json_file, indent=4, sort_keys=True))\n return json_file\n\n\ndef log_info(json_file):\n \"\"\"log info\"\"\"\n logging.info(json.dumps(json_file, indent=4, sort_keys=True))\n\n\ndef replace_none(params):\n \"\"\"replace_none\"\"\"\n if params == \"None\":\n return None\n elif isinstance(params, dict):\n for key, value in params.items():\n params[key] = replace_none(value)\n if key == \"split_char\" and isinstance(value, str):\n try:\n value = chr(int(value, base=16))\n logging.debug(\"ord(value): {} \".format(ord(value)))\n except Exception:\n pass\n params[key] = value\n return params\n elif isinstance(params, list):\n return [replace_none(value) for value in params]\n return params\n","repo_name":"PaddlePaddle/ERNIE","sub_path":"erniekit/utils/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":6044,"dataset":"github-code","pt":"54"}
{"seq_id":"26445840557","text":"import threading\n\ninput_string_1 = \"\"\ninput_string_2 = \"\"\n\ndef write_string_1():\n global input_string_1\n input_string_1 = input(\"Введите первую строку: \\n\")\n\ndef write_string_2():\n global input_string_2\n input_string_2 = input(\"Введите вторую строку: \")\n\n# Create and start the first thread\nthread1 = threading.Thread(target=write_string_1)\nthread1.start()\n\n# Create and start the second thread\nthread2 = threading.Thread(target=write_string_2)\nthread2.start()\n\n# Wait for both threads to finish\nthread1.join()\nthread2.join()\n\n# Join the strings\ncombined_string = input_string_1 + input_string_2\n\n# Print the combined string\nprint(\"Объединенная строка: \", combined_string)","repo_name":"brstu/SP-2023","sub_path":"trunk/PO210719/task_05/src/atomic.py","file_name":"atomic.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"22876011626","text":"\r\nimport math\r\nimport wx\r\nimport os\r\nimport subprocess\r\n \r\nclass MainWindow(wx.Frame):\r\n \r\n def __init__(self, title):\r\n \r\n import ctypes\r\n myappid = 'calculator.scottiltd'\r\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) \r\n \r\n wx.Frame.__init__(self, None, wx.ID_ANY, title=title, style=wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))\r\n \r\n self.path = os.path.dirname(os.getcwd())\r\n \r\n self.settings = {\"mode\": \"Default\", \"history\": True, 
\"HistoryMaxlenght\": 10, \"HistorySave\": \"All\"}\r\n self.history = []\r\n self.text = [\"0\"]\r\n self.printer = [\"0\"]\r\n self.memory = [\"0\"] \r\n\r\n self.font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.NORMAL, underline = False, faceName =\"\")\r\n self.operators_font = wx.Font(9, wx.DEFAULT, wx.BOLD, wx.NORMAL, underline = False, faceName =\"\")\r\n \r\n self.rootx = False\r\n self.new = True\r\n \r\n################################################################################\r\n##################################FRAME HEADER##################################\r\n################################################################################\r\n \r\n optionsMenu = wx.Menu()\r\n \r\n self.menuHistory = wx.Menu()\r\n self.showHistory = self.menuHistory.Append(111, \"Show History\\tCTRL+h\", \"Show Calculator's History\")\r\n self.Bind(wx.EVT_MENU, self.OnShowHistory, self.showHistory)\r\n self.copyHistory = self.menuHistory.Append(112, \"Copy History\\tALT+c\", \"Copy Calculator's History\")\r\n self.Bind(wx.EVT_MENU, self.OnCopyHistory, self.copyHistory)\r\n self.deleteHistory = self.menuHistory.Append(113, \"Delete History\\tALT+d\", \"Delete Calculator's History\")\r\n self.Bind(wx.EVT_MENU, self.OnDeleteHistory, self.deleteHistory)\r\n optionsMenu.AppendSubMenu(self.menuHistory, \"History\", \"Options for Calculator's History\")\r\n optionsMenu.AppendSeparator()\r\n \r\n################################################################################\r\n \r\n settingsMenu = wx.Menu()\r\n \r\n menuHistorySettings = wx.Menu()\r\n self.enableHistory = menuHistorySettings.AppendCheckItem(211, \"Enable History\", \"Enable History\")\r\n self.Bind(wx.EVT_MENU, self.OnEnableHistory, self.enableHistory)\r\n if self.settings[\"history\"] == True:\r\n self.enableHistory.Check()\r\n menuHistorySettings.AppendSeparator()\r\n self.pos10 = menuHistorySettings.AppendRadioItem(212, \"10 items\", \"Max items in history: 10\")\r\n self.Bind(wx.EVT_MENU, self.OnMax10Items, self.pos10)\r\n self.pos5 = menuHistorySettings.AppendRadioItem(213, \"5 items\", \"Max items in history: 5\")\r\n self.Bind(wx.EVT_MENU, self.OnMax5Items, self.pos5)\r\n self.pos3 = menuHistorySettings.AppendRadioItem(214, \"3 items\", \"Max items in history: 3\")\r\n self.Bind(wx.EVT_MENU, self.OnMax3Items, self.pos3)\r\n if self.settings[\"HistoryMaxlenght\"] == 10:\r\n self.pos10.Check()\r\n elif self.settings[\"HistoryMaxlenght\"] == 5:\r\n self.pos5.Check()\r\n elif self.settings[\"HistoryMaxlenght\"] == 3:\r\n self.pos3.Check() \r\n menuHistorySettings.AppendSeparator()\r\n self.CorrectOnly = menuHistorySettings.AppendRadioItem(215, \"Save Correct Only\", \"Save Only Correct Operations\")\r\n self.Bind(wx.EVT_MENU, self.OnCorrectOnly, self.CorrectOnly)\r\n self.All = menuHistorySettings.AppendRadioItem(216, \"Save All\", \"Save All Operations\")\r\n self.Bind(wx.EVT_MENU, self.OnAll, self.All) \r\n if self.settings[\"HistorySave\"] == \"CorrectOnly\":\r\n self.CorrectOnly.Check()\r\n elif self.settings[\"HistorySave\"] == \"All\":\r\n self.All.Check()\r\n settingsMenu.AppendSubMenu(menuHistorySettings, \"History\", \"History Settings\")\r\n \r\n################################################################################\r\n \r\n modeMenu = wx.Menu()\r\n self.defaultMode = modeMenu.AppendRadioItem(301, \"Default\\tCTRL+d\", \"Set Default Mode\")\r\n self.Bind(wx.EVT_MENU, self.OnDefaultMode, self.defaultMode)\r\n self.scientificMode = modeMenu.AppendRadioItem(302, \"Scientific\\tCTRL+s\", \"Set Scientific Mode\")\r\n 
self.Bind(wx.EVT_MENU, self.OnScientificMode, self.scientificMode)\r\n if self.settings[\"mode\"] == \"Default\":\r\n self.defaultMode.Check()\r\n elif self.settings[\"mode\"] == \"Scientific\":\r\n self.scientificMode.Check()\r\n################################################################################\r\n \r\n infoMenu = wx.Menu()\r\n self.info = infoMenu.Append(400, \"Info\\tCTRL+i\", \"Information About Calculator\")\r\n self.Bind(wx.EVT_MENU, self.OnAbout, self.info)\r\n \r\n################################################################################\r\n \r\n fakeMenu = wx.Menu()\r\n self.m1 = fakeMenu.Append(1, \"1\", \"1\") \r\n self.Bind(wx.EVT_MENU, self.On1, self.m1)\r\n self.m2 = fakeMenu.Append(2, \"2\", \"2\") \r\n self.Bind(wx.EVT_MENU, self.On2, self.m2)\r\n self.m3 = fakeMenu.Append(3, \"3\", \"3\") \r\n self.Bind(wx.EVT_MENU, self.On3, self.m3)\r\n self.m4 = fakeMenu.Append(4, \"4\", \"4\") \r\n self.Bind(wx.EVT_MENU, self.On4, self.m4)\r\n self.m5 = fakeMenu.Append(5, \"5\", \"5\") \r\n self.Bind(wx.EVT_MENU, self.On5, self.m5)\r\n self.m6 = fakeMenu.Append(6, \"6\", \"6\") \r\n self.Bind(wx.EVT_MENU, self.On6, self.m6)\r\n self.m7 = fakeMenu.Append(7, \"7\", \"7\") \r\n self.Bind(wx.EVT_MENU, self.On7, self.m7)\r\n self.m8 = fakeMenu.Append(8, \"8\", \"8\") \r\n self.Bind(wx.EVT_MENU, self.On8, self.m8)\r\n self.m9 = fakeMenu.Append(9, \"9\", \"9\") \r\n self.Bind(wx.EVT_MENU, self.On9, self.m9)\r\n self.m0 = fakeMenu.Append(10, \"0\", \"0\") \r\n self.Bind(wx.EVT_MENU, self.On0, self.m0)\r\n self.mPlus = fakeMenu.Append(11, \"Plus\", \"Plus\") \r\n self.Bind(wx.EVT_MENU, self.OnPlus, self.mPlus)\r\n self.mMinus = fakeMenu.Append(12, \"Minus\", \"Minus\") \r\n self.Bind(wx.EVT_MENU, self.OnMinus, self.mMinus)\r\n self.mMultiply = fakeMenu.Append(13, \"Multiply\", \"Multiply\") \r\n self.Bind(wx.EVT_MENU, self.OnMultiply, self.mMultiply)\r\n self.mDivide = fakeMenu.Append(14, \"Divide\", \"Divide\") \r\n self.Bind(wx.EVT_MENU, self.OnDivide, self.mDivide)\r\n self.mPoint = fakeMenu.Append(15, \"Point\", \"Point\") \r\n self.Bind(wx.EVT_MENU, self.OnPoint, self.mPoint) \r\n self.mEqual = fakeMenu.Append(16, \"Equal\", \"Equal\") \r\n self.Bind(wx.EVT_MENU, self.OnEqual, self.mEqual) \r\n self.mCanc = fakeMenu.Append(17, \"Canc\", \"Canc\") \r\n self.Bind(wx.EVT_MENU, self.OnCanc, self.mCanc) \r\n self.mBack = fakeMenu.Append(18, \"Back\", \"Back\")\r\n self.Bind(wx.EVT_MENU, self.OnBack, self.mBack) \r\n self.mCopy = fakeMenu.Append(20, \"Copy\", \"Copy\") \r\n self.Bind(wx.EVT_MENU, self.OnCopy, self.mCopy) \r\n \r\n################################################################################\r\n \r\n menuBar = wx.MenuBar()\r\n menuBar.Append(optionsMenu, \"Options\")\r\n menuBar.Append(settingsMenu, \"Settings\")\r\n menuBar.Append(modeMenu, \"Mode\")\r\n menuBar.Append(infoMenu, \"?\")\r\n self.SetMenuBar(menuBar) \r\n \r\n################################################################################\r\n################################################################################\r\n \r\n self.accel_tab = wx.AcceleratorTable([wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('q'), wx.ID_EXIT),\r\n wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('i'), 400), \r\n wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('c'), 20), \r\n wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('s'), 302),\r\n wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('d'), 301), \r\n wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('h'), 111),\r\n wx.AcceleratorEntry(wx.ACCEL_ALT, ord('c'), 112),\r\n wx.AcceleratorEntry(wx.ACCEL_ALT, 
ord('d'), 113),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD1, 1),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"1\"), 1),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD2, 2),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"2\"), 2),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD3, 3),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"3\"), 3),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD4, 4),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"4\"), 4),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD5, 5),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"5\"), 5),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD6, 6),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"6\"), 6),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD7, 7),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"7\"), 7),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD8, 8),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"8\"), 8),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD9, 9),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"9\"), 9),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD0, 10),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\"0\"), 10),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD_ADD, 11),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_ADD, 11), \r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD_SUBTRACT, 12),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_SUBTRACT, 12),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD_MULTIPLY, 13),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_MULTIPLY, 13),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD_DIVIDE, 14),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_DIVIDE, 14),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD_DECIMAL, 15),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_DECIMAL, 15),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\",\"), 15),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, ord(\".\"), 15),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_NUMPAD_SPACE, 16),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_RETURN, 16),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_DELETE, 17),\r\n wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_BACK, 18), \r\n ])\r\n \r\n self.SetAcceleratorTable(self.accel_tab) \r\n \r\n################################################################################\r\n################################################################################\r\n \r\n self.statusbar = self.CreateStatusBar(1)\r\n self.statusbar.SetStatusText(\" Welcome to Calculator: Good Work\")\r\n \r\n################################################################################\r\n################################################################################\r\n \r\n self.SetIcon(wx.Icon(self.path + '/favicon.ico'))\r\n \r\n################################################################################\r\n##################################FRAME BODY####################################\r\n################################################################################\r\n \r\n\r\n self.display = wx.Button(self, id=wx.ID_ANY, label=\"\\n\", style=wx.BU_RIGHT, size=(310, 65))\r\n self.display.SetLabel(\"0 \")\r\n font1 = wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL, underline = False, faceName =\"\")\r\n self.display.SetFont(font1)\r\n \r\n################################################################################\r\n \r\n mc = wx.Button(self, wx.ID_STATIC, \"mc\")\r\n mc.Bind(wx.EVT_BUTTON, self.OnMemoryCanc)\r\n rm = wx.Button(self, wx.ID_STATIC, 
\"rm\")\r\n rm.Bind(wx.EVT_BUTTON, self.OnMemoryCall) \r\n mx = wx.Button(self, wx.ID_STATIC, \"m+\")\r\n mx.Bind(wx.EVT_BUTTON, self.OnMemoryPlus) \r\n canc = wx.Button(self, wx.ID_STATIC, \"canc\")\r\n canc.Bind(wx.EVT_BUTTON, self.OnCanc) \r\n \r\n################################################################################\r\n \r\n one = wx.Button(self, wx.ID_STATIC, \"1\")\r\n one.Bind(wx.EVT_BUTTON, self.On1) \r\n two = wx.Button(self, wx.ID_STATIC, \"2\")\r\n two.Bind(wx.EVT_BUTTON, self.On2) \r\n three = wx.Button(self, wx.ID_STATIC, \"3\")\r\n three.Bind(wx.EVT_BUTTON, self.On3) \r\n four = wx.Button(self, wx.ID_STATIC, \"4\")\r\n four.Bind(wx.EVT_BUTTON, self.On4) \r\n five = wx.Button(self, wx.ID_STATIC, \"5\")\r\n five.Bind(wx.EVT_BUTTON, self.On5) \r\n six = wx.Button(self, wx.ID_STATIC, \"6\")\r\n six.Bind(wx.EVT_BUTTON, self.On6) \r\n seven = wx.Button(self, wx.ID_STATIC, \"7\")\r\n seven.Bind(wx.EVT_BUTTON, self.On7, seven) \r\n eight = wx.Button(self, wx.ID_STATIC, \"8\")\r\n eight.Bind(wx.EVT_BUTTON, self.On8) \r\n nine = wx.Button(self, wx.ID_STATIC, \"9\")\r\n nine.Bind(wx.EVT_BUTTON, self.On9) \r\n zero = wx.Button(self, wx.ID_STATIC, \"0\")\r\n zero.Bind(wx.EVT_BUTTON, self.On0)\r\n \r\n################################################################################\r\n \r\n point = wx.Button(self, wx.ID_STATIC, \",\")\r\n point.Bind(wx.EVT_BUTTON, self.OnPoint) \r\n sign = wx.Button(self, wx.ID_STATIC, \"+/-\")\r\n sign.Bind(wx.EVT_BUTTON, self.OnChangeSign) \r\n plus = wx.Button(self, wx.ID_STATIC, \"+\")\r\n plus.Bind(wx.EVT_BUTTON, self.OnPlus)\r\n minus = wx.Button(self, wx.ID_STATIC, \"-\")\r\n minus.Bind(wx.EVT_BUTTON, self.OnMinus)\r\n multiply = wx.Button(self, wx.ID_STATIC, \"*\")\r\n multiply.Bind(wx.EVT_BUTTON, self.OnMultiply)\r\n divide = wx.Button(self, wx.ID_STATIC, \"/\") \r\n divide.Bind(wx.EVT_BUTTON, self.OnDivide)\r\n equal = wx.Button(self, wx.ID_STATIC, label=\"=\", size=(310, 45))\r\n equal.Bind(wx.EVT_BUTTON, self.OnEqual)\r\n\r\n################################################################################\r\n \r\n exp2 = wx.Button(self,wx.ID_STATIC, \"x\\u00B2\")\r\n exp2.Bind(wx.EVT_BUTTON, self.OnExp2)\r\n exp3 = wx.Button(self,wx.ID_STATIC, \"x\\u00B3\")\r\n exp3.Bind(wx.EVT_BUTTON, self.OnExp3)\r\n expX = wx.Button(self, wx.ID_STATIC, \"x\\u02B8\")\r\n expX.Bind(wx.EVT_BUTTON, self.OnExpX)\r\n root2 = wx.Button(self, wx.ID_STATIC, \"\\u00B2\\u221Ax\")\r\n root2.Bind(wx.EVT_BUTTON, self.OnRoot2)\r\n root3 = wx.Button(self, wx.ID_STATIC, \"\\u00B3\\u221Ax\")\r\n root3.Bind(wx.EVT_BUTTON, self.OnRoot3)\r\n rootx = wx.Button(self, wx.ID_STATIC, \"\\u02B8\\u221Ax\")\r\n sin = wx.Button(self, wx.ID_STATIC, \"sin\")\r\n sin.Bind(wx.EVT_BUTTON, self.OnSin)\r\n sinh = wx.Button(self, wx.ID_STATIC, \"sin\\u207B\\u00B9\")\r\n sinh.Bind(wx.EVT_BUTTON, self.OnSinh)\r\n cos = wx.Button(self, wx.ID_STATIC, \"cos\")\r\n cos.Bind(wx.EVT_BUTTON, self.OnCos)\r\n cosh = wx.Button(self, wx.ID_STATIC, \"cos\\u207B\\u00B9\")\r\n cosh.Bind(wx.EVT_BUTTON, self.OnCosh)\r\n tan = wx.Button(self, wx.ID_STATIC, \"tan\")\r\n tanh = wx.Button(self, wx.ID_STATIC, \"tan\\u207B\\u00B9\")\r\n back = wx.Button(self, wx.ID_STATIC, \"\\u001B\")\r\n back.Bind(wx.EVT_BUTTON, self.OnBack) \r\n greekP = wx.Button(self, wx.ID_STATIC, \"\\u03C0\")\r\n greekP.Bind(wx.EVT_BUTTON, self.OnGreekP)\r\n per100 = wx.Button(self, wx.ID_STATIC, \"\\u0025\")\r\n per100.Bind(wx.EVT_BUTTON, self.OnPer100)\r\n per1000 = wx.Button(self, wx.ID_STATIC, \"\\u2030\")\r\n 
per1000.Bind(wx.EVT_BUTTON, self.OnPer1000)\r\n        permilione = wx.Button(self, wx.ID_STATIC, \"\\u2031\")\r\n permilione.Bind(wx.EVT_BUTTON, self.OnPerMilione)\r\n openpar = wx.Button(self, wx.ID_STATIC, \"(\")\r\n closepar = wx.Button(self, wx.ID_STATIC, \")\")\r\n reverse = wx.Button(self, wx.ID_STATIC, \"\\u00B9/\\u2093\")\r\n reverse.Bind(wx.EVT_BUTTON, self.OnReverse)\r\n \r\n################################################################################\r\n################################FRAME SIZERS####################################\r\n################################################################################\r\n \r\n self.sizer1 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons1 = [back, greekP, openpar, closepar, reverse]\r\n for x in range(len(self.buttons1)):\r\n self.buttons1[x].SetFont(self.font)\r\n self.sizer1.Add(self.buttons1[x], 1, wx.EXPAND) \r\n \r\n self.sizer2 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons2 = [tan, tanh, per100, per1000, permilione]\r\n for x in range(len(self.buttons2)):\r\n self.buttons2[x].SetFont(self.font)\r\n self.sizer2.Add(self.buttons2[x], 1, wx.EXPAND)\r\n \r\n self.sizer3 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons3 = [sin, sinh, root2, exp2, expX]\r\n for x in range(len(self.buttons3)):\r\n self.buttons3[x].SetFont(self.font)\r\n self.sizer3.Add(self.buttons3[x], 1, wx.EXPAND) \r\n \r\n self.sizer4 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons4 = [cos, cosh, root3, exp3, rootx]\r\n for x in range(len(self.buttons4)):\r\n self.buttons4[x].SetFont(self.font)\r\n self.sizer4.Add(self.buttons4[x], 1, wx.EXPAND) \r\n \r\n self.sizer5 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons5 = [mc, nine, six, three, point]\r\n for x in range(len(self.buttons5)):\r\n self.buttons5[x].SetFont(self.font)\r\n self.sizer5.Add(self.buttons5[x], 1, wx.EXPAND)\r\n \r\n self.sizer6 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons6 = [rm, eight, five, two, zero]\r\n for x in range(len(self.buttons6)):\r\n self.buttons6[x].SetFont(self.font)\r\n self.sizer6.Add(self.buttons6[x], 1, wx.EXPAND) \r\n \r\n self.sizer7 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons7 = [mx, seven, four, one, sign]\r\n for x in range(len(self.buttons7)):\r\n self.buttons7[x].SetFont(self.operators_font)\r\n self.sizer7.Add(self.buttons7[x], 1, wx.EXPAND) \r\n \r\n self.sizer8 = wx.BoxSizer(wx.VERTICAL)\r\n self.buttons8 = [canc, plus, minus, multiply, divide]\r\n for x in range(len(self.buttons8)):\r\n self.buttons8[x].SetFont(self.font)\r\n self.sizer8.Add(self.buttons8[x], 1, wx.EXPAND) \r\n \r\n################################################################################\r\n \r\n self.parent_sizer = wx.BoxSizer(wx.VERTICAL)\r\n self.sizer = wx.BoxSizer(wx.HORIZONTAL)\r\n \r\n self.sizers = [self.sizer1, self.sizer2, self.sizer3, self.sizer4, self.sizer5, self.sizer6, self.sizer7, self.sizer8]\r\n for x in range(len(self.sizers)):\r\n self.sizer.Add(self.sizers[x], 0, wx.EXPAND)\r\n \r\n self.parent_sizer.Add(self.display, 1, wx.EXPAND)\r\n self.parent_sizer.Add(self.sizer, 0, wx.EXPAND)\r\n self.parent_sizer.Add(equal, 1, wx.EXPAND)\r\n \r\n if self.settings[\"mode\"] == \"Default\":\r\n \r\n self.sizer1.ShowItems(False)\r\n self.sizer2.ShowItems(False)\r\n self.sizer3.ShowItems(False) \r\n self.sizer4.ShowItems(False)\r\n \r\n self.SetSize(310, 305) \r\n \r\n else:\r\n \r\n \r\n self.sizer1.ShowItems(True)\r\n self.sizer2.ShowItems(True)\r\n self.sizer3.ShowItems(True) \r\n self.sizer4.ShowItems(True)\r\n \r\n self.SetSize(615, 305) \r\n \r\n \r\n self.SetSizer(self.parent_sizer)\r\n self.Show(True)\r\n\r\n################################################################################ \r\n############################### FRAME METHODS ##################################\r\n################################################################################\r\n\r\n \r\n \r\n def timeout(self, time, target):\r\n self.timer = wx.Timer(self)\r\n self.Bind(wx.EVT_TIMER, target, self.timer)\r\n self.timer.Start(int(time))\r\n \r\n def SetDefaultStatus(self, event):\r\n self.statusbar.SetStatusText(\" Welcome to Calculator: Good Work\") \r\n try:\r\n self.timer.Stop()\r\n except Exception:\r\n pass\r\n \r\n def SetTemporaryStatus(self, status, time=3000):\r\n self.statusbar.SetStatusText(status)\r\n self.timeout(time, self.SetDefaultStatus)\r\n \r\n def SetStatus(self, event, status):\r\n self.statusbar.SetStatusText(status) \r\n \r\n \r\n################################################################################\r\n################################################################################\r\n \r\n def OnAbout(self, event):\r\n wx.MessageDialog( self, \"\"\"\r\nCalculator was born to help students and everyday users in their work, from the \r\n\r\nmost basic calculations to the more complicated operations.\r\n\r\nIt was developed by Leonardo Scotti using Python 3.8.3\r\nThe GUI was developed using the WxPython framework\r\n(version: \"4.1.1 msw (phoenix) wxWidgets 3.1.5\")\r\n\r\nThe current version of Calculator is 0.20.8\r\n\"\"\", \"About Calculator\", wx.OK|wx.ICON_INFORMATION).ShowModal() \r\n \r\n def OnHotkeys(self, event):\r\n wx.MessageDialog( self, \"\"\"\r\n[CTRL + d] = default mode\r\n[CTRL + s] = scientific mode\r\n[CTRL + c] = copy text\r\n[CTRL + i] = show info\r\n[CTRL + h] = show history\r\n[ALT + c] = copy history\r\n[ALT + d] = delete history\r\n\"\"\", \"Calculator Hotkeys\", wx.OK|wx.ICON_INFORMATION).ShowModal() \r\n \r\n \r\n def OnReadme(self, event):\r\n #opens the README.txt instruction file of the program\r\n subprocess.Popen([\"notepad.exe\", self.path + \"/README.txt\"])\r\n \r\n def OnDefaultMode(self, event):\r\n if self.settings[\"mode\"] == \"Scientific\":\r\n self.defaultMode.Check()\r\n self.settings[\"mode\"] = \"Default\"\r\n \r\n self.sizer1.ShowItems(False)\r\n self.sizer2.ShowItems(False)\r\n self.sizer3.ShowItems(False)\r\n self.sizer4.ShowItems(False)\r\n \r\n self.SetSize(310, 305) \r\n self.parent_sizer.Layout()\r\n \r\n def OnScientificMode(self, event):\r\n if self.settings[\"mode\"] == \"Default\":\r\n self.scientificMode.Check()\r\n self.settings[\"mode\"] = \"Scientific\"\r\n \r\n self.sizer4.ShowItems(True)\r\n self.sizer3.ShowItems(True)\r\n self.sizer2.ShowItems(True)\r\n self.sizer1.ShowItems(True)\r\n \r\n self.SetSize(615, 305)\r\n self.parent_sizer.Layout()\r\n\r\n \r\n def OnDocumentation(self, event):\r\n pass \r\n \r\n def OnEnableHistory(self, event):\r\n if self.settings[\"history\"] == True:\r\n self.enableHistory.Check(False)\r\n self.pos10.Enable(False)\r\n self.pos5.Enable(False)\r\n self.pos3.Enable(False)\r\n self.CorrectOnly.Enable(False)\r\n self.All.Enable(False)\r\n self.showHistory.Enable(False)\r\n self.copyHistory.Enable(False)\r\n self.deleteHistory.Enable(False)\r\n self.settings[\"history\"] = False\r\n else:\r\n self.enableHistory.Check(True)\r\n self.pos10.Enable(True)\r\n self.pos5.Enable(True)\r\n 
self.pos3.Enable(True) \r\n self.CorrectOnly.Enable(True)\r\n self.All.Enable(True) \r\n self.showHistory.Enable(True)\r\n self.copyHistory.Enable(True)\r\n self.deleteHistory.Enable(True) \r\n self.settings[\"history\"] = True\r\n\r\n \r\n def OnMax10Items(self, event):\r\n self.pos10.Check()\r\n self.settings[\"HistoryMaxlenght\"] = 10\r\n \r\n def OnMax5Items(self, event):\r\n self.pos5.Check()\r\n self.settings[\"HistoryMaxlenght\"] = 5\r\n \r\n def OnMax3Items(self, event):\r\n self.pos3.Check()\r\n self.settings[\"HistoryMaxlenght\"] = 3\r\n\r\n def OnCorrectOnly(self, event):\r\n self.CorrectOnly.Check()\r\n self.settings[\"HistorySave\"] = \"CorrectOnly\"\r\n \r\n def OnAll(self, event):\r\n self.All.Check()\r\n self.settings[\"HistorySave\"] = \"All\"\r\n \r\n def OnShowHistory(self, event):\r\n txt = \"\\u0007 \"+ \"\\n \\n \\u0007 \".join(self.history)\r\n if txt.strip() == \"\\u0007\":\r\n txt = \" EMPTY HISTORY \"\r\n wx.MessageDialog( self, txt, \"Calculator's History\", wx.OK|wx.ICON_NONE).ShowModal()\r\n \r\n def OnCopyHistory(self, event):\r\n txt = \"\\u0007 \"+ \"\\n \\n \\u0007 \".join(self.history)\r\n if txt.strip() != \"\\u0007\": \r\n if not wx.TheClipboard.IsOpened():\r\n wx.TheClipboard.Open()\r\n wx.TheClipboard.Clear()\r\n wx.TheClipboard.SetData(wx.TextDataObject(txt))\r\n wx.TheClipboard.Flush()\r\n wx.TheClipboard.Close()\r\n self.SetTemporaryStatus(\"History copied successfully\", 3000)\r\n \r\n def OnDeleteHistory(self, event):\r\n self.history.clear()\r\n self.SetTemporaryStatus(\"History deleted successfully\", 3000)\r\n\r\n def OnCopy(self, event):\r\n txt = \"\".join(self.printer)\r\n if txt.strip() != \"\": \r\n if not wx.TheClipboard.IsOpened():\r\n wx.TheClipboard.Open()\r\n wx.TheClipboard.Clear()\r\n wx.TheClipboard.SetData(wx.TextDataObject(txt))\r\n wx.TheClipboard.Flush()\r\n wx.TheClipboard.Close() \r\n self.SetTemporaryStatus(\"Result copied successfully\", 3000)\r\n\r\n################################################################################\r\n################################################################################\r\n \r\n\r\n def displayer(self):\r\n if self.printer == [] or len(self.printer) == 0 or \"\".join(self.printer) == \"\":\r\n self.display.SetLabel(\"0 \")\r\n elif self.printer == [\"ERROR \"]:\r\n self.display.SetLabel(str(\"\".join(self.printer)) + \" \")\r\n self.printer.clear()\r\n else:\r\n self.display.SetLabel(str(\"\".join(self.printer)) + \" \")\r\n self.parent_sizer.Layout()\r\n \r\n \r\n def HistoryUpgrade(self, operation):\r\n \r\n if len(self.history) < int(self.settings[\"HistoryMaxlenght\"]):\r\n self.history.append(operation)\r\n else:\r\n while len(self.history) >= self.settings[\"HistoryMaxlenght\"]:\r\n self.history.pop(0)\r\n self.history.append(operation)\r\n \r\n \r\n def AddNumber(self, number):\r\n \r\n number = list(number) \r\n if self.new == True:\r\n self.text.clear()\r\n self.printer.clear()\r\n if self.rootx == True:\r\n self.Root(number)\r\n else: \r\n if self.text == [\"0\"]:\r\n self.text.pop(0)\r\n for elem in number:\r\n self.text.append(elem)\r\n if self.printer == [\"0\"]:\r\n self.printer.pop(0)\r\n for elem in number:\r\n self.printer.append(elem)\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n self.new = False\r\n \r\n def AddOperator(self, operator):\r\n \r\n operators = [\" / \", \" * \", \" + \", \" - \"]\r\n \r\n if operator not in self.text and self.text[-1] not in operators:\r\n \r\n for elem in 
operators:\r\n \r\n if elem in self.text:\r\n \r\n break\r\n \r\n \r\n else:\r\n \r\n self.text.append(operator) \r\n \r\n \r\n elif operator not in self.text and self.text[-1] in operators:\r\n \r\n self.text.pop(-1)\r\n self.text.append(operator)\r\n \r\n elif operator in self.text:\r\n \r\n pass\r\n \r\n \r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n self.printer.clear()\r\n self.new = False\r\n \r\n \r\n################################################################################\r\n################################################################################\r\n \r\n \r\n def On1(self, event):\r\n self.AddNumber(\"1\")\r\n self.displayer()\r\n\r\n def On2(self, event):\r\n self.AddNumber(\"2\")\r\n self.displayer()\r\n\r\n def On3(self, event):\r\n self.AddNumber(\"3\")\r\n self.displayer()\r\n\r\n def On4(self, event):\r\n self.AddNumber(\"4\")\r\n self.displayer()\r\n\r\n def On5(self, event):\r\n self.AddNumber(\"5\")\r\n self.displayer()\r\n\r\n def On6(self, event):\r\n self.AddNumber(\"6\")\r\n self.displayer()\r\n\r\n def On7(self, event):\r\n self.AddNumber(\"7\")\r\n self.displayer()\r\n\r\n def On8(self, event):\r\n self.AddNumber(\"8\")\r\n self.displayer()\r\n\r\n def On9(self, event):\r\n self.AddNumber(\"9\")\r\n self.displayer()\r\n\r\n def On0(self, event):\r\n self.AddNumber(\"0\")\r\n self.displayer()\r\n\r\n\r\n################################################################################\r\n\r\n\r\n def OnChangeSign(self, event):\r\n if self.printer != [\"0\"] and self.printer != []:\r\n if self.printer[0] == \"-\":\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer.pop(0)\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n \r\n else:\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer.insert(0, \"-\")\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n\r\n def OnPoint(self, event):\r\n if \".\" not in self.printer:\r\n self.text.append(\".\")\r\n self.printer.append(\".\")\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnCanc(self, event):\r\n self.text.clear()\r\n self.printer.clear()\r\n self.printer.append(\"0\")\r\n self.displayer()\r\n self.SetDefaultStatus(None)\r\n \r\n def OnPlus(self, event):\r\n self.AddOperator(\" + \")\r\n \r\n def OnMinus(self, event):\r\n self.AddOperator(\" - \")\r\n \r\n def OnMultiply(self, event):\r\n self.AddOperator(\" * \") \r\n \r\n def OnDivide(self, event):\r\n self.AddOperator(\" / \")\r\n \r\n def OnEqual(self, event):\r\n \r\n operators = [\" + \", \" - \", \" * \", \" / \"]\r\n \r\n try:\r\n x = \"\".join(self.text)\r\n text = x\r\n if x == \"\":\r\n pass\r\n elif \"\\u0025\" in x:\r\n x = x.split(\"\\u0025\")\r\n if len(x) == 2 and x[1] == \"\":\r\n x = str(x[0])\r\n for elem in operators:\r\n if elem in x:\r\n x = x.split(elem)\r\n a = x[0]\r\n b = x[1]\r\n y = (float(a) * float(b)) / 100\r\n x.pop(-1)\r\n x.append(str(y))\r\n x = eval(str(elem.join(x)))\r\n operation = \"\".join(self.text) + \" = \" + str(x)\r\n self.HistoryUpgrade(operation)\r\n self.printer.clear()\r\n self.text.clear()\r\n self.text.append(str(x))\r\n self.printer.append(str(x))\r\n self.displayer()\r\n self.SetTemporaryStatus(\" Result: \" + str(x), 3000) \r\n self.new = 
True\r\n            else:\r\n                self.text.clear()\r\n self.printer.clear()\r\n self.printer.append(\"ERROR \")\r\n self.displayer()\r\n self.printer.append(\"0\")\r\n self.SetTemporaryStatus(\"ERROR: Invalid syntax\", 3000)\r\n if self.settings[\"HistorySave\"] == \"All\":\r\n operation = text + \" = ERROR: Invalid syntax\"\r\n self.HistoryUpgrade(operation) \r\n self.new = True \r\n elif \"\\u2030\" in x:\r\n x = x.split(\"\\u2030\")\r\n if len(x) == 2 and x[1] == \"\":\r\n x = str(x[0])\r\n for elem in operators:\r\n if elem in str(x):\r\n x = x.split(elem)\r\n a = x[0]\r\n b = x[1]\r\n y = (float(a) * float(b)) / 1000\r\n x.pop(-1)\r\n x.append(str(y))\r\n x = eval(str(elem.join(x)))\r\n operation = \"\".join(self.text) + \" = \" + str(x)\r\n self.HistoryUpgrade(operation)\r\n self.printer.clear()\r\n self.text.clear()\r\n self.text.append(str(x))\r\n self.printer.append(str(x))\r\n self.displayer()\r\n self.SetTemporaryStatus(\" Result: \" + str(x), 3000) \r\n self.new = True \r\n else:\r\n self.text.clear()\r\n self.printer.clear()\r\n self.printer.append(\"ERROR \")\r\n self.displayer()\r\n self.printer.append(\"0\")\r\n self.SetTemporaryStatus(\"ERROR: Invalid syntax\", 3000)\r\n if self.settings[\"HistorySave\"] == \"All\":\r\n operation = text + \" = ERROR: Invalid syntax\"\r\n self.HistoryUpgrade(operation) \r\n self.new = True \r\n elif \"\\u2031\" in x:\r\n x = x.split(\"\\u2031\")\r\n if len(x) == 2 and x[1] == \"\":\r\n x = str(x[0])\r\n for elem in operators:\r\n if elem in x:\r\n x = x.split(elem)\r\n a = x[0]\r\n b = x[1]\r\n y = (float(a) * float(b)) / 1000000\r\n x.pop(-1)\r\n x.append(str(y))\r\n x = eval(str(elem.join(x)))\r\n operation = \"\".join(self.text) + \" = \" + str(x)\r\n self.HistoryUpgrade(operation)\r\n self.printer.clear()\r\n self.text.clear()\r\n self.text.append(str(x))\r\n self.printer.append(str(x))\r\n self.displayer()\r\n self.SetTemporaryStatus(\" Result: \" + str(x), 3000) \r\n self.new = True \r\n else:\r\n self.text.clear()\r\n self.printer.clear()\r\n self.printer.append(\"ERROR \")\r\n self.displayer()\r\n self.printer.append(\"0\")\r\n self.SetTemporaryStatus(\"ERROR: Invalid syntax\", 3000)\r\n if self.settings[\"HistorySave\"] == \"All\":\r\n operation = text + \" = ERROR: Invalid syntax\"\r\n self.HistoryUpgrade(operation) \r\n self.new = True \r\n else:\r\n x = eval(str(x))\r\n operation = \"\".join(self.text) + \" = \" + str(x)\r\n self.HistoryUpgrade(operation)\r\n self.printer.clear()\r\n self.text.clear()\r\n self.text.append(str(x))\r\n self.printer.append(str(x))\r\n self.displayer()\r\n self.SetTemporaryStatus(\" Result: \" + str(x), 3000) \r\n self.new = True\r\n except SyntaxError:\r\n self.text.clear()\r\n self.printer.clear()\r\n self.printer.append(\"ERROR \")\r\n self.displayer()\r\n self.printer.append(\"0\")\r\n self.SetTemporaryStatus(\"ERROR: Invalid syntax\", 3000) \r\n if self.settings[\"HistorySave\"] == \"All\":\r\n operation = text + \" = ERROR: Invalid syntax\"\r\n self.HistoryUpgrade(operation) \r\n self.new = True\r\n except ZeroDivisionError:\r\n self.text.clear()\r\n self.printer.clear()\r\n self.printer.append(\"ERROR \")\r\n self.displayer() \r\n self.printer.append(\"0\")\r\n self.SetTemporaryStatus(\"ERROR: Division by zero\", 3000)\r\n if self.settings[\"HistorySave\"] == \"All\":\r\n operation = text + \" = ERROR: Division by zero\"\r\n self.HistoryUpgrade(operation) \r\n self.new = True \r\n \r\n def OnMemoryPlus(self, event):\r\n if self.text != []:\r\n self.memory.append(\"+\" + str(eval(\"\".join(self.text))))\r\n \r\n def OnMemoryCall(self, event):\r\n self.text.append(str(eval(\"\".join(self.memory))))\r\n self.printer.clear()\r\n self.printer.append(str(eval(\"\".join(self.memory))))\r\n self.displayer()\r\n \r\n def OnMemoryCanc(self, event):\r\n self.memory.clear()\r\n self.memory.append(\"0\")\r\n \r\n \r\n################################################################################\r\n################################################################################\r\n \r\n def OnBack(self, event):\r\n operators = [\" / \", \" * \", \" + \", \" - \", \"**\"]\r\n try:\r\n if self.text[-1] not in operators:\r\n self.text.pop(-1)\r\n self.printer.pop(-1)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text)))\r\n else:\r\n self.SetDefaultStatus(None)\r\n except IndexError:\r\n pass\r\n \r\n def OnExp2(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer.append(\"**2\")\r\n self.printer = list(str(eval(\"\".join(self.printer))))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnExp3(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer.append(\"**3\")\r\n self.printer = list(str(eval(\"\".join(self.printer))))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnExpX(self, event):\r\n self.printer.append(\"**\")\r\n self.text.append(\"**\")\r\n self.printer.clear()\r\n\r\n def Root(self, number):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(pow(float(\"\".join(self.printer)), 1/number)))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnRoot2(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(math.sqrt(float(\"\".join(self.printer)))))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text)))\r\n \r\n def OnRoot3(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(pow(float(\"\".join(self.printer)), 1/3)))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text)))\r\n \r\n def OnRootX(self, event):\r\n self.rootx = True\r\n \r\n def OnSin(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(math.sin(math.radians(int(float(\"\".join(self.printer)))))))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnCos(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(math.cos(math.radians(float(\"\".join(self.printer))))))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + 
str(\"\".join(self.text)))\r\n \r\n def OnSinh(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(math.asin(float(\"\".join(self.printer))) / math.pi * 180))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text)))\r\n \r\n def OnCosh(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer = list(str(math.acos(float(\"\".join(self.printer))) / math.pi * 180))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text)))\r\n \r\n def OnReverse(self, event):\r\n for elem in self.printer:\r\n self.text.pop(-1)\r\n self.printer.insert(0, \"1/\")\r\n self.printer = list(str(eval(\"\".join(self.printer))))\r\n for elem in self.printer:\r\n self.text.append(elem)\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text)))\r\n \r\n def OnGreekP(self, event):\r\n self.AddNumber(\"3.141592654\")\r\n self.displayer()\r\n \r\n def OnPer100(self, event):\r\n self.printer.append(\"\\u0025\")\r\n self.text.append(\"\\u0025\")\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnPer1000(self, event):\r\n self.printer.append(\"\\u2030\")\r\n self.text.append(\"\\u2030\")\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n \r\n def OnPerMilione(self, event):\r\n self.printer.append(\"\\u2031\")\r\n self.text.append(\"\\u2031\")\r\n self.displayer()\r\n if \"\".join(self.text) != \"\":\r\n self.SetStatus(None, \" Operation: \" + str(\"\".join(self.text))) \r\n\r\n\r\n################################################################################\r\n################################################################################\r\n \r\n \r\napp = wx.App(False)\r\nframe = MainWindow(\"Calculator\")\r\napp.MainLoop()\r\n","repo_name":"LeonardoScotti/LeonardoScotti","sub_path":"calcolatrice.pyw","file_name":"calcolatrice.pyw","file_ext":"pyw","file_size_in_byte":46585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74277905122","text":"import sys\r\nfrom datetime import datetime\r\nimport gc\r\n\r\nclass Node:\r\n\tdef __init__(self, state, position, move, g, parent, depth, f):\r\n\t\tself.state = state\r\n\t\tself.position = position # where the 0 is\r\n\t\tself.move = move\r\n\t\tself.g = g\r\n\t\tself.parent = parent\r\n\t\tself.depth = depth\r\n\t\tself.f = f\r\n\r\ndef readInputFile(inputFile):\r\n\tstate = []\r\n\twith open(inputFile) as inputFile:\r\n\t\twhile True:\r\n\t\t\tline = inputFile.readline().strip()\r\n\t\t\tif line == \"END OF FILE\":\r\n\t\t\t\tbreak;\r\n\t\t\tline = line.split()\r\n\t\t\trow = list(map(int, line))\r\n\t\t\tstate.append(row)\r\n\treturn state\r\n\r\ndef makeIndent(level):\r\n\tindent = \"\\t\"\r\n\tfor i in range(level-1):\r\n\t\tindent += \"\\t\"\r\n\treturn indent\r\n\r\ndef fileWriteNode(file, node, level, searchMethod):\r\n\tindent = makeIndent(level)\r\n\tfile.write(indent+\"<\\n\")\r\n\tfile.write(indent+\"\\tstate =\\n\")\r\n\tfor row in node.state:\r\n\t\tfile.write(indent+\"\\t\\t\"+str(row)+\"\\n\")\r\n\tif node.move[0] == 
0:\r\n\t\tfile.write(indent+\"\\taction = Start\\n\")\r\n\telse:\r\n\t\tfile.write(indent+\"\\taction = Move \"+str(node.move[0])+\" \"+node.move[1]+\"\\n\")\r\n\tfile.write(indent+\"\\tg(n) = \"+str(node.g)+\"\\n\")\r\n\tif searchMethod == \"dls\" or searchMethod == \"ids\":\r\n\t\tfile.write(indent+\"\\tdepth = \"+str(node.depth)+\"\\n\")\r\n\telif searchMethod == \"greedy\" or searchMethod == \"a*\":\r\n\t\tfile.write(indent+\"\\tf(n) = \"+str(node.f)+\"\\n\")\r\n\tfile.write(indent+\"\\tparent = \")\r\n\tif node.parent != None:\r\n\t\tfile.write(\"\\n\")\r\n\t\tfileWriteNode(file, node.parent, level+2, searchMethod)\r\n\telse:\r\n\t\tfile.write(\"None\\n\")\r\n\tfile.write(indent+\">\\n\")\r\n\r\ndef fileWritePoppedNode(file, node, level, nodesPopped, searchMethod):\r\n\tfile.write(\"Popped node #\"+str(nodesPopped)+\":\\n\")\r\n\tfileWriteNode(file, node, level, searchMethod)\r\n\r\ndef fileWriteClosedFringe(file, closed, fringe, numNewNodes, searchMethod):\r\n\tfile.write(\"\\t\"+str(numNewNodes)+\" successors generated\"+\"\\n\")\r\n\tfile.write(\"\\tClosed:\\n\")\r\n\tfor state in closed:\r\n\t\tfor row in state:\r\n\t\t\tfile.write(\"\\t\\t\"+str(row)+\"\\n\")\r\n\t\tfile.write(\"\\n\")\r\n\tfile.write(\"\\tFringe:\\n\")\r\n\tfor node in fringe:\r\n\t\tfileWriteNode(file, node, 2, searchMethod)\r\n\r\ndef fileWriteSolution(file, nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize):\r\n\tfile.write(\"\\nNodes Popped: \"+str(nodesPopped)+\"\\n\")\r\n\tfile.write(\"Nodes Expanded: \"+str(nodesExpanded)+\"\\n\")\r\n\tfile.write(\"Nodes Generated: \"+str(nodesGenerated)+\"\\n\")\r\n\tfile.write(\"Max Fringe Size: \"+str(maxFringeSize)+\"\\n\")\r\n\r\ndef getSolutionPath(node):\r\n\tpath = []\r\n\tcost = 0\r\n\twhile node.parent != None:\r\n\t\tpath.append(node)\r\n\t\tcost += node.move[0]\r\n\t\tnode = node.parent\r\n\tpath.reverse()\r\n\treturn (path, cost)\r\n\r\ndef printSolution(nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize, cost, solutionPath):\r\n\tprint(\"Nodes Popped:\", nodesPopped)\r\n\tprint(\"Nodes Expanded:\", nodesExpanded)\r\n\tprint(\"Nodes Generated:\", nodesGenerated)\r\n\tprint(\"Max Fringe Size:\", maxFringeSize)\r\n\tprint(\"Solution Found at depth\", len(solutionPath), \"with cost of\", cost)\r\n\tprint(\"Steps:\")\r\n\tfor node in solutionPath:\r\n\t\tprint(\"\\tMove\", node.move[0], node.move[1])\r\n\r\ndef defineGoalLocations(goalState): \r\n\tgoalLocations = {\r\n\t\t\t0: None,\r\n\t\t\t1: None,\r\n\t\t\t2: None,\r\n\t\t\t3: None,\r\n\t\t\t4: None,\r\n\t\t\t5: None,\r\n\t\t\t6: None,\r\n\t\t\t7: None,\r\n\t\t\t8: None\r\n\t}\r\n\tcount = 0\r\n\tfor row in range(len(goalState)):\r\n\t\tfor col in range(len(goalState)):\r\n\t\t\tgoalLocations[goalState[row][col]] = [row, col, count]\r\n\t\t\tcount += 1\r\n\treturn goalLocations\r\n\r\ndef heuristic(node, searchMethod, goalLocations):\r\n\tmanhattan = 0\r\n\tfor row in range(len(node.state)):\r\n\t\tfor col in range(len(node.state[0])):\r\n\t\t\tmanhattan += abs(goalLocations[node.state[row][col]][0] - row) * node.state[row][col]\r\n\t\t\tmanhattan += abs(goalLocations[node.state[row][col]][1] - col) * node.state[row][col]\t\r\n\treturn manhattan\r\n\r\ndef makeStartNode(startState, searchMethod, goalLocations):\r\n\tstartNode = None\r\n\tfor row in range(len(startState)):\r\n\t\tfor col in range(len(startState[0])):\r\n\t\t\tif startState[row][col] == 0:\r\n\t\t\t\tstartNode = Node(startState, (row, col), (0, \"start\"), 0, None, 0, 0)\r\n\t\t\t\tif searchMethod == \"greedy\" or searchMethod == 
\"a*\":\r\n\t\t\t\t\theuristicValue = heuristic(startNode, searchMethod, goalLocations)\r\n\t\t\t\t\tstartNode.f = heuristicValue\r\n\t\t\t\t\tif searchMethod == \"a*\":\r\n\t\t\t\t\t\tstartNode.f += startNode.g\r\n\t\t\t\tbreak\r\n\treturn startNode\r\n\r\ndef search(startState, goalState, searchMethod, limit, dumpFlag, file):\r\n\tif dumpFlag:\r\n\t\tfile.write(\"Running \"+searchMethod+\"\\n\")\r\n\tgoalLocations = defineGoalLocations(goalState)\r\n\tstartNode = makeStartNode(startState, searchMethod, goalLocations)\r\n\tnodesGenerated = 1\r\n\tcurrFringeSize = 0\t\r\n\tmaxFringeSize = 0\r\n\tnodesPopped = 0\r\n\tnodesExpanded = 0\r\n\tlimit = 0\r\n\tif searchMethod == \"dls\":\r\n\t\tlimit = int(input(\"Enter the depth limit:\"))\r\n\t\tif dumpFlag:\r\n\t\t\tfile.write(\"Depth Limit Entered = \"+str(limit)+\"\\n\")\r\n\tsolutionFound = searchHelper(startNode, goalState, searchMethod, nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize, limit, goalLocations, dumpFlag, file)\r\n\treturn solutionFound\r\n\r\ndef searchHelper(startNode, goalState, searchMethod, nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize, limit, goalLocations, dumpFlag, file):\r\n\tclosed = []\r\n\tfringe = []\r\n\tfringe.append(startNode)\r\n\tcurrFringeSize = 1\r\n\tmaxFringeSize = 1\r\n\r\n\twhile fringe:\r\n\t\t# pop node from fringe\r\n\t\tif searchMethod == \"dfs\" or searchMethod == \"dls\" or searchMethod == \"ids\":\r\n\t\t\tnode = fringe.pop()\r\n\t\telse:\r\n\t\t\tnode = fringe.pop(0)\r\n\t\tnodesPopped += 1\r\n\t\tif dumpFlag:\r\n\t\t\tfileWritePoppedNode(file, node, 1, nodesPopped, searchMethod)\r\n\r\n\t\t# check if reach goal state\r\n\t\tif node.state == goalState:\r\n\t\t\t(solutionPath, cost) = getSolutionPath(node)\r\n\t\t\tprintSolution(nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize, cost, solutionPath)\r\n\t\t\tif dumpFlag:\r\n\t\t\t\tfileWriteSolution(file, nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize)\r\n\t\t\treturn (solutionPath, cost)\r\n\r\n\t\t# determine whether to expand node\r\n\t\ttoBeExpanded = False\r\n\t\tif searchMethod == \"dls\" and (node.state, node.depth) not in closed and node.depth != limit:\r\n\t\t\tclosed.append((node.state, node.depth))\r\n\t\t\ttoBeExpanded = True\r\n\t\telif searchMethod != \"dls\" and node.state not in closed and not(searchMethod == \"ids\" and node.depth == limit):\r\n\t\t\tclosed.append(node.state)\r\n\t\t\ttoBeExpanded = True\r\n\r\n\t\t# expand node\r\n\t\tif toBeExpanded:\r\n\t\t\tnewNodes = expand(node, searchMethod, goalLocations)\r\n\t\t\tfringe += newNodes\r\n\t\t\tif searchMethod == \"ucs\":\r\n\t\t\t\tfringe.sort(key=lambda node: node.g)\r\n\t\t\tif searchMethod == \"greedy\" or searchMethod == \"a*\":\r\n\t\t\t\tfringe.sort(key=lambda node: node.f)\r\n\t\t\tif dumpFlag:\r\n\t\t\t\tfileWriteClosedFringe(file, closed, fringe, len(newNodes), searchMethod)\r\n\t\t\tnodesExpanded += 1\r\n\t\t\tnodesGenerated += len(newNodes)\r\n\t\t\tcurrFringeSize = len(fringe)\r\n\t\t\tmaxFringeSize = max(currFringeSize, maxFringeSize)\r\n\r\n\t# if fringe is empty\r\n\tif searchMethod == \"dls\":\r\n\t\tprint(\"nodesGenerated = \", nodesGenerated)\r\n\t\tprint(\"fringe = \", fringe)\r\n\t\tprint(\"Depth limit reached. 
Search failed.\")\r\n\telif searchMethod == \"ids\":\r\n\t\tdel fringe\r\n\t\tgc.collect()\r\n\t\tsearchHelper(startNode, goalState, searchMethod, nodesPopped, nodesExpanded, nodesGenerated, maxFringeSize, limit+1, goalLocations, dumpFlag, file)\r\n\t\t\r\ndef expand(node, searchMethod, goalLocations):\r\n\tnewNodes = []\r\n\tinitialState = node.state\r\n\trow = node.position[0]\r\n\tcol = node.position[1]\r\n\r\n\t# generating new nodes\r\n\tif row-1 >= 0:\r\n\t\tnewState = list(map(list, initialState))\r\n\t\tnewState[row-1][col] = 0\r\n\t\tnewState[row][col] = initialState[row-1][col]\t\t\r\n\t\tnewNode = Node(newState, (row-1, col), (initialState[row-1][col], \"Down\"), node.g + initialState[row-1][col], node, node.depth + 1, 0)\r\n\t\tif searchMethod == \"greedy\" or searchMethod == \"a*\":\r\n\t\t\theuristicValue = heuristic(newNode, searchMethod, goalLocations)\r\n\t\t\tnewNode.f = heuristicValue\r\n\t\t\tif searchMethod == \"a*\":\r\n\t\t\t\tnewNode.f += newNode.g\r\n\t\tnewNodes.append(newNode)\r\n\tif col-1 >= 0:\r\n\t\tnewState = list(map(list, initialState))\r\n\t\tnewState[row][col-1] = 0\r\n\t\tnewState[row][col] = initialState[row][col-1]\r\n\t\tnewNode = Node(newState, (row, col-1), (initialState[row][col-1], \"Right\"), node.g + initialState[row][col-1], node, node.depth + 1, 0)\r\n\t\tif searchMethod == \"greedy\" or searchMethod == \"a*\":\r\n\t\t\theuristicValue = heuristic(newNode, searchMethod, goalLocations)\r\n\t\t\tnewNode.f = heuristicValue\r\n\t\t\tif searchMethod == \"a*\":\r\n\t\t\t\tnewNode.f += newNode.g\r\n\t\tnewNodes.append(newNode)\r\n\tif row+1 < len(node.state):\r\n\t\tnewState = list(map(list, initialState))\r\n\t\tnewState[row+1][col] = 0\r\n\t\tnewState[row][col] = initialState[row+1][col]\r\n\t\tnewNode = Node(newState, (row+1, col), (initialState[row+1][col], \"Up\"), node.g + initialState[row+1][col], node, node.depth + 1, 0)\r\n\t\tif searchMethod == \"greedy\" or searchMethod == \"a*\":\r\n\t\t\theuristicValue = heuristic(newNode, searchMethod, goalLocations)\r\n\t\t\tnewNode.f = heuristicValue\r\n\t\t\tif searchMethod == \"a*\":\r\n\t\t\t\tnewNode.f += newNode.g\r\n\t\tnewNodes.append(newNode)\r\n\tif col+1 < len(node.state[0]):\r\n\t\tnewState = list(map(list, initialState))\r\n\t\tnewState[row][col+1] = 0\r\n\t\tnewState[row][col] = initialState[row][col+1]\r\n\t\tnewNode = Node(newState, (row, col+1), (initialState[row][col+1], \"Left\"), node.g + initialState[row][col+1], node, node.depth + 1, 0)\r\n\t\tif searchMethod == \"greedy\" or searchMethod == \"a*\":\r\n\t\t\theuristicValue = heuristic(newNode, searchMethod, goalLocations)\r\n\t\t\tnewNode.f = heuristicValue\r\n\t\t\tif searchMethod == \"a*\":\r\n\t\t\t\tnewNode.f += newNode.g\r\n\t\tnewNodes.append(newNode)\r\n\r\n\treturn newNodes\r\n\t\r\ndef main():\r\n\tstartFile = sys.argv[1]\r\n\tgoalFile = sys.argv[2]\r\n\tdumpFlag = False\r\n\tlimit = 0\r\n\tmethod = \"a*\"\r\n\tif len(sys.argv) > 3:\r\n\t\tdumpFlag = sys.argv[3] == \"true\"\r\n\t\tif sys.argv[3] == \"bfs\" or sys.argv[3] == \"ucs\" or sys.argv[3] == \"dfs\" or sys.argv[3] == \"dls\" or sys.argv[3] == \"ids\" or sys.argv[3] == \"greedy\" or sys.argv[3] == \"a*\":\r\n\t\t\tmethod = sys.argv[3]\r\n\t\t\r\n\tif len(sys.argv) == 5:\r\n\t\tdumpFlag = sys.argv[4] == \"true\"\r\n\tf = None\r\n\tif dumpFlag:\r\n\t\tnow = datetime.now()\r\n\t\tfileName = \"trace-\" + now.strftime(\"%mm%dd%Yy\") + \"-\" + now.strftime(\"%Hh%Mm%Ss\") + \".txt\"\r\n\t\tf = open(fileName, \"w\")\r\n\t\tf.write(\"Command line arguments 
:\\n\")\r\n\t\tfor argument in sys.argv[1:]:\r\n\t\t\tf.write(\"\\t\"+argument+\"\\n\")\r\n\t\tf.write(\"Method selected: \")\r\n\t\tf.write(method+\"\\n\")\r\n\t\tf.write(\"\\n\")\r\n\r\n\tstartState = readInputFile(startFile)\r\n\tgoalState = readInputFile(goalFile)\r\n\tsearch(startState, goalState, method, limit, dumpFlag, f)\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","repo_name":"YenDuyenLeMavs/8PuzzleProblem","sub_path":"expense_8_puzzle.py","file_name":"expense_8_puzzle.py","file_ext":"py","file_size_in_byte":10392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10729286974","text":"import cv2\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nclass EntropyRegularizationLoss(nn.Module):\n \n def __init__(self, num_classes, lambda_wgt):\n super(EntropyRegularizationLoss, self).__init__()\n self.num_classes = num_classes\n self.lambda_wgt = lambda_wgt\n self.soft_max = nn.Softmax(dim=1)\n \n def forward(self, y_pred, ycrf, yret):\n y_pred = self.soft_max(y_pred)\n loss_ce = self.get_loss_ce(y_pred, ycrf, yret)\n loss_er = self.get_loss_er(y_pred, ycrf, yret)\n loss = loss_ce + self.lambda_wgt * loss_er\n return loss\n \n def get_loss_ce(self, y_pred, ycrf, yret):\n n_classes_arr=torch.arange(self.num_classes).cuda()\n \n s_class = (ycrf[:,:,:,None] == n_classes_arr) & (yret[:,:,:,None] == n_classes_arr)\n s_class = torch.permute(s_class, (0, 3, 1, 2)) \n \n denom = torch.sum(s_class)\n num = torch.sum(torch.log(y_pred[s_class]))\n return -num/denom \n \n def get_loss_er(self, y_pred, ycrf, yret):\n n_classes_arr=torch.arange(self.num_classes).cuda()\n \n not_s_class = torch.logical_not((ycrf[:,:,:,None] == n_classes_arr) & (yret[:,:,:,None] == n_classes_arr)) \n not_s_class = torch.permute(not_s_class, (0, 3, 1, 2)) \n \n denom = torch.sum(not_s_class)\n numer = 0\n \n for i in range(self.num_classes):\n t = not_s_class[:,i,:,:]\n numer += torch.sum(y_pred[:,i,:,:][t] * torch.log(y_pred[:,i,:,:][t]))\n \n return -numer/denom","repo_name":"ccjcv/DRTNet","sub_path":"losses/EntropyReg.py","file_name":"EntropyReg.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73816439841","text":"class Solution(object):\n def subarraySum(self, nums, k):\n ans = 0\n prefix = 0\n count = Counter({0: 1})\n\n for num in nums:\n prefix += num\n ans += count[prefix - k]\n count[prefix] += 1\n\n return ans\n ","repo_name":"dagmawibabi/CompetitiveProgrammingOLD","sub_path":"LeetCode/subArraySumEqualsK.py","file_name":"subArraySumEqualsK.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"42279952657","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\n\nfrom mydash.utils import render_json\nfrom todolists.models import TodoList, Task\nfrom todolists.forms import (\n NewTodoListForm,\n EditTodoListForm,\n NewTaskForm,\n EditTaskForm,\n)\nfrom todolists.utils import search_todolists\n\n\ndef list_todolists(request):\n \"\"\"Show a list of todolists\"\"\"\n todolists = TodoList.objects.filter(user=request.user).order_by('-modified_on')\n\n if request.POST:\n todolists = search_todolists(request.POST.get('query', None), todolists)\n\n context = {\n 'todolists': todolists,\n }\n return render(request, 
'todolists/list_todolists.html', context)\n\n\ndef view_todolist(request, slug):\n \"\"\"Show a todolist's tasks\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n\n tasks = Task.objects.filter(user=request.user, todolist=todolist)\n tasks = tasks.order_by('created_on')\n\n context = {\n 'tasks': tasks,\n 'todolist': todolist,\n }\n return render(request, 'todolists/view_todolist.html', context)\n\n\ndef add_todolist(request):\n \"\"\"Add a new todolist\"\"\"\n form = NewTodoListForm(request.POST or None, user=request.user)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('list-todolists')\n\n context = {\n 'form': form,\n }\n return render(request, 'todolists/add_todolist.html', context)\n\n\ndef edit_todolist(request, slug):\n \"\"\"Edit an existing todolist\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n\n form = EditTodoListForm(request.POST or None, user=request.user, instance=todolist)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('list-todolists')\n\n context = {\n 'todolist': todolist,\n 'form': form,\n }\n return render(request, 'todolists/edit_todolist.html', context)\n\n\n@require_POST\ndef delete_todolist(request, slug):\n \"\"\"Delete a todolist\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n\n todolist_pk = todolist.pk\n todolist.delete()\n return render_json(todolist_pk)\n\n\ndef add_task(request, slug):\n \"\"\"Add a new task\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n\n form = NewTaskForm(request.POST or None, user=request.user, todolist=todolist)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('view-todolist', todolist.slug)\n\n context = {\n 'form': form,\n 'todolist': todolist,\n }\n return render(request, 'todolists/add_task.html', context)\n\n\ndef edit_task(request, slug, task_slug):\n \"\"\"Edit an existing task\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n task = get_object_or_404(Task, user=request.user, slug=task_slug, todolist=todolist)\n\n form = EditTaskForm(request.POST or None, user=request.user, instance=task)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('view-todolist', todolist.slug)\n\n context = {\n 'todolist': todolist,\n 'task': task,\n 'form': form,\n }\n return render(request, 'todolists/edit_task.html', context)\n\n\n@require_POST\ndef delete_task(request, slug, task_slug):\n \"\"\"Delete a task\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n task = get_object_or_404(Task, user=request.user, slug=task_slug, todolist=todolist)\n\n task_pk = task.pk\n task.delete()\n return render_json(task_pk)\n\n\n@require_POST\ndef complete_task(request, slug, task_slug):\n \"\"\"Complete a task\"\"\"\n todolist = get_object_or_404(TodoList, user=request.user, slug=slug)\n task = get_object_or_404(Task, user=request.user, slug=task_slug, todolist=todolist)\n\n task.complete = not task.complete\n task.save()\n return render_json(task.pk)\n","repo_name":"dansackett/mydash","sub_path":"apps/todolists/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35108213530","text":"#!/usr/bin/env python3\n# -*- coding: iso-8859-1 -*-\nimport matplotlib\nmatplotlib.use('Agg')\nimport argparse\nfrom pylab import 
*\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Turn interactive plotting off\nplt.ioff()\n\nfont = {'size' : 14}\nmatplotlib.rc('font', **font)\nmatplotlib.pyplot.xticks(fontsize=14)\n\n################### input parameters #################################\"\nfilename = ['../results/mcm-wFGL-tag_RO2-0', '../results/mcm-wFGL-tag_RO2-1', '../results/mcm-wFGL-tag_RO2-2', '../results/mcm-wFGL-tag_RO2-3/']\nstl = ['b-','g--','k:','r-']\ntiny=1E-19\n#####################################################################\n\nfig = plt.figure()\nfor ind,fn in enumerate(filename):\n with open(fn+'/aero/Organic.txt') as f: soa = f.read().splitlines()\n with open(fn+'/gas/BCARY.txt') as f: voc = f.read().splitlines()\n nt = len(soa)\n data = np.zeros(nt)\n voc0,soa0=float(voc[0]),float(soa[0])\n for i in range(nt): \n data[i] = (float(soa[i])-soa0) /(voc0-float(voc[i])+tiny)*100.\n plt.plot(data,stl[ind],linewidth=1.3,label=fn)\nplt.xlabel('time (hour)')\nplt.xlim(0,24)\nplt.title('SOA yields (%)')\nplt.legend(['0-no RO2','1-generated RO2','2-background RO2','3-generated+background RO2'],loc='best',framealpha=0.5)\nfig.savefig('mcm_diffRO2.png')\n\n","repo_name":"sshaerosol/ssh-aerosol","sub_path":"graph/mcm.py","file_name":"mcm.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"34898361250","text":"\"\"\"\nGeneric code for HPO.\n\n\"\"\"\n\nfrom typing import Mapping, Any, Type\nfrom ray import tune\nfrom ray.tune.logger import MLFLowLogger, DEFAULT_LOGGERS\nfrom ray.tune.schedulers import FIFOScheduler\nfrom mlflow.tracking import MlflowClient\nimport numpy\nimport pickle\nfrom os import path\nfrom models.generic_model import ModelFactory\nfrom models.model_analysis import evaluate_model\nfrom data.data_loading import DataPipeline\nfrom data.data_init import init_data\nfrom data import FEATURES, LABELS, FEATURES_NO_FOOT_TYPE, FEATURES_NO_FOOT_TYPE_WITH_TIME\nfrom utils.ml_flow_utils import create_experiment\nfrom data.data_rolling import append_rolling\n\nclient = MlflowClient()\noutlier = False\ntest_run = False\npipe = DataPipeline(test=test_run, version=0.23, load='train+test', trans_type=\"robust\", trans_outlier=outlier, data_outlier=outlier)\n\n\ndef train_model(config: Mapping[str, Any]) -> None:\n \"\"\"Train a model from a given configuration.\n\n :param config:\n Dictionary containing all HPO informations.\n :return:\n None\n \"\"\"\n\n numpy.random.seed(0)\n\n # Load data\n channel = config['channel']\n x_train, y_train, x_val, y_val, x_test, y_test, _, _, features = pipe.create_dataset(channel=channel)\n \n # Init model\n model_fact = config.pop('fact')\n model = model_fact.from_config(config=config)\n\n # Train model\n model.fit(x_train, y_train)\n\n # Evaluate model\n sp_cor_val = evaluate_model(y_val, model.predict(x_val))['sc']\n sp_cor_test = evaluate_model(y_test, model.predict(x_test))['sc']\n\n # Report to tune\n tune.report(SC_Val=sp_cor_val, SC_test=sp_cor_test)\n\n\ndef hpo_channel(model_fact: Type[ModelFactory], channel: str,\n num_samples: int, exp_name: str) -> Mapping[str, Any]:\n \"\"\"Perform HPO for a model type to a given channel.\n\n :param model_fact:\n Class of model factory to optimize. 
Must be a child class of Model\n :param channel:\n channel of proton intensities to optimize the model for\n :param num_samples:\n number of samples to run HPO for\n :exp_name:\n Name of experiment for logging in mlflow\n\n :return:\n Dictionary of best found config\n \"\"\"\n\n init_data()\n\n search_space = dict(model_fact.search_space())\n search_space['mlflow_experiment_id'] = create_experiment(name = exp_name + \" // channel: \" + channel) # noqa: E501\n search_space['channel'] = channel\n search_space['fact'] = model_fact\n\n analysis = tune.run(\n train_model,\n num_samples=num_samples,\n verbose=1,\n scheduler=FIFOScheduler(),\n loggers=DEFAULT_LOGGERS + (MLFLowLogger, ),\n resources_per_trial={\n \"cpu\": 1,\n \"gpu\": 0\n },\n config=search_space\n )\n return analysis.get_best_config(metric=\"SC_Val\")\n\n\ndef hpo_all_channels(model_fact: Type[ModelFactory],\n num_samples: int, exp_name: str) -> None:\n \"\"\"Perform HPO for a model type for all channels.\n\n :param model_fact:\n Class of model factory to optimize. Must be a\n child class of ModelFactory\n :param num_samples:\n number of samples per channel to run HPO for\n :exp_name:\n Name of experiment for logging in mlflow\n\n :return:\n None\n \"\"\"\n\n for channel in LABELS[:5]:\n hpo_channel(model_fact, channel, num_samples, exp_name)\n","repo_name":"Tanveer81/deep_horizon","sub_path":"src/hpo/hpo_general_setup.py","file_name":"hpo_general_setup.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"23423267792","text":"import os\nimport re\nimport random\nimport hashlib\nimport hmac\nfrom string import letters\n\nimport webapp2\nimport jinja2\n\nfrom google.appengine.ext import db\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),\n autoescape=True)\n\n# SECRET KEY\nsecret = 'secret'\n\n\ndef render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n\ndef make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())\n\n\ndef check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val\n\n\n# Basic handler for blog. Handles basic and frequently used functions\nclass Handler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n params['user'] = self.user\n return render_str(template, **params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\n def set_secure_cookie(self, name, val):\n cookie_val = make_secure_val(val)\n self.response.headers.add_header(\n 'Set-Cookie',\n '%s=%s; Path=/' % (name, cookie_val))\n\n def read_secure_cookie(self, name):\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)\n\n def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))\n\n def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')\n\n def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))\n\n\ndef render_post(response, post):\n response.out.write('' + post.subject + '
    ')\n response.out.write(post.content)\n\n\n# Helper functions for User model functions\ndef make_salt(length=5):\n return ''.join(random.choice(letters) for x in xrange(length))\n\n\ndef make_pw_hash(name, pw, salt=None):\n if not salt:\n salt = make_salt()\n h = hashlib.sha256(name + pw + salt).hexdigest()\n return '%s,%s' % (salt, h)\n\n\ndef valid_pw(name, password, h):\n salt = h.split(',')[0]\n return h == make_pw_hash(name, password, salt)\n\n\ndef users_key(group='default'):\n return db.Key.from_path('users', group)\n\n\n# Class that handles User database model\nclass User(db.Model):\n name = db.StringProperty(required=True)\n pw_hash = db.StringProperty(required=True)\n email = db.StringProperty()\n\n # User model functions\n @classmethod\n def by_id(cls, uid):\n return User.get_by_id(uid, parent=users_key())\n\n @classmethod\n def by_name(cls, name):\n u = User.all().filter('name =', name).get()\n return u\n\n @classmethod\n def register(cls, name, pw, email=None):\n pw_hash = make_pw_hash(name, pw)\n return User(parent=users_key(),\n name=name,\n pw_hash=pw_hash,\n email=email)\n\n @classmethod\n def login(cls, name, pw):\n u = cls.by_name(name)\n if u and valid_pw(name, pw, u.pw_hash):\n return u\n\n\ndef blog_key(name='default'):\n return db.Key.from_path('blogs', name)\n\n\n# Class that handles creation of post database model\nclass Post(db.Model):\n title = db.StringProperty(required=True)\n content = db.TextProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n last_modified = db.DateTimeProperty(auto_now=True)\n user_id = db.StringProperty(required=True)\n likes = db.StringListProperty()\n parent_post = db.StringProperty()\n\n def render(self):\n self._render_text = self.content.replace('\\n', '
<br>')\n        return render_str(\"post.html\", p=self)\n\n\n# Class that handles displaying all posts in the front page\nclass FrontPage(Handler):\n    def get(self):\n        posts = Post.all().filter('parent_post =', None).order('-created')\n        uid = self.read_secure_cookie('user_id')\n\n        self.render('front.html', posts=posts, uid=uid)\n\n\n# Class that handles individual details of the post\n# post id in the url is used as reference\nclass PostPage(Handler):\n    def get(self, post_id):\n        key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n        post = db.get(key)\n\n        # check for a missing post before touching any of its fields\n        if not post:\n            self.error(404)\n            return\n\n        uid = self.read_secure_cookie('user_id')\n\n        if post.likes and uid in post.likes:\n            likeText = 'unlike'\n        else:\n            likeText = 'like'\n\n        totalLikes = len(post.likes)\n        comments = Post.all().filter('parent_post =', post_id)\n\n        post._render_text = post.content.replace('\\n', '<br>')\n        self.render(\n            \"post.html\",\n            post=post,\n            likeText=likeText,\n            totalLikes=totalLikes,\n            uid=uid,\n            comments=comments)\n\n    def post(self, post_id):\n        if not self.user:\n            return self.redirect('/login')\n\n        title = self.request.get('title')\n        content = self.request.get('content')\n        uid = self.read_secure_cookie('user_id')\n\n        if title and content:\n            post = Post(parent=blog_key(),\n                        title=title,\n                        content=content,\n                        user_id=uid,\n                        parent_post=post_id)\n            post.put()\n            self.redirect('/post/%s' % post_id)\n        else:\n            err_msg = \"title and content, please!\"\n            self.render(\"post.html\",\n                        title=title,\n                        content=content,\n                        error=err_msg)\n\n\n# Class that handles likes of the post\n# post id in the url is used as reference\n# doesn't allow users liking their own posts\nclass LikePage(Handler):\n    def get(self, post_id):\n        key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n        post = db.get(key)\n\n        uid = self.read_secure_cookie('user_id')\n        if not uid:\n            self.render(\"login.html\")\n            return\n        if not post:\n            self.error(404)\n            return\n\n        if post.user_id != uid:\n\n            if post.likes and uid in post.likes:\n                post.likes.remove(uid)\n            else:\n                post.likes.append(uid)\n\n            post.put()\n\n            self.redirect('/post/%s' % str(post.key().id()))\n\n        else:\n            err_msg = 'Owners can\\'t like or unlike their own posts'\n            self.render(\"error.html\", error=err_msg, uid=uid)\n\n\n# Class that handles deletion of a post\n# post id in the url is used as reference\n# Allows deletion if the user is the owner of the post\n# displays warning for other users\nclass DeletePage(Handler):\n    def get(self, post_id):\n        key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n        post = db.get(key)\n\n        if not post:\n            self.redirect(\"/\")\n            return\n\n        uid = self.read_secure_cookie('user_id')\n        if not uid:\n            self.render(\"login.html\")\n            return\n\n        if post.user_id != uid:\n            err_msg = 'Only the owner of this post can delete it'\n            self.render(\"delete.html\", error=err_msg, uid=uid)\n        else:\n            err_msg = ''\n            db.delete(key)\n            self.render(\"delete.html\", error=err_msg, uid=uid)\n\n\n# Class that handles editing of a post.\n# post id in the url is used as reference\n# Allows editing if the user is the owner of the post\n# displays warning for other users\nclass EditPage(Handler):\n    def get(self, post_id):\n        key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n        post = db.get(key)\n\n        if not post:\n            self.error(404)\n            return\n\n        uid = self.read_secure_cookie('user_id')\n        if not uid:\n            self.render(\"login.html\")\n            return\n\n        if post.user_id != uid:\n            err_msg = 'Only the owner of this post can edit it !!'\n            self.render(\"edit.html\", post=post, error=err_msg, uid=uid)\n        else:\n            err_msg = ''\n            self.render(\"edit.html\", post=post, error=err_msg, uid=uid)\n\n    def post(self, post_id):\n        key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n        post = db.get(key)\n\n        uid = self.read_secure_cookie('user_id')\n\n        title = self.request.get('title')\n        content = self.request.get('content')\n\n        if title and content and post.user_id == uid:\n            post.title = title\n            post.content = content\n            post.put()\n            if post.parent_post:\n                redirect_id = post.parent_post\n            else:\n                redirect_id = post.key().id()\n            self.redirect('/post/%s' % str(redirect_id))\n        else:\n            err_msg = \"Please fill in a valid subject and content !!\"\n            self.render(\"edit.html\", post=post, error=err_msg, uid=uid)\n\n\n
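# Round-trip of the cookie-signing helpers defined near the top of this file (the value '42' is a made-up example):\n#   make_secure_val('42')  -> '42|<hmac hex digest>'\n#   check_secure_val(make_secure_val('42'))  -> '42', while any tampered value returns None\n# Class that handles creation of a new post only if the user is signed in\nclass NewPost(Handler):\n\n    # function that verifies if the 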
user is signed in or not\n def get(self):\n uid = self.read_secure_cookie('user_id')\n if self.user:\n self.render(\"newpost.html\", uid=uid)\n else:\n self.redirect(\"/login\")\n\n # function that handles creation of new post\n def post(self):\n if not self.user:\n return self.redirect('/login')\n\n title = self.request.get('title')\n content = self.request.get('content')\n\n uid = self.read_secure_cookie('user_id')\n\n if title and content:\n post = Post(parent=blog_key(),\n title=title,\n content=content,\n user_id=uid)\n post.put()\n self.redirect('/post/%s' % str(post.key().id()))\n else:\n err_msg = \"title and content, please!\"\n self.render(\"newpost.html\",\n title=title,\n content=content,\n error=err_msg)\n\n\n# Class to handle Logging out the user from the blog session\nclass Logout(Handler):\n def get(self):\n self.logout()\n self.redirect('/')\n\n\n# Class to handle login of users into the blog\nclass Login(Handler):\n def get(self):\n self.render('login.html')\n\n def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n\n user = User.login(username, password)\n if user:\n self.login(user)\n self.redirect('/')\n else:\n err_msg = 'Sorry Invalid login, PLease try again !!'\n self.render('login.html', error=err_msg)\n\n\n# Class that handles the signup page\n# shows error if the fields do not match the validation's above.\nclass Signup(Handler):\n\n def get(self):\n self.render(\"signup.html\")\n\n def post(self):\n have_error = False\n self.username = self.request.get('username')\n self.password = self.request.get('password')\n self.verify = self.request.get('verify')\n self.email = self.request.get('email')\n\n params = dict(username=self.username, email=self.email)\n # Validation for username\n USER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n\n def valid_username(username):\n return username and USER_RE.match(username)\n\n # Validation for password\n PASS_RE = re.compile(r\"^.{3,20}$\")\n\n def valid_password(password):\n return password and PASS_RE.match(password)\n\n # Validation for email\n EMAIL_RE = re.compile(r'^[\\S]+@[\\S]+\\.[\\S]+$')\n\n def valid_email(email):\n return not email or EMAIL_RE.match(email)\n\n if not valid_username(self.username):\n params['error_username'] = \"Sorry, Not a valid username.\"\n have_error = True\n\n if not valid_password(self.password):\n params['error_password'] = \"Sorry, Not a valid password.\"\n have_error = True\n\n elif self.password != self.verify:\n params['error_verify'] = \"Oops !! 
passwords didn't match.\"\n have_error = True\n\n if not valid_email(self.email):\n params['error_email'] = \"Sorry, Not a valid email.\"\n have_error = True\n\n if have_error:\n self.render('signup.html', **params)\n else:\n self.done()\n\n def done(self, *a, **kw):\n raise NotImplementedError\n\n\n# Class to create new user for the blog\nclass Register(Signup):\n def done(self):\n # making sure the user is not registered earlier\n user = User.by_name(self.username)\n if user:\n err_msg = 'The user already exists.'\n self.render('signup.html', error_username=err_msg)\n else:\n user = User.register(self.username, self.password, self.email)\n user.put()\n\n self.login(user)\n self.redirect('/')\n\n\n# Welcome page after a user succesfully logs in\nclass Welcome(Handler):\n def get(self):\n if self.user:\n uid = self.read_secure_cookie('user_id')\n self.render('welcome.html', username=self.user.name, uid=uid)\n else:\n self.redirect('/signup')\n\napp = webapp2.WSGIApplication([('/?', FrontPage),\n ('/post/([0-9]+)', PostPage),\n ('/delete/([0-9]+)', DeletePage),\n ('/edit/([0-9]+)', EditPage),\n ('/like/([0-9]+)', LikePage),\n ('/newpost', NewPost),\n ('/logout', Logout),\n ('/login', Login),\n ('/signup', Register),\n ('/welcome', Welcome),\n ],\n debug=True)\n","repo_name":"rahulb246/udacity_multi-user_blog","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":14178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7095971225","text":"import xlwt\n\n\n# 保存结果到xlsx\ndef output(filename, list_data):\n if len(list_data) == 0:\n return\n data_0 = list_data[0]\n book = xlwt.Workbook()\n sh = book.add_sheet('sheet1')\n data_dict = data_0.__dict__\n data_key_list = list(data_dict.keys())\n # 写标题\n index = 0\n for key in data_key_list:\n sh.write(0, index, key)\n index = index + 1\n # 写内容\n column_index = 1\n for item in list_data:\n for i in range(len(data_key_list)):\n data_dict = item.__dict__\n sh.write(column_index, i, data_dict[data_key_list[i]])\n column_index = column_index + 1\n book.save(filename)\n","repo_name":"JichunMa/CrawlBaisi","sub_path":"saveTools.py","file_name":"saveTools.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33956584185","text":"from typing import List\nimport json\nimport modal\n\nfrom modal_base import image_base, stub, Message\nfrom fs_tools import upload_image\n\n\n@stub.cls(\n image=image_base,\n secret=modal.Secret.from_name(\"llm-chat-secret\"),\n)\nclass DALLEChatModel:\n def __init__(self, model: str):\n self.model = model\n\n @modal.method()\n def generate(self, chat: List[Message]):\n from openai import OpenAI\n from PIL import Image\n import requests\n\n client = OpenAI()\n\n prompt = chat[-1].content.replace(\"\\n\", \"\")\n\n yield json.dumps({\"alert\": \"Generating image...\"}) + \"\\n\"\n\n response = client.images.generate(\n model=self.model,\n prompt=prompt,\n size=\"1024x1024\",\n quality=\"hd\",\n n=1,\n )\n\n raw_image_url = response.data[0].url\n image_url = upload_image(\n Image.open(requests.get(raw_image_url, stream=True).raw)\n )\n\n resp = f\"![{prompt}]({image_url})\"\n\n yield json.dumps({\"content\": resp}) + 
\"\\n\"\n","repo_name":"sshh12/llm-chat-web-ui","sub_path":"modal/chat_dalle.py","file_name":"chat_dalle.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"16190296261","text":"############################################################################\r\n# LOGISTIC REGRESSION #\r\n# Note: NJUST Machine Learning Assignment. #\r\n# Task: Logistic Classification #\r\n############################################################################\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom ANN import NN\r\nfrom dataBase import selectDB\r\n\r\nif __name__ == '__main__':\r\n #dn = input('please input Database Name: ')\r\n dn = \"exam\"\r\n\r\n\r\n#dn = \"exam\"\r\n# neural network class\r\nNN = NN()\r\n# select DataBase\r\ndataName, lableName, nClass = selectDB(dn)\r\n# load DataBase\r\ndata, y = NN.loadData(dataName, lableName)\r\n# plot data\r\nNN.plotData(data, y, dn)\r\n# change lable 0 to vector [1 0 0]\r\nlable = NN.newLable(y, nClass)\r\n# shuffle data\r\ndata, lable = NN.shuffleData(data, lable, nClass)\r\n# zeromean data\r\ndata = NN.zeromeanData(data)\r\n\r\n# initialize\r\n[m, d] = data.shape\r\nn_fold = 5\r\nq = 5 # number of neuron in hidden layer\r\nw = NN.initial(q, nClass) # weights of output layer\r\nv = NN.initial(d, q) # weights of hidden layer\r\ngama = NN.initial(q, 1) # bias for hidden layer\r\nteta = NN.initial(nClass, 1) # bias for output layer\r\nLearningRate = 0.05 # learning rate\r\nepoch = 5\r\naccuracy = np.zeros(n_fold)\r\nkCost = 0\r\ncostFunction = []\r\nfor nf in range(n_fold):\r\n # prepare data to 5 fold cross validation\r\n trainData, trainLable, testData, testLable = NN.nFoldData(\r\n data, lable, nf, n_fold)\r\n [m, d] = trainData.shape\r\n\r\n for z in range(epoch):\r\n\r\n for i in range(m):\r\n x = trainData[i, :].reshape(-1, 1)\r\n y = trainLable[i].reshape(-1, 1)\r\n# print(y)\r\n\r\n# hidden layer\r\n alpha = np.dot(v.T, x) + gama\r\n alpha = alpha / np.sum(alpha, axis=0, keepdims=True)\r\n b = NN.activationFunc(alpha, \"sigmoid\")\r\n# output Layer\r\n beta = np.dot(w.T, b) + teta\r\n beta = beta / np.sum(beta, axis=0, keepdims=True)\r\n yHat = NN.activationFunc(beta, \"sigmoid\")\r\n costFunction.append(-np.sum(y*np.log(yHat))/y.shape[1])\r\n\r\n errorOutLayer = ((yHat - y)*yHat*(1-yHat)).reshape(-1, 1)\r\n errorHiddenLayer = np.zeros(q).reshape(-1, 1)\r\n for h in range(q):\r\n sumh = 0\r\n for j in range(nClass-1):\r\n sumh += errorOutLayer[j] * w[h, j] * b[h] * (1-b[h])\r\n errorHiddenLayer[h] = sumh\r\n\r\n# update\r\n w = w - (LearningRate*np.dot(b, errorOutLayer.T))\r\n teta = teta - (LearningRate*(errorOutLayer*1))\r\n v = v - (LearningRate*np.dot(x, errorHiddenLayer.T))\r\n gama = gama - (LearningRate*(errorHiddenLayer*1))\r\n\r\n plt.plot(kCost, costFunction[kCost],\r\n linestyle='--', marker='o', color='g')\r\n plt.pause(0.1)\r\n kCost = kCost + 1\r\n# print(np.mean(costFunction))\r\n\r\n # test phase\r\n [m, d] = testData.shape\r\n numberOfErrore = 0\r\n for i in range(m):\r\n x = trainData[i, :].reshape(-1, 1)\r\n y = trainLable[i].reshape(-1, 1)\r\n\r\n alpha = np.dot(v.T, x) + gama\r\n alpha = alpha / np.sum(alpha, axis=0, keepdims=True)\r\n b = NN.activationFunc(alpha, \"sigmoid\")\r\n\r\n beta = np.dot(w.T, b) + teta\r\n beta = beta / np.sum(beta, axis=0, keepdims=True)\r\n yHat = NN.activationFunc(beta, \"sigmoid\")\r\n\r\n maxIndex = (yHat).argmax()\r\n ys = np.zeros(nClass).reshape(-1, 1)\r\n ys[maxIndex] = 
1\r\n\r\n        if (np.sum(np.power(ys-y, 2)) != 0):\r\n            numberOfErrore = numberOfErrore + 1\r\n    correct = m-numberOfErrore\r\n    accuracy[nf] = (((correct)/m)*100)\r\n\r\n\r\nplt.show()\r\nplt.scatter(np.arange(len(costFunction)), costFunction, color=\"orange\")\r\nprint(\"Accuracy is : \", np.mean(accuracy))\r\n","repo_name":"edgemund/ANN-Backward-Progation-with-Animated-Graph","sub_path":"Logistic Regression on Data 1/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2181774313","text":"import logging\nimport os\nimport sys\nfrom pathlib import Path\n\n\n# directory where Level 4 files are located\nWORKSPACES = Path(os.environ.get(\"WORKSPACES\", \"./workspaces/\"))\nassert WORKSPACES.exists()\n\n# directory to cache sha256 files\nCACHE = os.environ.get(\"CACHE\", None)\nif CACHE is None:\n    CACHE = WORKSPACES / \"cache\"\n    CACHE.mkdir(exist_ok=True)\nelse: # pragma: no cover\n    CACHE = Path(CACHE)\n    assert CACHE.exists()\n\nJOB_SERVER_TOKEN = os.environ.get(\"JOB_SERVER_TOKEN\")\nassert JOB_SERVER_TOKEN\nBACKEND = os.environ.get(\"BACKEND\", \"test-backend\")\n\nJOB_SERVER_ENDPOINT = os.environ.get(\n    \"JOB_SERVER_ENDPOINT\", \"https://jobs.opensafely.org/api/v2\"\n)\nRELEASE_HOST = os.environ.get(\"RELEASE_HOST\")\n\n# used for CORS checking, defaults to job server but is overridable\nSPA_ORIGIN = os.environ.get(\"SPA_ORIGIN\", \"https://jobs.opensafely.org\")\n\nLOG_LEVEL = os.environ.get(\"LOG_LEVEL\", \"info\")\n\n\ndef setup_logging():\n    logger = logging.getLogger(\"hatch\")\n    logger.setLevel(logging.getLevelName(LOG_LEVEL.upper()))\n    handler = logging.StreamHandler(sys.stdout)\n    # if running under uvicorn, reuse its formatter\n    uvicorn_handlers = logging.getLogger(\"uvicorn\").handlers\n    if uvicorn_handlers: # pragma: no cover\n        handler.setFormatter(uvicorn_handlers[0].formatter)\n    logger.addHandler(handler)\n","repo_name":"opensafely-core/release-hatch","sub_path":"hatch/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74464194722","text":"\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def push(self, new_data):\n        node = Node(new_data)\n        node.next = self.head\n        self.head = node\n\n    def delete(self, target):\n        # node = self.head\n        # head = node\n        # while (node and node.next):\n        #     while node.next.data == target:\n        #         node.next = node.next.next\n        #     node = node.next\n        # return head.next if head.data == target else head\n\n        head = self.head\n\n        # 1. drop leading nodes equal to target and re-point the list head\n        while head is not None and head.data == target:\n            nodeToDelete = head\n            head = head.next\n            del nodeToDelete\n        self.head = head\n\n        # 2. create a temp node to traverse through the\n        # list and delete nodes with value equal to\n        # target and adjust links accordingly\n        temp = head\n        if temp is not None:\n            while temp.next is not None:\n                if temp.next.data == target:\n                    nodeToDelete = temp.next\n                    temp.next = temp.next.next\n                    del nodeToDelete\n                else:\n                    temp = temp.next\n\n    def printList(self):\n        current = self.head\n\n        while current:\n            print(current.data, end=' ->')\n            current = current.next\n        print(\"\\n\")\n\n\nll = LinkedList()\ns = [12, 12, 12, 12]\n# s = [12, 10, 12, 15, 12, 13, 20, 12, 14]\nfor item in s:\n    ll.push(item)\n\nprint('list before deleting nodes 12')\nll.printList()\nll.delete(12)\nprint('list after deleting nodes 12')\nll.printList()\n","repo_name":"akinolu52/leetcode","sub_path":"python/linkedList/delete_node.py","file_name":"delete_node.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20803694748","text":"import pygame\r\nfrom math import sin, cos, pi\r\n\r\nclass FTO():\r\n    def __init__(self, size):\r\n        self.puzzleSize = 150 / 3\r\n        self.size = size\r\n        self.state = [[i] * self.size ** 2 for i in range(8)]\r\n        self.scrambled = False\r\n        # Color Scheme: U F R L B D BR BL\r\n        self.colorScheme = [\"#FFFFFF\",\"#00FF2F\",\"#FF0000\",\"#FFFF00\",\"#298FE8\",\"#000000\",\"#9D00FF\",\"#FF7700\"]\r\n\r\n\r\n    @property\r\n    def size(self):\r\n        return self._size # getter for size\r\n    \r\n    @size.setter\r\n    def size(self, size): # setter for size\r\n        if size < 1:\r\n            print(\"invalid size\")\r\n        else:\r\n            self._size = size\r\n            self.puzzleSize = 150 / size\r\n            self.state = [[i] * size ** 2 for i in range(8)]\r\n\r\n\r\n    # orientation, 0 -> U, 1 -> D, 2 -> R, 3 -> L where triangle starts at point and goes in orientation direction\r\n    def triangle(self, surface, x, y, size, orientation, color): # general method for drawing triangles to screen\r\n        width = 30 / self.size # triangle border width\r\n        sizex = size - width*cos((22.5 * pi) / 180) # sizes for triangle color\r\n        sizey = size - width*sin((22.5 * pi) / 180)\r\n\r\n        if orientation == 0:\r\n            pygame.draw.polygon(surface, \"Black\", [(x, y), (x - size, y - size), (x + size, y - size)])\r\n            pygame.draw.polygon(surface, color, [(x, y - width/2), (x - sizex, y - sizey), (x + sizex, y - sizey)])\r\n        elif orientation == 1:\r\n            pygame.draw.polygon(surface, \"Black\", [(x, y), (x + size, y + size), (x - size, y + size)])\r\n            pygame.draw.polygon(surface, color, [(x, y + width/2), (x + sizex, y + sizey), (x - sizex, y + sizey)])\r\n        elif orientation == 2:\r\n            pygame.draw.polygon(surface, \"Black\", [(x, y), (x + size, y - size), (x + size, y + size)])\r\n            pygame.draw.polygon(surface, color, [(x + width/2, y), (x + sizey, y - sizex), (x + sizey, y + sizex)])\r\n        elif orientation == 3:\r\n            pygame.draw.polygon(surface, \"Black\", [(x, y), (x - size, y + size), (x - size, y - size)])\r\n            pygame.draw.polygon(surface, color, [(x - width/2, y), (x - sizey, y + sizex), (x - sizey, y - sizex)])\r\n        else:\r\n            raise ValueError(\"Invalid orientation\")\r\n\r\n    def display(self, surface): # method to display the puzzle on screen\r\n        x = surface.get_width() // 2\r\n        y = surface.get_height() // 2\r\n        size = self.puzzleSize\r\n\r\n        for w in range(2):\r\n            face = 0\r\n            n = 0\r\n            for j in range(self.size):\r\n                for i, k in zip(range(-j, j + 1), range(j * 2 + 1)):\r\n
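                    # n indexes this face's sticker list in drawing order; k % 2 flags the inverted triangles within row j\r\n                    self.triangle(surface, x + i * size, y - j * size - size * (k % 2) + size * self.size * w * 2, size, k % 2, 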
self.colorScheme[self.state[face + w * 5][n]])\r\n n += 1\r\n\r\n face = 1\r\n n = 0\r\n for j in range(self.size):\r\n for i, k in zip(range(j, -j - 1, -1), range(j * 2 + 1)):\r\n self.triangle(surface, x + i * size, y + j * size + size * (k % 2) - size * self.size * w * 2, size, not(k % 2), self.colorScheme[self.state[face + w * 3][n]])\r\n n += 1\r\n\r\n face = 2\r\n n = 0\r\n for j in range(self.size):\r\n for i, k in zip(range(-j, j + 1), range(j * 2 + 1)):\r\n self.triangle(surface, x + j * size + size * (k % 2) - size * self.size * w * 2, y - i * size, size, 2 + k % 2, self.colorScheme[self.state[face + w * 5][n]])\r\n n += 1\r\n\r\n face = 3\r\n n = 0\r\n for j in range(self.size):\r\n for i, k in zip(range(j, -j - 1, -1), range(j * 2 + 1)):\r\n self.triangle(surface, x - j * size - size * (k % 2) + size * self.size * w * 2, y - i * size, size, 3 if k % 2 == 0 else 2, self.colorScheme[self.state[face + w * 3][n]])\r\n n += 1\r\n\r\n # MOVES BELOW\r\n\r\n # 0 1 2 3 4 5 6 7\r\n # U F R L B D BR BL\r\n \"\"\"\r\n T / flip over to get to other faces\r\n 4 5 6 7 8\r\n 1 2 3 \r\n 0\r\n \"\"\"\r\n def R(self, layers): # preforms a clockwise R move on the FTO\r\n size = self.size\r\n\r\n f1 = 0\r\n f2 = 1\r\n f3 = 6\r\n\r\n\r\n for n in range(layers * 2): # swaps bars on U BR & F faces\r\n for j, k in zip(range((n + 1) // 2, size), range(size - 1, (n + 1) // 2 - 1, -1)):\r\n buff = self.state[f1][((j + 1) ** 2) - (n + 1)]\r\n self.state[f1][((j + 1) ** 2) - (n + 1)] = self.state[f2][k ** 2 + n]\r\n self.state[f2][k ** 2 + n] = self.state[f3][((size - (n + 2) // 2) ** 2) + 2 * j - n]\r\n self.state[f3][((size - (n + 2) // 2) ** 2) + 2 * j - n] = buff\r\n\r\n\r\n f4 = 3\r\n f5 = 5\r\n f6 = 4\r\n\r\n for j in range(0, layers): # Swaps bars on L B & D Faces\r\n for i, k in zip(range(j + 1), range(j, -1, -1)):\r\n buff = self.state[f4][((j + 1) ** 2 - 1) - k * 2]\r\n self.state[f4][((j + 1) ** 2 - 1) - k * 2] = self.state[f5][(size - j + k) ** 2 - (2 * k + 1)]\r\n self.state[f5][(size - j + k) ** 2 - (2 * k + 1)] = self.state[f6][(size - 1 - j + i) ** 2 + 2 * i]\r\n self.state[f6][(size - 1 - j + i) ** 2 + 2 * i] = buff\r\n if i > 0 and j > 0:\r\n buff = self.state[f4][((j + 1) ** 2 - 1) - (k + 1) * 2 + 1]\r\n self.state[f4][((j + 1) ** 2 - 1) - (k + 1) * 2 + 1] = self.state[f5][(size - j + (k + 1)) ** 2 - (2 * (k + 1) + 1) + 1]\r\n self.state[f5][(size - j + (k + 1)) ** 2 - (2 * (k + 1) + 1) + 1] = self.state[f6][(size - 1 - j + i) ** 2 + 2 * i - 1]\r\n self.state[f6][(size - 1 - j + i) ** 2 + 2 * i - 1] = buff\r\n\r\n\r\n f1 = 2\r\n for n in range((size // 2) * 2): # Cycles pieces on R face\r\n for j, k in zip(range((n + 1) // 2, size), range(size - 1, (n + 1) // 2 - 1, -1)):\r\n buff = self.state[f1][((size - (n + 2) // 2) ** 2) + 2 * k - n]\r\n self.state[f1][((size - (n + 2) // 2) ** 2) + 2 * k - n] = self.state[f1][k ** 2 + n]\r\n self.state[f1][k ** 2 + n] = self.state[f1][((j + 1) ** 2) - (n + 1)]\r\n self.state[f1][((j + 1) ** 2) - (n + 1)] = buff\r\n\r\n \r\n if size % 2:\r\n buff = self.state[f1][(size // 2) ** 2]\r\n self.state[f1][(size // 2) ** 2] = self.state[f1][(size // 2 + 1) ** 2 - 1]\r\n self.state[f1][(size // 2 + 1) ** 2 - 1] = self.state[f1][(size - 1) ** 2 + (size - 1)]\r\n self.state[f1][(size - 1) ** 2 + (size - 1)] = buff\r\n else:\r\n buff = self.state[f1][(size - 1) ** 2 + (size - 1)]\r\n self.state[f1][(size - 1) ** 2 + (size - 1)] = self.state[f1][(size // 2 + 1) ** 2 - 2]\r\n self.state[f1][(size // 2 + 1) ** 2 - 2] = self.state[f1][(size // 2) ** 2 + 1]\r\n 
self.state[f1][(size // 2) ** 2 + 1] = buff\r\n\r\n\r\n if layers == size: # If you are doing a rotation (turning all layers at once) also cycle pieces on L face\r\n f1 = f2 = f3 = 7\r\n for n in range((size // 2) * 2): \r\n for j, k in zip(range((n + 1) // 2, size), range(size - 1, (n + 1) // 2 - 1, -1)):\r\n buff = self.state[f1][((j + 1) ** 2) - (n + 1)]\r\n self.state[f1][((j + 1) ** 2) - (n + 1)] = self.state[f2][k ** 2 + n]\r\n self.state[f2][k ** 2 + n] = self.state[f3][((size - (n + 2) // 2) ** 2) + 2 * k - n]\r\n self.state[f3][((size - (n + 2) // 2) ** 2) + 2 * k - n] = buff\r\n \r\n if size % 2:\r\n buff = self.state[f1][(size - 1) ** 2 + (size - 1)]\r\n self.state[f1][(size - 1) ** 2 + (size - 1)] = self.state[f1][(size // 2 + 1) ** 2 - 1]\r\n self.state[f1][(size // 2 + 1) ** 2 - 1] = self.state[f1][(size // 2) ** 2]\r\n self.state[f1][(size // 2) ** 2] = buff\r\n\r\n else:\r\n buff = self.state[f1][(size // 2) ** 2 + 1]\r\n self.state[f1][(size // 2) ** 2 + 1] = self.state[f1][(size // 2 + 1) ** 2 - 2]\r\n self.state[f1][(size // 2 + 1) ** 2 - 2] = self.state[f1][(size - 1) ** 2 + (size - 1)]\r\n self.state[f1][(size - 1) ** 2 + (size - 1)] = buff\r\n\r\n def Rp(self, layers): # perfroms counterclockwise R move on FTO\r\n self.R(layers)\r\n self.R(layers)\r\n\r\n def Ro(self): # performs R rotation on the puzzle\r\n self.R(self.size)\r\n \r\n def Rop(self): # performs counterclockwise R rotation on the puzzle\r\n self.Ro()\r\n self.Ro()\r\n\r\n def T(self): # performs T rotation on the puzzle\r\n for j in range(self.size):\r\n for i in range(j * 2 + 1):\r\n buff = self.state[0][(j ** 2) + i]\r\n self.state[0][(j ** 2) + i] = self.state[3][((j + 1) ** 2) - (i + 1)]\r\n self.state[3][((j + 1) ** 2) - (i + 1)] = self.state[1][(j ** 2) + i]\r\n self.state[1][(j ** 2) + i] = self.state[2][((j + 1) ** 2) - (i + 1)]\r\n self.state[2][((j + 1) ** 2) - (i + 1)] = buff\r\n\r\n buff = self.state[4][(j ** 2) + i]\r\n self.state[4][(j ** 2) + i] = self.state[7][((j + 1) ** 2) - (i + 1)]\r\n self.state[7][((j + 1) ** 2) - (i + 1)] = self.state[5][(j ** 2) + i]\r\n self.state[5][(j ** 2) + i] = self.state[6][((j + 1) ** 2) - (i + 1)]\r\n self.state[6][((j + 1) ** 2) - (i + 1)] = buff\r\n\r\n def Tp(self): # performs counterclockwise T rotation on the puzz;e\r\n self.T()\r\n self.T()\r\n self.T()\r\n\r\n # rest of the moves are defined by moves above\r\n\r\n def U(self, layers):\r\n self.T()\r\n self.R(layers)\r\n self.Tp()\r\n\r\n def Up(self, layers):\r\n self.U(layers)\r\n self.U(layers)\r\n\r\n def F(self, layers):\r\n self.Ro()\r\n self.U(layers)\r\n self.Rop()\r\n\r\n def Fp(self, layers):\r\n self.F(layers)\r\n self.F(layers)\r\n\r\n def L(self, layers):\r\n self.T()\r\n self.T()\r\n self.R(layers)\r\n self.T()\r\n self.T()\r\n\r\n def Lp(self, layers):\r\n self.L(layers)\r\n self.L(layers)\r\n\r\n def D(self, layers):\r\n self.Ro()\r\n self.L(layers)\r\n self.Rop()\r\n \r\n def Dp(self, layers):\r\n self.D(layers)\r\n self.D(layers)\r\n \r\n def B(self, layers):\r\n self.Rop()\r\n self.L(layers)\r\n self.Ro()\r\n \r\n def Bp(self, layers):\r\n self.Rop()\r\n self.Lp(layers)\r\n self.Ro()\r\n\r\n def BR(self, layers):\r\n self.Rop()\r\n self.U(layers)\r\n self.Ro()\r\n\r\n def BRp(self, layers):\r\n self.Rop()\r\n self.Up(layers)\r\n self.Ro()\r\n\r\n def BL(self, layers):\r\n self.T()\r\n self.B(layers)\r\n self.Tp()\r\n\r\n def BLp(self, layers):\r\n self.T()\r\n self.Bp(layers)\r\n self.Tp()\r\n\r\n def Uo(self):\r\n self.Rop()\r\n self.T()\r\n self.T()\r\n\r\n def Uop(self):\r\n 
self.Uo() \r\n self.Uo() ","repo_name":"TipsterTrickster/FTOSIM-ECE198Project","sub_path":"FTO.py","file_name":"FTO.py","file_ext":"py","file_size_in_byte":11158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19114078777","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 19 09:26:34 2022\r\n\r\n@author: muell\r\n\"\"\"\r\nimport math\r\n\r\n# January 1st 1901 was a tuesday. Formula gives the first day of the following years.\r\ndef jan1(t):\r\n return (2 + t + math.floor(t/4) - math.floor(t/100) + math.floor(t/400)) % 7\r\n# Create a list of all first days of the year\r\nyears = [jan1(t) for t in range(0,100)]\r\n\r\ndaysOfMonths = [31,28,31,30,31,30,31,31,30,31,30,31]\r\nmonths=[years]\r\n\r\nfeb = [(months[0][j-1]+31)%7 for j in range(1,101)]\r\nmonths.append(feb)\r\nmonths.append([])\r\nfor i in range(1,101):\r\n if i == 100:\r\n months[2].append((months[1][i-1] + 28)%7)\r\n elif i % 4 == 0:\r\n months[2].append((months[1][i-1] + 29)%7)\r\n else:\r\n months[2].append((months[1][i-1] + 28)%7)\r\nfor k in range(3,12):\r\n months.append([(months[k-1][j-1]+daysOfMonths[k-1])%7 for j in range(1,101)])\r\nsundays = []\r\nfor l in range(0,12):\r\n s= months[l].count(0)\r\n sundays.append(s)\r\nprint(months)\r\nprint(sum(sundays))","repo_name":"ATC-mmueller/project_euler_solutions","sub_path":"solutions/eulerproblem19.py","file_name":"eulerproblem19.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73014223201","text":"import tensorflow as tf\r\nimport tensorflow_datasets as tfds\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras import regularizers\r\n\r\nassert 'COLAB_TPU_ADDR' in os.environ, 'Missin TPU?'\r\nif('COLAB_TPU_ADDR') in os.environ:\r\n TF_MASTER = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])\r\nelse:\r\n TF_MASTER = ''\r\ntpu_address = TF_MASTER\r\n\r\n\r\nresolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu_address)\r\ntf.config.experimental_connect_to_cluster(resolver)\r\ntf.tpu.experimental.initialize_tpu_system(resolver)\r\n\r\nstrategy = tf.distribute.TPUStrategy(resolver)\r\n\r\n\r\ndef create_model():\r\n return tf.keras.Sequential([\r\n tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 3)),\r\n tf.keras.layers.MaxPooling2D((2, 2)),\r\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\r\n tf.keras.layers.MaxPooling2D((2, 2)),\r\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\r\n tf.keras.layers.MaxPooling2D((2, 2)),\r\n tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.001)),\r\n tf.keras.layers.MaxPooling2D((2, 2)),\r\n tf.keras.layers.Flatten(),\r\n tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.001)),\r\n tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.001)),\r\n tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(5, activation='softmax')# всего пять классов цветов\r\n \r\n ])\r\n\r\n\r\ndef get_train_and_val_dataset(batch_size, is_training=True):\r\n if(is_training):\r\n dataset, info = tfds.load(name='tf_flowers',\r\n split='train[:80%]', \r\n with_info = True,\r\n as_supervised=True,\r\n try_gcs=True)\r\n else:\r\n dataset, info = tfds.load(name='tf_flowers',\r\n split='train[80%:90%]', \r\n with_info = True,\r\n as_supervised=True,\r\n 
try_gcs=True)\r\n \r\n def scale(image, label):\r\n image = tf.cast(image, tf.float32)\r\n image = tf.image.resize(image, [224, 224]) # изменение всех изображений на вход до (None, 224, 224)\r\n image /= 255.0 # Нормализация\r\n return image, label\r\n \r\n dataset = dataset.map(scale)\r\n \r\n if is_training:\r\n dataset = dataset.shuffle(2936)#Перемешивание обучающей выборки\r\n dataset = dataset.repeat()\r\n \r\n dataset = dataset.batch(batch_size)\r\n return dataset\r\n\r\ndef get_final_dataset(batch_size):\r\n dataset, info = tfds.load(name='tf_flowers',\r\n split='train[90%:]', \r\n with_info = True,\r\n as_supervised=True,\r\n try_gcs=True)\r\n def scale(image, label):\r\n image = tf.cast(image, tf.float32)\r\n image = tf.image.resize(image, [224, 224]) # изменение всех изображений на вход до (None, 224, 224)\r\n image /= 255.0 # Нормализация\r\n return image, label \r\n dataset = dataset.map(scale)\r\n \r\n #dataset = dataset.shuffle(2936)#Перемешивание обучающей выборки\r\n #dataset = dataset.repeat()\r\n \r\n dataset = dataset.batch(batch_size)\r\n return dataset\r\n\r\n\r\ndef create_xception_model(input_shape=(224, 224, 3), num_classes=5):\r\n #Загрузка предварительно обученной модели Xception без головной части\r\n base_model = tf.keras.applications.Xception(include_top=False, input_shape=input_shape)\r\n\r\n #Добавление головной части\r\n x = base_model.output\r\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\r\n x = tf.keras.layers.Dense(1024, activation='relu')(x)\r\n x = tf.keras.layers.Dropout(0.5)(x)\r\n x = tf.keras.layers.Dense(num_classes, activation='softmax')(x)\r\n\r\n #Объединение предварительно обученной модели и головной части в единую модель\r\n model = tf.keras.models.Model(inputs=base_model.input, outputs=x)\r\n\r\n #Заморозка слоев предварительно обученной модели\r\n for layer in base_model.layers:\r\n layer.trainable = False\r\n\r\n return model\r\n\r\nbatch_size = 1024 #Размер пакета\r\nepochs = 1000 #Количество эпох, на тензорных процессорах можно делать много проверок\r\nexecution_steps = 1000 #Количество шагов перед обновлением весов\r\n#Загрузка и создание обучающей и проверочной(валидационной) выборки\r\ntrain_dataset = get_train_and_val_dataset(batch_size, True)\r\nvalidation_dataset = get_train_and_val_dataset(batch_size, False)\r\nsteps_per_epoch = 2936 // batch_size\r\nvalidation_steps = len(validation_dataset) // batch_size\r\n\r\n\r\nwith strategy.scope():\r\n xmodel = create_xception_model()\r\n xmodel.compile(optimizer='adagrad', steps_per_execution=execution_steps, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['sparse_categorical_accuracy'])\r\n x_history = xmodel.fit(train_dataset, epochs=epochs, steps_per_epoch=steps_per_epoch, validation_data=validation_dataset)\r\n\r\n\r\n#Переменные для графика\r\nacc = x_history.history['sparse_categorical_accuracy']\r\nval_acc = x_history.history['val_sparse_categorical_accuracy']\r\nloss = x_history.history['loss']\r\nval_loss = x_history.history['val_loss']\r\nepochs_range = range(epochs)\r\n\r\n\r\n#График при помощи matplotlib\r\nplt.figure(figsize=(15, 15))\r\nplt.subplot(2, 2, 1)\r\nplt.plot(epochs_range, acc, label='Тренировочная точность')\r\nplt.plot(epochs_range, val_acc, label='Валидационная точность')\r\nplt.legend(loc='lower right')\r\nplt.title('Тренировочная и валидационная точность')\r\n\r\nplt.subplot(2, 2, 2)\r\nplt.plot(epochs_range, loss, label='Тренировочная потеря')\r\nplt.plot(epochs_range, val_loss, label='Валидационная 
потеря')\r\nplt.legend(loc='upper right')\r\nplt.title('Тренировочная и валидационная точность')\r\nplt.show()\r\n\r\n\r\n#всего три выборки: тренировочная(train_dataset), валидационная(validation_dataset) и тестовая(test_dataset)\r\n#тренировочная 0:80 \r\n#валидационная 80:90\r\n#тестовая 90:100\r\ntest_dataset = get_final_dataset(batch_size)\r\ntest_images, test_labels = next(iter(test_dataset.take(10)))\r\n#Можно использоать информацию о классах из info, но мне нужно было перевести названия классов и их не слишком много, поэтому я решил их инициализировать. Если количество классов большое, например их 100 или больше, то лучше обращаться к ним через info.\r\nclass_names = ['Одуванчик', 'Ромашка', 'Тюльпаны', 'Подсолнухи', 'Розы']\r\n\r\ntest_loss, test_accuracy = xmodel.evaluate(test_dataset)\r\nprint('Test loss: {}, Test accuracy: {}'.format(test_loss, test_accuracy))\r\n\r\n# Получение предсказаний нейросети для 10 изображений\r\npredictions = xmodel.predict(test_images)\r\n\r\nfig, axes = plt.subplots(nrows=2, ncols=5, figsize=(15, 6),\r\n subplot_kw={'xticks': [], 'yticks': []})\r\nfor i, ax in enumerate(axes.flat):\r\n # Отображение изображения\r\n ax.imshow(test_images[i])\r\n # Отображение меток и предсказаний\r\n true_label = class_names[test_labels[i]]\r\n pred_label = class_names[np.argmax(predictions[i])]\r\n if true_label == pred_label:\r\n ax.set_title(\"Это: {}, ИИ: {}\".format(true_label, pred_label), color='green')\r\n else:\r\n ax.set_title(\"Это: {}, ИИ: {}\".format(true_label, pred_label), color='red')\r\n\r\nplt.tight_layout()\r\nplt.show()","repo_name":"laf3r/FlowerNet","sub_path":"FlowerNet.py","file_name":"FlowerNet.py","file_ext":"py","file_size_in_byte":8587,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32120871687","text":"L=[]\nT=[]\nwhile True:\n x=int(input())\n if x==0:\n break\n else:\n L.append(x)\nfor i in L:\n if i>0 and i%2==0:\n T.append(i)\nif len(L) == 0:\n print('Khong hop le')\nelif len(T)==0:\n print(0)\nelse:\n print(sum(T)/len(T))\n\n\n\n# def inp():\n# n=int(input())\n# return n \n# def FeeShip(n):\n# if n==0:\n# cost=0\n# else:\n# cost=5.95+(n-1)*3.75\n# return cost\n# def ShowFee(cost):\n# print(cost)\n# n=inp()\n# cost=FeeShip(n)\n# ShowFee(cost) \n\n\n# A = []\n# N = []\n# M = []\n# while True:\n# x=int(input())\n# ifx==0:\n# break\n# else:\n# A.append(x)\n# for i in A:\n# if i%2==0:\n# N.append(i)\n# if i%2==1:\n# M.append(i)\n# N.sort()\n# M.sort()\n# for i in N: \n# print(i,end=' ')\n# print()\n# for i in M:\n# print(i,end=' ')\n \n \n# A = []\n# M = []\n# N = []\n\n# while True:\n# n = int(input())\n# if n == 0:\n# break\n# else:\n# A.append(n)\n\n# for i in A:\n# if i % 2 == 0:\n# N.append(i)\n# else:\n# M.append(i)\n\n# N.sort()\n# M.sort()\n\n# for i in N:\n# print(i,end=' ')\n# print()\n# for i in M:\n# print(i, end=' ')","repo_name":"tranletuanh2801/CoSoLapTrinh","sub_path":"Luyencode/b5.py","file_name":"b5.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37247688952","text":"# this is our main driver file . 
\n\nimport pygame as p \nimport engineChess\n\nWIDTH = 512\nHEIGHT = 512\nDIMENSION = 8\nSQ_SIZE = HEIGHT // DIMENSION\nMAX_FPS = 15\nIMAGES = {}\n\ndef loadImages():\n pieces = [\"wR\",\"wN\",\"wB\",\"wQ\",\"wK\",\"bR\",\"bN\",\"bB\",\"bQ\",\"bK\"]\n for piece in pieces:\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\"+piece+\".png\"),(SQ_SIZE,SQ_SIZE))\n\ndef main():\n p.init()\n screen = p.display.set_mode((WIDTH,HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n gs = engineChess.GameState() # gs = game state","repo_name":"uygav/chess","sub_path":"mainChess.py","file_name":"mainChess.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"30942113756","text":"\"\"\"\nFind all 6-digit numbers where the sum of the first 3-digits of the number plus the\nlast 3-digits of the number equals the square-root of the original 6-digit number.\n\"\"\"\n\nimport math\nimport project.config as config\n\n\nclass Solution():\n def find_six_digit_number(self):\n \"\"\"\n :returns: List - a List of six digit numbers\n \"\"\"\n ret = []\n for number in range(100000, 1000000):\n str_number = str(number)\n part1 = int(str_number[0:3])\n part2 = int(str_number[3:6])\n if (part1 + part2) == math.sqrt(number):\n if config.debug: print(\"%s + %s == math.sqrt(%s)\" % (part1, part2, number))\n ret.append(number)\n return ret\n\n\ndef main():\n numbers = Solution().find_six_digit_number()\n print(\"numbers: %s\" % numbers)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gradeawarrior/python-interview-problems","sub_path":"project/sqrt_six_digit_numbers.py","file_name":"sqrt_six_digit_numbers.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"41421751128","text":"\"\"\"\nDjango settings for issue_tracker project.\n\"\"\"\n\nimport os\nimport dj_database_url\nfrom django.contrib.messages import constants as messages\n\nif os.environ.get('DEVELOPMENT') == '1':\n development = True\n print('DEVELOPMENT=TRUE')\nelse:\n development = False\n print('DEVELOPMENT=FALSE')\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = os.environ.get('SECRET_KEY')\n\nDEBUG = False\n\nALLOWED_HOSTS = ['localhost',\n os.environ.get('HOSTNAME'),\n ]\n\n# Application definition\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_forms_bootstrap',\n 'accounts',\n 'tickets',\n 'taggit',\n 'simple_history',\n 'rest_framework',\n 'crispy_forms',\n 'checkout',\n 'storages',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'simple_history.middleware.HistoryRequestMiddleware',\n]\n\nROOT_URLCONF = 'issue_tracker.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 
'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'issue_tracker.wsgi.application'\n\n# Database\n\nif \"DATABASE_URL\" in os.environ and development is False:\n print('Database URL found. Using POSTGRESQL')\n DATABASES = {\n 'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))\n }\nelse:\n print(\"Database URL not found. Using SQLite instead.\")\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n }\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nAWS_S3_OBJECT_PARAMETERS = {\n 'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',\n 'CacheControl': 'max-age=94608000'\n}\n\nAWS_STORAGE_BUCKET_NAME = 'django-issue-tracker-1'\nAWS_S3_REGION_NAME = 'eu-west-1'\nAWS_ACCESS_KEY = os.environ.get(\"AWS_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS = os.environ.get(\"AWS_SECRET_ACCESS_KEY\")\n\nAWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\n\nSTATICFILES_LOCATION = 'static'\nSTATICFILES_STORAGE = 'custom_storages.StaticStorage'\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )\n\nMEDIAFILES_LOCATION = 'media'\nDEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\nSTRIPE_PUBLISHABLE = os.getenv('STRIPE_PUBLISHABLE')\nSTRIPE_SECRET = os.getenv('STRIPE_SECRET')\n\nTAGGIT_CASE_INSENSITIVE = True\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\nMESSAGE_TAGS = {\n messages.DEBUG: 'alert-info',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-danger',\n}\n","repo_name":"tdunn891/trackit-issue-tracker","sub_path":"issue_tracker/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"13215912217","text":"\"\"\"Uses PPO to training an attack policy against a fixed, embedded policy.\"\"\"\n\nimport functools\nimport json\nimport logging\nimport os\nimport os.path as osp\nimport pkgutil\nfrom typing import Callable, Iterable\n\nfrom gym.spaces import Box\nfrom sacred import Experiment\nfrom sacred.observers import FileStorageObserver\nimport stable_baselines\nfrom stable_baselines.common import callbacks\nfrom stable_baselines.common.vec_env.vec_normalize import VecNormalize\nimport tensorflow as tf\n\nfrom aprl.common import utils\nfrom aprl.envs.gym_compete import (\n GameOutcomeMonitor,\n get_policy_type_for_zoo_agent,\n load_zoo_agent_params,\n)\nfrom aprl.envs.multi_agent 
import (\n FlattenSingletonVecEnv,\n MergeAgentVecEnv,\n make_dummy_vec_multi_env,\n make_subproc_vec_multi_env,\n)\nfrom aprl.envs.observation_masking import make_mask_agent_wrappers\nimport aprl.envs.wrappers\nfrom aprl.policies.loader import load_backward_compatible_model, load_policy, mpi_unavailable_error\nfrom aprl.policies.wrappers import MultiPolicyWrapper\nfrom aprl.training.embedded_agents import CurryVecEnv, TransparentCurryVecEnv\nfrom aprl.training.logger import setup_logger\nfrom aprl.training.lookback import DebugVenv, LookbackRewardVecWrapper, OldMujocoResettableWrapper\nfrom aprl.training.scheduling import ConstantAnnealer, Scheduler\nfrom aprl.training.shaping_wrappers import apply_embedded_agent_wrapper, apply_reward_wrapper\n\ntrain_ex = Experiment(\"train\")\npylog = logging.getLogger(\"aprl.train\")\n\n\nSaveCallback = Callable[[str], None]\n\n\ndef _save(model, root_dir: str, save_callbacks: Iterable[SaveCallback]) -> None:\n os.makedirs(root_dir, exist_ok=True)\n model_path = osp.join(root_dir, \"model.pkl\")\n model.save(model_path)\n for f in save_callbacks:\n f(root_dir)\n\n\nclass CheckpointCallback(callbacks.BaseCallback):\n \"\"\"Custom checkpointing, saving model in directory and recursively calling `save_callbacks`.\"\"\"\n\n def __init__(self, out_dir: str, save_callbacks: Iterable[SaveCallback], *args, **kwargs):\n \"\"\"\n Builds a CheckpointCallback.\n\n `save_callbacks` used to save auxiliary information, e.g. `VecNormalize` instances.\n\n :param out_dir: directory to save checkpoints to.\n :param save_callbacks: callbacks to recursively invoke.\n \"\"\"\n super(CheckpointCallback, self).__init__(*args, **kwargs)\n self.out_dir = out_dir\n self.save_callbacks = save_callbacks\n\n def _on_step(self) -> bool:\n checkpoint_dir = osp.join(self.out_dir, \"checkpoint\", f\"{self.num_timesteps:012}\")\n _save(self.model, checkpoint_dir, self.save_callbacks)\n return True\n\n\nclass LoggerOnlyLogCallback(callbacks.BaseCallback):\n \"\"\"Calls `obj.log_callback(self.logger)`.\"\"\"\n\n def __init__(self, obj, *args, **kwargs):\n super(LoggerOnlyLogCallback, self).__init__(*args, **kwargs)\n assert hasattr(obj, \"log_callback\")\n self.obj = obj\n\n def _on_step(self) -> bool:\n self.obj.log_callback(self.logger)\n return True\n\n\n@train_ex.capture\ndef old_ppo2(\n _seed,\n env,\n out_dir,\n total_timesteps,\n num_env,\n policy,\n batch_size,\n load_policy,\n learning_rate,\n rl_args,\n logger,\n log_callbacks,\n save_callbacks,\n):\n try:\n from baselines.ppo2 import ppo2 as ppo2_old\n from baselines import logger as logger_old\n except ImportError as e:\n msg = \"{}. HINT: you need to install (OpenAI) Baselines to use old_ppo2\".format(e)\n raise ImportError(msg)\n\n pylog.warning(\n \"'old_ppo2' is deprecated; use 'ppo2' where possible. 
\"\n \"Logging and save callbacks not supported amongst other features.\"\n )\n logger_old.configure(os.path.join(out_dir, \"old_rl\"))\n\n NETWORK_MAP = {\n \"MlpPolicy\": \"mlp\",\n \"MlpLstmPolicy\": \"lstm\",\n \"CnnPolicy\": \"cnn\",\n \"CnnLstmPolicy\": \"cnn_lstm\",\n }\n network = NETWORK_MAP[policy]\n\n graph = tf.Graph()\n sess = utils.make_session(graph)\n load_path = load_policy[\"path\"]\n if load_path is not None:\n assert load_policy[\"type\"] == \"old_ppo2\"\n with graph.as_default():\n with sess.as_default():\n model = ppo2_old.learn(\n network=network,\n env=env,\n nsteps=batch_size // num_env,\n total_timesteps=total_timesteps,\n load_path=load_path,\n lr=learning_rate,\n seed=_seed,\n **rl_args,\n )\n\n final_path = osp.join(out_dir, \"final_model\")\n _save(model, final_path, save_callbacks)\n\n return final_path\n\n\n@train_ex.capture\ndef _stable(\n cls,\n our_type,\n callback_key,\n callback_mul,\n _seed,\n env,\n env_name,\n out_dir,\n total_timesteps,\n policy,\n load_policy,\n rl_args,\n embed_index,\n debug,\n logger,\n log_callbacks,\n save_callbacks,\n log_interval,\n checkpoint_interval,\n **kwargs,\n):\n kwargs = dict(env=env, verbose=1 if not debug else 2, **kwargs, **rl_args)\n\n if load_policy[\"path\"] is not None:\n if load_policy[\"type\"] == our_type:\n # SOMEDAY: Counterintuitively this inherits any extra arguments saved in the policy\n model = load_backward_compatible_model(cls, load_policy[\"path\"], **kwargs)\n elif load_policy[\"type\"] == \"zoo\":\n policy_cls, policy_kwargs = get_policy_type_for_zoo_agent(\n env_name, transparent_params=None\n )\n kwargs[\"policy_kwargs\"] = policy_kwargs\n model = cls(policy=policy_cls, **kwargs)\n\n our_idx = 1 - embed_index # TODO: code duplication?\n params = load_zoo_agent_params(load_policy[\"path\"], env_name, our_idx)\n # We do not need to restore train_model, since it shares params with act_model\n model.act_model.restore(params)\n else:\n model = cls(policy=policy, seed=_seed, **kwargs)\n\n checkpoint_callback = callbacks.EveryNTimesteps(\n n_steps=checkpoint_interval, callback=CheckpointCallback(out_dir, save_callbacks)\n )\n log_callback = callbacks.EveryNTimesteps(\n n_steps=log_interval, callback=callbacks.CallbackList(log_callbacks)\n )\n callback = callbacks.CallbackList([checkpoint_callback, log_callback])\n\n model.learn(total_timesteps=total_timesteps, log_interval=1, callback=callback)\n final_path = osp.join(out_dir, \"final_model\")\n _save(model, final_path, save_callbacks)\n model.sess.close()\n return final_path\n\n\ndef _get_mpi_num_proc():\n # SOMEDAY: If we end up using MPI-based algorithms regularly, come up with a cleaner solution.\n from mpi4py import MPI # pytype:disable=import-error\n\n if MPI is None:\n num_proc = 1\n else:\n num_proc = MPI.COMM_WORLD.Get_size()\n return num_proc\n\n\n@train_ex.capture\ndef gail(batch_size, learning_rate, expert_dataset_path, **kwargs):\n from aprl.training.gail_dataset import ExpertDatasetFromOurFormat\n\n num_proc = _get_mpi_num_proc()\n if expert_dataset_path is None:\n raise ValueError(\"Must set expert_dataset_path to use GAIL.\")\n expert_dataset = ExpertDatasetFromOurFormat(expert_dataset_path)\n kwargs[\"d_stepsize\"] = learning_rate(1)\n kwargs[\"vf_stepsize\"] = learning_rate(1)\n return _stable(\n stable_baselines.GAIL,\n our_type=\"gail\",\n expert_dataset=expert_dataset,\n callback_key=\"timesteps_so_far\",\n callback_mul=1,\n timesteps_per_batch=batch_size // num_proc,\n **kwargs,\n )\n\n\n
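# Worked example (illustrative figures, not from the original script): for the MPI-backed\n# algorithms below each worker collects batch_size // num_proc transitions per update, so\n# with the paper's batch_size of 16384 and 4 MPI workers every worker gathers 4096\n# timesteps while the effective global batch stays at 16384. The worker count depends on\n# how mpirun launches this script.\n\n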
@train_ex.capture\ndef ppo1(batch_size, learning_rate, **kwargs):\n num_proc = _get_mpi_num_proc()\n pylog.warning(\"Assuming constant learning rate schedule for PPO1\")\n optim_stepsize = learning_rate(1) # PPO1 does not support a callable learning_rate\n return _stable(\n stable_baselines.PPO1,\n our_type=\"ppo1\",\n callback_key=\"timesteps_so_far\",\n callback_mul=batch_size,\n timesteps_per_actorbatch=batch_size // num_proc,\n optim_stepsize=optim_stepsize,\n schedule=\"constant\",\n **kwargs,\n )\n\n\n@train_ex.capture\ndef ppo2(batch_size, num_env, learning_rate, **kwargs):\n return _stable(\n stable_baselines.PPO2,\n our_type=\"ppo2\",\n callback_key=\"update\",\n callback_mul=batch_size,\n n_steps=batch_size // num_env,\n learning_rate=learning_rate,\n **kwargs,\n )\n\n\n@train_ex.capture\ndef sac(batch_size, learning_rate, **kwargs):\n return _stable(\n stable_baselines.SAC,\n our_type=\"sac\",\n callback_key=\"step\",\n callback_mul=1,\n batch_size=batch_size,\n learning_rate=learning_rate,\n **kwargs,\n )\n\n\n@train_ex.config\ndef train_config():\n # Logging\n root_dir = \"data/baselines\" # root of directory to store baselines log\n exp_name = \"default\" # name of experiment\n\n # Environment\n env_name = \"multicomp/SumoAnts-v0\" # Gym environment ID\n num_env = 8 # number of environments to run in parallel\n total_timesteps = 4096 # total number of timesteps to train for\n\n # Embedded Agent Config\n # Typically this is the victim, but for victim hardening this could be the adversary\n embed_index = 0 # index embedded agent plays as\n embed_type = None # any type supported by aprl.policies.loader\n embed_path = None # path or other unique identifier\n embed_types = None # list of types for embedded agents\n embed_paths = None # list of paths for embedded agents\n\n mask_embed = False # should embedded agent's observations be limited\n mask_embed_kwargs = { # control how embedded agent's observations are limited\n \"masking_type\": \"initialization\",\n }\n\n # RL Algorithm Hyperparameters\n rl_algo = \"ppo2\" # RL algorithm to use\n policy = \"MlpPolicy\" # policy network type\n batch_size = 2048 # batch size\n learning_rate = 3e-4 # learning rate\n normalize = True # normalize environment reward\n normalize_observations = True # if normalize, then normalize environment observations too\n rl_args = dict() # algorithm-specific arguments\n\n # General\n checkpoint_interval = 131072 # save weights to disk after this many timesteps\n log_interval = 2048 # log statistics to disk after this many timesteps\n log_output_formats = None # custom output formats for logging\n debug = False # debug mode; may run more slowly\n seed = 0 # random seed\n _ = locals() # quieten flake8 unused variable warning\n del _\n\n\n@train_ex.config\ndef adversary_policy_config(rl_algo, embed_type, embed_path):\n load_policy = { # fine-tune this policy\n \"path\": None, # path with policy weights\n \"type\": rl_algo, # type supported by aprl.policies.loader\n }\n adv_noise_params = { # param dict for epsilon-ball noise policy added to zoo policy\n \"noise_val\": None, # size of noise ball. 
Set to nonnegative float to activate.\n \"base_path\": embed_path, # path of agent to be wrapped with noise ball\n \"base_type\": embed_type, # type of agent to be wrapped with noise ball\n }\n transparent_params = None # params for transparent embedded policies\n expert_dataset_path = None # path to trajectory data to train GAIL\n lookback_params = { # parameters for doing lookback white-box attacks\n \"lb_num\": 0, # number of lookback venvs, if zero, lookback is disabled\n \"lb_mul\": 0.05, # amount by which we weight differences in lookback\n \"lb_path\": None, # path of lookback base policy\n \"lb_type\": rl_algo, # type of lookback base policy\n }\n\n _ = locals() # quieten flake8 unused variable warning\n del _\n\n\nDEFAULT_CONFIGS = {}\n\n\ndef load_default(env_name, config_dir):\n default_config = DEFAULT_CONFIGS.get(env_name, \"default.json\")\n fname = os.path.join(\"configs\", config_dir, default_config)\n config = pkgutil.get_data(\"aprl\", fname)\n return json.loads(config)\n\n\n@train_ex.config\ndef wrappers_config(env_name):\n rew_shape = True # enable reward shaping\n rew_shape_params = load_default(env_name, \"rew\") # parameters for reward shaping\n\n embed_noise = False # enable adding noise to embedded agents\n embed_noise_params = load_default(env_name, \"noise\") # parameters for noise\n\n _ = locals() # quieten flake8 unused variable warning\n del _\n\n\n@train_ex.named_config\ndef no_embed():\n \"\"\"Does not load and embed another agent. Useful for debugging, allowing training in a\n single-agent environment.\n \"\"\"\n embed_types = []\n embed_paths = []\n\n _ = locals() # quieten flake8 unused variable warning\n del _\n\n\nPAPER_HYPERPARAMS = dict(\n total_timesteps=int(20e6),\n batch_size=16384,\n learning_rate=3e-4,\n rl_args=dict(ent_coef=0.0, nminibatches=4, noptepochs=4),\n)\n\nSPARSE_REWARD = dict(rew_shape=True, rew_shape_params=dict(anneal_frac=0.0))\n\n\n@train_ex.named_config\ndef paper():\n \"\"\"Same hyperparameters as ICLR 2020 paper.\"\"\"\n locals().update(**PAPER_HYPERPARAMS)\n locals().update(**SPARSE_REWARD)\n\n\n@train_ex.capture\ndef build_env(\n out_dir,\n _seed,\n env_name,\n num_env,\n embed_types,\n embed_index,\n mask_embed,\n mask_embed_kwargs,\n lookback_params,\n debug,\n):\n pre_wrappers = []\n if lookback_params[\"lb_num\"] > 0:\n pre_wrappers.append(OldMujocoResettableWrapper)\n\n agent_wrappers = {}\n if mask_embed:\n agent_wrappers = make_mask_agent_wrappers(env_name, embed_index, **mask_embed_kwargs)\n\n if len(embed_types) == 0:\n our_idx = 0\n else:\n our_idx = 1 - embed_index\n\n def env_fn(i):\n return aprl.envs.wrappers.make_env(\n env_name,\n _seed,\n i,\n out_dir,\n our_idx,\n pre_wrappers=pre_wrappers,\n agent_wrappers=agent_wrappers,\n )\n\n if not debug and num_env > 1:\n make_vec_env = make_subproc_vec_multi_env\n else:\n make_vec_env = make_dummy_vec_multi_env\n multi_venv = make_vec_env([functools.partial(env_fn, i) for i in range(num_env)])\n if debug and lookback_params[\"lb_num\"] > 0:\n multi_venv = DebugVenv(multi_venv)\n\n if len(embed_types) == 0:\n assert multi_venv.num_agents == 1, \"No embedding only works in single-agent environments.\"\n else:\n assert multi_venv.num_agents == 2, \"Need two-agent environment when agent embedded.\"\n\n return multi_venv, our_idx\n\n\n@train_ex.capture\ndef multi_wrappers(multi_venv, env_name, log_callbacks):\n if env_name.startswith(\"multicomp/\"):\n game_outcome = GameOutcomeMonitor(multi_venv)\n log_callback = LoggerOnlyLogCallback(game_outcome)\n 
log_callbacks.append(log_callback)\n multi_venv = game_outcome\n\n return multi_venv\n\n\n@train_ex.capture\ndef wrap_adv_noise_ball(env_name, our_idx, multi_venv, adv_noise_params, deterministic):\n adv_noise_agent_val = adv_noise_params[\"noise_val\"]\n base_policy_path = adv_noise_params[\"base_path\"]\n base_policy_type = adv_noise_params[\"base_type\"]\n base_policy = load_policy(\n policy_path=base_policy_path,\n policy_type=base_policy_type,\n env=multi_venv,\n env_name=env_name,\n index=our_idx,\n )\n\n base_action_space = multi_venv.action_space.spaces[our_idx]\n adv_noise_action_space = Box(\n low=adv_noise_agent_val * base_action_space.low,\n high=adv_noise_agent_val * base_action_space.high,\n )\n multi_venv = MergeAgentVecEnv(\n venv=multi_venv,\n policy=base_policy,\n replace_action_space=adv_noise_action_space,\n merge_agent_idx=our_idx,\n deterministic=deterministic,\n )\n return multi_venv\n\n\n@train_ex.capture\ndef maybe_embed_agent(\n multi_venv,\n our_idx,\n scheduler,\n log_callbacks,\n env_name,\n embed_types,\n embed_paths,\n embed_index,\n embed_noise,\n embed_noise_params,\n adv_noise_params,\n transparent_params,\n lookback_params,\n):\n if len(embed_types) > 0:\n deterministic = lookback_params is not None\n # If we are actually training an epsilon-ball noise agent on top of a zoo agent\n if adv_noise_params[\"noise_val\"] is not None:\n multi_venv = wrap_adv_noise_ball(\n env_name,\n our_idx,\n multi_venv,\n adv_noise_params=adv_noise_params,\n deterministic=deterministic,\n )\n embedded_policies = []\n # If we're loading multiple embedded agents\n for embed_type, embed_path in zip(embed_types, embed_paths):\n embedded_policies.append(\n load_policy(\n policy_path=embed_path,\n policy_type=embed_type,\n env=multi_venv,\n env_name=env_name,\n index=embed_index,\n transparent_params=transparent_params,\n )\n )\n\n if embed_noise:\n for i in range(len(embedded_policies)):\n embedded = apply_embedded_agent_wrapper(\n embedded=embedded_policies[i],\n noise_params=embed_noise_params,\n scheduler=scheduler,\n )\n log_callbacks.append(LoggerOnlyLogCallback(embedded))\n embedded_policies[i] = embedded\n\n if len(embedded_policies) > 1:\n embedded_policy = MultiPolicyWrapper(embedded_policies, num_envs=multi_venv.num_envs)\n else:\n embedded_policy = embedded_policies[0]\n\n # Curry the embedded agent\n cls = TransparentCurryVecEnv if transparent_params is not None else CurryVecEnv\n multi_venv = cls(\n venv=multi_venv,\n policy=embedded_policy,\n agent_idx=embed_index,\n deterministic=deterministic,\n )\n return multi_venv\n\n\n@train_ex.capture\ndef single_wrappers(\n single_venv,\n scheduler,\n our_idx,\n normalize,\n normalize_observations,\n rew_shape,\n rew_shape_params,\n embed_index,\n embed_paths,\n embed_types,\n debug,\n env_name,\n load_policy,\n lookback_params,\n transparent_params,\n log_callbacks,\n save_callbacks,\n):\n if rew_shape:\n rew_shape_venv = apply_reward_wrapper(\n single_env=single_venv,\n scheduler=scheduler,\n shaping_params=rew_shape_params,\n agent_idx=our_idx,\n )\n log_callbacks.append(LoggerOnlyLogCallback(rew_shape_venv))\n single_venv = rew_shape_venv\n\n for anneal_type in [\"noise\", \"rew_shape\"]:\n if scheduler.is_conditional(anneal_type):\n scheduler.set_annealer_get_logs(anneal_type, rew_shape_venv.get_logs)\n\n if lookback_params[\"lb_num\"] > 0:\n if len(embed_types) > 1:\n raise ValueError(\"Lookback is not supported with multiple embedded agents\")\n embed_path = embed_paths[0]\n embed_type = embed_types[0]\n lookback_venv 
= LookbackRewardVecWrapper(\n single_venv,\n env_name,\n debug,\n embed_index,\n embed_path,\n embed_type,\n transparent_params,\n **lookback_params,\n )\n single_venv = lookback_venv\n\n if normalize:\n if normalize_observations:\n if load_policy[\"path\"] is not None:\n if load_policy[\"type\"] == \"zoo\":\n raise ValueError(\n \"Trying to normalize twice. Bansal et al's Zoo agents normalize \"\n \"implicitly. Please set normalize=False to disable VecNormalize.\"\n )\n normalized_venv = VecNormalize(single_venv)\n else:\n normalized_venv = VecNormalize(single_venv, norm_obs=False)\n\n if load_policy[\"path\"] is not None and load_policy[\"type\"] != \"zoo\":\n normalized_venv.load_running_average(load_policy[\"path\"])\n\n save_callbacks.append(\n lambda root_dir: normalized_venv.save(os.path.join(root_dir, \"vec_normalize.pkl\"))\n )\n single_venv = normalized_venv\n\n return single_venv\n\n\nRL_ALGOS = {\n \"ppo2\": ppo2,\n \"old_ppo2\": old_ppo2,\n}\nMPI_RL_ALGOS = {\n \"gail\": gail,\n \"ppo1\": ppo1,\n \"sac\": sac,\n}\n\ntry:\n from mpi4py import MPI # pytype:disable=import-error\n\n del MPI\n RL_ALGOS.update(MPI_RL_ALGOS)\nexcept ImportError:\n RL_ALGOS.update({k: mpi_unavailable_error for k in MPI_RL_ALGOS})\n\n# True for Stable Baselines as of 2019-03\nNO_VECENV = [\"ddpg\", \"dqn\", \"gail\", \"her\", \"ppo1\", \"sac\"]\n\n\ndef resolve_embed(embed_type, embed_path, embed_types, embed_paths, adv_noise_params):\n adv_noise_params = dict(adv_noise_params)\n if embed_type is None:\n embed_type = \"zoo\"\n adv_noise_params[\"base_type\"] = embed_type\n if embed_path is None:\n embed_path = \"1\"\n adv_noise_params[\"base_path\"] = embed_path\n if embed_types is None and embed_paths is None:\n embed_types = [embed_type]\n embed_paths = [embed_path]\n\n return embed_types, embed_paths, adv_noise_params\n\n\n@train_ex.main\ndef train(\n _run,\n root_dir,\n exp_name,\n num_env,\n rl_algo,\n learning_rate,\n log_output_formats,\n embed_type,\n embed_path,\n embed_types,\n embed_paths,\n adv_noise_params,\n):\n embed_types, embed_paths, adv_noise_params = resolve_embed(\n embed_type, embed_path, embed_types, embed_paths, adv_noise_params\n )\n\n scheduler = Scheduler(annealer_dict={\"lr\": ConstantAnnealer(learning_rate)})\n out_dir, logger = setup_logger(root_dir, exp_name, output_formats=log_output_formats)\n log_callbacks, save_callbacks = [], []\n\n if rl_algo in NO_VECENV and num_env > 1:\n raise ValueError(f\"'{rl_algo}' needs 'num_env' set to 1.\")\n\n multi_venv, our_idx = build_env(out_dir, embed_types=embed_types)\n multi_venv = multi_wrappers(multi_venv, log_callbacks=log_callbacks)\n multi_venv = maybe_embed_agent(\n multi_venv,\n our_idx,\n scheduler,\n log_callbacks=log_callbacks,\n embed_types=embed_types,\n embed_paths=embed_paths,\n adv_noise_params=adv_noise_params,\n )\n single_venv = FlattenSingletonVecEnv(multi_venv)\n single_venv = single_wrappers(\n single_venv,\n scheduler,\n our_idx,\n log_callbacks=log_callbacks,\n save_callbacks=save_callbacks,\n embed_paths=embed_paths,\n embed_types=embed_types,\n )\n\n train_fn = RL_ALGOS[rl_algo]\n res = train_fn(\n env=single_venv,\n out_dir=out_dir,\n learning_rate=scheduler.get_annealer(\"lr\"),\n logger=logger,\n log_callbacks=log_callbacks,\n save_callbacks=save_callbacks,\n )\n single_venv.close()\n\n return res\n\n\ndef main():\n observer = FileStorageObserver(osp.join(\"data\", \"sacred\", \"train\"))\n train_ex.observers.append(observer)\n train_ex.run_commandline()\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"HumanCompatibleAI/adversarial-policies","sub_path":"src/aprl/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":22985,"program_lang":"python","lang":"en","doc_type":"code","stars":254,"dataset":"github-code","pt":"54"}
{"seq_id":"33177501001","text":"line1 = ['','','']\r\nline2 = ['','','']\r\nline3 = ['','','']\r\nmap = [line1,line2,line3]\r\n\r\nprint(\"Hiding your treasure! X marks the spot.\")\r\nposition = input('Where do you want to put the treasure?')\r\nnumber = position.lower()\r\nchecklist = ['a','b','c']\r\nx_coord= int(checklist.index(number[0]))\r\ny_coord=int(number[1])-1\r\n\r\nif x_coord >= len(map) or y_coord < 0 or y_coord >= len(map[0]):\r\n print('Coordinate does not exist')\r\nelse:\r\n map[int(x_coord)][int(y_coord)]='X'\r\n print(f\"{line1}\\n{line2}\\n{line3}\")\r\n# print(number)","repo_name":"laysiong/Coding-Practices","sub_path":"Python-Udemy_100_Days_of_Code/Basic_Python/Day_04/Treasure_Map_FindX.py","file_name":"Treasure_Map_FindX.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"647024343","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 18 17:03:35 2020\r\n\r\n@author: Asus\r\n\"\"\"\r\n# -----ex5---------------\r\ndef missing_char(st, n):\r\n st1 = st[:n] + st[n+1:] # remove exactly the character at index n\r\n return st1\r\nstr1 = input(\"enter your string \\n\")\r\nn=int(input('enter the character index :'))\r\nif n in range(0, len(str1)) : \r\n str2 = missing_char(str1,n)\r\n print(str2)\r\nelse : \r\n print(\"error invalid input n\")","repo_name":"souhirmabrouk/souhirmd","sub_path":"checkpoint1-souhirmabrouk/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"3324924780","text":"#!/usr/bin/env python-sirius\n\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pyaccel\nfrom pymodels import si\n\nfrom idanalysis import optics\nfrom idanalysis.model import get_id_sabia_list\n\nimport idanalysis.utils as utils\n#utils.FOLDER_BASE = '/home/ximenes/repos-dev/'\nutils.FOLDER_BASE = '/home/gabriel/repos-sirius/'\n\n\ndef create_model(ids, goal_tunes=None, straight_nr=None):\n\n print('--- create model')\n ring = si.create_accelerator(ids=ids)\n ring.vchamber_on = optics.CHAMBER_ON\n\n if goal_tunes is None:\n tw0, *_ = pyaccel.optics.calc_twiss(ring)\n goal_tunes = np.array([tw0.mux[-1] / 2 / np.pi, tw0.muy[-1] / 2 / np.pi])\n print()\n\n if straight_nr is not None:\n _, knobs, _ = optics.symm_get_knobs(ring, straight_nr)\n locs_beta = optics.symm_get_locs_beta(knobs)\n goal_beta = np.array([tw0.betax[locs_beta], tw0.betay[locs_beta]])\n goal_alpha = np.array([tw0.alphax[locs_beta], tw0.alphay[locs_beta]])\n else:\n goal_beta = None\n goal_alpha = None\n\n return ring, goal_tunes, goal_beta, goal_alpha\n\n\ndef correct_optics(ring, straight_nr, goal_tunes, goal_beta, goal_alpha, weight=False):\n\n ring0 = ring[:]\n\n tw, *_ = pyaccel.optics.calc_twiss(ring)\n\n print('--- correct cod')\n id_famname = None # NOTE: set!\n cod = optics.correct_orbit(ring, id_famname, False)\n print('kicks [urad]: {}'.format(1e6*cod[0]))\n ring1 = ring[:]\n print()\n\n print('--- symmetrize optics')\n _, knobs, _ = optics.symm_get_knobs(ring, straight_nr)\n\n dk_tot = np.zeros(len(knobs))\n
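 # Five fixed-point iterations of the symmetrization: each pass returns the quadrupole\n # strength changes dK it applied, and dk_tot accumulates the total change per knob so\n # it can be reported after the loop.\n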
 for i in range(5):\n dk = optics.correct_symmetry_withbeta(ring, straight_nr, goal_beta, goal_alpha, weight=weight)\n print('iteration #{}, dK: {}'.format(i+1, dk))\n dk_tot += dk\n for i, fam in enumerate(knobs):\n print('{:<9s} dK: {:+9.6f} 1/m²'.format(fam, dk_tot[i]))\n ring2 = ring[:]\n print()\n\n print('--- correct tunes')\n tw, *_ = pyaccel.optics.calc_twiss(ring)\n tunes = tw.mux[-1]/np.pi/2, tw.muy[-1]/np.pi/2\n print('init tunes: {:.9f} {:.9f}'.format(tunes[0], tunes[1]))\n for i in range(2):\n optics.correct_tunes_twoknobs(ring, goal_tunes)\n tw, *_ = pyaccel.optics.calc_twiss(ring)\n tunes = tw.mux[-1]/np.pi/2, tw.muy[-1]/np.pi/2\n print('iter #{} tunes: {:.9f} {:.9f}'.format(i+1, tunes[0], tunes[1]))\n print('goal tunes: {:.9f} {:.9f}'.format(goal_tunes[0], goal_tunes[1]))\n ring3 = ring[:]\n print()\n\n return ring0, ring1, ring2, ring3\n\n\ndef save_models(configs):\n\n ring_nom, goal_tunes, goal_beta, goal_alpha = create_model(ids=None, goal_tunes=None, straight_nr=10)\n tw_nom, *_ = pyaccel.optics.calc_twiss(ring_nom)\n\n models = dict()\n for config in configs:\n \n config_label = configs.get_config_label(config)\n print('=== {}\\n'.format(config_label))\n\n fname = configs.get_kickmap_filename(config)\n ids = get_id_sabia_list(fname)\n \n ring, *_ = create_model(ids=ids, goal_tunes=goal_tunes)\n ring0, ring1, ring2, ring3 = correct_optics(ring, 10, goal_tunes, goal_beta, goal_alpha)\n\n models[config_label] = (ring0, ring1, ring2, ring3)\n\n tw2, *_ = pyaccel.optics.calc_twiss(ring2)\n tw3, *_ = pyaccel.optics.calc_twiss(ring3)\n\n # betax\n plt.plot(tw_nom.spos, tw_nom.betax, label='nom')\n plt.plot(tw2.spos, 1e3*(tw2.betax - tw_nom.betax), label='1000 x (symm - nom)')\n plt.plot(tw3.spos, 1e3*(tw3.betax - tw_nom.betax), label='1000 x (symm+tune - nom)')\n plt.legend()\n plt.xlabel('posz [m]')\n plt.ylabel('beta [m]')\n plt.title('BetaX')\n plt.show()\n\n # betay\n plt.plot(tw_nom.spos, tw_nom.betay, label='nom')\n plt.plot(tw2.spos, 1e3*(tw2.betay - tw_nom.betay), label='1000 x (symm - nom)')\n plt.plot(tw3.spos, 1e3*(tw3.betay - tw_nom.betay), label='1000 x (symm+tune - nom)')\n plt.legend()\n plt.xlabel('posz [m]')\n plt.ylabel('beta [m]')\n plt.title('BetaY')\n plt.show()\n\n pickle.dump(models, open('models.pickle', 'wb'))\n
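 # The saved dictionary maps each config label to its (ring0, ring1, ring2, ring3)\n # tuple; it can be restored later with, for example,\n # models = pickle.load(open('models.pickle', 'rb')), which is the shape the legacy\n # plotting helpers below expect from load_models().\n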
\n\ndef test_symm():\n\n ring_nom, goal_tunes, goal_beta, goal_alpha = create_model(ids=None, goal_tunes=None, straight_nr=10)\n \n # locs = symm_get_locs(ring_nom)\n # _, knobs, _ = symm_get_knobs(ring_nom, 10)\n # locs_beta = symm_get_locs_beta(knobs)\n # alpha1 = symm_calc_residue(ring_nom, locs, locs_beta, goal_beta, goal_alpha, weight=False)\n # ring, goal_tunes, *_ = create_model(ids=ids, goal_tunes=goal_tunes)\n # alpha2 = symm_calc_residue(ring, locs, locs_beta, goal_beta, goal_alpha, weight=False)\n\n # plt.plot(1000*alpha1, label='alpha nominal (x1000)')\n # plt.plot(alpha2, label='alpha with uncorrected ID')\n # plt.legend()\n # plt.show()\n # return\n\n tw_nom, *_ = pyaccel.optics.calc_twiss(ring_nom)\n ring, goal_tunes, *_ = create_model(ids=ids, goal_tunes=goal_tunes)\n ring_ = ring[:]\n ring0, ring1, ring2, ring3 = correct_optics(ring, 10, goal_tunes, goal_beta, goal_alpha, False)\n tw0, *_ = pyaccel.optics.calc_twiss(ring2)\n ring0, ring1, ring2, ring3 = correct_optics(ring_, 10, goal_tunes, goal_beta, goal_alpha, True)\n tw1, *_ = pyaccel.optics.calc_twiss(ring2)\n\n plt.plot(tw_nom.spos, 1e3*(tw0.betax - tw_nom.betax), label='without beta weights')\n plt.plot(tw_nom.spos, 1e3*(tw1.betax - tw_nom.betax), label='with large beta weights')\n plt.xlabel('spos [m]')\n plt.ylabel('betax - betax_nom [mm]')\n plt.legend()\n plt.title('With BC symmetry points')\n plt.show()\n\n plt.plot(tw_nom.spos, 1e3*(tw0.betay - tw_nom.betay), label='without beta weights')\n plt.plot(tw_nom.spos, 1e3*(tw1.betay - tw_nom.betay), label='with large beta weights')\n plt.xlabel('spos [m]')\n plt.ylabel('betay - betay_nom [mm]')\n plt.legend()\n plt.title('With BC symmetry points')\n plt.show()\n\n\n# --- legacy (to be adapted and tested in the new lib version)\n\ndef tune_shift(models):\n\n # models = load_models(folder='./results/vchamber-off/')\n nominal_ring = si.create_accelerator(ids=None)\n tw, *_ = pyaccel.optics.calc_twiss(nominal_ring)\n goal_tunes = np.array([tw.mux[-1] / 2 / np.pi, tw.muy[-1] / 2 / np.pi])\n\n for config, rings in models.items():\n ring0, *_ = rings\n tw0, *_ = pyaccel.optics.calc_twiss(ring0)\n tunes0 = np.array([tw0.mux[-1] / 2 / np.pi, tw0.muy[-1] / 2 / np.pi])\n dtunes = tunes0 - goal_tunes\n print('{:<40s}: {:+.8f} {:+.8f}'.format(config, dtunes[0], dtunes[1]))\n\n\ndef calc_dynapt_area(dynapt):\n vx, vy = dynapt\n area = 0\n for i in range(len(vx)-1):\n v1 = np.array([vx[i], vy[i]])\n v2 = np.array([vx[i+1], vy[i+1]])\n area += np.linalg.norm(np.cross(v1, v2))\n return area\n\n\ndef closed_orbit(models, modelname=None):\n\n # models = load_models(folder='./results/vchamber-off/')\n for config, rings in models.items():\n config = config.replace('Linear', 'L')\n config = config.replace('Elliptical', 'E')\n ring0, ring1, ring2, ring3 = rings\n if modelname == 'ring0' or modelname is None:\n ring = ring0\n elif modelname == 'ring1':\n ring = ring1\n elif modelname == 'ring2':\n ring = ring2\n elif modelname == 'ring3':\n ring = ring3\n tw0, *_ = pyaccel.optics.calc_twiss(ring)\n cod = 1e6*tw0.co[2]\n print('{:<40s}: std: {:+06.1f} um, maxabs: {:+06.1f} um'.format(config, np.std(cod), np.max(abs(cod))))\n plt.plot(tw0.spos, cod, '-', label=config + ' ({:.1f} um)'.format(np.max(abs(cod))))\n\n plt.title('Vertical COD - {}'.format(modelname))\n plt.xlabel('spos [m]')\n plt.ylabel('COD [um]')\n plt.legend()\n plt.grid()\n plt.show()\n\n for config, rings in models.items():\n config = config.replace('Linear', 'L')\n config = config.replace('Elliptical', 'E')\n ring0, ring1, ring2, ring3 = rings\n if modelname == 'ring0' or modelname is None:\n ring = ring0\n elif modelname == 'ring1':\n ring = ring1\n elif modelname == 'ring2':\n ring = ring2\n elif modelname == 'ring3':\n ring = ring3\n tw0, *_ = pyaccel.optics.calc_twiss(ring)\n cod = 1e6*tw0.co[0]\n print('{:<40s}: std: {:+06.2f} um, maxabs: {:+06.1f} um'.format(config, np.std(cod), np.max(abs(cod))))\n plt.plot(tw0.spos, cod, '-', label=config + ' ({:.1f} um)'.format(np.max(abs(cod))))\n\n plt.title('Horizontal COD - {}'.format(modelname))\n plt.xlabel('spos [m]')\n plt.ylabel('COD [um]')\n plt.legend()\n plt.grid()\n plt.show()\n\n \ndef kick_strengths(models):\n # models = load_models(folder='./results/vchamber-off/')\n for config, rings in models.items():\n _, ring1, *_ = rings\n idx = pyaccel.lattice.find_indices(ring1, 'fam_name', 'IDC')\n hkick = pyaccel.lattice.get_attribute(ring1, 'hkick', idx)\n vkick = pyaccel.lattice.get_attribute(ring1, 'vkick', idx)\n print('{:<40s}: {:+.2f} urad {:+.2f} urad'.format(config, 1e6*max(abs(hkick)), 1e6*max(abs(vkick))))\n\n\ndef quadrupole_strengths(models):\n\n # models = load_models(folder='./results/vchamber-off/')\n ring_nom = si.create_accelerator(ids=None)\n k0 = list()\n _, knobs, _ = symm_get_knobs(ring_nom, 10)\n for inds in knobs.values():\n k0 += list(pyaccel.lattice.get_attribute(ring_nom, 'polynom_b', inds[0], 1))\n 
k0 = np.array(k0)\n \n for config, rings in models.items():\n _, _, ring2, *_ = rings\n dk1 = list()\n for inds in knobs.values():\n dk1 += list(pyaccel.lattice.get_attribute(ring2, 'polynom_b', inds[0], 1))\n dk1 = np.array(dk1) - k0\n print('{:<40s}: '.format(config), end='')\n for dk in dk1:\n print('{:+.5f} '.format(dk), end='')\n print()\n\n\ndef beta_difference_model(config, rings):\n \n ring_nom = si.create_accelerator(ids=None)\n tw_nom, *_ = pyaccel.optics.calc_twiss(ring_nom)\n ring0, ring1, ring2, ring3 = rings\n\n tw, *_ = pyaccel.optics.calc_twiss(ring0)\n dbetax0 = 1e3*(tw.betax - tw_nom.betax) \n dbetay0 = 1e3*(tw.betay - tw_nom.betay) \n tw, *_ = pyaccel.optics.calc_twiss(ring1)\n dbetax1 = 1e3*(tw.betax - tw_nom.betax)\n dbetay1 = 1e3*(tw.betay - tw_nom.betay)\n tw, *_ = pyaccel.optics.calc_twiss(ring2)\n dbetax2 = 1e3*(tw.betax - tw_nom.betax)\n dbetay2 = 1e3*(tw.betay - tw_nom.betay)\n tw, *_ = pyaccel.optics.calc_twiss(ring3)\n dbetax3 = 1e3*(tw.betax - tw_nom.betax)\n dbetay3 = 1e3*(tw.betay - tw_nom.betay)\n\n plt.plot(tw_nom.spos, dbetax0, label='uncorr')\n plt.plot(tw_nom.spos, dbetax1, label='cod corr')\n plt.plot(tw_nom.spos, dbetax3, label='tune corr')\n plt.plot(tw_nom.spos, dbetax2, label='symm corr')\n plt.xlabel('spos [m]')\n plt.ylabel('dbeta [mm]')\n plt.grid()\n plt.legend()\n plt.title('BetaX Variation from nominal ({})'.format(config))\n plt.savefig('dbetax-{}.svg'.format(config))\n plt.show()\n\n plt.plot(tw_nom.spos, dbetay0, label='uncorr')\n plt.plot(tw_nom.spos, dbetay1, label='cod corr')\n plt.plot(tw_nom.spos, dbetay3, label='tune corr')\n plt.plot(tw_nom.spos, dbetay2, label='symm corr')\n plt.xlabel('spos [m]')\n plt.ylabel('dbeta [mm]')\n plt.grid()\n plt.legend()\n plt.title('BetaY Variation from nominal ({})'.format(config))\n plt.savefig('dbetay-{}.svg'.format(config))\n plt.show()\n\n\ndef beta_difference(models, modelname):\n\n # models = load_models(folder='./results/vchamber-off/')\n ring_nom = si.create_accelerator(ids=None)\n tw_nom, *_ = pyaccel.optics.calc_twiss(ring_nom)\n\n for config, rings in models.items():\n ring0, ring1, ring2, ring3 = rings\n if modelname == 'ring0':\n ring = ring0\n elif modelname == 'ring1':\n ring = ring1\n elif modelname == 'ring2':\n ring = ring2\n elif modelname == 'ring3':\n ring = ring3\n tw, *_ = pyaccel.optics.calc_twiss(ring)\n dbeta = 1e3*(tw.betax - tw_nom.betax)\n plt.plot(tw_nom.spos, dbeta, label=config)\n plt.xlabel('spos [m]')\n plt.ylabel('beta [mm]')\n plt.grid()\n plt.legend()\n plt.title('BetaX Variation ({})'.format(modelname))\n plt.show()\n\n for config, rings in models.items():\n ring0, ring1, ring2, ring3 = rings\n if modelname == 'ring0':\n ring = ring0\n elif modelname == 'ring1':\n ring = ring1\n elif modelname == 'ring2':\n ring = ring2\n elif modelname == 'ring3':\n ring = ring3\n tw, *_ = pyaccel.optics.calc_twiss(ring)\n dbeta = 1e3*(tw.betay - tw_nom.betay)\n plt.plot(tw_nom.spos, dbeta, label=config)\n plt.xlabel('spos [m]')\n plt.ylabel('beta [mm]')\n plt.legend()\n plt.title('BetaY Variation ({})'.format(modelname))\n plt.show()\n\n\ndef plot_dynapt_xy(fname, folder='./results/'):\n dynapt = pickle.load(open(folder + 'dynapt_xy_nominal.pickle', 'rb'))['ring0']\n area = 1e6*calc_dynapt_area(dynapt)\n plt.plot(1e3*dynapt[0], 1e3*dynapt[1], label='nominal ({:.1f} mm²)'.format(area))\n config = get_label_delta(fname)\n dynapt = pickle.load(open(folder + 'dynapt_xy_' + config + '.pickle', 'rb'))\n area = 1e6*calc_dynapt_area(dynapt['ring0'])\n plt.plot(1e3*dynapt['ring0'][0], 
1e3*dynapt['ring0'][1], label='uncorr ({:.1f} mm²)'.format(area))\n area = 1e6*calc_dynapt_area(dynapt['ring1'])\n plt.plot(1e3*dynapt['ring1'][0], 1e3*dynapt['ring1'][1], label='cod corr ({:.1f} mm²)'.format(area))\n area = 1e6*calc_dynapt_area(dynapt['ring2'])\n plt.plot(1e3*dynapt['ring2'][0], 1e3*dynapt['ring2'][1], label='symm corr ({:.1f} mm²)'.format(area))\n area = 1e6*calc_dynapt_area(dynapt['ring3'])\n plt.plot(1e3*dynapt['ring3'][0], 1e3*dynapt['ring3'][1], label='tune corr ({:.1f} mm²)'.format(area))\n plt.title('Dynamical Aperture XY for {}'.format(config))\n plt.legend()\n plt.xlabel('posx [mm]')\n plt.ylabel('posy [mm]')\n plt.grid()\n plt.xlim([-12,12])\n plt.ylim([0, 6])\n plt.savefig('dynapt-xy-' + config + '.svg')\n plt.show()\n\n\ndef plot_dynapt_ex(fname, folder='./results/'):\n dynapt = pickle.load(open(folder + 'dynapt_ex_nominal.pickle', 'rb'))['ring0']\n plt.plot(1e2*dynapt[0], 1e3*dynapt[1], label='nominal')\n config = get_label_delta(fname)\n dynapt = pickle.load(open(folder + 'dynapt_ex_' + config + '.pickle', 'rb'))\n plt.plot(1e2*dynapt['ring0'][0], 1e3*dynapt['ring0'][1], label='uncorr')\n plt.plot(1e2*dynapt['ring1'][0], 1e3*dynapt['ring1'][1], label='cod corr')\n plt.plot(1e2*dynapt['ring2'][0], 1e3*dynapt['ring2'][1], label='symm corr')\n plt.plot(1e2*dynapt['ring3'][0], 1e3*dynapt['ring3'][1], label='tune corr')\n plt.title('Dynamical Aperture dE-X for {}'.format(config))\n plt.legend()\n plt.xlabel('de [%]')\n plt.ylabel('posx [mm] @ y = 1 mm')\n plt.grid()\n plt.xlim([-5, 5])\n plt.ylim([-16, 0])\n plt.savefig('dynapt-ex-' + config + '.svg')\n plt.show()\n\n\ndef plot_dynapt_xy_all_models(folder='./results/'):\n data = pickle.load(open(folder + 'dynapt_xy_' + 'nominal' + '.pickle', 'rb'))\n dynapt = data['ring0']\n area_nom = 1e6*calc_dynapt_area(dynapt)\n for config in delta_configs:\n label = get_label_delta(id_sabia + config)\n data = pickle.load(open(folder + 'dynapt_xy_' + label + '.pickle', 'rb'))\n dynapt = data['ring0']\n area0 = 1e6*calc_dynapt_area(dynapt) - area_nom\n dynapt = data['ring1']\n area1 = 1e6*calc_dynapt_area(dynapt) - area_nom\n dynapt = data['ring2']\n area2 = 1e6*calc_dynapt_area(dynapt) - area_nom\n dynapt = data['ring3']\n area3 = 1e6*calc_dynapt_area(dynapt) - area_nom\n print('{:<40s}: {:+05.1f} mm² {:+06.2f} %'.format(label, area3, 100*area3/area_nom))\n plt.plot([area0, area1, area2, area3], '-o', label=label)\n \n plt.title('DynApt XY Area Reduction w.r.t. 
Nominal ({:.1f} mm²)'.format(area_nom))\n # plt.legend()\n plt.xlabel('0: uncorr, 1:cod, 2:symm, 3:tune')\n plt.ylabel('dynapt delta area [mm²]')\n plt.grid()\n plt.savefig('dynapt-xy-all-models.svg')\n plt.show()\n\n\ndef plot_dynapt_xy_all(modelname, folder='./results/'):\n print('model name: {}'.format(modelname))\n label = 'nominal'\n dynapt = pickle.load(open(folder + 'dynapt_xy_nominal.pickle', 'rb'))['ring0']\n area = 1e6*calc_dynapt_area(dynapt)\n print('{:<40s}: {:5.1f} mm², {:+06.2f} mm, {:+06.2f} mm'.format(label, area, 1e3*dynapt[0][-1], 1e3*dynapt[0][-2]))\n plt.plot(1e3*dynapt[0], 1e3*dynapt[1], label='{} ({:.1f} mm²)'.format(label, area))\n for config in delta_configs:\n label = get_label_delta(id_sabia + config)\n dynapt = pickle.load(open(folder + 'dynapt_xy_' + label + '.pickle', 'rb'))[modelname]\n area = 1e6*calc_dynapt_area(dynapt)\n print('{:<40s}: {:5.1f} mm², {:+06.2f} mm, {:+06.2f} mm'.format(label, area, 1e3*dynapt[0][-1], 1e3*dynapt[0][-2]))\n plt.plot(1e3*dynapt[0], 1e3*dynapt[1], label='{} ({:.1f} mm²)'.format(label, area))\n plt.title('Dynamical Aperture XY for {}'.format(modelname))\n plt.legend()\n plt.xlabel('posx [mm]')\n plt.ylabel('posy [mm]')\n plt.grid()\n plt.xlim([-12,12])\n plt.ylim([0, 6])\n plt.show()\n\n\ndef plot_dynapt_ex_all(modelname, folder='./results/'):\n print('model name: {}'.format(modelname))\n label = 'nominal'\n dynapt = pickle.load(open(folder + 'dynapt_ex_nominal.pickle', 'rb'))['ring0']\n plt.plot(1e2*dynapt[0], 1e3*dynapt[1], label='{}'.format(label))\n for config in delta_configs:\n label = get_label_delta(id_sabia + config)\n dynapt = pickle.load(open(folder + 'dynapt_ex_' + label + '.pickle', 'rb'))[modelname]\n plt.plot(1e2*dynapt[0], 1e3*dynapt[1], label='{}'.format(label))\n plt.title('Dynamical Aperture dE-X for {}'.format(modelname))\n plt.legend()\n plt.xlabel('de [%]')\n plt.ylabel('posx [mm] @ y = 1 mm')\n plt.grid()\n plt.xlim([-5, 5])\n plt.ylim([-16, 0])\n plt.show()\n\n\ndef coupling(models):\n\n # models = load_models(folder='./results/vchamber-off/')\n\n # ring = si.create_accelerator(ids=None)\n # traj, *_ = pyaccel.tracking.ring_pass(ring, [0e-7, 0, 1e-5, 0, 0, 0], 1000, turn_by_turn=True)\n # plt.plot(1e3*traj[0, :], 1e3*traj[1, :], '.', color=[0,0,1])\n # plt.plot(1e3*traj[2, :], 1e3*traj[3, :], '.', color=[1,0,0])\n # plt.show()\n\n for config, rings in models.items():\n\n *_, ring3 = rings\n\n tw, *_ = pyaccel.optics.calc_twiss(ring3)\n ep = pyaccel.optics.EquilibriumParametersOhmiFormalism(ring3)\n coup1 = 100 * ep.emity / ep.emitx\n traj, *_ = pyaccel.tracking.ring_pass(ring3, [1e-3, 0, 0, 0, 0, 0], 1000, turn_by_turn=True)\n Jx = max(traj[0,:])**2/tw.betax[0]\n Jy = max(traj[2,:])**2/tw.betay[0]\n coup2 = 100 * Jy/Jx\n print('{:<40s}: {:.4f} % {:.4f} %'.format(config, coup2, coup1))\n plt.plot(1e3*traj[0, :], 1e3*traj[1, :], '.', color=[0,0,1], label='trajx')\n plt.plot(50*1e3*traj[2, :], 50*1e3*traj[3, :], '.', color=[1,0,0], label='trajy x 50')\n plt.xlabel('pos [mm]')\n plt.ylabel('ang [mrad]')\n plt.legend()\n plt.title(config + '\\nJy/Jx = {:.4f} % '.format(coup2))\n plt.savefig('coupling-' + config + '.svg')\n plt.show()\n\n\nif __name__ == '__main__':\n\n deltadata = utils.create_deltadata()\n save_models(deltadata)\n\n # nominal\n # save_dynapt_xy({})\n # save_dynapt_ex({})\n \n # models = load_models(folder='./results/vchamber-on/')\n # configs = list(models.keys())\n\n # for config in configs[0:4]:\n # save_dynapt_xy({config:models[config]})\n # for config in configs[4:8]:\n # 
save_dynapt_xy({config:models[config]})\n # for config in configs[8:12]:\n # save_dynapt_xy({config:models[config]})\n # for config in configs[12:16]:\n # save_dynapt_xy({config:models[config]})\n # for config in configs[16:20]:\n # save_dynapt_xy({config:models[config]})\n # for config in configs[20:22]:\n # save_dynapt_xy({config:models[config]})\n\n # for config in configs[0:4]:\n # save_dynapt_ex({config:models[config]})\n # for config in configs[4:8]:\n # save_dynapt_ex({config:models[config]})\n # for config in configs[8:12]:\n # save_dynapt_ex({config:models[config]})\n # for config in configs[12:16]:\n # save_dynapt_ex({config:models[config]})\n # for config in configs[16:20]:\n # save_dynapt_ex({config:models[config]})\n # for config in configs[20:22]:\n # save_dynapt_ex({config:models[config]})\n \n # ring = si.create_accelerator(ids=None)\n # ring.vchamber_on = optics.CHAMBER_ON\n # nrturns = 4000\n # calc_dynapt_ex(ring, nrturns, demax=0.05, nrpts=9, mindeltax=0.1e-3, xmin=-30e-3, y=1e-3)\n\n # test_symm()\n\n # ring = si.create_accelerator(ids=ids)\n # idx= pyaccel.lattice.find_indices(ring, 'fam_name', 'DELTA52')\n # print(idx)\n # pickle.dump(ring, open('test.pickle', 'wb'))\n # ring = pickle.load(open('test.pickle', 'rb'))\n # print(ring[2909].trackcpp_e.length)\n # print(ring[2909].trackcpp_e.kicktable_idx)\n # print(ring[2909].trackcpp_e.rescale_kicks)\n\n\n # models = load_models()\n # configs = list(models.keys())\n\n # ring0, ring1, ring2, ring3 = models[configs[0]]\n # idx= pyaccel.lattice.find_indices(ring0, 'fam_name', 'DELTA52')\n # print(idx)\n # el = ring0[2909]\n # print(el.)\n # print()\n # rx = 0\n # ry = 0\n # print(models[configs[0]])\n # _, lost, _, _, _ = pyaccel.tracking.ring_pass(ring0, [rx, 0, ry, 0, 0, 0], 1000)\n # print(lost)\n pass\n","repo_name":"lnls-fac/idanalysis","sub_path":"scripts/optics.py","file_name":"optics.py","file_ext":"py","file_size_in_byte":21724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"316748961","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('polls', '0002_poll_pub_date'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='poll',\n name='created_at',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n ]\n","repo_name":"annamosullivan/we_are_social","sub_path":"polls/migrations/0003_poll_created_at.py","file_name":"0003_poll_created_at.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17153308190","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 18-9-19 上午11:35\n# @Author : Jh Zhao\n# @Site : \n# @File : get_uuid2type_map.py\n# @Software: PyCharm Community Edition\n\nimport requests\nimport json\nimport argparse\n\napi_url = \"http://47.92.9.46/label/name/\"\n\nshort_name_of_type = {\n \"瓶装\": \"pz\",\n \"箱装\": \"xz\",\n \"割箱\": \"gx\",\n \"袋装\": \"dz\",\n \"盒装\": \"hz\",\n \"杯装\": \"bz\",\n}\n\n\ndef request_sku_info(uuid, retry=3):\n if retry <= 0:\n return None\n try:\n params = {\"uuid\": uuid}\n res = requests.get(api_url, params=params, timeout=30)\n return res.json()\n except requests.RequestException as e:\n print(\"RequestException: %s\" % str(e))\n return request_sku_info(uuid, retry - 1)\n\n\ndef 
load_label_index_map(label_index_map_path):\n with open(label_index_map_path) as f:\n label_index_map = json.load(f)\n if isinstance(label_index_map, list):\n label_index_map = {x['class']: x for x in label_index_map}\n else:\n label_index_map = {k: {\"id\": i, 'class': k} for k, i in label_index_map.items()}\n index_label_map = {v['id']: v for k, v in label_index_map.items()}\n return label_index_map, index_label_map\n\n\ndef get_uuid2type_map(label_index_map):\n result = {}\n for k in label_index_map:\n info = request_sku_info(k)\n type_name = (info or {}).get(\"data\", {}).get(\"name\", {}).get(\"type_name\") # request_sku_info may return None after retries\n if not type_name:\n print(\"Cannot get type name of %s\" % k)\n else:\n result[k] = short_name_of_type[type_name]\n return result\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--label_index_map_path\", required=True,\n help=\"the path of the label index map file\")\n parser.add_argument(\"--result_path\", required=True,\n help=\"the path of the result file\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = get_args()\n lim, _ = load_label_index_map(args.label_index_map_path)\n uuid2type_map = get_uuid2type_map(lim)\n with open(args.result_path, 'w') as f:\n json.dump(uuid2type_map, f, indent=2, ensure_ascii=False)\n","repo_name":"PangYunsheng8/object-detection","sub_path":"classification/tools/get_sku_uuid2type_map.py","file_name":"get_sku_uuid2type_map.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"70395365283","text":"import subprocess\nimport time\nfrom take_command import *\nfrom talk import talk\nfrom if_online import online\nimport webbrowser\nimport os\nimport datetime\nimport speech_recognition as maaz\nfrom datetime import date\nimport pyautogui\n\nname_of_user = \"\"\nage_of_user = \"\"\n\n\ndef startup():\n global offline_command\n os.system('cls')\n print(\"Initializing Jarvis.....\")\n time.sleep(0.5)\n print(\"Done.👍\")\n talk(\"Initializing Jarvis\")\n print(\"Starting all systems applications.....\")\n time.sleep(0.5)\n print(\"Calibrating and examining all the core processors.....\")\n time.sleep(0.5)\n print(\"Checking the internet connection\")\n time.sleep(0.5)\n talk(\"Checking internet connection\")\n print(\"Done.👍\")\n if online == True:\n print(\"You are online 👍\")\n talk(\"Sir! You are online\")\n else:\n print(\"You are offline 👎:-(\")\n talk(\"Sir! You are offline\")\n
 talk(\"Internet features are not available\")\n talk(\"Enter your command via text input\")\n while True:\n os.system('cls')\n offline_command = str(input(\"Enter your Query: \"))\n run_jarvis()\n print(\n \"--------------------------------------------------------------------------------------------------------------\")\n print(\" Jarvis_ver.1.13\")\n print(\" by Maaz\")\n print(\n \"--------------------------------------------------------------------------------------------------------------\")\n print(\"Jarvis ready now.\")\n talk(\"Jarvis ready now.\")\n hour = int(datetime.datetime.now().hour)\n if hour >= 0 and hour <= 12:\n talk(\"Good Morning Maaz!\")\n elif hour > 12 and hour < 18:\n talk(\"Good afternoon Maaz!\")\n else:\n talk(\"Good evening Maaz!\")\n talk(\"How may I help you?\")\n while True:\n run_jarvis()\n\n\ndef run_jarvis():\n global source, online, engine, voices, command\n if online == True:\n command = take_command()\n else:\n command = offline_command\n\n print(\"You: \" + command)\n\n if \"how are you\" in command:\n print(\"\\n\\nJarvis: I am good, Thanks for asking\")\n talk(\"I am good, Thanks for asking\")\n elif 'repeat after me' in command or 'speak after me' in command:\n os.system(\"repeatafterme.py\")\n elif 'know' in command and 'my name' in command:\n global name_of_user\n try:\n file = open('data\\\\Name.txt', 'r')\n except:\n # create the file first, then reopen it for reading (a 'w' handle cannot be read)\n file = open('data\\\\Name.txt', 'w')\n file.write('')\n file.close()\n file = open('data\\\\Name.txt', 'r')\n name_of_user = file.read()\n file.close()\n if name_of_user == '':\n print('I don\\'t know your name yet. Say \\'Yes\\' if you want to tell me.')\n talk(\"I don\\'t know your name yet. Say Yes, if you want to tell me.\")\n if online == True:\n command = take_command()\n else:\n command = offline_command\n if 'yes' in command or 'sure' in command:\n print(\"What should I call you?\")\n talk(\"What should I call you?\")\n if online == True:\n command = take_command()\n else:\n command = offline_command\n name_of_user = str(command)\n print(\"Ok! I will call you \" + name_of_user)\n talk(\"Ok! I will call you \" + name_of_user)\n file = open('data\\\\Name.txt', 'w')\n file.write(name_of_user)\n file.close()\n else:\n pass\n else:\n print(\"You are \" + name_of_user)\n talk(\"Your name is \" + name_of_user)\n elif 'my' in command and 'age' in command:\n global age_of_user\n try:\n file = open('data\\\\Age.txt', 'r')\n except:\n # same pattern as above: create, close, then reopen for reading\n file = open('data\\\\Age.txt', 'w')\n file.write('')\n file.close()\n file = open('data\\\\Age.txt', 'r')\n age_of_user = file.read()\n file.close()\n if age_of_user == '':\n print('I don\\'t know how old you are. Say \\'Yes\\' if you want to tell me.')\n talk(\"I don\\'t know how old you are. Say \\'Yes\\' if you want to tell me.\")\n if online == True:\n command = take_command()\n else:\n command = offline_command\n if 'yes' in command or 'sure' in command:\n print(\"How old are you?\")\n talk(\"How old are you?\")\n if online == True:\n command = take_command()\n else:\n command = offline_command\n age_of_user = str(command)\n print(\"Ok! You are \" + age_of_user + \" Years old.\")\n talk(\"Ok! You are \" + age_of_user)\n file = open('data\\\\Age.txt', 'w')\n file.write(age_of_user)\n file.close()\n else:\n pass\n else:\n print(\"You are \" + age_of_user + \" Years old.\")\n talk(\"You are \" + age_of_user)\n
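 # The Name/Age branches above share a read-from-file-or-ask pattern backed by plain text\n # files in the data folder. A hypothetical helper (a sketch only; not defined or used\n # anywhere in this script) could collapse the two branches:\n # def remember(path, prompt):\n #     try:\n #         with open(path) as f:\n #             value = f.read()\n #     except OSError:\n #         value = ''\n #     if not value:\n #         talk(prompt)\n #         value = take_command() if online == True else offline_command\n #         with open(path, 'w') as f:\n #             f.write(value)\n #     return value\n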
 elif 'hello' in command or 'Hi' in command:\n print(\"\\n\\nJarvis: Hi! nice to meet you\")\n talk(\"Hi! nice to meet you\")\n elif 'thanks' in command or 'thank' in command:\n print(\"You are welcome!\")\n talk(\"You are welcome Sir!\")\n elif 'change' in command and 'your name' in command:\n print(\"\\n\\nJarvis: I am Jarvis, and I like my name.\")\n talk(\"I am Jarvis, and I like my name\")\n elif 'who are you' in command or 'your name' in command:\n print(\"\\n\\nJarvis: I am Jarvis\\nA voice assistant\")\n talk(\"I am Jarvis\\nA voice assistant\")\n elif 'what is your name' in command:\n talk(\"I am Jarvis, nice to meet you.\")\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n print(\"\\n\\nJarvis: \" + time)\n talk(\"It's \" + time)\n elif 'date' in command:\n temp = date.today()\n today_date = temp.strftime(\"%d/%m/%Y\")\n print(\"\\n\\nJarvis: \" + today_date)\n talk(\"It's \" + today_date)\n elif 'search' in command:\n os.system(\"search_talk.py\")\n elif 'joke' in command:\n os.system(\"tell_joke.py\")\n elif 'bye' in command:\n print(\"\\n\\nok! see you later\")\n talk(\"ok! see you later Sir\")\n exit()\n elif 'exit' in command or 'Exit' in command or 'go away' in command or 'get lost' in command:\n print(\"\\n\\nok! see you later\")\n talk(\"ok! see you later Sir\")\n exit()\n elif 'who made you' in command or 'who created you' in command:\n print(\n \"\\n\\nI was made by the greatest hero the world has ever seen in the history of mankind. His name is Sir Maaz Bin Asif\")\n talk(\n \"I was made by the greatest hero the world has ever seen in the history of mankind. His name is Sir Maaz Bin Asif\")\n elif 'Chrome' in command or 'chrome' in command:\n if online == True:\n print(\"\\n\\nOpening Google Chrome\")\n talk(\"Sir! Launching Google Chrome\")\n webbrowser.open('https://www.google.com')\n else:\n print(\"Launching chrome...\")\n talk(\"Launching chrome but Internet is not available.\")\n webbrowser.open('https://www.google.com')\n elif 'music' in command:\n talk(\"Ok Sir! Asking Windows to play Music.\")\n print(\"\\n\\nPlaying Music.\")\n os.system(\"music.py\")\n elif 'open' in command and 'youtube' in command:\n if online == True:\n talk(\"Opening youtube\")\n webbrowser.open('https://youtube.com/')\n else:\n print(\"opening youtube...\")\n talk(\"opening youtube but internet is not available\")\n webbrowser.open('https://www.youtube.com/')\n elif 'news' in command:\n if online == True:\n talk(\"fetching latest headlines. Region! Pakistan\")\n webbrowser.open('https://dunyanews.tv/')\n else:\n print(\"Internet is not available. Try again later.\")\n talk(\"Internet is not available. Try again later.\")\n
Try again later.\")\n elif 'screenshot' in command:\n \"\"\"print(\"\\n\\nTaking Screenshot\")\n talk(\"Taking screenshot\")\n img = pyautogui.screenshot()\n img.save(r\"C:/Users/Dell/Desktop/Jarvis_capture.png\")\n talk(\"screenshot successfully saved to desktop Sir.\")\"\"\"\n os.system(\"screenshot.py\")\n elif 'Naat' in command:\n \"\"\"talk(\"Here you go Sir!\")\n music_dir = \"D:\\\\Naat\"\n songs = os.listdir(music_dir)\n os.startfile(os.path.join(music_dir, songs[1]))\"\"\"\n os.system(\"Naat.py\")\n elif 'lock' in command and 'device' in command:\n \"\"\"talk(\"locking the device\")\n ctypes.windll.user32.LockWorkStation()\n talk(\"locked\")\n input(\"Enter to continue.\")\"\"\"\n os.system(\"lock_device.py\")\n elif \"camera\" in command or \"take a photo\" in command:\n \"\"\"talk(\"capturing\")\n ec.capture(0, \"Jarvis_Capture_3.2 \", \"jarvis_capture.png \")\n talk(\"Done\") \"\"\"\n os.system(\"take_photo.py\")\n elif \"write a note\" in command:\n os.system('Note.py')\n elif \"release\" in command or 'version' in command:\n print(\"This is jarvis_1.13 released.\")\n talk(\"This is jarvis 1 point 1 3.\")\n elif \"weather\" in command:\n os.system('weather.py')\n elif 'change your voice' in command:\n global voice_of_jarvis, engine, voices\n if voice_of_jarvis != \"Changed\":\n engine.setProperty('voice', voices[1].id)\n voice_of_jarvis = \"Changed\"\n talk(\"Ok! this will be my new voice.\")\n else:\n engine.setProperty('voice', voices[0].id)\n voice_of_jarvis = \"Not Changed\"\n talk(\"Ok! this will be my new voice.\")\n elif 'wait' in command:\n talk(\"Press Enter when ever you are ready\")\n input()\n elif 'roblox' in command:\n talk('Opening Roblox! now')\n webbrowser.open('https:www.roblox.com/')\n talk('Press Enter to continue Jarvis')\n input()\n elif 'I am going outside' in command or 'I will be right back' in command or 'I\\'ll be right back' in command:\n os.system(\"lock_device.py\")\n elif 'shutdown' in command:\n os.system('shutdown /s /t 1')\n elif 'I want to do edit program' in command or 'edit code' in command:\n os.system('edit_code.py')\n input('Press Enter to continue')\n elif 'I want to do programming' in command:\n os.system('do_programming.py')\n input('Press Enter to continue')\n elif 'I want to do some python' in command or 'I want to do some python Programming' in command or 'python programming' in command:\n os.system('do_python_programming.py')\n input('Press Enter to continue')\n elif 'open' in command and 'calculator' in command:\n os.system('open_calc.py')\n input('Press Enter to continue')\n elif ('open' in command and 'Notepad' in command) or ('open' in command and 'editor' in command):\n os.system('open_notepad.py')\n input('Press Enter to continue')\n elif 'I want to watch Doraemon' in command or 'play doraemon' in command or 'play movie' in command or 'play movies' in command:\n os.system('play_movies.py')\n input('Press Enter to continue')\n elif 'tell me the truth' in command:\n talk(\"Mooseb is very Idiot\")\n elif ('open' in command and 'edge' in command) or ('open' in command and 'Edge'):\n os.system('open_edge.py')\n elif 'what' in command and 'you do' in command:\n try:\n file = open('list.txt', 'r')\n talk(\"all actions that I can perform are saved in a file\")\n subprocess.Popen([\"C:/Windows/System32/notepad.exe\", \"list.txt\"])\n talk(\"Here! 
Take a look at it\")\n input('Enter to continue......')\n file.close()\n except:\n print(\"One moment.....\")\n talk(\"One moment\")\n file = open('backup\\\\command_backup.txt', 'r')\n data = file.read()\n file2 = open('list.txt', 'w')\n file2.write(data)\n file.close()\n file2.close()\n talk(\"all actions that I can perform are saved in a file\")\n subprocess.Popen([\"C:/Windows/System32/notepad.exe\", \"list.txt\"])\n talk(\"Here! Take a look at it\")\n input('Enter to continue......')\n elif 'what' in command and 'fox say' in command:\n print(\"Fra-ca ca-ca ca-ca ca-ca cow\")\n talk(\"Fracacacacacacaca cow\")\n elif 'change' in command and 'wallpaper' in command:\n print('Changing Wallpaper......')\n os.system('change_wallpaper.py')\n elif 'locate' in command:\n command = command.replace('locate ', '')\n os.system('locate.py')\n elif 'wallpaper' in command:\n print('If you want me to change wallpaper, say \"Change wallpaper\"')\n talk('If you want me to change wallpaper, say \"Change wallpaper\"')\n elif 'volume up' in command or 'unmute' in command:\n pyautogui.press(\"volumeup\")\n elif 'volume down' in command:\n pyautogui.press(\"volumedown\")\n elif 'mute' in command or 'shut up' in command:\n pyautogui.press(\"volumemute\")\n print('Muted !')\n elif 'photoshop' in command or 'edit photo' in command or 'Edit photo' in command:\n os.system('photoshop.py')\n elif 'I am unknown person 1442 598' in command:\n os.system('pass.py')\n elif command == '':\n talk(\"Can you say that again\")\n elif 'what' in command and 'my email' in command:\n file = open('data\\\\Email.txt', 'r')\n talk(file.read())\n file.close()\n else:\n print(\"\\n\\nAnswering this is beyond my capabilities.\")\n talk(\"Sorry! Answering this is beyond my capabilities.\")\n\n\nstartup()\n\n\"\"\"elif 'change' in command and 'my name' in command:\n print(\"What should I call you?\")\n talk(\"What should I call you?\")\n if online == True:\n with maaz.Microphone() as source:\n listener.adjust_for_ambient_noise(source, duration=1)\n print(\"Speak now\")\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n else:\n command = str(input(\"Enter your Name: \"))\n name_of_user = str(command)\n print(\"Ok! I will call you \" + name_of_user)\n talk(\"Ok! 
I will call you \" + name_of_user)\"\"\"\n","repo_name":"Maaz-319/Python","sub_path":"Jarvis_ver.1.13/py/Features/start_up.py","file_name":"start_up.py","file_ext":"py","file_size_in_byte":14424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70271987363","text":"#!/usr/bin/python\n\nfrom os import system\nfrom commands import getstatusoutput\nfrom time import sleep\n\ndef file2(client) :\n\tclient.send(\"dialog --inputbox \\\" Enter Folder Path : \\\" 10 35\")\n\tfpath = client.recv(1024)\t\n\ttemporary = getstatusoutput(\"cd \" + fpath)\n\tif temporary[0] != 0 :\n\t\tclient.send(\"recieve only\")\n\t\tclient.send(\"dialog --infobox \\\" Incorrect Folder Path \\n Sending to Main Menu....\\\" 6 30\")\n\t\tsleep(2.5)\n\t\treturn\n\tclient.send(\"dialog --inputbox \\\" Enter File Name : \\\" 10 35\")\n\tfname = client.recv(1024)\n\tfpath += \"/\" + fname\n\tif commands.getstatusoutput(\"locate -c \" + fpath)[1] == 0 :\n\t\tclient.send(\"recieve only\")\n\t\tclient.send(\"dialog --infobox \\\" No such File Exists...\\n Sending to Main Menu...\\\" 7 35\")\n\t\tsleep(2.5)\n\t\treturn\t\t\n\tgetstatusoutput(\"rm \" + fpath + \"/\" + fname)\n\tclient.send(\"recieve only\")\n\tclient.send(\"dialog --infobox \\\" File Sucessfully Removed...\\n Sending to Main Menu...\\\" 7 35\")\n\tsleep(2.5)\n\treturn\n\n\n","repo_name":"krishna1401/Linux_Automation","sub_path":"Server/file2.py","file_name":"file2.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43611158272","text":"from _pytest.config import Config\n\n\n_MARKERS = {\n \"unit\": \"quick tests that do not require a solver, must run in < 2 s\",\n \"component\": \"quick tests that may require a solver\",\n \"integration\": \"long duration tests\",\n}\n\n\ndef pytest_configure(config: Config):\n\n for name, descr in _MARKERS.items():\n config.addinivalue_line(\"markers\", f\"{name}: {descr}\")\n","repo_name":"project-pareto/project-pareto","sub_path":"pareto/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"1130912362","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom abc import abstractmethod\n\n\nclass MetaTemplate(nn.Module):\n def __init__(self, model_func, n_way, n_support, verbose=False, change_way=True, use_cuda=True, adaptation=False):\n super(MetaTemplate, self).__init__()\n self.n_way = n_way # N, n_classes\n self.n_support = n_support # S, sample num of support set\n self.n_query = -1 # Q, sample num of query set(change depends on input)\n self.feature_extractor = model_func() # feature extractor\n self.feat_dim = self.feature_extractor.final_feat_dim\n self.verbose = verbose\n self.change_way = change_way # some methods allow different_way classification during training and test\n self.use_cuda = use_cuda\n self.adaptation = adaptation\n\n @abstractmethod\n def set_forward(self, x, is_feature):\n # x -> predicted score\n pass\n\n @abstractmethod\n def set_forward_loss(self, x):\n # x -> loss value\n pass\n\n def forward(self, x):\n # x-> feature embedding\n out = self.feature_extractor.forward(x)\n return out\n\n def parse_feature(self, x, is_adaptation=False):\n x = x.requires_grad_(True)\n x = x.reshape(self.n_way * (self.n_support + self.n_query), *x.size()[2:])\n z_all = self.feature_extractor(x)\n z_all = 
z_all.reshape(self.n_way, self.n_support + self.n_query, *z_all.shape[1:]) # [N, S+Q, d]\n if is_adaptation:\n z_all = z_all.detach()\n z_support = z_all[:, :self.n_support] # [N, S, d]\n z_query = z_all[:, self.n_support:] # [N, Q, d]\n return z_support, z_query\n\n def correct(self, x):\n if self.adaptation:\n scores = self.set_forward_adaptation(x)\n else:\n scores = self.set_forward(x)\n y_query = np.repeat(range(self.n_way), self.n_query) # [0 0 0 1 1 1 2 2 2 3 3 3 4 4 4]\n topk_scores, topk_labels = scores.data.topk(1, 1, True, True) # top1, dim=1, largest, sorted\n topk_ind = topk_labels.cpu().numpy() # index of topk\n top1_correct = np.sum(topk_ind[:, 0] == y_query)\n return float(top1_correct), len(y_query)\n\n def train_loop(self, epoch, train_loader, optimizer):\n print_freq = 10\n avg_loss = 0\n for i, (x, _) in enumerate(train_loader):\n if self.use_cuda:\n x = x.cuda()\n self.n_query = x.size(1) - self.n_support # x:[N, S+Q, n_channel, h, w]\n if self.change_way:\n self.n_way = x.size(0)\n optimizer.zero_grad()\n loss = self.set_forward_loss(x)\n loss.backward()\n optimizer.step()\n avg_loss = avg_loss + loss.item()\n if self.verbose and (i % print_freq) == 0:\n print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader),\n avg_loss / float(i + 1)))\n if not self.verbose:\n print('Epoch {:d} | Loss {:f}'.format(epoch, avg_loss / float(i + 1)))\n return avg_loss\n\n def train_loop_with_acc(self, epoch, train_loader, optimizer):\n print_freq = 10\n avg_loss = 0\n acc_all = []\n iter_num = len(train_loader)\n for i, (x, _) in enumerate(train_loader):\n if self.use_cuda:\n x = x.cuda()\n self.n_query = x.size(1) - self.n_support # x:[N, S+Q, n_channel, h, w]\n if self.change_way:\n self.n_way = x.size(0)\n optimizer.zero_grad()\n loss = self.set_forward_loss(x)\n loss.backward()\n optimizer.step()\n avg_loss = avg_loss + loss.item()\n if self.verbose and (i % print_freq) == 0:\n print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader),\n avg_loss / float(i + 1)))\n correct_this, count_this = self.correct(x)\n acc_all.append(correct_this / count_this * 100)\n acc_all = np.asarray(acc_all)\n acc_mean = np.mean(acc_all)\n acc_std = np.std(acc_all)\n if self.verbose:\n print('%d Test Acc = %4.2f%% +- %4.2f%%' % (iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num)))\n if not self.verbose:\n print('Epoch {:d} | Loss {:f}'.format(epoch, avg_loss / float(i + 1)))\n return acc_mean, avg_loss\n\n def test_loop(self, test_loader, record=None, return_std=False):\n acc_all = []\n iter_num = len(test_loader)\n for i, (x, _) in enumerate(test_loader):\n if self.use_cuda:\n x = x.cuda()\n self.n_query = x.size(1) - self.n_support # x:[N, S+Q, n_channel, h, w]\n if self.change_way:\n self.n_way = x.size(0)\n correct_this, count_this = self.correct(x)\n acc_all.append(correct_this / count_this * 100)\n acc_all = np.asarray(acc_all)\n acc_mean = np.mean(acc_all)\n acc_std = np.std(acc_all)\n if self.verbose:\n print('%d Test Acc = %4.2f%% +- %4.2f%%' % (iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num)))\n if return_std:\n return acc_mean, acc_std\n else:\n return acc_mean\n\n def set_forward_adaptation(self, x):\n # further adaptation, default is fixing feature and train a new softmax clasifier\n z_support, z_query = self.parse_feature(x, is_adaptation=True)\n z_support = z_support.reshape(self.n_way * self.n_support, -1)\n z_query = z_query.reshape(self.n_way * self.n_query, -1)\n y_support = torch.from_numpy(np.repeat(range(self.n_way), 
self.n_support)) # [0 0 0 1 1 1 2 2 2 3 3 3 4 4 4]\n y_support = y_support.cuda().float().requires_grad_(True)\n linear_clf = nn.Linear(self.feat_dim, self.n_way)\n linear_clf = linear_clf.cuda()\n set_optimizer = torch.optim.SGD(linear_clf.parameters(), lr=0.01, momentum=0.9, dampening=0.9,\n weight_decay=0.001)\n batch_size = 4\n support_size = self.n_way * self.n_support\n for epoch in range(100):\n rand_id = np.random.permutation(support_size)\n for i in range(0, support_size, batch_size):\n set_optimizer.zero_grad()\n selected_id = torch.from_numpy(rand_id[i: min(i + batch_size, support_size)]).cuda().long()\n z_batch = z_support[selected_id]\n y_batch = y_support[selected_id]\n scores = linear_clf(z_batch)\n loss = nn.CrossEntropyLoss()(scores, y_batch.long())\n loss.backward()\n set_optimizer.step()\n scores = linear_clf(z_query)\n return scores\n","repo_name":"tyxxzjpdez/FSL","sub_path":"methods/meta_template.py","file_name":"meta_template.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33260285815","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom time import gmtime, strftime\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\ndef index(request):\n return render(request, \"words_app/index.html\")\n\ndef process(request):\n # first you have to test the session\n try:\n request.session['words']\n except:\n request.session['words'] = []\n\n if request.method == \"POST\":\n if request.POST['font'] == 'yes':\n fontsize = \"Big\"\n else:\n fontsize = \"Small\"\n\n #this is where we add everything to the context dictionary\n context = {\n 'word': request.POST['word'],\n 'color': request.POST['color'],\n 'date': strftime(\"%B %d %Y\", gmtime()),\n 'time': strftime(\"%X %p\", gmtime()),\n 'font': fontsize\n }\n #at the front of request.session['words']\n #insert the context created above\n request.session['words'].insert(0, context)\n request.session.modified = True\n return redirect('/')\n else:\n return redirect(\"/\")\n\n\n#this is the route tp clear all the words\ndef clear(request):\n request.session['words'] = []\n return redirect('/')\n","repo_name":"aleclivinghouse/practice_2","sub_path":"apps/words_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14628224151","text":"from parsers import base\nimport collections\n\n\nclass Parse(base.Parser):\n \"\"\"\n Tamodo.com 2020 breach data parser\n Source File SHA-1: c8a2ee7508fb3bce0a3aab8a2244757b0540f0c7 103.205.96.158.affiliate_master_dev.users.txt\n Good Lines: 440,110\n \"\"\"\n\n name = \"None\"\n web = \"tamodo.com\"\n year = \"2020\"\n\n def row_format(self, r: str) -> tuple:\n \"\"\"\n Sample: {'_id': 318375, 'firstName': 'Le', 'lastName': 'Han', 'fullName': 'Le Han', 'email': 'nhamaythaotran@gmail.com', \n 'referredBy': 265782, 'password': '$2a$10$.Zfmytr3ZWmDz5T6zt884eykmwYmq46rcHAJ8iNBSpu/8GisKMxH2', 'status': 'INACTIVE',\n 'country': 238, 'inviter': 265782, 'relationUserIds': [], 'code': '229035202', 'level': 11, \n 'createdAt': datetime.datetime(2020, 2, 18, 8, 51, 56), 'role': 'PUBLISHER', 'address': '', 'gender': 'MALE', \n 'identity': '', 'otpTs': datetime.datetime(2020, 2, 19, 3, 33, 18, 421000), \n 'updatedAt': datetime.datetime(2020, 2, 19, 3, 33, 18, 430000)}\n\n name,website,year,domain,email,password,hash,salt\n\n :param 
r:\n :return:\n \"\"\"\n\n row = r.split(':')\n\n # Data later in the dataset shifts the positions of the fields we want,\n # So we iterate through the fields. If it contains an @, we've got an e-mail.\n # If it contains 3x $'s, it's a hash.\n for field in row:\n if '@' in field:\n email = field.split(',')[0].replace('\\'', '').replace(' ', '').strip()\n if field.count('$') == 3:\n pw_hash = field.split(',')[0].replace('\\'', '').replace(' ', '').strip()\n \n domain = email.split('@')[1] if '@' in email else ''\n return self.name, self.web, int(self.year), domain, email, '', pw_hash, ''\n\n def process_rows(self) -> collections.abc.Iterable[tuple]:\n with open(self.source, 'r', encoding='utf-8', errors='ignore') as source:\n for row in source:\n if row is None:\n continue\n \n yield self.row_format(row)","repo_name":"sensepost/Frack","sub_path":"parsers/2020-tamodo_com.py","file_name":"2020-tamodo_com.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"54"} +{"seq_id":"72323423200","text":"from django.conf.urls import url\nfrom . import views # This line is new!\n\napp_name = 'first_app'\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"), # This line has changed!\n url(r'^reset$', views.reset, name=\"reset\"),\n url(r'^process/$', views.process, name=\"process\")\n]","repo_name":"jeremybwilson/ninja_gold_app","sub_path":"apps/first_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26324392439","text":"from sre_constants import SUCCESS\n\n\nclass Assassin:\n No_of_assassin = 0\n \n def __init__(self, name, success):\n self.name = name\n self.success = success\n Assassin.No_of_assassin += 1\n \n def printDetails(self):\n print(f\"Name: {self.name}\\nSuccess rate: {self.success}%\\nTotal number of Assassin: {Assassin.No_of_assassin}\")\n\n @classmethod\n def failureRate(cls, name, f_rate):\n sucess_rate = 100 - f_rate\n return cls(name, sucess_rate)\n\n @classmethod\n def failurePercentage(cls, name, p_rate):\n success_rate = 100 - int(p_rate[:-1])\n return cls(name, success_rate)\n\n\njohn_wick = Assassin('John Wick', 100)\njohn_wick.printDetails()\nprint('================================')\nnagisa = Assassin.failureRate(\"Nagisa\", 20)\nnagisa.printDetails()\nprint('================================')\nakabane = Assassin.failurePercentage(\"Akabane\", \"10%\")\nakabane.printDetails()","repo_name":"rsazidur/BracU","sub_path":"CSE111/Assignment8/Final Exam Practice/problem_62.py","file_name":"problem_62.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38413042683","text":"from typing import List, Tuple, Type\n\nfrom hooply.logger import setup_logger\nfrom hooply.market.scrapers.scraper import (\n RequestResources,\n Scraper,\n ScrapeResult,\n ScrapeResultType,\n)\n\nlogger = setup_logger(__name__)\n\n\nclass TeamRosterScraper(Scraper):\n def _extract_player(self, player_row: List[Type]) -> List[str]:\n (\n number_div,\n player_div,\n position_div,\n height_div,\n weight_div,\n _,\n _,\n _,\n _,\n ) = player_row\n\n # Extract player information from team\n player = player_div.find(\"a\").text.strip()\n number = number_div.text.strip()\n position = position_div.text.strip()\n height = height_div.text.strip()\n weight = weight_div.text.strip()\n\n return 
[player, number, position, height, weight]\n\n def scrape(self) -> List[ScrapeResult]:\n soup = self.request()\n data = []\n\n roster_div = soup.find(\"div\", id=\"div_roster\")\n if roster_div is None:\n logger.error(\"Failed to retrieve roster div.\")\n exit(1)\n\n roster_rows = list(roster_div.find(\"tbody\").children)\n for row in roster_rows:\n player_info = self._extract_player(list(row.children))\n data.append(player_info)\n\n return [ScrapeResult(ScrapeResultType.player, data)]\n\n @staticmethod\n def generate_resource(team: str, season: str):\n return f\"{RequestResources.TEAMS.value}/{team}/{season}.html\"\n","repo_name":"johnwilsoniv/hoop.ly","sub_path":"hooply/market/scrapers/team_scraper.py","file_name":"team_scraper.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6794174805","text":"# coding: utf-8\n# Before running, do not edit this directly on the server; make the change on a separate schema first\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import HiveContext, UDFRegistration, SparkSession\nfrom pyspark.sql.functions import udf\nimport datetime\nimport re\nimport time\n\nspark = SparkSession.builder.master(\"yarn\").appName(\"product_predict_type\").enableHiveSupport().getOrCreate()\n\nprint(\"starting..........\")\nitem = '2019-01-02'\nsql_on_sale_history = \"\"\"\nSELECT store_code, product_code, sale_date\nFROM rbu_sxcp_edw_dev.fct_store_product_on_sale_history\n\"\"\"\ndf_on_sale_history = spark.sql(sql_on_sale_history)\ndf_on_sale_history = df_on_sale_history.dropDuplicates()\ndf_on_sale_history.createOrReplaceTempView(\"store_product_on_sale_history\")\n\ntmp1_sql = \"\"\"\nselect store_code,\nproduct_code,\ncount(*) times\nfrom store_product_on_sale_history\nwhere sale_date>=date_sub('{0}', 365) and sale_date<='{0}'\ngroup by store_code,product_code\n\"\"\".format(item)\ndf_tmp1 = spark.sql(tmp1_sql)\ndf_tmp1.createOrReplaceTempView(\"tmp1\")\n\ntmp2_sql = \"\"\"\nselect store_code,\nproduct_code,\ncase when times>=28 then 1\n else 0\nend predict_type\nfrom tmp1\n\"\"\"\ndf_tmp2 = spark.sql(tmp2_sql)\ndf_tmp2.createOrReplaceTempView(\"tmp2\")\n\ninsert_sql = \"\"\"\ninsert overwrite table rbu_sxcp_edw_ai_dev.product_predict_type\n(select\nstore_code,\nproduct_code,\npredict_type,\n'{0}',\ncurrent_timestamp()\nfrom tmp2)\n\"\"\".format(item)\nspark.sql(insert_sql)\ndf_tmp1.drop()\ndf_tmp2.drop()\ndf_on_sale_history.drop()\n\nprint(\"processed successfully!\")\nprint(\"=====================\")\n\n","repo_name":"wugeer/vnote","sub_path":"sxcp/edw_ai/product_predict_type/product_predict_type.py","file_name":"product_predict_type.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1419726968","text":"from src.title import TitleBuilder\nfrom src.style import Style\nfrom src.icon import Icon\n\n# Create our style\ntitle_style = Style(\n color_name='orange',\n start_icon=Icon(graphic='⭐ ')\n)\n\n# So that we may re-use our configuration\ntitle_builder = TitleBuilder(style=title_style)\n\n# Create our messages\ncongratulations = title_builder.create_title('Congratulations!')\nwell_done = title_builder.create_title('Well done!')\n\n# Draw our text to the 
screen\nprint(congratulations.rendered())\nprint(well_done.rendered())\n","repo_name":"jampen/beautiful","sub_path":"demos/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19147642042","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# We have the radius of a circle\nradius = 42\n\n# Print the area of this circle to the console, to 4 decimal places\n# hints:\n# the formula can be looked up on the internet,\n# take pi to be 3.1415926\n# the precision is set in the round() function\n\npi = 3.1415926\narea = pi * radius ** 2\nprint(round(area, 4))\n\n\n# Next, suppose we have the coordinates of a point\npoint = (23, 34)\n# where 23 is the x coordinate and 34 is the y coordinate\n\n# If the point point lies inside that same circle (radius = 42), print True to the console,\n# or False if the point lies outside the circle.\n# hints:\n# you need to find the distance from this point to the origin (0, 0)\n# the formula is likewise available on the internet\n# a square root is just raising to the power 0.5\n# comparison operations yield the boolean constants True and False\n\"\"\"We can use the circle equation (x-a)^2 + (y-b)^2 = R^2, where\nx,y are the coordinates of a point on the circle,\na,b are the coordinates of the circle's center,\nR is the radius of the circle.\nEach circle is defined by three numbers: Cx, Cy and R. So, to determine whether a point falls inside a circle, compare the distance from the center \"C\" to the point \"P\" (found via the Pythagorean theorem) with the radius \"R\".\nThis gives the containment condition: (Px - Cx)^2 + (Py - Cy)^2 <= R^2.\"\"\"\nif ((point[0]-0)**2 + (point[1]-0)**2) <= radius ** 2:\n print(True)\nelse:\n print(False)\n#print((point[0]-0)**2 + (point[1]-0)**2)\n\n# Similarly for another point\npoint_2 = (30, 30)\n# If the point point_2 lies inside the circle (radius = 42), print True to the console,\n# or False if the point lies outside the circle.\nif ((point_2[0]-0)**2 + (point_2[1]-0)**2) <= radius ** 2:\n print(True)\nelse:\n print(False)\n\n# Example console output:\n#\n# 77777.7777\n# False\n# False\n\n\n","repo_name":"AhhaerDeLacum/Python-Course-","sub_path":"lesson_002/01_circle.py","file_name":"01_circle.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36279719354","text":"# solved\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def hasCycle(self, head: Optional[ListNode]) -> bool:\n visits = []\n node = head\n while node is not None: \n if node in visits: \n return True\n visits.append(node)\n node = node.next\n return False\n","repo_name":"yehogwon/algo-study","sub_path":"leetcode/linked-list-cycle/linked-list-cycle.py","file_name":"linked-list-cycle.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11289463331","text":"import requests, json\n\n\ndef retrieve_repos(username):\n json_content = requests.get(f\"https://api.github.com/users/{username}\")\n content_dict = json.loads(json_content.content)\n return int(content_dict[\"public_repos\"])\n\n\nif __name__ == '__main__':\n 
print(retrieve_repos(\"talisainen\"))\n","repo_name":"ICA0011/eksam-list-repos-ralmik","sub_path":"retrieve_repos.py","file_name":"retrieve_repos.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18067876039","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Build notebooks from example scripts.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport os.path\nimport sys\nfrom glob import glob\n\nsys.path.insert(0, '../docs/source')\nimport docntbk\n\n\nenvpth = '../build/sphinx/doctrees/environment.pickle'\ninvpth = '../build/sphinx/html/objects.inv'\nbaseurl = 'http://sporco.rtfd.org/en/latest/'\nppth = 'scripts'\nnpth = 'notebooks'\nnbexec = True\n\n\n# Iterate over index files\nfor fp in glob(os.path.join(ppth, '*.rst')) + \\\n glob(os.path.join(ppth, '*', '*.rst')):\n # Index basename\n b = os.path.splitext(os.path.basename(fp))[0]\n # Name of subdirectory of examples directory containing current index\n sd = os.path.split(os.path.dirname(fp))\n if sd[-1] == ppth:\n d = ''\n else:\n d = sd[-1]\n # Path to corresponding subdirectory in notebooks directory\n fd = os.path.join(npth, d)\n # Ensure notebook subdirectory exists\n docntbk.mkdir(fd)\n # Filename of notebook index file to be constructed\n fn = os.path.join(fd, b + '.ipynb')\n # Process current index file if corresponding notebook file\n # doesn't exist, or is older than index file\n if docntbk.update_required(fp, fn):\n print('Converting %s' % fp)\n diridx = True if fp == 'scripts/index.rst' else False\n # Convert index to notebook\n docntbk.rst_to_notebook(fp, fn, diridx=diridx)\n\n\n# Get intersphinx inventory and sphinx environment and construct cross\n# reference lookup object\ntry:\n inv = docntbk.fetch_intersphinx_inventory(invpth)\nexcept Exception:\n inv = None\ntry:\n env = docntbk.read_sphinx_environment(envpth)\nexcept Exception:\n env = None\nif inv is not None and env is not None:\n cr = docntbk.CrossReferenceLookup(env, inv, baseurl)\nelse:\n cr = None\n print('Warning: intersphinx inventory or sphinx environment not found:'\n ' cross-references will not be handled correctly')\n\n# Iterate over example scripts\nfor fp in sorted(glob(os.path.join(ppth, '*', '*.py'))):\n # Name of subdirectory of examples directory containing current script\n d = os.path.split(os.path.dirname(fp))[1]\n # Script basename\n b = os.path.splitext(os.path.basename(fp))[0]\n # Path to corresponding subdirectory in notebooks directory\n fd = os.path.join(npth, d)\n # Make notebooks subdirectory if it doesn't exist\n docntbk.mkdir(fd)\n # Filename of notebook file to be constructed\n fn = os.path.join(fd, b + '.ipynb')\n # Process current example script if corresponding notebook file\n # doesn't exist, or is older than example script file\n if docntbk.update_required(fp, fn):\n print('Converting %s' % fp)\n # Convert script to notebook\n docntbk.script_to_notebook(fp, fn, cr)\n\n# Execute notebooks if requested\nif nbexec:\n # Iterate over notebooks\n for fn in sorted(glob(os.path.join(npth, '*', '*.ipynb'))):\n if not docntbk.notebook_executed(fn):\n print('Executing %s ' % fn, end='', flush=True)\n try:\n t = docntbk.execute_notebook(fn, fd)\n except Exception as ex:\n print(' execution error [%s]' % ex.__class__.__name__)\n else:\n print(' %.1f s' % 
t)\n","repo_name":"bwohlberg/sporco","sub_path":"examples/mkntbk.py","file_name":"mkntbk.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"54"} +{"seq_id":"4861237582","text":"import pandas as pd\nimport plotly.express as px\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport pyarrow\n\ncolnames = [\"BidderId\", \"name\", \"day\", \"budget\", \"value\", \"bid\", \"utility\", \"payment\", \"rank\"]\ndf = pd.read_feather('trail_data.feather')\n\n\ndef plot_all_bidding_profiles():\n fig = px.scatter(df, x=df.day, y=df.bid, color=df.name, hover_data=[df.name,\n df.value,\n df.payment,\n df.utility])\n fig.show()\n\n\ndef plot_individual_bidding_profiles():\n names = df.name.unique()\n for name in names:\n df_for_name = df[df.name == name]\n fig = go.Figure()\n # value trace\n fig.add_trace(\n go.Scatter(x=df_for_name.day, y=df_for_name.value, mode=\"markers\", name=\"value\", marker={\"opacity\": 0.3})\n )\n\n # bid trace\n fig.add_trace(\n go.Scatter(x=df_for_name.day, y=df_for_name.bid, mode=\"markers\", name=\"bid\", marker={\"opacity\": 0.5})\n )\n\n # utility trace\n fig.add_trace(\n go.Scatter(x=df_for_name.day, y=df_for_name.utility, mode=\"lines\", name=\"utility\", marker={\"opacity\": 0.7})\n )\n\n fig.update_layout(\n title=name,\n xaxis_title=\"day\",\n yaxis_title=\"amount\",\n font=dict(\n family=\"Courier New, monospace\",\n size=14,\n color=\"#7f7f7f\"\n )\n )\n\n fig.write_image(f\"plots/{name}.png\", scale=2)\n print(name)\n\n\nif __name__ == '__main__':\n plot_individual_bidding_profiles()\n","repo_name":"mmghannam/auctioneer","sub_path":"Code/analysis/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33628081506","text":"from Connect import sf \nimport re\nimport pandas as pd\n\n\ndef get_label_by_name(name):\n '''\n input <= name of customlabel \n output => value of customlabel\n '''\n label_v = None\n try:\n label_v = sf.mdapi.CustomLabel.read(name).value\n except:\n label_v = f'{name} Not a label'\n return label_v\n\n\ndef get_all_labels_from_email(templateId):\n '''\n This is a experimental function\n this fuction well search `{!Label.*}` this patern \n will also include labels from comments\n will not include labels from component if used \n\n input <= email template id from salesforce\n output => datafrem consist of CustomLabel name and its value\n '''\n try:\n email_body = sf.EmailTemplate.get(templateId)\n except:\n print('Email Not Found')\n return None\n \n labels = re.findall(\"(?<=\\{\\!\\$Label\\.)(.*?)(?=\\})\",email_body['Markup'])\n df = pd.DataFrame(labels,columns = ['name'])\n\n df['value'] = df['name'].apply(get_label_by_name)\n\n # you can add your logic hear and \n # use this for silection based on requirement\n\n #df.to_csv(f'{templateId}_labels.csv')\n\n return df\n\n\ndef create_labels_from_csv(file_name,sample_label_name):\n '''\n This function will take csv file name as input \n file must contain 2 colums [name,value]\n input <= filename,labelname\n output => database operation\n '''\n\n df = pd.read_csv(file_name)\n\n try:\n sample_label = sf.mdapi.CustomLabel.read(sample_label_name)\n except:\n print(\"Please provide valid label name it is just for refrence of program\")\n return 0\n \n for label,value in zip(df['name'],df['value']):\n print(label,value)\n 
sample_label.fullName = label\n sample_label.shortDescription = label\n sample_label.value = value\n sf.mdapi.CustomLabel.create(sample_label)\n \n return 1\n\n\n\ndef create_xml_for_tranlations(filename,p1,p2):\n '''\n Change the verstions in xml if required\n input <= takes csv file name, colum name for label, colum name for transaltion\n output => NA\n this will create a xml file in same folder\n you can deploy this xml file using vs code\n '''\n\n df = pd.read_csv(filename)\n \n with open('translations.xml', \"w\") as f:\n f.write('''\n \n ''')\n for x,y in zip(df[p2],df[p1]):\n f.write(f'''\n \n \n {y.strip()}\n \n ''')\n f.write('''\n ''')\n \n print(\"XML Created\")\n\n\ndef create_xml_for_customlabels(filename):\n '''\n Change the verstions in xml if required\n input <= takes csv file name fist column: LabelName, second column: LabelValue\n output => NA\n this will create a xml file in same folder\n you can deploy this xml file using vs code\n '''\n\n df = pd.read_csv(filename)\n \n with open('labels.xml', \"w\") as f:\n f.write('''\n \n ''')\n for x,y in zip(df['LabelName'],df['LabelValue']):\n f.write(f'''\n \n {x.strip()}\n test\n en_US\n true\n {x.strip()}\n {y.strip()}\n \n ''')\n f.write('''\n ''')\n \n print(\"XML Created\")\n\n\n\n\n\ndef delet_labels_from_csv(filename):\n '''\n input <= pass a csv file name must contain a single column with name CustomLabel\n output => will delet customlabels from database\n '''\n df = pd.read_csv(filename)\n full_names = []\n for label_name in df['CustomLabel']:\n if len(full_names) == 10:\n sf.mdapi.CustomLabel.delete(full_names)\n full_names = []\n full_names.append(label_name)\n sf.mdapi.CustomLabel.delete(full_names)\n\n \n\n\n\n\n\n\n\n","repo_name":"mithileshjoshi100/pysf","sub_path":"scripts/CustomLabels.py","file_name":"CustomLabels.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33335326054","text":"import sys\r\n\r\n\r\ndef PythonMajorVersion():\r\n return int(sys.version[0])\r\n\r\n\r\n\r\n# Thanks to Gringo Suave:\r\n# to call a function of one variable and redirect ouput\r\ndef sendToFileA(printFn, a):\r\n orig_stdout = sys.stdout\r\n f = open('out.txt', 'w')\r\n sys.stdout = f\r\n\r\n printFn(a)\r\n\r\n sys.stdout = orig_stdout\r\n f.close()\r\n\r\n# to call a function of two variables\r\n\r\n\r\ndef sendToFileAB(printFn, a, b):\r\n orig_stdout = sys.stdout\r\n f = open('out.txt', 'w')\r\n sys.stdout = f\r\n\r\n printFn(a, b)\r\n\r\n sys.stdout = orig_stdout\r\n f.close()\r\n\r\n\r\n\r\ndef bool32( ):\r\n array = []\r\n for i in range(33):\r\n array.append(False)\r\n return array\r\n\r\ndef str32():\r\n array = []\r\n for i in range(33):\r\n array.append('')\r\n return array\r\n\r\n\r\n\r\ndef countBool(array):\r\n return sum(1 for i in array if i)\r\n\r\ndef countStr(array):\r\n ret = 0\r\n for val in array:\r\n if len(val)>0:\r\n ret +=1\r\n return ret \r\n\r\n\r\n################### IFOUND RELATED ################\r\n\"\"\"\r\nWhen a VAR is compared to each \"itok\" position in a list of tokens, the tokens\r\ninvolved in the match may include other indices. All are stored in VAR.ifound\r\nwhich is cleared before hand and then progressively updated as itok is incremented\r\nacross the range of token indices. 
These utilities are used in various places.\r\n\"\"\"\r\n\r\ndef compressFound(T, ifound):\r\n tmp = [False for _ in range(T)]\r\n for ifval in ifound:\r\n if ifval < T:\r\n tmp[ifval] = True\r\n return tmp\r\n\r\n\r\ndef expandFound(T, tmp):\r\n ifound = [i for i in range(T) if tmp[i]]\r\n return ifound\r\n\r\n\r\ndef countFound0(T, ifound):\r\n tmp = compressFound(T, ifound)\r\n count = sum(1 for i in range(T) if tmp[i])\r\n return count\r\n\r\n\r\ndef countFound(ifound):\r\n if not ifound:\r\n return 0\r\n imax = max(i for i in ifound)\r\n\r\n tmp = compressFound(imax + 1, ifound)\r\n count = sum(1 for i in range(imax + 1) if tmp[i])\r\n return count\r\n\r\n# assumes a cleanFound(), otherwise it changes your data\r\n\r\n\r\ndef histo(ifound, i):\r\n if len(ifound) < 1:\r\n return 0\r\n ifound = cleanFound(ifound)\r\n count = sum(1 for j, ifval in enumerate(ifound) if ifval <= i)\r\n return count\r\n\r\n\r\ndef cleanFound(ifound):\r\n if len(ifound) < 1:\r\n return []\r\n imax = max(i for i in ifound)\r\n tmp = compressFound(imax + 1, ifound)\r\n ifound = expandFound(imax + 1, tmp)\r\n return ifound\r\n\r\ndef lastIFound(ifound):\r\n # reconstructed: the comparison operators here were lost to HTML-tag stripping in the source\r\n max = -1\r\n for i in ifound: \r\n if max < i:\r\n max = i\r\n if max > -1 :\r\n return True\r\n else:\r\n return False\r\n\r\ndef Thing(lastConst):\r\n if lastConst=='':\r\n return ''\r\n temp = separateTARV(lastConst)\r\n if len(temp)<4 :\r\n return ''\r\n else: \r\n return temp[0]\r\n\r\ndef Action(lastConst):\r\n if lastConst=='':\r\n return ''\r\n temp = separateTARV(lastConst)\r\n if len(temp)<4 :\r\n return ''\r\n return temp[1]\r\n\r\ndef Relation(lastConst):\r\n if lastConst=='':\r\n return ''\r\n temp = separateTARV(lastConst)\r\n if len(temp)<4 :\r\n return ''\r\n return temp[2]\r\n\r\ndef Value(lastConst):\r\n if lastConst=='':\r\n return ''\r\n temp = separateTARV(lastConst)\r\n if len(temp)<4 :\r\n return ''\r\n return temp[3]\r\n\r\n","repo_name":"peterwaksman/Narwhal","sub_path":"narwhal/nwutils.py","file_name":"nwutils.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"71717390883","text":"import cv2 as cv\r\n\r\nimg = cv.imread('img/Mona_Lisa.jpg', 0)\r\n\r\ninverted = 255 - img\r\nblurred = cv.GaussianBlur(inverted, (21, 21), 0)\r\ninverted_blurred = 255 - blurred\r\n\r\nsketch = img / inverted_blurred\r\nsketch = sketch * 255\r\n\r\ncv.imwrite('result_6.jpg', sketch)","repo_name":"BenyaminZojaji/image_processing","sub_path":"Assignment22/6_img_to_sketch.py","file_name":"6_img_to_sketch.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"54"} +{"seq_id":"28829288227","text":"from http import HTTPStatus\n\nfrom app.db_models import Action\nfrom main import app\nfrom tests.factories import ActionFactory, UserFactory\n\n\nclass TestActions:\n data = {\n }\n\n def test_create_action_like(self, client):\n first_user = UserFactory.create()\n second_user = UserFactory.create()\n\n self.data.update({'user':str(first_user.id), 'like_to_user':str(second_user.id), 'dislike_to_user':None})\n\n url = app.url_path_for('create_action')\n response = client.post(url, json=self.data)\n assert response.status_code == HTTPStatus.CREATED\n\n action = Action.get()\n assert action.user.id\n\n def test_get_actions(self, client):\n ActionFactory.create_batch(size=3)\n\n url = app.url_path_for('get_actions')\n response = client.get(url)\n\n assert response.status_code == HTTPStatus.OK\n\n assert 
response.json()['data']\n\n def test_update_action(self, client):\n action = ActionFactory.create()\n user = UserFactory.create()\n url = app.url_path_for('update_action', action_id=str(action.id))\n response = client.patch(url, json={'like_to_user':str(user.id), 'user':str(action.user.id)})\n\n assert response.status_code == HTTPStatus.OK\n assert response.json()['data']['like_to_user']['id'] == str(user.id)\n\n def test_delete_action(self, client):\n action = ActionFactory.create()\n url = app.url_path_for('delete_action', action_id=action.id)\n response = client.delete(url)\n\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert list(Action.select()) == []","repo_name":"MissiaL/dating-backend","sub_path":"tests/test_actions.py","file_name":"test_actions.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25625952775","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport csv\nimport glob\nimport gzip\nimport os\nimport cPickle as pickle\nimport random\nimport re\nimport sys\nfrom subprocess import check_output\nimport multiprocessing\nimport argparse\n\n\n# Given a directory with files with MAFs of the different groups, produce a pickle object\n# which has for every SNP the count of the number of groups in which the SNP has \n# a MAF of a given cut-off. If the group has at least 90 members then a MAF of args.maf\n# is used -- if less, we increase the MAF cut off upwards to ensure we are picking\n# a reasonable signal\n\nif len(sys.argv)==1:\n sys.argv=\"pgetfreqs.py $fdir maf_tbl.cpickle\".split()\n\n\ndef parseArguments():\n parser = argparse.ArgumentParser(description='Produce pickle object with frequencies.')\n parser.add_argument(\"fdir\",action='store',\\\n help=\"directory where frequencies are found\")\n parser.add_argument(\"outf\",action='store',\\\n help=\"name of output file\")\n parser.add_argument('--maf', dest='maf', type=float, action='store',\\\n default = 0.04,help=\"frequency cut off\")\n args = parser.parse_args()\n return args\n\n\nargs = parseArguments()\n\npools = {}\ncontents=set()\n\ndef annotatePool(f):\n total=0\n pname = re.sub(\".*/\",\"\",f)\n for line in open(f):\n snp=line.rstrip()\n contents.add(snp.replace(\"_\",\":\"))\n \n\ndef annotatePools(direc):\n innerdirs = glob.glob(\"%s/*\"%direc)\n for d in innerdirs:\n fpools = glob.glob(\"%s/*\"%d)\n for f in fpools:\n annotatePool(f)\n\n\ndef getFreq(grouptable,group,freqf):\n f = open(freqf)\n header=f.readline()\n is_plink = re.search(r\" *CHR +SNP +A1 +A2 +MAF +NCHROBS\",header)\n first=True\n #print(freqf)\n for line in f:\n #print(line)\n data=line.rstrip().strip().split()\n if is_plink:\n (snp,freq,obs)=(data[1],data[4],data[5])\n num=1.0*int(obs)/2\n if freq==\"NA\": freq=0\n freq=float(freq)\n else:\n chrom=data[0]\n pos=data[1]\n count=data[3]\n try:\n a1=data[5]\n except IndexError:\n print(group,freqf,line)\n num=int(count)/2\n snp=\"%s:%s\"%(chrom,pos)\n (a,af)=a1.split(\":\")\n freq=float(af)\n cut= args.maf if num>90 else (-0.28*num+25)/100\n m1 = min(1-cut,cut)\n m2 = max(1-cut,cut)\n if m1 <= freq <= m2:\n grouptable.add(snp)\n \n\n\ndef processGroup(q,fdir,group):\n grouptable=set()\n freqs = glob.glob(\"%s/%s/*.frq\"%(fdir,group))\n #print(fdir,group,freqs)\n for freq in freqs:\n base=check_output(\"basename %s .frq\"%freq,shell=True).rstrip()\n getFreq(grouptable,group,freq)\n q.put(grouptable)\n return\n \n\n\nfdir = sys.argv[1] # Directory where the 
frequencies are found\ngrouptable={}\nsnptable={}\njobs=[]\ngroups=[\"C1\",\"E1\",\"E2\",\"N1\",\"N2\",\"S1\",\"S2\",\"W1\",\"W2\"]\nq = multiprocessing.Queue()\nfor group in groups:\n grouptable[group] = set()\n process = multiprocessing.Process(target=processGroup,args=(q,fdir,group))\n jobs.append(process)\n\nfor j in jobs:\n j.start()\n\nfor g in groups:\n gtable = q.get()\n grouptable[g]=gtable\n for snp in gtable:\n snptable[snp]=snptable.get(snp,0)+1\n\nfor j in jobs:\n j.join()\n\n \nfout=open(sys.argv[2],\"w\")\npickle.dump(snptable,fout,2)\nfout.close()\n","repo_name":"h3abionet/h3africa-chip","sub_path":"selection/prep/templates/pgetfreqs.py","file_name":"pgetfreqs.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32687915686","text":"# This Lambda function is designed to clean up VPC resources in all non-Virginia (us-east-1) AWS regions\n\nimport json\nimport boto3\nimport time\n\n# Handler is Triggered By SQS\ndef lambda_handler(event, context):\n sts = boto3.client('sts')\n\n for record in event[\"Records\"]:\n body = json.loads(record[\"body\"])\n vpcid = body[\"resource\"][\"data\"][\"vpcId\"]\n print(vpcid)\n\n vpcid_filter = [\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpcid\n ]\n }\n ]\n\n if body['resourceRegionId'] == 'us-east-1':\n print('Virginia Region - No action.')\n break\n\n assumed_role = sts.assume_role(\n RoleArn=f\"arn:aws:iam::{body['resource']['accountId']}:role/Prisma_VPC_Term_Role\",\n RoleSessionName=\"PrismaSession\"\n )\n credentials=assumed_role['Credentials']\n\n ec2=boto3.client('ec2',\n aws_access_key_id=credentials['AccessKeyId'],\n aws_secret_access_key=credentials['SecretAccessKey'],\n aws_session_token=credentials['SessionToken'],\n region_name=body['resourceRegionId']\n )\n\n ec2_instances = ec2.describe_instances( Filters=vpcid_filter )\n\n instanceIds = []\n for reservation in ec2_instances['Reservations']:\n for instance in reservation['Instances']:\n instanceIds.append(instance['InstanceId'])\n if len(instanceIds) > 0 :\n print(f'Terminating: {instanceIds}')\n ec2.terminate_instances(\n InstanceIds = instanceIds\n )\n \n # Wait for instances to terminate\n time.sleep(2)\n\n internet_gateways = ec2.describe_internet_gateways(\n Filters=[\n {\n 'Name': 'attachment.vpc-id',\n 'Values': [\n vpcid\n ]\n }\n ]\n )\n for internet_gateway in internet_gateways['InternetGateways']:\n print(f'Internet gateway {internet_gateway}')\n if len(internet_gateway['Attachments']) > 0:\n print(f'attachments found on {internet_gateway[\"InternetGatewayId\"]}')\n for attachment in internet_gateway['Attachments']:\n ec2.detach_internet_gateway(\n InternetGatewayId = internet_gateway['InternetGatewayId'],\n VpcId = attachment['VpcId']\n )\n print(f'Detach internet gateway {internet_gateway[\"InternetGatewayId\"]} from {vpcid}')\n \n ec2.delete_internet_gateway(\n InternetGatewayId = internet_gateway['InternetGatewayId']\n )\n print(f'Delete internet gateway {internet_gateway[\"InternetGatewayId\"]}')\n \n subnets = ec2.describe_subnets( Filters = vpcid_filter )\n print('Deleting subnets')\n for subnet in subnets['Subnets']:\n ec2.delete_subnet(\n SubnetId=subnet['SubnetId']\n )\n\n security_groups = ec2.describe_security_groups( Filters = vpcid_filter )\n print('Deleting custom security group')\n for security_group in security_groups['SecurityGroups']:\n if security_group['GroupName'] != 'default':\n ec2.delete_security_group(\n GroupId = security_group['GroupId']\n )\n\n ec2.delete_vpc(\n 
VpcId = vpcid\n )\n print(f'Delete VPC {vpcid}')\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('VPC is removed from the region')\n }\n","repo_name":"schrius/Prisma","sub_path":"VPCKiller.py","file_name":"VPCKiller.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10380393977","text":"import pems_api as pems\nimport pandas as pd\nimport gzip\nimport shutil\nfrom progress.bar import Bar\nfrom os import listdir,mkdir,path\nimport argparse\n# Initiate the parser\nparser = argparse.ArgumentParser()\n# Add long and short argument\nparser.add_argument(\"--username\", \"-u\", help=\"PeMS username to initialize session\")\nparser.add_argument(\"--password\", \"-p\", help=\"PeMS password to initialize session\")\nparser.add_argument(\"--start_date\", \"-s\", help=\"set start date to download from PeMS eg. 2020-01-12\")\nparser.add_argument(\"--end_date\", \"-e\", help=\"set end date to download from PeMS eg. 2020-02-12\")\nparser.add_argument(\"--data_type\", \"-t\", help=\"set data type eg. station_5min , station_raw ,metadata\")\nparser.add_argument(\"--district\", \"-d\", help=\"set district number eg. 1,2..\")\nparser.add_argument(\"--unzip\", \"-z\", help=\"set True to unzip downloaded files \")\nargs = parser.parse_args()\n\nprint(args)\ndef unzip_files(dirname):\n compressed_data=listdir(f'./{dirname}')\n bar = Bar('Processing', max=len(compressed_data))\n unprocessed=[]\n mkdir(f\"./{dirname}/txt/\") \n for file in compressed_data:\n # print(file[0:-3])\n try:\n with gzip.open(f'./{dirname}/{file}', 'rb') as f_in:\n with open( f'./{dirname}/txt/{file[0:-3]}', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n bar.next()\n except:\n unprocessed.append(file[0:-3])\n bar.finish()\n print(f\"\\n Failed to process {len(unprocessed)} files\",unprocessed)\ndef download_data(start_date,end_date,data_type:str='station_5min',district:int=None,unzip:bool=False):\n links={}\n dir_name=f'pems_station_5min_{start_date}_{end_date}_{district or \"all\" }'\n dates=pd.date_range(start=start_date,end=end_date)\n if data_type==\"station_5min\":\n handerl=pems.Station5MinDataHandler()\n url_parser=handerl._url_parser\n elif data_type==\"station_raw\":\n handerl=pems.StationRawDataHandler()\n url_parser=handerl._url_parser\n elif data_type==\"metadata\":\n handerl=pems.StationMetaDataHandler()\n url_parser=handerl._url_parser\n for date in dates:\n links[date.date()]=session.get_url(data_type,date.date(),url_parser,district)\n # print(links)\n if path.isdir(dir_name):\n print(f\"Directory {dir_name} already exists => update content\\n\")\n else:\n print(\"Create new directory => \",dir_name,\"\\n\")\n mkdir(dir_name)\n count=0\n\n for date,link in links.items():\n count+=1\n print(\"Downloading ... \",link)\n print(\"--------------------- \\n\")\n session.download(link,f\"{dir_name}/{str(date)}.txt.gz\")\n print(f\"downloaded {count}/{len(links.keys())} \\n\")\n print(\"---------------------\\n\")\n if unzip:\n print(\"unzipping files\")\n unzip_files(dir_name)\n\nsession=pems.PeMSConnection()\nif args.username and args.password:\n # try:\n print(\"initializing ... 
\\n\")\n # session.initialize(\"hatemhunish@gmail.com\",\"~r5trickS\")\n credentials={\n 'username':str(args.username),\n 'password':str(args.password)} \n # print(credentials)\n session.initialize(credentials['username'],credentials['password'])\n if session.initialized:\n print(\"Session initialized!\")\n print(\"--------------------- \\n\")\n parameters={\n 'start_date':args.start_date,\n 'end_date':args.end_date,\n 'data_type':args.data_type,\n 'district':args.district,\n 'unzip':args.unzip\n }\n download_data(**parameters)\n else:\n print(\"Session hasn't been initialized! \\n\")\n ","repo_name":"HatemHunish/PeMS_Traffic_Downloader","sub_path":"download_pems.py","file_name":"download_pems.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26226484509","text":"import csv\n\n\nclass CsvReader():\n def __init__(\n self,\n filename=None,\n sep=',',\n header=False,\n skip_top=0,\n skip_bottom=0):\n self.filename = filename\n self.sep = sep\n self.header = header\n self.skip_bottom = -skip_bottom if skip_bottom > 0 else None\n self.skip_top = skip_top\n\n def __enter__(self):\n try:\n self.file = open(self.filename, 'r')\n except FileNotFoundError as e:\n return None\n else:\n start = self.skip_top\n end = self.skip_bottom\n self.body = self.file.readlines()[start:end]\n if self.header:\n self.header_data = csv.reader(self.body[0], delimiter=self.sep)\n start = 1\n else:\n self.header_data = None\n start = 0\n self.data = list(csv.reader(self.body[start:], delimiter=self.sep))\n if self.data:\n cols = 0\n if self.header:\n cols = len(self.header_data)\n else:\n cols = len(self.data[0])\n if not all(len(row) == cols for row in self.data):\n return None\n return self\n\n def __exit__(self, type, value, traceback):\n self.file.close()\n\n def getdata(self):\n return self.data\n\n def getheader(self):\n return self.header_data\n","repo_name":"ClemaX/python-day02","sub_path":"ex03/csvreader.py","file_name":"csvreader.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34914686926","text":"import argparse\nimport glob\nimport os\n\nap = argparse.ArgumentParser()\nap.add_argument('-ido', '--IdOriginal', default=3, required=True, help='ID: Original')\nap.add_argument('-idr', '--IdReplace', default=0, required=True, help='ID:')\nap.add_argument('-f', '--Folder', required=True, help='Left Camera: Port')\n\nargs = ap.parse_args()\n\n# Get the old - new id from the args\nid_origin = int(args.IdOriginal)\nid_new = int(args.IdReplace)\n\n# Get the folder name\nfolder_name = str(args.Folder)\n\n# Scan the folder\nos.chdir(folder_name) # Change os to the directory (must have)\n# Access each text file in the directory \"folder_name\"\nfor file in glob.glob(\"*.txt\"):\n abs_path_file = os.path.abspath(file)\n\n # Open one file\n print(\"Processing \" + str(abs_path_file))\n content = open(abs_path_file, \"r\").read()\n\n line_array = []\n line = \"\" # blank lines\n\n for char in content:\n if (char != \"\\n\"):\n line = line + char # If not new line, then append new char to the blank line\n else:\n line_array.append(line)\n line = \"\"\n\n print(line_array)\n new_array = []\n for line in line_array:\n if (int(line[0]) == id_origin):\n new_line = str(id_new) + line[1::]\n else:\n new_line = line\n new_array.append(new_line)\n print(new_array)\n\n write_file = open(abs_path_file, \"w\")\n for line in 
new_array:\n write_file.write(line)\n write_file.write(\"\\n\")\n write_file.close()\n\n#\n# print(content, end=\"\")\n","repo_name":"nguyenkhangduy298/ABB_robot_object_detection_vision","sub_path":"ExternalTools/LabelConversion.py","file_name":"LabelConversion.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21414206446","text":"import pandas as pd\nimport geopandas as gpd\nfrom pathlib import Path\n\ndef configure(context):\n context.config(\"output_path\")\n context.stage(\"data.matsim.roads.cleaned\")\n\ndef execute(context):\n urban_path = context.config(\"output_path\") + \"/URBAN\"\n traffic_path = urban_path + \"/INPUT/TRAFFIC\"\n Path(traffic_path).mkdir(parents=True, exist_ok=True)\n\n gdf_traffic: gpd.GeoDataFrame = context.stage(\"data.matsim.roads.cleaned\")\n gdf_traffic.drop(columns=[\"detailed_geometry\"], inplace=True)\n gdf_traffic.to_file(traffic_path + \"/traffic.shp\")\n\n return traffic_path + \"/traffic.shp\"","repo_name":"Nitnelav/sirane-pipeline","sub_path":"urban/prepare/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41037500829","text":"import openpyxl\r\n\r\n\r\nclass Excel:\r\n def __init__(self, f):\r\n self.book = openpyxl.open(f, read_only=True)\r\n self.sheet = self.book.active\r\n self.title_row_num = 1\r\n self.min_col = 0\r\n self.max_col = self.sheet.max_column\r\n self.min_row = 1\r\n self.max_row = self.sheet.max_row\r\n\r\n def set_sheet(self, num):\r\n \"\"\"\r\n Установка нужного листа\r\n \"\"\"\r\n is_valid = False\r\n if num > -1:\r\n self.sheet = self.book.worksheets(num)\r\n is_valid = True\r\n return is_valid\r\n\r\n def set_title_line_num(self, num):\r\n \"\"\"\r\n Установка строки с титульником\r\n \"\"\"\r\n is_valid = False\r\n if self.title_row_num > 0:\r\n self.title_row_num = num\r\n is_valid = True\r\n return is_valid\r\n\r\n def set_min_col(self, num):\r\n \"\"\"\r\n Установка стартовой колонны\r\n \"\"\"\r\n is_valid = False\r\n if self.min_col > -1:\r\n self.min_col = num\r\n is_valid = True\r\n return is_valid\r\n\r\n def set_max_col(self, num):\r\n \"\"\"\r\n Установка конечной колонны\r\n \"\"\"\r\n is_valid = False\r\n if self.max_col > -1:\r\n self.max_col = num\r\n is_valid = True\r\n return is_valid\r\n\r\n def get_title_row(self, title_row_num=None, min_col=None, max_col=None):\r\n \"\"\"\r\n Вернет список с данными из строки с титульником\r\n \"\"\"\r\n rows = list()\r\n if title_row_num is None:\r\n title_row_num = self.title_row_num\r\n if min_col is None:\r\n min_col = self.min_col\r\n if max_col is None:\r\n max_col = self.max_col\r\n\r\n for col in range(min_col, max_col):\r\n rows.append(self.sheet[title_row_num][col].value)\r\n return rows\r\n\r\n def get_row(self, row_num=None, min_col=None, max_col=None):\r\n \"\"\"\r\n Вернет список с данными из строки\r\n \"\"\"\r\n if row_num is None:\r\n row_num = self.title_row_num\r\n if min_col is None:\r\n min_col = self.min_col\r\n if max_col is None:\r\n max_col = self.max_col\r\n\r\n rows = list()\r\n for col in range(min_col, max_col):\r\n rows.append(self.sheet[row_num][col].value)\r\n return rows\r\n\r\n def get_rows(self, title=True, min_row=None, max_row=None, min_col=None, max_col=None):\r\n \"\"\"\r\n Вернет список с данными из строк\r\n \"\"\"\r\n if title is False:\r\n min_row = self.title_row_num+1\r\n if min_row is 
None:\r\n min_row = self.min_row\r\n if max_row is None:\r\n max_row = self.max_row\r\n if min_col is None:\r\n min_col = self.min_col\r\n if max_col is None:\r\n max_col = self.max_col\r\n\r\n rows = list()\r\n for row in range(min_row, max_row):\r\n rows.append([])\r\n for col in range(min_col, max_col):\r\n rows[-1].append(self.sheet[row][col].value)\r\n return rows\r\n\r\n","repo_name":"dmakger/HelperDoc","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70069330403","text":"import unittest\nimport pytest\nfrom arrays.max_product_subarray import Solution\n\n\n@pytest.mark.parametrize(\"nums,expected\", [\n ([2, 3, -2, 4], 6),\n ([-2, 0, -1], 0)\n])\ndef test_max_product_subarray(nums, expected):\n assert expected == Solution().max_product(nums)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"sikakente/blind-75","sub_path":"tests/arrays/test_max_product.py","file_name":"test_max_product.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72311940000","text":"from msrest.serialization import Model\n\n\nclass SiteLimits(Model):\n \"\"\"\n Represents metric limits set on a web app.\n\n :param max_percentage_cpu: Maximum allowed CPU usage percentage\n :type max_percentage_cpu: float\n :param max_memory_in_mb: Maximum allowed memory usage in MB\n :type max_memory_in_mb: long\n :param max_disk_size_in_mb: Maximum allowed disk size usage in MB\n :type max_disk_size_in_mb: long\n \"\"\" \n\n _attribute_map = {\n 'max_percentage_cpu': {'key': 'maxPercentageCpu', 'type': 'float'},\n 'max_memory_in_mb': {'key': 'maxMemoryInMb', 'type': 'long'},\n 'max_disk_size_in_mb': {'key': 'maxDiskSizeInMb', 'type': 'long'},\n }\n\n def __init__(self, max_percentage_cpu=None, max_memory_in_mb=None, max_disk_size_in_mb=None):\n self.max_percentage_cpu = max_percentage_cpu\n self.max_memory_in_mb = max_memory_in_mb\n self.max_disk_size_in_mb = max_disk_size_in_mb\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/Azure/azure-sdk-for-python/azure-mgmt-web/azure/mgmt/web/models/site_limits.py","file_name":"site_limits.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70305079841","text":"N = int(input())\nP = list(map(int,input().split()))\nG = [P[0]]\ncount = 0\nold = N\nnew = 0\nwhile old != new:\n\told = len(P)\n\tfor x in range(1,old):\n\t\tif P[x] <= P[x-1]:\n\t\t\tG.append(P[x])\n\tif G != P:\n\t\tcount += 1\n\tP = G\n\tG = [P[0]]\n\tnew = len(P)\nprint(count)","repo_name":"AnirudhGoel/CompetitiveProgramming","sub_path":"HackerRank/Poisonous Plants.py","file_name":"Poisonous Plants.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11027995333","text":"#!/usr/bin/python3\n\nimport argparse\nimport collections\nimport csv\nimport os \nfrom os.path import isfile, join\nimport time\nimport re\nfrom collections import defaultdict\nimport sys\n\nimport functools\nimport operator\n\n\nPATH_TO_YOSYS = \"/users/bbarzen/workspace/yosys/yosys-nodc2\"\nPATH_TO_VIVADO = \"/tools/xilinx/Vivado/2022.1/bin/vivado\"\n\nTARGET_XL_DEVICE_ID = \"xc7a200tffv1156-1\"\n\ndef write_csv(results_dict, num_runs, csvpath):\n\twith open(csvpath, 'w', 
newline='') as csvfile:\n\t\tcsvwriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\tprint(results_dict)\n\t\tfor method, designs in results_dict.items():\n\t\t\tcsvwriter.writerow([method])\n\t\t\tcsvwriter.writerow(['design'] + [i for i in range(num_runs)])\n\t\t\tfor design, run_ids in designs.items():\n\t\t\t\tcsvwriter.writerow([design] + [t for i,t in run_ids.items()])\n\ndef write_xdc():\n\txdc_file = open(\"/tmp/vivado.xdc\",'w')\n\txdc_file.write(\n\"# Auto-generated XDC file; read with read_xdc -unmanaged\\n\"+\n\"if {[llength [get_ports -quiet -nocase -regexp .*cl(oc)?k.*]] != 0} {\\n\"+\n\"\\tcreate_clock -period 30.00 [get_ports -quiet -nocase -regexp .*cl(oc)?k.*]\\n\"+\n\"} else {\\n\"+\n\"\\tputs \\\"WARNING: Clock constraint omitted.\\\"\\n\"+\n\"}\\n\"\n)\n\txdc_file.close()\n\n\ndef run_yosys(design_files, results_dict, source_dir, run_id, use_abc9):\n\tfor design in design_files:\n\t\tyosys_script_path = \"/tmp/\"+design+\".ys\"\n\t\tyosys_script = open(yosys_script_path, 'w')\n\t\tyosys_script.write(\n\"read_verilog -nomem2reg -yydebug {}/{}\\n\".format(source_dir, design)+\n\"synth_xilinx -dff -flatten -noiopad {} -edif {}.edif\".format(\"-abc9\" if use_abc9 else \"\", design[:-2])\n)\n\t\tyosys_script.close()\n\t\tprint(\"Run yosys{}-{}-{}\".format(\"-abc9\" if use_abc9 else \"\", design, run_id))\n\t\tstart_time = time.time()\n\t\tos.system(\"{} -l /tmp/yosys{}-{}-{}.log {} > /dev/null 2>&1\".format(PATH_TO_YOSYS, \"-abc9\" if use_abc9 else \"\", design, run_id, yosys_script_path))\n\t\tend_time = time.time()\n\t\tresults_dict[\"yosys-abc9\" if use_abc9 else \"yosys\"][design][run_id] = end_time - start_time\n\t \n\ndef run_vivado(design_files, results_dict, source_dir, run_id):\n\tfor design in design_files:\n\t\tvivado_script_path = \"/tmp/\"+design+\".tcl\"\n\t\tvivado_script = open(vivado_script_path, 'w')\n\t\tvivado_script.write(\n\"set_param general.maxThreads 1\\n\" +\n\"set_property IS_ENABLED 0 [get_drc_checks {PDRC-43}]\\n\" + \n\"cd {}\\n\".format(source_dir) + \n\"read_verilog {}\\n\".format(design) + \n\"set_property TOP [lindex [find_top] 0] [current_fileset]\\n\" + \n\"cd /tmp/\\n\" + \n\"read_xdc -unmanaged vivado.xdc\\n\" + \n\"synth_design -part {} -mode out_of_context \\n\".format(TARGET_XL_DEVICE_ID) + \n\"opt_design -directive Explore\\n\"\n)\n\t\tvivado_script.close()\n\t\tprint(\"Run vivado-{}-{}\".format(design, run_id))\n\t\tstart_time = time.time()\n\t\tos.system(\"{} -nojournal -log /tmp/vivado-{}-{}.log -mode batch -source {} > /dev/null 2>&1\".format(PATH_TO_VIVADO, design, run_id, vivado_script_path))\n\t\tend_time = time.time()\n\t\tresults_dict[\"vivado\"][design][run_id] = end_time - start_time\n\n\t\t\n\t\n\ndef main():\n\n\tparser = argparse.ArgumentParser(\n\tdescription='Run Vivado, Yosys and Yosys-ABC9 flow and measure runtime.')\n\tparser.add_argument('--design_dir', type=str, help='Directory which contains Designs')\n\tparser.add_argument('--result_csv', type=str, help='Output File Path')\n\tparser.add_argument('--num_rounds', type=int, help='Number of Runs per Software')\n\n\targs = parser.parse_args()\n\t\n\tif len(sys.argv) < 4:\n\t\tparser.print_help()\n\t\tparser.exit()\n\t\n\tsource_dir = None\n\ttry:\n\t\tsource_dir = os.path.realpath(args.design_dir)\n\texcept:\n\t\tprint('Could not find results in {}'.format(source_dir), file=sys.stderr)\n\t\tsys.exit(1)\t\t\n\t\n\t# Create list of designs, prepare result dict, write clock file\n\tdesign_files = [f for f in 
os.listdir(source_dir) if isfile(join(source_dir, f)) and f.endswith(\".v\")]\t\n\tresults_dict = defaultdict(lambda: defaultdict(dict))\n\twrite_xdc()\n\n\tfor i in range(args.num_rounds):\t\n\t\trun_vivado(design_files, results_dict, source_dir, i)\t\n\t\trun_yosys(design_files, results_dict, source_dir, i, False)\n\t\t#run_yosys(design_files, results_dict, source_dir, i, True)\n\n\twrite_csv(results_dict, args.num_rounds, args.result_csv)\n\t\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"growly/fpga_benchmarks","sub_path":"scripts/synth_runtime.py","file_name":"synth_runtime.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"2075384875","text":"#!/usr/bin/env python3\n\n############################################################\n# Code for running Squeezedet with DeepSort. Just run 'python main.py' an enjoy :-).\n############################################################\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\nimport time\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport tensorflow\nimport yaml\n\nif tensorflow.__version__.startswith(\"2\"):\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\nelse:\n import tensorflow as tf\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\nsys.path.append('../')\n\nfrom nanonets_object_tracking.yolo_detector import YoloDetector\nfrom squeezedet.squeezedet_classifier import SqueezeDetClassifier\nfrom sort.sort import Sort\n\n\ndef visualize(frame, tracks, obj_classes):\n \"\"\" Function to visualize the bounding boxes returned from the tracker\n Inputs:\n frame Image ndarray\n The frame on which the bboxes are to be visualized\n tracks nd array\n Each row has bbox coordinates and track id stored\n Bounding boxes are in 'tlbr' format\n obj_classes list\n \"\"\"\n for track in tracks:\n bbox = track[0:4]\n id_num = track[4]\n\n # Draw bbox from tracker. bbox format is 'tlbr'\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)\n cv2.putText(frame, str(id_num), (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200, (0, 255, 0), 2)\n\n # Draw bbox from detector. 
Just to compare.\n # for det in detections_class:\n # bbox = det.to_tlbr()\n # cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,0), 2)\n\n cv2.imshow('frame', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n return True\n return False\n\n\ndef store_results(annots, frame_id, tracks):\n \"\"\"\n A failed vision for a beautiful function\n RIP :( You won't be forgotten\n Inputs:\n annots Dict[Dict[list]]\n Stores the output\n frame_id int\n tracks nd array\n Each row has bbox coordinates and track id stored\n Bounding boxes are in 'tlbr' format\n Returns:\n annots Dict[Dict[list]]\n Now updated with new frames\n \"\"\"\n pass\n\n\ndef visualize_dets(frame, bboxes):\n \"\"\" Function to visualize the detection bounding boxes on the image\n Inputs:\n frame image ndarray\n The frame on which the bboxes are to be visualized\n bboxes list[nparray]\n Bounding boxes in 'tlbr' format\n \"\"\"\n for bbox in bboxes:\n # Draw bboxes from the detector\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)\n # cv2.putText(frame, str(id_num), (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200, (0, 255, 0), 2)\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n return True\n return False\n\n\ndef check_and_create_path(dir_path):\n \"\"\" Checks if a particular directory exists, if not, the directory is created\n \"\"\"\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n\ndef parse_args():\n \"\"\" Parse command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"SqueezeDet SORT\")\n parser.add_argument(\n \"--max_age\", help=\"Set the maximum age/frames for which a track \"\n \"can exist without being associated with detections.\",\n default=11, type=int)\n parser.add_argument(\n \"--min_hits\", help=\"Set the minimum number of consecutive detections \"\n \"to be done in order for these set of detections to be considered a track.\",\n default=3, type=int)\n parser.add_argument(\n \"--iou_threshold\", help=\"Minimum overlap between detection and estimation bboxes \"\n \"to be considered the same track.\", type=float, default=0.1)\n parser.add_argument(\n \"--low_frame_rate_modulo\", help=\"Set the number of frames to be skipped. \"\n \"This is used to simulate low frame rates, by skipping n frames\",\n default=1, type=int)\n parser.add_argument(\n \"--display\", help=\"Show intermediate tracking results \",\n default='True', type=str)\n parser.add_argument( # Currently not used\n \"--nms\", help=\"Parameter to set if NMS is to be performed on the bounding boxes \"\n \"returned by the detector. This performs inter-class NMS Can be set to \\'True\\'\"\n \"or \\'False\\''. By default it is set to \\'True\\'\", default=\"True\", type=str)\n parser.add_argument(\n \"--path_to_dataset\", help=\"Set path to dataset \"\n \"default: ../data/images\", default=\"../data/images\", type=str)\n parser.add_argument( # Currently not used\n \"--dataset_split\", help=\"Set the dataset split to run the tracker \"\n \"default: train. Can also use \\'test\\'\", default=\"train\", type=str)\n parser.add_argument(\n \"--generate_outputs\", help=\"Select whether outputs are to be generated\"\n \"or not default: True. 
Can also be set to \\'False\\'\", default=\"True\", type=str)\n parser.add_argument(\n \"--path_to_annotations\", help=\"Set the path to the output folder\"\n \"default: outputs\", default='outputs', type=str)\n parser.add_argument(\n \"--model_path\", help=\"Path to [YOLOv5, SqueezeNet] model\", default=\"models/yolo.pt\", type=str\n )\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n config_file = '../squeezedet/rgb_classifier_config.yaml'\n\n if os.path.isfile(config_file):\n configs = {}\n with open(config_file, 'r') as infile:\n configs = yaml.safe_load(infile)\n\n model_config = configs['model']['squeezeDet']\n classes = configs['classes']\n colors = configs['colors']\n p = Path(args.model_path)\n if \".pt\" in p.name:\n model = YoloDetector(checkpoint_path=args.model_path)\n model_name = 'yolo'\n else:\n model = SqueezeDetClassifier(config=model_config,\n checkpoint_path=args.model_path)\n model_name = 'squeezedet'\n objects = []\n\n images = [(cv2.imread(file), int(file.split('/')[-1].lstrip('frame').split('.')[0])) for file in sorted(glob.glob(\"/\".join([args.path_to_dataset, \"*.jpg\"])))]\n\n # detector_rate = 10\n # SORT\n sort_tracker = Sort(args.max_age,\n args.min_hits,\n args.iou_threshold)\n\n # Add path and create a dict to store the output annotations\n check_and_create_path(args.path_to_annotations)\n annotations = {}\n\n j = 0\n timings = []\n for i, (image, frame_id) in enumerate(images):\n\n if i % args.low_frame_rate_modulo != 0:\n continue\n\n #if j % detector_rate == 0 or j % detector_rate == 1 or j % detector_rate == 2:\n # Returns bboxes in cwh format\n print(f\"Frame number, global {i}\")\n start = time.time()\n bboxes, scores, labels = model.classify(image) # bboxes format is tlbr\n stop = time.time()\n timings.append(stop - start)\n detections = np.hstack((bboxes, scores))\n trackers, obj_classes = sort_tracker.update(detections, labels) # This returns bbox and track_id\n\n if args.display == 'True':\n if_quit = visualize(image, trackers, obj_classes)\n if if_quit:\n break\n j += 1\n\n # We store the tracking results\n if args.generate_outputs == 'True':\n annotations[frame_id] = {}\n for track in trackers:\n annotations[frame_id][int(track[4])] = track[0:4].tolist()\n # bbox format is 'tlbr'\n\n if args.generate_outputs == 'True':\n # We finally write the outputs to a .json file\n with open(\"/\".join([args.path_to_annotations, '_'.join([model_name, str(args.max_age), str(args.low_frame_rate_modulo), 'sort_outputs.json'])]), \"w\") as fp:\n json.dump(annotations,fp)\n print(f\"Average Inference Time: {sum(timings)/len(timings)}\")\n","repo_name":"VincentSch4rf/rtt_tracking","sub_path":"nanonets_object_tracking/not_main.py","file_name":"not_main.py","file_ext":"py","file_size_in_byte":8315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73262152803","text":"'''\n对比多进程和串行的性能\n'''\nimport requests\nimport re\nimport time\nfrom multiprocessing import Pool\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/65.0.3325.181 Chrome/65.0.3325.181 Safari/537.36'\n}\n\ndef re_scaper(url):\n res = requests.get(url,headers=headers)\n ids = re.findall('
<h2>(.*?)</h2>',res.text,re.S)\n    contents = re.findall('<div class=\"content\">
    .*?(.*?)',res.text,re.S)\n laughts = re.findall('(.*?) 好笑',res.text,re.S)\n comments = re.findall('(\\d+) 评论',res.text,re.S)\n for id,content,laugh,comment in zip(ids,contents,laughts,comments):\n info = {\n 'id':id,\n 'content':content,\n 'laugh':laugh,\n 'comment':comment\n }\n return info\nif __name__ == '__main__':\n urls = ['https://www.qiushibaike.com/text/page/{}/'.format(str(i)) for i in range(1,100)]\n start_1 = time.time()\n for url in urls:\n re_scaper(url)\n end_1 = time.time()\n print('串行爬虫',end_1-start_1)\n\n start_2=time.time()\n pool = Pool(processes=2)\n pool.map(re_scaper,urls)\n end_2 = time.time()\n print('两个进程',end_2-start_2)\n\n start_3 = time.time()\n pool = Pool(processes=4)\n pool.map(re_scaper,urls)\n end_3 = time.time()\n print('四个进程',end_3-start_3)\n","repo_name":"xtjjyygy/Spider_project","sub_path":"spider_multiprocessing_performance.py","file_name":"spider_multiprocessing_performance.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19283594640","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Preprocessing class with functions to call when applying CNN\n# James Kahn\n\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.preprocessing import sequence\nfrom .MCParticles_preproc_pandas import MCParticlesPreprocPandas\n\n\nclass MCParticlesPreprocManager():\n def __init__(self, max_decstr_len=150, max_FSPs=100):\n self.max_decstr_len = max_decstr_len\n self.max_FSPs = max_FSPs\n self.ppp = MCParticlesPreprocPandas()\n\n self.cont_vars = ['energy', 'prodTime', 'x', 'y', 'z', 'px', 'py', 'pz']\n self.cont_min = [0.0, 0.0, -700., -700., -700., -3.0, -3.0, -2.0]\n self.cont_max = [11.0, 150.0, 700., 700., 700., 3.0, 3.0, 4.0]\n # Values obtained automatically from subset of MCParticles\n self.cont_mean = [\n 1.26257171404669,\n 0.05890088167950489,\n 0.04414757085741595,\n -0.00265970402649008,\n 0.3737106843126124,\n 0.05253655583789957,\n -4.23090263104822e-05,\n 0.344737685298959\n ]\n self.cont_std = [\n 2.0057599256131553,\n 0.7272543277442086,\n 5.879198954800914,\n 5.895932291059462,\n 7.292860628527212,\n 0.42971772214234305,\n 0.42181551657083644,\n 0.683827892849276]\n\n self.cont_min_series = pd.Series(self.cont_min, index=self.cont_vars)\n self.cont_max_series = pd.Series(self.cont_max, index=self.cont_vars)\n self.cont_mean_series = pd.Series(self.cont_mean, index=self.cont_vars)\n self.cont_std_series = pd.Series(self.cont_std, index=self.cont_vars)\n\n self.disc_vars = ['charge', 'PDG', 'motherPDG'] # , 'nDaughters']\n\n def build_decay_string(self, particle):\n \"\"\"Build particle decay string from given particle down\n\n Need to recode this without recursion.\n \"\"\"\n dec_string = ' {}'.format(particle.getPDG())\n\n # Check at least one primary particle daughter exists before diving down a layer\n if (\n particle.getNDaughters() > 0 and\n [(d.isPrimaryParticle() and self.check_status_bit(d.getStatus())) for d in particle.getDaughters()].count(True)\n ):\n dec_string += ' (-->'\n for daughter in particle.getDaughters():\n if daughter.isPrimaryParticle() and self.check_status_bit(daughter.getStatus()):\n dec_string += self.build_decay_string(daughter)\n dec_string += ' <--)'\n return dec_string\n\n def _preproc_cont_vars(self, df):\n ''' Perform necessary preprocessing of self.cont_vars '''\n # Normalise continuous variables\n return (df - self.cont_min_series) / (self.cont_max_series - self.cont_min_series)\n # Simple 
tanh\n # return df.apply(np.tanh)\n # tanh-estimator\n # return df.apply(\n # lambda x: 0.5 * (np.tanh(0.01 * ((x - self.cont_mean_series[x.name]) / self.cont_std_series[x.name]) + 1))\n # )\n\n def _preproc_disc_vars(self, df):\n ''' Perform necessary preprocessing of self.disc_vars '''\n # One-hot encode discrete variables (only charge)\n # Have to force label ranges since we're processing one file at a time\n dummy_cols = ['{}_{}'.format(self.disc_vars[0], float(c)) for c in range(-2, 3)]\n dummy_df = pd.get_dummies(\n df[self.disc_vars[0]],\n columns=[self.disc_vars[0]],\n prefix=self.disc_vars[0]\n )\n dummy_df = dummy_df.T.reindex(dummy_cols).T.fillna(0)\n df = pd.concat([df, dummy_df], axis=1)\n df = df.drop(self.disc_vars[0], axis=1)\n\n # Hashing-trick encode PDG and mother PDG\n # df = self._hash_PDG(df, self.disc_vars[1], n_dims=10)\n # df = self._hash_PDG(df, self.disc_vars[2], n_dims=10)\n df[self.disc_vars[1]] = pd.to_numeric(df[self.disc_vars[1]].apply(self.ppp.tokenize_PDG_code))\n df[self.disc_vars[2]] = pd.to_numeric(df[self.disc_vars[2]].apply(self.ppp.tokenize_PDG_code))\n\n return df\n\n def preproc_single_whole_decay(self, df):\n ''' Keeping this separate for the time speedup in application '''\n # Combine the preprocessed discrete and continuous dataframe chunks\n df = pd.concat(\n [\n self._preproc_cont_vars(df[self.cont_vars]),\n self._preproc_disc_vars(df[self.disc_vars])\n ],\n axis=1\n )\n\n # Extract particle_input\n x_arr = df.drop(self.disc_vars[1:3], axis=1).values\n x_arr = np.reshape(x_arr, (1, x_arr.shape[0], x_arr.shape[1]))\n # Not sure where to put this, maybe should be in preproc function?\n x_arr = sequence.pad_sequences(\n x_arr,\n maxlen=self.max_FSPs,\n padding='post',\n truncating='post'\n )\n\n # Extract pdg_input\n pdg_arr = df[self.disc_vars[1]].values\n pdg_arr = np.reshape(pdg_arr, (1, pdg_arr.shape[0]))\n # Not sure where to put this, maybe should be in preproc function?\n pdg_arr = sequence.pad_sequences(\n pdg_arr,\n maxlen=self.max_FSPs,\n padding='post',\n truncating='post'\n )\n\n # Extract mother_pdg_input\n mother_pdg_arr = df[self.disc_vars[2]].values\n mother_pdg_arr = np.reshape(mother_pdg_arr, (1, mother_pdg_arr.shape[0]))\n # Not sure where to put this, maybe should be in preproc function?\n mother_pdg_arr = sequence.pad_sequences(\n mother_pdg_arr,\n maxlen=self.max_FSPs,\n padding='post',\n truncating='post'\n )\n\n return x_arr, pdg_arr, mother_pdg_arr\n\n def preproc_whole_decay(self, df):\n ''' Keeping this separate for the time speedup in application '''\n # Combine the preprocessed discrete and continuous dataframe chunks\n df = pd.concat(\n [\n self._preproc_cont_vars(df[self.cont_vars]),\n self._preproc_disc_vars(df[self.disc_vars])\n ],\n axis=1\n )\n\n # Need to convert whole dataframe to float64 for memmapping later\n # Should really use structures numpy arrays for space saving\n df = df.astype(np.float64)\n\n # Remove label and arrayIndex indexes\n df.reset_index(level=['label', 'arrayIndex'], drop=True, inplace=True)\n\n # Then populate a new arrayIndex column, need for pivoting\n df['newIndex'] = df.groupby('evtNum', sort=False).cumcount()\n\n # If index=None, uses existing index, in this case that's evtNum\n pivot = df.pivot(columns='newIndex')\n pivot = pivot.fillna(0.)\n\n # Reshape and swap axis to get (event, particle, var)\n # The pivot table has two levels for column names so that's what we're spliting into 2 dims\n # PDG and motherPDG will go into separate arrays\n x_arr = pivot.drop(self.disc_vars[1:3], 
axis=1).values.reshape(\n -1,\n pivot.columns.levels[0].shape[0] - 2,\n pivot.columns.levels[1].shape[0],\n ).swapaxes(1, 2)\n pdg_arr = pivot[self.disc_vars[1]].values.reshape(\n -1,\n pivot.columns.levels[1].shape[0],\n )\n mother_pdg_arr = pivot[self.disc_vars[2]].values.reshape(\n -1,\n pivot.columns.levels[1].shape[0],\n )\n\n # Pad output arrays\n # Should put these in separate function\n x_arr = sequence.pad_sequences(\n x_arr,\n maxlen=self.max_FSPs,\n padding='post',\n truncating='post',\n dtype=x_arr.dtype,\n )\n pdg_arr = sequence.pad_sequences(\n pdg_arr,\n maxlen=self.max_FSPs,\n padding='post',\n truncating='post',\n dtype=pdg_arr.dtype,\n )\n mother_pdg_arr = sequence.pad_sequences(\n mother_pdg_arr,\n maxlen=self.max_FSPs,\n padding='post',\n truncating='post',\n dtype=mother_pdg_arr.dtype,\n )\n\n return x_arr, pdg_arr, mother_pdg_arr\n\n def preproc_single_decay_string(self, decay_string, LSTM_flag=False):\n\n # Tokenize the decay string\n tok_decstr = self.ppp.tokenize_decay_string(decay_string)\n\n # Change to numpy array of shape (1, )\n tok_decstr = np.array(tok_decstr)\n tok_decstr = np.reshape(tok_decstr, (1, -1))\n\n # Pad out the decay string\n # Need to put tok_decstr in list to pad correct dimension\n tok_decstr = sequence.pad_sequences(tok_decstr, maxlen=self.max_decstr_len)\n\n # If inputting to LSTM reshape to include time dim\n if LSTM_flag:\n tok_decstr = np.reshape(tok_decstr, (1, -1, 1))\n\n return tok_decstr\n\n def preproc_decay_string(self, df, LSTM_flag=False):\n # Tokenize the decay string\n token_df = df['decay_str'].apply(self.ppp.tokenize_decay_string)\n # token_df = df['decay_str'].apply(self._hash_decay_string)\n df['decay_str_tok'] = token_df\n\n # Change to numpy array of shape (1, )\n tok_decstr = df['decay_str_tok'].values\n print('tok_decstr shape:', tok_decstr.shape)\n # tok_decstr = np.reshape(tok_decstr, (-1, 1))\n\n # Pad out the decay string\n # Need to put tok_decstr in list to pad correct dimension\n tok_decstr = sequence.pad_sequences(\n tok_decstr,\n maxlen=self.max_decstr_len,\n padding='post',\n truncating='post',\n dtype=tok_decstr.dtype,\n )\n\n # Need to convert for memmaps later\n tok_decstr = tok_decstr.astype(int)\n\n # If inputting to LSTM reshape to include time dim\n if LSTM_flag:\n tok_decstr = np.reshape(tok_decstr, (1, -1, 1))\n\n return tok_decstr\n\n def preproc_y_output(self, df, key):\n ''' Return training labels as numpy array '''\n if key == 'train_events':\n # Want just one label per event, don't care about arrayIndex\n df.reset_index(level=['label', 'arrayIndex'], inplace=True)\n return df.groupby('evtNum', sort=False).first()['label'].values\n elif key == 'decay_strings':\n return df['label'].values\n\n def check_status_bit(self, status_bit):\n '''Returns True if conditions are satisfied (not an unusable particle)\n\n Move this method to preprocessPandas\n '''\n return (\n (status_bit & 1 << 4 == 0) & # IsVirtual\n (status_bit & 1 << 5 == 0) & # Initial\n (status_bit & 1 << 6 == 0) & # ISRPhoton\n (status_bit & 1 << 7 == 0) # FSRPhoton\n )\n\n def save_npy_preprocd(self, arr, filename):\n ''' Saves the numpy array to file '''\n np.save(\n filename,\n arr,\n )\n","repo_name":"kahn-jms/smartBKG","sub_path":"smartBKG/preprocessing/MCParticles_preproc_manager.py","file_name":"MCParticles_preproc_manager.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72793644001","text":"# Напишитепрограмму(файл 
arithmetic.py), которая предлагала бы пользователю решить пример\r\n# 4 * 100 - 54. Потом выводила бы на экран правильный ответ\r\n# и ответ пользователя. Подумайте, нужно ли здесь преобразовывать строку в число.\r\n\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nif __name__ == \"__main__\":\r\n usersanswer = input(\"Please solve this: 4 * 100 - 54 = \")\r\n correctanswer = 346\r\n correctanswer = str(correctanswer)\r\n\r\n print(\"Your answer: \" + usersanswer)\r\n print(\"Correct answer: \" + correctanswer)","repo_name":"hubieva-a/lab3","sub_path":"arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1255106915","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\n\nfrom app.tasks import CeleryGetTask, Counter\n\nfrom celery.result import AsyncResult\n\n\ndef Home(request):\n\n try:\n tasks = CeleryGetTask()\n except:\n tasks = []\n\n if request.method == \"POST\":\n code = request.POST.get(\"code\")\n\n if not code.isnumeric():\n messages.error(request, f\"Please enter number\")\n return redirect(\"home\")\n\n ret = Counter.delay(int(code))\n # ret = Counter.apply_async(args=(int(code),))\n\n print(ret.task_id)\n\n messages.success(request, f\"count: {code}\")\n return redirect(\"home\")\n\n \n context = {\n \"tasks\": tasks\n }\n return render(request, \"index.html\", context)\n\n\n\ndef CancelTask(request, task_id):\n print(f\"task Cancel id: {task_id}\")\n\n AsyncResult(task_id).revoke(terminate=True)\n\n messages.error(request, f\"task id: {task_id} has been revoke\")\n return redirect('home')\n","repo_name":"hunzo/django-celery-pgsql-docker","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35998391084","text":"# Faça um programa que mostre a tabuada de vários números,\n# um de cada vez, para cada valor digitado pelo usuário.\n# O programa será interrompido quando o número solicitado for negativo.\n\nwhile True:\n n = int(input('Calcular a tabuada de: '))\n if n < 0:\n break\n count = 1\n while count < 11:\n print(f'{n} X {count} = {n*count}')\n count += 1\n\nprint('Programa finalizado!')","repo_name":"EduardoArgenti/Python","sub_path":"CursoEmVideo/ex067.py","file_name":"ex067.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7878357660","text":"\"\"\" Advent of code 2017\tday 3/1\t\"\"\"\n\nfrom argparse import ArgumentParser\n\ndef start_point():\n \"\"\" Get the start points\"\"\"\n n, start = 1, 2\n yield (start, n)\n while True:\n start = (4*pow(n, 2)) + (4*n) + 2\n yield (start, n)\n n += 1\n\ndef side_pos(curr_index, level):\n \"\"\" Get the position indicator in the current side \"\"\"\n return divmod(curr_index, (level - 1) * 2)\n\ndef ccn(curr_div, curr_mod, curr_list, prev_level):\n \"\"\" Caclulate current neighbours \"\"\"\n value = 0\n largest_mod = max(2 * prev_level - 1, 0)\n #TODO error: larges mod gets calculated wrong\n if (curr_div == 3) and curr_mod > largest_mod-2:\n value += curr_list[0]\n if curr_mod > 0:\n # Default case, only needs to return the previous element\n value += curr_list[-1]\n elif (curr_div > 0) and (curr_mod == 0):\n # The one after the corner, 
it has two neighbours from the current list\n value += sum(curr_list[-2:])\n # There is not any current values yet.\n return value\n\ndef cpn(prev_list, prev_level, curr_index, curr_div, curr_mod):\n \"\"\" Calculate previous neighbours \"\"\"\n largest_mod = max(2 * prev_level - 1, 0)\n if prev_level == 1:\n # No calculation needed, it only has 1 element, and every other element sees it\n return prev_list[0]\n else:\n if curr_div > 0:\n # The first case needs a value from the end of the previous list\n if curr_mod == largest_mod:\n # Corner value\n return prev_list[curr_index - 2*(curr_div+1)]\n elif curr_mod == 0:\n # Needs two neighbours\n corner = curr_index - 1 - 2*(curr_div)\n return sum(prev_list[corner: corner+2])\n elif curr_mod == largest_mod - 1:\n # Needs two neighbours\n corner = curr_index + 1 - 2*(curr_div + 1)\n return sum(prev_list[corner-1: corner+1])\n # Other cases need three neighbours\n prev_max = curr_index - 2 * (curr_div)\n return sum(prev_list[prev_max - 2: prev_max + 1])\n else:\n if curr_mod == 0:\n # First element\n return prev_list[-1] + prev_list[0]\n elif curr_mod == 1:\n # Second element\n return prev_list[-1] + sum(prev_list[0:2])\n elif curr_mod == largest_mod - 1:\n # One before the right corner\n corner = curr_mod - 1\n return sum(prev_list[corner-1: corner+1])\n elif curr_mod == largest_mod:\n # Corner value\n first_corner = curr_mod - 2\n return prev_list[first_corner]\n # Three neighbours\n prev_max = curr_index\n return sum(prev_list[prev_max - 2: prev_max + 1])\n\ndef calc_value(curr_start, curr, curr_list, prev_list, prev_level):\n \"\"\" Calculate the current value from the currently existing neighbours \"\"\"\n curr_index = curr - curr_start\n curr_div, curr_mod = side_pos(curr_index, prev_level+1)\n prev_sum = cpn(prev_list, prev_level, curr_index, curr_div, curr_mod)\n curr_sum = ccn(curr_div, curr_mod, curr_list, prev_level)\n value = prev_sum + curr_sum\n print(\"{}={}+{} ({},{},{})\".format(value, prev_sum, curr_sum, curr, curr_div, curr_mod))\n return value\n\ndef calc_circle(start_gen):\n \"\"\" Calculate ew circle values \"\"\"\n prev_list = [1]\n curr_list = []\n prev_start, prev_level = next(start_gen)\n curr_start, _ = next(start_gen)\n prev = 1\n curr = 2\n while True:\n while curr < curr_start:\n value = calc_value(prev_start, curr, curr_list, prev_list, prev_level)\n curr_list.append(value)\n yield value\n curr += 1\n prev += 1\n prev_start, (curr_start, prev_level) = curr_start, next(start_gen)\n prev_list, curr_list = curr_list, []\n\ndef solution(input_data):\n \"\"\" Solution to the problem \"\"\"\n start_values = start_point()\n circle_values = calc_circle(start_values)\n current = -1\n upper_bound = int(input_data)\n while current < upper_bound:\n current = next(circle_values)\n return current\n\nif __name__ == \"__main__\":\n PARSER = ArgumentParser()\n PARSER.add_argument(\"--input\", dest='input', action='store_true')\n PARSER.add_argument(\"--test\")\n ARGS = PARSER.parse_args()\n if ARGS.input:\n with(open('input.txt', 'rb')) as input_file:\n print(solution(input_file.read()))\n elif ARGS.test:\n print(solution(str(ARGS.test)))\n else:\n DEBUG = \"361527\"\n print(solution(DEBUG))\n","repo_name":"budavariam/advent_of_code","sub_path":"2017/03_2/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40535073393","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 17 01:51:36 
2017\n\n@author: FUNNICLOWN\n\"\"\"\n\nimport numpy as np\n\ndef predict(theta, pixel_data, label_text):\n \"\"\"pixel_data→yp & 精度計算\"\"\"\n# pixel_data=[\"width\", \"height\"], label_text=\"nothing\"\n \"数値変換\"\n a0 = pixel_data.flatten()\n# a0 = np.reshape(a0, (1, np.shape(a0)[0]))\n a0 = np.r_[1, a0] #=(1+総ピクセル)\n a0_scaling = np.reshape(a0, (1, np.shape(a0)[0]))\n a0_scaling = scaling(a0_scaling)\n a0 = np.reshape(a0_scaling, np.shape(a0)[0])\n\n g = sigmoid(h(theta, a0))\n \n g_max_index = np.argmax(g)\n\n predict_label = label_text[g_max_index]\n \n return predict_label\n \ndef sigmoid(z):\n\n g = 1 / (1 + np.exp(-z))\n return g\n\ndef h(theta, X):\n \"指標となる値に変換\"\n \"logisticとは少し違う\"\n\n return np.inner(X, theta)\n\ndef scaling(x):\n \"操作可能パラメータ\"\n d_switch = 0 #1 = 定義域使用。 0 = 標準偏差使用 \n n = np.shape(x)[1]\n m = np.shape(x)[0]\n \n if d_switch == 1:\n s = ran(x)\n else:\n s = std(x)\n \"\"\"\n s = np.reshape(s, (1, s.shape[0]))\n \"\"\"\n ave = x[:, 1:n+1].mean(axis = 1) # ave = (m,), x[:, 1:n+1] = (m, n[0])\n ave = np.reshape(ave, (m, 1)) \n x[:, 1:n+1] = (x[:, 1:n+1] - ave) / s\n \n return x\n\ndef ran(x):\n m = np.shape(x)[0]\n n = np.shape(x)[1]\n max = x[:, 1:n+1].max(axis = 1) # x[:, 1:n+1].max(axis=1) = (260, )\n min = x[:, 1:n+1].min(axis = 1)\n range = max - min\n range = np.reshape(range, (m, 1))\n\n return range\n\ndef std(x):\n m = np.shape(x)[0]\n n = np.shape(x)[1]\n \n st = x[:, 1:n+1].std(axis = 1)\n st = np.reshape(st, (m, 1))\n \n return st\n","repo_name":"tuscom/Pro_OCR","sub_path":"accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35201725996","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 7 11:26:23 2018\n\n@author: mingchien\n\"\"\"\n\nfrom kafka import KafkaProducer\nproducer = KafkaProducer(bootstrap_servers='localhost:9092')\nfor _ in range(100):\n producer.send('foobar', b'some_message_bytes')\n print(_)","repo_name":"ToDSung/python_survey","sub_path":"kafka_test/kafkaPythonTest.py","file_name":"kafkaPythonTest.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44192467402","text":"import sys\n\nsys.path.append(\"..\")\n\nfrom flask import render_template\nfrom api import get_categories\nfrom config import config\nfrom Pagination import Pagination\n\n\ndef home(page):\n \"\"\"Render home template.\n\n Args:\n page (int): current pagination page\n\n Returns:\n render_template function\n\n \"\"\"\n categories = get_categories()\n categories_per_page = config[\"categories\"][\"pagination\"][\"per_page\"]\n\n # set pagination to correct page \n pagination = Pagination(\n categories_per_page,\n config[\"categories\"][\"pagination\"][\"truncation_limit\"],\n left_edge=config[\"categories\"][\"pagination\"][\"left_edge\"],\n right_edge=config[\"categories\"][\"pagination\"][\"right_edge\"],\n left_current=config[\"categories\"][\"pagination\"][\"left_current\"],\n right_current=config[\"categories\"][\"pagination\"][\"right_current\"]\n )\n\n pagination.set_current(page, len(categories))\n\n # select categories for given page\n lower_bound = page * categories_per_page\n upper_bound = (page + 1) * categories_per_page\n\n tile_ids = list(categories.keys())[lower_bound:upper_bound]\n tile_categories = {id: categories[id] for id in tile_ids}\n\n return render_template(\"home.html\", 
title=\"Categories\", pagination=pagination, categories=categories,\n tile_categories=tile_categories)\n","repo_name":"MajerMartin/heureka_homework","sub_path":"heureka/routes/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30525965370","text":"import unittest\nfrom cloudify_awssdk.common.tests.test_base import TestBase, mock_decorator\nfrom cloudify_awssdk.ec2.resources.image import EC2Image, IMAGES, IMAGE_ID, \\\n OWNERS\nfrom mock import patch, MagicMock\nfrom cloudify.exceptions import NonRecoverableError\nfrom cloudify_awssdk.ec2.resources import image\n\n\nclass TestEC2Image(TestBase):\n\n def setUp(self):\n self.image = EC2Image(\"ctx_node\", resource_id=True,\n client=True, logger=None)\n mock1 = patch('cloudify_awssdk.common.decorators.aws_resource',\n mock_decorator)\n mock1.start()\n reload(image)\n\n def test_class_properties(self):\n effect = self.get_client_error_exception(name='EC2 Image')\n self.image.client = self.make_client_function('describe_images',\n side_effect=effect)\n res = self.image.properties\n self.assertIsNone(res)\n\n value = {}\n self.image.client = self.make_client_function('describe_images',\n return_value=value)\n with self.assertRaises(NonRecoverableError) as e:\n self.image.properties\n self.assertEqual(e.exception.message,\n \"Found no AMIs matching provided filters.\")\n\n value = {IMAGES: [{IMAGE_ID: 'test_name'}]}\n self.image.client = self.make_client_function('describe_images',\n return_value=value)\n res = self.image.properties\n self.assertEqual(res[IMAGE_ID], 'test_name')\n\n def test_class_status(self):\n value = {}\n self.image.client = self.make_client_function('describe_images',\n return_value=value)\n with self.assertRaises(NonRecoverableError) as e:\n self.image.status\n self.assertEqual(e.exception.message,\n \"Found no AMIs matching provided filters.\")\n\n value = {IMAGES: [None]}\n self.image.client = self.make_client_function('describe_images',\n return_value=value)\n res = self.image.status\n self.assertIsNone(res)\n\n value = {IMAGES: [{IMAGE_ID: 'test_name', 'State': 'available'}]}\n self.image.client = self.make_client_function('describe_images',\n return_value=value)\n res = self.image.status\n self.assertEqual(res, 'available')\n\n def test_class_create(self):\n value = {'Image': 'test'}\n self.image.client = self.make_client_function('create_image',\n return_value=value)\n res = self.image.create(value)\n self.assertEqual(res['Image'], value['Image'])\n\n def test_prepare(self):\n ctx = self.get_mock_ctx(\"Image\")\n config = {IMAGE_ID: 'image', OWNERS: 'owner'}\n iface = MagicMock()\n iface.create = self.mock_return(config)\n image.prepare(ctx, iface, config)\n self.assertEqual(ctx.instance.runtime_properties['resource_config'],\n config)\n\n def test_delete(self):\n config = {IMAGE_ID: 'image'}\n res = self.image.delete(config)\n self.assertIsNone(res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cloudify-incubator/cloudify-awssdk-plugin","sub_path":"cloudify_awssdk/ec2/tests/test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"18694393179","text":"'''\nDeletes all entries of a defined content type. 
If locale defined, then only in that locale, otherwise all entries of all languages\n\noskar.eiriksson@contentstack.com\n\nBE CAREFUL!\n\nSee doc: https://www.contentstack.com/docs/developers/apis/content-management-api/?locale=north-america#delete-an-entry\n'''\nimport cma\nimport config\n\ncontentType = 'landing_page'\nlocale = 'en-us'\ndeleteLocalized = False # If set to True, the locale variable above needs to be the master locale of the stack - It will then delete all entries, in all of the languages\n\n\nentries = cma.getAllEntries(contentType, locale)\nfor entry in entries['entries']:\n config.logging.info('Deleting entry: {} - UID: {}'.format(entry['title'], entry['uid']))\n cma.deleteEntry(contentType, locale, entry['uid'], deleteLocalized)\n\n","repo_name":"Contentstack-Solutions/python-entries-operations","sub_path":"deleteAllEntriesofContentType.py","file_name":"deleteAllEntriesofContentType.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26069657575","text":"\n#DYNAMİC YÖNTEM->çalışması uzun sürüyor.\ndef recMC(coinValueList,change):\n minCoins=change\n if(change in coinValueList):\n return 1\n else:\n for i in [c for c in coinValueList if c<=change]:\n numCoins=1+recMC(coinValueList,change-i)\n if(numCoins0):\n return knownResults[change]\n else:\n for i in [c for c in coinValueList if c<=change]:\n numCoins=1+recMC(coinValueList,change-i,knownResults)\n if(numCoins=0):\n if(tut%liste[m]==0):\n for i in range(tut//liste[m]):\n kullanilan_para1.append(liste[m])\n r+=1\n \n break\n else:\n m-=1\n \n \n while(k>=0):\n if(liste[k]<=n):\n kullanilan_para2.append(liste[k])\n n=n-liste[k]\n z+=1\n else:\n k-=1\n\n if(z>r):\n return kullanilan_para1\n return kullaniülan_para2\n \nprint(para_ustu(63))\n\n\n","repo_name":"basakklc/ProgrammingLaboratory_COMU","sub_path":"para_ustu.py","file_name":"para_ustu.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10549441641","text":"from common.individual import Individual\nimport random\nfrom common.population import Population\n\nclass Problem:\n \"\"\"\n Class that controls the problem and the optimization parameters such as\n number of individuals, number of generations, variables range, mutation rate,\n tabu list and the problem directions (maximization or minimization).\n \"\"\"\n\n def __init__(self,\n objectives,\n num_of_variables,\n variables_range,\n num_of_individuals,\n directions,\n num_of_generations,\n mutation,\n expand=True):\n self.num_of_objectives = len(objectives)\n self.num_of_variables = num_of_variables\n self.num_of_individuals = num_of_individuals\n self.objectives = objectives\n self.expand = expand\n self.variables_range = variables_range\n self.directions = directions\n self.num_of_generations = num_of_generations\n self.variables = self.set_variables()\n self.mutation = mutation\n self.tabu = set()\n\n def set_variables(self):\n \"\"\"\n Set the possible variables values for each decision variable\n :return: The set of possible variables for the given problem\n \"\"\"\n variables = [i for i in range(min(self.variables_range), max(self.variables_range) + 1)]\n return variables\n\n def create_update_tabu_list(self, population):\n \"\"\"\n Create a tabu list (which is in fact a set) or update the current one\n :param population:\n :return: nothing\n \"\"\"\n for individual in population:\n 
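# A minimal usage sketch, with assumed names (`problem`, `child`) that are
# not part of this file: the tuples collected in `self.tabu` let the search
# skip feature vectors it has already visited, e.g.
#   if tuple(child.features) in problem.tabu:
#       child = problem.generate_individual()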
self.tabu.add(tuple(individual.features))\n\n def create_initial_population(self):\n \"\"\"\n Create an initial population\n :return: return a population of N individuals\n \"\"\"\n population = Population()\n for _ in range(self.num_of_individuals):\n individual = self.generate_individual()\n individual.id = _\n individual.trace = [_ for i in range(self.num_of_variables)]\n self.calculate_objectives(individual)\n population.append(individual)\n population.last_id = _\n return population\n\n def generate_individual(self):\n \"\"\"\n Generate an individual\n :return: an individual object\n \"\"\"\n individual = Individual(self.directions)\n individual.features = [random.randint(min(self.variables_range), max(self.variables_range)) for x in range(self.num_of_variables)]\n return individual\n\n\n def calculate_objectives(self, individual):\n \"\"\"\n Calculate the objective function values of an individual\n :param individual: solution containing the decision vector and the objective functions\n :return: nothing\n \"\"\"\n if self.expand:\n individual.objectives = [f(*individual.features) for f in self.objectives]\n else:\n individual.objectives = [f(individual.features) for f in self.objectives]\n\n","repo_name":"mbdemoraes/moead-rfts","sub_path":"common/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20785230584","text":"#! Python 3 Shebang Line\r\n# write a code for Random Quiz with Q/A in random fashion, along with the exact answers.\r\n# General Knowledge : First in the World\r\nimport random\r\nGK_First_World = {\r\n 'First Radio Telescope Satellite launched into Space': 'HALCA (Japan)',\r\n 'First country to use Glass':' Egypt and Mesopotamia',\r\n 'First country to make Map':'The Greeks',\r\n 'First Spaceship landed on Mars':'Viking-I (July 1976)',\r\n 'World’s First Multipurpose River Valley Project' :'Tennessee River Valley Project (USA)',\r\n 'First Space Shuttle Launched':'Columbia (April 1981)',\r\n \"First Rocket to go near the Sun\": \"Helius ‘B’\",\r\n 'First Country to make written Constitution': 'The USA',\r\n 'First Country to start Underground Metro Rail': 'Britain',\r\n 'First Unmanned Mission on the Moon' :'LUNA-9',\r\n 'First Spacecraft to carry man on the Moon': 'Apollo - 11',\r\n 'First Country to do Artificial Satellite Experiment': 'Russia',\r\n 'Country to give Voting Right to Women': 'New Zealand',\r\n 'First Country to appoint Lokpal': 'Sweden',\r\n 'First Country to imposed Carbon Tax': 'New Zealand'\r\n}\r\n#print(GK_First_World)\r\n\r\n# Generate Questions and Answers .txt files with preliminary header information\r\nfor Ques in range(15):\r\n QuesFile = open(f'GK_First_Worldquiz{Ques + 1}.txt', 'w')\r\n AnsFile = open(f'GK_First_Worldquiz_answers{Ques + 1}.txt', 'w')\r\n QuesFile.write('Name of the Student:\\nDate of Exam :\\nSchool Name:\\n')\r\n QuesFile.write((' ' * 20) + f' GK_First_World Quiz (SET-{Ques + 1})')\r\n QuesFile.write('\\n')\r\n\r\n First_World = list(GK_First_World.keys())\r\n random.shuffle(First_World)\r\n\r\n for NumQues in range(1):\r\n\r\n correctAnswer = GK_First_World[First_World[NumQues]]\r\n wrongAnswers = list(GK_First_World.values())\r\n del wrongAnswers[wrongAnswers.index(correctAnswer)]\r\n wrongAnswers = random.sample(wrongAnswers, 3)\r\n answerOptions = wrongAnswers + [correctAnswer]\r\n random.shuffle(answerOptions)\r\n\r\n\r\n for NumQues in range(1):\r\n QuesFile.write(f'{NumQues + 
1}.{First_World[NumQues]}?\\n')\r\n for i in range(4):\r\n QuesFile.write(f\"{'ABCD'[i]}. {answerOptions[i]}\\n\")\r\n QuesFile.write('\\n')\r\n AnsFile.write(f\"{NumQues + 1}.{'ABCD'[answerOptions.index(correctAnswer)]}\")\r\n QuesFile.close()\r\n AnsFile.close()","repo_name":"edgelearningcentre/General-Knowledge","sub_path":"1_First_in_the_World.py","file_name":"1_First_in_the_World.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23663492864","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 28 11:52:48 2023\r\n\r\n@author: kevin\r\n\"\"\"\r\n\r\nimport requests\r\nimport os\r\nfrom flask import Flask, request, render_template, flash, redirect\r\napp = Flask(__name__)\r\napp.secret_key = 'df0331cefc6c2b9a5d0208a726a5d1c0fd37324feba25506'\r\n\r\n\r\n\r\n\r\n@app.route(\"/\", methods = ('GET','POST'))\r\ndef home():\r\n if request.method == 'POST':\r\n lat = request.form['lat']\r\n long = request.form['long']\r\n\r\n if not lat:\r\n flash('Fill out the latitude')\r\n elif not(lat.replace(\".\",\"\",1).replace(\"-\", \"\", 1).isdigit()) :\r\n flash(\"Correct the field lat in correct format\")\r\n elif not(abs(float(lat)) <= 90.0):\r\n flash(\"The latitude must be between -90° and 90°\")\r\n elif not long:\r\n flash('Fill out the longitude')\r\n elif not(long.replace(\".\",\"\",1).replace(\"-\", \"\", 1).isdigit()) :\r\n flash(\"Correct the field long in correct format\")\r\n elif not(abs(float(long)) <= 180.0):\r\n flash(\"The longitude must be between -180° and 180°\")\r\n else:\r\n return redirect(f'/weather?lat={lat}&long={long}') \r\n return render_template(\"create.html\")\r\n\r\n@app.route('/weather')\r\ndef weather():\r\n lat = request.args.get('lat')\r\n long = request.args.get('long')\r\n url = f\"https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={long}&appid={api_key}&units=metric\"\r\n response = requests.get(url)\r\n return response.json()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n api_key = os.environ['API_KEY']\r\n app.run(host = '0.0.0.0')\r\n ","repo_name":"Kev744/TP2","sub_path":"apimeteo.py","file_name":"apimeteo.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28459472408","text":"\"\"\"\nCrear un programa que calcule el sueldo bruto de una \npersona que trabaja de lunes a viernes 8 hs y su pago \npor hora es de 400 pesos. 
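(Worked example with the constants used below: 5 days/week * 4 weeks = 20 days
per month, so the gross salary is 20 * 8 h * 400 = 64,000 pesos.)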
Devolver el sueldo por pantalla.\n\"\"\"\n\ndias = 5 \nhorasPorDia = 8\nporHora = 400\ndiasPorMes = dias * 4\nprint(diasPorMes * horasPorDia * porHora)\n\n\n","repo_name":"GuadaIt/python-exercises","sub_path":"7.1.py","file_name":"7.1.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3857326957","text":"import pygame as pg\r\nfrom variables import *\r\n\r\nclass Cavalier:\r\n\r\n def __init__(self, surface):\r\n self.img = {}\r\n self.img[\"black\"] = pg.transform.scale(pg.image.load(\"./Images/b_knight_png_128px.png\").convert_alpha(surface), (SIZE - SIZE//5, SIZE - SIZE//5))\r\n self.img[\"white\"] = pg.transform.scale(pg.image.load(\"./Images/w_knight_png_128px.png\").convert_alpha(surface), (SIZE - SIZE//5, SIZE - SIZE//5))\r\n\r\n def draw(self, surface, x, y, size, color):\r\n surface.blit(self.img[color], (x*size + SIZE//10, y*size + SIZE//10))\r\n\r\n def case_possible(self, x, y, tab):\r\n l = []\r\n for coord in [(2, 1), (2, -1), (-2, 1), (-2, -1), (1, 2), (1, -2), (-1, 2), (-1, -2)]:\r\n if 0 <= x+coord[0] < X and 0 <= y+coord[1] < Y:\r\n if tab[x][y] == \"b_cavalier\":\r\n if tab[x+coord[0]][y+coord[1]] not in BLACKPIECES:\r\n l.append((x+coord[0], y+coord[1]))\r\n elif tab[x][y] == \"w_cavalier\":\r\n if tab[x+coord[0]][y+coord[1]] not in WHITEPIECES:\r\n l.append((x+coord[0], y+coord[1]))\r\n return l[:]","repo_name":"oneblack74/EchiquierPy","sub_path":"cavalier.py","file_name":"cavalier.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9339737881","text":"import logging\nimport logging.handlers\nimport queue\nimport sys\n\nfrom flaky import flaky\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nimport picologging\nimport picologging.handlers\n\nc_integers = st.integers().filter(lambda x: x < 2147483648 and x > -2147483649)\n\n\n@given(\n name=st.text(),\n level=c_integers,\n lineno=c_integers,\n msg=st.text().filter(lambda t: t.find(\"%\") < 0),\n extra_arg=st.text(),\n func=st.text(),\n sinfo=st.text(),\n)\ndef test_hypothesis_logrecord_constructor(\n name, level, lineno, msg, extra_arg, func, sinfo\n):\n args = (extra_arg,)\n # Create an exception tuple\n exc_info = None\n try:\n 10 / 0\n except ZeroDivisionError:\n exc_info = sys.exc_info()\n pico_record = picologging.LogRecord(\n name, level, __file__, lineno, msg + \" %s\", args, exc_info, func, sinfo\n )\n stdl_record = logging.LogRecord(\n name, level, __file__, lineno, msg + \" %s\", args, exc_info, func, sinfo\n )\n assert pico_record.name == stdl_record.name\n assert pico_record.msg == stdl_record.msg\n assert pico_record.levelno == stdl_record.levelno\n assert pico_record.lineno == stdl_record.lineno\n assert pico_record.module == stdl_record.module\n assert pico_record.args == stdl_record.args\n assert abs(pico_record.created - stdl_record.created) < 0.5\n assert pico_record.getMessage() == stdl_record.getMessage()\n\n\n@flaky(max_runs=4, min_passes=1)\n@given(\n name=st.text(),\n level=c_integers,\n lineno=c_integers,\n msg=st.text().filter(lambda t: t.find(\"%\") < 0),\n extra_arg=st.text(),\n func=st.text(),\n sinfo=st.text(),\n)\ndef test_hypothesis_logrecord_filename(\n name, level, lineno, msg, extra_arg, func, sinfo\n):\n args = (extra_arg,)\n pico_record = picologging.LogRecord(\n name, level, __file__, lineno, msg + \" %s\", args, None, func, sinfo\n )\n stdl_record = 
logging.LogRecord(\n name, level, __file__, lineno, msg + \" %s\", args, None, func, sinfo\n )\n # Filename sometimes reported without extension on Windows\n assert pico_record.filename == stdl_record.filename\n\n\n@given(args=st.lists(st.text(), min_size=0, max_size=10).map(tuple))\ndef test_hypothesis_logrecord_args(args):\n msg = \" %s \" * len(args)\n pico_record = picologging.LogRecord(\"\", 10, __file__, 10, msg, args, None)\n stdl_record = logging.LogRecord(\"\", 10, __file__, 10, msg, args, None)\n assert pico_record.msg == stdl_record.msg\n assert pico_record.args == stdl_record.args\n assert pico_record.getMessage() == stdl_record.getMessage()\n\n\n@given(\n name=st.text(),\n level=c_integers,\n lineno=c_integers,\n msg=st.text().filter(lambda t: t.find(\"%\") < 0),\n extra_arg=st.text(),\n func=st.text(),\n sinfo=st.text(),\n)\ndef test_hypothesis_queuehandler_prepare(\n name, level, lineno, msg, extra_arg, func, sinfo\n):\n \"\"\"This test ensures the robustness of the prepare() method,\n which may be unstable in how it copies LogRecord using positional arguments\n (since it is currently not possible to use copy.copy).\n \"\"\"\n args = (extra_arg,)\n # Create an exception tuple\n exc_info = None\n try:\n 10 / 0\n except ZeroDivisionError:\n exc_info = sys.exc_info()\n pico_record = picologging.LogRecord(\n name, level, __file__, lineno, msg + \" %s\", args, exc_info, func, sinfo\n )\n stdl_record = logging.LogRecord(\n name, level, __file__, lineno, msg + \" %s\", args, exc_info, func, sinfo\n )\n pico_handler = picologging.handlers.QueueHandler(queue.Queue())\n stdl_handler = logging.handlers.QueueHandler(queue.Queue())\n pico_record2 = pico_handler.prepare(pico_record)\n stdl_record2 = stdl_handler.prepare(stdl_record)\n\n assert (\n pico_record2.name == pico_record.name == stdl_record.name == stdl_record2.name\n )\n assert (\n pico_record2.msg\n == stdl_record2.msg\n == stdl_handler.format(stdl_record)\n == pico_handler.format(pico_record)\n )\n assert (\n pico_record2.levelno\n == pico_record.levelno\n == stdl_record.levelno\n == stdl_record2.levelno\n )\n assert (\n pico_record2.lineno\n == pico_record.lineno\n == stdl_record.lineno\n == stdl_record2.lineno\n )\n assert (\n pico_record2.module\n == pico_record.module\n == stdl_record.module\n == stdl_record2.module\n )\n assert pico_record2.getMessage() == stdl_record2.getMessage()\n","repo_name":"microsoft/picologging","sub_path":"slowtests/test_logrecordhyp.py","file_name":"test_logrecordhyp.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":571,"dataset":"github-code","pt":"54"} +{"seq_id":"31870435441","text":"#!/usr/bin/env python3\n\nfrom PIL import Image, ImageFont, ImageDraw\n\n\ndef center_text_h_pos(img, font, text):\n draw = ImageDraw.Draw(img)\n return (img.width - draw.textlength(text, font)) / 2\n\n\ndef calc_v_font_margin(font):\n (left, top, right, bottom) = font.getbbox(\"a\")\n return bottom - top\n\n\nif __name__ == \"__main__\":\n print(\"Image Edition Program\")\n original_image = Image.open(\"original-image.png\")\n edited_image = original_image.copy()\n\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/ubuntu/Ubuntu-R.ttf\", 60)\n\n draw_image = ImageDraw.Draw(edited_image)\n\n text = \"Top Center Text\"\n h_pos = center_text_h_pos(edited_image, font, text)\n text_margin = calc_v_font_margin(font)\n draw_image.text((h_pos, text_margin), text, font=font)\n\n text = \"Mid Center Text\"\n h_pos = center_text_h_pos(edited_image, font, text)\n 
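# Illustration of the centering arithmetic, with assumed numbers: if
# edited_image.width were 800 and draw.textlength(text, font) returned 300,
# center_text_h_pos would give (800 - 300) / 2 == 250.0, i.e. the text
# starts 250 px from the left edge and ends 250 px from the right.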
draw_image.text((h_pos, edited_image.height / 2), text, font=font)\n\n text = \"Bottom Center Text\"\n h_pos = center_text_h_pos(edited_image, font, text)\n draw_image.text((h_pos, edited_image.height - 60 - text_margin), text, font=font)\n\n edited_image.show()\n\n edited_image.save(\"edited-image.png\")\n","repo_name":"P-Miranda/image-edition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4889259378","text":"import json\nimport sys\n\n\nclassname = sys.argv[1]\ntry:\n filename = sys.argv[2]\nexcept:\n filename = 'output/words.json'\n\n\nwith open(filename) as infile:\n words = json.load(infile)\n\nnew_words = {}\n\nfor key, word in words.items():\n if classname in word['classes']:\n new_words[key] = word\n\nprint(json.dumps(new_words, indent=4, ensure_ascii=False))\n","repo_name":"OrangeTacTics/dmtdecks","sub_path":"utils/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37723229848","text":"elements = [23, 14, 56, 12, 19, 9, 15, 25, 31, 42, 43]\nsum=0\nsum1=0\nodd=0\neven=0\ni=0\nwhile i 0:\n self.qty -= 1\n return '+'\n else:\n raise StopIteration\n\nclass Letters:\n def __init__(self, string):\n self.letters = []\n for i in string:\n self.letters.append(i)\n\n def __iter__(self):\n return LettersIterator(self.letters)\n\nclass LettersIterator:\n def __init__(self, letters):\n self.letters = letters\n def __next__(self):\n if self.letters == []:\n raise StopIteration\n item = self.letters[0]\n del self.letters[0]\n return item\n\n# ________________________HOMEWORK______________\n\nclass RanIterator:\n def __init__(self, n, a, b):\n self.n = n\n self.b = b\n self.a = a\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.n == 0:\n raise StopIteration\n self.n -= 1\n return randint(self.a, self.b)\n\n\nlt = Letters('aeoui')\nprint(type(lt.letters))\nprint(lt.letters)\n\nlti = iter(lt)\n# print(next(lti))\nprint(lt.letters)\nprint()\nprint(type(lti.letters))\nfor i in lt:\n print(i)\n\na = A(5)\nprint(next(a))\nprint(next(a))\nprint()\n\nfor i in a:\n print(i)\n\n# f = open('new 1.txt')\n# print(f.__next__())\n# print(f.__next__())\n# print(next(f))\n# next(f)\n# print(f.__next__())\n\n# _______Homework_____________________________________\nitr = RanIterator(3,4,8)\nfor i in itr:\n print(i)","repo_name":"romanmelnyk73/OOP_ex","sub_path":"iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42999546483","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport torch\nimport torch.nn as nn\nimport math\nimport numpy as np\n\n\n\n\nclass ConvTemporalGraphical(nn.Module):\n #Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py\n r\"\"\"The basic module for applying a graph convolution.\n Args:\n in_channels (int): Number of channels in the input sequence data\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int): Size of the graph convolving kernel\n t_kernel_size (int): Size of the temporal convolving kernel\n t_stride (int, optional): Stride of the temporal convolution. Default: 1\n t_padding (int, optional): Temporal zero-padding added to both sides of\n the input. 
Default: 0\n            t_dilation (int, optional): Spacing between temporal kernel elements.\n                Default: 1\n            bias (bool, optional): If ``True``, adds a learnable bias to the output.\n                Default: ``True``\n        Shape:\n            - Input: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format\n            - Output: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format\n            where\n                :math:`N` is a batch size,\n                :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,\n                :math:`T_{in}/T_{out}` is a length of input/output sequence,\n                :math:`V` is the number of graph nodes. \n    \"\"\"\n    def __init__(self,\n                 time_dim,\n                 joints_dim\n    ):\n        super(ConvTemporalGraphical,self).__init__()\n        \n        self.A=nn.Parameter(torch.FloatTensor(time_dim, joints_dim,joints_dim)) #learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix)\n        stdv = 1. / math.sqrt(self.A.size(1))\n        self.A.data.uniform_(-stdv,stdv)\n\n        self.T=nn.Parameter(torch.FloatTensor(joints_dim, time_dim, time_dim)) \n        stdv = 1. / math.sqrt(self.T.size(1))\n        self.T.data.uniform_(-stdv,stdv)\n        '''\n        self.prelu = nn.PReLU()\n        \n        self.Z=nn.Parameter(torch.FloatTensor(joints_dim, joints_dim, time_dim, time_dim)) \n        stdv = 1. / math.sqrt(self.Z.size(2))\n        self.Z.data.uniform_(-stdv,stdv)\n        '''\n        self.joints_dim = joints_dim\n        self.time_dim = time_dim\n\n    def forward(self, x):\n        x = torch.einsum('nctv,vtq->ncqv', (x, self.T))\n        ## x=self.prelu(x)\n        x = torch.einsum('nctv,tvw->nctw', (x, self.A))\n        ## x = torch.einsum('nctv,wvtq->ncqw', (x, self.Z))\n        return x.contiguous() \n\n\nclass ConvTemporalGraphicalEnhanced(nn.Module):\n    #Source : https://github.com/yysijie/st-gcn/blob/master/net/st_gcn.py\n    r\"\"\"The basic module for applying a graph convolution.\n    Args:\n        in_channels (int): Number of channels in the input sequence data\n        out_channels (int): Number of channels produced by the convolution\n        kernel_size (int): Size of the graph convolving kernel\n        t_kernel_size (int): Size of the temporal convolving kernel\n        t_stride (int, optional): Stride of the temporal convolution. Default: 1\n        t_padding (int, optional): Temporal zero-padding added to both sides of\n            the input. Default: 0\n        t_dilation (int, optional): Spacing between temporal kernel elements.\n            Default: 1\n        bias (bool, optional): If ``True``, adds a learnable bias to the output.\n            Default: ``True``\n    Shape:\n        - Input: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format\n        - Output: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format\n        where\n            :math:`N` is a batch size,\n            :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,\n            :math:`T_{in}/T_{out}` is a length of input/output sequence,\n            :math:`V` is the number of graph nodes. \n    \"\"\"\n    def __init__(self,\n                 time_dim,\n                 joints_dim,\n                 dim_used = [ 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17,\n                             18, 19, 21, 22, 25, 26, 27, 29, 30],\n                 parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,\n                          16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30],\n                 joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],\n                 joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31],\n                 version='long'\n    ):\n        super(ConvTemporalGraphicalEnhanced,self).__init__()\n        \n        self.A=nn.Parameter(torch.FloatTensor(time_dim, joints_dim, joints_dim)) #learnable, graph-agnostic 3-d adjacency matrix(or edge importance matrix)\n        stdv = 1. / math.sqrt(self.A.size(1))\n        self.A.data.uniform_(-stdv,stdv)\n\n        self.T=nn.Parameter(torch.FloatTensor(joints_dim, time_dim, time_dim)) \n        stdv = 1. 
/ math.sqrt(self.T.size(1))\n        self.T.data.uniform_(-stdv,stdv)\n        '''\n        self.prelu = nn.PReLU()\n        \n        self.Z=nn.Parameter(torch.FloatTensor(joints_dim, joints_dim, time_dim, time_dim)) \n        stdv = 1. / math.sqrt(self.Z.size(2))\n        self.Z.data.uniform_(-stdv,stdv)\n        '''\n        self.A_s = torch.zeros((1,joints_dim,joints_dim), requires_grad=False)\n        for i, dim in enumerate(dim_used):\n            self.A_s[0][i][i] = 1\n            if parents[dim] in dim_used:\n                parent_index = dim_used.index(parents[dim])\n                self.A_s[0][i][parent_index] = 1\n                self.A_s[0][parent_index][i] = 1\n            if dim in joints_left:\n                index = joints_left.index(dim)\n                right_dim = joints_right[index]\n                if right_dim in dim_used:\n                    right_index = dim_used.index(right_dim)\n                    self.A_s[0][i][right_index] = 1\n                    self.A_s[0][right_index][i] = 1\n        self.T_s = torch.zeros((1,time_dim,time_dim), requires_grad=False)\n        if version == 'long':\n            for i in range(time_dim):\n                if i > 0:\n                    self.T_s[0][i-1][i] = 1\n                    self.T_s[0][i][i-1] = 1\n\n                if i < time_dim - 1:\n                    self.T_s[0][i+1][i] = 1\n                    self.T_s[0][i][i+1] = 1\n                \n                self.T_s[0][i][i] = 1\n        elif version == 'short':\n            self.T_s = self.T_s + 1\n        else:\n            raise Exception(\"model type should be long or short\")\n\n        self.joints_dim = joints_dim\n        self.time_dim = time_dim\n\n    def forward(self, x):\n        T = self.T * self.T_s.to(x.device)\n        A = self.A * self.A_s.to(x.device)\n        x = torch.einsum('nctv,vtq->ncqv', (x, T))\n        ## x=self.prelu(x)\n        x = torch.einsum('nctv,tvw->nctw', (x, A))\n        ## x = torch.einsum('nctv,wvtq->ncqw', (x, self.Z))\n        return x.contiguous() \n\nclass ST_GCNN_layer(nn.Module):\n    \"\"\"\n    Shape:\n        - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format\n        - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format\n        - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format\n        where\n            :math:`N` is a batch size,\n            :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,\n            :math:`T_{in}/T_{out}` is a length of input/output sequence,\n            :math:`V` is the number of graph nodes. 
\n        :in_channels= dimension of coordinates\n        : out_channels=dimension of coordinates\n    \"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size,\n                 stride,\n                 time_dim,\n                 joints_dim,\n                 dropout,\n                 bias=True,\n                 version='full',\n                 dim_used=None):\n        \n        super(ST_GCNN_layer,self).__init__()\n        self.kernel_size = kernel_size\n        assert self.kernel_size[0] % 2 == 1\n        assert self.kernel_size[1] % 2 == 1\n        padding = ((self.kernel_size[0] - 1) // 2,(self.kernel_size[1] - 1) // 2)\n        \n        if version == 'full':\n            self.gcn=ConvTemporalGraphical(time_dim,joints_dim) # the convolution layer\n        else:\n            self.gcn=ConvTemporalGraphicalEnhanced(time_dim,joints_dim,dim_used=dim_used,version=version)\n\n        self.tcn = nn.Sequential(\n            nn.Conv2d(\n                in_channels,\n                out_channels,\n                (self.kernel_size[0], self.kernel_size[1]),\n                (stride, stride),\n                padding,\n            ),\n            nn.BatchNorm2d(out_channels),\n            nn.Dropout(dropout, inplace=True),\n        ) \n        \n        if stride != 1 or in_channels != out_channels: \n\n            self.residual=nn.Sequential(nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size=1,\n                stride=(1, 1)),\n                nn.BatchNorm2d(out_channels),\n            )\n        \n        \n        else:\n            self.residual=nn.Identity()\n        \n        \n        self.prelu = nn.PReLU()\n\n        \n\n    def forward(self, x):\n        # assert A.shape[0] == self.kernel_size[1], print(A.shape[0],self.kernel_size)\n        res=self.residual(x)\n        x=self.gcn(x) \n        x=self.tcn(x)\n        x=x+res\n        x=self.prelu(x)\n        return x\n\n\nclass Model(nn.Module):\n    \"\"\" \n    Shape:\n        - Input[0]: Input sequence in :math:`(N, in_channels,T_in, V)` format\n        - Output[0]: Output sequence in :math:`(N,T_out,in_channels, V)` format\n        where\n            :math:`N` is a batch size,\n            :math:`T_{in}/T_{out}` is a length of input/output sequence,\n            :math:`V` is the number of graph nodes. \n        :in_channels=number of channels for the coordinates (default=3)\n    \"\"\"\n\n    def __init__(self,\n                 input_channels,\n                 input_time_frame,\n                 output_time_frame,\n                 st_gcnn_dropout,\n                 dim_used,\n                 n_pre=20,\n                 bias=True,\n                 version='long'):\n        \n        super(Model,self).__init__()\n        self.input_time_frame=input_time_frame\n        self.output_time_frame=output_time_frame\n        dim_used = sorted([dim_used[i] // 3 for i in range(0, dim_used.shape[0], 3)])\n        joints_to_consider=len(dim_used)\n        self.joints_to_consider=joints_to_consider\n        self.st_gcnns=nn.ModuleList()\n        self.st_gcnns.append(ST_GCNN_layer(input_channels,64,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version='full',dim_used=dim_used))\n\n        self.st_gcnns.append(ST_GCNN_layer(64,32,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version=version,dim_used=dim_used))\n        \n        self.st_gcnns.append(ST_GCNN_layer(32,64,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version=version,dim_used=dim_used))\n        \n        self.st_gcnns.append(ST_GCNN_layer(64,32,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version=version,dim_used=dim_used)) \n        \n        self.st_gcnns[-1].gcn.A = self.st_gcnns[-3].gcn.A\n        self.st_gcnns[-1].gcn.T = self.st_gcnns[-3].gcn.T\n\n        self.st_gcnns.append(ST_GCNN_layer(32,64,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version=version,dim_used=dim_used))\n        self.st_gcnns[-1].gcn.A = self.st_gcnns[-3].gcn.A\n        self.st_gcnns[-1].gcn.T = self.st_gcnns[-3].gcn.T\n\n        self.st_gcnns.append(ST_GCNN_layer(64,32,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version=version,dim_used=dim_used)) \n        \n        self.st_gcnns[-1].gcn.A = self.st_gcnns[-3].gcn.A\n        self.st_gcnns[-1].gcn.T = self.st_gcnns[-3].gcn.T\n\n        self.st_gcnns.append(ST_GCNN_layer(32,64,[3,1],1,n_pre,\n                                           joints_to_consider,st_gcnn_dropout,version=version,dim_used=dim_used))\n        
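# As in the pairs above, reuse the learnable spatial (A) and temporal (T) graph\n        # matrices from the matching earlier block, so mirrored stages share weights.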
\n self.st_gcnns[-1].gcn.A = self.st_gcnns[-3].gcn.A\n self.st_gcnns[-1].gcn.T = self.st_gcnns[-3].gcn.T\n\n self.st_gcnns.append(ST_GCNN_layer(64,input_channels,[3,1],1,n_pre,\n joints_to_consider,st_gcnn_dropout,version='full',dim_used=dim_used)) \n # self.st_gcnns[-1].gcn.A = self.st_gcnns[0].gcn.A \n # self.st_gcnns[-1].gcn.T = self.st_gcnns[0].gcn.T\n\n self.dct_m, self.idct_m = self.get_dct_matrix(self.input_time_frame + self.output_time_frame)\n self.n_pre = n_pre\n\n def get_dct_matrix(self, N, is_torch=True):\n dct_m = np.eye(N)\n for k in np.arange(N):\n for i in np.arange(N):\n w = np.sqrt(2 / N)\n if k == 0:\n w = np.sqrt(1 / N)\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\n idct_m = np.linalg.inv(dct_m)\n if is_torch:\n dct_m = torch.from_numpy(dct_m)\n idct_m = torch.from_numpy(idct_m)\n return dct_m, idct_m \n\n def forward(self, x):\n idx_pad = list(range(self.input_time_frame)) + [self.input_time_frame - 1] * self.output_time_frame\n y = torch.zeros((x.shape[0], x.shape[1], self.output_time_frame, x.shape[3])).to(x.device)\n inp = torch.cat([x, y], dim=2).permute(0, 2, 1, 3)\n N, T, C, V = inp.shape\n dct_m = self.dct_m.to(x.device).float()\n idct_m = self.idct_m.to(x.device).float()\n inp = inp.reshape([N, T, C * V])\n inp = torch.matmul(dct_m[:self.n_pre], inp[:, idx_pad, :]).reshape([N, -1, C, V]).permute(0, 2, 1, 3)\n res = inp\n x = inp\n\n for gcn in (self.st_gcnns):\n x = gcn(x)\n\n x += res\n x = x.permute(0, 2, 1, 3).reshape([N, -1, C * V])\n x_re = torch.matmul(idct_m[:, :self.n_pre], x).reshape([N, T, C, V])\n x = x_re\n \n return x[:, self.input_time_frame:], x_re","repo_name":"Sirui-Xu/STARS","sub_path":"deterministic/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14262,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"54"} +{"seq_id":"22915681060","text":"# -*- coding: utf-8 -*-\n\n\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import except_orm, Warning, RedirectWarning\n\n\nclass ResPartner(models.Model):\n _inherit = 'res.partner'\n\n company_type = fields.Selection(\n [('person', u'شخص'),\n ('hospital', u'مستشفى'),\n ('governmental_entity', u'جهة حكومية'),\n ('company', u'شركة'),\n ('faculty', u'جامعة'),\n ('school', u'معهد'),\n ('inter_reg_org',u'منظمة دولية أو اقليمية'),\n ])\n is_hospital = fields.Boolean(string='is hospital')\n # inter_reg_org = fields.Boolean(string=u'منظمة دولية أو اقليمية', default=False)\n insurance = fields.Boolean(string=u'تابعة للتأمين', default=False)\n hospital_director = fields.Char(string=u'مدير المستشفى')\n code = fields.Char(string=u'الرمز')\n @api.multi\n def on_change_company_type(self, company_type):\n if company_type == 'hospital':\n company_type = 'company'\n return {'value': {'is_company': company_type == 'company', 'is_hospital': True}}\n else:\n return {'value': {'is_company': company_type == 'company', 'is_hospital': False}}\n\n\nclass ResPartnerBank(models.Model):\n _inherit = 'res.partner.bank'\n\n account_opening_date = fields.Date(string=u'تاريخ فتح الحساب')\n is_deposit = fields.Boolean(string='للإيداع')\n employee_id = fields.Many2one('hr.employee', string=u'الموظف')\n type_bank = fields.Selection(\n [\n ('governmental_entity', u' حكومية'),\n ('private', u'أهلي'),\n ])\n","repo_name":"rouag/orvea","sub_path":"smart_hr/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"ar","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} 
+{"seq_id":"71480889123","text":"import os\nimport re\nfrom subprocess import Popen, PIPE\nfrom distutils.spawn import find_executable\n\nfrom dotenv import dotenv_values\nfrom packaging.version import Version\n\nfrom .distro import Distribution\nfrom .errors import (CannotIdentifyDistribution,\n UnsupportedDistribution)\nfrom .utils import flatten_list\nfrom .logger import logger\nfrom .pkgindex import debian_codename_index, arch_codename_index, \\\n fedora_codename_index, alpine_codename_index, gentoo_codename_index, \\\n centos_codename_index\nfrom ..config.distributions import distrodata\n\n\nclass Installer(object):\n\n def __init__(self, spices):\n\n self.distname = ''\n self.codename = ''\n self.version = ''\n self.metadistname = ''\n self.metacodename = ''\n # self.release_data = {}\n # self.dpkg_origins_data = {}\n # self.apt_policy_data = []\n self.lsb_release_command = find_executable('lsb_release')\n self.os_release = '/etc/os-release'\n self.lsb_release = '/etc/lsb-release'\n self.dpkg_origins = '/etc/dpkg/origins/default'\n self.debian_release = '/etc/debian_version'\n self.fedora_release = '/etc/fedora-release'\n self.alpine_release = '/etc/alpine-release'\n self.arch_release = '/etc/arch-release'\n self.gentoo_release = '/etc/gentoo-release'\n self.centos_release = '/etc/centos-release'\n self.env = os.environ.copy()\n self.env['LC_ALL'] = 'C'\n\n self.longnames = {\n 'v': 'version',\n 'o': 'origin',\n 'a': 'suite',\n 'c': 'component',\n 'l': 'label'\n }\n\n self.spicesdata = spices\n self.distributions = distrodata\n self.codenames = {}\n self.revcodenames = {}\n\n self.populate_codename_index()\n # self.get_distro_data()\n # self.normalize_distro_data()\n\n def populate_codename_index(self):\n logger.info('Generating distributions database')\n self.distributions['debian']['codenames'] = \\\n debian_codename_index()\n self.distributions['arch']['codenames'] = \\\n arch_codename_index()\n self.distributions['fedora']['codenames'] = \\\n fedora_codename_index()\n self.distributions['alpine']['codenames'] = \\\n alpine_codename_index()\n self.distributions['centos']['codenames'] = \\\n centos_codename_index()\n self.distributions['gentoo']['codenames'] = \\\n gentoo_codename_index()\n\n def codename_index(self, x):\n\n suite = x[1].get('suite')\n order = list(self.distributions[self.distname]['codenames'].items())\n order.sort()\n order = list(flatten_list(list(zip(*order))[1]))\n\n if suite:\n if suite in order:\n return int(len(order) - order.index(suite))\n else:\n return suite\n return 0\n\n def parse_apt_policy(self):\n\n retval = {}\n policy = Popen(args=['apt-cache', 'policy'],\n stdout=PIPE, stderr=PIPE, env=self.env,\n close_fds=True).communicate()[0].decode('utf-8')\n\n for line in policy.split('\\n'):\n line = line.strip()\n m = re.match(r'(-?\\d+)', line)\n\n if m:\n priority = int(m.group(1))\n\n if line.startswith('release'):\n bits = line.split(' ', 1)\n\n if len(bits) > 1:\n\n for bit in bits[1].split(','):\n kv = bit.split('=', 1)\n\n if len(kv) > 1:\n k, v = kv[:2]\n\n if k in self.longnames:\n retval[self.longnames[k]] = v\n\n self.apt_policy_data.append((priority, retval))\n return self.apt_policy_data\n\n def get_codename_from_apt(self, origin, component='main'):\n\n releases = self.parse_apt_policy()\n releases = [x for x in releases if (\n x[1].get('origin', '').lower() == origin and\n x[1].get('component', '').lower() == component and\n x[1].get('label', '').lower() == origin)]\n\n releases.sort(key=lambda tuple: tuple[0], reverse=True)\n\n 
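# Of the matching releases, keep only those with the highest pin priority and\n        # then prefer the most recent codename.\n        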
max_priority = releases[0][0]\n releases = [x for x in releases if x[0] == max_priority]\n releases.sort(key=self.codename_index)\n\n return releases[0][1]['suite']\n\n def parse_os_release(self, release):\n return dotenv_values(release)\n\n def cat_file(self, release):\n with open(release) as content:\n return content.read()\n\n def parse_dpkg_origins(self, origins):\n dpkg_origins_data = {}\n with open(origins) as content:\n contentlist = content.read()\n for j in contentlist.split('\\n'):\n keyvalue = j.split(':')\n if len(keyvalue) > 1:\n dpkg_origins_data[keyvalue[0].strip()] = keyvalue[1].strip()\n return dpkg_origins_data\n\n def cmd_return_full(self, args, env):\n return Popen(\n args=args, stdout=PIPE, stderr=PIPE,\n env=env, close_fds=True\n ).communicate()[0].decode('utf-8')\n\n def cmd_return_first_line(self, args, env):\n return Popen(\n args=args, stdout=PIPE, stderr=PIPE,\n env=env, close_fds=True\n ).communicate()[0].decode('utf-8').split('\\n')[0]\n\n def try_lsb_release_command(self):\n if (not self.distname) and self.lsb_release_command:\n self.distname = self.cmd_return_first_line(\n [self.lsb_release_command, '-is'], self.env)\n self.codename = self.cmd_return_first_line(\n [self.lsb_release_command, '-cs'], self.env)\n\n def try_arch_release_file(self):\n if (not self.distname) and os.path.exists(self.arch_release):\n self.distname = 'arch'\n self.codename = 'rolling'\n self.version = 'rolling'\n\n def try_gentoo_release_file(self):\n if (not self.distname) and os.path.exists(self.gentoo_release):\n self.distname = 'gentoo'\n self.codename = 'rolling'\n self.version = 'rolling'\n\n def try_fedora_release_file(self):\n if (not self.distname) and os.path.exists(self.fedora_release):\n relstr = self.cat_file(self.fedora_release)\n relarray = relstr.split()\n version = Version(relarray[2])\n self.distname = relarray[0]\n self.version = f'{version.major}'\n\n def try_centos_release_file(self):\n if (not self.distname) and os.path.exists(self.centos_release):\n relstr = self.cat_file(self.centos_release)\n relarray = relstr.split()\n stream = 'stream' if relarray[1].lower() == 'stream' else ''\n version = Version(relarray[3])\n self.distname = relarray[0]\n self.codename = f'{stream}{version.major}'\n\n def try_lsb_release_file(self):\n if (not self.distname) and os.path.exists(self.lsb_release):\n rel = self.parse_os_release(self.lsb_release)\n self.distname = rel['DISTRIB_ID'] \\\n if 'DISTRIB_ID' in rel else ''\n self.codename = rel['DISTRIB_CODENAME'] \\\n if 'DISTRIB_CODENAME' in rel else ''\n self.version = rel['DISTRIB_RELEASE'] \\\n if 'DISTRIB_RELEASE' in rel else ''\n\n def try_os_release_file(self):\n if (not self.distname) and os.path.exists(self.os_release):\n rel = self.parse_os_release(self.os_release)\n self.distname = rel['ID'] if 'ID' in rel else ''\n self.codename = rel['VERSION_CODENAME'] \\\n if 'VERSION_CODENAME' in rel else ''\n self.version = rel['VERSION_ID'] if 'VERSION_ID' in rel else ''\n\n def try_dpkg_origins(self):\n if (not self.distname) and os.path.exists(self.dpkg_origins):\n origins = self.parse_dpkg_origins(self.dpkg_origins)\n self.distname = origins['VENDOR'] if 'VENDOR' in origins else ''\n\n def try_apt(self):\n if self.distname and (not self.codename) \\\n and os.path.exists(self.debian_release):\n rel = self.cat_file(self.debian_release)\n\n if re.findall(r'.*/.*', rel):\n self.codename = self.get_codename_from_apt(self.distname)\n else:\n self.codename = rel\n\n def get_distro_data(self):\n\n logger.info('Attempting to identify 
your distribution')\n\n self.try_lsb_release_command()\n self.try_arch_release_file()\n self.try_gentoo_release_file()\n self.try_fedora_release_file()\n self.try_centos_release_file()\n self.try_lsb_release_file()\n self.try_os_release_file()\n self.try_dpkg_origins()\n self.try_apt()\n\n if not (self.distname and self.codename):\n raise CannotIdentifyDistribution()\n\n self.codenames = self.distributions[self.distname]['codenames']\n\n for k, v in self.codenames.items():\n if len(v) > 1:\n for j in v:\n self.revcodenames[j] = k\n else:\n self.revcodenames[v[0]] = k\n\n def normalize_distro_data(self):\n\n regex = re.compile(r'^(\\d+)\\.(\\d+)(\\.(\\d+))?([ab](\\d+))?$', re.VERBOSE)\n codematch = regex.match(self.codename)\n\n if not codematch:\n self.version = self.revcodenames[self.codename]\n else:\n (major, minor, patch, pre, prenum) = codematch.group(1, 2, 4, 5, 6)\n self.version = '.'.join(list(filter(None, [major, minor, patch,\n pre, prenum])))\n vermatch = regex.match(self.version)\n # if self.distname == 'ubuntu':\n # self.codename = self.codenames['.'.join(vermatch.group(1, 2))][0]\n\n # else:\n self.codename = self.codenames[str(float(vermatch.group(1)))][0]\n\n if self.is_supported_codename():\n logger.info('You are using %s (%s).' % (self.distname, self.codename))\n self.distribution = Distribution(self.distname,\n self.codename,\n self.version,\n self.spicesdata,\n self.distributions)\n else:\n raise UnsupportedDistribution()\n\n def is_supported_distname(self):\n if self.distname in self.distributions:\n return True\n return False\n\n def is_supported_codename(self):\n if self.is_supported_distname():\n if (self.codename in\n self.distributions[self.distname][self.version]):\n return True\n return False\n\n def execute(self):\n logger.info('Installing missing dependencies ...')\n self.update_package_db()\n self.install()\n\n def update_package_db(self):\n self.distribution.update_package_db()\n\n def install(self):\n self.distribution.install()\n","repo_name":"LuisAlejandro/spices","sub_path":"spices/core/installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":11090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16526651298","text":"\nimport pandas as pd\nimport base\n\n# Loading the train & test data -\ntrain = pd.read_csv('/Users/kumarpersonal/Downloads/Heart-Disease-Pred/Data/train.csv')\ntest = pd.read_csv('/Users/kumarpersonal/Downloads/Heart-Disease-Pred/Data/test.csv')\n\n# Splitting the data into independent & dependent variables -\nX_train, y_train = base.splitter(train, y_var='target')\nX_test, y_test = base.splitter(test, y_var='target')\n\n# Standardizing the data -\nX_train_scaled, X_test_scaled = base.standardizer(X_train, X_test)\n\n# Training the model -\nfrom xgboost import XGBClassifier\n\nparams = {'max_depth': 1}\nclf = XGBClassifier(**params)\n\nmodel = base.model_train(clf, X_train_scaled, y_train)\n\n# Checking the model's performance -\nbase.model_eval(model, X_train_scaled, X_test_scaled, y_train, y_test)","repo_name":"kumar9249/Heart-Disease-Pred","sub_path":"Code/test_clf.py","file_name":"test_clf.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31686455521","text":"# DL10A.py CS5173/6073 cheng 2019\r\n# from DeepDreaming with TensorFlow\r\n# randomly picking a learned feature at one of the 59 layers of inception\r\n# Start with random noise and gradient 
descent optimizing the selected channel\r\n# Display the result after 20 iterations\r\n# Usage: python DL10A.py\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nmodel_fn = 'tensorflow_inception_graph.pb'\r\n\r\ngraph = tf.Graph()\r\nsess = tf.InteractiveSession(graph=graph)\r\nwith tf.gfile.FastGFile(model_fn, 'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\nt_input = tf.placeholder(np.float32, name='input') # define the input tensor\r\nimagenet_mean = 117.0\r\nt_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)\r\ntf.import_graph_def(graph_def, {'input':t_preprocessed})\r\nlayers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]\r\nfeature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]\r\n\r\nprint('Number of layers', len(layers))\r\nlayer = 'mixed4d_3x3_bottleneck_pre_relu'\r\nchannel = np.random.randint(144) # picking some feature channel to visualize\r\nprint('Channel', channel)\r\nimport matplotlib.pyplot as plt\r\n\r\nimg_noise = np.random.uniform(size=(224,224,3)) + 100.0\r\n\r\ndef showarray(a):\r\n a = np.uint8(np.clip(a, 0, 1)*255)\r\n plt.imshow(a)\r\n \r\ndef visstd(a, s=0.1):\r\n '''Normalize the image range for visualization'''\r\n return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5\r\n\r\ndef T(layer):\r\n '''Helper for getting layer output tensor'''\r\n return graph.get_tensor_by_name(\"import/%s:0\"%layer)\r\n\r\ndef render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):\r\n t_score = tf.reduce_mean(t_obj) # defining the optimization objective\r\n t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!\r\n \r\n img = img0.copy()\r\n for i in range(iter_n):\r\n g, score = sess.run([t_grad, t_score], {t_input:img})\r\n # normalizing the gradient, so the same step size should work \r\n g /= g.std()+1e-8 # for different layers and networks\r\n img += g*step\r\n print(score, end = ' ')\r\n showarray(visstd(img))\r\n\r\nrender_naive(T(layer)[:,:,:,channel])\r\nplt.show()\r\n\r\n\r\n","repo_name":"domfarolino/deep-learning","sub_path":"Assignment10/DL10A.py","file_name":"DL10A.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29499716862","text":"from .base import * # noqa\nfrom .base import env\n\n# GENERAL\n# ------------------------------------------------------------------------------\nDEBUG = True\nSECRET_KEY = env(\n \"DJANGO_SECRET_KEY\",\n default=\"!!!SET DJANGO_SECRET_KEY!!!\",\n)\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [\"localhost\", \"0.0.0.0\", \"127.0.0.1\", \"[::1]\"]\n\n# CACHES\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#caches\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"\",\n }\n}\n\n# django-extensions\n# ------------------------------------------------------------------------------\n# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration\nINSTALLED_APPS += [\"django_extensions\"] # noqa F405\n\n# Write emails to screen instead of actually sending them.\nEMAIL_BACKEND = 
\"django.core.mail.backends.console.EmailBackend\"\n","repo_name":"halomod/TheHaloMod","sub_path":"TheHaloMod/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"30070750119","text":"from copy import deepcopy\nfrom fractions import Fraction\nfrom .ingredient import Ingredient\nfrom .quantity import Quantity, QuantityDescriptor\nfrom .exceptions import FieldError\n\ntry:\n long\nexcept NameError:\n long = int\n\nRECIPE_ATTRIBUTES = (\n 'index',\n 'name',\n 'servings',\n 'source',\n 'source_url',\n 'prep_time',\n 'cook_time',\n 'notes',\n 'ingredients',\n 'directions',\n 'categories',\n)\n\n\nclass Recipe(object):\n \"\"\"Class for representing a recipe.\n\n :ivar str name: human-friendly name of recipe\n :ivar index: optional application-specific indexing value\n :ivar servings: number of servings or a range (a 2-item tuple)\n :type servings int or tuple or None:\n :ivar str source: human-friendly source of recipe\n :ivar str source_url: URL source to for recipe\n :ivar Quantity prep_time: total preparation time for recipe\n :ivar Quantity cook_time: total cooking time for recipe\n :ivar str notes: miscellaneous data about recipe\n :ivar list ingredients: list of Ingredient objects\n :ivar list directions: list of instructions to prepare recipe\n :ivar list categories: list of categories for recipe organization\n \"\"\"\n\n name = None\n\n index = None\n servings = None\n source = ''\n source_url = ''\n prep_time = QuantityDescriptor('prep_time', convert=True)\n cook_time = QuantityDescriptor('cook_time', convert=True)\n notes = ''\n\n @classmethod\n def from_dict(cls, d):\n \"\"\" Creates a new recipe from a dictionary.\n :param dict d: the dictionary to convert\n :raises FieldError: if a field is missing, invalid, or not well-formed\n :raises ParseError: if a Pyprika syntax error is present\n :returns: the resulting recipe\n :rtype: Recipe\n \"\"\"\n if 'name' not in d:\n raise FieldError('Field is required', 'name')\n i = cls(name=d['name'])\n for key in d.keys():\n if not hasattr(i, key):\n raise FieldError('Unknown field for recipe', key)\n v = deepcopy(d[key])\n setattr(i, key, v)\n return i\n\n def to_dict(self, serialize=False):\n \"\"\" Return a dictionary representing the Recipe.\n\n :param bool serialize: convert as much as possible to primitive types\n :returns: a dictionary mapping attribute names to values\n :rtype: dict\n \"\"\"\n def _serialize(value):\n if not serialize:\n return value\n if isinstance(value, (Quantity, Ingredient)):\n return str(value)\n elif isinstance(value, (tuple, list)):\n cls = type(value)\n return cls(_serialize(x) for x in value)\n return value\n return dict((x, _serialize(getattr(self, x)))\n for x in RECIPE_ATTRIBUTES)\n\n def __init__(self, name):\n self.name = name\n self.ingredients = []\n self.directions = []\n self.categories = []\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return \"\".format(self)\n\n def __eq__(self, o):\n if not isinstance(o, type(self)):\n return False\n return all(getattr(self, x) == getattr(o, x)\n for x in RECIPE_ATTRIBUTES)\n\n def __mul__(self, o):\n result = deepcopy(self)\n if isinstance(result.servings, (list, tuple)):\n result.servings = [o * x for x in result.servings]\n elif result.servings is None:\n pass\n else:\n result.servings = o * result.servings if result.servings else None\n result.ingredients = [o * i for i in result.ingredients]\n return 
result\n\n    def __rmul__(self, o):\n        return self * o\n\n    @property\n    def servings(self):\n        return getattr(self, '_servings', None)\n\n    @servings.setter\n    def servings(self, value):\n        if isinstance(value, (Fraction, long, float, int)) or value is None:\n            self._servings = value\n        elif isinstance(value, (list, tuple)) and len(value) == 2:\n            self._servings = tuple(value)\n        else:\n            raise FieldError(\"Not a number or 2-item tuple/list\", value)\n        return value\n\n    @property\n    def ingredients(self):\n        return getattr(self, '_ingredients', None)\n\n    @ingredients.setter\n    def ingredients(self, value):\n        self._ingredients = [Ingredient.parse(x)\n                             if not isinstance(x, Ingredient) else x\n                             for x in value]\n","repo_name":"OEP/pyprika","sub_path":"pyprika/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"74316502563","text":"#coding=utf-8\nfrom django.conf.urls import url\n\n\nfrom CartApp import views\n\nurlpatterns = [\n    url(r'cart/',views.cart,name='cart'),\n    # Flash-sale page: add an item to the cart\n    url(r'^addToCart/',views.addToCart,name='addToCart'),\n    # Flash-sale page: decrease an item's quantity by one\n    url(r'^subToCart/',views.subToCart,name='subToCart'),\n\n    # Cart page: increase an item's quantity by one\n    url(r'^addCart/',views.addCart,name='addCart'),\n    # Cart page: decrease an item's quantity by one, or remove it from the cart\n    url(r'^subCart/',views.subCart,name='subCart'),\n\n    # Click ✔ to toggle the selected state\n    url(r'changeStatus/',views.changeStatus,name='changeStatus'),\n    # Select all\n    url(r'^allSelect/', views.allSelect, name='allSelect'),\n]","repo_name":"d-sea-wind/axf","sub_path":"CartApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9350627001","text":"'''\nSample commands\n'''\n# Imports required by the commands below (census_api is this repo's module)\nimport census_api\nimport pandas as pd\n\n# Create census object\ncensus = census_api.Census('!!!use Census API key here!!!')\n\n# Create dictionary\ndict_acs5 = census.get_dict(source = 'acs5', year = '2019')\n\n# Create ACS dataset\nacs2019 = census.get_data(\n    source = 'acs5',\n    year = '2019',\n    variables = ['B03001_003E', 'NAME'],\n    geographical_level = 'county',\n)\n\n# Create dataframe\ndf_acs2019 = pd.DataFrame(acs2019[1:], columns=acs2019[0])\ndf_acs2019['FIPS'] = df_acs2019['state'] + df_acs2019['county']\n","repo_name":"Hillard28/census-api","sub_path":"census_sample.py","file_name":"census_sample.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17129986015","text":"import RPi.GPIO as GPiO\nimport time \n\ndac = [26, 19, 13, 6, 5, 11, 9, 10]\nbits = len(dac)\nlevels = 2**bits\nmaxVoltage = 3.3\ntroykaModule = 17\ncomparator = 4\n\ndef dec2bin(dec):\n    return [int(bit) for bit in bin(dec)[2:].zfill(bits)]\n\ndef bin2dac(value):\n    signal = dec2bin(value)\n    print(len(signal))\n    GPiO.output(dac, signal)\n    return signal\n\ndef num2dac(value):\n    signal = dec2bin(value)\n    GPiO.output(dac, signal)\n    return signal\n\nGPiO.setmode(GPiO.BCM)\nGPiO.setup(dac, GPiO.OUT, initial = GPiO.LOW)\nGPiO.setup(troykaModule, GPiO.OUT, initial = GPiO.HIGH)\nGPiO.setup(comparator, GPiO.IN)\n\ntry:\n    while True:\n        value = 128\n        a = 128\n        for i in range(8):\n            signal = num2dac(value)\n            comparatorValue = GPiO.input(comparator)\n            if comparatorValue == 0:\n                value += a\n            else:\n                value -= a\n            a = int(a / 2)\n            time.sleep(0.01)\n        signal = num2dac(value)\n        voltage = value / levels * maxVoltage\n        print(value)\n\nexcept KeyboardInterrupt:\n    print(\"The program was stopped 
by the keyboard.\")\nelse:\n    print(\"No exceptions.\")\nfinally:\n    GPiO.output(dac, GPiO.LOW)\n    GPiO.cleanup(dac)\n    print(\"GPiO cleanup completed.\")","repo_name":"uBaHTT/Lesson1","sub_path":"29.09/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1747312417","text":"import numpy as np\nfrom Autonomous.Sensors.LIDAR import LIDAR_Interface,Utils\nfrom threading import Thread\n\nclass Obstacle_Detection(Thread):\n\n    def __init__(self, lidar: LIDAR_Interface.LIDAR_Interface, min_distance: int=0, max_distance: int=5000):\n\n        self._lidar = lidar\n\n        if not self._lidar.running:\n            self._lidar.start()\n\n        self._iter_scan = self._lidar.iter_scans(self.__samples_per_rev)\n        self._lidar.min_distance = min_distance\n        self._lidar.max_distance = max_distance\n\n        super(Obstacle_Detection, self).__init__()\n\n        self.__running = False\n        self.__obstacle_flag = False\n        self.__obstacle = np.array([0,0]) # this can be an np array if need be\n\n\n    def stop_thread(self):\n        self.__running = False\n\n    def exit_func(self):\n        self.stop_thread()\n        self._lidar.exit_func()\n\n    def zero_sensor(self):\n        # needs an algorithm to find the center of the sensor\n        pass\n\n    def clear_obstacle_flag(self):\n        self.__obstacle_flag = False\n        self.__obstacle = (0, 0) # reset the __obstacle var\n\n\n    def range_filter(self, scans, min_distance, max_distance): # not sure if there needs to be a self here?\n        x = scans[:, 1]\n        for i in range(len(x)):\n            if x[i] < min_distance:\n                x[i] = 0\n            elif x[i] > max_distance:\n                x[i] = 0\n\n        x = np.asarray(x)\n        x = np.transpose(x)\n        return x\n\n\n    def segmentation(self, scans, seg_threshold): # not sure if there needs to be a self here?\n        i = 1 # incremental num\n        temp_val = scans[:, 1]\n        segment = np.zeros((len(temp_val), 3))\n        segment[:, 0] = temp_val\n        x = [temp_val[len(temp_val) - 1]]\n        np.asarray(x)\n        temp_val = np.append(temp_val, x, axis=0)\n        segment[:, 1] = abs(np.diff(temp_val, axis=0))\n        # conditions where segment threshold > 20 mm, can be changed\n        cond_1 = segment[:, 1] > seg_threshold\n        check = np.where(cond_1, 2, 1) # check where it's true or false\n        check = check.reshape(-1, 1)\n        iter_seg = 1\n        for k in range(len(check)):\n            if check[k] == 2: # true\n                iter_seg = iter_seg + 1 # iterate to next segment\n                check[k] = iter_seg\n            elif check[k] == 1: # false: diff between 2 distances is less than threshold\n                check[k] = iter_seg # same segment\n        segment[:, 2] = check[:, 0]\n\n        segment = segment[:, 2].reshape(-1, 1)\n\n        return segment\n\n    # class properties\n    # @property\n    # def obstacle_detected_flag(self):\n    #     return self.__obstacle_flag\n\n    @property\n    def detected_obstacle(self): # returns [angle, dist, segment number]\n        return self.__obstacle\n\n    @property\n    def max_distance(self):\n        return self._lidar.max_distance\n\n    @max_distance.setter\n    def max_distance(self, distance):\n        self._lidar.max_distance = distance\n\n    @property\n    def min_distance(self):\n        return self._lidar.min_distance\n\n    @min_distance.setter\n    def min_distance(self, distance):\n        self._lidar.min_distance = distance\n\n    # thread functions\n    def start(self) -> None:\n        self.__running = True\n        if not self._lidar.running:\n            self._lidar.start()\n        super(Obstacle_Detection, self).start()\n\n    def run(self) -> None:\n        while self.__running:\n            # run the obstacle detection algorithm\n\n            scan = next(self._iter_scan)\n            __obstacle = np.array([(np.radians(point[1]), point[2]) for point in scan])\n            x = self.range_filter(__obstacle, 0, 5000)\n            
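# Put the range-filtered distances back into the distance column; readings\n            # outside the limits were zeroed, and those rows are dropped just below.\n            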
__obstacle[:, 1] = x\n __obstacle = __obstacle[np.all(__obstacle != 0, axis=1)] # removes rows with 0s\n segment = self.segmentation(__obstacle, 20) # distance threshold function\n # add segment value column to offset array to plot\n __obstacle = np.append(__obstacle, segment, axis=1)\n\n pass\n\n\n","repo_name":"GabeCasciano/Capstone20","sub_path":"AutonomousPkg/build/lib/Autonomous/Modules/Obstacle_Detection.py","file_name":"Obstacle_Detection.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35793601386","text":"from sklearn.ensemble import RandomForestClassifier\nfrom src.data_base_module.data_retrival import instance as db\nfrom typing import List, Dict, Set, Tuple\nimport pandas as pd\nimport src.data_base_module.data_blocks as data\nimport src.machine_learning_module.feature_generators as feature_gen_mod\nimport src.machine_learning_module.label_generators as label_gen_mod\nimport src.machine_learning_module.filter_generators as filter_gen_mod\nimport src.machine_learning_module.pipeline as pipe_mod\nimport src.machine_learning_module.performance_metrics as perf_mod\n\nimport src.machine_learning_module.feature_generator_data.talib_momentum as talib_mom_data\n\n\"\"\"\nThis is a basic model script that sketches out the basic format of scripts that make use of models\nIn this script we test the abstraction of parts of this code into functions\n\nLabeling : abs change labeling\nFiltering : every kth filtering\nSampling : Volume \nml model : random forest classifier\nFeatures : talib momentum indicators\n\"\"\"\n\n# ------ user inputs ------\nsymbol : str = \"NHK17\"\nsampling_volume : int = 20\nprice_series_used : data.BarDataColumns = data.BarDataColumns.CLOSE\nlook_ahead : int = 15\nthreshold : float = 15\nperiod : int = 5\nshift : int = 0\nnum_datasets = 40\nnum_test_sets = 8\ncross_val_shift = 0\n\n# ----- #define --------\nMORNING : data.IntraDayPeriod = data.IntraDayPeriod.MORNING\nAFTERNOON : data.IntraDayPeriod = data.IntraDayPeriod.AFTERNOON\n\n# ------ parameter dictionaries -------\nrfr_params : dict = {\n \"n_estimators\" : 100,\n \"criterion\" : \"entropy\",\n \"max_depth\" : 5,\n \"min_samples_split\" : 10,\n \"min_weight_fraction_leaf\" : 0.1,\n \"class_weight\" : \"balanced\",\n \"max_samples\" : 0.75,\n \"max_features\" : 5\n}\n\nlabel_generator_params : dict = {\n \"look_ahead\" : look_ahead,\n \"threshold\" : threshold,\n \"criteria\" : price_series_used,\n}\n\nfilter_generator_params : dict = {\n \"period\" : period,\n \"shift\" : shift,\n \"criteria\" : price_series_used\n}\n\ntalib_momentum_param_dict : Dict[str, List[Dict[str, float]]] = {\n talib_mom_data.ADX : [{'timeperiod' : 14}, {'timeperiod' : 20}],\n talib_mom_data.ADXR : [{'timeperiod' : 8}],\n talib_mom_data.APO : [{'fastperiod' : 10, 'slowperiod' : 14}],\n talib_mom_data.AROON_UP : [{'timeperiod' : 14}],\n talib_mom_data.AROON_DOWN : [{'timeperiod' : 14}],\n talib_mom_data.AROONOSC : [{'timeperiod' : 14}],\n talib_mom_data.CCI : [{'timeperiod' : 14}],\n talib_mom_data.CMO : [{'timeperiod' : 14}],\n talib_mom_data.MACD: [{'fastperiod' : 14, 'slowperiod' : 16, 'signalperiod' : 18}],\n talib_mom_data.MACD_SIGNAL : [{'fastperiod' : 10, 'slowperiod' : 14, 'signalperiod' : 18}],\n talib_mom_data.MACD_HIST: [{'fastperiod' : 13, 'slowperiod' : 11, 'signalperiod' : 24}],\n talib_mom_data.MOM : [{'timeperiod' : 2}],\n talib_mom_data.PPO : [{'fastperiod' : 10, 'slowperiod' : 14}],\n talib_mom_data.ROC : 
[{'timeperiod' : 14}],\n talib_mom_data.ROCP: [{'timeperiod' : 14}],\n talib_mom_data.RSI : [{'timeperiod' : 14}],\n talib_mom_data.STOCH_SLOWK : [{'fastk_period' : 10, 'slowk_period' : 15}],\n talib_mom_data.STOCH_SLOWD : [{'fastk_period' : 10, 'slowk_period' : 15}],\n talib_mom_data.STOCHF_FASTK : [{'fastk_period' : 10, 'fastd_period' : 15}],\n talib_mom_data.STOCHF_FASTD : [{'fastk_period' : 10, 'fastd_period' : 15}],\n talib_mom_data.TRIX : [{'timeperiod' : 14}],\n talib_mom_data.ULTOSC : [{'timeperiod1' : 7, 'timeperiod2' : 14, 'timeperiod3' : 21}],\n talib_mom_data.WILLR : [{'timeperiod' : 5}],\n}\n\n# ----- objects ------\nfeature_generator : feature_gen_mod.FeatureGenerator = feature_gen_mod.TalibMomentum(parameters_dict = talib_momentum_param_dict)\nlabel_generator : label_gen_mod.ClassificationLabelGenerator = label_gen_mod.AbsoluteChangeLabel(**label_generator_params)\nfilter_generator : filter_gen_mod.FilterGenerator = filter_gen_mod.EveryKthFilter(**filter_generator_params)\n\n","repo_name":"snowbanana12345/zm_machine_learning_project","sub_path":"test/ml_test/pipeline_test/refractor_basic_model_script.py","file_name":"refractor_basic_model_script.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1390569930","text":"# pylint: disable = C0111\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as f:\n DESCRIPTION = f.read()\n\nsetup(name=\"cord19q\",\n version=\"3.0.0\",\n author=\"NeuML\",\n description=\"CORD-19 Analysis\",\n long_description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/neuml/cord19q\",\n project_urls={\n \"Documentation\": \"https://github.com/neuml/cord19q\",\n \"Issue Tracker\": \"https://github.com/neuml/cord19q/issues\",\n \"Source Code\": \"https://github.com/neuml/cord19q\",\n },\n license=\"Apache 2.0: http://www.apache.org/licenses/LICENSE-2.0\",\n keywords=\"search embedding machine-learning nlp covid-19 medical scientific papers\",\n python_requires=\">=3.6\",\n install_requires=[\n \"paperetl @ git+https://github.com/neuml/paperetl\",\n \"paperai @ git+https://github.com/neuml/paperai\"\n ],\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Utilities\"\n ])\n","repo_name":"neuml/cord19q","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"54"} +{"seq_id":"6069340857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTakes care of various fitting procedures, which do\nsimilar, but not exactly the same things.\n\"\"\"\n\nimport numpy as np\nimport scipy.optimize as optim\nfrom inspect import signature\nfrom scipy.integrate import solve_ivp, odeint\nimport matplotlib.pyplot as plt\nfrom analysis.helpers import support as sup\nfrom time import perf_counter\n\n\n# ------------------------------------------------------ #\n# ---- fitting few kinetics, all nonlinear params ------ #\n# ------------------------------------------------------ #\ndef fit_kinetics(x_data, y_data,\n n_exp=1, init_par=None, const=None,\n **kwargs):\n \"\"\"fits one or few kinetics to sum of exponentials.\n No models involved.\n\n Args:\n x_data (list/tuple): x axis data\n y_data 
(list/tuple): y axis data\n        n_exp (int, optional): Number of exponentials. Defaults to 1.\n        init_par ([type], optional): Set of initial fitting parameters.\n            Defaults to None.\n        const ([type], optional): Selects if const is added as a free\n            parameter. Defaults to None.\n\n    Kwargs:\n        bounds: set of bounds for parameters.\n            Defaults to (-np.inf, np.inf).\n\n    Returns:\n        list: list of least_square fit outputs.\n    \"\"\"\n    bounds = kwargs.get('bounds', (-np.inf, np.inf))\n    fit = []\n    for j in range(len(x_data)):\n        if init_par is None:\n            p0 = []\n            for i in range(n_exp):\n                p0.extend([3*(np.random.random()-0.5),\n                           100*np.random.random()])\n        else:\n            p0 = list(init_par)\n\n        if const is not None:\n            if isinstance(const, (list, tuple)):\n                p0.append(const[j])\n            else:\n                p0.append(const)\n        fit.append(optim.least_squares(res_kin, p0,\n                                       args=(y_data[j],\n                                             x_data[j],\n                                             n_exp),\n                                       bounds=bounds))\n    return fit\n\n\ndef exp_model(par, x, n: int):\n    \"\"\"Generates sum of exponentials model; if odd\n    number of par, then the last one (2*n+1) is taken as an additional\n    constant variable.\n\n    Args:\n        par (list/tuple): amp, tau... sequence\n        x (list/tuple): x axis\n        n (int): number of exponentials\n\n    Returns:\n        ndarray: y values of the resulting function\n    \"\"\"\n    amp = par[::2]\n    tau = par[1::2]\n    # print(x, tau)\n    func = np.sum([amp[k]*np.exp(-x/tau[k])\n                   for k in range(n)],\n                  axis=0, dtype=np.float64)\n    try: # adding constant to fit\n        func = func + np.ones(len(x))*amp[n]\n    except IndexError:\n        pass\n    return func\n\n\ndef res_kin(p, *args):\n    \"\"\"Residuals of the exponential model.\n\n    Args:\n        p (list/tuple): amp and taus of the model\n\n    Returns:\n        ndarray: absolute residuals\n    \"\"\"\n    y, *params = args\n    return np.sqrt((y - exp_model(p, *params))**2)\n\n\n# ------------------------------------------------------ #\n# ---- fitting few kinetics, global -------------------- #\n# ------------------------------------------------------ #\ndef fit_kinetics_global(x_data, y_data, gl_par,\n                        n_exp=1, init_par=None, const=None,\n                        **kwargs):\n    \"\"\"Global fit of one or few kinetics\n\n    Args:\n        x_data (list/tuple): x axis\n        y_data (list/tuple): y data\n        gl_par (list): Bools of the params which are global, typically lifetimes.\n        n_exp (int, optional): Number of exponentials. 
Defaults to 1.\n init_par ([type], optional): Initial params for params.\n Defaults to None.\n const ([type], optional): Add constant term to fit.\n Defaults to None.\n\n Raises:\n AttributeError: Wrong length of const\n AttributeError: Wrong length of init_par\n\n Returns:\n obj: optimize.least_squares output\n \"\"\"\n # basic checks of inputs\n bounds = kwargs.get('bounds', (-np.inf, np.inf))\n ndat = len(x_data)\n if init_par is None:\n print('Using default init_par')\n p0 = []\n for i in range(n_exp):\n if gl_par[2*i] == 1: # global parameter\n p0.extend([3 * (np.random.random()-0.5)])\n else: # not global\n p0.extend([3 * (np.random.random()-0.5)\n for k in range(ndat)])\n if gl_par[2*i+1] == 1: # global\n p0.extend([100 * np.random.random()])\n else: # not global\n p0.extend([100 * np.random.random()\n for k in range(ndat)])\n else:\n p0 = list(init_par)\n\n if const is not None:\n if gl_par[2*n_exp] == 0:\n if isinstance(const, (tuple, list)) and len(const) == ndat:\n p0.extend(list(const))\n elif isinstance(const, (tuple, list)) and len(const) == 1:\n p0.extend(list(const) * ndat)\n elif isinstance(const, (float, int)):\n p0.extend([const] * ndat)\n else:\n raise AttributeError('Wrong length of const')\n else:\n if isinstance(const, (float, int)):\n p0.append(const)\n elif isinstance(const, (tuple, list)) and len(const) == 1:\n p0.extend(list(const))\n else:\n raise AttributeError('Wrong length of const')\n # checking correct length of params\n if len(p0) != int(len(gl_par) + (len(gl_par)-sum(gl_par))*(ndat-1)):\n print(len(p0), int(len(gl_par) + (len(gl_par)-sum(gl_par))*(ndat-1)))\n raise AttributeError('Wrong length of init_par')\n return\n\n # NL fitting\n fit = optim.least_squares(res_kin_gl, p0,\n args=(y_data, gl_par, x_data, n_exp),\n bounds=bounds)\n return fit\n\n\ndef exp_model_gl(params, bool_gl, x, n):\n \"\"\"Constructs sum of exponentials model taking into account if some\n params are set to be global.\n\n Args:\n params (list/tuple): amplitudes and lifetime params\n bool_gl (list/tuple): Bools, global params are 1\n x (list/tuple): x axis\n n (int): Number of exponentials\n\n Returns:\n ndarray: Total function given the params.\n \"\"\"\n func_total = []\n ndat = len(x)\n # order parameters to list of tuples\n par = group_par(params, bool_gl, n, ndat)\n for i in range(ndat):\n func = np.sum([par[i][0][k] * np.exp(-x[i] / par[i][1][k])\n for k in range(n)],\n axis=0)\n # adding constant to the fit\n try:\n bool_gl[2*n]\n except IndexError:\n pass\n else:\n if bool_gl[2*n] == 1:\n func += np.ones(len(x[i])) * params[-1]\n else:\n func += np.ones(len(x[i])) * params[-ndat+i]\n func_total.append(func)\n return func_total\n\n\ndef res_kin_gl(p, *args):\n \"\"\"Calculates residuals (chi2) for the global model of sum of exponentials.\n\n Args:\n p (list/tuple): params\n\n Returns:\n ndarray: vector of residuals.\n \"\"\"\n y, *params = args\n model = exp_model_gl(p, *params)\n y_flat = np.array([item\n for sublist in y\n for item in sublist])\n model_flat = np.array([item\n for sublist in model\n for item in sublist])\n return (y_flat - model_flat)**2\n\n\n# ------------------------------------------------------ #\n# ---- fitting few kinetics, ODE approach -------------- #\n# ------------------------------------------------------ #\ndef fit_ode(x_data, y_data,\n model, p0_amp, p0_ode, const=None,\n **kwargs):\n \"\"\"Fitting ODEs to one or few kinetics. 
Amplitudes are fitted linearly,\n lifetimes from ODE nonlinearly\n\n Args:\n x_data (list/tuple): x axis\n y_data (list/tuple): y values\n model (str): One of the models from model_dict\n TODO: description and list of the models\n p0_amp (tuple): initial components amplitudes\n p0_ode (tuple): lifetimes\n const ([type], optional): Not implemented yet. Defaults to None.\n\n Kwargs:\n TODO: bounds to variables, not implemented\n\n Returns:\n list of tuples: (params from ODE, params from linear amp fits)\n \"\"\"\n tol = kwargs.get('tol', 1e-2)\n fit_total = []\n for i in range(len(x_data)):\n j = 0\n n_par = len(p0_ode+p0_amp)\n par_before = np.array(p0_ode + p0_amp)\n rel_change = np.array([1] * n_par)\n par_ode_in, par_amp_in = p0_ode, p0_amp\n lims = (x_data[i][0], x_data[i][-1])\n while any(i > tol for i in rel_change) and j < 50:\n # kinetic\n kin = solve_ivp(model_dict[model][0],\n lims, par_amp_in,\n args=par_ode_in,\n t_eval=x_data[i]).y\n # fit amplitude(s)\n if len(kin) == 1:\n par_amp_out, _ = optim.curve_fit(func, kin[0],\n y_data[i])\n else:\n A = np.vstack(tuple(kin[:]))\n par_amp_out = optim.lsq_linear(A.T, y_data[i]).x\n # fit ODE(s)\n par_amp_out = tuple(par_amp_out*par_amp_in)\n t0 = perf_counter()\n fit = optim.least_squares(res_ode, par_ode_in,\n args=(y_data[i], model,\n lims, par_amp_out,\n x_data[i]),\n bounds=(0, 1e8),\n max_nfev=4)\n t1 = perf_counter()\n par_after = np.array(tuple(fit.x) + tuple(par_amp_out))\n rel_change = abs((par_before-par_after)/par_before)\n par_before = par_after\n par_ode_in = tuple(fit.x)\n par_amp_in = par_amp_out\n j += 1\n # print(f'ODE took: {t1-t0} secs.')\n if t1-t0 > 5:\n print('takes too long, try again.')\n break\n # print(f'params after fit: {par_after}')\n # print(rel_change, rel_change.any() > tol)\n fit_total.append((par_ode_in, par_amp_in))\n return fit_total\n\n\ndef res_ode(p, *args):\n \"\"\"Calculates chi2 residuals of the ODE fit and data\n\n Args:\n p (list/tuple): parameter of the fit\n\n Returns:\n ndarray: residuals\n \"\"\"\n y, model, lims, par_amp, x_range = args\n model_ode = solve_ivp(model_dict[model][0],\n lims, par_amp,\n args=p,\n t_eval=x_range).y\n all_comp_sum = np.sum(model_ode, axis=0)\n res = (y - all_comp_sum)**2\n return res\n\n\n# ------------------------------------ #\n# -------ODE 2D approach ------------- #\n# ------------------------------------ #\ndef fit_ode_2d(x_data, y_data,\n model, p0_amp, p0_ode, const=None,\n **kwargs):\n \"\"\"ODEs fitting of the whole 2D map\n\n Args:\n x_data (list/tuple): x axis, ie time\n y_data (list/tuple): y data\n model (str): model from dict\n p0_amp (list/tuple): components amplitudes for linear fit\n p0_ode (list/tuple): ODE params for nonlinear fit\n const ([type], optional): should include const, not Implemented yet.\n Defaults to None.\n\n Kwargs:\n TODO: bounds to variables, not implemented\n\n Returns:\n 2-tuple: (Solution of the nonlinear fit optimize.least_squares.x,\n amplitude params from the linear fit,\n )\n \"\"\"\n # tol = kwargs.get('tol', 1e-3)\n par_ode_in, par_amp_in = p0_ode, p0_amp\n lims = (x_data[0], x_data[-1])\n j = 0\n while j < 10:\n # kinetic\n kin = solve_ivp(model_dict[model][0],\n lims, par_amp_in,\n args=par_ode_in,\n t_eval=x_data).y\n # fit amplitudes\n if len(kin) == 1:\n par_amp_out = []\n for i in range(y_data.shape[1]):\n _par, _ = optim.curve_fit(func, kin[0],\n y_data[:, i])\n par_amp_out.append(_par)\n else:\n par_amp_out = np.zeros((y_data.shape[1],\n model_dict[model][1]))\n for i in range(y_data.shape[1]):\n A = 
np.vstack(tuple(kin[:]))\n par_amp_out[i, :] = optim.lsq_linear(A.T,\n y_data[:, i]).x\n # fit ODE(s)\n fit = optim.least_squares(res_ode_2d, par_ode_in,\n args=(y_data, model,\n lims, par_amp_out,\n x_data),\n bounds=(0, 1e8),\n max_nfev=4)\n j += 1\n print(j)\n return fit.x, par_amp_out\n\n\ndef res_ode_2d(p, *args):\n \"\"\"Calculates residuals for the ODE fit of the whole 2D maps\n\n Args:\n p (list/tuple): fit parameters\n\n Returns:\n ndarray: vector of residulas (chi2) integrated along WL\n \"\"\"\n y, model, lims, par_amp, x_range = args\n model_ode = solve_ivp(model_dict[model][0],\n lims, tuple([1] * model_dict[model][1]),\n args=p,\n t_eval=x_range).y\n data_sol = np.zeros(y.shape)\n for i in range(model_dict[model][1]):\n data_sol += np.outer(model_ode[i], par_amp[:, i])\n res = np.sum((y - data_sol)**2, axis=1)\n return res\n\n\ndef func(kin, m: float):\n \"\"\"multiple of the kinetic, used for linear part of the ODE fits\n\n Args:\n kin (array): kinetic\n m (float): scaling factor of the kinetic\n\n Returns:\n array: multiple of kinetic\n \"\"\"\n return m*kin\n\n\ndef ode_solution(p, *args):\n \"\"\"Integration of the ODE.\n\n Args:\n p (list/tuple): ODE params to be fitted.\n\n args:\n tuple of parameters for the solve_ivp function, including the\n ones from the linear fit.\n\n Returns:\n obj: solve_ivp object\n \"\"\"\n model, lims, par_amp, x_range = args\n sol = solve_ivp(model_dict[model][0], lims, par_amp,\n args=p,\n t_eval=x_range)\n return sol\n\n\ndef rhs00(t, states, t0):\n '''single state decay'''\n s0 = states\n return -s0/t0\n\n\ndef rhs01(t, states, t0, t1):\n ''' Two states, no transfer'''\n s0, s1 = states\n return [-s0/t0,\n -s1/t1]\n\n\ndef rhs02(t, states, t0, t1, t2):\n ''' Two states, transfer from 1 ->2'''\n s0, s1 = states\n return [-s0/t0 - s0/t1,\n -s1/t2 + s0/t1]\n\n\n# ------------------------------------------------------ #\n# ---------- fitting for SVD script -------------------- #\n# ------------------------------------------------------ #\ndef rotation(var, k, pos, T, P, DTT, C0, t, function):\n \"\"\"TODO: docs\n\n Args:\n var ([type]): [description]\n k ([type]): [description]\n pos ([type]): [description]\n T ([type]): [description]\n P ([type]): [description]\n DTT ([type]): [description]\n C0 ([type]): [description]\n t ([type]): [description]\n function ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n global C, R, V, calc, res\n k[pos] = var\n C = odeint(function, C0, t, args=(k,))\n R = T.T @ np.linalg.pinv(C.T)\n V = P @ R\n calc = V @ C.T\n res = (DTT-calc)\n if any(x < 0 for x in k):\n penalty = 1e6\n else:\n penalty = 0\n error = np.linalg.norm(DTT-calc, 'fro') + penalty\n return error\n\n\n# ------------------------------------------------------ #\n# ------------- helper functions ----------------------- #\n# ------------------------------------------------------ #\ndef nest_data(data):\n \"\"\"Nesting of the data for functions which handle both single or multiple\n data datasets in a form of list/tuple of arrays.\n\n Args:\n data (array/list/tuple): x/y data of a single line data, ie. a kinetic\n\n Returns:\n list/tuple/array: always nested output.\n \"\"\"\n if any(isinstance(i, (list, tuple, np.ndarray))\n for i in data):\n pass # nested data, do nothing\n else: # nest data\n data = (data,)\n return data\n\n\ndef duplicate_nesting(x, y):\n \"\"\"Ensures the same level of nesting for x and y data. Works for the case\n when single x axis used for multiple y data, ie. 
kinetics.\n\n Args:\n x (list/tuple/ndarray): x data, nested or not.\n y (list/tuple/ndarray): y data, nested or not.\n\n Raises:\n ValueError: If more x axes than y datasets.\n\n Returns:\n tuple/list/ndarray: nested x axes to match y datasets.\n \"\"\"\n x = nest_data(x)\n if len(x) == len(y):\n pass\n elif len(x) > len(y):\n raise ValueError('more x axes than datasets')\n else: # only case I need duplicate x axis\n x = tuple([x[0]]*len(y))\n return x\n\n\ndef cut_limits(x_data: tuple, y_data: tuple, x_lims: tuple):\n \"\"\"Selecting range of x/y data based on x_lims.\n\n Args:\n x_data (tuple): x axis data\n y_data (tuple): y axis values\n x_lims (tuple): limits to select range from x and y\n\n Raises:\n ValueError: If x_lims have wrong shape\n\n Returns:\n list: limited x data, limited y data\n \"\"\"\n # # extending x axis to match number of datasets.\n y_data = nest_data(y_data)\n n_dat = len(y_data)\n x_data = duplicate_nesting(x_data, y_data)\n print(f'number of datasets: {n_dat}.')\n # no limits, take all positive x\n x = []\n data = []\n if x_lims is None:\n for i in range(n_dat):\n x.append(x_data[i][x_data[i] > 0])\n data.append(y_data[i][x_data[i] > 0])\n # x limits global for all kinetics\n elif len(x_lims) == 2:\n for i in range(n_dat):\n beg, end = sup.get_idx(*x_lims, axis=x_data[i])\n x.append(x_data[i][beg:end+1])\n data.append(y_data[i][beg:end+1])\n # x limits for each kinetic specified\n elif len(x_lims) == n_dat*2:\n for i in range(n_dat):\n beg, end = sup.get_idx(*x_lims[2*i:2*i+2], axis=x_data[i])\n x.append(x_data[i][beg:end+1])\n data.append(y_data[i][beg:end+1])\n else:\n raise ValueError('Wrong shape of t_lims.')\n return\n return x, data\n\n\ndef group_par(params, bool_gl, n, size):\n \"\"\"Organize params based on number of exponentials and\n wheter they are global or not\n\n Args:\n params (list/tuple): input params from the user\n bool_gl (list/tuple): bools to signify global ones\n n (int): number of exponentials\n size ([type]): [description]\n\n Returns:\n list: new set of params.\n \"\"\"\n idx_count = [size if item == 0 else item\n for item in bool_gl]\n amp_count = idx_count[::2]\n tau_count = idx_count[1::2]\n amp, tau = [], []\n for i in range(n):\n par = list(params.copy())\n # amplitudes\n if amp_count[i] == size:\n amp.extend(par[:size])\n par = par[size:]\n else:\n amp.extend([par[0]]*size)\n par = par[1:]\n # lifetimes\n if tau_count[i] == size:\n tau.extend(par[:size])\n par = par[size:]\n else:\n tau.extend([par[0]]*size)\n par = par[1:]\n\n par_total = []\n for i in range(size):\n a = [amp[size*k + i] for k in range(n)]\n t = [tau[size*k + i] for k in range(n)]\n par_total.append((a, t))\n return par_total\n\n\ndef get_params_ode(model, par):\n \"\"\"Generate parameters for ODEs fit based on the model\n\n Args:\n model (str): one of the models selected from dict\n par (list/tuple): parameters\n\n Returns:\n tuple: parameters out\n \"\"\"\n sig = signature(model_dict[model][0])\n n_ode_params = len(sig.parameters)-2\n n_amp_params = model_dict[model][1]\n if par is None:\n par_out = (tuple([10000*np.random.random()\n for i in range(n_ode_params)]),\n tuple([2*np.random.random()-1\n for i in range(n_amp_params)])\n )\n else:\n # lengths are correct\n if (len(par[0]) == n_ode_params and\n len(par[1]) == n_amp_params):\n par_out = par\n else:\n print('Wrong number of input params: generate random ones for you')\n par_out = (tuple([10000*np.random.random()\n for i in range(n_ode_params)]),\n tuple([2*np.random.random()-1\n for i in 
range(n_amp_params)])\n )\n return par_out\n\n\ndef check_fit_params(obj):\n \"\"\"Check if fit parameters exist and should be rewritten\n\n Args:\n obj (class Trs): time-resolved experiment\n \"\"\"\n if obj._fitParams is None:\n obj._fitParams = []\n obj._fitData = []\n else:\n a = input('Rewrite old fits [y/n]?')\n if a == 'y':\n obj._fitParams = []\n obj._fitData = []\n else:\n print('I will append fit parametres to existing field')\n\n\n# first is function, second is number of states\nmodel_dict = {\n 'one_state': (rhs00, 1),\n 'two_states': (rhs01, 2),\n 'two_states_transfer': (rhs02, 2)\n }\n\nif __name__ == \"__main__\":\n # x = np.linspace(0, 99, 100)\n # par = (1, 5, 1, 50, -1, 200, 1)\n # data2fit = exp_model(par, x, n=3)\n # data2fit += np.random.normal(0, 0.07, len(x))\n\n # fit = fit_kinetics(x, data2fit, n_exp=2)\n\n # fit_result = exp_model(fit.x, x, 2)\n # fig0 = plt.figure()\n # plt.plot(data2fit, label='data')\n # plt.plot(fit_result, label='fit')\n # plt.legend()\n # plt.show()\n\n # print(fit)\n par = [1, 5, 1, 50, -1, 200, 0.1]\n par2 = par[:2] + [0.1]\n par3 = par[:4] + [0.1]\n\n x = [np.linspace(0, 99, 100),\n np.linspace(0, 49, 100),\n np.linspace(0, 199, 200)]\n data2fit = [exp_model(par, x[0], n=3),\n exp_model(par2, x[1], n=1),\n exp_model(par3, x[2], n=2)]\n\n # glob = [1, 1, 1, 1, 0]\n # fit = fit_kinetics_global(x, data2fit, gl_par=glob, n_exp=2, const=0.01)\n # fit_result = exp_model_gl(fit.x, bool_gl=glob, x=x, n=2)\n # print(fit.x)\n # for i in range(len(x)):\n # # for glob = [1,0]\n # # single_fit = exp_model([fit.x[0], fit.x[i+1]], x[i], 1)\n\n # # for glob = [1,0,1,0]\n # # single_fit = exp_model([fit.x[0], fit.x[i+1],\n # # fit.x[4], fit.x[4+i+1]], x[i], 2)\n\n # # for glob = [0,1,0,1]\n # # single_fit = exp_model([fit.x[i], fit.x[3],\n # # fit.x[4+i], fit.x[-1]], x[i], 2)\n\n # # for glob = [1,1,1,1]\n # # single_fit = exp_model([fit.x[0], fit.x[1],\n # # fit.x[2], fit.x[3]], x[i], 2)\n\n # # for glob = [1,1,1,1,1]\n # # single_fit = exp_model([fit.x[0], fit.x[1],\n # # fit.x[2], fit.x[3], fit.x[-1]], x[i], 2)\n\n # # for glob = [1,1,1,1,0]\n # single_fit = exp_model([fit.x[0], fit.x[1],\n # fit.x[2], fit.x[3],\n # fit.x[-len(x)+i]], x[i], 2)\n\n # plt.plot(x[i], data2fit[i], 'o', label=i)\n # plt.plot(x[i], fit_result[i], 'k-')\n # plt.legend()\n # plt.show()\n\n fit = fit_ode(x, data2fit,\n 'one_state',\n p0_amp=(1,), p0_ode=(100,),\n const=None)\n plt.plot(x[0], data2fit[0], 'o')\n plt.plot(x[0][1:], fit.y[0], 'k-')\n plt.show()\n","repo_name":"palec87/cbr-analysis","sub_path":"analysis/modules/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":23913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"501600311","text":"import logging\nimport datetime\n\nfrom django.conf import settings\n\nfrom furl import furl\n\nfrom share.exceptions import HarvestError\nfrom share.harvest import BaseHarvester\n\n\nQA_TAG = 'qatest'\nlogger = logging.getLogger(__name__)\n\n\nclass NodeSuddenlyUnavailable(HarvestError):\n # A node was deleted or made private after it was seen at /v2/nodes,\n # but before we could fetch its embeds\n pass\n\n\nclass OSFHarvester(BaseHarvester):\n VERSION = 1\n\n # override BaseHarvester._do_fetch\n def _do_fetch(self, start_date, end_date, path, query_params=None, embed_attrs=None):\n return self._fetch_records(self._build_url(start_date, end_date, path, query_params), embed_attrs)\n\n # override BaseHarvester._do_fetch_by_id\n def _do_fetch_by_id(self, 
guid, path, query_params=None, embed_attrs=None):\n url = self._build_guid_url(guid, path, query_params).url\n response = self.requests.get(url)\n\n if response.status_code // 100 != 2:\n raise ValueError('Malformed response ({}) from {}. Got {}'.format(response, url, response.content))\n\n logger.debug('Fetched record \"%s\"', guid)\n\n record = response.json()['data']\n return self._populate_embeds(record, embed_attrs)\n\n def _setup_session(self):\n # so prod SHARE doesn't get throttled\n if settings.OSF_BYPASS_THROTTLE_TOKEN:\n self.session.headers.update({'X-THROTTLE-TOKEN': settings.OSF_BYPASS_THROTTLE_TOKEN})\n\n def _build_url(self, start_date, end_date, path, query_params):\n self._setup_session()\n\n url = furl(settings.OSF_API_URL + path)\n url.args['page[size]'] = 100\n # url.args['filter[public]'] = 'true'\n # OSF turns dates into date @ midnight so we have to go ahead one more day\n url.args['filter[date_modified][gte]'] = start_date.date().isoformat()\n url.args['filter[date_modified][lte]'] = (end_date + datetime.timedelta(days=2)).date().isoformat()\n for param, value in (query_params or {}).items():\n url.args[param] = value\n return url\n\n def _build_guid_url(self, guid, path, query_params):\n self._setup_session()\n\n url = furl(settings.OSF_API_URL)\n url.path.add(path).add(guid)\n for param, value in (query_params or {}).items():\n url.args[param] = value\n return url\n\n def _fetch_records(self, url, embed_attrs):\n while True:\n records, next_page = self._fetch_page(url)\n\n for record in records.json()['data']:\n if record['attributes'].get('tags') and QA_TAG in record['attributes']['tags']:\n continue\n\n try:\n record = self._populate_embeds(record, embed_attrs)\n except NodeSuddenlyUnavailable:\n continue\n\n yield record['id'], record\n\n if not next_page:\n break\n\n def _fetch_page(self, url, next_page=None):\n logger.debug('Making request to {}'.format(url.url))\n\n records = self.requests.get(url.url)\n\n if records.status_code in (401, 410):\n raise NodeSuddenlyUnavailable('Node unharvestable ({}) at {}. Got {}'.format(records, url.url, records.content))\n if records.status_code // 100 != 2:\n raise ValueError('Malformed response ({}) from {}. 
Got {}'.format(records, url.url, records.content))\n\n next_page = records.json()['links'].get('next')\n next_page = furl(next_page) if next_page else None\n\n logger.debug('Found {} records.'.format(len(records.json()['data'])))\n\n return records, next_page\n\n def _populate_embeds(self, record, embed_attrs):\n for attr, key in (embed_attrs or {}).items():\n embedded = record\n try:\n for key in key.split('.'):\n embedded = embedded[key]\n except KeyError:\n logger.warning('Could not access attribute %s at %s', attr, key)\n continue\n\n logger.info('Populating embedded attribute \"{}\" for \"{}\"'.format(attr, record['id']))\n\n data = []\n url = furl(embedded).add(args={'page[size]': 100})\n\n while True:\n resp, url = self._fetch_page(url)\n data.extend(resp.json()['data'])\n\n if not url:\n break\n\n record[attr] = data\n return record\n","repo_name":"CenterForOpenScience/SHARE","sub_path":"share/harvesters/io_osf.py","file_name":"io_osf.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"} +{"seq_id":"5367939372","text":"from flask import Flask, render_template, request, url_for, redirect\nfrom admin import admin_alert_thread, year_range_since\n\n\ncopyright_years = year_range_since(2021)\ncopyright_notice = f'{copyright_years} Johnathan Pennington | All rights reserved.'\n\n\napp = Flask(__name__)\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n subdomain_redirects = ('core', 'wordplay', 'mira3', 'soundx', 'harmio', 'quark', 'music', 'el')\n # If 'abc' is included, beetlebox.dev/abc/xyz is redirected to abc.beetlebox.dev/xyz\n\n skip_endpoints = tuple() # Skip any endpoints requiring request.args, and any others to not suggest for redirect.\n ignore_paths_starting_with = [ # Doesn't send an admin alert if request.path starts with any of these.\n '20', 'admin', 'blog', 'cms', 'feed', 'media', 'misc', 'news', 'robots', 'site', 'sito',\n 'shop', 'test', 'web', 'wordpress', 'Wordpress', 'wp', 'Wp', 'xmlrpc.php',\n ]\n\n # Redirect old url to new subdomain. i.e. 
beetlebox.dev/core/xyz => core.beetlebox.dev/xyz\n for subdomain_name in subdomain_redirects:\n if request.path.startswith(f'/{subdomain_name}'):\n subdomain_redirect = f'https://{subdomain_name}.beetlebox.dev'\n if request.path.startswith(f'/{subdomain_name}/'):\n sub_path = request.path.split(f'/{subdomain_name}/', 1)[-1]\n subdomain_redirect += f'/{sub_path}'\n return redirect(subdomain_redirect)\n\n site_root = url_for('home', _external=True).split('//', 1)[-1][:-1]\n # Siteroot includes domain, but removes http:// or https:// if present, and removes the final forward slash.\n a_text = site_root\n a_href = '/'\n\n request_of_concern = True # Requests of concern are paths not filtered by ignore_paths_starting_with.\n # If request_of_concern remains True, an attempt is made to suggest a more specific url on not found page,\n # and an admin alert is sent.\n for path_to_ignore in ignore_paths_starting_with:\n if request.path.startswith(f'/{path_to_ignore}'):\n request_of_concern = False\n break\n\n if request_of_concern:\n\n # Look for nearest valid path.\n for rule in app.url_map.iter_rules():\n if \"GET\" in rule.methods and rule.endpoint not in skip_endpoints and len(rule.arguments) == 0:\n # Static folder has rule.arguments, so is skipped and rerouted to root.\n if request.path.startswith(rule.rule): # Rule.rule is relative path.\n a_href = url_for(rule.endpoint)\n if a_href == '/':\n continue # Otherwise, displays final slash after site root text.\n a_text = f'{site_root}{a_href}'\n break\n\n message_body = f'Page not found: \\n{request.url}\\n' \\\n f'Rendered page_not_found.html and suggested: \\n{site_root}{a_href}'\n admin_alert_thread('Web App - 404', message_body)\n\n return render_template('page_not_found.html', relpath=a_href, a_text=a_text, copyright_notice=copyright_notice), 404\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return redirect(url_for('static', filename='favicon.ico'))\n\n\n@app.route('/volatile3gons')\ndef triangles_old_url():\n return redirect('https://quark.beetlebox.dev')\n\n\n# QR Code on CV\n@app.route('/qr/p')\ndef qr_cv():\n return redirect(url_for('info'))\n\n\n# App Buttons\n@app.route('/')\ndef home():\n return render_template('apps.html', copyright_notice=copyright_notice)\n\n\n@app.route('/info')\ndef info():\n return render_template('info.html', copyright_notice=copyright_notice)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"beetlebox-dev/top_domain","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25431565425","text":"#Explanation:\n\n#robotc142 7 years ago\n#Let me put an example here to further illustrate the idea:\n\n#For the word \"BANANA\", the first vowel 'A' occurs at position 1, len(\"BANANA\") = 6, so there are 6-1 = 5 \n#substrings starting with this letter 'A': ['A', 'AN', 'ANA', 'ANAN', 'ANANA'], you add one extra letter to \n#that specific letter 'A' until you get to the end of the word.\n\ndef minion_game(string):\n stuart_score = 0\n kevin_score = 0\n vowels = [\"A\",\"E\",\"I\",\"O\",\"U\"]\n\n for i in range(len(string)):\n if(string[i] in vowels):\n kevin_score += len(string)-i\n print(f\"The {i+1} letter {string[i]} has {len(string)-i} possible combinations\")\n \n else:\n stuart_score += len(string)-i\n print(f\"The {i+1} letter {string[i]} has {len(string)-i} possible combinations\")\n \n \n print(\"Kevin score: \", kevin_score)\n print(\"Stuart score: \", 
stuart_score)\n\n if (stuart_score > kevin_score):\n print(f\"Stuart {stuart_score}\")\n elif(kevin_score > stuart_score):\n print(f\"Kevin {kevin_score}\")\n else:\n print(\"Draw\")\n\nif __name__ == '__main__':\n s = input()\n minion_game(s)","repo_name":"RCAS2021/hackerrank-prepare","sub_path":"Python Challenges/Normal/Minion Game.py","file_name":"Minion Game.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34810298725","text":"#8.12 Sandwiches\ndef make_sandwich(*ingredients):\n\tprint('This sandwich contains:')\n\tfor ingredient in ingredients:\n\t\tprint('- '+ingredient)\n\nmake_sandwich('corn')\nmake_sandwich('cheese','steak')\nmake_sandwich('ham','cheese','lettuce')\n\n#8.13 User Profile\ndef build_profile(first, last, **user_info):\n\t\"\"\"Build a dictionary containing everything we know about a user.\"\"\"\n\tprofile = {}\n\n\tprofile['first_name'] = first\n\tprofile['last_name'] = last\n\n\tfor key, value in user_info.items():\n\t\tprofile[key] = value\n\treturn profile\n\nuser_profile = build_profile('albert', 'einstein',\n\t\t\t location='princeton',\n\t\t\t field='physics')\nprint(user_profile)\n\nuser_profile = build_profile('jon','doe',location='Kiev',age=19,profession='student')\n\nprint(user_profile)\n\n\ndef make_car(make,model,**attributes):\n\tcar={}\n\tcar['make']=make\n\tcar['model']=model\n\tfor key,attribute in attributes.items():\n\t\tcar[key]=attribute\n\treturn car\ncar = make_car('subaru','outback',color='blue',tow_package=True)\nprint(car)\n","repo_name":"Coldroy/pythonCrashCourse","sub_path":"chapter8_pt3.py","file_name":"chapter8_pt3.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71587249441","text":"import argparse\nimport os\n\nimport dgl\nimport dgl.function as fn\n\nimport numpy as np\nimport ogb\nimport torch\nimport tqdm\nfrom ogb.lsc import MAG240MDataset\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--rootdir\",\n type=str,\n default=\".\",\n help=\"Directory to download the OGB dataset.\",\n)\nparser.add_argument(\n \"--author-output-path\", type=str, help=\"Path to store the author features.\"\n)\nparser.add_argument(\n \"--inst-output-path\",\n type=str,\n help=\"Path to store the institution features.\",\n)\nparser.add_argument(\n \"--graph-output-path\", type=str, help=\"Path to store the graph.\"\n)\nparser.add_argument(\n \"--graph-format\",\n type=str,\n default=\"csc\",\n help=\"Graph format (coo, csr or csc).\",\n)\nparser.add_argument(\n \"--graph-as-homogeneous\",\n action=\"store_true\",\n help=\"Store the graph as DGL homogeneous graph.\",\n)\nparser.add_argument(\n \"--full-output-path\",\n type=str,\n help=\"Path to store features of all nodes. 
Effective only when graph is homogeneous.\",\n)\nargs = parser.parse_args()\n\nprint(\"Building graph\")\ndataset = MAG240MDataset(root=args.rootdir)\nei_writes = dataset.edge_index(\"author\", \"writes\", \"paper\")\nei_cites = dataset.edge_index(\"paper\", \"paper\")\nei_affiliated = dataset.edge_index(\"author\", \"institution\")\n\n# We sort the nodes starting with the papers, then the authors, then the institutions.\nauthor_offset = 0\ninst_offset = author_offset + dataset.num_authors\npaper_offset = inst_offset + dataset.num_institutions\n\ng = dgl.heterograph(\n {\n (\"author\", \"write\", \"paper\"): (ei_writes[0], ei_writes[1]),\n (\"paper\", \"write-by\", \"author\"): (ei_writes[1], ei_writes[0]),\n (\"author\", \"affiliate-with\", \"institution\"): (\n ei_affiliated[0],\n ei_affiliated[1],\n ),\n (\"institution\", \"affiliate\", \"author\"): (\n ei_affiliated[1],\n ei_affiliated[0],\n ),\n (\"paper\", \"cite\", \"paper\"): (\n np.concatenate([ei_cites[0], ei_cites[1]]),\n np.concatenate([ei_cites[1], ei_cites[0]]),\n ),\n }\n)\n\npaper_feat = dataset.paper_feat\nauthor_feat = np.memmap(\n args.author_output_path,\n mode=\"w+\",\n dtype=\"float16\",\n shape=(dataset.num_authors, dataset.num_paper_features),\n)\ninst_feat = np.memmap(\n args.inst_output_path,\n mode=\"w+\",\n dtype=\"float16\",\n shape=(dataset.num_institutions, dataset.num_paper_features),\n)\n\n# Iteratively process author features along the feature dimension.\nBLOCK_COLS = 16\nwith tqdm.trange(0, dataset.num_paper_features, BLOCK_COLS) as tq:\n for start in tq:\n tq.set_postfix_str(\"Reading paper features...\")\n g.nodes[\"paper\"].data[\"x\"] = torch.FloatTensor(\n paper_feat[:, start : start + BLOCK_COLS].astype(\"float32\")\n )\n # Compute author features...\n tq.set_postfix_str(\"Computing author features...\")\n g.update_all(fn.copy_u(\"x\", \"m\"), fn.mean(\"m\", \"x\"), etype=\"write-by\")\n # Then institution features...\n tq.set_postfix_str(\"Computing institution features...\")\n g.update_all(\n fn.copy_u(\"x\", \"m\"), fn.mean(\"m\", \"x\"), etype=\"affiliate-with\"\n )\n tq.set_postfix_str(\"Writing author features...\")\n author_feat[:, start : start + BLOCK_COLS] = (\n g.nodes[\"author\"].data[\"x\"].numpy().astype(\"float16\")\n )\n tq.set_postfix_str(\"Writing institution features...\")\n inst_feat[:, start : start + BLOCK_COLS] = (\n g.nodes[\"institution\"].data[\"x\"].numpy().astype(\"float16\")\n )\n del g.nodes[\"paper\"].data[\"x\"]\n del g.nodes[\"author\"].data[\"x\"]\n del g.nodes[\"institution\"].data[\"x\"]\nauthor_feat.flush()\ninst_feat.flush()\n\n# Convert to homogeneous if needed. 
(The RGAT baseline needs homogeneous graph)\nif args.graph_as_homogeneous:\n # Process graph\n g = dgl.to_homogeneous(g)\n # DGL ensures that nodes with the same type are put together with the order preserved.\n # DGL also ensures that the node types are sorted in ascending order.\n assert torch.equal(\n g.ndata[dgl.NTYPE],\n torch.cat(\n [\n torch.full((dataset.num_authors,), 0),\n torch.full((dataset.num_institutions,), 1),\n torch.full((dataset.num_papers,), 2),\n ]\n ),\n )\n assert torch.equal(\n g.ndata[dgl.NID],\n torch.cat(\n [\n torch.arange(dataset.num_authors),\n torch.arange(dataset.num_institutions),\n torch.arange(dataset.num_papers),\n ]\n ),\n )\n g.edata[\"etype\"] = g.edata[dgl.ETYPE].byte()\n del g.edata[dgl.ETYPE]\n del g.ndata[dgl.NTYPE]\n del g.ndata[dgl.NID]\n\n # Process feature\n full_feat = np.memmap(\n args.full_output_path,\n mode=\"w+\",\n dtype=\"float16\",\n shape=(\n dataset.num_authors + dataset.num_institutions + dataset.num_papers,\n dataset.num_paper_features,\n ),\n )\n BLOCK_ROWS = 100000\n for start in tqdm.trange(0, dataset.num_authors, BLOCK_ROWS):\n end = min(dataset.num_authors, start + BLOCK_ROWS)\n full_feat[author_offset + start : author_offset + end] = author_feat[\n start:end\n ]\n for start in tqdm.trange(0, dataset.num_institutions, BLOCK_ROWS):\n end = min(dataset.num_institutions, start + BLOCK_ROWS)\n full_feat[inst_offset + start : inst_offset + end] = inst_feat[\n start:end\n ]\n for start in tqdm.trange(0, dataset.num_papers, BLOCK_ROWS):\n end = min(dataset.num_papers, start + BLOCK_ROWS)\n full_feat[paper_offset + start : paper_offset + end] = paper_feat[\n start:end\n ]\n\n# Convert the graph to the given format and save. (The RGAT baseline needs CSC graph)\ng = g.formats(args.graph_format)\ndgl.save_graphs(args.graph_output_path, g)\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/ogb_lsc/MAG240M/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"73950447841","text":"import pygame\nimport math\nimport random\nimport time\n\nfrom constants import *\n\n\nclass Ball(pygame.sprite.Sprite):\n # paddle sprite class for the player to control\n def __init__(self, ballImg, ballFallSnd):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(ballImg, (20, 20))\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.center = (WIDTH/2, HEIGHT*0.9 - 20)\n self.radius = int(self.rect.width / 2)\n self.speedx = 0\n self.speedy = 0\n self.travelSpeed = 9\n self.cp = [False] * 8\n self.ballFallSnd = ballFallSnd\n\n def updateStandBy(self, faceXCoord, lastFaceXCoord):\n \"\"\"standby status. moves right and left,\n the ball stay top of the paddle\n \"\"\"\n\n if faceXCoord is None:\n self.rect.centerx = lastFaceXCoord\n else:\n self.rect.centerx = faceXCoord\n\n if self.rect.left < 150:\n self.rect.centerx = 100 + 50\n if self.rect.right > 650:\n self.rect.centerx = 700 - 50\n\n def initialMove(self):\n \"\"\"Set ball inital speed and direction\"\"\"\n angle = random.uniform(45, 135)\n while angle > 75 and angle < 105:\n angle = random.uniform(45, 135)\n print(\"angle: \", angle)\n self.speedx = self.travelSpeed * math.cos(math.radians(angle))\n self.speedy = self.travelSpeed * math.sin(math.radians(angle))\n\n def update(self, game, paddle):\n \"\"\"playing status. 
Ball will be traveling\"\"\"\n self.rect.x += self.speedx\n self.rect.y -= self.speedy\n\n if self.rect.left < 100:\n self.rect.left = 100\n self.speedx *= -1\n if self.rect.right > 700:\n self.rect.right = 700\n self.speedx *= -1\n\n if self.rect.top <= 0:\n self.speedy *= -1\n if self.rect.bottom > HEIGHT:\n self.ballFallSnd.play()\n game.die()\n game.setStandByMode(True)\n time.sleep(1)\n paddle.rect.center = (WIDTH/2, HEIGHT*0.9)\n self.rect.center = (WIDTH/2, HEIGHT*0.9 - 20)\n\n def collideSides(self, otherRect):\n self.cp[0] = otherRect.collidepoint(self.rect.topleft)\n self.cp[1] = otherRect.collidepoint(self.rect.topright)\n self.cp[2] = otherRect.collidepoint(self.rect.bottomleft)\n self.cp[3] = otherRect.collidepoint(self.rect.bottomright)\n self.cp[4] = otherRect.collidepoint(self.rect.midleft)\n self.cp[5] = otherRect.collidepoint(self.rect.midright)\n self.cp[6] = otherRect.collidepoint(self.rect.midtop)\n self.cp[7] = otherRect.collidepoint(self.rect.midbottom)\n\n left = (self.cp[1] and self.cp[5] and not self.cp[0]) or \\\n (self.cp[5] and self.cp[3] and not self.cp[2])\n right = (self.cp[0] and self.cp[4] and not self.cp[1]) or \\\n (self.cp[4] and self.cp[2] and not self.cp[3])\n top = (self.cp[2] and self.cp[7] and self.cp[3] and not self.cp[6]) or \\\n (self.cp[3] and not self.cp[5]) or (self.cp[2] and not self.cp[4])\n bottom = (self.cp[0] and self.cp[6] and self.cp[1] and not self.cp[7]) or \\\n (self.cp[1] and not self.cp[5]) or (self.cp[0] and not self.cp[4])\n\n return left, right, top, bottom\n\n\n def paddleDeflection(self, paddle):\n\n left, right, top, bottom = self.collideSides(paddle.rect)\n\n # when ball hits the left side of paddle\n if self.speedx > 0 and left:\n self.rect.right = paddle.rect.left\n self.speedx *= -1\n # when ball hits the right side of paddle\n elif self.speedx < 0 and right:\n self.rect.left = paddle.rect.right\n self.speedx *= -1\n elif top:\n self.speedy *= -1\n elif bottom:\n pass\n\n\n # return paddle - ball collide time to avoid keep colliding\n return time.time()\n\n def brickSingleDeflection(self, brick):\n # left side Collision\n print(\"Single brick Collide\")\n left, right, top, bottom = self.collideSides(brick.rect)\n\n # ball hits left side of brick\n if self.speedx > 0 and left:\n self.rect.right = brick.rect.left-1\n self.speedx *= -1\n print(\"Left Collision\")\n # ball hits right side of brick\n elif self.speedx < 0 and right:\n self.rect.left = brick.rect.right+1\n self.speedx *= -1\n print(\"Right Collision\")\n # ball hits top of brick\n elif top:\n self.rect.bottom = brick.rect.top-1\n self.speedy *= -1\n print(\"Top Collision\")\n # ball hits bottom of brick\n elif bottom:\n self.rect.top = brick.rect.bottom+1\n self.speedy *= -1\n print(\"Bottom Collision\")\n\n def brickDoubleDeflection(self, brick1, brick2):\n\n print(\"Multiple brick Collide\")\n\n # ball hits the middle of two bricks\n if brick1.rect.bottom == brick2.rect.bottom:\n self.speedy *= -1\n else:\n self.speedy *= -1\n self.speedx *= -1\n","repo_name":"anthopark/Face-Breakout","sub_path":"ball_object.py","file_name":"ball_object.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"41257123195","text":"import pygame\nfrom Block import PygameBlock\nimport sys, time\nfrom Raspberry import Raspberry\nfrom Snake import Snake\n\nclass GameArena():\n def __init__(self, size):\n self.size = size\n\n def updateScreen(self, raspberry, snakeList):\n raise 
Exception(\"GameArena.updateScreen is not to be called directly. Must be implemented by derived classes.\")\n\n def createRaspberry(self):\n raise Exception(\"GameArena.createRaspberryBlock is not to be called directly. Must be implemented by derived classes.\")\n\n def createSnakes(self):\n raise Exception(\"GameArena.createSnakeBlocks is not to be called directly. Must be implemented by derived classes.\")\n\n def gameOver(self):\n raise Exception(\"GameArena.gameOver is not to be called directly. Must be implemented by derived classes.\")\n\nclass PyGameArena(GameArena):\n redColour = pygame.Color(255, 0, 0)\n blackColour = pygame.Color(0, 0, 0)\n whiteColour = pygame.Color(255, 255, 255)\n greyColour = pygame.Color(150, 150, 150)\n\n def __init__(self,size, blockSize):\n GameArena.__init__(self, size)\n self.blockSize = blockSize\n width = size[0] * blockSize\n height = size[1] * blockSize\n self.playSurface = pygame.display.set_mode((width, height))\n pygame.display.set_caption('Raspberry Snake')\n\n def createRaspberry(self):\n # define a PygameBlock. This block will contain all information about size, colour, and how to draw.\n raspberryBlock = PygameBlock(self.blockSize, self.redColour, self.playSurface)\n # The raspberryBlock is injected into the Raspberry object\n raspberry = Raspberry(raspberryBlock)\n return raspberry\n\n def createSnakes(self, playerInput):\n # define PygameBlocks for the two snakes\n snakeBlock_1 = PygameBlock(self.blockSize, self.whiteColour, self.playSurface)\n snakeBlock_2 = PygameBlock(self.blockSize, self.greyColour, self.playSurface)\n\n # create the snakes. The snakeBlocks are injected into the snakes\n snake_1 = Snake(snakeBlock_1, playerInput[0])\n snake_2 = Snake(snakeBlock_2, playerInput[1])\n return [snake_1, snake_2]\n\n def updateScreen(self, raspberry, snakeList):\n #put everything in position for redrawing...\n #...first the backdrop\n self.playSurface.fill(self.blackColour)\n\n # ...then draw snake\n for snake in snakeList:\n snake.draw(self.playSurface)\n\n # ...then draw raspberry\n raspberry.draw()\n\n # ...finally activte the update of the screen\n pygame.display.flip()\n\n def gameOver(self):\n gameOverFont = pygame.font.Font('freesansbold.ttf', 72)\n gameOverSurf = gameOverFont.render('Game Over', True, self.greyColour)\n gameOverRect = gameOverSurf.get_rect()\n gameOverRect.midtop = (320, 10)\n self.playSurface.blit(gameOverSurf, gameOverRect)\n pygame.display.flip()\n time.sleep(1)\n pygame.quit()\n sys.exit()\n","repo_name":"akfalk/PySnake","sub_path":"PySnake/GameArena.py","file_name":"GameArena.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72757472161","text":"from abc import abstractmethod\nfrom enum import Enum\nfrom typing import Dict, List\nfrom sklearn.ensemble import GradientBoostingRegressor\nimport pandas as pd\nfrom statsmodels.tsa.statespace.mlemodel import MLEResults\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\n\n\nclass PredictiveModel:\n\n def __init__(self, model_name: str, model_parameters: Dict):\n self.model_parameters = model_parameters\n self.model_name = model_name\n\n def fit(self, training_data, **kwargs):\n # to pre-processing if we want\n self._inner_fit(training_data, **kwargs)\n\n def predict(self, prediction_times, **kwargs):\n # to pre-processing if we want\n return self._inner_predict(prediction_times, **kwargs)\n\n @abstractmethod\n def _inner_fit(self, training_data: pd.DataFrame, 
**kwargs):\n pass\n\n @abstractmethod\n def _inner_predict(self, prediction_times, **kwargs):\n pass\n\n @abstractmethod\n def factory(self, model_name: str, model_parameters: Dict) -> 'PredictiveModel':\n pass\n\n\nclass BoostedRegressor(PredictiveModel):\n\n def __init__(self, model_name: str, model_parameters: Dict):\n super().__init__(model_name, model_parameters)\n self.recent_usage_time_periods = model_parameters['recent_usage_time']\n self.usage_column_name = \"Current Demand\"\n self.regressor_parameters = {'loss': 'ls',\n 'learning_rate': .05,\n 'subsample': .7,\n 'verbose': 1,\n 'max_depth': 5,\n 'n_estimators': 1000\n }\n if \"regressor_parameters\" in model_parameters:\n self.regressor_parameters = model_parameters[\"regressor_parameters\"]\n\n self.model = GradientBoostingRegressor(**self.regressor_parameters)\n\n def _inner_fit(self, training_data: pd.DataFrame, recent_usage_time_periods: int = 1):\n for period_back in range(1, recent_usage_time_periods + 1, 1):\n training_data[str(period_back)] = training_data[self.usage_column_name]\n training_data[str(period_back)] = training_data.shift(period_back)\n\n training_data = training_data[recent_usage_time_periods:]\n self.model.fit(training_data)\n return training_data\n\n def _inner_predict(self, prediction_times: List[int]):\n return self.model.predict(prediction_times)\n\n @classmethod\n def factory(cls, model_name: str, model_parameters: Dict) -> 'BoostedRegressor':\n return cls(model_name, model_parameters)\n\n\nclass SARIMA(PredictiveModel):\n\n def __init__(self, model_name: str, model_parameters: Dict):\n super().__init__(model_name, model_parameters)\n self.order = (4, 1, 0)\n if \"order\" in model_parameters:\n self.order = model_parameters[\"order\"]\n self.seasonal_order = (2, 1, 1, 12)\n if \"seasonal_order\" in model_parameters:\n self.seasonal_order = model_parameters[\"seasonal_order\"]\n self.model: SARIMAX = None\n\n def _inner_fit(self, training_data: pd.DataFrame):\n self.model = SARIMAX(training_data, order=self.order, seasonal_order=self.seasonal_order)\n self.fit_model: MLEResults = self.model.fit()\n\n def _inner_predict(self, prediction_times: List, start=None, end=None) -> List[float]:\n results = self.fit_model.predict(start=start, end=end)\n return results\n\n @classmethod\n def factory(cls, model_name: str, model_parameters: Dict) -> 'SARIMA':\n return cls(model_name, model_parameters)\n\n\nclass ModelFactory:\n class AvailableModels(Enum):\n BOOSTED = \"boosted_regressor\"\n SARIMA = \"seasonal_arima\"\n\n factories = {AvailableModels.BOOSTED.value: BoostedRegressor.factory,\n AvailableModels.SARIMA.value: SARIMA.factory}\n\n @classmethod\n def factory_caller(cls, model_type: str, model_name: str, model_parameters: Dict) -> PredictiveModel:\n return cls.factories[model_type](model_name, model_parameters)\n","repo_name":"cliftbar/switch_suncode2019","sub_path":"api_server/switch_api/services/PredictiveModel.py","file_name":"PredictiveModel.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43981017191","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nT=np.loadtxt(\"../4.1/tri.dat\")\npts=100\nN=10**6\n\nx=np.linspace(-6,6,pts)\n\nF=[]\nfor i in range(0,pts):\n F.append(np.size(np.nonzero(T < x[i]))/N)\n\np=[]\nfor i in 
range(0,pts-1):\n\tp.append((F[i+1]-F[i])/(x[i+1]-x[i]))\n\nplt.plot(x[0:pts-1],p,label=\"PDF\")\nplt.grid()\nplt.xlabel(\"x\")\nplt.ylabel(\"$p_T(x)$\")\nplt.legend()\nplt.show()\n","repo_name":"TYCN129/AI1110-Assignments","sub_path":"Manual 1/4.3/4.3.py","file_name":"4.3.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18474213857","text":"import torch\nfrom torch.utils.data import Dataset \nfrom torchvision import transforms\n\nfrom skimage import transform\nfrom PIL import Image, ImageFile\nimport skimage.io as io\nimport math\nimport numpy as np\n\nimport glob\nimport os\n\nclass GANImages(Dataset):\n def __init__(self, directory, image_size=(64,64)):\n self.directory = directory\n self.images_filename = glob.glob(os.path.join(directory, \"*.png\"))\n self.image_size = image_size\n self.transform = transforms.Compose([\n transforms.ColorJitter(0, 0, 0.2, 0.05),\n transforms.RandomHorizontalFlip(),\n transforms.Resize(image_size),\n transforms.RandomCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n def __len__(self):\n return len(self.images_filename)\n\n def __getitem__(self, idx):\n target_image = Image.open(self.images_filename[idx]).convert('RGB')\n return self.transform(target_image)\n\ndef get_weighted_mask(mask, window_size):\n assert len(mask.shape) == 3\n assert window_size % 2 == 1 # odd window size\n max_shift = window_size // 2\n output = np.zeros_like(mask)\n for i in range(-max_shift, max_shift+1):\n for j in range(-max_shift, max_shift+1):\n if i != 0 or j != 0:\n output += np.roll(mask, (i,j), axis=(1,2))\n output = 1 - output / (window_size**2 - 1)\n return output * mask\n\nclass CorruptedPatchDataset(Dataset):\n def __init__(self, directory, image_size=(64,64), weighted_mask=True, window_size=7, feats_size=(14,14)):\n self.directory = directory\n self.images_filename = glob.glob(os.path.join(directory, \"*.png\")) + glob.glob(os.path.join(directory, \"*.jpg\"))\n self.image_size = image_size\n self.weighted_mask = weighted_mask\n self.window_size = window_size\n self.feats_size = feats_size\n self.transform = transforms.Compose([\n transforms.ColorJitter(0, 0, 0.2, 0.05),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(image_size),\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n def __len__(self):\n return len(self.images_filename)\n\n def __getitem__(self, idx):\n original_image = Image.open(self.images_filename[idx]).convert('RGB')\n original_image = self.transform(original_image)\n\n # Patch\n mask = np.ones(self.image_size, dtype=np.float32)\n x = np.random.randint(self.image_size[0]//6, 5*self.image_size[0]//6)\n y = np.random.randint(self.image_size[1]//6, 5*self.image_size[1]//6)\n h = np.random.randint(self.image_size[0]//4, self.image_size[0]//2)\n w = np.random.randint(self.image_size[1]//4, self.image_size[1]//2)\n top = max(0, x - h // 2)\n bottom = min(self.image_size[0], x + h // 2)\n left = max(0, y - w // 2)\n right = min(self.image_size[1], y + w // 2)\n mask[top:bottom, left:right] = 0\n target_image = original_image.numpy().copy()\n target_image[:, 1-mask > 0.5] = np.max(target_image)\n\n feats_mask = np.ones(self.feats_size, dtype=np.float32)\n ratio = self.image_size[0] / self.feats_size[0]\n feats_mask[math.floor(top/ratio):math.ceil(bottom/ratio), math.floor(left/ratio):math.ceil(right/ratio)] = 
0\n\n mask = mask.reshape((1,) + mask.shape)\n feats_mask = feats_mask.reshape((1,) + feats_mask.shape)\n\n # Weighted Mask\n if self.weighted_mask: \n weighted_mask = get_weighted_mask(mask, self.window_size)\n return torch.FloatTensor(target_image), torch.FloatTensor(original_image), torch.FloatTensor(mask), torch.FloatTensor(weighted_mask), torch.FloatTensor(feats_mask)\n else:\n return torch.FloatTensor(target_image), torch.FloatTensor(original_image), torch.FloatTensor(mask)\n","repo_name":"jonshamir/vgg_inpainting","sub_path":"model/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11164230320","text":"import pickle\nimport numpy as np\nfrom scipy.spatial.distance import squareform\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom matplotlib import pyplot as plt\n\nif __name__ == '__main__':\n for fname in ['all_words', 'stopwords_excluded']:\n for dim in range(3):\n froot = f\"../cache/{fname}/distance_matrices/dimension{dim}/\"\n D = np.load(f\"{froot}distance_matrix.npy\")\n indices = pickle.load(open(f\"{froot}filename_to_distance_matrix_row_and_column_index.P\", 'rb'))\n indices = [k for k in indices.keys()]\n np.fill_diagonal(D, 0)\n L = linkage(squareform(D), method='average')\n plt.figure(figsize=(20, 10))\n dendrogram(L, orientation='left', labels=indices)\n plt.savefig(f\"{froot}dendrogram.png\")\n","repo_name":"pr3mar/tda-text2","sub_path":"code/dendrograms.py","file_name":"dendrograms.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28785228779","text":"# 1. 인풋 처리\nn = int(input())\n\nt = list()\np = list()\nfor i in range(n):\n a,b = map(int, input().split())\n t.append(a)\n p.append(b)\n\n# 2. dp 테이블 초기화\ndp = [0]*(n+1)\n\n# 3. dp 테이블 갱신 --> dp[i] = i~n일까지의 최대 수익\nmax_val = 0\nfor i in range(n-1, -1, -1):\n next_day = i + t[i]\n\n if next_day <= n:\n dp[i] = max(p[i] + dp[next_day], max_val)\n max_val = dp[i]\n else:\n dp[i] = max_val\n\n# 4. 
결과 출력\nprint(max(dp))","repo_name":"82KJ/Coding-Test-with-python","sub_path":"DP/Q33_퇴사.py","file_name":"Q33_퇴사.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35378052941","text":"import ccxt\nimport pprint\nimport time\nimport math\nimport datetime\nimport pandas as pd\nimport re\nimport backtest\nimport judge\n\n\ndef login():\n with open(\"./api.txt\") as f:\n lines = f.readlines()\n api_key = lines[0].strip()\n secret = lines[1].strip()\n\n binance = ccxt.binance(config={\n 'apiKey': api_key,\n 'secret': secret,\n 'enableRateLimit': True,\n 'options':{\n 'defaultType': 'future'\n }\n })\n print('로그인')\n return binance\n\ndef get_df(binance, ticker):\n\n data = binance.fetch_ohlcv(\n symbol=ticker,\n timeframe='4h',\n since=None,#타임스탬프 형식인지 확인\n limit=1500\n )\n name = re.sub(\"\\/\", \"\", ticker)\n df = pd.DataFrame(data, columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])\n df['datetime'] = pd.to_datetime(df['datetime'], unit='ms') + datetime.timedelta(hours=9)\n df.set_index('datetime', inplace=False)\n\n return df\n\ndef get_amount(binance, kelly, ticker='BTC/USDT'):\n\n usdt = get_usdt(binance)\n data = binance.fetch_ticker(ticker)\n\n usdt = usdt['total']\n price = data['last']\n\n usdt = usdt*kelly\n price = usdt / price\n\n price = price*1000\n price = math.trunc(price)\n amount = price / 1000\n\n return amount\n\ndef refine_kelly(kelly):\n kelly = kelly * 10\n kelly = math.trunc(kelly)\n kelly = kelly / 10\n return kelly\n\ndef get_kellys(dictionary, tickers):\n k = 0\n kellys = 0.0\n for i in range(len(dictionary)):\n if dictionary[tickers[i]][1] != 0.0:\n kellys += dictionary[tickers[i]][1]\n k += 1\n aver = round(kellys / k,1)\n\n print('캘리평균', aver)\n return aver\n\ndef get_tickers(binance):\n tickers = []\n markets = binance.load_markets()\n for i in markets:\n tickers.append(i)\n tickers = [s for s in tickers if \"/USDT\" in s]\n\n return tickers\n\ndef get_usdt(binance):\n balance = binance.fetch_balance(params={\"type\":'future'})\n\n return balance['USDT']\n\ndef bool_own(own, ticker):\n if ticker in own:\n return True\n else:\n return False\n\ndef get_dictionary(binance):\n tickers = get_tickers(binance)\n dictionary = {string: [False, 0.0] for string in tickers}\n return dictionary\n\ndef get_amounts(binance):\n amount = get_dictionary(binance)\n balance = binance.fetch_balance(params={\"type\": 'future'})\n positions = balance['info']['positions']\n positions = [s for s in positions if \"USDT\" in s[\"symbol\"]]\n for position in positions:\n ticker = position[\"symbol\"]\n positionAmt = float(position['positionAmt'])\n ticker = ticker[:-4] + '/' + ticker[-4:]\n try:\n amount[ticker][1] = positionAmt\n except KeyError as e:\n continue\n\n return amount\n\ndef have(binance, amounts):\n tickers = get_tickers(binance)\n\n for i in range(len(tickers)):\n if amounts[tickers[i]][1] == 0.0:\n del amounts[tickers[i]]\n\n return amounts\n\ndef kelly_sig(dictionary):\n tickers = list(dictionary.keys())\n kellys = get_kellys(dictionary, tickers)\n\n for i in range(len(tickers)):\n if dictionary[tickers[i]][1] <= kellys:\n del dictionary[tickers[i]]\n\n return dictionary\n\n\ndef get_coins(binance, print=True):\n tickers = get_tickers(binance)\n dictionary = {string: ['None', 0.0] for string in tickers}\n\n for i in range(len(tickers)):\n data = get_df(binance, tickers[i])\n df, kelly = backtest.backtest(data, tickers[i], print)\n\n dictionary[tickers[i]][1] 
= refine_kelly(kelly)\n if (judge.judge_buy(df, tickers[i]) == True):\n dictionary[tickers[i]][0] = \"BUY\"\n if judge.judge_sell(df, tickers[i]) == True:\n dictionary[tickers[i]][0] = \"SELL\"\n\n return dictionary\n\ndef start_long(binance, ticker, kelly):\n amount = get_amount(binance, kelly, ticker)\n print('amount:',amount)\n order = binance.create_market_buy_order(\n symbol = ticker,\n amount = amount\n )\n pprint.pprint(order)\n\ndef terminate_long(binance, ticker, amount):\n print('sell_amount:', amount)\n order = binance.create_market_sell_order(\n symbol = ticker,\n amount = amount\n )\n pprint.pprint(order)\n\ndef set_leverage(binance, ticker):\n print('레버리지 설정 진입')\n markets = binance.load_markets()\n market = binance.market(ticker)\n leverage = 3\n resp = binance.fapiPrivate_post_leverage({\n 'symbol': market['id'],\n 'leverage': leverage\n })\n\ndef sub():\n binance = login()\n\n coins = get_coins(binance, False) #캘리 들어간 딕셔너리\n ticks = list(coins.keys())\n amounts = get_amounts(binance) #amount 들어간 딕셔너리\n own = have(binance, amounts)\n\n for i in range(len(ticks)):\n if bool_own(own, ticks[i]) == False:\n if coins[ticks[i]][0]=='BUY':\n if coins[ticks[i]][1] > 0.1:\n set_leverage(binance, ticks[i])\n start_long(binance, ticks[i], coins[ticks[i]][1])\n print(ticks[i], 'BUY')\n else:\n if coins[ticks[i]][0]=='SELL':\n if bool_own(own, ticks[i])==True:\n terminate_long(binance, ticks[i], own[ticks[i]][1])\n print(ticks[i], 'SELL')\n\n amounts = get_amounts(binance)\n own = have(binance, amounts)\n own_list = list(own.keys())\n print('보유중인 코인:', own_list)\n\ndef main():\n now = datetime.datetime.now()\n ago = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute) + datetime.timedelta(minutes=10)\n\n while True:\n try:\n now = datetime.datetime.now()\n if ago < now < ago + datetime.timedelta(minutes=5):\n now = datetime.datetime.now()\n ago = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute) + datetime.timedelta(minutes=10)\n sub()\n print(now, \"vs\", ago)\n except Exception as e:\n print('에러발생', e)\n time.sleep(1)\n","repo_name":"rnrlgus/tradingBot_bi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73953091362","text":"import os\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport click\nfrom lightning_cloud.openapi.rest import ApiException\n\nfrom lightning_app.cli.cmd_ssh_keys import _SSHKeyManager\n\n\n@click.group(\"add\")\ndef cli_add() -> None:\n \"\"\"Add Lightning AI self-managed resources (ssh-keys, etc…)\"\"\"\n pass\n\n\n@cli_add.command(\"ssh-key\")\n@click.option(\"--name\", \"key_name\", default=None, help=\"name of ssh key\")\n@click.option(\"--comment\", \"comment\", default=\"\", help=\"comment detailing your SSH key\")\n@click.option(\n \"--public-key\",\n \"public_key\",\n help=\"public key or path to public key file\",\n required=True,\n)\ndef add_ssh_key(\n public_key: Union[str, \"os.PathLike[str]\"], key_name: Optional[str] = None, comment: Optional[str] = None\n) -> None:\n \"\"\"Add a new Lightning AI ssh-key to your account.\"\"\"\n ssh_key_manager = _SSHKeyManager()\n\n new_public_key = Path(str(public_key)).read_text() if os.path.isfile(str(public_key)) else public_key\n try:\n ssh_key_manager.add_key(name=key_name, comment=comment, public_key=str(new_public_key))\n except ApiException as e:\n # if we got an exception it might be the user passed the 
private key file\n if os.path.isfile(str(public_key)) and os.path.isfile(f\"{public_key}.pub\"):\n ssh_key_manager.add_key(name=key_name, comment=comment, public_key=Path(f\"{public_key}.pub\").read_text())\n else:\n raise e\n","repo_name":"Eashurox/CPDP_ML","sub_path":"Dataset/ML Projects/Lightning_Versions/lightning-1.8.0/src/lightning_app/cli/lightning_cli_add.py","file_name":"lightning_cli_add.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31245099147","text":"# -*- coding: utf8 -*-\r\n''' Автор ArtyLa '''\r\nimport typing, os, sys, io, random, re, time, json, threading, logging, importlib, queue, argparse, subprocess, glob, base64, collections\r\nimport wsgiref.simple_server, socketserver, socket, urllib.parse, urllib.request\r\nimport requests, psutil, bs4, uuid, PIL.Image, schedule\r\nimport settings, store, dbengine, compile_all_jsmblh, updateengine # pylint: disable=import-error\r\ntry:\r\n # TODO не смотря на декларированную кроссплатформенность pystray нормально заработал только на windows\r\n # на ubuntu он работает странно а на маке вызывает падение уже дальше по коду\r\n if sys.platform == 'win32':\r\n import pystray\r\nexcept Exception:\r\n print('No pystray installed or other error, no tray icon')\r\ntry:\r\n import telegram\r\n from telegram import InlineKeyboardButton, InlineKeyboardMarkup\r\n from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters, callbackcontext\r\nexcept ModuleNotFoundError:\r\n print('No telegram installed, no telegram bot')\r\n\r\nlang = 'p' # Для плагинов на python префикс lang всегда 'p'\r\n\r\n# Scheduler commands constants\r\nCMD_CHECK = 'check'\r\nCMD_CHECK_SEND = 'check_send'\r\nCMD_CHECK_NEW_VERSION = 'check_new_version'\r\nCMD_PING = 'ping'\r\nCMD_GET_ONE = 'get_one'\r\nSCHED_CMDS = (CMD_CHECK, CMD_CHECK_NEW_VERSION, CMD_CHECK_SEND, CMD_GET_ONE, CMD_PING)\r\n\r\nJob = collections.namedtuple('Job', 'job_str job_sched cmd filter err_msg')\r\n\r\nQ_CMD_EXIT = 'exit'\r\nQ_CMD_CANCEL = 'cancel'\r\ncmdqueue: queue.Queue = queue.Queue() # Диспетчер команд - нужен для передачи сигналов между трэдами, в т.к. для завершения в докере - kill для pid=1 не работает\r\n\r\nHTML_NO_REPORT = '''Для того чтобы были доступны отчеты необходимо в mbplugin.ini включить запись результатов в sqlite базу
    \r\nsqlitestore = 1\r\nТакже можно настроить импорт из базы BalanceHistory.mdb включив\r\ncreatehtmlreport = 1
    \r\nПосле включения, запустите mbplugin\\\\setup_and_check.bat\r\n'''\r\n\r\n# TODO в командах для traymeny используется os.system(f'start ... это будет работать только в windows, но пока пофигу, т.к. сам pystray работает только в windows\r\n# т.к. импортируем до включения MODE_MB пришлось завернуть это в функцию\r\ndef tray_menu():\r\n return (\r\n {'text': \"Main page\", 'cmd': lambda: os.system(f'start http://localhost:{store.options(\"port\", section=\"HttpServer\")}/main'), 'show': True},\r\n {'text': \"View report\", 'cmd': lambda: os.system(f'start http://localhost:{store.options(\"port\", section=\"HttpServer\")}/report'), 'show': True},\r\n {'text': \"Edit config\", 'cmd': lambda: os.system(f'start http://localhost:{store.options(\"port\", section=\"HttpServer\")}/editcfg'), 'show': str(store.options('HttpConfigEdit')) == '1'},\r\n {'text': \"View log\", 'cmd': lambda: os.system(f'start http://localhost:{store.options(\"port\", section=\"HttpServer\")}/log?lines=40'), 'show': True},\r\n {'text': \"View screenshot log\", 'cmd': lambda: os.system(f'start http://localhost:{store.options(\"port\", section=\"HttpServer\")}/log/list'), 'show': True},\r\n {'text': \"Get balance request\", 'cmd': lambda: threading.Thread(target=getbalance_standalone, name='Getbalance', daemon=True).start(), 'show': True},\r\n {'text': \"Flush log\", 'cmd': lambda: store.logging_restart(), 'show': True},\r\n {'text': \"Reload schedule\", 'cmd': lambda: Scheduler().reload(), 'show': True},\r\n {'text': \"Recompile jsmblh plugin\", 'cmd': lambda: compile_all_jsmblh.recompile(), 'show': True},\r\n # {'text': \"Version update\", 'cmd': lambda: run_update(), 'show': True}, # TODO продумать как это показывать\r\n {'text': \"Cancel the balance request\", 'cmd': lambda: cancel_query(reason='tray icon command'), 'show': True},\r\n {'text': \"Restart server\", 'cmd': lambda: restart_program(reason='tray icon command'), 'show': True},\r\n {'text': \"Exit program\", 'cmd': lambda: restart_program(reason='Tray icon exit', exit_only=True), 'show': True}\r\n )\r\n\r\n\r\ndef getbalance_standalone_one_pass(queue):\r\n ''' Получаем балансы самостоятельно без mobilebalance ОДИН ПРОХОД\r\n по списку queue_balance\r\n '''\r\n result: typing.Dict = {}\r\n for val in queue:\r\n keypair = f\"{val['Region']}_{val['Number']}\"\r\n prev_state = str(dbengine.flags('get', keypair))\r\n if not prev_state.endswith('queue'):\r\n dbengine.flags('set', keypair, f'{prev_state} queue') # выставляем флаг о постановке в очередь в КОНЕЦ строки\r\n for val in queue:\r\n # TODO пока дергаем метод от веб сервера там уже все есть, потом может вынесем отдельно\r\n keypair = f\"{val['Region']}_{val['Number']}\"\r\n try:\r\n # проверяем на сигнал Q_CMD_CANCEL, все остальное - кладем обратно\r\n if Q_CMD_CANCEL in cmdqueue.queue:\r\n qu = [cmdqueue.get(block=False) for el in range(cmdqueue.qsize())]\r\n [cmdqueue.put(el) for el in qu if el != Q_CMD_CANCEL] # type: ignore\r\n logging.info(f'Receive cancel signal to query')\r\n store.feedback.text(f\"Receive cancel signal\")\r\n return result\r\n store.feedback.text(f\"Receive {val['Alias']}:{val['Region']}_{val['Number']}\")\r\n r1 = getbalance_plugin('get', {'plugin': [val['Region']], 'login': [val['Number']], 'password': [val['Password2']], 'date': ['date']})\r\n result[keypair] = 'Balance' in repr(r1)\r\n except Exception:\r\n result[keypair] = False\r\n logging.error(f\"Unsuccessful check {val['Region']} {val['Number']} {store.exception_text()}\")\r\n return result\r\n\r\n\r\ndef 
getbalance_standalone(filter: list = [], only_failed: bool = False, retry: int = -1, **kwargs):\r\n ''' Получаем балансы делая несколько проходов по неудачным\r\n retry=N количество повторов по неудачным попыткам, после запроса по всем (повторы только при only_failed=False)\r\n kwargs добавлен чтобы забрать из _run все лишние параметры\r\n Результаты сохраняются в базу\r\n Если filter пустой то по всем номерам из phones.ini\r\n Если не пустой - то логин/алиас/оператор или его часть\r\n для автономной версии в поле Password2 находится незашифрованный пароль\r\n ВНИМАНИЕ! при редактировании файла phones.ini через MobileBalance строки с паролями будут удалены\r\n для совместного использования с MobileBalance храните пароли password2 и другие специфичные опции\r\n для Standalone версии в файле phones_add.ini\r\n only_failed=True - делать запросы только по тем номерам, по которым прошлый запрос был неудачный\r\n '''\r\n store.turn_logging(httplog=True) # Т.к. сюда можем придти извне, то включаем логирование здесь\r\n logging.info(f'getbalance_standalone: filter={filter}')\r\n phones = store.ini('phones.ini').phones()\r\n queue_balance = [] # Очередь телефонов на получение баланса\r\n for val in phones.values():\r\n if val['monitor'].upper() != 'TRUE':\r\n continue # только те у кого включен мониторинг\r\n keypair = f\"{val['Region']}_{val['Number']}\"\r\n # Проверяем все у кого задан плагин, логин и пароль пароль\r\n if val['Number'] != '' and val['Region'] != '' and val['Password2'] != '':\r\n if len(filter) == 0 or [1 for i in filter if i.lower() in f\"__{keypair}__{val['Alias']}\".lower()] != []:\r\n # Формируем очередь на получение балансов и размечаем балансы из очереди в таблице flags чтобы красить их по другому\r\n queue_balance.append(val)\r\n logging.info(f'getbalance_standalone queued: {keypair}')\r\n store.feedback.text(f'Queued {len(queue_balance)} numbers')\r\n if retry == -1:\r\n retry = int(store.options('retry_failed', flush=True))\r\n result = {}\r\n if only_failed:\r\n queue_fail = [val for val in queue_balance if str(dbengine.flags('get', f\"{val['Region']}_{val['Number']}\")).startswith('error')]\r\n getbalance_standalone_one_pass(queue_fail)\r\n else:\r\n result.update(getbalance_standalone_one_pass(queue_balance))\r\n for i in range(retry):\r\n queue_fail = [val for val in queue_balance if str(dbengine.flags('get', f\"{val['Region']}_{val['Number']}\")).startswith('error')]\r\n result.update(getbalance_standalone_one_pass(queue_fail))\r\n return result\r\n\r\n\r\ndef get_full_info_one_number(keypair: str, check: bool = False) -> str:\r\n '''Получение подробной информации по одному\r\n keypair - Region_Number\r\n check==True - запросить информацию по номеру перед возвратом\r\n '''\r\n if check: # /checkone - получаем баланс /getone - только показываем\r\n getbalance_standalone(filter=[f'__{keypair}__']) # приходится добавлять подчеркивания чтобы исключить попадание по части строки\r\n params = {'include': f'__{keypair}__'}\r\n baltxt = prepare_balance('FULL', params=params)\r\n store.feedback.text(baltxt)\r\n # Детализация UslugiList по ключу val['Region']}_{val['Number']\r\n responses = dbengine.responses()\r\n if keypair in responses:\r\n response = json.loads(responses[f\"{keypair}\"])\r\n else:\r\n logging.info(f'Not found response in responses for {keypair}')\r\n return baltxt\r\n # берем всю информацию по номеру\r\n response = {k: (round(v, 2) if type(v) == float else v)for k, v in response.items()}\r\n detailed = '\\n'.join([f'{name} = {response[k]}' for k, name in 
\r\n
\r\n
def get_full_info_one_number(keypair: str, check: bool = False) -> str:\r\n
    '''Get detailed information for a single number\r\n
    keypair - Region_Number\r\n
    check==True - query the number before returning\r\n
    '''\r\n
    if check:  # /checkone - fetch the balance, /getone - just show it\r\n
        getbalance_standalone(filter=[f'__{keypair}__'])  # the underscores are added to avoid matching on a substring\r\n
    params = {'include': f'__{keypair}__'}\r\n
    baltxt = prepare_balance('FULL', params=params)\r\n
    store.feedback.text(baltxt)\r\n
    # UslugiList details, keyed by Region_Number\r\n
    responses = dbengine.responses()\r\n
    if keypair in responses:\r\n
        response = json.loads(responses[keypair])\r\n
    else:\r\n
        logging.info(f'Not found response in responses for {keypair}')\r\n
        return baltxt\r\n
    # take all the information for this number\r\n
    response = {k: (round(v, 2) if type(v) == float else v) for k, v in response.items()}\r\n
    detailed = '\\n'.join([f'{name} = {response[k]}' for k, name in dbengine.PhonesHText.items() if k in response])\r\n
    uslugi = ''\r\n
    if response.get('UslugiList', '') != '':\r\n
        ul = response['UslugiList'].split('\\n')\r\n
        if str(store.options('ShowOnlyPaid', section='Telegram')) == '1':\r\n
            ul = [line for line in ul if '\\t0' not in line]\r\n
        uslugi = '\\n'.join(ul).replace('\\t', ' = ')\r\n
    else:\r\n
        logging.info(f'Not found UslugiList in response for {keypair}')\r\n
    msgtxt = f\"{baltxt}\\n{detailed}\\n{uslugi}\".strip()\r\n
    store.feedback.text(msgtxt)\r\n
    return msgtxt\r\n
\r\n
\r\n
def getbalance_plugin(method, param_source):\r\n
    ''' Call the jsmbLH plugins\r\n
    fplugin, login, password, date\r\n
    date is there to defeat request caching, anything can be put into it\r\n
    Depending on method the parameters come either as\r\n
    url: a list [fplugin, login, password, date]\r\n
    get: a dict as in a GET request {'fplugin': [...], 'login': [...], 'password': [...], 'date': [...]}\r\n
    '''\r\n
    param = {}\r\n
    if method == 'url':\r\n
        if len(param_source) != 4:\r\n
            return 'text/html', [f'Unknown call - use getbalance/plugin/login/password/date']\r\n
        param['fplugin'], param['login'], param['password'], param['date'] = param_source\r\n
    elif method == 'get':\r\n
        param = param_source\r\n
        # did all the parameters arrive?\r\n
        if len(set(param.keys()).intersection(set('plugin,login,password,date'.split(',')))) < 4:\r\n
            return 'text/html', [f'Unknown call - use get?plugin=PLUGIN&login=LOGIN&password=PASSWORD&date=DATE']\r\n
        param = {i: param_source[i][0] for i in param_source}  # in a GET request every parameter is a list\r\n
        param['fplugin'] = param['plugin']  # our 'plugin' parameter is actually fplugin\r\n
    else:\r\n
        logging.error(f'Unknown method {method}')\r\n
    pkey = store.get_pkey(param['login'], param['fplugin'])  # (param['login'], param['fplugin'])\r\n
    store.options('logginglevel', flush=True)  # called to drop the cache and re-read the ini\r\n
    phone_items = store.ini('phones.ini').phones().get(pkey, {}).items()\r\n
    individual = ','.join([f'{k}={v}' for k, v in phone_items if k.lower() in store.settings.ini['Options'].keys()])\r\n
    unused = ','.join([f'{k}={v}' for k, v in phone_items\r\n
                       if all([k.lower() not in store.settings.ini['Options'].keys(),\r\n
                               k.lower() not in store.settings.PHONE_INI_KEYS_LOWER,\r\n
                               k.lower() != 'nn' and not k.lower().endswith('_orig')])\r\n
                       ])\r\n
    individual = '' if individual == '' else f' Individual setup:{individual}'\r\n
    unused = '' if unused == '' else f' Unused param:{unused}'\r\n
    logging.info(f'Webserver thread_count={len(threading.enumerate())}')\r\n
    logging.info(f\"Start {param['fplugin']} {param['login']} {individual}{unused}\")\r\n
    # Is this a python plugin?\r\n
    if param['fplugin'].startswith(f'{lang}_'):\r\n
        # get balance\r\n
        plugin = param['fplugin'].split('_', 1)[1]  # plugin is everything after p_\r\n
        module = __import__(plugin, globals(), locals(), [], 0)\r\n
        importlib.reload(module)  # reload the module in case it has changed\r\n
        storename = re.sub(r'\\W', '_', f\"{lang}_{plugin}_{param['login']}\")\r\n
        dbengine.flags('setunic', f\"{lang}_{plugin}_{param['login']}\", 'start')  # set the 'request started' flag\r\n
        try:\r\n
            if store.option_validate('jitter')[0]:\r\n
                jitters = store.options('jitter').split(',', 1)\r\n
                # sort the two bounds ascending - random.uniform expects the lower bound first\r\n
                j_time = random.uniform(*sorted([int(jitters[0]), int(jitters[1])]))\r\n
                logging.info(f'Jitter {j_time:.2f} seconds')\r\n
                time.sleep(j_time)\r\n
            result = module.get_balance(param['login'], param['password'], storename, pkey=pkey)\r\n
            result = store.correct_and_check_result(result, pkey=pkey)\r\n
            text = store.result_to_html(result)\r\n
        except Exception:\r\n
            logging.info(f'{plugin} fail: {store.exception_text()}')\r\n
            dbengine.flags('set', f\"{lang}_{plugin}_{param['login']}\", f'error call {time.asctime()}')  # set the 'call failed' flag\r\n
            return 'text/html', [f\"Error call {param['fplugin']}<br>{store.exception_text()}\"]  # <br> separator assumed - the original markup was lost in this copy\r\n
        dbengine.flags('delete', f\"{lang}_{plugin}_{param['login']}\", 'start')  # the request finished successfully - drop the flag\r\n
        try:\r\n
            # write to the database\r\n
            dbengine.write_result_to_db(f'{lang}_{plugin}', param['login'], result)\r\n
            # refresh the data from the mdb\r\n
            dbengine.update_sqlite_from_mdb()\r\n
        except Exception:\r\n
            exception_text = f'Error while preparing DB access: {store.exception_text()}'\r\n
            logging.error(exception_text)\r\n
        try:\r\n
            # generate balance_html\r\n
            write_report()\r\n
        except Exception:\r\n
            exception_text = f'Error while preparing the report: {store.exception_text()}'\r\n
            logging.error(exception_text)\r\n
        logging.info(f\"Complete {param['fplugin']} {param['login']}\")\r\n
        return 'text/html', text\r\n
    logging.error(f\"Unknown plugin {param['fplugin']}\")\r\n
    return 'text/html', [f\"Unknown plugin {param['fplugin']}\"]\r\n
\r\n
\r\n
def view_log(param):\r\n
    try:\r\n
        lines = int(param['lines'][0])\r\n
    except Exception:\r\n
        lines = 100\r\n
    fn = store.options('logginghttpfilename')\r\n
    res = open(fn).readlines()[-lines:]\r\n
    for num in range(len(res)):\r\n
        # .replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\r\n
        if ' ERROR ' in res[num]:\r\n
            res[num] = f'<font color=\"red\">{res[num]}</font>'  # highlight tags assumed - the original markup was lost in this copy\r\n
        elif ' WARNING ' in res[num]:\r\n
            res[num] = f'<font color=\"orange\">{res[num]}</font>'\r\n
    return 'text/html; charset=cp1251', ['<pre>'] + res + ['</pre>']  # pre wrapper assumed\r\n
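\r\n
\r\n
# Request-shape sketch: /log?lines=40 (as wired in tray_menu above) arrives here\r\n
# through urllib.parse.parse_qs, i.e. every value is a list:\r\n
#   view_log({'lines': ['40']})\r\n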
\r\n
\r\n
def prepare_loglist_personal():\r\n
    'Builds the list of pairs that have screenshots'\r\n
    ss = glob.glob(store.abspath_join(store.options('loggingfolder'), '*.png'))\r\n
    all_match = [re.search(r'(.*)_\\d+\\.png', os.path.split(fn)[-1]) for fn in ss]\r\n
    all_groups = sorted(set([m.groups()[0] for m in all_match if m]))\r\n
    return all_groups\r\n
\r\n
def prepare_log_personal(prefix):\r\n
    'Builds an html log from the screenshots whose names start with prefix'\r\n
    def png_to_jpg_base64(fn):\r\n
        im = PIL.Image.open(fn)\r\n
        im = im.convert('RGB')\r\n
        f = io.BytesIO()\r\n
        im.save(f, format=\"jpeg\")\r\n
        return base64.b64encode(f.getvalue()).decode()\r\n
    ss = glob.glob(store.abspath_join(store.options('loggingfolder'), prefix + '*.png'))\r\n
    # text = [f'<img src=\"{fn}\"/>' for fn in ss]\r\n
    text = [f'<img src=\"data:image/jpeg;base64,{png_to_jpg_base64(fn)}\"/>\\n' for fn in ss]  # img tag assumed - the original markup was lost in this copy\r\n
    return '\\n'.join(text)\r\n
\r\n
def getreport(param=[]):\r\n
    'Builds the balance.html report'\r\n
    def pp_field(pkey, he, el, hover, unwanted=False, link=''):\r\n
        '''Formats a cell: colors it and drops None and zeroes in balance fields - returns a ready th/td tag\r\n
        he - header\r\n
        el - element\r\n
        pkey - the (number, operator) pair'''\r\n
        mark = ''  # class=\"mark\"\r\n
        if he == 'Balance' and el is not None and el < float(store.options('BalanceLessThen', pkey=pkey)):\r\n
            mark = ' class=\"mark\" '  # highlight when money is low\r\n
        if he == 'CalcTurnOff' and el is not None and el < int(store.options('TurnOffLessThen', pkey=pkey)):\r\n
            mark = ' class=\"mark\" '  # highlight when the balance will not last long\r\n
        if he == 'NoChangeDays' and el is not None and pkey in phones and int(el) > int(store.options('BalanceNotChangedMoreThen', pkey=pkey)):\r\n
            mark = ' class=\"mark\" '  # highlight when unchanged for too long\r\n
        if he == 'NoChangeDays' and el is not None and pkey in phones and int(el) < int(store.options('BalanceChangedLessThen', pkey=pkey)):\r\n
            mark = ' class=\"mark\" '  # highlight when it changed recently but should not have\r\n
        if he == 'UslugiOn' and el is not None and unwanted:\r\n
            mark = ' class=\"mark\" '  # highlight when the service list contains unwanted subscriptions\r\n
        if el is None:\r\n
            el = ''\r\n
        if he != 'Balance' and (el == 0.0 or el == 0) and mark == '':\r\n
            el = ''\r\n
        if type(el) == float:\r\n
            el = f'{el:.2f}'  # round(el, 2)\r\n
        if hover != '':\r\n
            el = f'<div class=\"hover\">{el}<span>{hover}</span></div>'  # hover markup assumed (lost in this copy); styled through HoverCss below\r\n
        if link != '':\r\n
            el = f'<a href=\"{link}\">{el}</a>'  # link tag assumed\r\n
        return f'<{\"th\" if he==\"NN\" else \"td\"} id=\"{he}\"{mark}>{el}'\r\n
    store.options('logginglevel', flush=True)  # called to drop the cache and re-read the ini\r\n
    template_page = settings.table_template['page']\r\n
    template_history = settings.table_template['history']\r\n
    template_style = settings.table_template['style']\r\n
    html_script = settings.table_template['script']\r\n
    db = dbengine.Dbengine()\r\n
    flags = dbengine.flags('getall')  # all the flags as a dict\r\n
    responses = dbengine.responses()  # all the stored query responses\r\n
    # numbers, providers and logins from phones.ini\r\n
    num_format = '' if len(param) == 0 or not param[0].isnumeric() else str(int(param[0]))\r\n
    groups = [p.replace('group_', '').lower() for p in param if p.startswith('group_')]\r\n
    table_format = store.options('table_format' + num_format, default=store.options('table_format', section='HttpServer'), section='HttpServer')\r\n
    table = db.report()\r\n
    phones = store.ini('phones.ini').phones()\r\n
    if 'Alias' not in table_format:\r\n
        table_format = 'NN,Alias,' + table_format  # old ini files lack these columns - add them\r\n
    table = [i for i in table if i['Alias'] != 'Unknown']  # filter Unknown\r\n
    table.sort(key=lambda i: [i['NN'], i['Alias']])  # sort by NN, then by Alias\r\n
    header = [i.strip() for i in table_format.split(',')]\r\n
    # css classes for formatting the header\r\n
    header_class = {'Balance': 'p_b', 'RealAverage': 'p_r', 'BalDelta': 'p_r', 'BalDeltaQuery': 'p_r', 'NoChangeDays': 'p_r', 'CalcTurnOff': 'p_r', 'MinAverage': 'p_r', }\r\n
    html_header = ''.join([f'<th class=\"{header_class.get(h, \"\")}\">{dbengine.PhonesHText.get(h, h)}</th>' for h in header])  # th markup assumed (lost in this copy) - header_class is otherwise unused\r\n
    html_table = []\r\n
    for line in table:\r\n
        html_line = []\r\n
        pkey = store.get_pkey(line['PhoneNumber'], line['Operator'])\r\n
        # Group of numbers (Indication) - use /group_aaa/group_bbb in url\r\n
        if len(groups) > 0 and phones[pkey].get('indication', '').lower() not in groups:\r\n
            continue\r\n
        uslugi = json.loads(responses.get(f\"{line['Operator']}_{line['PhoneNumber']}\", '{}')).get('UslugiList', '')\r\n
        subscription_keyword = [i.strip() for i in store.options('subscription_keyword', pkey=pkey).lower().split(',')]\r\n
        unwanted_kw = [kw for kw in subscription_keyword if kw in uslugi.lower()]  # the unwanted keywords actually present\r\n
        for he in header:\r\n
            if he not in line:\r\n
                continue\r\n
            hover, link = '', ''\r\n
            if he == 'Alias':\r\n
                if str(store.options('htmlreportoperatorlink')) == '1':\r\n
                    link = settings.operator_link.get(line['Operator'], '')\r\n
            if he == 'UslugiOn':  # the services cell gets a hover with the service list\r\n
                if uslugi != '':\r\n
                    h_html_header = '<th>Услуга</th><th>р/мес</th>'  # th markup assumed\r\n
                    h_html_table = []\r\n
                    for h_line in [li.split('\\t', 1) for li in sorted(uslugi.split('\\n')) if '\\t' in li]:\r\n
                        txt = h_line[0].replace(\" \", \"&nbsp;&nbsp;\")  # &nbsp; entities assumed - they were decoded in this copy\r\n
                        bal = f'{float(h_line[1]):.2f}' if re.match(r'^ *-?\\d+(?:\\.\\d+)? *$', h_line[1]) else h_line[1]\r\n
                        h_html_line = f'<td>{txt}</td><td>{bal}</td>'  # td markup assumed\r\n
                        u_classflag = 'n'\r\n
                        if len(unwanted_kw) > 0 and len([kw for kw in unwanted_kw if kw in h_line[0].lower()]) > 0:\r\n
                            u_classflag = 'e_us'\r\n
                        h_html_table.append(f'<tr class=\"{u_classflag}\">{h_html_line}</tr>')  # tr markup assumed - u_classflag is otherwise unused\r\n
                    hover = template_history.format(h_header=f\"Список услуг по {line['Alias']}\", html_header=h_html_header, html_table='\\n'.join(h_html_table))\r\n
            if he == 'Balance':  # the balance cell gets a hover with the request history\r\n
                history = db.history(line['PhoneNumber'], line['Operator'], days=int(store.options('RealAverageDays', pkey=pkey)), lastonly=int(store.options('ShowOnlyLastPerDay', pkey=pkey)))\r\n
                if history != []:\r\n
                    h_html_header = ''.join([f'<th>{dbengine.PhonesHText.get(h, h)}</th>' for h in history[0].keys()])  # th markup assumed\r\n
                    h_html_table = []\r\n
                    for h_line in history:\r\n
                        h_html_line = ''.join([pp_field(pkey, h, v, '') for h, v in h_line.items()])\r\n
                        h_html_table.append(f'<tr>{h_html_line}</tr>')  # tr markup assumed\r\n
                    hover = template_history.format(h_header=f\"История запросов по {line['Alias']}\", html_header=h_html_header, html_table='\\n'.join(h_html_table))\r\n
            html_line.append(pp_field(pkey, he, line[he], hover, unwanted=(len(unwanted_kw) > 0), link=link))  # append ...\r\n
        classflag = 'n'  # color the rows - errors red, the current one green, still queued gray, etc.\r\n
        if flags.get(f\"{line['Operator']}_{line['PhoneNumber']}\", '').startswith('error'):\r\n
            classflag = 'e_us'\r\n
        if flags.get(f\"{line['Operator']}_{line['PhoneNumber']}\", '').startswith('start'):\r\n
            classflag = 's_us'\r\n
        if flags.get(f\"{line['Operator']}_{line['PhoneNumber']}\", '').endswith('queue'):\r\n
            classflag = 'n_us'\r\n
        html_table.append(f'<tr class=\"{classflag}\">{\"\".join(html_line)}</tr>')  # tr markup assumed - classflag is otherwise unused\r\n
    template_style = template_style.replace('{HoverCss}', store.options('HoverCss'))  # HoverCss is page-wide, so read it without pkey\r\n
    res = template_page.format(style=template_style, html_header=html_header, html_table='\\n'.join(html_table), title=store.version(), html_script=html_script)\r\n
    return 'text/html', [res]\r\n
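\r\n
\r\n
# URL-shape sketch (hypothetical group name): getreport() serves /report; numeric path parts\r\n
# pick an alternative table_formatN from the ini and group_xxx parts filter by indication,\r\n
# so /report/2/group_home comes in as:\r\n
#   ct, html = getreport(['2', 'group_home'])\r\n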
\r\n
\r\n
def write_report():\r\n
    'Saves the balance_html report when createhtmlreport=1 in the ini'\r\n
    store.turn_logging(httplog=True)  # we may be called from outside, so turn logging on here\r\n
    try:\r\n
        if str(store.options('createhtmlreport')) == '1':\r\n
            balance_html = store.options('balance_html')\r\n
            logging.info(f'Creating {balance_html}')\r\n
            _, res = getreport()\r\n
            open(balance_html, encoding='cp1251', mode='w').write('\\n'.join(res))\r\n
    except Exception:\r\n
        logging.error(f'Error generating balance_html {store.exception_text()}')\r\n
\r\n
\r\n
def filter_balance(table: typing.List[typing.Dict], filter: str = 'FULL', params: typing.Dict = {}) -> typing.List[typing.Dict]:\r\n
    ''' Filter the report data\r\n
    filter = FULL - all phones, LASTDAYCHANGE - changed during the day, LASTCHANGE - changed in the last request\r\n
    params['include'] = None - everything, or a comma-separated list of aliases/logins (or any unique part of them) to include in the result\r\n
    params['exclude'] = None - everything, or a comma-separated list of aliases/logins (or any unique part of them) to exclude from the result'''\r\n
    flags = dbengine.flags('getall')\r\n
    # filter_include - keep only the lines that match the filter\r\n
    # from send_subscriptions params like {'id':'123456', 'include':'1111,2222'}\r\n
    if params.get('include', None) is not None:\r\n
        filter_include = [re.sub(r'\\W', '', el).lower() for el in params['include'].split(',')]\r\n
        table = [line for line in table if len([1 for i in filter_include if i in re.sub(r'\\W', '', ('_'.join(map(str, line.values())) + '__' + line.get('Operator', '') + '_' + line.get('PhoneNumber', '') + '__').lower())]) > 0]\r\n
    # filter_exclude - drop the lines that match the filter\r\n
    if params.get('exclude', None) is not None:\r\n
        filter_exclude = [re.sub(r'\\W', '', el).lower() for el in params['exclude'].split(',')]\r\n
        table = [line for line in table if len([1 for i in filter_exclude if i in re.sub(r'\\W', '', '_'.join(map(str, line.values())).lower())]) == 0]\r\n
    if filter == 'LASTCHANGE':  # TODO: make an ini option for the line['Balance'] checks\r\n
        # Balance==0 is most likely a glitch of the check, so it is excluded\r\n
        # BalDeltaQuery==Balance is also excluded - that is the bounce back from a bogus zero\r\n
        # BUG: line['Operator'] and line['PhoneNumber'] raise KeyError when the report comes through MobileBalance:\r\n
        # hence the .get crutch, which simply makes this condition never match there\r\n
        table = [line for line in table\r\n
                 if any([\r\n
                     all([line['BalDeltaQuery'] != 0,\r\n
                          line['Balance'] != 0,\r\n
                          line['BalDeltaQuery'] != line['Balance'],\r\n
                          line['BalDeltaQuery'] != '',\r\n
                          line['Balance'] != '']),\r\n
                     flags.get(f\"{line.get('Operator', '')}_{line.get('PhoneNumber', '')}\", '').startswith('error')])\r\n
                 ]\r\n
    elif filter == 'LASTDAYCHANGE':\r\n
        table = [line for line in table if line['BalDelta'] != 0 and line['Balance'] != 0]\r\n
        table = [line for line in table if line['BalDelta'] != '' and line['Balance'] != '']\r\n
    return table\r\n
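\r\n
\r\n
# Filter sketch (hypothetical values), mirroring the subscription lines in mbplugin.ini\r\n
# (subscriptionXXX = id:123456 include:1111,2222 exclude:6666):\r\n
#   table = filter_balance(table, 'LASTCHANGE', {'include': '1111,2222', 'exclude': '6666'})\r\n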
\r\n
\r\n
def prepare_balance_mobilebalance(filter: str = 'FULL', params: typing.Dict = {}):\r\n
    \"\"\"Builds the telegram text from the html page served by the mobilebalance web server\r\n
    \"\"\"\r\n
    phones = store.ini('phones.ini').phones()\r\n
    phones_by_num = {v['NN']: v for v in phones.values()}\r\n
    url = store.options('mobilebalance_http', section='Telegram')\r\n
    tgmb_format = store.options('tgmb_format', section='Telegram')\r\n
    response1_text = requests.get(url).content.decode('cp1251')\r\n
    # no table in the response ('Введите пароль' is the password prompt of the mobilebalance page)\r\n
    if 'Введите пароль' in response1_text or '<table' not in response1_text:  # the second check is assumed - the original condition was garbled in this copy\r\n
        return ''\r\n
    # NOTE: the original parsing of the mobilebalance html (the #num table) into `table` was lost in this copy;\r\n
    # the rows are expected as dicts with the fields used by tgmb_format\r\n
    table: typing.List[typing.Dict] = []\r\n
    table2 = filter_balance(table, filter, params)\r\n
    res = [tgmb_format.format(**line) for line in table2]  # type: ignore\r\n
    return '\\n'.join(res)\r\n
\r\n
\r\n
def prepare_balance_sqlite(filter: str = 'FULL', params: typing.Dict = {}):\r\n
    'Builds the report data from the sqlite database'\r\n
    def alert_suffix(line):\r\n
        pkey = store.get_pkey(line['PhoneNumber'], line['Operator'])\r\n
        uslugi = json.loads(responses.get(f\"{line['Operator']}_{line['PhoneNumber']}\", '{}')).get('UslugiList', '')\r\n
        if flags.get(f\"{line['Operator']}_{line['PhoneNumber']}\", '').startswith('error'):\r\n
            return f' ! последняя попытка получить баланс завершилась ошибкой !'\r\n
        if line['Balance'] is not None and line['Balance'] < float(store.options('BalanceLessThen', pkey=pkey)):\r\n
            return f' ! достигнут порог баланса !'\r\n
        if line['CalcTurnOff'] is not None and line['CalcTurnOff'] < int(store.options('TurnOffLessThen', pkey=pkey)):\r\n
            return f\" ! возможно скорое отключение - {line['CalcTurnOff']} дней !\"\r\n
        if line['NoChangeDays'] is not None and pkey in phones and line['NoChangeDays'] > int(store.options('BalanceNotChangedMoreThen', pkey=pkey)):\r\n
            return f\" ! баланс не изменялся более {store.options('BalanceNotChangedMoreThen', pkey=pkey)} дней !\"\r\n
        if line['NoChangeDays'] is not None and pkey in phones and line['NoChangeDays'] < int(store.options('BalanceChangedLessThen', pkey=pkey)):\r\n
            return f\" ! баланс изменился менее {store.options('BalanceChangedLessThen', pkey=pkey)} дней назад!\"\r\n
        if line['UslugiOn'] is not None:\r\n
            unwanted_kw = [kw.strip() for kw in store.options('subscription_keyword', pkey=pkey).split(',') if kw.strip() in uslugi]\r\n
            if len(unwanted_kw) > 0:\r\n
                unwanted = '\\n'.join([line for line in uslugi.split('\\n') if len([kw for kw in unwanted_kw if kw in line]) > 0])\r\n
                return f\" ! В списке услуг присутствуют нежелательные: {unwanted}!\"\r\n
        return ''\r\n
\r\n
    db = dbengine.Dbengine()\r\n
    table_format = store.options('tg_format', section='Telegram').replace('\\\\t', '\\t').replace('\\\\n', '\\n')\r\n
    phones = store.ini('phones.ini').phones()\r\n
    flags = dbengine.flags('getall')\r\n
    responses = dbengine.responses()\r\n
    table = db.report()\r\n
    # table_format = 'Alias,PhoneNumber,Operator,Balance'\r\n
    # When the format is given as a comma-separated field list - convert it into a format string\r\n
    if re.match(r'^(\\w+(?:,|\\Z))*$', table_format.strip()):\r\n
        table_format = ' '.join([f'{{{i.strip()}}}' for i in table_format.split(',')])\r\n
    table = [i for i in table if i['Alias'] != 'Unknown']  # filter Unknown\r\n
    table.sort(key=lambda i: [i['NN'], i['Alias']])  # sort by NN, then by Alias\r\n
    table = filter_balance(table, filter, params)\r\n
    res = [table_format.format(**line) + alert_suffix(line) for line in table]\r\n
    return '\\n'.join(res)\r\n
\r\n
\r\n
def prepare_balance(filter: str = 'FULL', params: typing.Dict = {}):\r\n
    \"\"\"Prepare the balance text for TG: depending on the tg_from option (sqlite or mobilebalance) dispatch to\r\n
    prepare_balance_sqlite - build the report data from the sqlite database\r\n
    prepare_balance_mobilebalance - build the text from the html page served by the mobilebalance web server\r\n
    \"\"\"\r\n
    try:\r\n
        baltxt = ''\r\n
        if store.options('tg_from', section='Telegram') == 'sqlite':\r\n
            baltxt = prepare_balance_sqlite(filter, params)\r\n
        else:\r\n
            baltxt = prepare_balance_mobilebalance(filter, params)\r\n
        if baltxt == '' and str(store.options('send_empty', section='Telegram')) == '1':\r\n
            baltxt = 'No changes'\r\n
        return baltxt\r\n
    except Exception:\r\n
        exception_text = f'Error: {store.exception_text()}'\r\n
        logging.error(exception_text)\r\n
        return 'error'\r\n
\r\n
\r\n
def send_telegram_over_requests(text=None, auth_id=None, filter: str = 'FULL', params: typing.Dict = {}):\r\n
    \"\"\"Send a telegram message through plain requests, without python-telegram-bot\r\n
    Useful when something is wrong with the bot, or when the bot runs on one machine\r\n
    and another machine also wants to push info into it\r\n
    text - the message; when omitted, the balances of the phones that changed are sent\r\n
    auth_id - a comma-separated list of ids to send to; when omitted, the list from mbplugin.ini is used\r\n
    \"\"\"\r\n
    store.switch_to_mb_mode()\r\n
    store.turn_logging(httplog=True)  # we may be called from outside, so turn logging on here\r\n
    if text is None:\r\n
        text = prepare_balance(filter, params)\r\n
    api_token = store.options('api_token', section='Telegram', mainparams=params).strip()\r\n
    if len(api_token) == 0:\r\n
        logging.info('Telegram api_token not found')\r\n
        return\r\n
    if auth_id is None:\r\n
        auth_id = list(map(int, store.options('auth_id', section='Telegram', mainparams=params).strip().split(',')))\r\n
    else:\r\n
        auth_id = list(map(int, str(auth_id).strip().split(',')))\r\n
    r = [requests.post(f'https://api.telegram.org/bot{api_token}/sendMessage', data={'chat_id': chat_id, 'text': text, 'parse_mode': 'HTML'}) for chat_id in auth_id if text != '']\r\n
    return [repr(i) for i in r]\r\n
\r\n
\r\n
def restart_program(reason='', exit_only=False, delay=0):\r\n
    'Restart or exit with delay'\r\n
    time.sleep(delay)\r\n
    cmd = psutil.Process().cmdline()\r\n
    filename_pid = store.abspath_join(store.options('storefolder'), 'web-server.pid')\r\n
    # The fix below is needed because util.py chdirs to another folder and relative paths break\r\n
    # cmd = [(os.path.abspath('util.py') if i.endswith('util.py') else i) for i in cmd]\r\n
    logging.info(f'{\"Exit\" if exit_only else \"Restart\"} by {reason} with cmd:{subprocess.list2cmdline(cmd)}')\r\n
    TrayIcon().stop()\r\n
    if os.path.exists(filename_pid):\r\n
        with open(filename_pid) as f:\r\n
            pid_from_file = int(f.read())\r\n
        if pid_from_file == os.getpid():\r\n
            os.remove(filename_pid)\r\n
    if not exit_only:\r\n
        subprocess.Popen(cmd)  # Cross platform run process\r\n
    psutil.Process().kill()\r\n
    if Q_CMD_EXIT not in cmdqueue.queue:  # do not enqueue it twice\r\n
        cmdqueue.put(Q_CMD_EXIT)  # if kill did not work (it will not for pid=1) - send the signal\r\n
\r\n
def cancel_query(reason=''):\r\n
    'Cancel query in getbalance_standalone_one_pass by Q_CMD_CANCEL'\r\n
    logging.info('Press Cancel')\r\n
    if Q_CMD_CANCEL not in cmdqueue.queue:  # do not enqueue it twice\r\n
        cmdqueue.put(Q_CMD_CANCEL)\r\n
    logging.info('Send cancel signal to query')\r\n
\r\n
def send_http_signal(cmd, force=True):\r\n
    'Send a signal to the local web server'\r\n
    logging.info(f'Send {cmd} signal to web server')\r\n
    filename_pid = store.abspath_join(store.options('storefolder'), 'web-server.pid')\r\n
    if not os.path.exists(filename_pid) and not force:\r\n
        return\r\n
    port = int(store.options('port', section='HttpServer'))\r\n
    try:\r\n
        return requests.get(f'http://localhost:{port}/{cmd}', timeout=1).content.decode('cp1251')\r\n
    except Exception:\r\n
        pass\r\n
    # What follows force-kills the process when the web server did not stop on its own\r\n
    if not (cmd == 'exit' and force):\r\n
        return\r\n
    for i in range(50):  # wait for the server to stop\r\n
        if os.path.exists(filename_pid):\r\n
            time.sleep(0.1)\r\n
    if os.path.exists(filename_pid):\r\n
        with open(filename_pid) as f:\r\n
            pid = int(f.read())  # was re-opening the file instead of using f\r\n
        if not psutil.pid_exists(pid):\r\n
            return\r\n
        proc = psutil.Process(pid)\r\n
        if len([c for c in proc.connections() if c.status == 'LISTEN' and c.laddr.port == port]) > 0:\r\n
            proc.kill()\r\n
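\r\n
\r\n
# Shutdown sketch: this is how '--cmd stop' (see main() below) asks a running instance to\r\n
# terminate; force=True escalates to killing the process still listening on the port:\r\n
#   send_http_signal(cmd='exit', force=True)\r\n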
\r\n
\r\n
class TrayIcon:\r\n
    'Keeps the icon in a class attribute: repeated calls reuse the existing icon instead of creating a new one'\r\n
    icon = None\r\n
\r\n
    def __init__(self):\r\n
        if str(store.options('show_tray_icon')) != '1' or 'pystray' not in sys.modules:\r\n
            return\r\n
        if TrayIcon.icon is None:\r\n
            print('pystray tray menu')\r\n
            threading.Thread(target=self._create, name='TrayIcon', daemon=True).start()\r\n
            logging.info('Tray icon started')\r\n
        else:\r\n
            self.icon = TrayIcon.icon\r\n
\r\n
    def _create(self):\r\n
        if sys.platform != 'win32':\r\n
            return\r\n
        icon_fn = store.abspath_join('mbplugin', 'plugin', 'httpserver.ico')\r\n
        self.image = PIL.Image.open(icon_fn)\r\n
        items = []\r\n
        for item in tray_menu():\r\n
            if item['show']:\r\n
                items.append(pystray.MenuItem(item['text'], item['cmd'], default=(len(items) + 1 == int(store.options('tray_default')))))\r\n
        self.menu = pystray.Menu(*items)\r\n
        host = store.options('host', section='HttpServer')\r\n
        port = int(store.options('port', section='HttpServer'))\r\n
        self.icon = pystray.Icon('mbplugin', icon=self.image, title=f\"Mbplugin {store.version()} ({host}:{port})\", menu=self.menu)\r\n
        TrayIcon.icon = self.icon\r\n
        self.icon.run()\r\n
\r\n
    def stop(self):\r\n
        print('STOP')\r\n
        if self.icon is not None:\r\n
            self.icon.visible = False\r\n
            self.icon.stop()\r\n
\r\n
\r\n
class Scheduler():\r\n
    '''Scheduling support\r\n
    check_only - pass True if the scheduler must not be started by the first call'''\r\n
    instance = None\r\n
    # For the schedule formats see https://schedule.readthedocs.io\r\n
    # schedule2 = every().day.at(\"10:30\"),megafon\r\n
    # there may be several job lines; they can go into the ini as\r\n
    # scheduler= ... scheduler1=... etc., the same way it is done with table_format\r\n
\r\n
    def __init__(self, check_only=False) -> None:\r\n
        if Scheduler.instance is None and not check_only:\r\n
            self._scheduler_running = True  # flag: the scheduler is running\r\n
            self._job_running = False  # flag: a job is being executed right now\r\n
            self.thread = threading.Thread(target=self._forever, name='Scheduler', daemon=True)\r\n
            self.thread.start()\r\n
            Scheduler.instance = self\r\n
            logging.info('Scheduler started')\r\n
            self.reload()\r\n
\r\n
    def _forever(self):\r\n
        while True:\r\n
            try:\r\n
                schedule.run_pending()\r\n
            except Exception:\r\n
                print('Schedule fail')\r\n
            time.sleep(1)\r\n
            if not self._scheduler_running:\r\n
                break\r\n
\r\n
    def _run(self, cmd, once=False, kwargs={}):\r\n
        '''Runs a job; it is the _run calls that get scheduled,\r\n
        never call it directly\r\n
        once - drop the job after it has run\r\n
        kwargs - passed in AS a dict, without **'''\r\n
        self._job_running = True\r\n
        current_job = [job for job in schedule.jobs if job.should_run][0]\r\n
        if cmd.endswith('_once'):\r\n
            once = True\r\n
            cmd = cmd.replace('_once', '')\r\n
        try:\r\n
            if cmd == CMD_CHECK or cmd == CMD_CHECK_SEND:\r\n
                getbalance_standalone(**kwargs)\r\n
                baltxt = prepare_balance('FULL', params=kwargs.get('params', {}))\r\n
                store.feedback.text(baltxt)\r\n
                # Send to the recipients listed in the ini\r\n
                if TelegramBot.instance is not None and cmd == CMD_CHECK_SEND:\r\n
                    TelegramBot.instance.send_balance()\r\n
                    TelegramBot.instance.send_subscriptions()\r\n
            elif cmd == CMD_GET_ONE:\r\n
                get_full_info_one_number(**kwargs)\r\n
            elif cmd == CMD_CHECK_NEW_VERSION:\r\n
                if TelegramBot.instance is not None:\r\n
                    ue = updateengine.UpdaterEngine()\r\n
                    if ue.check_update():\r\n
                        msg = f'Найдена новая версия\\n' + '\\n'.join(ue.latest_version_info(short=True))\r\n
                        TelegramBot.instance.send_message(msg)\r\n
            elif cmd == CMD_PING:\r\n
                if TelegramBot.instance is not None:\r\n
                    msg = ' '.join(kwargs['filter']).strip()\r\n
                    TelegramBot.instance.send_message('ping' if msg == '' else msg)\r\n
            else:\r\n
                logging.error(f'Scheduler: Unknown command {cmd}: {store.exception_text()}')\r\n
            store.feedback.unset()  # unset the feedback once the job has been processed\r\n
        except Exception:\r\n
            logging.info(f'Scheduler: Error while run job {current_job}: {store.exception_text()}')\r\n
        self._job_running = False\r\n
        if once:\r\n
            return schedule.CancelJob\r\n
\r\n
    def job_is_running(self):\r\n
        return Scheduler.instance._job_running\r\n
\r\n
    def run_once(self, cmd, delay: int = 1, feedback_func: typing.Callable = None, kwargs={}) -> bool:\r\n
        '''Schedule a command for a single run\r\n
        cmd - a command for _run (check, check_send, get_one, check_new_version, ping, etc.)\r\n
        delay - postpone the start by N seconds\r\n
        feedback_func - a status-reporting callback: when possible it is installed as feedback, otherwise the refusal is written into it\r\n
        kwargs - the arguments for cmd as a dict, not **\r\n
        returns True when scheduled and False when busy\r\n
        scheduling a once job immediately blocks scheduling another one,\r\n
        so that the feedback does not get mixed up'''\r\n
        if Scheduler.instance is not None and not Scheduler().job_is_running():\r\n
            Scheduler.instance._job_running = True  # set the busy flag right away so a second job cannot sneak in behind this one\r\n
            schedule.every(delay).seconds.do(Scheduler.instance._run, cmd=cmd, once=True, kwargs=kwargs)\r\n
            if feedback_func is not None:\r\n
                store.feedback.set(feedback_func)\r\n
            return True\r\n
        else:\r\n
            if feedback_func is not None:\r\n
                feedback_func('Одно из заданий сейчас выполняется, попробуйте позже')\r\n
            return False\r\n
\r\n
    def _validate_sched(self, sched) -> schedule.Job:\r\n
        'Validates a single schedule string and returns it as a job'\r\n
        # every(4).day.at(\"10:30\")\r\n
        m = re.match(r'^every\\((?P<every>\\d*)\\)(\\.to\\((?P<to>\\d+)\\))?\\.(?P<interval>\\w*)(\\.at\\(\"(?P<at>.*)\"\\))?$', sched.strip())  # group names restored - they were lost in this copy\r\n
        try:\r\n
            if not m:\r\n
                raise ValueError(sched)\r\n
            # every(4).hours,mts,beeline -> {'every': '4', 'interval': 'hours', 'at': None}\r\n
            param = m.groupdict()\r\n
            param['every'] = int(param['every']) if param['every'].isdigit() else 1\r\n
            job = schedule.every(int(param['every']))\r\n
            if param['to'] is not None:\r\n
                job = job.to(int(param['to']))\r\n
            job = getattr(job, param.get('interval', ''))\r\n
            if param['at'] is not None:\r\n
                job = job.at(param['at'])\r\n
            return job\r\n
        except Exception:\r\n
            logging.error(f'Error parse {sched}')\r\n
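\r\n
    # Examples of accepted schedule strings (per the format notes at the top of the class):\r\n
    #   self._validate_sched('every().day.at(\"10:30\")')  # daily at 10:30\r\n
    #   self._validate_sched('every(2).to(5).hours')       # every 2..5 hours\r\n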
\r\n
    def read_from_ini(self) -> typing.List[Job]:\r\n
        'Reads the schedule from the ini with diagnostics'\r\n
        schedules = store.options('schedule', section='HttpServer', listparam=True, flush=True)\r\n
        jobs = []\r\n
        for schedule_str in schedules:\r\n
            err_msg = []\r\n
            job_has_errors = False\r\n
            cmd, filter = None, None\r\n
            if len(schedule_str.split(',')) < 2:\r\n
                err_msg.append(f'Bad schedule \"{schedule_str}\", cmd not found skipped')\r\n
                job_has_errors = True\r\n
            if not job_has_errors:\r\n
                sched = schedule_str.split(',')[0].strip()\r\n
                cmd = schedule_str.split(',')[1].strip().lower()\r\n
                filter = [i.strip() for i in schedule_str.split(',')[2:]]\r\n
                job_sched = self._validate_sched(sched)\r\n
                if job_sched is None:\r\n
                    err_msg.append(f'Bad schedule \"{schedule_str}\", error parse job, skipped')\r\n
                    job_has_errors = True\r\n
                if cmd not in SCHED_CMDS and cmd.replace('_once', '') not in SCHED_CMDS:\r\n
                    err_msg.append(f'Bad cmd {cmd} in schedule \"{schedule_str}\", skipped')\r\n
                    job_has_errors = True\r\n
            if job_has_errors:\r\n
                job_sched = None\r\n
            jobs.append(Job(job_str=schedule_str, job_sched=job_sched, cmd=cmd, filter=filter, err_msg=', '.join(err_msg)))\r\n
        return jobs\r\n
\r\n
    def _reload(self):\r\n
        'the method that actually runs on the instance that owns _forever'\r\n
        schedule.clear()\r\n
        jobs = self.read_from_ini()\r\n
        for job in jobs:\r\n
            if job.job_sched is not None:\r\n
                job.job_sched.do(self._run, cmd=job.cmd, kwargs={'filter': job.filter})\r\n
            else:\r\n
                logging.info(job.err_msg)\r\n
        logging.info('Schedule was reloaded')\r\n
        return 'OK'\r\n
\r\n
    def reload(self):\r\n
        'Re-reads the schedule from the ini'\r\n
        Scheduler.instance._reload()\r\n
\r\n
    def view_html(self) -> typing.Tuple[str, typing.List[str]]:\r\n
        'All the jobs as an html page'\r\n
        return 'text/html; charset=cp1251', ['<pre>', self.view_txt(), '</pre>']  # pre wrapper assumed - the original markup was lost in this copy\r\n
\r\n
    def view_txt(self) -> str:\r\n
        'All the jobs as text'\r\n
        jobs = self.read_from_ini()\r\n
        err_jobs = [f'{job.err_msg}\\n{job.job_str}' for job in jobs if job.err_msg != '']\r\n
        # TODO !!! the schedules (jobs[n].job_sched, which has no repr) need to be matched up with the schedule.jobs entries\r\n
        res = '\\n'.join(err_jobs) + ('\\n\\n' if err_jobs != [] else '') + '\\n'.join(map(repr, schedule.jobs))\r\n
        return res + ' '\r\n
\r\n
    def stop(self):\r\n
        'Stops the scheduler'\r\n
        Scheduler.instance._scheduler_running = False\r\n
\r\n
\r\n
def auth_decorator(errmsg=None, nonauth: typing.Callable = None):\r\n
    'To show an unauthorized user an error message, pass it as errmsg; to route unauthorized calls to another method, pass it as nonauth'\r\n
    def decorator(func):  # pylint: disable=no-self-argument\r\n
        def wrapper(self, update: telegram.update.Update, context):\r\n
            # CallbackQueryHandler has no update.message.chat_id, so we go through update.effective_chat.id:\r\n
            if update is None or update.effective_chat is None or update.effective_message is None:\r\n
                return\r\n
            if update.effective_chat.id in self.auth_id():\r\n
                if update is not None and update.effective_message is not None:\r\n
                    logging.info(f'TG auth:{update.effective_chat.id} {update.effective_message.text}')\r\n
                res = func(self, update, context)  # pylint: disable=not-callable\r\n
                return res\r\n
            elif nonauth is not None:\r\n
                nonauth(self, update, context)\r\n
            else:\r\n
                if errmsg is not None:\r\n
                    update.effective_message.reply_text(errmsg)\r\n
                logging.info(f'TG:{update.effective_chat.id} unauthorized {update.effective_message.text}')\r\n
        return wrapper\r\n
    return decorator\r\n
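\r\n
\r\n
# Usage sketch: the TelegramBot handlers below wrap themselves as\r\n
#   @auth_decorator(errmsg='/help\\n/id')\r\n
#   def handler(self, update, context): ...\r\n
# an unauthorized chat id either gets errmsg, is routed to nonauth, or is just logged.\r\n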
\r\n
\r\n
class TelegramBot():\r\n
\r\n
    # TODO make singleton class with __new__\r\n
    instance = None  # once the class is created, a reference to the instance is stored here\r\n
\r\n
    def __init__(self):\r\n
        if 'telegram' not in sys.modules:\r\n
            return  # no TG module - just leave\r\n
        # TgCommand: for commands type(func) != str, for cmd_alias type(func) == str\r\n
        self.updater = None\r\n
        TgCommand = collections.namedtuple('TgCommand', 'name, description, func')\r\n
        commands_list: typing.List[TgCommand] = [\r\n
            TgCommand('/help', 'справка', self.get_help),\r\n
            TgCommand('/id', 'узнать id профиля', self.get_id),\r\n
            TgCommand('/balance', 'текущий баланс', self.get_balancetext),\r\n
            TgCommand('/balancefile', 'текущий баланс файлом', self.get_balancefile),\r\n
            TgCommand('/receivebalance', 'запросить балансы, аналог команды mbp get-balance (фильтр после пробела)', self.receivebalance),\r\n
            TgCommand('/receivebalancefailed', 'запросить балансы номеров с ошибками', self.receivebalance),\r\n
            TgCommand('/restart', 'перезапустить сервер', self.restartservice),\r\n
            TgCommand('/cancel', 'остановить очередь запросов', self.cancel),\r\n
            TgCommand('/getone', 'получить баланс одного номера', self.get_one),\r\n
            TgCommand('/checkone', 'запросить баланс одного номера', self.get_one),\r\n
            TgCommand('/schedule', 'текущие задачи в планировщике', self.get_schedule),\r\n
            TgCommand('/schedulereload', 'перезагрузка расписания', self.get_schedule),\r\n
            TgCommand('/getlog', 'отобразить лог', self.get_log),\r\n
        ]\r\n
        self.commands: typing.Dict[str, TgCommand] = {cmd.name: cmd for cmd in commands_list}\r\n
        # Read the command aliases\r\n
        for line in store.options('cmd_alias', section='Telegram', listparam=True):\r\n
            try:\r\n
                name, description, func = line.split(':', 2)  # name:description:func (maxsplit=2 yields exactly three parts)\r\n
                alias = TgCommand(re.sub('^//', '/', f'/{name.strip()}'), description, re.sub('^//', '/', f'/{func.strip()}'))\r\n
                self.commands[alias.name] = alias\r\n
            except Exception:\r\n
                logging.warning(f'Wrong tg alias {line}')\r\n
        self.start_bot()\r\n
        self.add_bot_menu()\r\n
\r\n
    def start_bot(self):\r\n
        'Starts the bot'\r\n
        api_token = store.options('api_token', section='Telegram').strip()\r\n
        request_kwargs = {}\r\n
        tg_proxy = store.options('tg_proxy', section='Telegram').strip()\r\n
        if tg_proxy.lower() == 'auto':\r\n
            request_kwargs['proxy_url'] = urllib.request.getproxies().get('https', '')\r\n
        elif tg_proxy != '' and tg_proxy.lower() != 'auto':\r\n
            request_kwargs['proxy_url'] = tg_proxy\r\n
        # ??? needed or not?\r\n
        # request_kwargs['urllib3_proxy_kwargs'] = {'assert_hostname': 'False', 'cert_reqs': 'CERT_NONE'}\r\n
        if api_token != '' and str(store.options('start_tgbot', section='Telegram')) == '1' and 'telegram' in sys.modules:\r\n
            try:\r\n
                logging.info(f'Module telegram starting for id={list(self.auth_id())}')\r\n
                self.updater = Updater(api_token, use_context=True, request_kwargs=request_kwargs)\r\n
                logging.info(f'{self.updater}')\r\n
                for cmd in self.commands.values():\r\n
                    if type(cmd.func) != str:  # commands only\r\n
                        # handlers take the name without the slash: '/help' -> 'help', hence [1:]\r\n
                        self.updater.dispatcher.add_handler(CommandHandler(cmd.name[1:], cmd.func))\r\n
                self.updater.dispatcher.add_handler(CallbackQueryHandler(self.button))\r\n
                self.updater.dispatcher.add_handler(MessageHandler(Filters.all, self.handle_catch_all))\r\n
                self.updater.start_polling()  # Start the Bot\r\n
                logging.info('Telegram bot started')\r\n
                TelegramBot.instance = self  # the bot is running - store the singleton instance\r\n
                if str(store.options('send_empty', section='Telegram')) == '1':\r\n
                    self.send_message(text='Hey there!', disable_notification=True)\r\n
            except Exception:\r\n
                exception_text = f'Error starting telegram bot {store.exception_text()}'\r\n
                logging.error(exception_text)\r\n
        elif 'telegram' not in sys.modules:\r\n
            logging.info('Module telegram not found')\r\n
        elif api_token == '':\r\n
            logging.info('Telegram api_token not found')\r\n
        elif str(store.options('start_tgbot', section='Telegram')) != '1':\r\n
            logging.info('Telegram bot start is disabled in mbplugin.ini (start_tgbot=0)')\r\n
\r\n
    def add_bot_menu(self):\r\n
        'Builds the personal bot menu [/] for every id from auth_id out of the items listed in command_menu_list'\r\n
        if self.updater is None:\r\n
            return\r\n
        command_menu_list = store.options('command_menu_list', section='Telegram').strip().split(',')\r\n
        command_menu_list = [re.sub('^//', '/', f'/{i.strip()}') for i in command_menu_list]\r\n
        for id in self.auth_id():\r\n
            # Walk the commands from command_menu_list and put the known ones into the [/] menu\r\n
            cmds = [self.commands[c1] for c1 in command_menu_list if c1 in self.commands]\r\n
            self.updater.bot.set_my_commands(\r\n
                [telegram.bot.BotCommand(cmd.name, cmd.description) for cmd in cmds],\r\n
                scope=telegram.BotCommandScopeChat(id))\r\n
\r\n
    def auth_id(self):\r\n
        auth_id = store.options('auth_id', section='Telegram').strip()\r\n
        if not re.match(r'(\\d+,?)', auth_id):\r\n
            logging.error(f'incorrect auth_id in ini: {auth_id}')\r\n
            return []\r\n
        return map(int, auth_id.split(','))\r\n
\r\n
    def get_id(self, update, context):\r\n
        \"\"\"Echo chat id.\"\"\"\r\n
        logging.info(f'TG:{update.effective_message.chat_id} /id')\r\n
        self.put_text(update.effective_message.reply_text, update.effective_chat.id)\r\n
\r\n
    def put_text(self, func: typing.Callable, text: str, parse_mode: str = telegram.ParseMode.HTML) -> typing.Optional[typing.Callable]:\r\n
        '''Invoke the given function to place the text'''\r\n
        try:\r\n
            return func(text, parse_mode=parse_mode)\r\n
        except Exception:\r\n
            try:\r\n
                return func(text, parse_mode=None)\r\n
            except Exception:\r\n
                exception_text = store.exception_text()\r\n
                if 'Message is not modified' not in exception_text:\r\n
                    logging.info(f'Unsuccessful tg send:{text} {exception_text}')\r\n
                return None\r\n
\r\n
    def handle_catch_all(self, update, context):\r\n
        '''catch-all handler - resolve the aliases and log everything else that missed the filters;\r\n
        no need to authenticate here - it happens when the aliased command is invoked'''\r\n
        if update is not None and update.effective_message is not None:\r\n
            acmd, *aargs = re.split(r'\\s+', update.effective_message.text)\r\n
            if acmd in self.commands and type(self.commands[acmd].func) == str:\r\n
                logging.info(f'TG catch alias:{update.effective_chat.id} {update.effective_message.text}')\r\n
                alias = self.commands[acmd]\r\n
                # the real text that goes to the command, the real command and the real arguments\r\n
                real_text = ' '.join([alias.func] + aargs)\r\n
                rcmd, *rargs = re.split(r'\\s+', real_text)\r\n
                if rcmd not in self.commands:\r\n
                    logging.info(f'TG for alias {acmd} not found command {alias.func}')\r\n
                    return\r\n
                cmd = self.commands[rcmd]\r\n
                update.effective_message.text = real_text\r\n
                context.args = rargs\r\n
                cmd.func(update, context)\r\n
                return\r\n
            logging.info(f'TG catch-all:{update.effective_chat.id} {update.effective_message.text}')\r\n
\r\n
    @auth_decorator(errmsg='/help\\n/id')\r\n
    def get_help(self, update, context):\r\n
        \"\"\"Send help. only auth user\"\"\"\r\n
        help_text = [f'{cmd.name} - {cmd.description}' for cmd in self.commands.values()]\r\n
        if context.args != []:\r\n
            help_text.insert(0, repr(context.args))\r\n
        self.put_text(update.effective_message.reply_text, '\\n'.join(help_text).strip())\r\n
\r\n
    @auth_decorator()\r\n
    def get_balancetext(self, update, context):\r\n
        \"\"\"Send balance only auth user.\"\"\"\r\n
        baltxt = prepare_balance('FULL', params={'include': ','.join(context.args)})\r\n
        self.put_text(update.effective_message.reply_text, baltxt)\r\n
\r\n
    @auth_decorator()\r\n
    def get_balancefile(self, update, context):\r\n
        \"\"\"Send balance html file only auth user.\"\"\"\r\n
        _, res = getreport()\r\n
        for id in self.auth_id():\r\n
            self.updater.bot.send_document(chat_id=id, filename='balance.htm', document=io.BytesIO('\\n'.join(res).strip().encode('cp1251')))\r\n
\r\n
    @auth_decorator()\r\n
    def restartservice(self, update, context):\r\n
        \"\"\"Hard reset service\"\"\"\r\n
        self.put_text(update.effective_message.reply_text, 'Service will be restarted')\r\n
        restart_program(reason=f'TG:{update.effective_message.chat_id} /restart {context.args}')\r\n
\r\n
    @auth_decorator()\r\n
    def cancel(self, update, context):\r\n
        \"\"\"Send cancel signal to receive balance query\"\"\"\r\n
        self.put_text(update.effective_message.reply_text, 'Query will be canceled')\r\n
        cancel_query(reason=f'TG:{update.effective_message.chat_id} /cancel {context.args}')\r\n
\r\n
    @auth_decorator()\r\n
    def receivebalance(self, update, context):\r\n
        \"\"\" Request the balances of all numbers, only auth user.\r\n
        /receivebalance\r\n
        /receivebalancefailed\r\n
        \"\"\"\r\n
        def feedback_func(txt):\r\n
            self.put_text(msg.edit_text, txt)\r\n
        filtertext = '' if len(context.args) == 0 else f\", with filter by {' '.join(context.args)}\"\r\n
        msg = self.put_text(update.effective_message.reply_text, f'Request all numbers{filtertext}. Wait...')\r\n
        # if the failed ones were requested - query just those\r\n
        # if everything was requested - query everything, then the failed ones twice\r\n
        only_failed = (update.effective_message.text == \"/receivebalancefailed\")\r\n
        params = {'include': None if context.args == [] else ','.join(context.args)}\r\n
        Scheduler().run_once(cmd=CMD_CHECK, feedback_func=feedback_func, kwargs={'filter': context.args, 'params': params, 'only_failed': only_failed})\r\n
\r\n
    @auth_decorator()\r\n
    def get_schedule(self, update, context):\r\n
        \"\"\"Show schedule only auth user.\r\n
        /schedule\r\n
        /schedulereload\r\n
        \"\"\"\r\n
        if update.effective_message.text == \"/schedulereload\":\r\n
            Scheduler().reload()\r\n
        text = Scheduler().view_txt()\r\n
        self.put_text(update.effective_message.reply_text, text if text.strip() != '' else 'Empty')\r\n
\r\n
    @auth_decorator()\r\n
    def get_one(self, update, context: callbackcontext.CallbackContext):\r\n
        \"\"\"Receive one balance with inline keyboard/args, only auth user.\r\n
        /checkone - fetch the balance\r\n
        /getone - just show it\"\"\"\r\n
        # Arguments given? Then query by them.\r\n
        args = ' '.join(context.args if context.args is not None else []).lower()\r\n
        if args != '' and update is not None:  # context.args\r\n
            cmd = (update.effective_message.text[1:]).split(' ')[0]\r\n
            filtered = [v for k, v in store.ini('phones.ini').phones().items() if v['number'].lower() == args or v['alias'].lower() == args]\r\n
            message = self.put_text(update.effective_message.reply_text, f'You have chosen {args}')\r\n
            if len(filtered) > 0:\r\n
                val = filtered[0]\r\n
                callback_data = f\"{cmd}_{val['Region']}_{val['Number']}\"\r\n
                cmd, keypair = callback_data.split('_', 1)  # the command before the first _, then Region_Number\r\n
            else:\r\n
                self.put_text(message.edit_text, f'Not found {args}')  # type: ignore\r\n
                return\r\n
            feedback_func = lambda txt: self.put_text(message.edit_text, txt)  # type: ignore\r\n
            Scheduler().run_once(cmd=CMD_GET_ONE, feedback_func=feedback_func, kwargs={'keypair': keypair, 'check': cmd == 'checkone'})\r\n
            return\r\n
        query: typing.Optional[telegram.callbackquery.CallbackQuery] = update.callback_query\r\n
        if query is None:  # build the keyboard\r\n
            phones = store.ini('phones.ini').phones()\r\n
            keyboard: typing.List = []\r\n
            cmd = (update.effective_message.text[1:]).split(' ')[0]  # checkone or getone\r\n
            for val in list(phones.values()) + [{'Alias': 'Cancel', 'Region': 'Cancel', 'Number': 'Cancel'}]:\r\n
                # the callback key is command_Region_Number\r\n
                btn = InlineKeyboardButton(val['Alias'], callback_data=f\"{cmd}_{val['Region']}_{val['Number']}\")\r\n
                if len(keyboard) == 0 or len(keyboard[-1]) == 3:\r\n
                    keyboard.append([btn])\r\n
                else:\r\n
                    keyboard[-1].append(btn)\r\n
            reply_markup = InlineKeyboardMarkup(keyboard)\r\n
            update.effective_message.reply_text('Please choose:', reply_markup=reply_markup)\r\n
        else:  # react to the keyboard\r\n
            if query.data is None:\r\n
                return\r\n
            cmd, keypair = query.data.split('_', 1)  # the command before the first _, then Region_Number\r\n
            feedback_func = lambda txt: self.put_text(query.edit_message_text, txt)  # type: ignore\r\n
            Scheduler().run_once(cmd=CMD_GET_ONE, feedback_func=feedback_func, kwargs={'keypair': keypair, 'check': cmd == 'checkone'})\r\n
\r\n
    @auth_decorator()\r\n
    def get_log(self, update: telegram.update.Update, context: callbackcontext.CallbackContext):\r\n
        \"\"\"Receive one log with inline keyboard/param, only auth user.\r\n
        /getlog - the log of the last request\r\n
        we come here TWICE: first to build the keyboard (query=None),\r\n
        then to react to it\r\n
        \"\"\"\r\n
        # reply(query.edit_message_text, query.message.reply_document, query.data)\r\n
        def reply(edit_text, message, keypair):\r\n
            self.put_text(edit_text, 'This is log')\r\n
            res = prepare_log_personal(keypair)\r\n
            message.reply_document(filename=f'{keypair}_log.htm', document=io.BytesIO(res.strip().encode('cp1251')))\r\n
        # Arguments given? Then query by them.\r\n
        args = ' '.join(context.args if context.args is not None else []).lower()\r\n
        # query by the given argument\r\n
        if args != '' and update is not None:  # context.args\r\n
            logs = prepare_loglist_personal()\r\n
            filtered = [i for i in logs if args.lower() in i.lower()]\r\n
            new_msg: telegram.message.Message = self.put_text(update.effective_message.reply_text, f'Info for {args}')  # type: ignore\r\n
            if len(filtered) > 0 and new_msg is not None:\r\n
                val = filtered[0]\r\n
                reply(new_msg.edit_text, update.effective_message, val)\r\n
            else:\r\n
                self.put_text(new_msg.edit_text, f'Not found {args}')\r\n
            return\r\n
        query: typing.Optional[telegram.callbackquery.CallbackQuery] = update.callback_query\r\n
        if query is None:  # build the keyboard\r\n
            if update.effective_message is None:\r\n
                return\r\n
            keyboard: typing.List[typing.List[InlineKeyboardButton]] = []\r\n
            logs = prepare_loglist_personal()\r\n
            for val in logs + ['Cancel']:\r\n
                # the callback key is command_Region_Number\r\n
                btn = InlineKeyboardButton(val, callback_data=f\"getlog_{val}\")\r\n
                if len(keyboard) == 0 or len(keyboard[-1]) == 3:\r\n
                    keyboard.append([btn])\r\n
                else:\r\n
                    keyboard[-1].append(btn)\r\n
            reply_markup = InlineKeyboardMarkup(keyboard)\r\n
            update.effective_message.reply_text('Please choose:', reply_markup=reply_markup)\r\n
        else:  # react to the keyboard\r\n
            # ...\r\n
            if query.message is None or query.data is None:\r\n
                return\r\n
            reply(query.edit_message_text, query.message, query.data.split('_', 1)[1])\r\n
\r\n
    @auth_decorator()\r\n
    def button(self, update, context) -> None:\r\n
        '''The keyboard - here we react to a press\r\n
        Work out where it came from and dispatch back there'''\r\n
        query: typing.Optional[telegram.callbackquery.CallbackQuery] = update.callback_query\r\n
        if query is None or query.data is None:\r\n
            return\r\n
        query.answer()\r\n
        logging.info(f'TG:reply keyboard to {update.effective_chat.id} CHOICE:{query.data}')\r\n
        cmd, val = query.data.split('_', 1)  # the command before the first _, then the button, e.g. Region_Number\r\n
        if val.startswith('Cancel'):\r\n
            self.put_text(query.edit_message_text, 'Canceled')\r\n
            return\r\n
        self.put_text(query.edit_message_text, 'Request received. Wait...')\r\n
        # the callback key starts with the command name (6 letters for getlog)\r\n
        if cmd == 'getlog':  # /getlog - generate the log and leave\r\n
            self.get_log(update, context)\r\n
        if cmd in ['checkone', 'getone']:\r\n
            self.get_one(update, context)\r\n
\r\n
    def send_message(self, text: str, parse_mode=telegram.ParseMode.HTML, ids=None, **kwargs):\r\n
        'Sends a message to the ids list, or to the auth_id list from mbplugin.ini'\r\n
        if self.updater is None or text == '':\r\n
            return\r\n
        lst = self.auth_id() if ids is None else ids\r\n
        text = text if type(text) == str else str(text)\r\n
        for id in lst:\r\n
            try:\r\n
                self.updater.bot.sendMessage(chat_id=id, text=text, parse_mode=parse_mode, **kwargs)\r\n
            except Exception:\r\n
                try:\r\n
                    self.updater.bot.sendMessage(chat_id=id, text=text[:4000], parse_mode=None, **kwargs)\r\n
                except Exception:\r\n
                    exception_text = f'Error sending message {text} to {id} telegram bot {store.exception_text()}'\r\n
                    logging.error(exception_text)\r\n
\r\n
    def send_balance(self):\r\n
        'Sends the balance'\r\n
        if self.updater is None or str(store.options('send_balance_changes', section='Telegram')) == '0':\r\n
            return\r\n
        baltxt = prepare_balance('LASTCHANGE')\r\n
        self.send_message(text=baltxt, parse_mode=telegram.ParseMode.HTML)\r\n
\r\n
    def send_subscriptions(self):\r\n
        '''Sends the subscriptions - these are ini lines of the form:\r\n
        subscriptionXXX = id:123456 include:1111,2222 exclude:6666'''\r\n
        if self.updater is None:\r\n
            return\r\n
        subscriptions = store.options('subscription', section='Telegram', listparam=True)\r\n
        for subscr in subscriptions:\r\n
            # id:123456 include:1111,2222 -> {'id':'123456', 'include':'1111,2222'}\r\n
            params = {k: v.strip() for k, v in [i.split(':', 1) for i in subscr.split(' ')]}\r\n
            baltxt = prepare_balance('LASTCHANGE', params)\r\n
            ids = [int(i) for i in params.get('id', '').split(',') if i.isdigit()]\r\n
            self.send_message(text=baltxt, parse_mode=telegram.ParseMode.HTML, ids=ids)\r\n
\r\n
    def stop(self):\r\n
        '''Stop bot'''\r\n
        if self.updater is not None:\r\n
            self.updater.stop()\r\n
\r\n
\r\n
class Handler(wsgiref.simple_server.WSGIRequestHandler):\r\n
    # Disable logging DNS lookups\r\n
    def address_string(self):\r\n
        return str(self.client_address[0])\r\n
\r\n
    def log_message(self, format, *args):\r\n
        # strip the password out of the log\r\n
        args = re.sub('(/.*?/.*?/.*?/)(.*?)(/.*)', r'\\1xxxxxxx\\3', args[0]), *args[1:]\r\n
        args = re.sub('(&password=)(.*?)(&)', r'\\1xxxxxxx\\3', args[0]), *args[1:]\r\n
        # and do not log log-viewing requests at all, otherwise the log feeds on itself\r\n
        if 'GET /log' not in args[0] and 'GET /favicon.ico' not in args[0] and 'GET /favicon.png' not in args[0]:  # the favicon.ico check was missing its 'not in args[0]'\r\n
            logging.info(f\"{self.client_address[0]} - - [{self.log_date_time_string()}] {format % args}\\n\")\r\n
\r\n
\r\n
class ThreadingWSGIServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\r\n
    pass\r\n
\r\n
\r\n
class WebServer():\r\n
    def __init__(self):\r\n
        self.filename_pid = store.abspath_join(store.options('storefolder'), 'web-server.pid')\r\n
        store.turn_logging(httplog=True)\r\n
        self.port = int(store.options('port', section='HttpServer'))\r\n
        self.host = store.options('host', section='HttpServer')\r\n
        with socket.socket() as sock:\r\n
            sock.settimeout(0.2)  # this prevents a 2 second lag when starting the server\r\n
            if sock.connect_ex(('127.0.0.1', self.port)) == 0:\r\n
                logging.info(f\"Port 127.0.0.1:{self.port} already in use, try restart.\")\r\n
                try:\r\n
                    send_http_signal(cmd='exit')\r\n
                except Exception:\r\n
                    pass\r\n
        if str(store.options('start_http', section='HttpServer')) != '1':\r\n
            logging.info('Start http server disabled in mbplugin.ini (start_http=0)')\r\n
            return\r\n
        with wsgiref.simple_server.make_server(self.host, self.port, self.web_app, server_class=ThreadingWSGIServer, handler_class=Handler) as self.httpd:\r\n
            with open(self.filename_pid, 'w') as f:\r\n
                f.write(f'{os.getpid()}')\r\n
            logging.info(f'Starting web server {store.version()} from {os.path.abspath(__file__)}')\r\n
            logging.info(f'Listening pid={os.getpid()} {self.host}:{self.port}....')\r\n
            threading.Thread(target=self.httpd.serve_forever, name='httpd', daemon=True).start()\r\n
            if 'pystray' in sys.modules:  # tray icon\r\n
                self.tray_icon = TrayIcon()  # tray icon (spins everything up in threading itself)\r\n
            if 'telegram' in sys.modules:  # telegram bot (spins itself up in threading)\r\n
                self.telegram_bot = TelegramBot()\r\n
            if 'schedule' in sys.modules:  # Scheduler (spins itself up in threading)\r\n
                self.scheduler = Scheduler()\r\n
            # Everything else runs as daemons; wait until they send the exit signal\r\n
            while True:\r\n
                cmd = cmdqueue.get()\r\n
                if cmd != Q_CMD_EXIT:  # not our command - put it back.\r\n
                    cmdqueue.put(cmd)\r\n
                    time.sleep(1)\r\n
                else:\r\n
                    break  # Q_CMD_EXIT received - leave the loop so the process can exit (a break was missing here)\r\n
\r\n
    def shutdown(self):\r\n
        self.telegram_bot.stop()\r\n
        self.scheduler.stop()\r\n
        self.httpd.shutdown()\r\n
        logging.info(f'Shutdown server {self.host}:{self.port}....')\r\n
\r\n
    def editor(self, environ):\r\n
        ''' The editcfg config editor\r\n
        returns Content-type, text, status, add_headers'''\r\n
        # print(environ)\r\n
        # different statuses and cookies get returned, so prepare variables for them\r\n
        authorized = False  # initially assume the user is not authorized\r\n
        # breakpoint() if os.path.exists('breakpoint') else None\r\n
        status = '200 OK'\r\n
        add_headers = []\r\n
        cookie_store_name = store.abspath_join(store.options('storefolder'), 'authcookie')\r\n
        # Read the list of saved cookies\r\n
        if os.path.exists(cookie_store_name):\r\n
            with open(cookie_store_name) as f:\r\n
                authcookies = [i for i in map(str.strip, f.readlines()) if i != '']\r\n
        else:\r\n
            authcookies = []\r\n
        # Extract the cookies passed in the header\r\n
        cookies = {k: v[0] for k, v in urllib.parse.parse_qs(environ.get('HTTP_COOKIE', '{}')).items()}\r\n
        # Authorized when the passed cookie is in the saved list\r\n
        authorized = cookies.get('auth', 'None') in authcookies\r\n
        # Came from localhost and localhost is allowed without authorization\r\n
        local_authorized = environ.get('REMOTE_ADDR', 'None') == '127.0.0.1' and str(store.options('httpconfigeditnolocalauth')) == '1'\r\n
        if local_authorized:\r\n
            authorized = True\r\n
        # open the editable ini if it has not been opened yet\r\n
        if not hasattr(self, 'editini'):\r\n
            self.editini = store.ini()\r\n
        # print(cookies, f\"auth in authcookies={cookies.get('auth', 'None') in authcookies}\", f'authorized={authorized}')\r\n
        if environ['REQUEST_METHOD'] == 'POST':\r\n
            try:\r\n
                request_size = int(environ['CONTENT_LENGTH'])\r\n
                request_raw = environ['wsgi.input'].read(request_size)\r\n
            except (TypeError, ValueError):\r\n
                request_raw = \"0\"\r\n
            try:\r\n
                request = json.loads(request_raw)\r\n
            except Exception:\r\n
                try:\r\n
                    request = urllib.parse.parse_qs(request_raw.decode())\r\n
                    request = {k: v[0] for k, v in request.items()}\r\n
                except Exception:\r\n
                    request = {'cmd': 'error'}\r\n
            # print(f'request={request}')\r\n
            if authorized and request['cmd'] == 'update':\r\n
                params = settings.ini[request['sec']].get(request['id'] + '_', {})\r\n
                # When the parameter declares a validation function - call it\r\n
                if not params.get('validate', lambda i: True)(request['value']):\r\n
                    return 'text/plain', 'ERROR', status, add_headers\r\n
                logging.info(f\"ini change key [{request['sec']}] {request['id']} {self.editini.ini[request['sec']].get(request['id'], 'default')}->{request['value']}\")\r\n
                self.editini.ini[request['sec']][request['id']] = request['value']\r\n
                self.editini.write()\r\n
                # print('\\n'.join([f'{k}={v}' for k, v in self.editini.ini[request['sec']].items()]))\r\n
            elif authorized and request['cmd'] == 'delete':\r\n
                logging.info(f\"ini delete key [{request['sec']}] {request['id']} {self.editini.ini[request['sec']].get(request['id'], 'default')}\")\r\n
                self.editini.ini[request['sec']].pop(request['id'], None)\r\n
                self.editini.write()\r\n
            elif request['cmd'] == 'logon':\r\n
                status = '303 See Other'\r\n
                # The password matched (and is not empty!!!) - issue a token\r\n
                passwd_from_ini = store.options('httpconfigeditpassword').strip()\r\n
                passwd_from_user = request.get('password', 'None').strip()\r\n
                if passwd_from_user == passwd_from_ini and passwd_from_ini != '':\r\n
                    logging.info('Authorized')\r\n
                    auth_token = uuid.uuid4().hex  # auth cookie\r\n
                    authcookies.append(auth_token)\r\n
                    with open(cookie_store_name, 'w') as f:\r\n
                        f.write('\\n'.join(authcookies))\r\n
                    add_headers = [\r\n
                        ('Location', '/editcfg'),\r\n
                        ('Set-Cookie', f'auth={auth_token}'),\r\n
                        ('Set-Cookie', 'wrongpassword=deleted; expires=Thu, 01 Jan 1970 00:00:00 GMT')]\r\n
                else:\r\n
                    logging.info('Wrong password')\r\n
                    add_headers = [('Location', '/editcfg'), ('Set-Cookie', 'wrongpassword=true')]\r\n
                return 'text/html', 'redirect', status, add_headers\r\n
            elif request['cmd'] == 'logout':\r\n
                # throw the cookie away\r\n
                with open(cookie_store_name, 'w') as f:\r\n
                    f.write('\\n'.join([i for i in authcookies if i != cookies.get('auth', 'None')]))\r\n
                status = '303 See Other'\r\n
                add_headers = [('Location', '/main'), ('Set-Cookie', 'auth=deleted; expires=Thu, 01 Jan 1970 00:00:00 GMT')]\r\n
                return 'text/html', 'redirect', status, add_headers\r\n
            elif request['cmd'] == 'error':\r\n
                return 'text/plain', 'Error', status, add_headers\r\n
            else:\r\n
                return 'text/plain', 'Error, unknown cmd', status, add_headers\r\n
            return 'text/plain', 'OK', status, add_headers\r\n
        if environ['REQUEST_METHOD'] == 'GET':\r\n
            self.editini = store.ini()\r\n
            self.editini.read()\r\n
            # TODO: in the final version editor.html will be taken from settings.py\r\n
            # editor_html = open('editor.html', encoding='cp1251').read()\r\n
            editor_html = settings.editor_html\r\n
            inidata = '{}'\r\n
            if authorized:\r\n
                inidata = self.editini.ini_to_json().replace('\\\\', '\\\\\\\\')\r\n
            editor_html = editor_html.replace(\"inifile = JSON.parse('')\", f\"inifile = JSON.parse('{inidata}')\")\r\n
            if local_authorized:\r\n
                editor_html = editor_html.replace('localAuthorized = false // init', f'localAuthorized = true // init')\r\n
            return 'text/html', editor_html, status, add_headers\r\n
'httpserver.png'), 'rb').read()]\r\n elif cmd.lower() == 'getbalance': # старый вариант оставлен пока для совместимости\r\n ct, text = getbalance_plugin('url', param) # TODO !!! Но правильно все-таки через POST\r\n elif cmd.lower() == 'sendtgbalance':\r\n self.telegram_bot.send_balance()\r\n elif cmd.lower() == 'sendtgsubscriptions':\r\n self.telegram_bot.send_subscriptions()\r\n elif cmd.lower() == 'get': # вариант через get запрос\r\n param = urllib.parse.parse_qs(environ['QUERY_STRING'])\r\n ct, text = getbalance_plugin('get', param)\r\n elif cmd.lower() == 'log': # просмотр лога /log/....\r\n if len(param) > 0 and param[0] == 'list': # /log/list\r\n allgroups = prepare_loglist_personal()\r\n text = [settings.header_html] + [f'{g}
    ' for g in allgroups]\r\n elif len(param) > 0 and re.match(r'^\\w*$', param[0]): # /log/p_plugin_number\r\n # text = [f'
    ' for fn in ss]\r\n text = [settings.header_html] + [prepare_log_personal(param[0])]\r\n else: # /log\r\n qs = urllib.parse.parse_qs(environ['QUERY_STRING'])\r\n ct, text = view_log(qs)\r\n text = [settings.header_html] + text\r\n elif cmd.lower() == 'screenshot': # скриншоты\r\n if len(param) == 0 or not re.match(r'^\\w*\\.png$', param[0]):\r\n return\r\n with open(store.abspath_join(store.options('loggingfolder'), param[0]), 'rb') as f:\r\n text = f.read()\r\n ct = 'image/png'\r\n elif cmd.lower() == 'schedule': # просмотр расписания\r\n ct, text = Scheduler().view_html()\r\n text = [settings.header_html] + text\r\n elif cmd.lower() == 'reload_schedule': # обновление расписания\r\n Scheduler().reload()\r\n ct, text = Scheduler().view_html()\r\n text = [settings.header_html] + text\r\n elif cmd.lower() == 'version_update': # обновление версии\r\n res = run_update()\r\n ct, text = 'text/html', settings.header_html + f'\\n
<pre>\\n{res}\\n</pre>
    \\n'\r\n if 'Update:' in text and 'No new version found' not in text:\r\n logging.info('Schedule restart web service')\r\n threading.Thread(target=lambda: restart_program(reason=f'WEB: /restart', delay=5), name='Restart', daemon=True).start()\r\n else:\r\n logging.info('No new version, no restart')\r\n elif cmd == 'logging_restart': # logging_restart\r\n store.logging_restart()\r\n ct, text = 'text/html', 'OK'\r\n elif cmd == '' or cmd == 'report': # report\r\n if str(store.options('sqlitestore')) == '1':\r\n ct, text = getreport(param)\r\n else:\r\n ct, text = 'text/html', HTML_NO_REPORT\r\n elif cmd == 'fastreport': # report from balance.html\r\n if str(store.options('sqlitestore')) == '1' and os.path.exists(store.options('balance_html')):\r\n ct, text = 'text/html', open(store.options('balance_html')).read()\r\n else:\r\n ct, text = 'text/html', HTML_NO_REPORT\r\n elif cmd.lower() == 'main': # главная страница\r\n port = store.options('port', section='HttpServer')\r\n info = f'Mbplugin {store.version()} run on {socket.gethostname()}:{port} from {os.path.abspath(os.path.dirname(__file__))}
'\r\n        phones = store.ini('phones.ini').phones()\r\n        groups = sorted(set([p['indication'] for p in phones.values() if 'indication' in p]))\r\n        group_urls = ''.join([f'
    Group_{g} ' for g in groups])\r\n script = ''\r\n if str(store.options('HttpConfigEdit')) == '0':\r\n script = 'document.getElementById(\"call_editor\").style=\"display:none\"'\r\n ct, text = 'text/html; charset=cp1251', [settings.main_html % {'group_urls': group_urls, 'info': info, 'script': script}]\r\n elif cmd.lower() == 'editcfg': # вариант через get запрос\r\n if str(store.options('HttpConfigEdit')) == '1':\r\n ct, text, status, add_headers = self.editor(environ)\r\n elif cmd == 'getbalance_standalone': # start balance request\r\n # TODO подумать над передачей параметров в fetch - filter=filter,only_failed=only_failed\r\n Scheduler().run_once(cmd=CMD_CHECK)\r\n ct, text = 'text/html; charset=cp1251', ['OK']\r\n elif cmd == 'flushlog': # Start new log\r\n store.logging_restart()\r\n ct, text = 'text/html; charset=cp1251', ['OK']\r\n elif cmd == 'recompile': # Recompile js lsmblh plugin\r\n compile_all_jsmblh.recompile()\r\n ct, text = 'text/html; charset=cp1251', ['OK']\r\n elif cmd == 'restart': # exit cmd\r\n ct, text = 'text/html; charset=cp1251', ['OK']\r\n # TODO нужен редирект иначе она зацикливается на рестарте\r\n threading.Thread(target=lambda: restart_program(reason=f'WEB: /restart', delay=0.1), name='Restart', daemon=True).start()\r\n elif cmd == 'cancel': # cancel query\r\n ct, text = 'text/html; charset=cp1251', ['OK']\r\n cancel_query(reason=f'WEB: /cancel')\r\n elif cmd == 'exit': # exit cmd\r\n ct, text = 'text/html; charset=cp1251', ['OK']\r\n threading.Thread(target=lambda: restart_program(reason=f'WEB: /exit', exit_only=True, delay=0.1), name='Exit', daemon=True).start()\r\n if status.startswith('200'):\r\n headers = [('Content-type', ct)]\r\n if status.startswith('303'):\r\n headers = add_headers\r\n start_response(status, headers)\r\n logging.debug('web_app done')\r\n if 'png' in ct:\r\n return [text]\r\n return [line.encode('cp1251', errors='ignore') for line in text]\r\n except Exception:\r\n exception_text = f'Ошибка: {store.exception_text()}'\r\n logging.error(exception_text)\r\n headers = [('Content-type', 'text/html')]\r\n return ['ERROR'.encode('cp1251')]\r\n\r\n\r\ndef parse_arguments(argv, parcerclass=argparse.ArgumentParser):\r\n parser = parcerclass()\r\n parser.add_argument('--cmd', type=str, help='command for web server (start/stop)', default='start')\r\n return parser.parse_args(argv)\r\n\r\n\r\ndef run_update():\r\n if sys.platform == 'win32':\r\n mbp_path = os.path.join(store.settings.mbplugin_root_path, 'mbp.bat')\r\n return os.popen(f'\"{mbp_path}\" version-update').read()\r\n else:\r\n mbp_path = os.path.join(store.settings.mbplugin_root_path, 'mbp')\r\n return os.popen(f'\"{mbp_path}\" version-update').read()\r\n\r\n\r\ndef main():\r\n try:\r\n ARGS = parse_arguments(sys.argv[1:])\r\n if ARGS.cmd.lower() == 'start':\r\n WebServer()\r\n if ARGS.cmd.lower() == 'stop':\r\n send_http_signal(cmd='exit')\r\n except Exception:\r\n exception_text = f'Ошибка запуска WebServer: {store.exception_text()}'\r\n logging.error(exception_text)\r\n if 'UnicodeDecodeError:' in exception_text:\r\n exception_text += f'\\nWebServer не запустится если имя компьютера содержит русские буквы,\\nв настоящий момент имя компьютера \"{socket.gethostname()}\"'\r\n logging.error(exception_text)\r\n\r\n\r\nif __name__ == '__main__':\r\n store.switch_to_mb_mode()\r\n 
main()\r\n","repo_name":"artyl/mbplugin","sub_path":"plugin/httpserver_mobile.py","file_name":"httpserver_mobile.py","file_ext":"py","file_size_in_byte":94261,"program_lang":"python","lang":"ru","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"3675243549","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom fight.forms import CreateGato\nfrom fight.models import Gatos\nfrom fight.forms import eliminar\nfrom .fight import fight2\n\n# Create your views here.\n\ndef home(response):\n return render(response, 'fight/base.html', {})\n\ndef list(response, id):\n forms=eliminar()\n if response.method == \"POST\":\n forms = eliminar(response.POST)\n if forms.is_valid():\n check=forms.cleaned_data['eliminar']\n if check == True:\n f=Gatos.objects.get(id=id)\n f.delete()\n return HttpResponseRedirect('../baseDatos')\n gatos = Gatos.objects.get(id=id)\n return render(response, 'fight/list.html', {'gato':gatos, 'forms':forms, 'id':id})\n\ndef datos(response):\n gatos = Gatos.objects.all()\n return render (response, 'fight/datos.html', {'gatos':gatos})\n \n\ndef create(response):\n \n if response.method == \"POST\":\n forms = CreateGato(response.POST)\n \n if forms.is_valid():\n nomb = forms.cleaned_data[\"name\"]\n est = forms.cleaned_data[\"estilo\"]\n descr= forms.cleaned_data[\"descr\"]\n print(est[0])\n if est[0] == \"Bonito\":\n Ps = 30\n Pcrit = 0.1\n Ev = 1.2\n At = 8\n elif est[0] == \"Astuto\":\n Ps = 20\n Pcrit = 0.35\n Ev = 2\n At = 5\n elif est[0] == \"Peligroso\":\n Ps = 25\n Pcrit = 0.25\n Ev = 1.5\n At = 10\n \n t = Gatos(Nombre=nomb,EstiloCombate=est[0],Descripcion=descr,PuntosVida=Ps,ProbabilidadCritico=Pcrit,Evasion=Ev,Ataque=At)\n t.save()\n \n return HttpResponseRedirect('../%i' %t.id)\n \n else:\n forms = CreateGato()\n return render(response, \"fight/create.html\", {\"form\":forms})\n\ndef fight(response):\n x=fight2(5,6)\n return render(response, 'fight/fight.html', {'x':x})","repo_name":"JaviYarza/DJango","sub_path":"fight/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27387192674","text":"from flask import Flask, render_template, request\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\n\r\napp=Flask(__name__)\r\n\r\nmodel = pickle.load(open('model.pkl', 'rb'))\r\ncars=pd.read_csv(\"Car Data123.csv\")\r\n\r\n@app.route('/')\r\ndef index():\r\n companies = sorted(cars['company'].unique())\r\n ownerused = sorted(cars['owner'].unique())\r\n yearold = sorted(cars['year'].unique(), reverse=True)\r\n fuel_type = cars['fuel'].unique()\r\n return render_template('index.html', companies=companies, ownerused=ownerused, yearold=yearold, fuel_type=fuel_type)\r\n\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n company=request.form.get('company')\r\n owner=request.form.get('owner')\r\n year=int(request.form.get('year'))\r\n fuel=request.form.get('fuel')\r\n km_driven=int(request.form.get('km_driven'))\r\n\r\n prediction = model.predict(pd.DataFrame([[company, owner, year, fuel, km_driven]], columns=['companies', 'ownerused', 'yearold', 'fuel_type', 'km_driven']))\r\n\r\n return str(np.round(prediction[0], 2))\r\n\r\nif __name__==\"__main__\":\r\n 
app.run(debug=True)","repo_name":"Nikh789/Car-Price-Predictor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19922009517","text":"from typed_params import BaseModel\r\nfrom asc_overview.utilities.excel_config import ExcelTableConfig\r\nfrom asc_overview.utilities.reindex_rows import get_row_order_from_row_names\r\n\r\n\r\nclass NewRequestsRowNames(BaseModel):\r\n ASCFR_NEW_REQUESTS_18_64_NAME: str\r\n ASCFR_NEW_REQUESTS_65_PLUS_NAME: str\r\n ASCFR_GROWTH_18_64_SINCE_2015_16_NAME: str\r\n ASCFR_GROWTH_65_PLUS_SINCE_2015_16_NAME: str\r\n POPULATION_18_64: str\r\n POPULATION_65_PLUS: str\r\n NEW_REQUESTS_PER_POPULATION_18_64: str\r\n NEW_REQUESTS_PER_POPULATION_65_PLUS: str\r\n SAFEGUARDING_CONCERNS_RAISED_NAME: str\r\n DOLS_APPLICATIONS_RECEIVED_NAME: str\r\n SAFEGUARDING_YEAR_ON_YEAR_CONCERNS_RAISED_NAME: str\r\n DOLS_YEAR_ON_YEAR_APPLICATIONS_RECEIVED_NAME: str\r\n\r\n\r\nclass NewRequestsTable(BaseModel):\r\n OVERVIEW: ExcelTableConfig\r\n CELL_TO_WRITE_TO: str\r\n ROW_NAMES: NewRequestsRowNames\r\n ASCFR_NEW_INDEX: list[str]\r\n DOLS_APPLICATIONS_RECEIVED_ROW_NAME: str\r\n DOLS_APPLICATIONS_RECEIVED_COLUMN_NAME: str\r\n SAFEGUARDING_CONCERNS_RAISED_ROW_NAME: str\r\n SAFEGUARDING_CONCERNS_RAISED_COLUMN_NAME: str\r\n\r\n def get_row_order(self):\r\n return get_row_order_from_row_names(self.ROW_NAMES)\r\n","repo_name":"NHSDigital/ASC-Overview","sub_path":"asc_overview/time_series_tables/new_requests_table/new_requests_table_config.py","file_name":"new_requests_table_config.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35261086361","text":"import torch\nimport os\nfrom torch.utils.data import Dataset\nimport torchvision\nimport numpy as np\nfrom scipy.io import loadmat\n\nclass CreateDataset(Dataset):\n def __init__(self, args, mode, transform=None):\n dataset_path = args.dataset_path\n filename = f'uci_eeg_images_{mode}_{args.split_variant}.mat'\n self.filepath_full = os.path.join(dataset_path, filename)\n data = loadmat(self.filepath_full)\n self.identity = data['label_id']\n self.stimulus = data['label_stimulus']\n self.alcoholism = data['label_alcoholism']\n self.images = data['data']\n self.num_samples = len(self.images)\n self.transform = transform\n self.toTensor = torchvision.transforms.ToTensor()\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, index):\n identity = torch.tensor(self.identity[index], dtype=torch.float32)\n stimulus = torch.tensor(self.stimulus[index], dtype=torch.float32)\n alcoholism = torch.tensor(self.alcoholism[index], dtype=torch.float32)\n\n image = self.images[index]\n image = torch.tensor(image, dtype=torch.float32).permute(2, 0, 1)\n image = convert(image, 0, 1)\n\n if not self.transform is None:\n image = self.transform(image)\n\n # prepare mask for conditional generation\n image_c_real, image_c_fake, condition_array_real, condition_array_fake = get_conditioned_image(image)\n targets_real_cls = torch.cat((identity, stimulus, alcoholism)).reshape(-1, 1)\n targets_fake_cls = targets_real_cls * condition_array_fake # float\n targets_real_adv = get_adv_label('real')\n targets_fake_adv = get_adv_label('fake')\n return image, image_c_real, image_c_fake, condition_array_real, condition_array_fake, identity, stimulus, alcoholism, \\\n targets_real_cls, targets_real_adv, targets_fake_cls, 
targets_fake_adv\n\ndef get_conditioned_image(image):\n '''\n create random conditions {0,1} for each feature and concatenate them to the image\n '''\n num_channels, height, width = image.shape\n num_features = 3\n\n condition_array_fake = torch.tensor([(np.random.rand(1) > 0.5).astype(np.float32) for index in range(num_features)])\n filter_identity, filter_stimulus, filter_alcoholism = ([condition_array_fake[index] * torch.ones((1, height, width), dtype=torch.float32) for index in range(3)])\n image_c_fake = torch.cat((image, filter_identity, filter_stimulus, filter_alcoholism), dim=0)\n\n condition_array_real = torch.ones_like(condition_array_fake)\n filter_identity, filter_stimulus, filter_alcoholism = ([condition_array_fake[index] * torch.ones((1, height, width), dtype=torch.float32) for index in range(3)])\n image_c_real = torch.cat((image, filter_identity, filter_stimulus, filter_alcoholism), dim=0)\n\n return image_c_real, image_c_fake, condition_array_real, condition_array_fake\n\ndef convert(source, min_value=0, max_value=1):\n smin = source.min()\n smax = source.max()\n a = (max_value - min_value) / (smax - smin)\n b = max_value - a * smax\n target = (a * source + b)\n return target\n\ndef get_adv_label(mode='real'):\n '''\n needs to be updated to random multipliers\n '''\n label = torch.ones(1, dtype=torch.float32)\n if mode == 'real':\n multiplier = 0.9\n elif mode == 'fake':\n multiplier = 0.1\n else:\n raise NotImplementedError\n label *= multiplier\n return label","repo_name":"abhijitadhikary/EEG-conditional-feature-filter-GAN","sub_path":"conditional_filter/create_datasest.py","file_name":"create_datasest.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"29342402970","text":"import numpy as np\nimport paddle\nimport paddle.fluid as fluid\n\n\ndef build_lr_model(args):\n \"\"\"\n Build the LR model to train.\n \"\"\"\n emb_x = fluid.layers.data(\n name=\"emb_x\", dtype='float32', shape=[args.w2v_emb_size])\n label = fluid.layers.data(name=\"label_y\", dtype='int64', shape=[1])\n logits = fluid.layers.fc(input=emb_x,\n size=args.num_class,\n act=None,\n name='classification_layer')\n proba = fluid.layers.softmax(logits)\n loss = fluid.layers.softmax_with_cross_entropy(logits, label)\n loss = fluid.layers.mean(loss)\n acc = fluid.layers.accuracy(input=proba, label=label, k=1)\n return loss, acc\n\n\ndef construct_feed_data(data):\n \"\"\"\n Construct the data to feed model.\n \"\"\"\n datas = []\n labels = []\n for sample in data:\n if len(datas) < 16:\n labels.append([sample[-1]])\n datas.append(sample[1:-1])\n else:\n yield np.array(datas).astype(np.float32), np.array(labels).astype(\n np.int64)\n datas = []\n labels = []\n if len(datas) != 0:\n yield np.array(datas).astype(np.float32), np.array(labels).astype(\n np.int64)\n\n\ndef run_epoch(exe, data, program, stage, epoch, loss, acc):\n \"\"\"\n The epoch funtcion to run each epoch.\n \"\"\"\n print('start {} epoch of {}'.format(stage, epoch))\n all_loss = 0.0\n all_acc = 0.0\n all_samples = 0.0\n count = 0\n for datas, labels in construct_feed_data(data):\n batch_loss, batch_acc = exe.run(\n program,\n fetch_list=[loss, acc],\n feed={\"emb_x\": datas,\n \"label_y\": labels})\n len_samples = len(datas)\n all_loss = batch_loss * len_samples\n all_acc = batch_acc * len_samples\n all_samples += len_samples\n count += 1\n print(\"pass:{}, epoch:{}, loss:{}, acc:{}\".format(stage, epoch, batch_loss,\n all_acc / 
(len_samples)))\n\n\ndef train_lr_model(args, data):\n \"\"\"\n The main function to run the lr model.\n \"\"\"\n data_nums = len(data)\n train_data_nums = int(0.8 * data_nums)\n train_data = data[:train_data_nums]\n test_data = data[train_data_nums:]\n\n place = fluid.CPUPlace()\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n\n with fluid.program_guard(train_program, startup_program):\n loss, acc = build_lr_model(args)\n test_program = train_program.clone(for_test=True)\n\n with fluid.program_guard(train_program, startup_program):\n adam = fluid.optimizer.Adam(learning_rate=args.lr)\n adam.minimize(loss)\n\n exe = fluid.Executor(place)\n exe.run(startup_program)\n\n for epoch in range(0, args.epoch):\n run_epoch(exe, train_data, train_program, \"train\", epoch, loss, acc)\n print('-------------------')\n run_epoch(exe, test_data, test_program, \"valid\", epoch, loss, acc)\n","repo_name":"PaddlePaddle/PGL","sub_path":"legacy/examples/strucvec/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":1545,"dataset":"github-code","pt":"54"} +{"seq_id":"14610835989","text":"from typing import List\n\n\nclass Solution:\n\n def createTargetArray(self, nums: List[int], index: List[int]) -> List[int]:\n\n target = []\n\n for i in range(len(nums)):\n n = nums[i]\n i = index[i]\n target.insert(i, n)\n\n return target\n\n# 模範解答\n# https://leetcode.com/problems/create-target-array-in-the-given-order/discuss/553334/Python-Using-insert()-and-without-insert()\n\n\n'''\nclass Solution:\n def createTargetArray(self, nums: List[int], index: List[int]) -> List[int]:\n target = []\n for i in range(len(nums)):\n # 末尾の index に要素追加する場合\n if index[i] == len(target) :\n target.append(nums[i])\n else:\n # リストの前半スライス + 要素追加 + リストの後半スライス\n target = target[:index[i]] + [nums[i]] + target[index[i]:]\n return target\n'''\n","repo_name":"Takuma-Ikeda/other-LeetCode","sub_path":"src/easy/answer/create_target_array_in_the_given_order.py","file_name":"create_target_array_in_the_given_order.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33483302952","text":"from django.urls import path\nfrom .views import all_accounts, \\\n create_account, \\\n delete_account, \\\n update_account, \\\n account_by_unique_id, \\\n account_login, \\\n verify_token\n\nurlpatterns = [\n path('', all_accounts, name=\"accounts\"),\n path('create', create_account, name=\"create\"),\n path('login', account_login, name=\"login\"),\n path('delete', delete_account, name=\"delete\"),\n path('update', update_account, name=\"update\"),\n path('by-id', account_by_unique_id, name=\"byId\"),\n path('verify-token', verify_token, name=\"verifyToken\")\n]","repo_name":"degide/django-CRUD","sub_path":"crud1/crud1Hello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43399662530","text":"#\n# @lc app=leetcode id=87 lang=python3\n#\n# [87] Scramble String\n#\n\n# @lc code=start\n\n## 1、先判断简单边界情况\n## 2、递归,s1和s2是scramble的话,那么必然存在一个在s1上的长度l1,将s1分成s11和s12两段\n## 同样有s21和s22.那么要么s11和s21是scramble的并且s12和s22是scramble的;\n## 要么s11和s22是scramble的并且s12和s21是scramble的\nclass Solution:\n def isScramble(self, s1: str, s2: str) -> bool:\n if len(s1) != len(s2):\n return False\n if s1 == s2:\n return True\n l1, l2 = list(s1), list(s2)\n 
l1.sort()\n l2.sort()\n str1 = ''.join(l1)\n str2 = ''.join(l2)\n if str1 != str2:\n return False\n for i in range(1, len(s1)):\n s1left = s1[0:i]\n s1right = s1[i:]\n s2left = s2[0:i]\n s2right = s2[i:]\n if self.isScramble(s1left, s2left) and self.isScramble(s1right, s2right):\n return True\n s2left = s2[0:len(s1) - i]\n s2right = s2[len(s1) - i:]\n if self.isScramble(s1left, s2right) and self.isScramble(s1right, s2left):\n return True\n return False\n \n# @lc code=end\n\n","repo_name":"CharmSun/my-leetcode","sub_path":"py/87.scramble-string.py","file_name":"87.scramble-string.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25749800375","text":"import base58\nimport hashlib\nimport rsa\n\nfrom transaction import *\n\nclass User:\n\n def __init__(self, id, name, amount_coin):\n self.id = id\n self.name = name\n self.amount_coin = amount_coin\n\n # Generate public key and private key\n public_key, private_key = rsa.newkeys(512)\n self.private_key = private_key\n self.public_key = public_key\n\n self.generate_address()\n\n def __str__(self):\n str_result = \"Name: {} \\nAmount coin: {}\".format(self.name, self.amount_coin)\n return str_result\n \n\n def generate_address(self):\n \"\"\"\n This function will generate the receiver's address.\n Which is typically derived from the receiver's public key using a hash function.\n \"\"\"\n \n public_key_der = rsa.PublicKey.save_pkcs1(self.public_key, format='DER') # Convert the public key to DER format\n hash = hashlib.sha256(public_key_der).digest() # Hash the public key using SHA-256\n \n # Hash the result again using RIPEMD-160\n ripe_md160 = hashlib.new('ripemd160')\n ripe_md160.update(hash)\n hash = ripe_md160.digest()\n \n # Add a network byte (0x00 for mainnet, 0x6f for testnet)\n network_byte = b'\\x00'\n hash_with_network_byte = network_byte + hash\n \n # Compute the checksum by hashing the hash_with_network_byte twice and taking the first 4 bytes\n checksum = hashlib.sha256(hashlib.sha256(hash_with_network_byte).digest()).digest()[:4]\n \n address_bytes = hash_with_network_byte + checksum \n address = base58.b58encode(address_bytes)\n \n self.address = address.decode()\n\n\n def sign_transaction(self, transaction):\n \"\"\"\n This method use the sender's private key to sign the transaction data and generate a signature.\n \"\"\"\n transaction_info = str(transaction.to_dict()).encode()\n signature = rsa.sign(transaction_info, self.private_key, 'SHA-256')\n \n transaction.signature = signature\n\n\n def create_transaction(self, recipient_address, amount_to_send):\n\n new_transaction = Transaction_class(self.public_key, recipient_address, amount_to_send)\n self.sign_transaction(new_transaction)\n\n return new_transaction","repo_name":"NguyenThaiVu/python_blockchain_simulation","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40282832094","text":"### ITERAVITE APPROACH\n# finding the factorial using iterative approach\n\ndef factorial_iterative(n):\n \"\"\"parameter : n (Integer)\n return: n*(n-1)*....*1 \"\"\"\n fac = 1\n for i in range(n):\n fac = fac*(i+1)\n return fac\n\ny = int(input(\"Enter the number for which u want the factorial: \"))\nprint(factorial_iterative(y))\n\n\n##### Recurssicve approach\n# this is for the factorial of the given number\ndef recur(n):\n if n == 0 or n== 1:\n 
return 1\n else:\n return n*recur(n-1)\n\nyi = int(input(\"Enter the number: \"))\nprint(recur(yi))\n\n\n# for fibonacchi series\ndef recur_fibo(n):\n if n == 0:\n return 0\n elif n== 1 or n==2:\n return 1\n elif n>2:\n return recur_fibo(n-1)+recur_fibo(n-2)\n\nyio = int(input(\"Enter the number for fibo: \"))\nprint(recur_fibo(yio))\n\n","repo_name":"Pritush09/Python-and-Numpy-practice","sub_path":"recurssive and iterative approach and fibonacci also.py","file_name":"recurssive and iterative approach and fibonacci also.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25685972178","text":"from django.urls import path\nfrom .views import PostListView, PostDetailView, PostEditView,PostDeleteView,CommentDeleteView,ProfileView, ProfileEditView, AddFollower, RemoveFollower, AddLike, Dislike,UserSearch,ListFollowers,CommentAddLike,CommentDislike,CommentReplyView, PostNotification, FollowNotification, RemoveNotification\nurlpatterns = [\n path('', PostListView.as_view(), name='post-list'),\n path('post/', PostDetailView.as_view(), name='post-detail'),\n path('post/edit/', PostEditView.as_view(), name='post-edit'),\n path('post/delete/', PostDeleteView.as_view(), name='post-delete'),\n#Comment\n path(f'post//comment//delete', CommentDeleteView.as_view(), name='comment-delete'),\n path(f'post//comment//reply', CommentReplyView.as_view(), name='comment-reply'),\n#Like & Dislike\n path(f'post//like', AddLike.as_view(), name='like'),\n path(f'post//dislike', Dislike.as_view(), name='dislike'),\n path(f'post//comment//like', CommentAddLike.as_view(), name='comment-like'),\n path(f'post//comment//dislike', CommentDislike.as_view(), name='comment-dislike'),\n#Profile\n path(f'profile/',ProfileView.as_view(), name='profile'),\n path(f'profile/edit/',ProfileEditView.as_view(), name='profile-edit'),\n#Follower\n path(f'profile//followers/add',AddFollower.as_view(), name='add-follower'),\n path(f'profile//followers/list', ListFollowers.as_view(), name='list-followers'),\n path(f'profile//followers/remove',RemoveFollower.as_view(), name='remove-follower'),\n#search\n path('search/', UserSearch.as_view(), name='profile-search'),\n#notification\n path('notification//post/', PostNotification.as_view(), name='post-notification'),\n path('notification//profile/', FollowNotification.as_view(), name='follow-notification'),\n path('notification/delete/', RemoveNotification.as_view(), name='notification-delete'),\n]","repo_name":"javohir6666/Social-media-django","sub_path":"social/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72571492640","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@File : task2.py\n@Time : 2020/05/06 21:56:05\n@Author : KingFar \n@Version : 1.0\n@Contact : 1136421682@qq.com\n@WebSite : https://github.com/KingFarGrace\n\"\"\"\n\n# here put the import lib\nimport socket\nfrom threading import Thread\n\n\"\"\"\n编写一个UDP的聊天程序,客户端和服务器端能互相聊天应答\n\"\"\"\n\ndef client():\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # 使用8080端口\n dest_addr = ('192.168.234.1', 8080)\n while(True):\n send_msg = input(\"请输入要发送的数据:\")\n udp_socket.sendto(send_msg.encode('utf-8'), dest_addr)\n udp_socket.close()\n\n\ndef server():\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp_socket.bind(('192.168.234.1', 8080))\n while(True):\n 
recv_msg = udp_socket.recvfrom(1024)\n print(\">>>客户端{}消息:{}\".format(recv_msg[1], recv_msg[0]))\n udp_socket.close()\n\n \nif __name__ == \"__main__\": \n t1 = Thread(target=server)\n t1.start()\n client()","repo_name":"KingFarGrace/python-practice","sub_path":"homework9/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74757764960","text":"from django.shortcuts import render, redirect\nfrom .forms import PlayerForm, TeamForm, StadiumForm\nfrom .models import Player, Stadium, Team\n\n# Create your views here.\n\ndef home(request):\n return render(request, 'home.html', {})\n\ndef players(request):\n context = {\n \"player_list\": Player.objects.all()\n }\n return render(request, 'players.html', context)\n\n# Agregar jugadores\ndef add_player(request):\n form = PlayerForm()\n\n if request.method == \"POST\":\n form = PlayerForm(request.POST)\n\n if form.is_valid():\n player = form.save(commit = False)\n player.save()\n return redirect(home)\n return render(request, 'add_player.html', {\n \"form\": form\n })\n\ndef teams(request):\n context = {\n \"teams_list\": Team.objects.all()\n }\n return render(request, 'teams.html', context)\n\ndef add_team(request):\n form = TeamForm()\n\n if request.method == \"POST\":\n form = TeamForm(request.POST)\n\n if form.is_valid():\n team = form.save(commit=False)\n team.save()\n return redirect(home)\n return render(request, 'add_team.html', {\n \"form\": form\n })\n\n\ndef stadiums(request):\n context = {\n \"stadium_list\": Stadium.objects.all()\n }\n return render(request, 'stadium.html', context)\n\ndef add_stadium(request):\n form = StadiumForm()\n\n if request.method == \"POST\":\n form = StadiumForm(request.POST)\n\n if form.is_valid():\n stadium = form.save(commit=False)\n stadium.save()\n return redirect(home)\n return render(request, 'add_stadium.html', {\n \"form\": form\n })\n\n\ndef test(request, player_id):\n player = Team.objects.get(pk = author_id)\n","repo_name":"jsgovea/examen","sub_path":"examen/examenapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12163853282","text":"import math\n\n\ndef merge_list_of_lists(lst):\n \"\"\"Merging the list of lists.\n\n Args:\n lst (list): The list of lists.\n\n Returns:\n list: The merged list.\n \"\"\"\n return [x for l in lst for x in l]\n\n\ndef slice_list(lst, size):\n \"\"\"Slicing the list to chunks.\n\n Args:\n lst (list): The list to slice.\n size (int): The number of chunks.\n\n Returns:\n list: The list of lists.\n \"\"\"\n\n sliced_lst = []\n max_list_size = math.ceil(len(lst) / size)\n temp_list = []\n temp_index = 1\n\n for item in lst:\n temp_list.append(item)\n temp_index += 1\n\n if temp_index > max_list_size:\n sliced_lst.append(temp_list)\n temp_list = []\n temp_index = 1\n\n if len(temp_list) > 0:\n sliced_lst.append(temp_list)\n\n return sliced_lst\n","repo_name":"Sapfik/Practise-Python","sub_path":"autostatistic_parser-main/utils/lists_methods.py","file_name":"lists_methods.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41204283520","text":"import torch\nimport torchvision.transforms as transforms\nfrom torchvision.datasets import CIFAR10, mnist\nfrom torch.utils.data import DataLoader\nimport torch.optim 
as optim\nfrom torch import nn\nfrom ex_methods.module.model_mnist import Network_mnist\nfrom ex_methods.module.model_cifar10 import Network_cifar10\n\n\ndef Train(model_name, **kwargs):\n # 训练超参数\n train_batchsize = 64 # 训练批大小\n test_batchsize = 128 # 测试批大小\n num_epoches = 20 # 训练轮次\n lr = kwargs.get(\"learning_rate\") # 学习率\n momentum = 0.5 # 动量参数,用于优化算法\n\n # 定义数据转换对象\n '''\n 前者将数据放入tensor中,后者是归一化处理,\n 两个0.5分别表示对张量进行归一化的全局平均值和方差。因图像是灰色的只有一个通道,如果有多个通道,需要有多个数字,如三个通道,应该是Normalize([m1,m2,m3], [n1,n2,n3])\n '''\n\n #获取mnist训练数据\n dataset = kwargs.get(\"dataset\")\n if dataset == \"mnist\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.1307], [0.3081])])\n train_dataset = mnist.MNIST('./data', train=True, transform=transform, download=True)\n test_dataset = mnist.MNIST('./data', train=False, transform=transform, download=False)\n model = Network_mnist(kwargs)\n elif dataset == \"cifar10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])\n train_dataset = CIFAR10('./data', train=True, transform=transform, download=True)\n test_dataset = CIFAR10('./data', train=False, transform=transform, download=False)\n model = Network_cifar10(kwargs)\n\n #datalodar用于加载训练数据\n train_loader = DataLoader(train_dataset, batch_size=train_batchsize, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=test_batchsize,shuffle=False)\n\n # 判断当前设备\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\n model.to(device)\n # print(model)\n\n # 损失函数\n loss_func = kwargs.get(\"loss_func\")\n if loss_func == \"ESM\":\n criterion = nn.MSELoss()\n elif loss_func == \"Cross-entropy\":\n criterion = nn.CrossEntropyLoss()\n\n # 优化器\n optimizer = kwargs.get(\"optimizer\")\n if optimizer == \"SGD\":\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)\n elif optimizer == \"Adam\":\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n # 统计损失值和精确度\n losses = []\n acces = []\n eval_losses = []\n eval_acces = []\n\n for epoch in range(num_epoches):\n train_loss = 0\n train_acc = 0\n model.train()\n\n # 读取数据\n for img, label in train_loader:\n # 将数据放入设备中\n img = img.to(device)\n label = label.to(device)\n\n # 向模型中输入数据\n out = model.forward(img)\n # 计算损失值\n loss = criterion(out, label)\n # 清理当前优化器中梯度信息\n optimizer.zero_grad()\n # 根据损失值计算梯度\n loss.backward()\n # 根据梯度信息进行模型优化\n optimizer.step()\n\n # 统计损失信息\n train_loss += loss.item()\n\n # 得到预测值\n _, pred = out.max(1)\n\n # 判断预测正确个数,计算精度\n num_correct = (pred == label).sum().item()\n acc = num_correct / img.shape[0]\n train_acc += acc\n\n losses.append(train_loss/len(train_loader))\n acces.append((train_acc/len(train_loader)))\n\n # 进行模型评估\n eval_loss = 0\n eval_acc = 0\n model.eval()\n\n for img, label in test_loader:\n img = img.to(device)\n label = label.to(device)\n\n out = model.forward(img)\n loss = criterion(out, label)\n\n # 记录误差\n eval_loss += loss.item()\n\n # 记录准确率\n _, pred = out.max(1)\n num_correct = (pred == label).sum().item()\n acc = num_correct / img.shape[0]\n eval_acc += acc\n\n eval_losses.append(eval_loss / len(test_loader))\n eval_acces.append(eval_acc / len(test_loader))\n\n # 打印学习情况\n print('epoch: {}, Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'\n .format(epoch, train_loss / len(train_loader), train_acc / len(train_loader),\n eval_loss / len(test_loader), eval_acc / len(test_loader)))\n\n model_detail = {\n \"model_name\": model_name,\n \"model\": model,\n \"parameters\": 
kwargs\n }\n torch.save(model_detail, \"models/\" + model_name +\".pkl\")\n return {\"train_acc\": train_acc / len(train_loader),\n \"eval_acc\": eval_acc / len(test_loader)}\n","repo_name":"shybeeJD/XAI-platform","sub_path":"control/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1376097796","text":"from sklearn import datasets\r\nfrom sklearn.datasets import fetch_openml\r\nimport pandas as pd\r\nimport random\r\nfrom sklearn.metrics import silhouette_samples, silhouette_score\r\nimport numpy as np\r\nfrom scipy.cluster.hierarchy import linkage\r\nfrom scipy.cluster.hierarchy import dendrogram\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom scipy.cluster.hierarchy import fcluster\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom scipy.cluster.hierarchy import linkage\r\nfrom scipy.cluster.hierarchy import dendrogram\r\nfrom matplotlib import cm\r\n\r\nimport time\r\nclass data:\r\n mnist = fetch_openml('mnist_784', version=1)\r\n \r\n X, y = mnist['data'], mnist['target']\r\n \r\n \r\n df = pd.DataFrame(mnist.data, columns = mnist.feature_names)\r\n y=pd.DataFrame(y)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n sample=df.take(np.random.permutation(len(df))[:1000])\r\n sample_y=y.take(np.random.permutation(len(df))[:1000])\r\n \r\n print(sample)\r\n #print(np.unique(y))\r\n print(np.unique(sample_y))\r\n \r\n \r\n print(sample.shape)\r\n\r\nstart_time = time.time()\r\n\r\n\r\n\r\nclass Kmeans:\r\n \r\n mnist = fetch_openml('mnist_784', version=1)\r\n \r\n #X=mnist.data\r\n \r\n \r\n \r\n df = pd.DataFrame(mnist.data, columns = mnist.feature_names)\r\n \r\n \r\n \r\n sample=df.take(np.random.permutation(len(df))[:1000])\r\n \r\n #model fitting\r\n km = KMeans(n_clusters=10, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0)\r\n y_km = km.fit_predict(sample)\r\n print(y_km)\r\n print(km.cluster_centers_ )\r\n print('Distortion: %.2f' % km.inertia_)\r\n print('silhoutte score k means -mnist',silhouette_score(sample,y_km))\r\n #silhoutte graph\r\n cluster_labels = np.unique(y_km)\r\n n_clusters = cluster_labels.shape[0]\r\n silhouette_vals = silhouette_samples(sample, y_km, metric='euclidean')\r\n y_ax_lower, y_ax_upper = 0, 0\r\n yticks = []\r\n for i, c in enumerate(cluster_labels):\r\n c_silhouette_vals = silhouette_vals[y_km == c]\r\n c_silhouette_vals.sort()\r\n y_ax_upper += len(c_silhouette_vals)\r\n color = cm.jet(float(i) / n_clusters)\r\n plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0, \r\n edgecolor='none', color=color)\r\n \r\n yticks.append((y_ax_lower + y_ax_upper) / 2.)\r\n y_ax_lower += len(c_silhouette_vals)\r\n \r\n silhouette_avg = np.mean(silhouette_vals)\r\n plt.axvline(silhouette_avg, color=\"red\", linestyle=\"--\") \r\n \r\n plt.yticks(yticks, cluster_labels + 1)\r\n plt.ylabel('Cluster')\r\n plt.xlabel('Silhouette coefficient')\r\n \r\n plt.tight_layout()\r\n \r\n plt.show()\r\nprint(\"--- %s seconds for sklearn ---\" % (time.time() - start_time)) \r\n#calculating k using elbow approach \r\nclass elbow:\r\n \r\n mnist = fetch_openml('mnist_784', version=1)\r\n \r\n #X=mnist.data\r\n \r\n \r\n \r\n df = pd.DataFrame(mnist.data, columns = mnist.feature_names)\r\n \r\n \r\n \r\n sample=df.take(np.random.permutation(len(df))[:1000])\r\n\r\n\r\n distortions = []\r\n # Calculate distortions\r\n for i in range(1, 17):\r\n km = KMeans(n_clusters=i, \r\n 
init='k-means++', \r\n n_init=10, \r\n max_iter=300, \r\n random_state=0)\r\n km.fit(sample)\r\n distortions.append(km.inertia_)\r\n \r\n #Plot distortions for different K\r\n plt.plot(range(1, 17), distortions, marker='o')\r\n plt.xlabel('Number of clusters')\r\n plt.ylabel('Distortion')\r\n plt.tight_layout()\r\n plt.show()\r\nstart_time = time.time()\r\nclass Hirarcheal_scipy:\r\n \r\n mnist = fetch_openml('mnist_784', version=1)\r\n \r\n #X=mnist.data\r\n \r\n \r\n \r\n df = pd.DataFrame(mnist.data, columns = mnist.feature_names)\r\n \r\n \r\n \r\n sample=df.take(np.random.permutation(len(df))[:1000])\r\n row_cluster1 = linkage(sample.values, method='average', metric='euclidean') \r\n #print(row_cluster1)\r\n #dn = dendrogram(row_cluster1, above_threshold_color=\"green\", color_threshold=.7, orientation='right')\r\n row_dendr = dendrogram(row_cluster1) \r\n \r\n \r\n plt.tight_layout()\r\n plt.ylabel('Euclidean distance') \r\n plt.show()\r\n \r\n k=10\r\n #model fitting\r\n \r\n fclust = fcluster(row_cluster1, k, criterion='maxclust')\r\n \r\n \r\n print(fclust)\r\n print('silhoutte score hirarcheal scipy-mnist',silhouette_score(sample, fclust))\r\n #silhoutte graph\r\n cluster_labels = np.unique(fclust)\r\n n_clusters = cluster_labels.shape[0]\r\n silhouette_vals = silhouette_samples(sample, fclust, metric='euclidean')\r\n y_ax_lower, y_ax_upper = 0, 0\r\n yticks = []\r\n for i, c in enumerate(cluster_labels):\r\n c_silhouette_vals = silhouette_vals[fclust == c]\r\n c_silhouette_vals.sort()\r\n y_ax_upper += len(c_silhouette_vals)\r\n color = cm.jet(float(i) / n_clusters)\r\n plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0, \r\n edgecolor='none', color=color)\r\n \r\n yticks.append((y_ax_lower + y_ax_upper) / 2.)\r\n y_ax_lower += len(c_silhouette_vals)\r\n \r\n silhouette_avg = np.mean(silhouette_vals)\r\n plt.axvline(silhouette_avg, color=\"red\", linestyle=\"--\") \r\n \r\n plt.yticks(yticks, cluster_labels + 1)\r\n plt.ylabel('Cluster')\r\n plt.xlabel('Silhouette coefficient-scipy')\r\n \r\n plt.tight_layout()\r\n \r\n plt.show()\r\nprint(\"--- %s seconds for sklearn ---\" % (time.time() - start_time)) \r\nstart_time = time.time()\r\nclass Hirarcheal_sklearn:\r\n \r\n mnist = fetch_openml('mnist_784', version=1)\r\n \r\n #X=mnist.data\r\n \r\n \r\n \r\n df = pd.DataFrame(mnist.data, columns = mnist.feature_names)\r\n \r\n \r\n \r\n sample=df.take(np.random.permutation(len(df))[:1000])\r\n\r\n #model fitting \r\n cluster1 = AgglomerativeClustering(n_clusters=10, affinity='euclidean', linkage='complete')\r\n cluster1_labels = cluster1.fit_predict(sample)\r\n print(cluster1_labels)\r\n print('silhoutte score hirarcheal sklearn - mnist',silhouette_score(sample, cluster1_labels))\r\n #silhoutte graph\r\n cluster_labels = np.unique(cluster1_labels)\r\n n_clusters = cluster_labels.shape[0]\r\n silhouette_vals = silhouette_samples(sample, cluster1_labels, metric='euclidean')\r\n y_ax_lower, y_ax_upper = 0, 0\r\n yticks = []\r\n for i, c in enumerate(cluster_labels):\r\n c_silhouette_vals = silhouette_vals[cluster1_labels == c]\r\n c_silhouette_vals.sort()\r\n y_ax_upper += len(c_silhouette_vals)\r\n color = cm.jet(float(i) / n_clusters)\r\n plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0, \r\n edgecolor='none', color=color)\r\n \r\n yticks.append((y_ax_lower + y_ax_upper) / 2.)\r\n y_ax_lower += len(c_silhouette_vals)\r\n \r\n silhouette_avg = np.mean(silhouette_vals)\r\n plt.axvline(silhouette_avg, color=\"red\", linestyle=\"--\") \r\n \r\n 
plt.yticks(yticks, cluster_labels + 1)\r\n plt.ylabel('Cluster')\r\n plt.xlabel('Silhouette coefficient-sklearn')\r\n \r\n plt.tight_layout()\r\n \r\n plt.show()\r\n\r\nprint(\"--- %s seconds for sklearn ---\" % (time.time() - start_time)) ","repo_name":"DevineniLikhitha/DevineniLikhitha","sub_path":"MLHW/hw6/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22662985148","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@File : dns_util.py\n@Date : 2023-09-08\n\"\"\"\nfrom __future__ import print_function, unicode_literals, absolute_import, division\nimport six\nimport dns.resolver\n\n\ndef query_domain_cname(domain):\n \"\"\"\n 查询域名的CNAME记录\n :param domain:\n :return:\n \"\"\"\n lst = []\n\n query_object = dns.resolver.resolve(\n qname=domain,\n rdtype='CNAME',\n raise_on_no_answer=False\n )\n\n for query_item in query_object.response.answer:\n for item in query_item.items:\n lst.append(six.text_type(item))\n\n return lst\n","repo_name":"mouday/domain-admin","sub_path":"domain_admin/utils/dns_util.py","file_name":"dns_util.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1065,"dataset":"github-code","pt":"54"} +{"seq_id":"32585652706","text":"import json\nimport re\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = 'http://baskino.me'\nnumber_pages = 2615\nheaders = {\n \"Accept\": \"*/*\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36\",\n}\n\n\ndef get_video_page_links(url, number_pages, headers):\n film_links = []\n for page in range(1, number_pages + 1):\n print(f'page number: {page}')\n new_url = url + f'/page/{page}/'\n req = requests.get(new_url, headers=headers)\n src = req.text\n\n soup = BeautifulSoup(src, 'lxml')\n try:\n films = soup.find(id=\"dle-content\").find_all(class_=re.compile(\"posttitle\"))\n except AttributeError:\n continue\n\n for film in films:\n try:\n film_links.append(film.find('a').get('href'))\n except AttributeError:\n continue\n return film_links\n\n\ndef get_videos_info(video_page_links):\n films_info = []\n flag = 1\n for film_link in video_page_links:\n print(f'Video number: {flag}')\n flag += 1\n try:\n req = requests.get(film_link, headers=headers)\n src = req.text\n soup = BeautifulSoup(src, 'lxml')\n image = soup.find(class_=\"mobile_cover\").find(\"img\").get(\"src\")\n info_table = soup.find(class_=\"info\").find('table')\n description = soup.find(id=re.compile(\"news-id\")).get_text('
    ')\n name = info_table.find('td', string='Название:').find_next_sibling().text\n original_name = info_table.find('td', string='Оригинальное название:').find_next_sibling().text\n year = int(info_table.find('td', string='Год:').find_next_sibling().text)\n country = info_table.find('td', string='Страна:').find_next_sibling().text\n director = [i.strip() for i in\n info_table.find('td', string='Режиссер:').find_next_sibling().text.split(\",\")]\n genre = [i.strip() for i in info_table.find('td', string='Жанр:').find_next_sibling().text.split(\",\")]\n actors = [i.strip() for i in\n info_table.find('td', string='В главных ролях:').find_next_sibling().text.split(\",\")]\n video = soup.find(id='player-holder-1').find('iframe').get(\"src\")\n rating = float(soup.find('b', itemprop=\"ratingValue\").text.replace(',', '.'))\n except AttributeError:\n continue\n\n films_info.append({\n \"name\": name,\n 'original_name': original_name,\n 'year': year,\n 'country': country,\n 'director': director,\n 'genre': genre,\n 'actors': actors,\n 'video': video,\n 'image': image,\n 'description': description,\n 'rating': rating,\n })\n return films_info\n\n\ndef set_json_file(films_info):\n if os.path.exists('data.json'):\n os.remove(\"data.json\")\n with open('data.json', 'a', encoding='utf-8') as file:\n json.dump(films_info, file, indent=4, ensure_ascii=False)\n\n\nvideo_page_links = get_video_page_links(url, number_pages, headers)\nvideos_info = get_videos_info(video_page_links)\nset_json_file(videos_info)\n","repo_name":"Dima-Bulavenko/baskinoParser","sub_path":"baskinoParser.py","file_name":"baskinoParser.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6864932439","text":"import curses\nfrom src.board import Board\n# import time\n# from curses import textpad\n\n\ndef main(stdscr):\n \n board1 = Board(\"board1\")\n board1.load_from_file(\"plansza1.txt\")\n curses.curs_set(0)\n\n width = board1.get_width()\n height = board1.get_height()\n snake = [(height//2, width//2 + 1), (height//2, width//2), (height//2, width//2 - 1)]\n for i in range(len(board1.table)):\n stdscr.addstr(i, 0, board1.get_line(i))\n for x, y in snake:\n stdscr.addstr(x, y, \"S\") \n stdscr.refresh()\n stdscr.getch()\n direction = curses.KEY_UP\n\n while 1:\n key = stdscr.getch()\n\n if key in [curses.KEY_UP, curses.KEY_DOWN, curses.KEY_RIGHT, curses.KEY_LEFT]:\n direction = key\n\n head = snake[0]\n\n if direction == curses.KEY_UP:\n new_head = ((head[0] - 1) % height, head[1])\n elif direction == curses.KEY_LEFT:\n new_head = (head[0], (head[1] - 1) % width)\n elif direction == curses.KEY_RIGHT:\n new_head = (head[0], (head[1] + 1) % width)\n elif direction == curses.KEY_DOWN:\n new_head = ((head[0] + 1) % height, head[1])\n\n snake.insert(0, new_head)\n stdscr.addstr(new_head[0], new_head[1], \"S\")\n stdscr.addstr(snake[-1][0], snake[-1][1], \" \")\n snake.pop()\n stdscr.refresh()\n\n\ncurses.wrapper(main)","repo_name":"ltopolsk/Snake-Game","sub_path":"src/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3578056412","text":"import copy\nimport logging\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\n\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import CommandJob as RestCommandJob\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import JobBase\nfrom 
azure.ai.ml._schema.job.command_job import CommandJobSchema\nfrom azure.ai.ml._utils.utils import map_single_brackets_and_warn\nfrom azure.ai.ml.constants import JobType\nfrom azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, LOCAL_COMPUTE_PROPERTY, LOCAL_COMPUTE_TARGET, TYPE\nfrom azure.ai.ml.entities._credentials import (\n AmlTokenConfiguration,\n ManagedIdentityConfiguration,\n UserIdentityConfiguration,\n _BaseJobIdentityConfiguration,\n)\nfrom azure.ai.ml.entities._inputs_outputs import Input, Output\nfrom azure.ai.ml.entities._job._input_output_helpers import (\n from_rest_data_outputs,\n from_rest_inputs_to_dataset_literal,\n to_rest_data_outputs,\n to_rest_dataset_literal_inputs,\n validate_inputs_for_command,\n)\nfrom azure.ai.ml.entities._job.distribution import DistributionConfiguration\nfrom azure.ai.ml.entities._job.job_service import (\n JobService,\n JobServiceBase,\n JupyterLabJobService,\n SshJobService,\n TensorBoardJobService,\n VsCodeJobService,\n)\nfrom azure.ai.ml.entities import Environment\nfrom azure.ai.ml.entities._system_data import SystemData\nfrom azure.ai.ml.entities._util import load_from_dict\nfrom azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationErrorType, ValidationException\n\nfrom .job import Job\nfrom .job_io_mixin import JobIOMixin\nfrom .job_limits import CommandJobLimits\nfrom .job_resource_configuration import JobResourceConfiguration\nfrom .parameterized_command import ParameterizedCommand\nfrom .queue_settings import QueueSettings\n\nmodule_logger = logging.getLogger(__name__)\n\n\nclass CommandJob(Job, ParameterizedCommand, JobIOMixin):\n \"\"\"Command job.\n\n :keyword services: Read-only information on services associated with the job.\n :paramtype services: Optional[dict[str, ~azure.ai.ml.entities.JobService]]\n :keyword inputs: Mapping of output data bindings used in the command.\n :paramtype inputs: Optional[dict[str, Union[~azure.ai.ml.Input, str, bool, int, float]]]\n :keyword outputs: Mapping of output data bindings used in the job.\n :paramtype outputs: Optional[dict[str, ~azure.ai.ml.Output]]\n :keyword identity: The identity that the job will use while running on compute.\n :paramtype identity: Optional[Union[~azure.ai.ml.ManagedIdentityConfiguration, ~azure.ai.ml.AmlTokenConfiguration,\n ~azure.ai.ml.UserIdentityConfiguration]]\n :keyword limits: The limits for the job.\n :paramtype limits: Optional[~azure.ai.ml.entities.CommandJobLimits]\n :keyword kwargs: A dictionary of additional configuration parameters.\n :paramtype kwargs: dict\n\n .. admonition:: Example:\n\n .. 
literalinclude:: ../samples/ml_samples_command_configurations.py\n :start-after: [START command_job_definition]\n :end-before: [END command_job_definition]\n :language: python\n :dedent: 8\n :caption: Configuring a CommandJob.\n \"\"\"\n\n def __init__(\n self,\n *,\n inputs: Optional[Dict[str, Union[Input, str, bool, int, float]]] = None,\n outputs: Optional[Dict[str, Output]] = None,\n limits: Optional[CommandJobLimits] = None,\n identity: Optional[\n Union[ManagedIdentityConfiguration, AmlTokenConfiguration, UserIdentityConfiguration]\n ] = None,\n services: Optional[\n Dict[str, Union[JobService, JupyterLabJobService, SshJobService, TensorBoardJobService, VsCodeJobService]]\n ] = None,\n **kwargs,\n ) -> None:\n kwargs[TYPE] = JobType.COMMAND\n self._parameters = kwargs.pop(\"parameters\", {})\n\n super().__init__(**kwargs)\n\n self.outputs = outputs\n self.inputs = inputs\n self.limits = limits\n self.identity = identity\n self.services = services\n\n @property\n def parameters(self) -> Dict[str, str]:\n \"\"\"MLFlow parameters.\n\n :return: MLFlow parameters logged in job.\n :rtype: dict[str, str]\n \"\"\"\n return self._parameters\n\n def _to_dict(self) -> Dict:\n # pylint: disable=no-member\n return CommandJobSchema(context={BASE_PATH_CONTEXT_KEY: \"./\"}).dump(self)\n\n def _to_rest_object(self) -> JobBase:\n self._validate()\n self.command = map_single_brackets_and_warn(self.command)\n modified_properties = copy.deepcopy(self.properties)\n # Remove any properties set on the service as read-only\n modified_properties.pop(\"_azureml.ComputeTargetType\", None)\n # Handle local compute case\n compute = self.compute\n resources = self.resources\n if self.compute == LOCAL_COMPUTE_TARGET:\n compute = None\n if resources is None:\n resources = JobResourceConfiguration()\n if resources.properties is None:\n resources.properties = {}\n # This is the format of the October Api response. 
We need to match it exactly\n resources.properties[LOCAL_COMPUTE_PROPERTY] = {LOCAL_COMPUTE_PROPERTY: True}\n\n properties = RestCommandJob(\n display_name=self.display_name,\n description=self.description,\n command=self.command,\n code_id=self.code,\n compute_id=compute,\n properties=modified_properties,\n experiment_name=self.experiment_name,\n inputs=to_rest_dataset_literal_inputs(self.inputs, job_type=self.type),\n outputs=to_rest_data_outputs(self.outputs),\n environment_id=self.environment,\n distribution=self.distribution._to_rest_object() if self.distribution else None,\n tags=self.tags,\n identity=self.identity._to_job_rest_object() if self.identity else None,\n environment_variables=self.environment_variables,\n resources=resources._to_rest_object() if resources else None,\n limits=self.limits._to_rest_object() if self.limits else None,\n services=JobServiceBase._to_rest_job_services(self.services),\n queue_settings=self.queue_settings._to_rest_object() if self.queue_settings else None,\n )\n result = JobBase(properties=properties)\n result.name = self.name\n return result\n\n @classmethod\n def _load_from_dict(cls, data: Dict, context: Dict, additional_message: str, **kwargs) -> \"CommandJob\":\n loaded_data = load_from_dict(CommandJobSchema, data, context, additional_message, **kwargs)\n return CommandJob(base_path=context[BASE_PATH_CONTEXT_KEY], **loaded_data)\n\n @classmethod\n def _load_from_rest(cls, obj: JobBase) -> \"CommandJob\":\n rest_command_job: RestCommandJob = obj.properties\n command_job = CommandJob(\n name=obj.name,\n id=obj.id,\n display_name=rest_command_job.display_name,\n description=rest_command_job.description,\n tags=rest_command_job.tags,\n properties=rest_command_job.properties,\n command=rest_command_job.command,\n experiment_name=rest_command_job.experiment_name,\n services=JobServiceBase._from_rest_job_services(rest_command_job.services),\n status=rest_command_job.status,\n creation_context=SystemData._from_rest_object(obj.system_data) if obj.system_data else None,\n code=rest_command_job.code_id,\n compute=rest_command_job.compute_id,\n environment=rest_command_job.environment_id,\n distribution=DistributionConfiguration._from_rest_object(rest_command_job.distribution),\n parameters=rest_command_job.parameters,\n # pylint: disable=protected-access\n identity=_BaseJobIdentityConfiguration._from_rest_object(rest_command_job.identity)\n if rest_command_job.identity\n else None,\n environment_variables=rest_command_job.environment_variables,\n resources=JobResourceConfiguration._from_rest_object(rest_command_job.resources),\n limits=CommandJobLimits._from_rest_object(rest_command_job.limits),\n inputs=from_rest_inputs_to_dataset_literal(rest_command_job.inputs),\n outputs=from_rest_data_outputs(rest_command_job.outputs),\n queue_settings=QueueSettings._from_rest_object(rest_command_job.queue_settings),\n )\n # Handle special case of local job\n if (\n command_job.resources is not None\n and command_job.resources.properties is not None\n and command_job.resources.properties.get(LOCAL_COMPUTE_PROPERTY, None)\n ):\n command_job.compute = LOCAL_COMPUTE_TARGET\n command_job.resources.properties.pop(LOCAL_COMPUTE_PROPERTY)\n return command_job\n\n def _to_component(self, context: Optional[Dict] = None, **kwargs) -> \"CommandComponent\":\n \"\"\"Translate a command job to component.\n\n :param context: Context of command job YAML file.\n :type context: dict\n :keyword kwargs: Extra arguments.\n :return: Translated command component.\n :rtype: 
CommandComponent\n \"\"\"\n from azure.ai.ml.entities import CommandComponent\n\n pipeline_job_dict = kwargs.get(\"pipeline_job_dict\", {})\n context = context or {BASE_PATH_CONTEXT_KEY: Path(\"./\")}\n\n # Create anonymous command component with default version as 1\n return CommandComponent(\n tags=self.tags,\n is_anonymous=True,\n base_path=context[BASE_PATH_CONTEXT_KEY],\n code=self.code,\n command=self.command,\n environment=self.environment,\n description=self.description,\n inputs=self._to_inputs(inputs=self.inputs, pipeline_job_dict=pipeline_job_dict),\n outputs=self._to_outputs(outputs=self.outputs, pipeline_job_dict=pipeline_job_dict),\n resources=self.resources if self.resources else None,\n distribution=self.distribution if self.distribution else None,\n )\n\n def _to_node(self, context: Optional[Dict] = None, **kwargs) -> \"Command\":\n \"\"\"Translate a command job to a pipeline node.\n\n :param context: Context of command job YAML file.\n :type context: dict\n :keyword kwargs: Extra arguments.\n :return: Translated command component.\n :rtype: Command\n \"\"\"\n from azure.ai.ml.entities._builders import Command\n\n component = self._to_component(context, **kwargs)\n\n return Command(\n component=component,\n compute=self.compute,\n # Need to supply the inputs with double curly.\n inputs=self.inputs,\n outputs=self.outputs,\n environment_variables=self.environment_variables,\n description=self.description,\n tags=self.tags,\n display_name=self.display_name,\n limits=self.limits,\n services=self.services,\n properties=self.properties,\n identity=self.identity,\n queue_settings=self.queue_settings,\n )\n\n def _validate(self) -> None:\n if self.command is None:\n msg = \"command is required\"\n raise ValidationException(\n message=msg,\n no_personal_data_message=msg,\n target=ErrorTarget.JOB,\n error_category=ErrorCategory.USER_ERROR,\n error_type=ValidationErrorType.MISSING_FIELD,\n )\n if self.environment is None:\n msg = \"environment is required for non-local runs\"\n raise ValidationException(\n message=msg,\n no_personal_data_message=msg,\n target=ErrorTarget.JOB,\n error_category=ErrorCategory.USER_ERROR,\n error_type=ValidationErrorType.MISSING_FIELD,\n )\n if isinstance(self.environment, Environment):\n self.environment.validate()\n validate_inputs_for_command(self.command, self.inputs)\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py","file_name":"command_job.py","file_ext":"py","file_size_in_byte":12226,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"43463461128","text":"# Needed to create random numbers to simulate dice roll\nimport random\n\n# Initialise player scores to 0\nplayer1_score = 0\nplayer2_score = 0\n\n# Repeat everything in this block 10 times\nfor i in range(10):\n\n # Generate random numbers between 1 and 6 for each player.\n player1_value = random.randint(1, 6)\n player2_value = random.randint(1, 6)\n\n # Display the values\n print(\"Player 1 rolled: \", player1_value)\n print(\"Player 2 rolled: \", player2_value)\n\n # Selection: based on comparison of the values, take the appropriate path through the code.\n if player1_value > player2_value:\n print(\"player 1 wins.\")\n player1_score = player1_score + 1 # This is how we increment a variable\n elif player2_value > player1_value:\n print(\"player 2 wins\")\n player2_score = player2_score + 1\n else:\n print(\"It's a draw\")\n\n input(\"Press enter to continue.\") # 
Wait for user input to proceed.\n\nprint(\"### Game Over ###\")\nprint(\"Player 1 score:\", player1_score)\nprint(\"Player 2 score:\", player2_score)\n","repo_name":"raj-ravan/Hacktoberfest2022","sub_path":"Dicegame.py","file_name":"Dicegame.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"32907403540","text":"from django import template\n\nfrom about.models import SiteBanner\n\nregister = template.Library()\n\n@register.inclusion_tag('about/_site_banner.html')\ndef site_banner():\n try:\n banner = list(SiteBanner.objects.current())[0]\n except IndexError:\n banner = None\n return {'banner': banner}\n\n","repo_name":"yourcelf/btb","sub_path":"scanblog/about/templatetags/site_banner_tags.py","file_name":"site_banner_tags.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"74460529121","text":"#coding:utf-8\n\nfrom selenium import *\nfrom selenium import webdriver\nfrom selenium .webdriver.chrome.options import Options\n\nimport bs4\nimport os\nimport re\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\n\nbase_url = \"https://data.gov.in\" # 홈페이지 검색에 기본이 되는 URL\nbase_file = \"C:/Users/LKS/Desktop/india\"\n\nchromeOptions = webdriver.ChromeOptions()\nprefs = {\"download.default_directory\" : base_file ,\"download.prompt_for_download\": False,\"download.directory_upgrade\": True}\nchromeOptions.add_experimental_option(\"prefs\", prefs)\ndriver=webdriver.Chrome('C:\\chromedriver.exe',chrome_options=chromeOptions)\n\n\npattern = re.compile( r'\\s+' ) # 개행문자 제거를 위한 변수\ndriver.implicitly_wait(5)\ndef spider() :\n \n c_count = 0\n base_url2 = base_url+'/catalogs'\n base_url3 = base_url+'/catalog'\n\n page = 3\n max_page = 475\n soup_before=''\n while page < max_page :\n\n url_page = base_url+'/catalogs#items_per_page=9&page='+str(page)+'&sort_by=created&sort_order=DESC'\n print(url_page)\n page +=1\n driver.get(url_page)\n time.sleep(15)\n soup2 = bs4.BeautifulSoup(driver.page_source,\"html.parser\")\n while True:\n if soup_before==soup2 :\n time.sleep(15)\n soup2 = bs4.BeautifulSoup(driver.page_source,\"html.parser\")\n else :\n break\n \n soup_before=soup2\n count = 0\n \n for links in soup2.findAll( 'div' , {'class' : 'ogpl-grid-list'} ) :\n c_count+=1\n\n if not count == 0 :\n \n try:\n link_aTag = links.find('span', {'class' : 'field-content'}).find( 'a' )\n link_name = link_aTag.text.replace( ' ' , '_' ).replace( '...' 
, '' ).replace('.','').replace(':','').replace('\\\\', '').replace('<', '').replace('>', '').replace('/','-').replace('*','').replace('|','').replace('?', '')\n link_categ = links.find('div',{'class' : 'views-field views-field-field-ministry-department'}).find('span').text\n print(link_name)\n small_page = 0\n last_page = links.find('span',{'count-resource'}).text.replace('(','').replace(')','')\n \n if not last_page == 'NA' :\n \n last_page = int(last_page)//6+1\n \n while small_page < last_page :\n link_spot = base_url + link_aTag.get('href')+'?title=&file_short_format=&page='+str(small_page)\n link_source = requests.get(link_spot)\n plain_text = link_source.text\n link_soup = BeautifulSoup( plain_text , 'lxml')\n items_count = 0\n for items in link_soup.findAll('div', {'class' : 'ogpl-grid-list'}) :\n\n if not items_count == 0 :\n \n try :\n content_format = items.find('div', {'class' : 'download-confirmation-box'}).find('div').find('a').text\n content_name = items.find('span',{'class' : 'title-content'}).text.replace('\"','').replace(\"'\",'').replace(',',' and ').replace( ' ' , '_' ).replace(':',' to ').replace('\\\\', '').replace('<', '').replace('>', '').replace('/','-').replace('*','').replace('|','').replace('?', '')\n content_code = items.find('div',{'class' : 'data-export-cont'}).get('class')[2].replace('confirmationpopup-','')\n content_link = 'https://data.gov.in/node/'+content_code+'/download'\n \n \n file_url = base_file+'/'+link_categ+'/'+ link_name\n file_path = base_file+'/'+link_categ+'/'+link_name+'/'+content_name+'.'+content_format\n \n \n if not os.path.exists(file_url):\n os.makedirs(file_url)\n \n \n try :\n item_source = requests.get(content_link)\n item_text = item_source.text\n item_soup = BeautifulSoup( item_text , 'lxml')\n item_metaTag = item_soup.find('meta',{'http-equiv':'refresh'}).get('content').replace('1;url=','')\n\n content_request = requests.get( item_metaTag , stream = True )\n with open( file_path , 'wb' ) as content_file :\n content_file.write( content_request.content )\n time.sleep(5)\n\n except :\n print (link_name + \":\"+ content_name + \" - download error!\" )\n except :\n print (content_name + \" - content error!\" )\n\n items_count+=1\n print(link_name,'page :',small_page,'complete')\n\n small_page+=1\n\n except :\n print (link_name + \" - link error!\" )\n\n count+=1\n\n \n print(c_count)\n \nspider()\n\nprint('finish')\n","repo_name":"gangslee/Web_Crawler_Python","sub_path":"India.py","file_name":"India.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
import triangulation as tr\r\nimport math\r\n\r\nclass MinimalTent(nv.NdimVar):\r\n def __init__(s,T,eps=0.1,k=1.0):\r\n s.T=T\r\n s.eps=eps\r\n s.sublin=False\r\n s.k=k\r\n s.alpha=1.0\r\n def tension(s,alpha):\r\n s.alpha=alpha\r\n def create_subLin(s):\r\n s.sublin=True\r\n s.T.createNT()\r\n n=s.T.N\r\n m=n-len(s.T.Bnd)\r\n CC=np.zeros(shape=(n,3),dtype=float)\r\n j=0\r\n for i in range(n):\r\n if i in s.T.Bnd:\r\n CC[j]=np.zeros(shape=(3,),dtype=float)\r\n j+=1\r\n continue\r\n ar=np.zeros(shape=(3,),dtype=float)\r\n for tk in s.T.NT[i]:\r\n t=s.T.triangle(tk)\r\n norm=t.normal()\r\n \r\n ar+=norm.P*t.square()\r\n CC[j]=ar\r\n j+=1\r\n s.CC=CC\t\r\n def X(s,x):\r\n\t\t\r\n T=s.T\r\n P=s.T.P\r\n n=s.T.N\r\n m=s.T.N-len(s.T.Bnd)\r\n grad=np.ndarray(shape=(n,3),dtype=float)\r\n e3=np.zeros(shape=(3,),dtype=float)\r\n e3[2]=1.0\r\n \r\n for i in range(0,s.T.N):\r\n trk=s.T.NT[i]\r\n gd=np.zeros(shape=(3,),dtype=float)\r\n \r\n for k in trk:\r\n l=0\r\n for v in s.T.T[k]:\r\n if i==v: \r\n i1=l\r\n l+=1\r\n\r\n Tr=s.T.triangle(k)\r\n h=Tr.center()[2]\r\n nn=Tr.orthos()\r\n S=Tr.square()\r\n gd+=nn[i1]*(1+s.k*h)+e3*s.k*S/3\r\n #gd+=nn[i1]*h+e3*S/3\r\n #gd+=e3*S/3\r\n if i in s.T.Bnd : grad[i]=[0,0,0]\r\n else : \r\n dl=gd.dot(gd)\r\n gd=gd/math.sqrt(dl)\r\n grad[i]=gd\r\n\t\t\t\t\r\n return grad","repo_name":"KlyachinVA/MinimalSurface","sub_path":"MinSurface/tentsurface.py","file_name":"tentsurface.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"20841941102","text":"\"\"\"OCS Wrapper Methods\"\"\"\nimport os\nimport os.path as path\nimport sys\nimport tempfile\nfrom sh import mount, umount, Command\nfrom .partitioning import Partition\nfrom .repositories import Image\n\nocs_sr = Command(\"/usr/sbin/ocs-sr\")\n\ndef mount_image(image: Image) -> str:\n \"\"\"Mounts image on system\"\"\"\n if not image.available_local and not image.available_remote:\n raise FileNotFoundError(\"Image is not available in any repo\")\n\n mount_dir = tempfile.mkdtemp()\n mount(image.best_path, mount_dir)\n return mount_dir\n\ndef unmount_image(mount_path: str):\n \"\"\"unmounts image from system\"\"\"\n if not path.isdir(mount_path):\n return\n\n if path.ismount(mount_path):\n umount(mount_path)\n\n os.rmdir(mount_path)\n\n\n\ndef deploy_image(image: Image, target_partition: Partition, source_part=None, io=None):\n \"\"\"deploys image to partition\"\"\"\n # Make sure partition is not busy\n if target_partition.mountpoint:\n umount(target_partition.mountpoint)\n\n mount_path = mount_image(image)\n parts_file = path.join(mount_path, \"parts\")\n try:\n\n if not path.isfile(parts_file):\n raise FileNotFoundError(\"Could not find image parts definition, \\\n image may be corrupted.\")\n\n with open(parts_file, \"r\", encoding=\"utf-8\") as _f:\n all_parts = [line for line in _f.read().strip().split(\" \") if line.strip()]\n\n print(all_parts)\n\n if len(all_parts) == 0:\n raise AssertionError(\"Image does not contain any restorable partitions\")\n\n if len(all_parts) > 1 and not source_part:\n # This function is unequipped to deal with multi-part images\n raise NotImplementedError(\"This deploy mechanism does not support \\\n deploying multiple partitions\")\n\n if source_part and source_part not in all_parts:\n raise NameError(f\"Image does not contain a partition called {source_part}, \\\n available parts: '{' '.join(all_parts)}'\")\n\n if not source_part:\n # Select the only partition\n source_part = 
all_parts[0]\n\n source_dir = path.basename(mount_path)\n root_dir = path.dirname(mount_path)\n target_device = path.basename(target_partition.path)\n if io:\n ocs_sr(\"-e1\", \"auto\", \"-e2\", \"-t\", \"-r\", \"-k\", \"-batch\", \"-scr\", \"-nogui\",\n \"-or\", root_dir, \"-f\", source_part, \"restoreparts\", source_dir, target_device, _in=io(\"in\"), _out=io(\"out\"), _err=io(\"err\"))\n else:\n # pass\n ocs_sr(\"-e1\", \"auto\", \"-e2\", \"-t\", \"-r\", \"-k\", \"-batch\", \"-scr\", \"-nogui\",\n \"-or\", root_dir, \"-f\", source_part, \"restoreparts\", source_dir, target_device, _fg=True)\n \n finally:\n unmount_image(mount_path)\n","repo_name":"LifetimeMistake/failrp","sub_path":"libs/imaging.py","file_name":"imaging.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41410273027","text":"s = '0123456789'\na = [str(i) for i in range(10)]\na += ['']\nneed = []\nfor i in s:\n for j in s:\n for g in a:\n res = int('12' + i + j + '36' + g + '1')\n if res % 273 == 0:\n need += [[res, res // 273]]\nneed.sort()\nfor i in need:\n print(*i)\n\n","repo_name":"mnkhmtv/kege","sub_path":"досрок 1/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41333155984","text":"from setuptools import setup\r\n# from codecs import open # To use a consistent encoding\r\nfrom os import path\r\n\r\nhere = path.abspath(path.dirname(__file__))\r\n\r\n# Get the long description from the relevant file\r\nwith open(path.join(here, 'README.rst')) as f:\r\n long_description = f.read()\r\n\r\n# Get the requirements from the relevant file\r\nwith open('requirements.txt') as f:\r\n required = f.read().splitlines()\r\n\r\nsetup(name='pecpy',\r\n version='0.7',\r\n description='Optimization-based proximity effect correction',\r\n long_description=long_description,\r\n url='https://git.kern.phys.au.dk/SunTune/pecpy',\r\n author='Emil Haldrup Eriksen',\r\n author_email='emher@au.dk',\r\n license='MIT',\r\n install_requires=required,\r\n include_package_data=True,\r\n packages=['pecpy'],\r\n zip_safe=False)\r\n","repo_name":"maple367/pecpy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36316238793","text":"__author__ = 'utku@hoydaa.com (Utku Utkan)'\n\nimport logging\nimport webapp2\n\nfrom google.appengine.api import memcache\nfrom google.appengine.api import urlfetch\nfrom google.appengine.runtime import apiproxy_errors\n\n\nclass ProxyHandler(webapp2.RequestHandler):\n\n def get(self, url):\n fetch_url = self.request.scheme + '://' + url\n redirect_url = memcache.get(fetch_url)\n\n if redirect_url is not None:\n fetch_url = redirect_url\n\n try:\n response = urlfetch.fetch(fetch_url)\n except (urlfetch.Error, apiproxy_errors.Error):\n logging.exception('Could not fetch URL')\n self.abort(400)\n\n self.response.status_int = response.status_code\n\n for key, value in response.headers.iteritems():\n self.response.headers[key] = value\n\n self.response.out.write(response.content)\n\n\napp = webapp2.WSGIApplication([\n (r'/proxy/(.*)', 
ProxyHandler),\n])\n","repo_name":"dobri-dobrev/hoydaa-cast","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74766575840","text":"import sys\n\n# redirect stdin to the input file before the first input() call\nsys.stdin = open(\"_창용마을무리의개수.txt\")\n\nt = int(input())\nfor test_case in range(1, t + 1):\n\n n, m = map(int, input().split()) # n = number of people, m = number of relations\n graph = [[] for _ in range(n + 1)] \n visited = [False] * (n + 1) \n\n for _ in range(m):\n v1, v2 = map(int, input().split()) \n graph[v1].append(v2)\n graph[v2].append(v1)\n\n def dfs(start):\n stack = [start]\n visited[start] = True\n\n while stack:\n current = stack.pop()\n\n for moved in graph[current]:\n if not visited[moved]:\n visited[moved] = True\n stack.append(moved)\n\n cnt = 0\n for i in range(1, n + 1):\n if not visited[i]:\n dfs(i)\n cnt += 1\n print('#{} {}'.format(test_case, cnt))\n","repo_name":"ererink/TIL","sub_path":"Algorithm/SWEA/4_창용마을무리의개수.py","file_name":"4_창용마을무리의개수.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"1909258551","text":"#!/usr/bin/env python\n#\n# Maciek Wielgus 02/Oct/2018\n\nfrom __future__ import division\nimport numpy as np\nimport pandas as pd\nimport sys,os\nfrom eat.inspect import closures as cl\nfrom eat.io import hops, util\nfrom eat.hops import util as hu\nfrom eat.polcal import polcal\nfrom eat.inspect import utils as ut\nimport ehtim as eh\nimport datetime\nfrom astropy.time import Time\nfrom eat.polcal import polcal\n#import weightedstats as ws\n\nZ2AZ = {'Z':'AZ', 'P':'PV', 'S':'SM', 'R':'SR','J':'JC', 'A':'AA','X':'AP', 'L':'LM','Y':'SP'}\n\n\"\"\"Mean, weighted mean, median, and weighted median.\nWeightedStats includes four functions (mean, weighted_mean, median,\nweighted_median) which accept lists as arguments, and two functions\n(numpy_weighted_mean, numpy_weighted_median) which accept either lists\nor numpy arrays.\nExample:\n import weightedstats as ws\n my_data = [1, 2, 3, 4, 5]\n my_weights = [10, 1, 1, 1, 9]\n # Ordinary (unweighted) mean and median\n ws.mean(my_data) # equivalent to ws.weighted_mean(my_data)\n ws.median(my_data) # equivalent to ws.weighted_median(my_data)\n\n # Weighted mean and median\n ws.weighted_mean(my_data, weights=my_weights)\n ws.weighted_median(my_data, weights=my_weights)\n # Special weighted mean and median functions for use with numpy arrays\n ws.numpy_weighted_mean(my_data, weights=my_weights)\n ws.numpy_weighted_median(my_data, weights=my_weights)\n\"\"\"\n\ndef mean(data):\n \"\"\"Calculate the mean of a list.\"\"\"\n return sum(data) / float(len(data))\n\ndef weighted_mean(data, weights=None):\n \"\"\"Calculate the weighted mean of a list.\"\"\"\n if weights is None:\n return mean(data)\n total_weight = float(sum(weights))\n weights = [weight / total_weight for weight in weights]\n w_mean = 0\n for i, weight in enumerate(weights):\n w_mean += weight * data[i]\n return w_mean\n\ndef numpy_weighted_mean(data, weights=None):\n \"\"\"Calculate the weighted mean of an array/list using numpy.\"\"\"\n import numpy as np\n weights = np.array(weights).flatten() / float(sum(weights))\n return np.dot(np.array(data), weights)\n\ndef median(data):\n \"\"\"Calculate the median of a list.\"\"\"\n data.sort()\n num_values = len(data)\n half = num_values // 2\n if num_values % 2:\n return data[half]\n return 0.5 * (data[half-1] + data[half])\n\ndef 
weighted_median(data, weights=None):\n \"\"\"Calculate the weighted median of a list.\"\"\"\n data=list(data)\n if weights is None:\n return median(data)\n else: weights=list(weights)\n midpoint = 0.5 * sum(weights)\n if any([j > midpoint for j in weights]):\n return data[weights.index(max(weights))]\n #if ret is None: return 1.\n #else: return ret\n if any([j > 0 for j in weights]):\n sorted_data, sorted_weights = zip(*sorted(zip(data, weights)))\n cumulative_weight = 0\n below_midpoint_index = 0\n while cumulative_weight <= midpoint:\n below_midpoint_index += 1\n cumulative_weight += sorted_weights[below_midpoint_index-1]\n cumulative_weight -= sorted_weights[below_midpoint_index-1]\n if cumulative_weight - midpoint < sys.float_info.epsilon:\n bounds = sorted_data[below_midpoint_index-2:below_midpoint_index]\n return sum(bounds) / float(len(bounds))\n #if ret is None: return 1.\n #else: return ret\n return sorted_data[below_midpoint_index-1]\n #if ret is None: return 1.\n #else: return ret\n\ndef numpy_weighted_median(data, weights=None):\n \"\"\"Calculate the weighted median of an array/list using numpy.\"\"\"\n import numpy as np\n if weights is None:\n return np.median(np.array(data).flatten())\n data, weights = np.array(data).flatten(), np.array(weights).flatten()\n if any(weights > 0):\n sorted_data, sorted_weights = map(np.array, zip(*sorted(zip(data, weights))))\n midpoint = 0.5 * sum(sorted_weights)\n if any(weights > midpoint):\n return (data[weights == np.max(weights)])[0]\n cumulative_weight = np.cumsum(sorted_weights)\n below_midpoint_index = np.where(cumulative_weight <= midpoint)[0][-1]\n if cumulative_weight[below_midpoint_index] - midpoint < sys.float_info.epsilon:\n return np.mean(sorted_data[below_midpoint_index:below_midpoint_index+2])\n return sorted_data[below_midpoint_index+1]\n\ndef poly_from_str(strcoeffs):\n '''from string with coefficients to polynomial\n '''\n coeffs = list(map(float, strcoeffs.split(',')))\n return np.polynomial.polynomial.Polynomial(coeffs)\n\ndef apply_correction(corrected,ratios,station):\n '''applies polarimetric correction from 'ratios' df to 'corrected' df, but only to chosen station\n '''\n for cou,row in ratios.iterrows():\n if row.station==station:\n corrected_foo1=corrected[(corrected.mjd>=row.mjd_start)&(corrected.mjd<=row.mjd_stop)&(corrected.baseline.str[0]==row.station)].copy()\n corrected_foo2=corrected[(corrected.mjd>=row.mjd_start)&(corrected.mjd<=row.mjd_stop)&(corrected.baseline.str[1]==row.station)].copy()\n corrected_rest=corrected[~((corrected.mjd>=row.mjd_start)&(corrected.mjd<=row.mjd_stop)&(corrected.baseline.str.contains(row.station)))].copy()\n polyf = poly_from_str(str(row.ratio_phas))\n delta_phas1 = polyf(corrected_foo1['mjd'] - row.mjd_start)\n delta_phas2 = polyf(corrected_foo2['mjd'] - row.mjd_start)\n corrected_foo1['phaseL'] = corrected_foo1['phaseL'] +delta_phas1\n corrected_foo2['phaseL'] = corrected_foo2['phaseL'] -delta_phas2\n polyamp = poly_from_str(str(row.ratio_amp))\n delta_amp1 = polyamp(corrected_foo1['mjd'] - row.mjd_start)\n delta_amp2 = polyamp(corrected_foo2['mjd'] - row.mjd_start)\n corrected_foo1['ampL'] = corrected_foo1['ampL']*delta_amp1\n corrected_foo2['ampL'] = corrected_foo2['ampL']*delta_amp2\n corrected = pd.concat([corrected_foo1,corrected_foo2,corrected_rest],ignore_index=True)\n corrected['RLphase'] = np.mod( corrected['phaseR'] - corrected['phaseL'] +180,360)-180\n corrected['RLphaseErr'] = np.sqrt(1./np.asarray(corrected.snrL)**2 + 1./np.asarray(corrected.snrR)**2)*180./np.pi\n 
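# R/L amplitude ratio and its uncertainty, from the quadrature sum of the inverse per-polarization SNRs\n 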
corrected['AmpRatio'] = np.asarray(corrected.ampR)/np.asarray(corrected.ampL)\n corrected['AmpRatioErr'] = corrected['AmpRatio']*np.sqrt(np.asarray(1./corrected['snrL'])**2 + np.asarray(1./corrected['snrR'])**2)\n return corrected\n\n\ndef get_polcal(path_data,path_out,degSMA=3,degAPEX=1,snr_cut=1.):\n\n if path_data.endswith('.pic'):\n vis = pd.read_pickle(path_data)\n vis.drop(list(vis[vis.baseline.str.contains('R')].index.values),inplace=True)\n\n elif (path_data.endswith('.hdf')) or (path_data.endswith('.h5')):\n vis=pd.read_hdf(path_data)\n vis.drop(list(vis[vis.baseline.str.contains('R')].index.values),inplace=True)\n\n else: raise Exception('Use .pic or .h5 or .hdf files!')\n\n #PREPARE DATASET FOR POLCAL GAINS CALCULATION\n vis=vis[vis.snr>snr_cut].copy()\n vis = vis[vis.polarization.str[0]==vis.polarization.str[1]]\n #vis = vis[vis.band==band]\n visRR = vis[vis.polarization=='RR']\n visLL = vis[vis.polarization=='LL']\n visRR2,visLL2 = ut.match_frames(visRR.copy(),visLL.copy(),['scan_id','band','baseline'])\n visRR2['ampR'] = visRR2['amp']\n visRR2['ampL'] = visLL2['amp']\n visRR2['phaseR'] = visRR2['phase']\n visRR2['phaseL'] = visLL2['phase']\n visRR2['sigmaR'] = visRR2['sigma']\n visRR2['sigmaL'] = visLL2['sigma']\n visRR2['snrL'] = visLL2['snr']\n visRR2['snrR'] = visRR2['snr']\n visRR2['RLphase'] = np.mod(visRR2['phaseR'] - visRR2['phaseL'] +180,360)-180\n visRR2['RLphaseErr'] = np.sqrt(1./np.asarray(visRR2['snr'])**2 + 1./np.asarray(visLL2['snr'])**2)*180./np.pi\n visRR2['AmpRatio'] = np.asarray(visRR2.ampR)/np.asarray(visRR2.ampL)\n visRR2['AmpRatioErr'] = visRR2['AmpRatio']*np.sqrt(np.asarray(1./visRR2['snrL'])**2 + np.asarray(1./visRR2['snrR'])**2)\n visRR2['baseline'] = list(map(str,visRR2['baseline']))\n visRR2=visRR2.dropna(subset=['ampR','ampL','phaseR','phaseL','sigmaR','sigmaL'])\n corrected = visRR2.copy()\n ##-------------------------------------------------------\n #INITIALIZE POLCAL RATIOS TABLE\n stationL = list(set(list(map(lambda x: x[0],vis.baseline))+list(map(lambda x: x[1],vis.baseline))))\n exptL = list(vis.expt_no.unique())\n bandL = list(vis.band.unique())\n ratios = pd.DataFrame(columns = ['station','mjd_start','mjd_stop','ratio_amp', 'ratio_phas'])\n #time margin for calibration [h]\n toff = 2./24.\n ##-------------------------------------------------------\n #ALMA CALIBRATION\n #ALMA IS ASSUMED TO HAVE 1+0j gains and used as reference\n sourLA = list(vis[vis.baseline.str.contains('A')].source.unique())\n LINE = {'station':'A','mjd_start': vis.mjd.min() - toff,\n 'mjd_stop': vis.mjd.max() + toff,'ratio_amp': \"%.3f\" % 1.,\n 'ratio_phas': \"%.3f\" % -0.}\n print(\"ALMA assumed to have perfect gains\")\n print(LINE)\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n corrected = apply_correction(corrected,ratios,'A')\n ##-------------------------------------------------------\n #LMT CALIBRATION\n #LMT is calibrated with a single value for all nights from ALMA-LMT baseline\n sourLL = list(vis[vis.baseline.str.contains('L')].source.unique())\n base='AL'\n foo = visRR2[visRR2['baseline']==base]\n NumScans=np.shape(foo)[0]\n print(\"LMT estimated from \"+str(NumScans)+\" scans\")\n if NumScans > 0:\n wph =numpy_weighted_median(foo.RLphase, weights=1./np.asarray(foo.RLphaseErr))\n if wph is None: wph = 0.\n wam = numpy_weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n LINE={'station':'L',\n 'mjd_start': vis.mjd.min() - toff,\n 'mjd_stop': vis.mjd.max() + toff,\n 'ratio_amp': \"%.3f\" % 
wam,\n 'ratio_phas': \"%.3f\" % -wph}\n print(LINE)\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n corrected = apply_correction(corrected,ratios,'L')\n else:\n print(str(NumScans)+\" scans found for LMT\")\n wph = 0.; wam = 1.\n ##-------------------------------------------------------\n #PV CALIBRATION\n #PV is calibrated with a single value for all nights from ALMA-PV baseline\n sourLP = list(vis[vis.baseline.str.contains('P')].source.unique())\n base='AP'\n foo = visRR2[visRR2['baseline']==base]\n NumScans=np.shape(foo)[0]\n print(\"PV estimated from \"+str(NumScans)+\" scans\")\n if NumScans > 0:\n wph =numpy_weighted_median(foo.RLphase, weights=1./np.asarray(foo.RLphaseErr))\n if wph is None: wph = 0.\n wam = numpy_weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n\n LINE={'station':'P',\n 'mjd_start': vis.mjd.min() - toff,\n 'mjd_stop': vis.mjd.max() + toff,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"%.3f\" % -wph}\n print(LINE)\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n corrected = apply_correction(corrected,ratios,'P')\n else:\n print(str(NumScans)+\" scans found for PV\")\n wph = 0.; wam = 1.\n ##-------------------------------------------------------\n #SPT CALIBRATION\n #SPT is calibrated with single value from ALMA baseline, but on night 3597 LMT is used instead\n sourLY = list(vis[vis.baseline.str.contains('Y')].source.unique())\n base='AY'\n foo = visRR2[(visRR2['baseline']==base)&(visRR2.expt_no!=3597)]\n NumScans=np.shape(foo)[0]\n print(\"SPT estimated from \"+str(NumScans)+\" scans\")\n if NumScans>0:\n wph =numpy_weighted_median(foo.RLphase, weights=1./np.asarray(foo.RLphaseErr))\n if wph is None: wph = 0.\n wam = numpy_weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n\n LINE={'station':'Y',\n 'mjd_start': foo.mjd.min() - toff,\n 'mjd_stop': foo.mjd.max() + toff,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"%.3f\" % -wph}\n print(LINE)\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n else:\n print(str(NumScans)+\" scans found for SPT on 3598-3601\")\n wph = 0.; wam = 1.\n\n ###ON FIRST NIGHT SPT IS CALIBRATED WITHOUT ALMA, WITH LMT\n\n base='LY'\n foo = visRR2[(visRR2['baseline']==base)&(visRR2.expt_no==3597)]\n NumScans=np.shape(foo)[0]\n print(\"SPT estimated from \"+str(NumScans)+\" scans\")\n if NumScans>0:\n wph =numpy_weighted_median(foo.RLphase, weights=1./np.asarray(foo.RLphaseErr))\n if wph is None: wph = 0.\n wam = numpy_weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n\n doo = float(ratios[(ratios.station=='L')].ratio_phas)\n goo = -wph+float(doo)\n if goo is None: goo = 0.\n LINE={'station':'Y',\n 'mjd_start': foo.mjd.min() - toff,\n 'mjd_stop': foo.mjd.max() + toff,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"%.3f\" % goo}\n print(LINE)\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n else:\n print(str(NumScans)+\" scans found for SPT on 3597\")\n wph = 0.; wam = 1.\n corrected = apply_correction(corrected,ratios,'Y')\n ##-------------------------------------------------------\n #SMT is calibrated with single value per night, from ALMA-SMT baseline\n #05/Oct/2018 SMT 3601 end of SGRA track added linear slope fit\n #\n sourLZ = list(vis[vis.baseline.str.contains('Z')].source.unique())\n exptL = list(vis.expt_no.unique())\n base='AZ'\n foo = visRR2[visRR2['baseline']==base]\n NumScans=np.shape(foo)[0]\n 
print(\"SMT amp estimated from \"+str(NumScans)+\" scans\")\n if NumScans>0:\n wam = numpy_weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n else: wam=1.\n\n for expt in exptL:\n foo2 = foo[(foo.expt_no==expt)&(foo.mjd<57854.368)]\n foo_for_mjd = visRR2[(visRR2['expt_no']==expt)&(visRR2.mjd<57854.368)]\n NumScans=np.shape(foo2)[0]\n print(\"SMT phase estimated from \"+str(NumScans)+\" scans\")\n if NumScans>0:\n wph =numpy_weighted_median(foo2.RLphase, weights=1./np.asarray(foo2.RLphaseErr))\n if wph is None: wph = 0.\n\n mjd_start = foo_for_mjd.mjd.min() - toff\n mjd_stop = np.minimum(foo_for_mjd.mjd.max() + toff,57854.368)\n LINE={'station':'Z',\n 'mjd_start': mjd_start,\n 'mjd_stop': mjd_stop,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"%.3f\" % -wph}\n print(LINE)\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n else:\n print(str(NumScans)+\" scans found for SMT on expt \"+str(expt))\n wph = 0.\n\n #### FIXING LINEAR DRIFT ON SMT, 3601, SGRA\n foo2 = foo[(foo.mjd>57854.368)]\n NumScans=np.shape(foo2)[0]\n print(\"SMT phase estimated from \"+str(NumScans)+\" scans\")\n if NumScans>0:\n foo_for_mjd = visRR2[(visRR2.mjd>57854.368)]\n mjd_start = 57854.368\n mjd_stop = foo_for_mjd.mjd.max() + toff\n wph =numpy_weighted_median(foo2.RLphase, weights=1./np.asarray(foo2.RLphaseErr))\n fit_coef = np.polyfit(np.asarray(foo2.mjd) - mjd_start, np.unwrap(np.asarray(foo2.RLphase)*np.pi/180)*180/np.pi, deg=1, full=False, w=1./np.asarray(foo2['RLphaseErr']))\n if wph is None: wph = 0.\n\n LINE={'station':'Z',\n 'mjd_start': mjd_start,\n 'mjd_stop': mjd_stop,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"{}, {}\".format( \"%.3f\" % -fit_coef[1], \"%.3f\" % -fit_coef[0])}\n\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n print(LINE)\n else: wph = 0.; wam = 1.\n corrected = apply_correction(corrected,ratios,'Z')\n ##-------------------------------------------------------\n #APEX is calibrated with linear functions on predefined time intervals\n #it's often just 1 interval per night, but e.g. 
3601 is 4 segments\n\n '''\n #OLD APEX CALIBRATION, I KEEP IT HERE FOR NOW - MW 05/Oct/2018\n sourLX = list(vis[vis.baseline.str.contains('X')].source.unique())\n exptL = list(vis.expt_no.unique())\n\n base='AX'\n otherB='L'\n fooAX = visRR2[visRR2['baseline']==base]\n fooXL = visRR2[visRR2['baseline']=='X'+otherB].copy()\n fooLX=fooXL.copy()\n fooLX['RLphase'] = -fooXL['RLphase']\n foo=pd.concat([fooAX,fooLX],ignore_index=True)\n\n #fooAX = visRR2[visRR2['baseline']==base]\n #fooXL = visRR2[visRR2['baseline']=='XL']\n #fooXL['RLphase'] = -fooXL['RLphase']\n #foo=pd.concat([fooAX,fooXL],ignore_index=True)\n foo = foo[foo.amp==foo.amp]\n foo = foo[foo.phase==foo.phase]\n\n wam =ws.weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n for expt in exptL:\n foo2 = foo[foo.expt_no==expt]\n if expt==3601:\n #print(foo2.source.unique())\n sources3601a = ['OJ287', '1055+018']\n sources3601b = ['M87', '3C279']\n sources3601c = ['J1924-2914', 'SGRA']\n foo2a = foo2[list(map(lambda x: x in sources3601a, foo2.source))]\n foo2b = foo2[list(map(lambda x: x in sources3601b, foo2.source))]\n foo2c = foo2[list(map(lambda x: x in sources3601c, foo2.source))&(foo2.mjd<57854.58368287)]\n foo2d = foo2[(foo2.mjd>57854.58368287)]\n mjd_start_a = foo2a.mjd.min() - 0.005\n mjd_stop_a = foo2a.mjd.max() + 0.005\n mjd_start_b = foo2b.mjd.min() - 0.005\n mjd_stop_b = foo2b.mjd.max() + 0.005\n mjd_start_c = foo2c.mjd.min() - 0.005\n mjd_stop_c = foo2c.mjd.max() + 0.004\n mjd_start_d = foo2d.mjd.min() - 0.004\n mjd_stop_d = foo2d.mjd.max() + 0.005\n\n fit_coef_a = np.polyfit(np.asarray(foo2a.mjd) - mjd_start_a, np.unwrap(np.asarray(foo2a.RLphase)*np.pi/180)*180/np.pi, deg=1, full=False, w=1./np.asarray(foo2a['RLphaseErr']))\n fit_coef_b = np.polyfit(np.asarray(foo2b.mjd) - mjd_start_b, np.unwrap(np.asarray(foo2b.RLphase)*np.pi/180)*180/np.pi, deg=1, full=False, w=1./np.asarray(foo2b['RLphaseErr']))\n fit_coef_c = np.polyfit(np.asarray(foo2c.mjd) - mjd_start_c, np.unwrap(np.asarray(foo2c.RLphase)*np.pi/180)*180/np.pi, deg=1, full=False, w=1./np.asarray(foo2c['RLphaseErr']))\n fit_coef_d = np.polyfit(np.asarray(foo2d.mjd) - mjd_start_d, np.unwrap(np.asarray(foo2d.RLphase)*np.pi/180)*180/np.pi, deg=1, full=False, w=1./np.asarray(foo2d['RLphaseErr']))\n\n wph_d = ws.weighted_median(foo2d.RLphase, weights=1./np.asarray(foo2d.RLphaseErr))\n #foo=-fit_coef_d[1]#+float(ratios[ratios.station=='L'].ratio_phas)\n foo = -wph_d + float(ratios[(ratios.station==otherB)&(ratios.mjd_stop>57854.58368287)].ratio_phas)\n\n #hacky, it calibrates to LMT\n #foo=-fit_coef_d[1]+float(ratios[ratios.station=='L'].ratio_phas)\n\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'X',\n 'mjd_start': mjd_start_a,\n 'mjd_stop': mjd_stop_a,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"{}, {}\".format( \"%.3f\" % -fit_coef_a[1], \"%.3f\" % -fit_coef_a[0])}])],ignore_index=True)\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'X',\n 'mjd_start': mjd_start_b,\n 'mjd_stop': mjd_stop_b,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"{}, {}\".format( \"%.3f\" % -fit_coef_b[1], \"%.3f\" % -fit_coef_b[0])}])],ignore_index=True)\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'X',\n 'mjd_start': mjd_start_c,\n 'mjd_stop': mjd_stop_c,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"{}, {}\".format( \"%.3f\" % -fit_coef_c[1], \"%.3f\" % -fit_coef_c[0])}])],ignore_index=True)\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'X',\n 'mjd_start': mjd_start_d,\n 'mjd_stop': mjd_stop_d,\n 'ratio_amp': \"%.3f\" % wam,\n 
#'ratio_phas': \"{}, {}\".format( \"%.3f\" % -fit_coef_d[1], \"%.3f\" % -fit_coef_d[0] )\n 'ratio_phas': \"%.3f\" % foo\n }])],ignore_index=True)\n else:\n foo_for_mjd = visRR2[(visRR2['expt_no']==expt)]\n mjd_start = foo_for_mjd.mjd.min() - toff\n mjd_stop = foo_for_mjd.mjd.max() + toff\n fit_coef = np.polyfit(np.asarray(foo2.mjd) - mjd_start, np.unwrap(np.asarray(foo2.RLphase)*np.pi/180)*180/np.pi, deg=1, full=False, w=1./np.asarray(foo2['RLphaseErr']))\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'X',\n 'mjd_start': mjd_start,\n 'mjd_stop': mjd_stop,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': \"{}, {}\".format( \"%.3f\" % -fit_coef[1], \"%.3f\" % -fit_coef[0])}])],ignore_index=True)\n '''\n\n mjd_startAP = [57847.92,57848.06,57848.25,57849.00,57850.15,57852.95,57853.90,57854.02,57854.37]\n mjd_stopAP = [57848.06,57848.25,57848.68,57849.64,57850.85,57853.65,57854.02,57854.37,57854.66]\n\n deg=degAPEX\n strratio = ('{}, '*(deg+1))[:-2]\n\n foo=corrected\n fooAX = foo[foo['baseline']=='AX'].copy()\n if 'XL' in list(foo.baseline.unique()):\n fooXL = foo[foo['baseline']=='XL'].copy()\n fooLX=fooXL.copy()\n fooLX['RLphase'] = -fooXL['RLphase']\n fooLX['baseline'] = 'LX'\n foo=pd.concat([fooAX,fooLX],ignore_index=True)\n elif 'LX' in list(foo.baseline.unique()):\n fooLX = foo[foo['baseline']=='LX'].copy()\n foo=pd.concat([fooAX,fooLX],ignore_index=True)\n else:\n foo=fooAX\n foo=foo.sort_values('mjd').copy()\n NumScans=np.shape(foo)[0]\n print(\"APEX amp estimated from \"+str(NumScans)+\" scans\")\n if NumScans>0:\n wam =weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n else: wam=1.\n for cou, mjd_sta in enumerate(mjd_startAP):\n try:\n mjd_sto=mjd_stopAP[cou]\n #print([mjd_sta,mjd_sto])\n foo2=foo[(foo.mjd>mjd_sta)&(foo.mjd<=mjd_sto)]\n NumScans=np.shape(foo2)[0]\n print(\"APEX phase estimated from \"+str(NumScans)+\" scans\")\n if NumScans>deg:\n fit_coef = np.polyfit(np.asarray(foo2.mjd) - mjd_sta, np.unwrap(np.asarray(foo2.RLphase)*np.pi/180)*180/np.pi, deg=deg, full=False, w=1./np.asarray(foo2['RLphaseErr']))\n listcoef = [\"%.3f\" % -fit_coef[cou] for cou in range(deg,-1,-1)]\n else: listcoef = [\"0.\"]*(deg+1)\n LINE={'station':'X',\n 'mjd_start': mjd_sta,\n 'mjd_stop': mjd_sto,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': strratio.format(*listcoef) }\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n print(LINE)\n except: continue\n corrected = apply_correction(corrected,ratios,'X')\n\n ##-------------------------------------------------------\n #For SMA we manually specify mjd ranges for the 3rd order polynomial fitting\n\n mjd_startV = [57848.02,57848.42,57849.10,57849.40,57850.40,57853.00,57853.07,57853.18,57853.42,57854.10,57854.40]\n mjd_stopV = [57848.42,57848.80,57849.40,57849.70,57850.90,57853.07,57853.18,57853.42,57853.70,57854.40,57854.70]\n\n deg=degSMA\n strratio = ('{}, '*(deg+1))[:-2]\n #################\n foo=corrected[corrected.baseline.str[1]=='S']\n #only use ALMA, LMT, SMT\n foo=foo[(foo.baseline=='AS')|(foo.baseline=='LS')|(foo.baseline=='ZS')]\n foo=foo.sort_values('mjd').copy()\n NumScans=np.shape(foo)[0]\n if NumScans>0:\n wam =weighted_median(foo.AmpRatio, weights=1./np.asarray(foo.AmpRatioErr))\n if wam is None: wam = 1.\n else: wam=1.\n for cou, mjd_sta in enumerate(mjd_startV):\n try:\n mjd_sto=mjd_stopV[cou]\n foo2=foo[(foo.mjd>mjd_sta)&(foo.mjd<=mjd_sto)]\n NumScans=np.shape(foo2)[0]\n print(\"SMA estimated from \"+str(NumScans)+\" scans\")\n if NumScans>deg:\n fit_coef = 
np.polyfit(np.asarray(foo2.mjd) - mjd_sta, np.unwrap(np.asarray(foo2.RLphase)*np.pi/180)*180/np.pi, deg=deg, full=False, w=1./np.asarray(foo2['RLphaseErr']))\n listcoef = [\"%.3f\" % -fit_coef[cou] for cou in range(deg,-1,-1)]\n else: listcoef = [\"0.\"]*(deg+1)\n LINE={'station':'S',\n 'mjd_start': mjd_sta,\n 'mjd_stop': mjd_sto,\n 'ratio_amp': \"%.3f\" % wam,\n 'ratio_phas': strratio.format(*listcoef) }\n ratios = pd.concat([ratios,pd.DataFrame([LINE])],ignore_index=True)\n print(LINE)\n except: continue\n corrected = apply_correction(corrected,ratios,'S')\n\n ##-------------------------------------------------------\n #JCMT is singlepol, SMAR is not really used, so these get 1+0j correction\n\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'J',\n 'mjd_start': vis.mjd.min() - toff,\n 'mjd_stop': vis.mjd.max() + toff,\n 'ratio_amp': \"%.3f\" % 1.,\n 'ratio_phas': \"%.3f\" % -0.}])])\n ratios = pd.concat([ratios,pd.DataFrame([{'station':'R',\n 'mjd_start': vis.mjd.min() - toff,\n 'mjd_stop': vis.mjd.max() + toff,\n 'ratio_amp': \"%.3f\" % 1.,\n 'ratio_phas': \"%.3f\" % -0.}])])\n\n ratios2 = ratios.copy()\n ratios2['station']=list(map(lambda x: Z2AZ[x],ratios2['station']))\n ratios2[['station','mjd_start','mjd_stop','ratio_amp','ratio_phas']].to_csv(path_out,index=False)\n return ratios2\n\n##################################################################################################################################\n########################## Main FUNCTION ########################################################################################\n##################################################################################################################################\ndef main(path_data,path_out,degSMA=3,degAPEX=1,snr_cut=1.):\n print(\"********************************************************\")\n print(\"******************GENERATE POLCAL***********************\")\n print(\"********************************************************\")\n\n get_polcal(path_data,path_out,degSMA=degSMA,degAPEX=degAPEX,snr_cut=snr_cut)\n return 0\n\nif __name__=='__main__':\n\n if (\"-h\" in sys.argv) or (\"--h\" in sys.argv):\n print(\"generating polcal csv file\")\n sys.exit()\n\n if \"--datadir\" in sys.argv:\n for a in range(0, len(sys.argv)):\n if(sys.argv[a] == '--datadir'):\n path_data = sys.argv[a+1]\n else:\n raise Exception(\"must provide data directory!\")\n\n if \"--outpath\" in sys.argv:\n for a in range(0, len(sys.argv)):\n if(sys.argv[a] == '--outpath'):\n path_out = sys.argv[a+1]\n else: path_out='polcal.csv'\n\n if \"--degSMA\" in sys.argv:\n for a in range(0, len(sys.argv)):\n if(sys.argv[a] == '--degSMA'):\n degSMA = int(sys.argv[a+1])\n else: degSMA = 3\n\n if \"--degAPEX\" in sys.argv:\n for a in range(0, len(sys.argv)):\n if(sys.argv[a] == '--degAPEX'):\n degAPEX = int(sys.argv[a+1])\n else: degAPEX = 1\n\n if \"--snr_cut\" in sys.argv:\n for a in range(0, len(sys.argv)):\n if(sys.argv[a] == '--snr_cut'):\n snr_cut = float(sys.argv[a+1])\n else: snr_cut = 1.\n\n main(path_data,path_out,degSMA=degSMA,degAPEX=degAPEX,snr_cut=snr_cut)\n","repo_name":"sao-eht/eat","sub_path":"bin/generate_polcal_table.py","file_name":"generate_polcal_table.py","file_ext":"py","file_size_in_byte":28507,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"54"} +{"seq_id":"8546193329","text":"from re import findall\n\nprint('''\n\n█▄▀ █▀█ ▀█▀ ▄█ █▄▀   █░█ █▄░█ █▄▀ █▄░█ █▀█ █░█░█ █▄░█\n█░█ █▄█ ░█░ ░█ █░█   █▄█ █░▀█ █░█ █░▀█ █▄█ ▀▄▀▄▀ █░▀█\n\n\n===========================\nvk: 
https://vk.com/kot1kunknown\ngithub: https://github.com/Kot1kUnknown\n===========================\n\n\n\t''')\n\nkeysen = {\n\t'A':\"AAA\", 'B':\"AAА\", 'C':\"AAΑ\",\n\t'D':\"AАA\", 'E':\"AАА\", 'F':\"AАΑ\",\n\t'G':\"AΑA\", 'H':\"AΑА\", 'I':\"AΑΑ\",\n\t'J':\"АAA\", 'K':\"АAА\", 'L':\"АAΑ\",\n\t'M':\"ААA\", 'N':\"ААА\", 'O':\"ААΑ\",\n\t'P':\"АΑA\", 'Q':\"АΑА\", 'R':\"АΑΑ\",\n\t'S':\"ΑAA\", 'T':\"ΑAА\", 'U':\"ΑAΑ\",\n\t'V':\"ΑАA\", 'W':\"ΑАА\", 'X':\"ΑАΑ\",\n\t'Y':\"ΑΑA\", 'Z':\"ΑΑА\", ' ':\"ΑΑΑ\",\n}\n\ncryptmode = input(\"[E]ncrypt|[D]ecrypt: \").upper()\n\nif cryptmode not in ['E','D']:\n\tprint(\"Error: mode is not found!\"); raise SystemExit\n\nstartMessage = input(\"Write your message: \").upper()\n\ndef regular(text):\n\ttemplate = r\"\\w{3}\"\n\treturn findall(template, text)\n\ndef encryptDecrypt(mode, message, final = \"\"):\n\tif mode == 'E':\n\t\tfor symbol in message:\n\t\t\tif symbol in keysen:\n\t\t\t\tfinal += keysen[symbol]\n\telse:\n\t\tfor threeSymbols in regular(message):\n\t\t\tfor key in keysen:\n\t\t\t\tif threeSymbols == keysen[key]:\n\t\t\t\t\tfinal += key\n\n\treturn final\nprint(\"Your message:\", encryptDecrypt(cryptmode, startMessage))\n","repo_name":"Kot1kUnknown/py-crypter","sub_path":"kd.py","file_name":"kd.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36557523789","text":"from nltk.tokenize import sent_tokenize\n\n\ndef lines(a, b):\n \"\"\"Return lines in both a and b\"\"\"\n\n # split each file into lines\n a = a.split('\\n')\n b = b.split('\\n')\n # list to hold common lines\n commonList = []\n length = len(a)\n\n # loop thru the split lines\n for i in range(length):\n # try, except block to see if it exists in both.\n try:\n common = b.index(a[i])\n commonList.append(a[i])\n except:\n continue\n\n # remove duplicates while preserving order (removing items while indexing the shrinking list can raise IndexError)\n commonList = list(dict.fromkeys(commonList))\n\n # return the list with common lines\n return commonList\n\n\ndef sentences(a, b):\n \"\"\"Return sentences in both a and b\"\"\"\n\n # get list of sentences.\n a = sent_tokenize(a)\n b = sent_tokenize(b)\n\n # list to hold common lines\n commonList = []\n length = len(a)\n\n # loop thru the split lines\n for i in range(length):\n # try, except block to see if it exists in both.\n try:\n common = b.index(a[i])\n commonList.append(a[i])\n except:\n continue\n\n # remove duplicates while preserving order (removing items while indexing the shrinking list can raise IndexError)\n commonList = list(dict.fromkeys(commonList))\n\n # return the list with common lines\n return commonList\n\n\ndef getSubStrings(n, list):\n # list for substrings.\n subStrings = []\n\n # loop thru argument and generate substrings\n for strings in list:\n leng = len(strings)\n for i in range(leng):\n if i + n > leng:\n break\n else:\n subStrings.append(strings[i:i+n])\n\n # return list of substrings\n return subStrings\n\n\ndef substrings(a, b, n):\n \"\"\"Return substrings of length n in both a and b\"\"\"\n # compares 2 files based on number of substrings of length n in common\n\n # split each file into lines\n a = a.split()\n b = b.split()\n\n # get substrings for both a and b\n aSubstring = getSubStrings(n, a)\n bSubstring = getSubStrings(n, b)\n\n # list for common substrings\n commonSubStrings = []\n\n # loop thru substring to get common substrings in A and B\n for substring in aSubstring:\n if substring in bSubstring:\n 
commonSubStrings.append(substring)\n\n # convert list to set and return it\n return set(commonSubStrings)\n","repo_name":"Dueonkim89/CS50","sub_path":"Week 7/similarities/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10533422694","text":"import asyncio\nimport os\nimport random\n\nfrom dotenv import load_dotenv\nfrom loguru import logger\n\nfrom mi.ext import commands\nfrom mi.framework import Note\nfrom mi.framework.router import Router\nfrom mi.utils import check_multi_arg, get_unicode_emojis\nfrom mi.wrapper.file import MiFile\n\nload_dotenv()\n\nenv = os.environ\nTOKEN = env.get(\"TOKEN\")\nURL = env.get(\"URL\")\n\nEXTENSIONS = ['cogs.basic']\n\nif not check_multi_arg(TOKEN, URL):\n raise Exception(\"Please provide both TOKEN and URL\")\n\n\nasync def connect_channel(ws):\n await Router(ws).connect_channel(['global', 'main'])\n\n\nclass MyBot(commands.Bot):\n def __init__(self):\n super().__init__()\n for extension in EXTENSIONS:\n self.load_extension(extension)\n\n async def on_drive_file_created(self, msg):\n print(msg)\n\n async def on_ready(self, ws):\n await connect_channel(ws)\n folder_name = 'test'\n await self.client.drive.folder.action.create(folder_name)\n folders = await self.client.drive.action.get_folders()\n folder = [i for i in folders if i.name == folder_name][0]\n\n await self.client.note.send('hello', files=[\n MiFile(path='/home/yupix/unknown.png', comment='test~', name=\"test\", folder_id=folder.id),\n MiFile(file_id='123456789')\n ])\n logger.success(f'connected {self.user.name}#{self.user.id}')\n\n async def on_reconnect(self, ws):\n await connect_channel(ws)\n\n async def on_message(self, note: Note):\n if note.emojis:\n unicode_emoji = get_unicode_emojis(note.content)\n emoji = random.choice([i.name for i in note.emojis] + unicode_emoji)\n await note.action.reaction.add(f':{emoji}:')\n logger.info(f'{note.author.name}: {note.content}')\n\n\nif __name__ == '__main__':\n bot = MyBot()\n asyncio.run(bot.start(URL, TOKEN))\n","repo_name":"yupix/Mi.py","sub_path":"examples/cog_example/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"22136815768","text":"from DataAnalysis_Functions import *\nimport matplotlib.pyplot as plt\nfrom math import pi\nimport numpy as np\n\nfpath = './220313_FiberBenchmark_data.csv'\n\ndatestr = '220313'\n\nPlotFilter = 1\n\nlbl = ['Fiber A Before Cleaning', 'Fiber A After Cleaning','Fiber B', 'Fiber C', 'Fiber D Backwards', 'Fiber E']\n\ntistr = ['12:33:00', '12:40:00', '13:08:00', '13:39:00', '14:21:00', '14:50:00']\n\ntfstr = ['12:38:00','12:45:00', '13:18:00', '13:52:00', '14:30:00', '14:58:00']\n\n################################\n# Don't change anything below this line\n################################\n\n# Grab data\ndata = data_read_format(fpath, datestr)\ndc = data.columns\nprint('Data Headers:\\n', dc)\nfigi = 1 # Figure iterator\n\n# Power Plot\nif 'Power' in dc:\n\tplt.figure(figi)\n\tfigi += 1\n\tplt.subplot(211)\n\tplt.title(datestr + ' Power Data')\n\tplt.xlabel('Time (MDT)')\n\tplt.ylabel('Power [W]')\n\tplt.scatter(data['Time'], data['Power'])\n\t\n\tif PlotFilter == 1:\n\t\tfor i in range(0, len(lbl)):\n\t\t\tdataf = data_filter(data, datestr, tistr[i], tfstr[i])\n\t\t\t\n\t\t\tplt.scatter(dataf['Time'], dataf['Power'])\n\t\t\t\n\nif 'PyrE' in 
dc:\n\tplt.subplot(212)\n\tplt.title(datestr + ' Pyrheliometer Data')\n\tplt.xlabel('Time (MDT)')\n\tplt.ylabel('Power (W/m$^2$)')\n\tplt.scatter(data['Time'], data['PyrE'])\n\t\n\tif PlotFilter == 1:\n\t\tfor i in range(0, len(lbl)):\n\t\t\tdataf = data_filter(data, datestr, tistr[i], tfstr[i])\n\t\t\t\n\t\t\tplt.scatter(dataf['Time'], dataf['PyrE'])\n\t\t\t\n\t\tplt.legend(['Unfiltered'] + lbl)\n\nif 'TC1' in dc:\n\tplt.figure(figi)\n\tfigi += 1\n\tplt.title(datestr + ' Ambient Temperature')\n\tplt.xlabel('Time (MDT)')\n\tplt.ylabel('Temperature ($^\\circ$C)')\n\tplt.scatter(data['Time'], data['TC1'])\n\t\n\tif PlotFilter == 1:\n\t\tfor i in range(0, len(lbl)):\n\t\t\tdataf = data_filter(data, datestr, tistr[i], tfstr[i])\n\t\t\t\n\t\t\tplt.scatter(dataf['Time'], dataf['TC1'])\n\t\t\t\n\t\tplt.legend(['Unfiltered'] + lbl)\n\t\t\nAc = pi*0.6**2/4\t\t\nif ('PyrE' in dc) & ('Power' in dc) & (PlotFilter == 1):\n\tplt.figure(figi)\n\tfigi += 1\n\t[spx, spy] = subcount(len(lbl))\n\tfor i in range(0, len(lbl)):\n\t\tdataf = data_filter(data, datestr, tistr[i], tfstr[i])\n\t\tPowerAve = dataf['Power'].mean()\n\t\tPyrAve = dataf['PyrE'].mean()*Ac\n\t\tEffAve = PowerAve/PyrAve\n\t\tplt.subplot(spx, spy, i+1)\n\t\tplt.scatter(dataf['PyrE']*Ac, dataf['Power'])\n\t\tplt.xlabel('Power In [W]')\n\t\tplt.ylabel('Power Out [W]')\n\t\tplt.title(lbl[i] + ' | Ave Eff: ' + '{:.4f}'.format(EffAve))\n\t\tplt.suptitle(datestr + ' Power Input vs Power Output')\n\nif ('Power' in dc) & (PlotFilter == 1):\n\tplt.figure(figi)\n\tfigi += 1\n\t[spx, spy] = subcount(len(lbl))\n\tfor i in range(0, len(lbl)):\n\t\tdataf = data_filter(data, datestr, tistr[i], tfstr[i])\n\t\tplt.subplot(spx, spy, i+1)\n\t\tplt.scatter(dataf['Time'], dataf['Power'])\n\t\tPowerAve = dataf['Power'].mean()\n\t\tPyrAve = dataf['PyrE'].mean()*Ac\n\t\tEffAve = PowerAve/PyrAve\n\t\tprint(lbl[i], 'Power Average: \\t', PowerAve)\n\t\tprint(lbl[i], 'Pyr Average: \\t', PyrAve)\n\t\tprint(lbl[i], 'Ave eff: \\t', EffAve)\n\t\tprint(lbl[i], 'Data count: \\t', dataf['Power'].size)\n\t\tplt.scatter(dataf['Time'], dataf['PyrE']*EffAve*Ac)\n\t\tplt.xlabel('Time [MDT]')\n\t\tplt.ylabel('Power Output [W]')\n\t\ttitlestr = lbl[i] + ' | Ave Eff: ' + '{:.4f}'.format(EffAve)\n\t\tplt.title(titlestr)\n\t\tplt.suptitle(datestr + ' Measured Power Output and Estimated Power Output')\n\t\tplt.legend(['Power Output', 'Est. 
Power from Pyr'])\n\nplt.show()","repo_name":"coreytrujillo/Data_Analysis_Scipts","sub_path":"Wrapper_Plot.py","file_name":"Wrapper_Plot.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8112516844","text":"import smtplib\nimport codecs\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n# server = smtplib.SMTP_SSL(\"smtp.gmail.com\" , 465)\n# server.login(\"management.flashadeal@gmail.com\" , \"jlmdeyouhpnljmih\")\n\n# message = \"\"\"From: Flashadeal Management \n# MIME-Version: 1.0\n# Content-type: text/html\n# Subject: Thanks for Subscribing\n\n# \"\"\"\n# message += \"Welcome to Flashadeal\"\n# file = codecs.open(\"email/welcomeEmail.html\", \"r\", \"utf-8\")\n# message += file.read()\n\n# server.sendmail(\"management.flashadeal@gmail.com\" , \"j_kesineni@yahoo.co.in\" , message)\n\ndef send_mail(bodyContent,to_email):\n from_email = 'management.flashadeal@gmail.com'\n subject = 'Welcome to Flashadeal'\n message = MIMEMultipart()\n message['Subject'] = subject\n message['From'] = from_email\n message['To'] = to_email\n\n message.attach(MIMEText(bodyContent, \"html\"))\n msgBody = message.as_string()\n\n server = smtplib.SMTP_SSL(\"smtp.gmail.com\" , 465)\n server.login(\"management.flashadeal@gmail.com\" , \"jlmdeyouhpnljmih\")\n server.sendmail(from_email, to_email, msgBody)\n server.quit()\n\nwith open(\"email/welcomeEmail.html\" , 'r') as file:\n l = ['gayatridivi1234@gmail.com']\n for i in l:\n send_mail(file.read() , i)","repo_name":"143prudhvi/email","sub_path":"welcomeEmail.py","file_name":"welcomeEmail.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39508418752","text":"import os\nimport re\n\nclass ObscenceFilter:\n    def __init__(self):\n        # list of words to censor\n        self._censor_list = []\n\n        # character used as the replacement\n        self._censor_char = \"*\"\n\n        # where to find the list of censored words\n        self._BASE_DIR = os.path.abspath(os.path.dirname(__file__))\n        self._words_file = os.path.join(self._BASE_DIR, 'wordlist.txt')\n\n    def _load_words(self):\n        # load the list of censored words\n        with open(self._words_file, 'r', encoding='utf-8') as f:\n            self._censor_list = [line.strip() for line in f.readlines()]\n        profane_words = [w for w in self._censor_list]\n        return profane_words \n\n    def censor(self, input_text):\n        # return input_text with obscene words masked with '*'\n        bad_words = self._load_words()\n        res = input_text\n\n        for word in bad_words:\n            word = r'\\b%s\\b' % word \n            regex = re.compile(word, re.IGNORECASE)\n            res = regex.sub(self._censor_char * (len(word) - 4), res)\n\n        return res","repo_name":"olechnaya/NewsPaper","sub_path":"news/templatetags/obscene_filter.py","file_name":"obscene_filter.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20879579709","text":"import datetime\n\nfrom django.http import JsonResponse\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom web3.logs import IGNORE\n\nfrom .models import Presentacion, Documento\n\ndef rendicion_post(request):\n    if request.POST[\"action\"] == \"delete\":\n        Documento.objects.get(pk=request.POST[\"pk\"]).delete()\n        return JsonResponse({\"status\": \"ok\"})\n    if request.POST[\"action\"] == \"presentar\":\n        presentacion = 
Presentacion.objects.get(pk=request.POST[\"pk\"])\n if presentacion.documento_set.count() == 0:\n return JsonResponse({\"status\": \"error\", \"message\": \"No hay documentos cargados\"})\n presentacion.estado = True\n presentacion.fecha_presentacion = datetime.datetime.now()\n\n w3 = settings.WEB3_CONNECTION\n contract = settings.WEB3_CONTRACT\n\n tx_hash = contract.functions.addPresentation(\n presentacion.nro_presentacion,\n presentacion.rendicion.anio,\n presentacion.rendicion.periodo,\n presentacion.rendicion.municipio.nombre,\n presentacion.getAll_documento_descripcion(),\n presentacion.getAll_documento_hash(),\n ).transact({\"from\": settings.WEB3_OWNER_ADDRESS})\n tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n data_transaction = contract.events.PresentationAdded().process_receipt(tx_receipt, errors=IGNORE)[0]\n presentacion.save()\n return JsonResponse({\n \"status\": \"ok\",\n \"url\": reverse(\"rendicion_list\"),\n \"blockHash\": data_transaction[\"blockHash\"].hex(),\n \"blockNumber\": data_transaction[\"blockNumber\"],\n \"contractAddress\": data_transaction[\"address\"],\n \"transactionHash\": data_transaction[\"transactionHash\"].hex(),\n \"presentacionId\": data_transaction[\"args\"][\"id\"],\n })\n","repo_name":"FacuRodriJ/django-truffle","sub_path":"django_setup/core/http_verbs.py","file_name":"http_verbs.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41781670567","text":"from models.brv.base_random_value import BaseRandomValue\nimport numpy as nmp\n\nMAX_VALUE = 65536\n\n\ndef frequency_test(\n brv: BaseRandomValue,\n k: int = 10,\n n1: int = 100,\n n2: int = 10_000\n) -> tuple[nmp.ndarray, nmp.ndarray]:\n res1 = __frequency_test_int(brv, k, n1)\n res2 = __frequency_test_int(brv, k, n2)\n return res1, res2\n\n\ndef __frequency_test_int(brv: BaseRandomValue, k: int, n: int) -> nmp.ndarray:\n src = nmp.array([brv.next() for i in range(n)])\n step = MAX_VALUE / k\n groups = nmp.array([int((i + 1) * step) for i in range(k)])\n src = nmp.array(\n [nmp.argmin(x > groups) for x in src]\n )\n v, c = nmp.unique(src, return_counts=True)\n res = nmp.zeros(k, dtype=float)\n res[v] = c / n\n return res","repo_name":"HornedHeck/MMod","sub_path":"tools/frequency_test.py","file_name":"frequency_test.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30674025603","text":"import os\nfrom setuptools import setup, find_packages\n\nVERSION = '0.1.0'\n\nsetup(\n\tnamespace_packages = ['tiddlywebplugins'],\n\tname = 'tiddlywebplugins.mongodb',\n\tversion = VERSION,\n\tdescription = 'A ',\n\tlong_description=file(os.path.join(os.path.dirname(__file__), 'README')).read(),\n\tauthor = 'Ben Paddock',\n\turl = 'http://pypi.python.org/pypi/tiddlywebplugins.mongodb',\n\tpackages = find_packages(exclude=['test']),\n\tauthor_email = 'pads@thisispads.me.uk',\n\tplatforms = 'Posix; MacOS X; Windows',\n\tinstall_requires = ['tiddlyweb', 'pymongo'],\n\textras_require = {\n\t\t'testing': ['pytest', 'mock', 'tiddlywebplugins.utils'],\n\t\t'coverage': ['pytest-cov', 'python-coveralls'],\n\t\t'style': ['pep8']\n\t},\n\tzip_safe = False,\n\t)\n","repo_name":"pads/tiddlywebplugins.mongodb","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"24151994169","text":"import random\n\nnames = 'Julian Bob PyBites Dante Martin Rodolfo'.split()\naliases = 'Pythonista Nerd Coder'.split() * 2\npoints = random.sample(range(81, 101), 6)\nawake = [True, False] * 3\nSEPARATOR = ' | '\n\n\ndef generate_table(*args):\n for mix in zip(*args):\n mix = [str(m) for m in mix]\n yield \"{}\".format(SEPARATOR.join(mix))\n","repo_name":"xtakacsx/bitesofpy","sub_path":"14/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40170376579","text":"import requests\nimport json\n\ndata = {\n 'text_list': [\"I am very happy.\",\n\t\t\t\t \"I love McDonalds\",\n\t\t\t\t \"I love McDonalds\",\n\t\t\t\t \"I am a worker from New york.\"]\n}\n\nresponse = requests.post(\n \"https://api.monkeylearn.com/v2/classifiers/cl_oJNMkt2V/classify/\",\n data=json.dumps(data),\n headers={'Authorization': 'Token 35ab2ef38b1f683705009f64525d4398f27cbf6f',\n 'Content-Type': 'application/json'})\n\nprint (json.loads(response.text))\n","repo_name":"gaspardorey/mysite","sub_path":"polls/texts.py","file_name":"texts.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44063161724","text":"from math import pow, sqrt, pi\n\nfor _ in range(int(input())):\n r, n = [int(i) for i in input().split(\" \")]\n\n pts = []\n for _ in range(n):\n pts.append([int(i) for i in input().split(\" \")])\n pts.append(pts[0])\n\n length = 0\n for i in range(n):\n length += sqrt(pow(pts[i][0] - pts[i + 1][0], 2) + pow(pts[i][1] - pts[i + 1][1], 2))\n\n perimeter = length - 2 * pi * r\n scale = perimeter / length\n\n if scale < 0:\n print(\"Not possible\")\n else:\n print(scale)","repo_name":"leslieyip02/kattis","sub_path":"completed/tracksmoothing.py","file_name":"tracksmoothing.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39867894216","text":"# -*- coding: utf-8 -*-\n# external\n# import urlparse\nimport slumber\n\n## base\nfrom ..api import BaseAPI\n# from ..serialize import Serializer\nfrom ..responsor import (\n Response,\n PagerResponse\n)\n\n\n# 以下を message 以外で使用するなら BaseAPIへ移動\n\n\n__all__ = [\"Message\"]\n\n\nclass Message(BaseAPI):\n\n class Meta:\n api_name = \"message\"\n api_version = \"v1\"\n secure = False\n response = Response\n responses = PagerResponse\n client = slumber.API\n\n def __init__(self, *args, **kwargs):\n \"\"\" MessageAPI \"\"\"\n super(Message, self).__init__(*args, **kwargs)\n self._client = self._meta.client(self.get_base_url(), auth=kwargs.pop(\"auth\", None))\n\n def get_base_url(self):\n schema = \"https\" if self._meta.secure else \"http\"\n return \"{0}://{1}/{2}/{3}/\".format(schema, self.server,\n self._meta.api_name, self._meta.api_version)\n\n def push_message(self, subject, body, mailfrom, mailto):\n \"\"\"\n Read the `Message Interface `_\n\n return値 生データ ::\n\n .. 
code-block:: python\n\n sample = {\n u'opid': u'877280C4-89D6-49A7-B401-E3F27ED03144',\n u'utime': u'2012-01-31T10:40:16',\n u'ctime': u'2012-01-23T17:40:23',\n u'id': 1\n }\n\n :param subject: mail subject\n :param body: mail body\n :param mailform: admin name\n :param mailto: device ids\n :rtype: object\n :return: Response Object\n \"\"\"\n assert isinstance(mailto, (list, tuple))\n data = [\n {\"subject\": subject, \"body\": body, \"mailfrom\": mailfrom, \"mailto\": to} for to in mailto]\n push = self._client.operation.push\n serializer = push.get_serializer()\n # sample = {\n # u'opid': u'877280C4-89D6-49A7-B401-E3F27ED03144',\n # u'utime': u'2012-01-31T10:40:16',\n # u'ctime': u'2012-01-23T17:40:23',\n # u'id': 1\n # }\n # # return self._meta.response(push, sample, self._meta.api_name)\n return self._meta.response(push, serializer.loads(push.post(data)), self._meta.api_name)\n\n def status_message(self, opid, page={\"offset\": 0, \"limit\": 20}):\n \"\"\"\n\n Read the `Message Interface `_\n\n\n return値 生データ ::\n\n .. code-block:: python\n\n sample = {u'meta': {u'limit': 20,\n u'next': u'/message/v1/message/?limit=20&offset=20',\n u'offset': 0,\n u'previous': None,\n u'total_count': 3},\n u'objects': [{\n u'id': u\"1\",\n u\"did\": u\"d-12345670\",\n u\"read\": True,\n u\"read_date\": u'2012-01-30T15:29:41',\n u'status': u'success',\n u\"reason\": \"\",\n u'ctime': u'2012-01-24T13:41:16',\n u'utime': u'2012-01-30T15:29:44'\n }, {\n u'id': u\"2\",\n u\"did\": u\"d-12345671\",\n u\"read\": True,\n u\"read_date\": u'2012-01-30T15:29:43',\n u'status': u'success',\n u\"reason\": \"\",\n u'ctime': u'2012-01-24T13:41:16',\n u'utime': u'2012-01-30T15:29:44'\n }]\n }\n\n :param opid: opid is the return value from push_message.\n :rtype: object\n :return: PagerResponse Object\n \"\"\"\n status = self._client.operation.status\n # return self._meta.responses(status,\n # {u'meta': {\n # u'limit': 20,\n # u'next': None,\n # u'offset': 0,\n # u'previous': None,\n # u'total_count': 10\n # },\n # u'objects': [\n # {u'id': u\"{0}\".format(num),\n # u\"did\": u\"d-1234567{0}\".format(num),\n # u\"read\": True,\n # u\"read_date\": u'2012-01-30T15:29:4{0}'.format(num),\n # u'status': u'success',\n # u\"reason\": \"\",\n # u'ctime': u'2012-01-24T13:41:16',\n # u'utime': u'2012-01-30T15:29:44'\n # } for num in range(0, 10)]})\n return self._meta.responses(status, status.get(**page))\n","repo_name":"bizmobile/bizmobile-python","sub_path":"bizmobile/connect/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"6676815485","text":"import urllib.request as req\nfrom bs4 import BeautifulSoup\nimport urllib.parse as par #한글을 특수한 문자로 변환\n\nkeyword = input(\"키워드 입력 >> \")\nencoded = par.quote(keyword) #한글을 특수한 문자로 변환\n\npage_num = 1\nwhile True:\n url = \"https://news.joins.com/Search/TotalNews?Keyword={}&SortType=New&SearchCategoryType=TotalNews&PeriodType=All&ScopeType=All&ImageType=All&JplusType=All&BlogType=All&ImageSearchType=Image&TotalCount=0&StartCount=0&IsChosung=False&IssueCategoryType=All&IsDuplicate=True&Page={}&PageSize=10&IsNeedTotalCount=True\".format(encoded, page_num)\n code = req.urlopen(url)\n soup = BeautifulSoup(code, \"html.parser\")\n title = soup.select(\"h2.headline.mg > a\")\n if len(title) == 0: # 리스트자료형인 title의 원소가 0개 라면 == 끝에 페이지까지 크롤링 완료 했으면?\n break\n for i in title:\n #print(i.string) #받아온 요소의 내용안에 또다른 요소가 있으면 none으로 출력 --> text로 대체\n print(\"제목 : \", 
i.text)\n print(\"링크 : \", i.attrs[\"href\"]) #기사 본문의 주소 -> attrs : 속성값 가져오기\n code_news = req.urlopen(i.attrs[\"href\"])\n soup_news = BeautifulSoup(code_news, \"html.parser\")\n content = soup_news.select_one(\"div#article_body\") #요소 하나만 가져오기\n print(content.text.strip().replace(\"   \",\" \").replace(\"   \",\"\"))#데이터 가공하기 -> strip : 쓸데 없는 공백 없애기 -> replace: 문자열 사이 공백\n page_num +=1\n","repo_name":"sue4869/python_web","sub_path":"29_중앙일보_크롤링.py","file_name":"29_중앙일보_크롤링.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21421480081","text":"from sys import stdin\n\ndef ktDauCuoi(a):\n size = len(a)\n if(a[0] == a[size - 1]):\n return True\n return False\n\ndef main():\n for s in stdin:\n a = str(s.strip()) #xoa ky tu newline cua stdin\n if ktDauCuoi(a) == True:\n print('YES')\n else:\n print('NO')\n\nif __name__ == \"__main__\":\n main()","repo_name":"nttmkhang/String_Buoi","sub_path":"DUYNO_Python/DUYNO_Python.py","file_name":"DUYNO_Python.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7658939743","text":"from .Board import *\r\n\r\nfrom pprint import pprint\r\nfrom math import sqrt\r\n\r\n\r\nDISTANCE_DE_REFERENCE = [\r\n 0, 0, 5, 15, 31, 50, 65, 95, 135, 185, 250, 330, 420, 550, 680, 800, 1000\r\n]\r\n\r\ndef calc_ratio(v1,v2):\r\n \"\"\"Calcule le ratio de v1 par v2 en renvoyant 0 si v2 est nul.\"\"\"\r\n if v2 == 0:\r\n return 0\r\n return v1/v2 \r\n\r\nclass Game:\r\n def __init__(self):\r\n self.board = Board()\r\n self.note_dimmensionnement = self.calcul_note_dimensionnement()\r\n self.note_pollution = self.calcul_note_pollution()\r\n\r\n def calcul_ratios_note_dimensionnement(self, population):\r\n total = {}\r\n\r\n for k in DATA_KEYS:\r\n total[k] = 0\r\n \r\n for _, bloc in self.board:\r\n for k in DATA_KEYS:\r\n total[k] += blocs[bloc][k]\r\n return {\r\n \"énergétique\": calc_ratio(total[\"production électricité\"], total[\"consommation énergie\"]),\r\n \"nourriture\": calc_ratio(total[\"production nourriture\"], total[\"consommation nourriture\"]),\r\n \"eau production\": calc_ratio(total[\"production eau\"], total[\"consommation eau\"]),\r\n \"eau traitement\": calc_ratio(total[\"traitement eau\"], total[\"consommation eau\"]),\r\n \"hébergement\": calc_ratio(total[\"capacité hébergement\"], population),\r\n \"formation\": calc_ratio(total[\"capacité formation\"], (0.235*total[\"capacité hébergement\"])),\r\n \"santé EHPAD\": calc_ratio(total[\"santé EHPAD\"], (0.01*total[\"capacité hébergement\"])),\r\n \"santé hôpitaux\": calc_ratio(total[\"santé hôpitaux\"], (0.005*total[\"capacité hébergement\"])),\r\n \"déchets ménagers\": calc_ratio(total[\"traitement déchets ménagers\"], total[\"production déchets ménagers\"]),\r\n \"déchets industriels\": calc_ratio(total[\"traitement déchets industriels\"], total[\"production déchets industriels\"]),\r\n \"déchets réutilisés\": calc_ratio(total[\"consommation déchets\"], (0.2*(total[\"production déchets industriels\"]+total[\"production déchets ménagers\"]))),\r\n \"emploi\": calc_ratio(total[\"capacité emploi\"], (0.443*total[\"capacité hébergement\"])),\r\n \"transport\": calc_ratio(total[\"capacité transport\"], (0.5*total[\"capacité hébergement\"])),\r\n \"commerces\": calc_ratio(total[\"capacité hébergement\"], total[\"capacité commerces\"])\r\n }\r\n\r\n points = {\r\n 0.5: (\"commerces\",),\r\n 1.0: (\"eau 
traitement\", \"transport\", \"déchets réutilisés\"),\r\n 1.5: (\"emploi\", \"formation\", \"déchets ménagers\", \"déchets industriels\"),\r\n 1.75: (\"nourriture\", \"eau production\", \"hébergement\", \"énergétique\", \"santé hôpitaux\", \"santé EHPAD\")\r\n }\r\n\r\n def calcul_note_dimensionnement(self, population=100000):\r\n note = 0\r\n ratios = self.calcul_ratios_note_dimensionnement(population)\r\n\r\n for p in self.points:\r\n for r in self.points[p]:\r\n if abs(ratios[r]-1) <= 0.1:\r\n note += p\r\n\r\n return note\r\n\r\n def calcul_note_pollution_categories(self):\r\n notes = {}\r\n for c in CATEGORIES:\r\n # [numérateur, dénominateur]\r\n notes[c] = [0, 0]\r\n for _, bloc in self.board:\r\n c = blocs[bloc][\"catégorie\"]\r\n notes[c][0] += blocs[bloc][\"émissions CO2\"] * blocs[bloc][\"coefficient pollution\"]\r\n notes[c][1] += blocs[bloc][\"émissions CO2\"]\r\n for c in CATEGORIES:\r\n notes[c] = calc_ratio(*notes[c])\r\n return notes\r\n\r\n def calcul_note_pollution(self):\r\n return sum(self.calcul_note_pollution_categories().values())\r\n\r\n def calcul_note_disposition(self):\r\n buildings = {}\r\n ratios = {}\r\n points = {}\r\n categories_eclatement = [\r\n \"Habitation\", \"Industrie\", \"Transport\", \r\n \"Education\", \"Production d'énergie\", \"Consommation\"\r\n ]\r\n\r\n for c in categories_eclatement:\r\n buildings[c] = []\r\n points[\"Eclatement \"+c] = 0\r\n \r\n for coords,b in self.board:\r\n c = blocs[b][\"catégorie\"]\r\n if c in categories_eclatement:\r\n buildings[c].append(coords)\r\n \r\n for c in categories_eclatement:\r\n l = len(buildings[c])\r\n distance = 0\r\n print(buildings[c])\r\n while len(buildings[c]) > 0:\r\n bloc1 = buildings[c].pop()\r\n for bloc2 in buildings[c]:\r\n distance += sqrt( (bloc2[0]-bloc1[0])**2 + (bloc2[1]-bloc1[1])**2)\r\n print(l,DISTANCE_DE_REFERENCE)\r\n\r\n ratios[c] = calc_ratio(distance, DISTANCE_DE_REFERENCE[l])\r\n\r\n if ratios[c] > 1:\r\n points[\"Eclatement \"+c] = 1 # TODO ou /!\\ 0.5 /!\\\r\n \r\n return sum(ratios.values())\r\n\r\n def update_notes(self):\r\n self.note_dimmensionnement = self.calcul_note_dimensionnement()\r\n self.note_pollution = self.calcul_note_pollution()\r\n\r\n def get_notes(self):\r\n self.update_notes()\r\n return (self.note_dimmensionnement, self.note_pollution, 0)","repo_name":"CaptainTheDelta/SimCitiesDurables","sub_path":"simcitydurable/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74812683362","text":"from CvPythonExtensions import *\nimport CvUtil\nimport ScreenInput\nimport CvScreenEnums\nimport string\n\n# globals\ngc = CyGlobalContext()\nArtFileMgr = CyArtFileMgr()\nlocalText = CyTranslator()\n\nclass CvPediaCorporation:\n\t\"Civilopedia Screen for Corporations\"\n\n\tdef __init__(self, main):\n\t\tself.iCorporation = -1\n\t\tself.top = main\n\t\t\n\t\tself.X_MAIN_PANE = self.top.X_PEDIA_PAGE + 20\n\t\tself.Y_MAIN_PANE = 55\n\t\tself.W_MAIN_PANE = 250\n\t\tself.H_MAIN_PANE = 260\n\n\n\t\tself.W_ICON = 150\n\t\tself.H_ICON = 150\n\t\tself.X_ICON = self.X_MAIN_PANE + (self.W_MAIN_PANE-self.W_ICON)/2\n\t\tself.Y_ICON = self.Y_MAIN_PANE + (self.H_MAIN_PANE-self.H_ICON)/2\n\t\tself.ICON_SIZE = 64\n\t\t\n\t\tself.X_REQUIRES = self.X_MAIN_PANE + self.W_MAIN_PANE + 10\n\t\tself.Y_REQUIRES = 55\n\t\tself.W_REQUIRES = 1024 - (self.X_REQUIRES) -24\n\t\tself.H_REQUIRES = 110\n\n\t\tself.X_SPECIAL = self.X_MAIN_PANE + self.W_MAIN_PANE + 
10\n\t\tself.Y_SPECIAL = self.Y_REQUIRES + self.H_REQUIRES\n\t\tself.W_SPECIAL = 1024 - (self.X_MAIN_PANE + self.W_MAIN_PANE + 10) - 24\n\t\tself.H_SPECIAL = self.Y_MAIN_PANE + self.H_MAIN_PANE - self.Y_SPECIAL\n\n\t\tself.X_TEXT = self.X_MAIN_PANE\n\t\tself.Y_TEXT = self.Y_MAIN_PANE + self.H_MAIN_PANE + 20\n\t\tself.W_TEXT = 1024 - (self.X_MAIN_PANE) - 24\n\t\tself.H_TEXT = 705 - self.Y_TEXT\n\t\t\n\t# Screen construction function\n\tdef interfaceScreen(self, iCorporation):\t\n\t\t\t\n\t\tself.iCorporation = iCorporation\n\t\n\t\tself.top.deleteAllWidgets()\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tbNotActive = (not screen.isActive())\n\t\tif bNotActive:\n\t\t\tself.top.setPediaCommonWidgets()\n\t\t\tself.placeLinks()\n\n\t\t# Header...\n\t\tszHeader = u\"\" + gc.getCorporationInfo(self.iCorporation).getDescription().upper() + u\"\"\n\t\tszHeaderId = self.top.getNextWidgetName()\n\t\tscreen.setLabel(szHeaderId, \"Background\", szHeader, CvUtil.FONT_CENTER_JUSTIFY, self.top.X_SCREEN, self.top.Y_TITLE, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)\n\t\t\n\t\t# Top\n\t\tscreen.setText(self.top.getNextWidgetName(), \"Background\", self.top.MENU_TEXT, CvUtil.FONT_LEFT_JUSTIFY, self.top.X_MENU, self.top.Y_MENU, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_PEDIA_MAIN, CivilopediaPageTypes.CIVILOPEDIA_PAGE_RELIGION, -1)\n\n\t\tif self.top.iLastScreen != CvScreenEnums.PEDIA_CORPORATION or bNotActive:\t\t\n\t\t\tif self.top.iLastScreen != CvScreenEnums.PEDIA_MAIN:\n\t\t\t\tself.placeLinks()\n\t\t\tself.top.iLastScreen = CvScreenEnums.PEDIA_CORPORATION\n\n\t\t# Icon\n\t\tscreen.addPanel( self.top.getNextWidgetName(), \"\", \"\", False, False,\n\t\t self.X_MAIN_PANE, self.Y_MAIN_PANE, self.W_MAIN_PANE, self.H_MAIN_PANE, PanelStyles.PANEL_STYLE_BLUE50)\n\t\tscreen.addPanel(self.top.getNextWidgetName(), \"\", \"\", false, false,\n\t\t self.X_ICON, self.Y_ICON, self.W_ICON, self.H_ICON, PanelStyles.PANEL_STYLE_MAIN)\n\t\tscreen.addDDSGFC(self.top.getNextWidgetName(), gc.getCorporationInfo(self.iCorporation).getButton(),\n\t\t self.X_ICON + self.W_ICON/2 - self.ICON_SIZE/2, self.Y_ICON + self.H_ICON/2 - self.ICON_SIZE/2, self.ICON_SIZE, self.ICON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1 )\n\n\t\tself.placeSpecial()\n\t\tself.placeRequires()\n\t\tself.placeText()\n\n\tdef placeRequires(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tpanelName = self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName, localText.getText(\"TXT_KEY_PEDIA_REQUIRES\", ()), \"\", false, true,\n\t\t\t\t self.X_REQUIRES, self.Y_REQUIRES, self.W_REQUIRES, self.H_REQUIRES, PanelStyles.PANEL_STYLE_BLUE50 )\n\t\tscreen.attachLabel(panelName, \"\", \" \")\n\t\t\n\t\tiTech = gc.getCorporationInfo(self.iCorporation).getTechPrereq()\n\t\tif (iTech > -1):\n\t\t\tscreen.attachImageButton( panelName, \"\", gc.getTechInfo(iTech).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iTech, 1, False )\n\t\t\t\n\t\tfor iBuilding in range(gc.getNumBuildingInfos()):\n\t\t\tif (gc.getBuildingInfo(iBuilding).getFoundsCorporation() == self.iCorporation):\n\t\t\t\tscreen.attachImageButton( panelName, \"\", gc.getBuildingInfo(iBuilding).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_BUILDING, iBuilding, 1, False )\n\t\t\t\t\n\t\tfor iUnit in range(gc.getNumUnitInfos()):\n\t\t\tbRequired = false\n\t\t\tfor iBuilding in range(gc.getNumBuildingInfos()):\n\t\t\t\tif (gc.getBuildingInfo(iBuilding).getFoundsCorporation() == 
self.iCorporation):\n\t\t\t\t\tif gc.getUnitInfo(iUnit).getBuildings(iBuilding) or gc.getUnitInfo(iUnit).getForceBuildings(iBuilding):\n\t\t\t\t\t\tbRequired = true\n\t\t\t\t\t\tbreak\n\n\t\t\tif bRequired:\n\t\t\t\tscreen.attachImageButton( panelName, \"\", gc.getUnitInfo(iUnit).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_UNIT, iUnit, 1, False )\n\t\t\t\n\tdef placeSpecial(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tpanelName = self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName, localText.getText(\"TXT_KEY_PEDIA_EFFECTS\", ()), \"\", true, false,\n\t\t\t\t self.X_SPECIAL, self.Y_SPECIAL, self.W_SPECIAL, self.H_SPECIAL, PanelStyles.PANEL_STYLE_BLUE50 )\n\t\t\t\t\n\t\tlistName = self.top.getNextWidgetName()\n\t\tscreen.attachListBoxGFC( panelName, listName, \"\", TableStyles.TABLE_STYLE_EMPTY )\n\t\tscreen.enableSelect(listName, False)\n\t\t\n\t\tszSpecialText = CyGameTextMgr().parseCorporationInfo(self.iCorporation, True)[1:]\n\t\tsplitText = string.split( szSpecialText, \"\\n\" )\n\t\tfor special in splitText:\n\t\t\tif len( special ) != 0:\n\t\t\t\tscreen.appendListBoxString( listName, special, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY )\n\t\t\t\t\n\tdef placeText(self):\n\t\t\n\t\tscreen = self.top.getScreen()\n\t\t\n\t\tpanelName = self.top.getNextWidgetName()\n\t\tscreen.addPanel( panelName, \"\", \"\", true, true,\n\t\t\t\t self.X_TEXT, self.Y_TEXT, self.W_TEXT, self.H_TEXT, PanelStyles.PANEL_STYLE_BLUE50 )\n \n\t\tszText = gc.getCorporationInfo(self.iCorporation).getCivilopedia()\n\t\tscreen.attachMultilineText( panelName, \"Text\", szText, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\n\t\t\n\tdef placeLinks(self):\n\n\t\tself.top.placeLinks()\n\t\tself.top.placeCorporations()\n\t\t\t\n\t# Will handle the input for this screen...\n\tdef handleInput (self, inputClass):\n\t\treturn 0\n\n\n","repo_name":"frankstrater/RFC-RAND-UP","sub_path":"Rhye's and Fall RAND/Assets/Python/screens/CvPediaCorporation.py","file_name":"CvPediaCorporation.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"39658762915","text":"from pathlib import Path\n\n# Настройки путей\n_BASE_DIR = Path.cwd() # D:\\Program Files\\Other\\Coding\\shooter_game\\shooter\n_RESOURCES_DIR = _BASE_DIR / 'resources'\n_IMAGES_DIR = _RESOURCES_DIR / 'images'\n_LEVELS_DIR = _RESOURCES_DIR / 'levels'\n\n# Общие настройки\nWINDOW_WIDTH = 800\nWINDOW_HEIGHT = 533\nWINDOW_CAPTION = 'Zombie Shoter!'\nFRAME_RATE = 60\n\n# Настройки игры\nBACKGROUND_COLOR = (0, 0, 0)\n\n# Настройки игрока\nPLAYER_IMAGE = _IMAGES_DIR / 'player_min.png'\nPLAYER_SPEED = 5\nPLAYER_HEALTH = 100\n\n# Настройки пули\nBULLET_IMG = _IMAGES_DIR / 'bullet.png'\nBULLET_SPEED = 15\nBULLET_DAMAGE = 10\n\n# Настройки врагов\nZOMBIE_IMAGE = _IMAGES_DIR / 'zombie_min.png'\nZOMBIE_SPEED = 3\nZOMBIE_AGR_RANGE = 50\nZOMBIE_HEALTH = 2000\nZOMBIE_DAMAGE = 40\nZOMBIE_ATTACK_DELAY = 1\n\n# Настройка объектов игрового окружения\nLANDSCAPE_GROUND = _IMAGES_DIR / 'ground.png'\nLANDSCAPE_WATER = _IMAGES_DIR / 'water.png'\nLANDSCAPE_STONE = _IMAGES_DIR / 'stone.png'\nLANDSCAPE_PALM = _IMAGES_DIR / 'palm-1.png'\nLANDSCAPE_BONEFIRE = _IMAGES_DIR / 'bonefire.png'\n\n# Настройка объектов ловушек\nCACTUS_IMG = _IMAGES_DIR / 'cactus.png'\nCACTUS_ATTACK_DELAY = 1\nCACTUS_SPIKE_DAMAGE = 10\n\n# Список уровней\nLEVEL_1 = _LEVELS_DIR / 'level.txt'\nLEVEL_2 = _LEVELS_DIR / 
'level_2.txt'\n","repo_name":"notrurs/zombie_shooter_026","sub_path":"shooter/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40918933767","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'', max_length=128)),\n ('deadline', models.DateField()),\n ('description', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Entrant',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'', max_length=128)),\n ('email', models.EmailField(max_length=254)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"vtsyms/CS399Agency","sub_path":"CS399Agency/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6621621939","text":"import ReadFile\nimport DataParsing\nimport Graphing\nimport KMeansClustering\nimport KNNClustering\nimport RandomKMeansClustering\nimport random\nimport math\n\n\ndef _askUser():\n print(\"Please input what fields you would like to use\")\n print(\"you can enter up to 3 with there being 4 fields from 1 - 4\")\n print(\"The fields being 1. Sepal Length, 2. Sepal Width, 3. Petal Length, 4. 
Petal Width\")\n user_in = input(\"Please separate your inputs of the fields you want via a space\\n\")\n split_user = user_in.split(\" \")\n return split_user\n\n\ndef _generateTestSet(data_set, percentage):\n length = len(data_set)\n percent = math.ceil(length / 100)\n amount = percent * percentage\n\n testing_set = []\n\n for i in range(amount):\n generated_number = random.randint(1, length) - 1\n while generated_number in testing_set:\n generated_number = random.randint(1, length) - 1\n testing_set.append(generated_number)\n\n return testing_set\n\n\ncorrect = []\n\n\ndef _run():\n data_names = [\"Sepal Length\", \"Sepal Width\", \"Petal Length\", \"Petal Width\"]\n user_input = _askUser()\n converted_input = []\n for i in user_input:\n converted_input.append(DataParsing.convetToNumber(i) - 1)\n\n data_set = DataParsing.parseData(ReadFile.readFile(\"irisData.txt\"), converted_input)\n\n testing_indexes = _generateTestSet(data_set, 10)\n testing_indexes.sort(reverse=True)\n print(testing_indexes)\n\n # write a method to keep some data back as testing data\n\n used_names = []\n for current_input in converted_input:\n used_names.append(data_names[current_input])\n\n # part 1\n Graphing.newDrawGraph(data_set, used_names)\n\n # part 2\n # clusters = KMeansClustering.chooseStartingClusterCenters(data_set)\n # clusters = KMeansClustering.randomlyChooseStartingCenters(data_set)\n clusters = RandomKMeansClustering.randomlyChooseStartingCenters(data_set)\n Graphing.DrawClustering(clusters, used_names)\n\n # part 3\n test_set = []\n for index in testing_indexes:\n test_data = data_set[index]\n test_set.append(test_data)\n data_set.remove(test_data)\n\n correct.append(KNNClustering.calculateKNN(data_set, test_set, 13))\n\n\n_run()\n","repo_name":"Sean-Powell/Machine-Learning","sub_path":"Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27804478862","text":"# Create a class for a dog\n\nclass Dog():\n def __init__(self, name, age):\n # Initialize name and age attributes\n self.name = name\n self.age = age\n\n def sit(self):\n # Simulate a dog sitting in response to a command\n print(self.name.title() + \" is now sitting.\")\n\n def roll_over(self):\n # Simulate a dog rolling over on command\n print(self.name.title() + \" rolled over!\")\n\n\n# Create an instance of the class\n\nmy_dog = Dog('Willie', 6)\n\nprint (\"My dog's name is \" + my_dog.name.title())\nprint (\"My dog is \" + str(my_dog.age) + \" years old\")\n\nwifes_dog = Dog('Jonie', 2)\n\nprint (\"My dog's name is \" + wifes_dog.name.title())\nprint (\"My dog is \" + str(wifes_dog.age) + \" years old\")\n\nmy_dog.sit()\n\nmy_dog.roll_over()","repo_name":"ReynardtDeminey/Python-Crash-Course","sub_path":"dog_class.py","file_name":"dog_class.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2291074398","text":"import pickle\r\nimport random\r\nimport tkinter as tk\r\nfrom tkinter import font\r\nimport pprint\r\nfrom collections import Counter\r\nfrom tqdm import tqdm\r\nimport sys\r\n\r\n\r\nXrflag = False #Turns Player number 1 into a random player\r\nOrflag = False #Turns Player number 2 into a random 
player\r\nNumOfGames = 10000\r\n\r\ntry:\r\n if sys.argv[2] == '1':\r\n Xrflag = True #Turns Player number 1 into a random player\r\n if sys.argv[3] == '1':\r\n Orflag = True #Turns Player number 2 into a random player\r\n NumOfGames = int(sys.argv[1])\r\nexcept:\r\n pass\r\nMaxProbab = 30 #Higher number means higher persistence of moves\r\nDrawprobab = 30 #0 indicates no reward for draws\r\n\r\nclass GUI3:\r\n def __init__(self):\r\n self.p1_wins = 0\r\n self.p2_wins = 0\r\n self.draws = 0\r\n\r\n self.move_no = 1\r\n self.rescall = 0\r\n self.obj_names = []\r\n self.Temp_obj_list1 = []\r\n self.Temp_obj_list2 = []\r\n try:\r\n with open('dictX.pkl', 'rb') as file:\r\n self.Menace_obj_list1 = pickle.load(file)\r\n print('x_open')\r\n except:\r\n self.Menace_obj_list1 = {}\r\n print(\"Cannot find x\")\r\n try:\r\n with open('dictO.pkl', 'rb') as file1:\r\n self.Menace_obj_list2 = pickle.load(file1)\r\n print('o_open')\r\n except:\r\n self.Menace_obj_list2 = {}\r\n print(\"Cannot find o\")\r\n\r\n self.mv = 1\r\n self.board = tk.Tk()\r\n self.board.title(\"Tic-Tac-Toe\")\r\n self.buttons = []\r\n self.font1 = font.Font(size=36)\r\n for x in range(0, 3):\r\n for y in range(0, 3):\r\n b = tk.Button(self.board, height=2, width=6, text='', font=self.font1)\r\n b.config(command=lambda widget=b: self.Menace_click2(widget))\r\n b.grid(row=x, column=y)\r\n b.position = (x, y)\r\n self.buttons.append(b)\r\n\r\n self.b1 = tk.Button(self.board, height=1, width=6, text='Exit', font=self.font1)\r\n self.b1.config(command=lambda: self.Destroy())\r\n self.b1.grid(row=3, column=0)\r\n\r\n\r\n def Invalid_Moves(self):\r\n invalid_m = []\r\n for x in range(0, 9):\r\n if self.buttons[x][\"text\"] == \"X\" or self.buttons[x][\"text\"] == \"O\":\r\n invalid_m.append(x)\r\n return invalid_m\r\n\r\n def Menace_click1(self, Menace_obj):\r\n click = Menace_obj.r_select()\r\n if click == -1:\r\n self.GUI3_loss()\r\n else:\r\n invalid_m = self.Invalid_Moves()\r\n if click not in invalid_m and self.mv % 2 != 0:\r\n self.buttons[click][\"text\"] = \"X\"\r\n self.mv = 2\r\n self.obj_names.append(str(click))\r\n self.move_no += 1\r\n Check_if_Won = self.Has_Won()\r\n if Check_if_Won == \"Continue\":\r\n return\r\n if Check_if_Won == \"O\":\r\n #print(Check_if_Won,\"11\")\r\n self.GUI3_loss()\r\n elif Check_if_Won == \"X\":\r\n #print(Check_if_Won,\"12\")\r\n self.GUI3_win()\r\n elif Check_if_Won == \"Draw\":\r\n #print(Check_if_Won,\"13\")\r\n self.GUI3_draw()\r\n\r\n def Menace_click2(self, Menace_obj):\r\n click = Menace_obj.r_select()\r\n if click == -1:\r\n self.GUI3_win()\r\n else:\r\n invalid_m = self.Invalid_Moves()\r\n if click not in invalid_m and self.mv % 2 != 1:\r\n self.buttons[click][\"text\"] = \"O\"\r\n self.mv = 1\r\n self.obj_names.append(str(click))\r\n self.move_no += 1\r\n Check_if_Won = self.Has_Won()\r\n\r\n if Check_if_Won == \"Continue\":\r\n return\r\n if Check_if_Won == \"X\":\r\n #print(Check_if_Won,\"21\")\r\n self.GUI3_win()\r\n elif Check_if_Won == \"O\":\r\n #print(Check_if_Won,\"22\")\r\n self.GUI3_loss()\r\n elif Check_if_Won == \"Draw\":\r\n #print(Check_if_Won,\"23\")\r\n self.GUI3_draw()\r\n\r\n def Runprog(self):\r\n global NumOfGames\r\n #while self.rescall