diff --git "a/1753.jsonl" "b/1753.jsonl" new file mode 100644--- /dev/null +++ "b/1753.jsonl" @@ -0,0 +1,1555 @@ +{"seq_id":"25115862099","text":"def to_camel_case(text):\n lis = list(text)\n for index, letter in enumerate(lis): \n if letter == '-':\n lis[index + 1] = lis[index + 1].upper()\n return ''.join(lis).replace('-', '')\n \n if letter == '_':\n lis[index + 1] = lis[index + 1].upper()\n return ''.join(lis).replace('_', '')\n\nhello = to_camel_case('the_stealth_warrior')\nprint(hello)","repo_name":"Nyapal/yeet-code","sub_path":"idk.py","file_name":"idk.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4006942606","text":"\"\"\"\nSudoku game by Roland and Oliver. All rights reserved.\n\n\"\"\"\nimport sys\nimport copy\nimport os\nimport platform\nimport random\nimport time\nfrom colorama import init\nfrom colorama import Fore, Back, Style\n\nwith open(\"title.txt\", \"r\") as f:\n cont = f.read()\n print (cont)\n\nwith open(\"sudoku-top95.txt\" , \"r\") as f:\n sudokus = f.readlines()\n\n\ndef ValueCheck(x,y):\n return True if matrix_list_original[x][y] > 0 else False\n\n\ndef SudokuImport(x):\n with open(\"sudoku-top95.txt\", \"r\") as f:\n text = f.read()\n temp = text.split(\"\\n\")\n temp.pop(len(temp)-1)\n #print(temp)\n if x == \"r\":\n return MatrixFill(temp[random.randint(0,len(temp) - 1)])\n elif x > 0 and x < len(temp):\n return MatrixFill(temp[x - 1] )\n else:\n return MatrixFill(temp[1])\n\n\ndef color_generator(board):\n colortrix = [[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,2,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]]\n for i, elem in enumerate(board):\n for i2, subelem in enumerate(elem):\n if matrix_list_original[i][i2] > 0:\n colortrix[i][i2] = f\"{Fore.MAGENTA}{board[i][i2]}{Style.RESET_ALL}\" \n elif subelem != 0 and color_checker(board,i,i2):\n colortrix[i][i2] = f\"{Fore.RED}{board[i][i2]}{Style.RESET_ALL}\" \n elif board[i][i2] == 0:\n colortrix[i][i2] = \" \"\n else:\n colortrix[i][i2] = subelem \n return colortrix\n\ndef color_checker(board,x,y):\n checked_number = board[x][y]\n templist = []\n \n #horizontal check\n if board[x].count(checked_number) > 1:\n return True\n #vertical check\n templist = list(board[i][0] for i in range(0,9))\n if templist.count(checked_number) > 1:\n return True\n #block check\n templist = []\n blockIDx = 0\n blockIDy = 0\n if x < 3:\n blockIDx = 0\n elif x < 6:\n blockIDx = 3\n elif x < 9:\n blockIDx = 6\n if y < 3:\n blockIDy = 0\n elif y < 6:\n blockIDy = 3\n elif y < 9:\n blockIDy = 6 \n for i in range(0 + blockIDx, 3 + blockIDx):\n for i2 in range(0 + blockIDy, 3 + blockIDy):\n templist.append(board[i][i2])\n if templist.count(checked_number) > 1:\n return True\n\n\n\n\ndef MatrixFill(string):\n counter = 0\n MatrixList = [[0,1,8,0,0,0,0,3,0],[9,0,0,0,0,2,0,4,5],[7,0,0,0,0,6,0,0,0],[0,0,0,0,0,7,1,2,0],[0,0,0,0,5,0,0,0,0],[0,8,4,3,0,0,0,0,0],[0,0,0,7,0,0,0,0,6],[8,2,0,6,0,0,0,0,9],[0,3,0,0,0,0,5,8,0]]\n for y in range(0,9):\n for x in range(0,9):\n MatrixList[y][x] = int(string[counter])\n counter += 1\n return MatrixList\n\n\ndef print_sudoku2(board): # A board a mátrix!!\n letters = tuple((\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\"))\n print(f\"{Fore.BLUE} \" + \" 1 \" + \" 2 \" + \" 3 \" + \" 4 \" + \" 5 \" + \" 6 \" + \" 7 \" + \" 8 \" + \" 9 \" + f\" {Style.RESET_ALL}\")\n print(\" \" + \"┏\" + \"━━━┳\"*8 + 
\"━━━┓\")\n for i, row in enumerate(board):\n print(f\"{Fore.BLUE} \" + str(letters[i] + f\"{Style.RESET_ALL} \" + \"┃\" + \" {} │ {} │ {} ┃\"*3).format(*[x if x != 0 else \" \" for x in row]))\n if i % 3 == 2:\n print(\" \" + \"┣\" + (\"━━━┿\"*2 + \"━━━╋\")*2 + \"━━━┿\"*2 + \"━━━┫\")\n elif i % 20 == 18:\n print(\" \" + \"┗\" + \"━━━┻\"*8 + \"━━━┛\")\n else: \n print(\" \" + \"┠\" + (\"───┼\"*2 + \"───╂\")*2 + \"───┼\"*2 + \"───┨\")\n #print(\" \" + \"┗\" + \"━━━┻\"*8 + \"━━━┛\")\n\ndef user_input():\n while True:\n userinput = input(\"Enter the coordinates of the value you wish to change, followed by colon and the new value.\\nFor example, 'A1:4'.\\nPress q to quit game: \").upper()\n if userinput == \"q\" or userinput == \"Q\": sys.exit()\n if len(userinput) != 4:\n print(\"\\nInvalid input, please try again!\" + \"\\n\" *2)\n continue\n ABC = {\"A\":0, \"B\":1, \"C\":2, \"D\":3, \"E\":4, \"F\":5, \"G\":6, \"H\":7, \"I\":8}\n valid_numbers = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)\n try:\n if (int(userinput[1]) not in valid_numbers[1:] \n or int(userinput[3]) not in valid_numbers\n or userinput[0] not in list(ABC.keys())\n or userinput[2] != \":\"):\n print(\"\\nInvalid input, please try again!\" + \"\\n\" *2)\n continue\n except:\n print(\"invalid input, please try again!\" + \"\\n\" *2)\n continue\n x = int(ABC[userinput[0]])\n y = int(userinput[1])\n if matrix_list_original[x][y-1] > 0:\n print(\"\\nOriginal value cannot be changed!\\n\")\n continue\n req_value = int(userinput[3])\n matrix_list_prod[x][y-1] = req_value\n update() \n if check_validation():\n if check_verification():\n return True\n continue\n#C:\\Users\\olive\\AppData\\Local\\Programs\\Python\\Python36\\python.exe D:\\Coding\\Sudoku\\Sudoku-last-py\n\ndef update():\n if platform.system() == 'Linux':\n os.system('clear')\n print(cont)\n print_sudoku2(color_generator(matrix_list_prod)) \n elif platform.system() == 'Windows':\n os.system('cls')\n print(cont)\n print_sudoku2(color_generator(matrix_list_prod)) \n\ndef check_validation():\n ValidationCheck = 0 \n for i in range(0,9):\n if len(list(filter(lambda x: 10 > x > 0,matrix_list_prod[i]))) == 9:\n ValidationCheck += 1\n templist = [[i2][0] for i2 in range(0,9)]\n if len(list(filter(lambda x: 10 > x > 0,templist))):\n ValidationCheck += 1\n return True if ValidationCheck == 18 else False\n\ndef check_verification():\n check_verification = 0\n for i in range(0,9):\n if len(set(matrix_list_prod[i])) == len(matrix_list_prod[i]):\n check_verification += 1\n templist = [[i2][0] for i2 in range(0,9)]\n if len(set(templist)) == len(templist):\n check_verification += 1\n for block in range(0,7,3):\n for block2 in range(0,7,3):\n templist.clear()\n for i in range(0,3):\n for i2 in range(0,3):\n templist.append(matrix_list_prod[i2 + block][i + block2])\n if len(set(templist)) == len(templist):\n check_verification += 1\n return True if check_verification == 27 else False\n\ndef clear_table():\n if platform.system() == 'Linux':\n os.system('clear')\n\n elif platform.system() == 'Windows':\n os.system('cls')\n\n# A mátrix , a sudoku mezőinek értékeivel. Az értékek mátrix elemenként változtathatóak.\n\n\n\n #\"r\" for random sudoku\n#for Line in range(0,9):\n #print(MatrixList[Line])\n\n\"\"\"Itt következik maga a sudoku tábla.\"\"\"\n\n\n\n\"\"\"A koordinatak a 9x9-es táblát A1-től I9-ig osztják fel. 
Egy harmadik inputtal a kivalasztott mátrix pontra lehet\n új értéket megadni , illetve javítani az előző értéken.\"\"\"\n\n# Koordinatak és új értékek:\n\nwhile True:\n print(\"\\nMenu:\\n[1] Choose Sudoku\\n[2] Random Sudoku\\n[3] Quit\")\n try:\n user_answer = int(input(\"Please enter menu item: \"))\n except:\n print(\"invalid input\\n\")\n continue\n if user_answer == 1:\n try:\n user_answer = int(input(\"Please choose Sudoku from 1 to 95: \"))\n except:\n print(\"invalid input\\n\")\n continue\n if 1 > user_answer > 95:\n print(\"invalid input\")\n continue\n matrix_list_original = SudokuImport(user_answer)\n matrix_list_prod = copy.deepcopy(matrix_list_original)\n clear_table()\n print(cont)\n print_sudoku2(color_generator(matrix_list_prod))\n while True:\n if user_input() == True:\n break\n break\n if user_answer == 2:\n matrix_list_original = SudokuImport(\"r\")\n matrix_list_prod = copy.deepcopy(matrix_list_original)\n clear_table()\n print(cont)\n print_sudoku2(color_generator(matrix_list_prod))\n while True:\n if user_input() == True:\n break\n break\n if user_answer == 3:\n sys.exit()\n\nif platform.system() == 'Linux':\n os.system('clear')\n\nelif platform.system() == 'Windows':\n os.system('cls')\n \nprint(\" _______ .__ ____. ___. ._.\\n \\ \\ |__| ____ ____ | | ____\\_ |__| |\\n / | \\| |/ ___\\/ __ \\ | |/ _ \\| __ \\ |\\n/ | \\ \\ \\__\\ ___/ /\\__| ( <_> ) \\_\\ \\|\\n\\____|__ /__|\\___ >___ > \\________|\\____/|___ /_\\n \\/ \\/ \\/ \\/\\/\")\n\n\"\"\"A győzelmi feltételek , illetve a helyes értékek ellenőrzése , eredmény megjelenítése\"\"\"\n\nprint(\"\\n\"*6)\n\n \n","repo_name":"oliverJuhasz/Sudoku","sub_path":"Sudoku-last.py","file_name":"Sudoku-last.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26990574208","text":"#coding: utf-8\n\nimport re\nimport random\nimport logging\nimport redis\nimport scrapy\nimport requests\nfrom scrapy.http import Request\nfrom scrapy.utils.log import configure_logging\nfrom hospital.items import HospitalItem\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom requests.exceptions import ConnectTimeout, ReadTimeout\n\nconfigure_logging(install_root_handler=False)\n#定义了logging的些属性\nlogging.basicConfig(\n filename='scrapy.log',\n format='%(levelname)s: %(levelname)s: %(message)s',\n level=logging.INFO\n)\n#运行时追加模式\nlogger = logging.getLogger('SimilarFace')\n\npool = redis.ConnectionPool(host='localhost', port=6379, db=0)\nredisClient = redis.StrictRedis(connection_pool=pool)\n\ndef getIp():\n times = 0\n while True:\n if times > 3:\n return None,\"\" \n count = redisClient.llen(\"PROXY_IPS\")\n index = random.randint(0, count)\n proxyIp = redisClient.lindex(\"PROXY_IPS\", index)\n proxies = {'http': 'http://{0}'.format(proxyIp)}\n try:\n reqUrl = \"http://www.poi86.com/poi/1008130.html\"\n r = requests.get(reqUrl, proxies=proxies, timeout=3)\n if r.status_code == 200:\n return proxies['http'], r.text\n except ConnectTimeout:\n logger.info(\"[[connect timeout wait for 3s to try again]]\" + proxies['http'])\n except ReadTimeout:\n times += 1\n logger.info(\"[[read timeout {0} times]]\".format(times))\n except Exception as e:\n logger.info(\"[[proxy error wait for 3s to try again]]\" + proxies['http']+\" \"+str(e.message))\n #redisClient.lrem(\"PROXY_IPS\", 0, proxyIp)\n\nclass HospitalSpider(CrawlSpider):\n name = \"hospital\"\n start_urls = 
[\"http://www.poi86.com/poi/tag/209/1.html\"]\n\n rules = (\n #\"http://www.poi86.com\"+x\n Rule(LinkExtractor(restrict_xpaths = (\"//ul[@class='pagination']/li\"),process_value=lambda x: x),\\\n callback = \"getRecord\", follow=True),\n )\n\n def getRecord(self, response):\n urls = response.xpath(\"//tr/td/a[not(contains(@href,'district') or contains(@href,'category'))]/@href\").extract()\n for url in urls:\n poiid = re.findall(\"\\d+\", url)[0]\n url = \"http://www.poi86.com\" + url\n yield Request(url=url, meta={\"poiid\":poiid}, callback=self.parseItem)\n\n def parseItem(self, response):\n item = HospitalItem()\n item[\"poiid\"] = response.meta['poiid']\n item[\"name\"] = response.xpath('//h1/text()').extract_first() \n\n res = response.xpath(\"//ul/li[@class='list-group-item']/a/text()\").extract()\n item[\"province\"] = res[0]\n item[\"city\"] = res[1]\n item[\"district\"] = res[2]\n item[\"label\"] = res[3].replace(\"(\",\"\")\n\n res = response.xpath(\"//ul/li[@class='list-group-item']\").extract()\n item[\"address\"] = re.findall(\"(.*)\",res[3])[0]\n item[\"phone\"] = re.findall(\"(.*)\",res[4])[0]\n item[\"category\"] = \"\"\n item[\"groundpos\"] = re.findall(\"(.*)\",res[-3])[0]\n item[\"marspos\"] = re.findall(\"(.*)\",res[-2])[0]\n item[\"baidupos\"] = re.findall(\"(.*)\",res[-1])[0]\n\n yield item\n","repo_name":"godly4/hospital","sub_path":"hospital/spiders/getItem.py","file_name":"getItem.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34811310824","text":"import pandas as pd\nfrom flask import Flask, request, render_template\nimport pickle\n\napp = Flask(\"__name__\")\n\n@app.route(\"/\")\ndef loadPage():\n\treturn render_template('index.html', query=\"\")\n\n@app.route(\"/\", methods=['POST'])\ndef CovidPrediction():\n inputQuery1 = request.form['query1']\n inputQuery2 = request.form['query2']\n inputQuery3 = request.form['query3']\n inputQuery4 = request.form['query4']\n inputQuery5 = request.form['query5']\n inputQuery6 = request.form['query6']\n inputQuery7 = request.form['query7']\n \n X_pred = [[inputQuery1, inputQuery2, inputQuery3, inputQuery4, inputQuery5, inputQuery6, inputQuery7]]\n\n X_pred = pd.DataFrame(X_pred, columns=['Body_Temp','Breath_Rate','BloodPressure','Oxygen_Level','Vaccinated','Cough_Cold','Age'])\n\n savename = \"dataset/covidmodel.sav\"\n\n load_model = pickle.load(open(savename, \"rb\"))\n\n predicted = load_model.predict(X_pred)\n\n probability = load_model.predict_proba(X_pred)[:,1][0]*100\n\n if predicted==1:\n output = \"The patient is having Covid 19 Positive symptoms. Make sure to Quarantine Yourself from other and Stay Safe .\"\n output1 = \"Model Accuracy: {}\".format(probability)\n else:\n output = \"The patient is having Covid 19 Negative symptoms. 
Maintain Social Distancing and Stay Safe.\"\n output1 = \"Model Accuracy: {}\".format(100-probability)\n \n return render_template('index.html', output1=output, output2=output1, query1 = request.form['query1'], query2 = request.form['query2'],query3 = request.form['query3'],query4 = request.form['query4'],query5 = request.form['query5'],query6 = request.form['query6'],query7 = request.form['query7'])","repo_name":"Priyansh-15/Covid19-Predictor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70419085993","text":"import configparser\n\nfrom .network_utils import *\n\nclass Config:\n NODE_SECTION = 'Node'\n MASTER_NODE_SECTION = 'Master Node'\n\n def __init__(self, ip, port, master_ip=None, master_port=None):\n self.master = master_ip is None\n self.ip = ip if ip else get_ip()\n self.port = port if port else find_free_port()\n self.master_ip = master_ip\n self.master_port = master_port\n\n @staticmethod\n def load_from_file(config_path):\n configuration = configparser.ConfigParser()\n configuration.read(config_path)\n\n master = configuration.getboolean(Config.NODE_SECTION, 'master')\n ip = configuration.get(Config.NODE_SECTION, 'ip') if configuration.has_option(Config.NODE_SECTION,\n 'ip') else None\n port = configuration.getint(Config.NODE_SECTION, 'port') if configuration.has_option(Config.NODE_SECTION,\n 'port') else None\n\n if not master:\n master_ip = configuration.get(Config.MASTER_NODE_SECTION, 'ip')\n master_port = configuration.getint(Config.MASTER_NODE_SECTION, 'port')\n return Config(ip, port, master_ip, master_port)\n return Config(ip, port)\n","repo_name":"Basicula/GradWork","sub_path":"parcsv2/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43099055567","text":"# ObjMgr database information\n\n# Import the ObjMgr symbols\nfrom ObjMgr import *\n\n\nOBJ_GENERAL = \"General\"\nINF_MESSAGE = \"Message\"\nINF_PATH = \"Path\"\n\n\nLAY_DATA = \"Data\"\nLAY_DESCRIPTION = \"Description\"\nLAY_APPEARANCE = \"Appearance\"\nLAY_MEASUREMENTS = \"Measurements\"\nINF_STRUCTURE = \"Structure\"\nINF_FILENAME = \"Filename\"\nINF_INFO = \"Info\"\nINF_SIDE = \"Side\"\nINF_LEVEL = \"Level\"\n\nINF_COLOR = \"Color\"\nINF_TRANSPARENCY = \"Transparency\"\nINF_VISIBLE = \"Visible\"\nINF_BB = \"BoundingBoxOn\"\n\n\n# Add here all messages for Image data\n\nOBJ_IMAGE = \"Image\"\nOBJ_ROI = \"ROI[0-99]+\"\nLAY_RESULT = \"Result[0-99]+\"\nLAY_MODALITY = \"Modality\"\nLAY_ROI = \"ROI\"\nINF_LUTCENTER = \"LutCenter\"\nINF_LUTWIDTH = \"LutWidth\"\nINF_IMAGEDATE = \"Date\"\nINF_EQUIPMENT = \"Equipment\"\nINF_INSTITUTION = \"Institition\"\nINF_ROIX = \"ROI[0-99]+\"\n\n# ObjMgr symbols and messages for tumor segmentation results\nOBJ_TUMORSEG = \"TumorSegmentation\"\nLAY_TUMORPREFIX = \"Tumor_\"\nINF_BARYCENTER = \"BaryCenter\"\nINF_MAX_DIAMETER = \"maxDiameter\"\nINF_MIDPOINT = \"MidPoint\"\nINF_PATH = \"Path\"\nINF_ROI_CENTER = \"RoiCenter\"\nINF_ROI_SIZE = \"RoiSize\"\nINF_VOLUME = \"Volume\"\nINF_X_AXIS = \"xAxis\"\nINF_X_DIAMETER = \"xDiameter\"\nINF_Y_AXIS = \"yAxis\"\nINF_Y_DIAMETER = \"yDiameter\"\nINF_Z_AXIS = \"zAxis\"\nINF_Z_DIAMETER = \"zDiameter\"\n\nOBJ_PLANCONFIG = \"PlanConfig\"\nOBJ_MONITORCONFIG = \"MonitorConfig\"\nOBJ_ASSESSCONFIG = \"AssessConfig\"\n\nLAY_CASE = \"Case\"\nINF_CASEDIR = 
\"Directory\"\n\nOBJ_THIS_IS_A_DUMMY = \"\"\nLAY_THIS_IS_A_DUMMY = \"\"\nINF_THIS_IS_A_DUMMY = \"\"\n\n\n# Add here all global messages (LAY_GLOBAL + INF_MESSAGE)\n\nLAY_GLOBAL = \"Global\"\n\nMSG_LOAD = \"load\"\nMSG_CLEANUP = \"cleanup\"\n\n# Add here all messages for the Viewer module (LAY_VIEWER + INF_MESSAGE)\n\nLAY_VIEW = \"View\"\n\n\n\n\n\n\n# VIEWER XYZ layers\n\nINF_GLOBAL_REFRESH = \"Refresh\"\nLAY_VIEWER_CAMERA = \"Camera\"\nLAY_VIEWER_PROPERTIES = \"Properties\"\nINF_VIEWER_CAMERA_POSITION = \"Position\"\nINF_VIEWER_CAMERA_ORIENTATION = \"Orientation\"\nINF_VIEWER_CAMERA_HEIGHT = \"Height\"\nINF_VIEWER_CAMERA_NEWDATA = \"NewData\"\nINF_VIEWER_CAMERA_FOCALDISTANCE = \"FocalDistance\"\nINF_VIEWER_PROPERTIES_FRAMERATE = \"Framerate\"\nINF_VIEWER_PROPERTIES_CAPTUREFRAME = \"CaptureFrame\"\nINF_VIEWER_PROPERTIES_CREATEAVI = \"CreateAVI\"\nINF_VIEWER_PROPERTIES_CANCELRECORDING = \"CancelRecording\"\nINF_VIEWER_PROPERTIES_UPDATEMLOUTPUT = \"UpdateMLOutput\"\nINF_VIEWER_PROPERTIES_VIEWERSTATUS = \"ViewerStatus\"\nINF_VIEWER_PROPERTIES_PROTOCOLVIEWERSTATUS = \"ProtocolViewerStatus\"\nINF_VIEWER_PROPERTIES_BGCOLOR = \"BGColor\"\nINF_VIEWER_PROPERTIES_BGGREYCENTER = \"BGGreyCenter\"\nINF_VIEWER_PROPERTIES_BGGREYWIDTH = \"BGGreyWidth\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass ObjInfoCom(ObjInfo):\n \"\"\"Extends ObjInfo class with communication functions\"\"\"\n \n def sendMessage(self, layer, msg, arg= None):\n \"\"\"Send 'message' and 'argument' to module specified by 'layer'\n (e.g. sendMessage(LAY_GLOBAL,MSG_LOAD) or sendMessage(LAY_VIEWER,MSG_VIEWMODE,\"\"))\"\"\"\n\n self.typedSet(OBJ_COMMUNICATION,layer,INF_MESSAGE, msg, INFOTYPE_MESSAGE)\n if arg: self.set(INF_DATA, arg)\n self.notify()\n return\n","repo_name":"MeVisLab/communitymodules","sub_path":"UMD/METK/Modules/Macros/UMDObjCommunicator/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"32068123968","text":"\n\nfrom vl.lek12.augment import augment,bottleneck\nfrom vl.lek12.residual_graph import residual_graph\n\n# s-t-Pfad als Knotenfolge, O(m)\ndef get_path(v,parent):\n if v == None: return [] # s erreicht\n return get_path(parent[v],parent) + [v] \n\nfrom vl.lek03.dfs import dfs\n# Eingabe: Flussnetzwerk G, Quelle s, Senke t\n# o.b.d.A. alle Knoten sind von s aus erreichbar,\n# und in G ex. max. eine der Kanten (u,v) und (v,u)\ndef ford_fulkerson(G,s,t):\n # Fluss mit 0 initialisieren und ersten Pfad suchen\n f = {(u,v):0 for u in range(len(G)) for v in G[u]} # O(m+k) \n Gf = residual_graph(G,f) # hier ist Gf = G # O(m+k)\n R,parent = dfs(Gf,s) # Pfad suchen # O(m+k)\n while t in R: # ex. s-t-Pfad in Gf ? # O(C) \n P = get_path(t,parent) # Pfad als Knotenfolge # O(m)\n b = bottleneck(P,Gf) # min. Kap. 
entlang P # O(m)\n f = augment(f,P,b) # f um b erhoehen # O(m)\n Gf = residual_graph(G,f) # neuer Residualgraph # O(m+k)\n R,parent = dfs(Gf,s) # Pfad in Gf suchen # O(m+k)\n return f, sum(f[s,u] for u in G[s])\n\nfrom vl.lek12.residual_graph import G\nprint(ford_fulkerson(G,0,3))\n\n\n","repo_name":"MMueller98/Algorithmen-Design-Python","sub_path":"Code/vl/lek12/ford_fulkerson.py","file_name":"ford_fulkerson.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3247087364","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'bert'\n\nfrom dateutil import get_current_datetime\n\nfrom django.conf import settings\n\nfrom bdem import msgutil\nfrom eaglet.core import watchdog\n\nfrom weixin2 import models as weixin_models\nfrom mall.promotion import models as mall_models\nfrom modules.member import module_api as member_model_api\n\n\ndef send_mns_message(topic_name, message_name, data):\n\tmsgutil.send_message(topic_name, message_name, data)\n\n\tmessage = u\"send_mns_message, topic_name:{}, message_name:{}, data:{}\".format(topic_name, message_name, data)\n\twatchdog.info(message)\n\n\ndef coupon_issuing_tmpl_compatible(tmpl_name):\n\t\"\"\"\n\tdecorator\n\t为了兼容新旧消息模板,首先检测商家是否配置了新的tmpl_name,如果是的话,则采用新的阿里云消息机制,\n\t否则,使用旧的celery机制\n\t注:为了能够获取到商家的woid,所以,被装饰的函数第一个参数要求是CouponRule的实例\n\t:param tmpl_name: 新模板的消息名称\n\t\"\"\"\n\tdef wrapper(func):\n\t\tdef inner(*args, **kwargs):\n\t\t\t#判断是否配置了新的模版\n\t\t\tcoupon_rule = args[0]\n\t\t\twoid = coupon_rule.owner_id\n\t\t\tmember_id = args[1]\n\n\n\t\t\tif has_new_tmpl(woid, tmpl_name):\n\t\t\t\tall_member_noused_coupons = mall_models.Coupon.objects.filter(member_id=member_id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t status=mall_models.COUPON_STATUS_UNUSED)\n\t\t\t\ttotal_money = 0\n\t\t\t\tfor coupon in all_member_noused_coupons:\n\t\t\t\t\ttotal_money += coupon.money\n\n\t\t\t\t#商家配置了新的模板消息,则使用mns,并返回空函数\n\t\t\t\tmember = member_model_api.get_member_by_id(member_id)\n\t\t\t\tsend_weixin_template_msg({\n\t\t\t\t\t'user_id': woid,\n\t\t\t\t\t'member_id': member_id,\n\t\t\t\t\t'name': tmpl_name,\n\t\t\t\t\t'url': u'{}/mall/my_coupons/?woid={}&fmt={}'.format(settings.H5_HOST, woid, member.token),\n\t\t\t\t\t'items': {\n\t\t\t\t\t\t'keyword1': u'个人中心-我的优惠券',\n\t\t\t\t\t\t'keyword2': str(coupon_rule.money),\n\t\t\t\t\t\t'keyword3': str(total_money),\n\t\t\t\t\t\t'keyword4': get_current_datetime(),\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\treturn empty_func()\n\t\t\telse:\n\t\t\t\treturn func(*args, **kwargs)\n\n\t\treturn inner\n\treturn wrapper\n\ndef empty_func():\n\tpass\n\ndef has_new_tmpl(owner_id, tmpl_name):\n\tuser_tmpls = weixin_models.UserTemplateSettings.objects.filter(owner_id=owner_id, title=tmpl_name, status=True)\n\treturn user_tmpls.count() > 0\n\n\ndef send_weixin_template_msg(data):\n\ttopic_name = '{}-weixin-topic'.format('test' if settings.IS_TMS_TEST else 'deploy')\n\tmessage_name = 'template_msg'\n\tdata['test_env'] = settings.TEST_ENV\n\n\tsend_mns_message(topic_name, message_name, data)","repo_name":"chengdg/weizoom","sub_path":"weapp/utils/send_mns_message.py","file_name":"send_mns_message.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23870268451","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport six.moves as sm\nimport tensorflow 
as tf\nimport time\nfrom sys import path, modules\n\nfrom utils import sequential_name\n\npath.insert(1, './Parameters')\n\n\nclass Parameters(object):\n \"\"\"\n A Parameters-class object takes as input a string naming a .py file\n in the ./Parameters folder from which attributes will be retrieved.\n This string will also name the model for saving weights, training \n logs and any fooling-images created.\n \n Attributes in must be included, while \n are optional. There are also other attributes that will be retrieved\n based on whether others are present, e.g. decay_params if a \n decay_function is given.\n \n required_vars\n - data_wrap: a function that returns the dataset to use when called,\n expected to be in the format given in data_wrappers.py\n - n_classes: int, number of output classes\n - image_dim: list-of-ints, size of the input images as [H,W,C]\n - batch_size: int, size of training batches\n - n_batches: int, maximum number of batches to train for\n - check_every: int, in batches, how often to report validation error,\n update logs, and check for early stopping (if used)\n - optimizer: tf.train.Optimizer class to use for training\n - learning_rate: float, learning rate for optimizer. Used as the \n initial rate with LR decay (except piecewise_constant)\n - layers: list of layers of the network, in the format of Layer class\n of utils.py\n \n optional_vars\n - early_stopping: int, maximum number of batches since the best \n validation result before training is stopped\n - clip_min_and_max: list of floats, [min, max] to use for gradient \n clipping\n - decay_function: function to use for learning rate decay\n - dropout_prob: float, dropout probability to use in training\n - layers_to_track_dead_units: list-of-ints giving (zero-indexed) \n indices of layers where the number of inactive output units \n will be logged (intended for use with ReLU activation function)\n - original_model: string, for transfer learning, the name of the \n pretrained model to use weights from\n - train_loss_check_every: int, in batches, how often to log training\n loss. Defaults to .\n \n dependent on decay_function:\n - decay_params: list of parameter values to feed to decay_function,\n aside from initial rate and global_step.\n \n dependent on original_model:\n - up_to_layer: int, the final layer from the original model to take.\n All layers from before that one are taken too: layers are zero-\n indexed, so up_to_layer=3 means the first 4 layers are used. \n - original_run: int, or tuple of (int, 'BEST' or 'LAST') giving which\n training run of the original model to use\n - also, if original_model was given, the layers parameter is changed\n by appending to the start of the list, the layers from the original\n model. Thus self.layers represents all the layers used in taking \n the raw image to the final classification.\n \"\"\"\n required_vars = [\n 'data_wrap',\n 'n_classes',\n 'image_dim',\n 'batch_size',\n 'n_batches',\n 'check_every', \n 'optimizer',\n 'learning_rate',\n 'layers']\n optional_vars = [\n 'early_stopping',\n 'clip_min_and_max',\n 'decay_function',\n 'dropout_prob',\n 'layers_to_track_dead_units',\n 'original_model',\n 'train_loss_check_every']\n \n def __init__(self, name, run=None):\n \"\"\"\n The optional input can be used to identify a specific\n training run of the CNNClassifier model defined by .\n \n Both the best-validation-loss ('BEST') and last-found ('LAST') \n weights are saved during training. 
By default, the best weights of\n the most-recent run (that with the highest number) are accessed.\n \n can be given as an integer of the run number or as a tuple \n of (int, 'BEST' or 'LAST'). In the integer case, 'BEST' will be\n assumed (warning: if the training run had lower n_batches than \n check_every parameter, the 'BEST' save will not have been created).\n \n Training a new model in CNNClassifier assumes run was given as None \n as it will save under a new run with its number incremented by one.\n \"\"\"\n if run is None:\n self.run_was_set = False\n if os.access('./Weights/{:s}'.format(name), os.F_OK):\n entries = os.listdir('./Weights/{:s}'.format(name))\n entries = [int(x[4:]) for x in entries if x.startswith('Run_')] + [0]\n self.run = max(entries)\n else:\n self.run = 0\n version = 'BEST'\n elif isinstance(run,int):\n self.run_was_set = True\n self.run = run\n version = 'BEST'\n else:\n self.run_was_set = True\n assert isinstance(run[0], int)\n self.run = run[0]\n version = run[1].upper()\n assert version in ['BEST', 'LAST']\n \n self.name = name\n self.weights_path = self.make_weights_path(version)\n self.logs_path = self.make_logs_path()\n \n if self.run_was_set:\n folder, _, filename = self.weights_path.rpartition('/')\n weights = os.listdir(folder)\n if filename+'.data-00000-of-00001' not in weights:\n raise ValueError('{:s} no weights saved for this run')\n \n try:\n param_file = modules[name]\n sm.reload_module(param_file)\n except KeyError:\n param_file = __import__(name)\n \n for var_name in self.required_vars:\n setattr(self, var_name, getattr(param_file, var_name))\n for var_name in self.optional_vars:\n setattr(self, var_name, getattr(param_file, var_name, None))\n \n if self.train_loss_check_every is None:\n self.train_loss_check_every = self.check_every\n \n if self.decay_function is not None:\n setattr(self, 'decay_params', getattr(param_file, 'decay_params'))\n \n if self.original_model is not None:\n setattr(self, 'up_to_layer', getattr(param_file, 'up_to_layer'))\n setattr(self, 'original_run', getattr(param_file, 'original_run', None))\n original_file = __import__(self.original_model)\n original_layers = getattr(original_file, 'layers')\n self.layers = original_layers[:(self.up_to_layer+1)] + self.layers\n \n def make_weights_path(self, ver='BEST'):\n ver = ver.upper()\n assert ver in ['BEST', 'LAST']\n return './Weights/{n}/Run_{r}/{n}_Run_{r}_{v}'.format(\n n=self.name, r=self.run, v=ver)\n\n def make_logs_path(self):\n return './Logs/{n}/Run_{r}'.format(n=self.name, r=self.run)\n\nclass CNNClassifier(object):\n \"\"\"\n A CNNClassifier-class object can define and train a convolutional \n neural network as specified by the attributes of its Parameters-class\n input.\n \"\"\"\n def __init__(self, params):\n self.params = params\n \n def add_placeholders(self):\n \"\"\"\n Creates placeholders including supplementary ones for layers \n having different train- and test-behaviour, such as dropout and\n batch-normalization.\n \"\"\"\n self.input_placeholder = tf.placeholder(tf.float32,\n shape = [None] + self.params.image_dim,\n name = 'Images')\n self.labels_placeholder = tf.placeholder(tf.float32,\n shape = [None, self.params.n_classes],\n name = 'Labels')\n self.is_training_placeholder = tf.placeholder_with_default(False,\n shape = [],\n name = 'Is_Training')\n self.dropout_placeholder = tf.placeholder_with_default(1.,\n shape = [],\n name = 'Dropout_Rate')\n self.extra_holders = {'is_training': self.is_training_placeholder,\n 'keep_prob': 
self.dropout_placeholder}\n \n def add_body(self, x):\n self.layer_outputs = []\n for i, layer in enumerate(self.params.layers):\n x = layer(x, self.extra_holders, 'Layer_{:d}'.format(i))\n self.layer_outputs.append(x)\n return x\n \n def add_loss(self, preds, eps=1e-15):\n \"\"\"\n Takes predictions from model layers and returns cross-entropy loss,\n classification accuracy and final scores for each label.\n \n Optional input eps is added to predictions for numerical stability.\n \"\"\"\n y_ = self.labels_placeholder\n loss = -tf.reduce_mean(tf.log(tf.reduce_sum(y_*preds,1)+eps))\n correct = tf.equal(tf.argmax(preds,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n return loss, accuracy\n \n def add_training_op(self, loss):\n \"\"\"\n Adds training-step operation to the computational graph. Optionally\n applies learning-rate decay and/or gradient clipping, if their\n required parameters have been set.\n \"\"\"\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n if self.params.decay_function == tf.train.piecewise_constant:\n self.lr = self.params.decay_function(\n self.global_step, *self.params.decay_params)\n elif self.params.decay_function is not None:\n self.lr = self.params.decay_function(\n self.params.learning_rate, self.global_step,\n *self.params.decay_params)\n else:\n self.lr = self.params.learning_rate\n optim = self.params.optimizer(self.lr)\n grads_and_vars = optim.compute_gradients(loss)\n if self.params.clip_min_and_max is not None:\n def clipper(gv):\n clip = tf.clip_by_value(gv[0], *self.params.clip_min_and_max)\n return (clip, gv[1])\n grads_and_vars = [clipper(gv) for gv in grads_and_vars]\n return optim.apply_gradients(grads_and_vars, global_step=self.global_step)\n \n def build_model(self):\n self.add_placeholders()\n self.predictions = self.add_body(self.input_placeholder)\n self.loss, self.accuracy = self.add_loss(self.predictions)\n self.train_op = self.add_training_op(self.loss)\n \n def train_model(self, resume=False):\n \"\"\"\n Trains the CNN Classifier and saves best validation-loss weights in\n ./Weights folder under the model's name, and the weights from the\n final iteration under the modelname + '_LAST'.\n \n tf.summary records of progress are saved in ./Logs under the model\n name, from where they can be viewed with TensorBoard.\n \n If optional argument 'resume' is True, training restarts from\n weights saved previously for the given run. 
In this case, wall-time\n plots in TensorBoard include time between the sessions, not just \n the actual training time.\n \"\"\"\n start_time = time.time()\n data = self.params.data_wrap()\n \n print('Building Computational Graph')\n g = tf.Graph()\n with g.as_default(), tf.Session() as session:\n with tf.variable_scope(self.params.name):\n self.build_model()\n variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n scope=self.params.name)\n saver = tf.train.Saver(variables)\n if resume:\n if self.params.weights_path[-4:] == 'LAST':\n print('Warning: Continuing training from a LAST checkpoint'\n ' will overwrite saved BEST')\n saver.restore(session, self.params.weights_path)\n self.params.weights_path = self.params.make_weights_path()\n else:\n session.run(tf.variables_initializer(variables))\n if self.params.run_was_set:\n print('Training (without resuming) will overwrite saved'\n ' {:s} Run {:d}'.format(self.params.name, self.params.run))\n self.params.weights_path = self.params.make_weights_path()\n else:\n self.params.run += 1\n self.params.weights_path = self.params.make_weights_path()\n self.params.logs_path = self.params.make_logs_path()\n os.makedirs(self.params.weights_path.rpartition('/')[0])\n \n train_writer = tf.summary.FileWriter(\n sequential_name(self.params.logs_path,'train'))\n valid_writer = tf.summary.FileWriter(\n sequential_name(self.params.logs_path,'validation'))\n test_writer = tf.summary.FileWriter(\n sequential_name(self.params.logs_path,'test'))\n \n mean_sum = tf.reduce_mean(tf.reduce_sum(self.predictions, 1))\n main_summaries = [tf.summary.scalar('Loss', self.loss),\n tf.summary.scalar('Accuracy', self.accuracy),\n tf.summary.scalar('Mean_Sum_Of_Predictions', mean_sum)]\n lr_summary = [tf.summary.scalar('Learning_Rate', self.lr)]\n \n weights_and_biases = g.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope=self.params.name+'/Layer_')\n wb_summaries = []\n for var in weights_and_biases:\n wb_summaries.append(tf.summary.histogram(var.op.name, var))\n \n dead_summaries = []\n if self.params.layers_to_track_dead_units is not None:\n for i in self.params.layers_to_track_dead_units:\n dead = tf.less_equal(tf.count_nonzero(self.layer_outputs[i], 0), 0)\n n_ = tf.reduce_mean(tf.cast(dead, tf.float32))*100.\n s_ = tf.summary.scalar('Percent_Inactive_Layer_{:d}'.format(i), n_)\n dead_summaries.append(s_)\n \n train_summaries = tf.summary.merge(main_summaries \n + lr_summary)\n valid_summaries = tf.summary.merge(main_summaries \n + wb_summaries \n + dead_summaries)\n test_summaries = tf.summary.merge(main_summaries)\n \n valid_feed = {self.input_placeholder: data.validation.images,\n self.labels_placeholder: data.validation.labels}\n train_feed = {self.is_training_placeholder: True,\n self.dropout_placeholder: self.params.dropout_prob}\n \n if resume:\n best_acc = session.run(self.accuracy, feed_dict=valid_feed)\n else:\n best_acc = 0.\n best_step = 0\n \n batch_time = time.time()\n print('Training Model')\n for i in sm.range(self.params.n_batches):\n batch = data.train.next_batch(self.params.batch_size)\n train_feed[self.input_placeholder] = batch[0]\n train_feed[self.labels_placeholder] = batch[1]\n if (i+1)%self.params.train_loss_check_every == 0:\n train_summ, steps, _ = session.run([train_summaries, \n self.global_step, \n self.train_op], \n feed_dict=train_feed)\n train_writer.add_summary(train_summ, steps)\n else:\n _ = session.run(self.train_op, feed_dict=train_feed)\n if (i+1)%self.params.check_every == 0:\n valid_summ, steps, loss, acc = 
session.run([valid_summaries, \n self.global_step,\n self.loss, self.accuracy],\n feed_dict=valid_feed)\n valid_writer.add_summary(valid_summ, steps)\n saver.save(session, self.params.make_weights_path('LAST'))\n print('batch {:d}: loss {:.5f}, accuracy {:.4f}, {:.2f} sec'\n .format(i+1, loss, acc, time.time()-batch_time))\n batch_time = time.time()\n if acc > best_acc:\n best_acc = acc\n best_step = i+1\n saver.save(session, self.params.weights_path)\n if self.params.early_stopping is not None:\n if (i+1-best_step) >= self.params.early_stopping:\n break\n \n saver.save(session, self.params.make_weights_path('LAST'))\n \n if self.params.check_every <= self.params.n_batches:\n saver.restore(session, self.params.weights_path)\n best_step = session.run(self.global_step)\n print('Testing Model: Best Step = {:g}'.format(best_step))\n feed = {self.input_placeholder: data.test.images,\n self.labels_placeholder: data.test.labels}\n test_summ, steps, loss, acc = session.run([test_summaries, \n self.global_step,\n self.loss, self.accuracy],\n feed_dict=feed)\n test_writer.add_summary(test_summ, steps)\n print('test loss {:.5f}, accuracy {:.4f}'.format(loss, acc))\n \n train_writer.close()\n valid_writer.close()\n test_writer.close()\n \n print('Total Time {:.2f}'.format(time.time()-start_time))\n \n \ndef load_cnn(session, name, run=None):\n \"\"\"\n Loads a previously-trained CNNClassifier object with the given name.\n The operations of that object are added to the current default graph,\n so load_cnn should not be called twice with the same name in the same\n graph. The trained variables are initialized in input tf.Session \n 'session'.\n \"\"\"\n g = tf.get_default_graph()\n with g.as_default():\n params = Parameters(name, run)\n CNN = CNNClassifier(params)\n with tf.variable_scope(name):\n CNN.build_model()\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, \n scope=name+'/')\n saver = tf.train.Saver(variables)\n saver.restore(session, params.weights_path)\n return CNN\n \n","repo_name":"vogelta/vogelta.github.io","sub_path":"cnn_classifier.py","file_name":"cnn_classifier.py","file_ext":"py","file_size_in_byte":17743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18810717387","text":"import sys\r\ninput = sys.stdin.readline\r\nclass Node(object):\r\n def __init__(self, key, value=None):\r\n self.key = key\r\n self.value = value\r\n self.children = {}\r\n\r\nclass Trie:\r\n def __init__(self):\r\n self.head = Node(None)\r\n \r\n def insert(self, string):\r\n curNode = self.head # 시작은 Head노드부터\r\n\r\n for char in string:\r\n if char not in curNode.children:\r\n curNode.children[char] = Node(char) # 해당 문자가 존재하지 않을 경우 새로운 노드를 생성\r\n curNode=curNode.children[char]\r\n curNode.value = string # 문자열의 끝 부분��� value로 문자열값 자체를 할당\r\n\r\n def search(self, string):\r\n curNode = self.head\r\n cnt = 0\r\n if curNode.key == None: # Head Case\r\n if len(curNode.children) == 1:\r\n cnt+=1\r\n for char in string:\r\n if curNode.value != None or len(curNode.children) > 1:\r\n cnt+=1\r\n curNode = curNode.children[char]\r\n return cnt\r\n\r\nN = int(input())\r\nwhile N >= 1:\r\n trie = Trie()\r\n In = []\r\n for _ in range(N):\r\n string = str(input().rstrip())\r\n In.append(string)\r\n trie.insert(string)\r\n sum = 0\r\n for string in In:\r\n sum += trie.search(string)\r\n print('{:.2f}'.format((sum/len(In))))\r\n try:\r\n N = int(input())\r\n except:\r\n break","repo_name":"ukjinlee66/PS","sub_path":"백준/Platinum/5670. 
휴대폰 자판/휴대폰 자판.py","file_name":"휴대폰 자판.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10863236124","text":"from .__templates__ import Static\nimport itchat\nimport itertools\n\n\nclass Calc24(Static):\n __author__ = \"Hanzhi Zhou\"\n alias = \"calc24\"\n parameters = \"[n1] [n2] [n3] [n4]\"\n title = \"24 Point Calculation\"\n description = \"Find an arithmetic combination, if exists, of four integers that equals to 24\"\n example = \"Example: /calc24 2 4 6 8\"\n fast_execution = True\n\n # maximum number of solutions to output\n num_solutions = 3\n\n @staticmethod\n def eval_postfix(expr):\n token_list = expr\n stack = []\n for token in token_list:\n if token in \"+-**/\":\n o1 = stack.pop()\n o2 = stack.pop()\n if token == \"/\":\n if int(o2) // int(o1) != int(o2) / int(o1):\n return False\n stack.append(str(int(eval(o2 + token + o1))))\n else:\n stack.append(token)\n return float(stack.pop())\n\n @staticmethod\n def post_to_in(expr):\n token_list = expr\n stack = []\n for token in token_list:\n if token in \"+-\":\n o1 = stack.pop()\n o2 = stack.pop()\n stack.append(\"({} {} {})\".format(o2, token, o1))\n elif token in \"**/\":\n o1 = stack.pop()\n o2 = stack.pop()\n if token == \"*\":\n r = u\" × \"\n elif token == \"/\":\n r = u\" ÷ \"\n else:\n r = \" ^ \"\n stack.append(o2 + r + o1)\n else:\n stack.append(token)\n\n st = stack[0]\n # remove the outermost parentheses\n if st.startswith(\"(\") and st.endswith(\")\"):\n st = st[1:len(st) - 1]\n return st\n\n @classmethod\n def parse_args(cls, from_user, args):\n assert len(args) >= 4, \"4 positive integer parameters are required!\"\n assert args[0].isdigit() and args[1].isdigit() and args[2].isdigit() and args[\n 3].isdigit(), \"4 positive integer parameters are required!\"\n return args[0:4]\n\n @classmethod\n def call(cls, from_user, args):\n operands = ['+', '-', '*', '/']\n nums = args\n results = []\n for per in itertools.permutations(nums, 4):\n for ops in itertools.combinations_with_replacement(operands, 3):\n for op in itertools.permutations(ops, 3):\n expr = [per[0], per[1], op[0], per[2], op[1], per[3], op[2]]\n if cls.eval_postfix(expr) == 24:\n if expr not in results:\n results.append(expr)\n if len(results) == 0:\n itchat.send_msg(\"No solution!\", from_user)\n else:\n for result in results:\n if result[2] == \"*\" or result[2] == \"+\":\n if result[2] == result[4] and result[4] == result[6]:\n idx = 0\n for per in itertools.permutations([result[0], result[1], result[3], result[5]]):\n if idx == 0:\n idx += 1\n continue\n try:\n results.remove([per[0], per[1], result[2], per[2], result[4], per[3], result[6]])\n except:\n pass\n elif result[2] == result[4]:\n idx = 0\n for per in itertools.permutations([result[0], result[1], result[3]]):\n if idx == 0:\n idx += 1\n continue\n try:\n results.remove([per[0], per[1], result[2], per[2], result[4], result[5], result[6]])\n except:\n pass\n else:\n try:\n results.remove(\n [result[1], result[0], result[2], result[3], result[4], result[5], result[6]])\n except:\n pass\n\n if len(results) > cls.num_solutions:\n dist = len(results) // cls.num_solutions\n results = results[0], results[dist], results[-1]\n\n for result in results:\n 
itchat.send_msg(cls.post_to_in(result))\n","repo_name":"hanzhi713/WeChat-CLI-Tool","sub_path":"modules/Calc24.py","file_name":"Calc24.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"133033079","text":"#!/usr/bin/env python\n# coding: utf-8\nimport json\nimport argparse\nimport logging\nfrom host_status import HostStatus\nimport yt.wrapper as yt\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n ap = argparse.ArgumentParser()\n ap.add_argument('--input', required=True, type=argparse.FileType('r'))\n ap.add_argument('--recalc-table', default='//home/videoindex/full/docbase/reprecalc/hosts')\n args = ap.parse_args()\n recalc = set()\n hosts = json.load(args.input)\n for hostrec in hosts:\n for item in hostrec['Samples']:\n if item.get('Status') in [HostStatus.SEARCHABLE_HTML, HostStatus.SEARCHABLE_VIDEO]:\n recalc.add(hostrec['Host'])\n data = [{'Host': host, 'Comment': 'from autoplayer check'} for host in recalc]\n yt.retries.run_with_retries(lambda: yt.insert_rows(args.recalc_table, data))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"extsearch/player_testing/pipeline/force_precalc.py","file_name":"force_precalc.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35499087153","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/7/18 3:58 下午\n# @Author : Li\n# @Email : m15574933885@163.com\n# @File : demo_07.py\n# @Software: PyCharm\n\n\n# 定位一组元素\n\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://www.baidu.com\")\ndriver.implicitly_wait(10)\n\nel = driver.find_element(By.ID, \"s-usersetting-top\")\nActionChains(driver).move_to_element(el).perform()\n\ndriver.find_element(By.PARTIAL_LINK_TEXT, '高级搜索').click()\n\nredio_list = driver.find_elements(By.NAME, 'q5')\n# 先选择一个\nredio_list[1].click()\n\n# 循环遍历\ntime.sleep(2)\nfor i in redio_list:\n i.click()\n time.sleep(2)\n\n# 循环遍历输入\ninputs = driver.find_elements_by_css_selector(\"input.c-input.adv-q-input\")\nfor send in inputs:\n send.send_keys(\"sssxxx\")\n time.sleep(0.5)\n\ntime.sleep(2)\n\ndriver.quit()\n","repo_name":"liousAlready/P9_P10selenium","sub_path":"class3/demo_07.py","file_name":"demo_07.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1214466874","text":"#이름 짓기\n#파스칼 표기법 : 첫단어를 대문자로 시작하며 이름을 지음\n# 첫단어를 대문자로 시작하며 이름을 지음\n# Employees, Departments\n# RegisterEmployee, JoinMember\n\n#카멜 표기법 :\n# 첫단어를 소문자로 시작하며 이름을 지음\n# registerEmployee, joinMember\n\n\n#스네이크 표기법 :\n# 소문자와 _기호를 이용해서 이름을 지음\n# register_employee, join_member\n\n#헝가리언 표기법\n# 자료형을 의미하는 접두사를 이용해서 이름을 지음\n# strName, isMarried, boolMarried\n\n\n\n# 함수로 재작성하기\nimport random\n\n#8 자취방 구하기\ndef compareRoom(width, height, price):\n return (width * height)/price\n\nroomA = compareRoom(2.5, 3, 27)\nroomB = compareRoom(4, 2, 30)\n\nif(roomA > roomB) :\n print('방A가 낫네요')\nelse:\n print('방B가 낫네요')\n\n\n#10\n\ndef computeProfit():\n c = input('불변자본을 입력하세요')\n v = input('가변자본을 입력하세요')\n s = input('잉여가치액을 입력하세요')\n\n #constant capital, variable capital\n #surplus value\n\n return (c+v)/s\n\nprint(computeProfit())\n\n#11\n\n# 달러환율 : 1071\n# 유로환율 : 1309\n\ndef 
getExchangeRate(country):\n rate = 0\n if country == 'us' :\n rate=1071\n elif country == 'euro':\n rate =1309\n return rate\n\nbuyUS = 780*getExchangeRate('us')\nbuyEuro = 780*getExchangeRate('euro')\n\nif buyUS > buyEuro:\n print('유로화로 구입하는게 더 싸네요')\nelse:\n print('달러로 구입하는게 더 싸네요')\n\n\n#12\ndef howManyRun(radius):\n pi=3.14\n\n return radius * pi\nstudentA = howManyRun(100)\nstudentB = howManyRun(90)\n\nprint('학생A는 학생B보다 %d m만큼 더 뜀'\\\n % (studentA - studentB))\n\n#17 계산기 (제곱연산 추가)\ndef intCalu():\n num1 = int(input('좌변값을 하나 입력하세요'))\n num2 = int(input('우변값을 하나 입력하세요'))\n fmt = \"%d + %d = %d \\n %d - %d = %d \\n\"\n fmt += \"%d * %d = %d \\n %d + %d = %.5f \\n\"\n fmt += \" %d ** %d = %d\"\n print(fmt % (num1, num2, num1+num2, \\\n num1, num2, num1-num2, \\\n num1, num2, num1*num2, \\\n num1, num2, num1/num2, \\\n num1, num2, num1**num2 ))\nintCalu()\n\n#18 세금계산\n\ndef computeTax():\n salary = int(input('연봉을 입력하세요'))\n ismarried = input('결혼했나요? (Y/N)')\n tax = 0\n\n if ismarried.upper() == 'N':\n\n if salary < 3000:\n tax = salary * 0.1\n else:\n tax = salary * 0.25\n isMarried=\"아니오\"\n else:\n if salary < 6000:\n tax = salary * 0.1\n else:\n tax = salary * 0.25\n isMarried = \"예\"\n\n\n\n\n fmt = \"연봉 : %d, 결혼여부 : %s, 세금 : %.1f\"\n print(fmt % (salary, isMarried, tax))\n\ncomputeTax()\n\n#윤년여부\ndef isLeapYear():\n year = int(input('올해는 몇년도 입니까?'))\n yun = '윤년입니다.'\n noyun = '윤년이 아닙니다.'\n\n if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:\n print(yun)\n else:\n print(noyun)\n\nisLeapYear()\n\n#20 복권발행\nprint('#20 복권 발행')\n\n\ndef roulettetLotto():\n lotto = str(random.randint(100, 999))\n lucky = str(input('복권 숫자 3자리를 입력하세요\\n'))\n match = 0 #일치여부\n prize = '꽝!!!!! 다음 기회에!'\n\n for i in [0,1,2]:\n for j in [0,1,2]:\n if (lucky[i] == lotto[j]):\n match +=1\n\n if match == 3:\n prize=('상금 1백만 지금')\n elif match == 2:\n prize=print('상금 1만 지급!')\n elif match == 1:\n prize= print ('상금 1천 지급')\n\n print(lucky,lotto, prize)\n\nroulettetLotto()","repo_name":"BecomingBigdataAnalyst/DataAnalysis-Python","sub_path":"study/Lab03.py","file_name":"Lab03.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33423797890","text":"import pygame\r\npygame.init()\r\n\r\nSIZE = WIDTH, HEIGHT = 1000, 600\r\nSCREEN = pygame.display.set_mode(SIZE)\r\n\r\n# rgb color code\r\nWHITE = 255,255,255\r\nRED = 255,0,0\r\nBLUE = 0,0,255\r\nBLACK = 0,0,0\r\n\r\ncircle_x = 50\r\ncircle_y = 50\r\nradius = 50\r\n\r\nSPEED_X = 0.5\r\nSPEED_Y = 0.1\r\n\r\nwhile True:\r\n eventList = pygame.event.get()\r\n for event in eventList:\r\n # print(event)\r\n if event.type == pygame.QUIT:\r\n # quit the pygame\r\n pygame.quit()\r\n # quit python\r\n quit()\r\n\r\n SCREEN.fill(WHITE)\r\n\r\n pygame.draw.circle(SCREEN, BLUE, [circle_x, circle_y], radius)\r\n circle_x += SPEED_X\r\n circle_y += SPEED_Y\r\n\r\n if circle_x > WIDTH - radius:\r\n SPEED_X = -0.5\r\n elif circle_y > HEIGHT - radius:\r\n SPEED_Y = -0.1\r\n elif circle_x < radius:\r\n SPEED_X = 0.5\r\n elif circle_y < radius:\r\n SPEED_Y = 0.1\r\n\r\n\r\n # updates the screen\r\n pygame.display.flip()\r\n\r\n\r\n","repo_name":"brainmentorspvtltd/DU_Maitry_2022","sub_path":"CircleAnimation.py","file_name":"CircleAnimation.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26601557168","text":"\"\"\"\n Cast module contains Cast Data Model and its support 
functions\n\"\"\"\nfrom app.extensions import db\n\nfrom app.models.movie import Movie\nfrom app.models.people import Character\n\n\nclass Cast(db.Model):\n \"\"\" Cast Model represents SQL Cast Table as a conjunction table between\n Movies and People in them.\n\n\n Attributes:\n movie_id (str) -- Foreign key of Movie table\n character_id (str) -- Foreign Key of Character Table\n\n \"\"\"\n movie_id = db.Column(db.String, db.ForeignKey('movie.id'),\n primary_key=True)\n character_id = db.Column(db.String, db.ForeignKey('character.id'),\n primary_key=True)\n\n movie = db.relationship(Movie,\n backref=db.backref(\"cast\",\n cascade=\"all, delete-orphan\")\n )\n character = db.relationship(Character,\n backref=db.backref(\n \"cast\",\n cascade=\"all, delete-orphan\")\n )\n\n __tablename__ = 'cast'\n\n def __init__(self, movie, person):\n self.movie_id = movie\n self.character_id = person\n","repo_name":"zerozez/ghibliplay","sub_path":"app/models/cast.py","file_name":"cast.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13538249959","text":"import random\nimport matplotlib.pyplot as plt\nimport numpy as np\nx = random.sample(range(0,200),60)#生成随机的60个真实点\nx.sort()#对点进行排序\nbo = 0\nao = 0\ny = []\nwhile True:##自定义设置目标实际函数\n if bo==0:\n bo=float(input('输入“b*x^2 + a”中的b,例如:4\\n'))\n if ao==0:\n ao=float(input('输入“b*x^2 + a”中的a,例如:3\\n'))\n if bo!=0 and ao!=0:\n break\nfor i in x:##取得实际函数对应的真实点\n j = bo*i**2 + ao\n y.append(j)\ndef gauss_noisy(x, y):##引用高斯噪声函数\n mu = 0\n sigma = 4\n for i in range(len(x)):\n x[i] += random.gauss(mu, sigma)\n y[i] += random.gauss(mu, sigma)\ngauss_noisy(x, y)# 加入高斯噪声\n\n###对函数进行拟合\n##求解所需系数\nlen_x = len(x)\nsum_x = np.sum(x)\nsum_y = np.sum(y)\nsum_xx = 0\nsum_xy = 0\nfor i in range(len(x)):\n sum_xy = sum_xy + x[i]*y[i]\n sum_xx = sum_xx + x[i]**2\n##开始求解a和b的值\nA = np.array([[len_x,sum_x,sum_y],[sum_x,sum_xx,sum_xy]])\nc = -A[1][0]/A[0][0]\nA[1] = A[0]*c + A[1]\nb = A[1][2]/A[1][1]\na = (A[0][2]-b*A[0][1])/A[0][0]\nprint(\"拟合直线a值为{:.2f}\".format(a))\nprint(\"拟合直线b值为{:.2f}\".format(b))\n\n###误差估计(和反差sse)\nxw = random.sample(range(0,200),100)##取100个点进行误差估计\nxw.sort()\nyw = []\nfor i in xw:\n j = bo*i**2 + ao\n yw.append(j)\ngauss_noisy(xw, yw)\n##另取得100个实际值\n##求sse\nsse = 0\nfor i in range(len(xw)):\n sj = b * xw[i] + a\n sse = sse + (sj-yw[i])**2\nprint(\"估计的和方差为{:.2f}\".format(sse))\n\n##画出拟合曲线和离散点\nX = np.linspace(0, 200, 1000)\nY = b * X + a\nplt.plot(X, Y)\nplt.title('Y={}+{}X'.format(a, b))\nplt.legend(loc='lower right')\nplt.scatter(x, y)# 画出这些加入噪声后的真实点\nplt.show()\n","repo_name":"HnuAiSimOpt/Education","sub_path":"p1_202004061119.py","file_name":"p1_202004061119.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"32235167764","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport getpass\nimport json\nimport os\nimport re\nimport time\nfrom urllib.parse import quote_plus, unquote\n\nimport chromedriver_autoinstaller\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nCHROMEDRIVER_PATH = \"./chromedriver/\"\n\n# as per recommendation from @freylis, compile once 
only\nRE_HTML = re.compile(r\"<.*?>\")\nRE_PARENTHESIS = re.compile(r\"\\((?:[^)(]|\\([^)(]*\\))*\\)\")\nRE_BRACKET = re.compile(r\"\\[(?:[^\\]\\[]|\\[[^\\]\\[]*\\])*\\]\")\nRE_PARENTHESIS_HANGUL = re.compile(r\"\\(([ㄱ-ㅎ가-힣ㅏ-ㅣ\\s]+)\\)\")\nRE_HANGUL = re.compile(r\"[가-힣]+\")\n\nRE_KANJI = re.compile(\"[\\u4E00-\\u9FFF\\s]+\")\nRE_HIRA = re.compile(\"[\\u3040-\\u309Fー\\s]+\")\nRE_KATA = re.compile(\"[\\u30A0-\\u30FF\\s]+\")\nRE_JAPANESE = re.compile(\"[\\u3040-\\u30FF\\u30A0-\\u30FFー\\s]+\")\nRE_HIRA_KATA = re.compile(\"[\\u3040-\\u30FFー\\s]+\")\n\n\ndef clean_html(raw):\n cleantext = re.sub(RE_HTML, \"\", raw)\n return cleantext\n\n\ndef clean_parenthesis(raw):\n while True:\n output = re.sub(RE_PARENTHESIS, \"\", raw)\n if output == raw:\n break\n raw = output\n\n while True:\n output = re.sub(RE_BRACKET, \"\", raw)\n if output == raw:\n break\n raw = output\n\n output = output.replace(\"'\", \"\")\n output = output.replace('\"', \"\")\n output = output.replace(\"‘\", \"\")\n output = output.replace(\"’\", \"\")\n\n return output\n\n\ndef clean_parenthesis_hangul(raw):\n cleantext = re.sub(RE_PARENTHESIS_HANGUL, \"\", raw)\n return cleantext\n\n\ndef get_only_hangul(raw):\n return \"\".join(re.findall(RE_HANGUL, raw))\n\n\ndef get_only_hira_kata(raw):\n return \"\".join(re.findall(RE_HIRA_KATA, raw))\n\n\ndef get_only_japanese(raw):\n return \"\".join(re.findall(RE_JAPANESE, raw))\n\n\ndef get_first_item(raw):\n return raw.split(\";\")[0].split(\",\")[0].split(\".\")[0].split(\"·\")[0].strip()\n\n\ndef get_driver() -> webdriver.Chrome:\n driver_path = chromedriver_autoinstaller.install(path=CHROMEDRIVER_PATH)\n return webdriver.Chrome(service=Service(executable_path=driver_path))\n\n\ndef get_naver_login_session(username, password, session_save, session_load):\n if not session_load:\n driver = get_driver()\n driver.get(\"https://nid.naver.com/nidlogin.login\")\n\n timeout = 5\n try:\n element_present = EC.presence_of_element_located((By.ID, \"id\"))\n WebDriverWait(driver, timeout).until(element_present)\n except TimeoutException:\n print\n \"Timed out waiting for page to load\"\n\n driver.find_element(By.ID, \"id\").send_keys(username)\n driver.find_element(By.ID, \"pw\").send_keys(password)\n driver.find_element(By.ID, \"log.login\").click()\n\n wait = WebDriverWait(driver, 600)\n wait.until(lambda driver: \"https://nid.naver.com/\" not in driver.current_url)\n\n cookies = driver.get_cookies()\n\n if session_save:\n with open(\"session_info\", mode=\"w\") as f:\n f.write(json.dumps(cookies))\n else:\n with open(\"session_info\", mode=\"r\") as f:\n cookies = json.loads(f.read())\n\n s = requests.Session()\n\n for cookie in cookies:\n s.cookies.set(cookie[\"name\"], cookie[\"value\"])\n\n return s\n\n\ndef get_vocab_lists(session, page, page_size=100):\n vocab_lists_text = session.get(\n f\"https://learn.dict.naver.com/gateway-api/jakodict/mywordbook/wordbook/list.dict?page={page}&page_size={page_size}&st=0&domain=naver\"\n )\n\n result = json.loads(vocab_lists_text.text)\n\n if result[\"meta\"][\"status\"] != 1:\n raise Exception(\"단어장 목록 받아오기 실패\")\n\n vocab_lists = result[\"data\"][\"m_items\"]\n\n vocab_list_dict = {}\n\n for vocab_list in vocab_lists:\n vocab_list_dict[vocab_list[\"name\"]] = (\n vocab_list[\"id\"],\n vocab_list[\"wordCount\"],\n )\n\n return vocab_list_dict\n\n\ndef get_vocabs(session, vocab_list_id, start, count, search_size=100):\n words = []\n\n fisrt_page = start // 100\n if fisrt_page == 0:\n fisrt_page = 1\n\n total_page = (start + count + 100) // 100\n if 
total_page == 0:\n total_page = 1\n\n page = fisrt_page\n cursor = \"\"\n\n vocab_count = 0\n\n while page <= total_page:\n if page == 1:\n link = f\"https://learn.dict.naver.com/gateway-api/jakodict/mywordbook/word/list/search?wbId={vocab_list_id}&qt=0&st=0&page_size={search_size}&domain=naver\"\n else:\n link = f\"https://learn.dict.naver.com/gateway-api/jakodict/mywordbook/word/list/search?wbId={vocab_list_id}&qt=0&st=0&cursor={cursor}&page_size={search_size}&domain=naver\"\n\n result = json.loads(session.get(link).text)\n\n if result[\"meta\"][\"status\"] != 1:\n print(f\"{page} 페이지 파싱 결과 : 단어장 불러오기 실패\")\n print(result)\n time.sleep(1)\n page += 1\n continue\n\n vocabs = result[\"data\"][\"m_items\"]\n cursor = quote_plus(result[\"data\"][\"next_cursor\"])\n\n vocab_success_count = 0\n\n for vocab in vocabs:\n vocab_count += 1\n\n if vocab_count < start:\n continue\n\n word, meaning = process_vocab(vocab)\n\n if word == None or meaning == None:\n continue\n\n vocab_success_count += 1\n\n words.append((word, meaning))\n\n if vocab_count >= count + start - 1:\n break\n\n if vocab_count >= count + start - 1:\n break\n\n print(f\"{page} 페이지 파싱 결과 : {vocab_success_count}/{search_size} 개 불러옴\")\n\n time.sleep(1)\n\n page += 1\n\n return words\n\n\ndef get_entry(element):\n try:\n element = element[\"entry\"]\n lang = element[\"language\"]\n\n if lang == \"ja\": # 일어\n kanji, word = get_word(element[\"members\"])\n meaning = get_mean(element[\"means\"])\n\n return kanji, word, meaning\n\n return None, None, None\n\n except Exception as e:\n return None, None, None\n\n\ndef get_word(elements):\n kanji = None\n entry_name = None\n\n for element in elements:\n entry_name = element[\"entry_name\"]\n entry_name = clean_parenthesis(entry_name)\n entry_name = clean_html(entry_name)\n entry_name = get_first_item(entry_name)\n entry_name = get_only_hira_kata(entry_name)\n\n kanji = element[\"kanji\"]\n kanji = clean_parenthesis(kanji)\n kanji = clean_html(kanji)\n kanji = get_first_item(kanji)\n\n if entry_name == \"\":\n entry_name = None\n if kanji == \"\":\n kanji = None\n\n if entry_name is not None:\n break\n\n return kanji, entry_name\n\n\ndef get_mean(elements):\n mean = None\n\n for element in elements:\n mean = element[\"show_mean\"]\n mean = clean_parenthesis(mean)\n mean = clean_html(mean)\n mean = get_first_item(mean)\n\n if mean == \"\":\n mean = None\n\n if mean is not None:\n break\n\n return mean\n\n\ndef process_vocab(vocab):\n if not isinstance(vocab, dict):\n return None, None\n\n dic_type = vocab[\"dicType\"]\n\n if dic_type == \"jako\": # 일한 사전\n kanji, word, meaning = get_entry(json.loads(unquote(vocab[\"content\"])))\n\n if word is None or meaning is None:\n return None, None\n\n if kanji is not None:\n meaning = word + \" \" + meaning\n word = kanji\n\n return word, meaning\n\n elif dic_type == \"koja\": # 한일 사전, 지원 X\n return None, None\n\n return None, None\n\n\ndef export_to_file(file_name, vocabs):\n with open(file_name, \"w\", newline=\"\", encoding=\"utf-8-sig\") as f:\n wr = csv.writer(f)\n\n for word, meaning in vocabs:\n wr.writerow([word, meaning])\n\n f.close()\n\n\ndef main():\n username = input(\"네이버 아이디 : \")\n password = getpass.getpass(\"네이버 비밀번호 : \")\n\n session_load = input(\"세션을 불러올까요? (y or n) \")\n\n if session_load == \"y\":\n session_load = True\n else:\n session_load = False\n\n session_save = False\n\n if not session_load:\n session_save = input(\"세션을 저장할까요? 
(y or n) \")\n\n if session_save == \"y\":\n session_save = True\n else:\n session_save = False\n\n session = get_naver_login_session(username, password, session_save, session_load)\n\n command = \"\"\n\n while command != \"q\":\n vocabs = []\n\n while True:\n vocab_lists_page = 1\n vocab_lists_select = 0\n vocab_list_items = []\n\n print()\n print(f\"현재 {len(vocabs)}개의 단어를 불러왔습니다.\")\n\n while vocab_lists_select <= 0:\n vocab_lists = get_vocab_lists(session, vocab_lists_page)\n\n vocab_list_items = list(vocab_lists.items())\n\n print()\n\n print(f\"단어장 목록 {vocab_lists_page} 페이지\")\n\n print()\n\n for idx, vocab_list_item in enumerate(vocab_list_items):\n print(\n f\"{idx+1} : {vocab_list_item[0]} ({vocab_list_item[1][1]}개 단어)\"\n )\n\n print()\n\n vocab_lists_select = int(\n input(\n \"단어장을 선택하거나 전 페이지를 검색하려면 -1, 다음 페이지를 검색하려면 0을 입력하십시오(종료는 -2) : \"\n )\n )\n\n if vocab_lists_select == -1:\n if vocab_lists_page <= 1:\n print()\n print(\"이미 첫 페이지입니다.\")\n\n vocab_lists_page = 1\n\n else:\n vocab_lists_page -= 1\n\n elif vocab_lists_select == 0:\n vocab_lists_page += 1\n\n else:\n break\n\n if vocab_lists_select == -2:\n break\n\n # print()\n # print(f\"선택한 단어장 : {vocab_list_items[vocab_lists_select-1][0]} ({vocab_list_items[vocab_lists_select-1][1][1]}개 단어)\")\n\n # vocab_start_count = int(input(\"불러오기 시작할 단어의 수를 입력하세요 : \"))\n # vocab_count = int(input(\"불러올 단어 개수를 입력하세요 : \"))\n\n vocab_lists_id = vocab_list_items[vocab_lists_select - 1][1][0]\n\n _vocabs = get_vocabs(\n session,\n vocab_lists_id,\n 0,\n vocab_list_items[vocab_lists_select - 1][1][1],\n )\n vocabs.extend(_vocabs)\n print()\n\n print(\n f\"{vocab_list_items[vocab_lists_select - 1][0]} 단어장에서 {len(_vocabs)}개를 불러와 추가했습니다.\"\n )\n\n print(f\"{len(vocabs)}개의 단어를 저장합니다.\")\n print()\n vocab_unit = int(input(\"한 번에 저장할 단어 개수를 입력하세요 : \"))\n file_original_name = input(\"저장할 파일 이름을 입력하세요 : \")\n\n for i in range(0, len(vocabs), vocab_unit):\n file_name = f\"{file_original_name}-{i//vocab_unit + 1}.csv\"\n\n if i + vocab_unit > len(vocabs):\n end_index = len(vocabs)\n else:\n end_index = i + vocab_unit\n\n vocabs_by_unit = vocabs[i:end_index]\n\n export_to_file(file_name, vocabs_by_unit)\n\n print(f\"{file_name}에 {end_index-i}개의 단어를 저장했습니다.\")\n\n print(\"\")\n print(\"작업이 완료되었습니다.\")\n command = input(\"종료하려면 q를, 단어장 선택 화면으로 돌아가려면 q를 제외한 다른 키를 누르십시오.\")\n\n\nmain()\n","repo_name":"tonynamy/ExportNaverJpVocab","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12096,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"37352394935","text":"# -*- coding:utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\ndata_dir = \"/data2/code/DaguanFengxian/bert_model/data/splits/fold_0_nezha_base_vocab\"\ndata_dev = \"/dev.txt\"\ndata_train = \"/train.txt\"\n\nwith open(data_dir + data_train, 'r', encoding='utf-8') as f:\n train_data = []\n\n for i, line in enumerate(f):\n if i == 0: continue\n\n idx = line.strip().split(\"\\t\")[0]\n sent_ = line.strip().split(\"\\t\")[1]\n label_2 = line.strip().split(\"\\t\")[2]\n label_1 = label_2.split(\"-\")[0]\n train_data.append([idx, sent_, label_2, label_1])\n\nwith open(data_dir + data_dev, 'r', encoding='utf-8') as f:\n dev_data = []\n\n for i, line in enumerate(f):\n if i == 0: continue\n\n idx = line.strip().split(\"\\t\")[0]\n sent_ = line.strip().split(\"\\t\")[1]\n label_2 = line.strip().split(\"\\t\")[2]\n label_1 = label_2.split(\"-\")[0]\n dev_data.append([idx, sent_, label_2, label_1])\n\nf_out = 
open(data_dir + data_train, 'w', encoding='utf-8')\nfor i, samp in enumerate(train_data):\n f_out.write(\"%d,%s,%s,%s\" % (int(samp[0]), samp[1], samp[2], samp[3]) + \"\\n\")\n\nf_out = open(data_dir + data_dev, 'w', encoding='utf-8')\nfor i, samp in enumerate(dev_data):\n f_out.write(\"%d,%s,%s,%s\" % (int(samp[0]), samp[1], samp[2], samp[3]) + \"\\n\")\n","repo_name":"Coding-Zuo/DaguanFengxian","sub_path":"bert_model/script/7_multi-task_data_proc.py","file_name":"7_multi-task_data_proc.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"73054461673","text":"import torch\nfrom torch import nn\n\n\n# 二维卷积 互相关运算,未引入padding\n\ndef corr2d(X, k):\n h, w = k.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) # shape[0] 获取张量的行数,确定输出Y的形状\n\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n # print(X[i:i + h, j: j+w])\n print()\n Y[i, j] = (X[i:i + h, j: j + w] * k).sum()\n\n return Y\n\n\nX = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\nK = torch.tensor([[0, 1], [2, 3]])\nY = corr2d(X, K)\n\nprint()\nprint(X[2, 1])\n\nprint(Y)\n","repo_name":"BenjaminChiu/MyPytorch","sub_path":"DIDL/5.1.1_CNN.py","file_name":"5.1.1_CNN.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37112775206","text":"from django import forms\nfrom .models import Order\n\n\nclass OrderForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs): # Переименовываем поле\n super().__init__(*args, **kwargs)\n self.fields['order_date'].label = 'Дата получения заказа'\n\n order_date = forms.DateField(widget=forms.TextInput(attrs={'type': 'date'})) # выбор даты в календаре\n\n class Meta:\n model = Order\n fields = (\n 'first_name', 'last_name', 'phone', 'address', 'buying_type', 'order_date', 'comment'\n )\n","repo_name":"iliya-84/Django_Projects","sub_path":"shop_django/mainapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6243308985","text":"import contextlib\nimport pytest\nimport os\nimport sys\nfrom stat import S_IREAD, S_IRGRP, S_IROTH\nfrom run_app import runner\n\nSCRIPT_DIR = os.path.dirname(__file__)\n\n\n@pytest.fixture()\ndef data_init():\n \"\"\"\n Initialize dependencies for test suite\n :return:\n \"\"\"\n # SETUP\n data_init = dict()\n data_init['example_file1'] = os.path.join(SCRIPT_DIR, 'example1.txt')\n data_init['example_file2'] = os.path.join(SCRIPT_DIR, 'example2.txt')\n data_init['no_permissions'] = os.path.join(SCRIPT_DIR, 'noPermissionsFile.txt')\n print('doing things to setup')\n yield data_init\n # TEARDOWN\n print('doing things to teardown')\n\n\n@pytest.mark.negative\ndef test_without_input_files():\n \"\"\"\n Running test without input files\n :return:\n \"\"\"\n print('Running test without input files')\n try:\n runner(files='', regex=r'\\d\\d\\d\\d\\d')\n assert False\n except FileNotFoundError:\n assert True\n\n\n@pytest.mark.negative\ndef test_no_matches(data_init):\n \"\"\"\n Running test without successful matches\n :param data_init: test suite dependencies (list of example input files)\n :return:\n \"\"\"\n print('Running test without successful matches')\n results = runner(files=data_init['example_file1'], regex=r'\\d')\n assert True if len(results[data_init['example_file1']]) == 0 else 
False\n\n\n@pytest.mark.functional\ndef test_basic_functionality(data_init):\n \"\"\"\n Running basic test functionality (1 input file & pattern)\n :param data_init: test suite dependencies (list of example input files)\n :return:\n \"\"\"\n print('Running basic test functionality')\n results = runner(files=data_init['example_file1'], regex='grows')\n assert True if len(results[data_init['example_file1']]) == 2 else False\n\n\n@pytest.mark.functional\ndef test_multiple_files(data_init):\n \"\"\"\n\n :param data_init:\n :return:\n \"\"\"\n print('Running test with multiple files')\n example_files = \\\n data_init['example_file1'] + ',' + data_init['example_file2']\n results = runner(files=example_files, regex='grows')\n count = 0\n for val in results.values():\n count += len(val)\n assert True if count == 4 else False\n\n\n@pytest.mark.negative\ndef test_without_permission_files(data_init):\n \"\"\"\n Running test without permission files\n :return:\n \"\"\"\n print('Running test without permission files')\n no_permissions_file = open(data_init['no_permissions'], \"w\")\n no_permissions_file.write(\"Your text goes here876182735491256487\")\n os.chmod(data_init['no_permissions'], S_IREAD)\n # Silence matches prints (prints appears when file read permissions is allowed'):\n # with nostdout():\n try:\n # Run RegexInFiles with expected file without any permissions\n runner(files=data_init['no_permissions'], regex=r'\\d\\d\\d\\d\\d')\n assert False\n except PermissionError:\n assert True\n os.chmod(data_init['no_permissions'], 0o777)\n no_permissions_file.close()\n\n\n\n@contextlib.contextmanager\ndef nostdout():\n save_stdout = sys.stdout\n sys.stdout = DummyFile()\n yield\n sys.stdout = save_stdout\n\n\nclass DummyFile(object):\n def write(self, x): pass\n","repo_name":"idanb1/Exercise","sub_path":"pytest/TestSuite.py","file_name":"TestSuite.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13111602594","text":"#CalThreeKingdomV2.py\n\n#姓名频率计算\n\nimport jieba\nimport os\nimport traceback\n\ntry:\n\ttxt=open(\"../data/threekingdoms.txt\",\"r\",encoding=\"utf-8\").read() #utf-8编码方式打开文本\n\texcludes={\"将军\",\"却说\",\"荆州\",\"二人\",\"不可\",\"不能\",\"如此\",\"商议\",\"如何\",\"主公\",\"军士\",\"左右\",\"军马\",\"引兵\",\"次日\",\"大喜\"\\\n\t ,\"天下\",\"东吴\",\"于是\",\"今日\",\"不敢\",\"魏兵\",\"陛下\",\"一人\",\"都督\",\"人马\",\"不知\",\"汉中\",\"只见\",\"众将\",\"后主\",\"蜀兵\"\\\n\t ,\"上马\",\"大叫\",\"太守\",\"此人\",\"夫人\",\"先主\",\"后人\",\"背后\",\"城中\",\"天子\",\"一面\",\"何不\",\"大军\",\"忽报\",\"先生\",\"百姓\"\\\n\t ,\"何故\",\"然后\",\"先锋\",\"不如\",\"赶来\",\"原来\",\"令人\",\"江东\",\"下马\",\"喊声\",\"正是\",\"徐州\",\"忽然\",\"因此\",\"成都\",\"不见\"\\\n\t ,\"未知\",\"大败\",\"大事\",\"之后\",\"一军\",\"引军\",\"起兵\",\"军中\",\"接应\",\"进兵\",\"大惊\",\"可以\",\"以为\",\"大怒\",\"不得\",\"心中\"\\\n\t ,\"下文\",\"一声\",\"追赶\"} #通过不断运行CalThreeKingdomV1和此文件,排除Top20中非姓名的词语\n\twords=jieba.lcut(txt)\n\n\n\tcounts={}\n\tfor word in words:\n\t if len(word)==1:\n\t continue\n\t #通过不断运行CalThreeKingdomV1和此文件,将指向同一人物的词语\"归一化\"\n\t elif word==\"诸葛亮\" or word==\"孔明曰\":\n\t rword=\"孔明\"\n\t elif word==\"关公\" or word==\"云长\":\n\t rword=\"关羽\"\n\t elif word==\"玄德\" or word==\"玄德曰\":\n\t rword=\"刘备\"\n\t elif word==\"孟德\" or word==\"丞相\":\n\t rword=\"曹操\"\n\t else:\n\t rword=word\n\t counts[rword]=counts.get(rword,0)+1\n\n\tfor word in excludes: #删除非姓名词汇\n\t del counts[word]\n\n\titems=list(counts.items())\n\titems.sort(key=lambda y:y[1],reverse=True)\n\tfor i in range(20):\n\t word,count=items[i]\n\t 
print(\"{0:<10}{1:>5}\".format(word,count))\nexcept :\n\ttraceback.print_exc()\nfinally:\n\tos.system(\"pause\")\n#txt.close()\n\n","repo_name":"AnRanbel/StudyPython","sub_path":"CalThreeKingdom/CalThreeKingdomV2.py","file_name":"CalThreeKingdomV2.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15758072928","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nRoboMission measures's correlation analysis.\r\nPython Version: 3.6\r\n\"\"\"\r\n\r\nfrom collections import Counter\r\nimport editdistance\r\nimport seaborn as sns\r\nfrom scipy.spatial.distance import euclidean\r\n\r\nfrom tools import *\r\n\r\n\r\ndef difficulty_and_complexity_measures(snapshots_path, task_sessions_path, tasks_path):\r\n \"\"\"\r\n Computes difficulty and complexity measures.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv file\r\n :return: pd.DataFrame; difficulty and complexity measures\r\n \"\"\"\r\n data = load_extended_snapshots(snapshots_path=snapshots_path,\r\n task_sessions_path=task_sessions_path,\r\n tasks_path=tasks_path,\r\n task_sessions_cols=[\"id\", \"student\", \"task\", \"solved\", \"time_spent\"],\r\n tasks_cols=[\"id\", \"solution\"])\r\n data = data.fillna(False)\r\n data = data[data.correct == data.new_correct]\r\n\r\n all_sessions = data.groupby(\"task_session\").agg({\"task\": \"max\",\r\n \"granularity\": count_submits,\r\n \"new_correct\": count_true,\r\n \"solution\": last_with_empty_values})\r\n all_sessions[\"new_solved\"] = all_sessions.new_correct.astype(bool)\r\n\r\n successful_sessions = all_sessions[all_sessions.new_solved]\r\n all_sessions_by_tasks = all_sessions.groupby(\"task\").agg({\"new_solved\": \"count\"})\r\n successful_sessions_by_tasks = successful_sessions.groupby(\"task\").agg({\"new_solved\": \"count\"})\r\n\r\n difficulty_and_complexity = 1 - successful_sessions_by_tasks / all_sessions_by_tasks\r\n difficulty_and_complexity.rename(columns={\"new_solved\": \"task_sessions_unsolved\"}, inplace=True)\r\n\r\n del all_sessions_by_tasks\r\n del successful_sessions_by_tasks\r\n del successful_sessions\r\n\r\n distinct_blocks_by_task = all_sessions.groupby(\"task\").agg({\"solution\": \"last\"})\r\n difficulty_and_complexity[\"distinct_blocks_1\"] = count_distinct_blocks(distinct_blocks_by_task.solution, 1)\r\n difficulty_and_complexity.distinct_blocks_1 = difficulty_and_complexity.distinct_blocks_1.astype(\"int64\")\r\n\r\n difficulty_and_complexity[\"distinct_blocks_3\"] = count_distinct_blocks(distinct_blocks_by_task.solution, 3)\r\n difficulty_and_complexity.distinct_blocks_3 = difficulty_and_complexity.distinct_blocks_3.astype(\"int64\")\r\n\r\n difficulty_and_complexity[\"distinct_blocks_4\"] = count_distinct_blocks(distinct_blocks_by_task.solution, 4)\r\n difficulty_and_complexity.distinct_blocks_4 = difficulty_and_complexity.distinct_blocks_4.astype(\"int64\")\r\n\r\n data[\"granularity_submits\"] = data.granularity\r\n data[\"program_all\"] = data.program\r\n data[\"program_edits\"] = data.program\r\n data[\"program_1_0\"] = data.program\r\n\r\n task_sessions = data.groupby(\"task_session\").agg({\"task\": \"last\",\r\n \"time_spent\": \"max\",\r\n \"solution\": \"last\",\r\n \"granularity\": count_edits,\r\n \"granularity_submits\": count_submits,\r\n \"program\": last_with_empty_values,\r\n 
\"program_all\": partial(count_deletions, mode=\"all\"),\r\n \"program_edits\": partial(count_deletions, mode=\"edits\"),\r\n \"program_1_0\": partial(count_deletions, mode=\"1_0\")})\r\n\r\n tasks = task_sessions.groupby(\"task\").agg({\"time_spent\": \"median\",\r\n \"granularity\": \"median\",\r\n \"granularity_submits\": \"median\",\r\n \"program\": median_of_lens,\r\n \"solution\": len_of_last,\r\n \"program_all\": \"median\",\r\n \"program_edits\": \"median\",\r\n \"program_1_0\": [\"median\", \"sum\", \"count\"]})\r\n tasks.deletion_ratio = tasks[(\"program_1_0\", \"sum\")] / tasks[(\"program_1_0\", \"count\")]\r\n\r\n difficulty_and_complexity[\"median_time\"] = tasks[(\"time_spent\", \"median\")]\r\n difficulty_and_complexity[\"median_edits\"] = tasks[(\"granularity\", \"median\")]\r\n difficulty_and_complexity[\"median_submissions\"] = tasks[(\"granularity_submits\", \"median\")]\r\n difficulty_and_complexity[\"median_solution_length\"] = tasks[(\"program\", \"median_of_lens\")]\r\n difficulty_and_complexity[\"sample_solution_length\"] = tasks[(\"solution\", \"len_of_last\")]\r\n difficulty_and_complexity[\"deletion_ratio\"] = tasks.deletion_ratio\r\n difficulty_and_complexity[\"median_deletions_all\"] = tasks[(\"program_all\", \"median\")]\r\n difficulty_and_complexity[\"median_deletions_edits\"] = tasks[(\"program_edits\", \"median\")]\r\n difficulty_and_complexity[\"median_deletions_1_0\"] = tasks[(\"program_1_0\", \"median\")]\r\n\r\n for column in difficulty_and_complexity:\r\n print(column, statistics(difficulty_and_complexity[column]))\r\n return difficulty_and_complexity\r\n\r\n\r\ndef solution_uniqueness_measures(snapshots_path, task_sessions_path, tasks_path):\r\n \"\"\"\r\n Computes solution uniqueness measures.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv file\r\n :return: pd.DataFrame; solution uniqueness measures\r\n \"\"\"\r\n data = load_extended_snapshots(snapshots_path=snapshots_path,\r\n task_sessions_path=task_sessions_path,\r\n tasks_path=tasks_path,\r\n task_sessions_cols=[\"id\", \"task\", \"solved\"],\r\n tasks_cols=[\"id\", \"solution\"])\r\n data = data[data.new_correct.notnull()]\r\n data = data[data.new_correct]\r\n data = data[data.new_correct == data.correct]\r\n\r\n tasks = data.groupby(\"task\").agg({\"program\": dict_of_counts,\r\n \"square_sequence\": dict_of_counts,\r\n \"solution\": \"last\"})\r\n\r\n uniqueness = pd.DataFrame(index=tasks.index)\r\n uniqueness[\"solutions_entropy\"] = list(map(entropy, tasks.program))\r\n uniqueness[\"squares_sequences_entropy\"] = list(map(entropy, tasks.square_sequence))\r\n tasks[\"distinct_solutions\"] = [len(x[0]) for x in tasks.program]\r\n tasks[\"distinct_squares_sequences\"] = [len(x[0]) for x in tasks.square_sequence]\r\n uniqueness[\"unique_solutions\"] = tasks.distinct_solutions\r\n uniqueness[\"unique_squares_sequences\"] = tasks.distinct_squares_sequences\r\n uniqueness[\"program_clusters_count\"], _, _ = count_program_clusters(tasks.program)\r\n\r\n for column in uniqueness:\r\n print(column, statistics(uniqueness[column]))\r\n return uniqueness\r\n\r\n\r\ndef task_similarity_measures(snapshots_path, task_sessions_path, tasks_path):\r\n \"\"\"\r\n Computes similarity measures.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv 
file\r\n :return: pd.DataFrame; similarity measures\r\n \"\"\"\r\n del snapshots_path\r\n del task_sessions_path\r\n\r\n tasks = pd.read_csv(tasks_path, index_col=\"id\")\r\n sample_solutions = tasks.solution\r\n asts = pd.Series(list(map(build_ast, sample_solutions)), index=sample_solutions.index)\r\n bags_of_blocks = bag_of_blocks(sample_solutions)\r\n bags_of_entities = bag_of_entities(tasks.setting)\r\n\r\n ast_ted_matrix = pd.DataFrame(data=None, index=sorted(sample_solutions.index),\r\n columns=sorted(sample_solutions.index))\r\n levenshtein_matrix = pd.DataFrame(data=None, index=sorted(sample_solutions.index),\r\n columns=sorted(sample_solutions.index))\r\n bag_of_blocks_matrix = pd.DataFrame(data=None, index=sorted(sample_solutions.index),\r\n columns=sorted(sample_solutions.index))\r\n bag_of_entities_matrix = pd.DataFrame(data=None, index=sorted(sample_solutions.index),\r\n columns=sorted(sample_solutions.index))\r\n\r\n for i in sorted(sample_solutions.index):\r\n for j in sorted(sample_solutions.index):\r\n if i < j:\r\n ast_ted_matrix.loc[i][j] = ast_ted(asts.loc[i], asts.loc[j])\r\n levenshtein_matrix.loc[i][j] = editdistance.eval(sample_solutions.loc[i], sample_solutions.loc[j])\r\n bag_of_blocks_matrix.loc[i][j] = euclidean(bags_of_blocks.loc[i], bags_of_blocks.loc[j])\r\n bag_of_entities_matrix.loc[i][j] = euclidean(bags_of_entities.loc[i], bags_of_entities.loc[j])\r\n\r\n matrices = [ast_ted_matrix, levenshtein_matrix, bag_of_blocks_matrix, bag_of_entities_matrix]\r\n titles = [\"AST TED\", \"Levenshtein\", \"Bag-of-blocks\", \"Bag-of-entities\"]\r\n for i in range(len(matrices)):\r\n frequencies = dict(Counter(matrices[i].values.flatten()))\r\n plt.bar(list(frequencies.keys()), list(frequencies.values()), width=0.05, color='g')\r\n plt.title(titles[i] + \" distances distribution\")\r\n plt.xlabel(\"distance\")\r\n plt.ylabel(\"count\")\r\n plt.show()\r\n\r\n flat_ast_ted_matrix = flatten_table_remove_nan(ast_ted_matrix)\r\n flat_levenshtein_matrix = flatten_table_remove_nan(levenshtein_matrix)\r\n flat_bag_of_blocks_matrix = flatten_table_remove_nan(bag_of_blocks_matrix)\r\n flat_bag_of_entities_matrix = flatten_table_remove_nan(bag_of_entities_matrix)\r\n\r\n for i in sorted(sample_solutions.index):\r\n for j in sorted(sample_solutions.index):\r\n if i > j:\r\n ast_ted_matrix.loc[i][j] = ast_ted_matrix.loc[j][i]\r\n levenshtein_matrix.loc[i][j] = levenshtein_matrix.loc[j][i]\r\n bag_of_blocks_matrix.loc[i][j] = bag_of_blocks_matrix.loc[j][i]\r\n bag_of_entities_matrix.loc[i][j] = bag_of_entities_matrix.loc[j][i]\r\n\r\n similarity = pd.DataFrame(index=tasks.index)\r\n similarity[\"ast_ted2\"] = count_similar_tasks(ast_ted_matrix, np.quantile(flat_ast_ted_matrix, 0.02))\r\n similarity[\"ast_ted5\"] = count_similar_tasks(ast_ted_matrix, np.quantile(flat_ast_ted_matrix, 0.05))\r\n similarity[\"ast_ted10\"] = count_similar_tasks(ast_ted_matrix, np.quantile(flat_ast_ted_matrix, 0.10))\r\n similarity[\"levenshtein2\"] = count_similar_tasks(levenshtein_matrix, np.quantile(flat_levenshtein_matrix, 0.02))\r\n similarity[\"levenshtein5\"] = count_similar_tasks(levenshtein_matrix, np.quantile(flat_levenshtein_matrix, 0.05))\r\n similarity[\"levenshtein10\"] = count_similar_tasks(levenshtein_matrix, np.quantile(flat_levenshtein_matrix, 0.10))\r\n similarity[\"blocks2\"] = count_similar_tasks(bag_of_blocks_matrix, np.quantile(flat_bag_of_blocks_matrix, 0.02))\r\n similarity[\"blocks5\"] = count_similar_tasks(bag_of_blocks_matrix, np.quantile(flat_bag_of_blocks_matrix, 0.05))\r\n 
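# count_similar_tasks(matrix, t) appears to count, per task, how many other tasks lie within distance t of it; each threshold here is a low quantile of the flattened pairwise distances, e.g. np.quantile([0.0, 1.0, 2.0, 3.0], 0.5) == 1.5\r\n    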
similarity[\"blocks10\"] = count_similar_tasks(bag_of_blocks_matrix, np.quantile(flat_bag_of_blocks_matrix, 0.10))\r\n similarity[\"entities2\"] = count_similar_tasks(bag_of_entities_matrix, np.quantile(flat_bag_of_entities_matrix, 0.02))\r\n similarity[\"entities5\"] = count_similar_tasks(bag_of_entities_matrix, np.quantile(flat_bag_of_entities_matrix, 0.05))\r\n similarity[\"entities10\"] = count_similar_tasks(bag_of_entities_matrix, np.quantile(flat_bag_of_entities_matrix, 0.10))\r\n similarity[\"closest_ast\"], _ = get_shortest_distance(ast_ted_matrix)\r\n similarity[\"closest_levenshtein\"], _ = get_shortest_distance(levenshtein_matrix)\r\n similarity[\"closest_blocks\"], _ = get_shortest_distance(bag_of_blocks_matrix)\r\n similarity[\"closest_entities\"], _ = get_shortest_distance(bag_of_entities_matrix)\r\n\r\n for column in similarity:\r\n print(column, statistics(similarity[column]))\r\n return similarity\r\n\r\n\r\ndef frequent_problems_measures(snapshots_path, task_sessions_path, tasks_path, **kwargs):\r\n \"\"\"\r\n Computes frequent problems measures.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv file\r\n :return: pd.DataFrame; frequent problems measures\r\n \"\"\"\r\n data = load_extended_snapshots(snapshots_path=snapshots_path,\r\n task_sessions_path=task_sessions_path,\r\n tasks_path=tasks_path,\r\n task_sessions_cols=[\"id\", \"task\"],\r\n tasks_cols=[])\r\n data = data.fillna(False)\r\n data = data[data.new_correct == data.correct]\r\n\r\n last_ts_snapshots = data.groupby(\"task_session\").agg({\"task\": \"max\",\r\n \"new_correct\": count_true,\r\n \"granularity\": \"last\",\r\n \"program\": last_with_empty_values,\r\n \"square_sequence\": last_with_empty_values})\r\n\r\n last_ts_snapshots = last_ts_snapshots[[isinstance(x, str) for x in last_ts_snapshots.program]]\r\n last_ts_snapshots[\"new_solved\"] = last_ts_snapshots.new_correct.astype(bool)\r\n\r\n wrong_ts = last_ts_snapshots[last_ts_snapshots.new_solved == 0]\r\n wrong_ts = synchronous_interpreter_run(data_frame=wrong_ts,\r\n only_executions=False,\r\n only_edits=True,\r\n save=False,\r\n tasks_path=tasks_path)\r\n wrong_ts[\"string_square_sequence\"] = square_sequences_to_strings(wrong_ts.square_sequence)\r\n\r\n del last_ts_snapshots\r\n tasks_left = wrong_ts.groupby([\"task\", \"string_square_sequence\"]).agg(\r\n {\"program\": partial(dict_of_counts, del_false=True), \"new_solved\": \"count\"})\r\n del wrong_ts\r\n tasks_left[\"distinct_programs\"] = len_of_programs_dict(tasks_left.program)\r\n tasks_left[\"most_frequent_program\"] = get_most_frequent_program(tasks_left.program)\r\n tasks_left[\"abs_count\"] = count_total_abs_freq(tasks_left.program)\r\n tasks_left[\"task_freq\"] = count_task_frequency(tasks_left)\r\n tasks_left[\"rel_count\"] = tasks_left.abs_count / tasks_left.task_freq\r\n\r\n # --------------------\r\n\r\n data = data[data.granularity == \"execution\"]\r\n data = data[data.new_correct == False]\r\n data = data[[isinstance(x, str) for x in data.program]]\r\n\r\n data[\"string_square_sequence\"] = square_sequences_to_strings(data.square_sequence)\r\n incorrect_submits = data.groupby([\"task\", \"string_square_sequence\"]).agg(\r\n {\"program\": partial(dict_of_counts, del_false=True)})\r\n\r\n incorrect_submits[\"distinct_programs\"] = len_of_programs_dict(incorrect_submits.program)\r\n incorrect_submits[\"most_frequent_program\"] = 
get_most_frequent_program(incorrect_submits.program)\r\n incorrect_submits[\"abs_count\"] = count_total_abs_freq(incorrect_submits.program)\r\n incorrect_submits[\"task_freq\"] = count_task_frequency(incorrect_submits)\r\n incorrect_submits[\"rel_count\"] = incorrect_submits.abs_count / incorrect_submits.task_freq\r\n\r\n if kwargs[\"plot\"]:\r\n print(\"Incorrect submits plot\")\r\n plot_frequent_wrong_programs_ratio(\r\n tasks=incorrect_submits[[\"abs_count\", \"rel_count\", \"task_freq\", \"most_frequent_program\"]],\r\n abs_step=30, abs_begin=1, abs_end=11,\r\n rel_step=0.05, rel_begin=1, rel_end=11)\r\n print(\"Leaving points plot\")\r\n plot_frequent_wrong_programs_ratio(\r\n tasks=tasks_left[[\"abs_count\", \"rel_count\", \"task_freq\", \"most_frequent_program\"]],\r\n abs_step=5, abs_begin=1, abs_end=11,\r\n rel_step=0.05, rel_begin=1, rel_end=11)\r\n\r\n tasks = pd.DataFrame(index=tasks_left.index.levels[0])\r\n tasks[\"frequent_leaving_points_ratio\"], \\\r\n tasks[\"unique_frequent_leaving_points\"], \\\r\n tasks[\"frequent_leaving_points_programs\"] = count_frequent_wrong_programs_ratio(tasks=tasks_left,\r\n abs_threshold=10,\r\n rel_threshold=0.10)\r\n tasks[\"frequent_incorrect_submits_ratio\"], \\\r\n tasks[\"unique_frequent_incorrect_submits\"], \\\r\n tasks[\"frequent_incorrect_submits_programs\"] = count_frequent_wrong_programs_ratio(tasks=incorrect_submits,\r\n abs_threshold=50,\r\n rel_threshold=0.10)\r\n\r\n for column in [\"frequent_leaving_points_ratio\", \"unique_frequent_leaving_points\",\r\n \"frequent_incorrect_submits_ratio\", \"unique_frequent_incorrect_submits\"]:\r\n print(column, statistics(tasks[column]))\r\n\r\n return tasks[[\"frequent_leaving_points_ratio\", \"unique_frequent_leaving_points\",\r\n \"frequent_incorrect_submits_ratio\", \"unique_frequent_incorrect_submits\"]]\r\n\r\n\r\ndef learner_task_session_performance_measures(snapshots_path, task_sessions_path, tasks_path):\r\n \"\"\"\r\n Computes task session performance measures.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv file\r\n :return: pd.DataFrame; task session performance measures\r\n \"\"\"\r\n data = load_extended_snapshots(snapshots_path=snapshots_path,\r\n task_sessions_path=task_sessions_path,\r\n tasks_path=tasks_path,\r\n task_sessions_cols=[\"id\", \"student\", \"task\", \"time_spent\"],\r\n tasks_cols=[])\r\n data = data.fillna(False)\r\n data = data[data.new_correct == data.correct]\r\n\r\n data[\"granularity_submits\"] = data.granularity\r\n data[\"program_edits\"] = data.program\r\n data[\"program_1_0\"] = data.program\r\n\r\n ts = data.groupby(\"task_session\").agg({\"task_session\": \"max\",\r\n \"new_correct\": count_true,\r\n \"time_spent\": lambda x: np.log(max(x)),\r\n \"granularity\": count_edits,\r\n \"granularity_submits\": count_submits,\r\n \"program\": partial(count_deletions, mode=\"all\"),\r\n \"program_edits\": partial(count_deletions, mode=\"edits\"),\r\n \"program_1_0\": partial(count_deletions, mode=\"1_0\")})\r\n ts.new_correct = ts.new_correct.astype(int)\r\n\r\n performance = pd.DataFrame(index=ts.task_session)\r\n performance[\"time\"] = ts.time_spent\r\n performance[\"edits\"] = ts.granularity\r\n performance[\"submits\"] = ts.granularity_submits\r\n performance[\"deletions_all\"] = ts.program\r\n performance[\"deletions_edits\"] = ts.program_edits\r\n performance[\"deletions_1_0\"] = ts.program_1_0\r\n\r\n for column in 
performance:\r\n print(column, statistics(performance[column]))\r\n return performance\r\n\r\n\r\ndef learner_total_performance_measures(snapshots_path, task_sessions_path, tasks_path):\r\n \"\"\"\r\n Computes total performance measures.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv file\r\n :return: pd.DataFrame; total performance measures\r\n \"\"\"\r\n data = load_extended_snapshots(snapshots_path=snapshots_path,\r\n task_sessions_path=task_sessions_path,\r\n tasks_path=tasks_path,\r\n task_sessions_cols=[\"id\", \"student\", \"task\", \"time_spent\"],\r\n tasks_cols=[\"id\", \"level\"])\r\n data = data.fillna(False)\r\n data = data[data.new_correct == data.correct]\r\n\r\n ts = data.groupby(\"task_session\").agg({\"task\": \"max\",\r\n \"student\": \"max\",\r\n \"level\": \"max\",\r\n \"new_correct\": count_true,\r\n \"program\": last_with_empty_values,\r\n \"time_spent\": lambda x: np.log(max(x))})\r\n ts[\"new_solved\"] = ts.new_correct.astype(bool)\r\n ts = ts[ts.new_solved]\r\n ts[\"credits\"] = ts.new_solved * ts.level\r\n\r\n students = ts.groupby(\"student\").agg({\"task\": pd.Series.nunique,\r\n \"credits\": \"sum\",\r\n \"program\": count_used_blocks,\r\n \"time_spent\": \"sum\"})\r\n students.rename(columns={\"program\": \"used_blocks\",\r\n \"task\": \"solved_tasks\",\r\n \"time_spent\": \"total_time\"},\r\n inplace=True)\r\n\r\n for column in students:\r\n print(column, statistics(students[column]))\r\n return students\r\n\r\n\r\ndef measures_correlations(measures_table, method):\r\n \"\"\"\r\n Computes correlation of task measures and creates its heat table.\r\n :param measures_table: pd.DataFrame; table of measures\r\n :param method: string; \"pearson\" or \"spearman\"\r\n :return: pd.DataFrame; correlation table of measures\r\n \"\"\"\r\n correlations = measures_table.corr(method=method)\r\n print(correlations)\r\n\r\n sns.heatmap(correlations, cmap='viridis', annot=True, vmin=-1, vmax=1)\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n sns.clustermap(correlations, cmap='viridis', annot=True, vmin=-1, vmax=1, figsize=(8, 5))\r\n plt.gcf().subplots_adjust(bottom=0.35, left=0.25, right=0.75, top=0.95)\r\n plt.show()\r\n\r\n return correlations\r\n\r\n\r\ndef correlation_methods_correlations(pearson_measures_correlation, spearman_measures_correlation, full_or_triangle):\r\n \"\"\"\r\n Computes correlation of correlation methods and creates its heat table.\r\n :param pearson_measures_correlation: pd.DataFrame; correlation table by Pearson\r\n :param spearman_measures_correlation: pd.DataFrame; correlation table by Spearman\r\n :param full_or_triangle: string; \"full\" or \"triangle\", type of correlation table processing mode\r\n :return: correlation table of correlation tables\r\n \"\"\"\r\n if full_or_triangle == \"full\":\r\n correlations = np.corrcoef(np.ndarray.flatten(pearson_measures_correlation.as_matrix()),\r\n np.ndarray.flatten(spearman_measures_correlation.as_matrix()))\r\n elif full_or_triangle == \"triangle\":\r\n correlations = np.corrcoef(flattened_triangle_table_from_array(pearson_measures_correlation.as_matrix()),\r\n flattened_triangle_table_from_array(spearman_measures_correlation.as_matrix()))\r\n correlations = pd.DataFrame(correlations, index=[\"Pearson's\", \"Spearman's\"], columns=[\"Pearson's\", \"Spearman's\"])\r\n print(correlations)\r\n\r\n sns.heatmap(correlations, cmap='viridis', annot=True, vmin=-1, vmax=1)\r\n 
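# this 2x2 matrix compares how closely the Pearson and Spearman results agree; the fixed vmin/vmax keep the color scale consistent with the earlier measure heatmaps\r\n    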
plt.tight_layout()\r\n plt.show()\r\n\r\n return correlations\r\n\r\n\r\ndef all_correlations(snapshots_path, task_sessions_path, tasks_path, measures_function, **kwargs):\r\n \"\"\"\r\n Computes all levels of correlation.\r\n :param snapshots_path: string; path to snaphots .csv file\r\n :param task_sessions_path: string; path to task sessions .csv file\r\n :param tasks_path: string; path to tasks .csv file\r\n :param measures_function: function; function which computes measures\r\n :return:\r\n \"\"\"\r\n measures_table = measures_function(snapshots_path=snapshots_path,\r\n task_sessions_path=task_sessions_path,\r\n tasks_path=tasks_path,\r\n **kwargs)\r\n pearson_measures_correlation = measures_correlations(measures_table=measures_table,\r\n method=\"pearson\")\r\n spearman_measures_correlation = measures_correlations(measures_table=measures_table,\r\n method=\"spearman\")\r\n\r\n correlation_methods_correlations(pearson_measures_correlation=pearson_measures_correlation,\r\n spearman_measures_correlation=spearman_measures_correlation,\r\n full_or_triangle=\"full\")\r\n\r\n correlation_methods_correlations(pearson_measures_correlation=pearson_measures_correlation,\r\n spearman_measures_correlation=spearman_measures_correlation,\r\n full_or_triangle=\"triangle\")\r\n","repo_name":"matej-vanek/dp","sub_path":"Kod/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":25438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1417954800","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport pandas as pd\nimport yfinance as yf\nimport datetime\nimport matplotlib.pyplot as plt\n\ndef testyf(symbols):\n print(symbols)\n # Plot everything by leveraging the very powerful matplotlib package\n fig, ax = plt.subplots(figsize=(16, 9))\n\n for symbol in symbols:\n tickerdata = yf.Ticker(symbol)\n\n today = datetime.datetime.today().isoformat()\n\n tickerof = tickerdata.history(interval = '1d', start = '2020-1-1', end = today[:10])\n ph = tickerof['High']\n pl = tickerof['Low']\n\n ph = ph / ph[0];\n pl = pl / pl[0];\n\n ax.plot(ph.index, ph, label=symbol + ' high')\n ax.plot(pl.index, pl, label=symbol + ' low')\n\n ax.set_xlabel('Date')\n ax.set_ylabel('Adjusted closing price ($)')\n ax.legend()\n\n plt.show()\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n tickers_list = [\"BTC-USD\", \"ETH-USD\"]\n testyf(tickers_list)\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"maxsPPlash/finance","sub_path":"yfin/plot_comparation.py","file_name":"plot_comparation.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33850351275","text":"# 100 Days of Code: Python\n# June 20, 2022\n# Flask project: virtual bookshelf web app\n# practicing what I've learned\n\n# Import modules\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, URLField, SelectField\nfrom wtforms.validators import DataRequired\nimport sqlite3\n\n# Set up Flask app\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 
'harrypotterravenclaw'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///library.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nBootstrap(app)\n\n# Set up database\ndb = SQLAlchemy(app)\n\n# Book table\nclass Book(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(250), unique=True, nullable=False)\n author = db.Column(db.String(250), unique=False, nullable=False)\n rating = db.Column(db.Float(), unique=False, nullable=False)\n\n# With SQLAlchemy to make\n# db.create_all()\n\n# Old first way to make: with sqlite3 package\n# db = sqlite3.connect(\"library.db\")\n# cursor = db.cursor()\n# cursor.execute(\"CREATE TABLE IF NOT EXISTS books (id INTEGER PRIMARY KEY, title varchar(250) NOT NULL UNIQUE, author varchar(250) NOT NULL, rating FLOAT NOT NULL)\")\n# cursor.execute(\"INSERT INTO books VALUES(1, 'Harry Potter', 'J. K. Rowling', '9.3')\")\n# db.commit()\n\n# ---------------\n\n# CRUD examples with SQLAlchemy\n# # Read All Records\n# all_books = db.session.query(Book).all()\n\n# # Read A Particular Record By Query\n# book = Book.query.filter_by(title=\"Harry Potter\").first()\n\n# # Update A Particular Record By Query\n# book_to_update = Book.query.filter_by(title=\"Harry Potter\").first()\n# book_to_update.title = \"Harry Potter and the Chamber of Secrets\"\n# db.session.commit()\n\n# # Update A Record By PRIMARY KEY\n# book_id = 1\n# book_to_update = Book.query.get(book_id)\n# book_to_update.title = \"Harry Potter and the Goblet of Fire\"\n# db.session.commit()\n\n# # Delete A Particular Record By PRIMARY KEY\n# book_id = 1\n# book_to_delete = Book.query.get(book_id)\n# db.session.delete(book_to_delete)\n# db.session.commit()\n\n# Get all books from database\ndef get_all_books():\n all_books_list = []\n all_books_db = db.session.query(Book).all()\n for db_book in all_books_db:\n b = {\n \"id\": db_book.id,\n \"title\": db_book.title,\n \"author\": db_book.author,\n \"rating\": db_book.rating\n }\n all_books_list.append(b)\n return all_books_list\n\n# Flask routes\n@app.route('/')\ndef home():\n all_books_list = get_all_books()\n return render_template(\"index.html\", books=all_books_list)\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add():\n if request.method == \"GET\":\n return render_template(\"add.html\")\n elif request.method == \"POST\":\n new_book = Book(\n title=request.form[\"title\"],\n author=request.form[\"author\"],\n rating=request.form[\"rating\"]\n )\n db.session.add(new_book)\n db.session.commit()\n\n all_books_list = get_all_books()\n return render_template(\"index.html\", books=all_books_list)\n\n@app.route(\"/edit/\", methods=[\"GET\", \"POST\"])\ndef edit(id):\n if request.method == \"GET\":\n selected_book = None\n all_books_list = get_all_books()\n for b in all_books_list:\n if b[\"id\"] == int(id):\n selected_book = b\n return render_template(\"edit.html\", book=selected_book)\n elif request.method == \"POST\":\n db.session.query(Book). \\\n filter(Book.id == request.form[\"book_id\"]). \\\n update({\"rating\": request.form[\"rating\"]})\n db.session.commit()\n\n all_books_list = get_all_books()\n return render_template(\"index.html\", books=all_books_list)\n\n@app.route(\"/delete/\")\ndef delete(id):\n selected_book = None\n all_books_list = get_all_books()\n for b in all_books_list:\n if b[\"id\"] == int(id):\n selected_book = b\n db.session.query(Book). 
\\\n filter(Book.id == id).delete()\n db.session.commit()\n\n all_books_list = get_all_books()\n return render_template(\"index.html\", books=all_books_list)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"gdbecker/100DaysOfCodePython","sub_path":"05 - Advanced/Day 063 - Virtual Bookshelf Web App/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14832097099","text":"from typing import Dict\n\nimport torch\nfrom torch.distributions import Categorical, constraints\nfrom torch.distributions.distribution import Distribution\n\n__all__ = [\"MixtureSameFamily\"]\n\n\nclass MixtureSameFamily(Distribution):\n r\"\"\"\n The `MixtureSameFamily` distribution implements a (batch of) mixture\n distribution where all component are from different parameterizations of\n the same distribution type. It is parameterized by a `Categorical`\n \"selecting distribution\" (over `k` component) and a component\n distribution, i.e., a `Distribution` with a rightmost batch shape\n (equal to `[k]`) which indexes each (batch of) component.\n\n Examples::\n\n >>> # xdoctest: +SKIP(\"undefined vars\")\n >>> # Construct Gaussian Mixture Model in 1D consisting of 5 equally\n >>> # weighted normal distributions\n >>> mix = D.Categorical(torch.ones(5,))\n >>> comp = D.Normal(torch.randn(5,), torch.rand(5,))\n >>> gmm = MixtureSameFamily(mix, comp)\n\n >>> # Construct Gaussian Mixture Model in 2D consisting of 5 equally\n >>> # weighted bivariate normal distributions\n >>> mix = D.Categorical(torch.ones(5,))\n >>> comp = D.Independent(D.Normal(\n ... torch.randn(5,2), torch.rand(5,2)), 1)\n >>> gmm = MixtureSameFamily(mix, comp)\n\n >>> # Construct a batch of 3 Gaussian Mixture Models in 2D each\n >>> # consisting of 5 random weighted bivariate normal distributions\n >>> mix = D.Categorical(torch.rand(3,5))\n >>> comp = D.Independent(D.Normal(\n ... torch.randn(3,5,2), torch.rand(3,5,2)), 1)\n >>> gmm = MixtureSameFamily(mix, comp)\n\n Args:\n mixture_distribution: `torch.distributions.Categorical`-like\n instance. Manages the probability of selecting component.\n The number of categories must match the rightmost batch\n dimension of the `component_distribution`. Must have either\n scalar `batch_shape` or `batch_shape` matching\n `component_distribution.batch_shape[:-1]`\n component_distribution: `torch.distributions.Distribution`-like\n instance. 
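Its rightmost batch dimension must match the mixture's\n            number of categories; the constructor validates this.\n            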
Right-most batch dimension indexes component.\n \"\"\"\n arg_constraints: Dict[str, constraints.Constraint] = {}\n has_rsample = False\n\n def __init__(\n self, mixture_distribution, component_distribution, validate_args=None\n ):\n self._mixture_distribution = mixture_distribution\n self._component_distribution = component_distribution\n\n if not isinstance(self._mixture_distribution, Categorical):\n raise ValueError(\n \" The Mixture distribution needs to be an \"\n \" instance of torch.distributions.Categorical\"\n )\n\n if not isinstance(self._component_distribution, Distribution):\n raise ValueError(\n \"The Component distribution need to be an \"\n \"instance of torch.distributions.Distribution\"\n )\n\n # Check that batch size matches\n mdbs = self._mixture_distribution.batch_shape\n cdbs = self._component_distribution.batch_shape[:-1]\n for size1, size2 in zip(reversed(mdbs), reversed(cdbs)):\n if size1 != 1 and size2 != 1 and size1 != size2:\n raise ValueError(\n f\"`mixture_distribution.batch_shape` ({mdbs}) is not \"\n \"compatible with `component_distribution.\"\n f\"batch_shape`({cdbs})\"\n )\n\n # Check that the number of mixture component matches\n km = self._mixture_distribution.logits.shape[-1]\n kc = self._component_distribution.batch_shape[-1]\n if km is not None and kc is not None and km != kc:\n raise ValueError(\n f\"`mixture_distribution component` ({km}) does not\"\n \" equal `component_distribution.batch_shape[-1]`\"\n f\" ({kc})\"\n )\n self._num_component = km\n\n event_shape = self._component_distribution.event_shape\n self._event_ndims = len(event_shape)\n super().__init__(\n batch_shape=cdbs, event_shape=event_shape, validate_args=validate_args\n )\n\n def expand(self, batch_shape, _instance=None):\n batch_shape = torch.Size(batch_shape)\n batch_shape_comp = batch_shape + (self._num_component,)\n new = self._get_checked_instance(MixtureSameFamily, _instance)\n new._component_distribution = self._component_distribution.expand(\n batch_shape_comp\n )\n new._mixture_distribution = self._mixture_distribution.expand(batch_shape)\n new._num_component = self._num_component\n new._event_ndims = self._event_ndims\n event_shape = new._component_distribution.event_shape\n super(MixtureSameFamily, new).__init__(\n batch_shape=batch_shape, event_shape=event_shape, validate_args=False\n )\n new._validate_args = self._validate_args\n return new\n\n @constraints.dependent_property\n def support(self):\n # FIXME this may have the wrong shape when support contains batched\n # parameters\n return self._component_distribution.support\n\n @property\n def mixture_distribution(self):\n return self._mixture_distribution\n\n @property\n def component_distribution(self):\n return self._component_distribution\n\n @property\n def mean(self):\n probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)\n return torch.sum(\n probs * self.component_distribution.mean, dim=-1 - self._event_ndims\n ) # [B, E]\n\n @property\n def variance(self):\n # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])\n probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)\n mean_cond_var = torch.sum(\n probs * self.component_distribution.variance, dim=-1 - self._event_ndims\n )\n var_cond_mean = torch.sum(\n probs * (self.component_distribution.mean - self._pad(self.mean)).pow(2.0),\n dim=-1 - self._event_ndims,\n )\n return mean_cond_var + var_cond_mean\n\n def cdf(self, x):\n x = self._pad(x)\n cdf_x = self.component_distribution.cdf(x)\n mix_prob = 
self.mixture_distribution.probs\n\n return torch.sum(cdf_x * mix_prob, dim=-1)\n\n def log_prob(self, x):\n if self._validate_args:\n self._validate_sample(x)\n x = self._pad(x)\n log_prob_x = self.component_distribution.log_prob(x) # [S, B, k]\n log_mix_prob = torch.log_softmax(\n self.mixture_distribution.logits, dim=-1\n ) # [B, k]\n return torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) # [S, B]\n\n def sample(self, sample_shape=torch.Size()):\n with torch.no_grad():\n sample_len = len(sample_shape)\n batch_len = len(self.batch_shape)\n gather_dim = sample_len + batch_len\n es = self.event_shape\n\n # mixture samples [n, B]\n mix_sample = self.mixture_distribution.sample(sample_shape)\n mix_shape = mix_sample.shape\n\n # component samples [n, B, k, E]\n comp_samples = self.component_distribution.sample(sample_shape)\n\n # Gather along the k dimension\n mix_sample_r = mix_sample.reshape(\n mix_shape + torch.Size([1] * (len(es) + 1))\n )\n mix_sample_r = mix_sample_r.repeat(\n torch.Size([1] * len(mix_shape)) + torch.Size([1]) + es\n )\n\n samples = torch.gather(comp_samples, gather_dim, mix_sample_r)\n return samples.squeeze(gather_dim)\n\n def _pad(self, x):\n return x.unsqueeze(-1 - self._event_ndims)\n\n def _pad_mixture_dimensions(self, x):\n dist_batch_ndims = self.batch_shape.numel()\n cat_batch_ndims = self.mixture_distribution.batch_shape.numel()\n pad_ndims = 0 if cat_batch_ndims == 1 else dist_batch_ndims - cat_batch_ndims\n xs = x.shape\n x = x.reshape(\n xs[:-1]\n + torch.Size(pad_ndims * [1])\n + xs[-1:]\n + torch.Size(self._event_ndims * [1])\n )\n return x\n\n def __repr__(self):\n args_string = (\n f\"\\n {self.mixture_distribution},\\n {self.component_distribution}\"\n )\n return \"MixtureSameFamily\" + \"(\" + args_string + \")\"\n","repo_name":"pytorch/pytorch","sub_path":"torch/distributions/mixture_same_family.py","file_name":"mixture_same_family.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"593526199","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the jumpingOnClouds function below.\ndef jumpingOnClouds(c):\n count = 0\n pos = 0\n while pos < len(c):\n count += 1\n if pos < len(c)-2 and c[pos + 2] == 0:\n pos += 1\n pos += 1 \n return count - 1 \n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n c = list(map(int, input().rstrip().split()))\n\n result = jumpingOnClouds(c)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Vivekagent47/HackerRank","sub_path":"Problem Solving/49.py","file_name":"49.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72388547112","text":"import hashlib\nimport json\nimport os\nimport pathlib\nimport urllib.parse\nimport re\nimport shlex\nimport shutil\nimport subprocess\nfrom collections.abc import Sequence\nfrom urllib.parse import unquote, urljoin, urlparse, parse_qs\n\nimport pyperclip\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import http\nfrom mitmproxy.flow import Flow\n\nwith open(os.path.join(\n pathlib.Path(__file__).parent.absolute(), \"config.json\")) as f:\n json_config = json.load(f)\n\nmap_local_base_urls = json_config['mapLocalBaseUrls']\nmap_local_dir = json_config['mapLocalDir']\n\n\ndef is_url_in_map_local_base_urls(url):\n for base_url in 
map_local_base_urls:\n if base_url in url:\n return True\n return False\n\n\ndef get_after_base_url_from_local_base(without_parameters_url):\n for base_url in map_local_base_urls:\n if base_url in without_parameters_url:\n return without_parameters_url.replace(base_url, '')\n return without_parameters_url\n\n\ndef get_status_code():\n \"\"\"\n :return: the response status code as a string\n \"\"\"\n ctx.master.commands.execute(\"cut.clip @focus response.status_code\")\n return pyperclip.paste()\n\n\ndef trim_response_content(s):\n \"\"\"\n :return: a string\n \"\"\"\n if isinstance(s, bytes):\n s = s.decode('utf-8')\n if len(s) >= 2100:\n s = s[:2100] + \"...\"\n return s\n\n\ndef get_time_from_timestamps():\n ctx.master.commands.execute(\"cut.clip @focus request.timestamp_start\")\n start = pyperclip.paste()\n ctx.master.commands.execute(\"cut.clip @focus response.timestamp_end\")\n end = pyperclip.paste()\n return float(end) - float(start)\n\n\ndef get_curl_formatted():\n ctx.master.commands.execute(\"export.clip curl @focus\")\n s = pyperclip.paste()\n\n parts = shlex.split(s)\n output = parts[0] + \" \\\\\\n\"\n next_cmd = False\n markdown_table = \"\"\n\n for s in parts[1:]:\n parsed_url = urlparse(s)\n if parsed_url.scheme and parsed_url.netloc:\n # unquote since the export.clip command returns a string like this:\n # ?alias=projectStatus%3Astatus\n s = unquote(s)\n\n if next_cmd:\n output += \"'\" + s + \"' \\\\\\n\"\n next_cmd = False\n elif s.startswith(\"--\"):\n output += \" \" + s + \" \\\\\\n\"\n next_cmd = False\n elif s.startswith(\"-\"):\n output += \" \" + s + \" \"\n next_cmd = True\n else:\n # pass -g for the unescaped URL so the curl can be executed in the\n # terminal\n output += \" -g \\\\\\n\"\n\n # pretty much always, this is the URL at the very end of the command\n output += \" '\" + s + \"' \\\\\\n\"\n\n query_params = parse_qs(urlparse(s).query)\n if len(query_params) >= 1:\n url = strip_base_url_prefix(s)\n if '?' in url:\n url = url[:url.index('?')]\n\n ctx.master.commands.execute(\"cut.clip @focus request.method\")\n method = pyperclip.paste()\n full = method + \" \" + url\n\n param_list = [{\"Parameter\": key, \"Value\": value[0]} for key, value in query_params.items()]\n markdown_table = \"\\n
\\n\" + full + \"\\n\\n\"\n for param in param_list:\n markdown_table += f\"`{param['Parameter']}`\\n{param['Value']}\\n\\n\"\n markdown_table += '
'\n\n next_cmd = False\n\n output = output.strip()\n if output.endswith(\"\\\\\"):\n output = output[:-1]\n\n return \"```bash\\n\" + output + \"\\n```\\n\" + markdown_table\n\n\nclass CurlAddon:\n\n @command.command(\"cu\")\n def do(self) -> None:\n pyperclip.copy(get_curl_formatted())\n ctx.log.alert(\"Copied cURL with ```bash to clipboard\")\n\n\nclass UrlAddon:\n\n @command.command(\"u\")\n def do(self) -> None:\n ctx.master.commands.execute(\"cut.clip @focus request.url\")\n url = unquote(pyperclip.paste())\n pyperclip.copy(url)\n ctx.log.alert(\"Copied to clipboard: \" + url)\n\n\nclass ShortUrlMarkdownAddon:\n\n @command.command(\"ur\")\n def do(self) -> None:\n \"\"\"Copy the method plus the URL with the base and the params cut off\n with surrounding Markdown code blocks.\n \n Example: `GET printouts/printhours.pdf`\n \"\"\"\n ctx.master.commands.execute(\"cut.clip @focus request.url\")\n url = strip_base_url_prefix(unquote(pyperclip.paste()))\n\n ctx.master.commands.execute(\"cut.clip @focus request.method\")\n method = pyperclip.paste()\n full = \"`\" + method + \" \" + url + \"`\"\n pyperclip.copy(full)\n\n ctx.log.alert(\"Copied to clipboard: \" + full)\n\n\nclass ShortUrlAddon:\n\n @command.command(\"url\")\n def do(self) -> None:\n \"\"\"Copy the method plus the URL with the base and the params cut off.\n\n Example: GET printouts/printhours.pdf\n \"\"\"\n ctx.master.commands.execute(\"cut.clip @focus request.url\")\n raw_url = unquote(pyperclip.paste())\n url = strip_base_url_prefix(raw_url)\n if raw_url != url:\n if \"?\" in url:\n url = url[:url.index(\"?\")]\n\n ctx.master.commands.execute(\"cut.clip @focus request.method\")\n method = pyperclip.paste()\n pyperclip.copy(method + \" \" + url)\n\n ctx.log.alert(\"Copied to clipboard: \" + method + \" \" + url)\n\n\nclass AllResponseBodyAddon:\n\n @command.command(\"copyall\")\n def do(self, flows: Sequence[Flow]) -> None:\n if len(flows) != 1:\n ctx.log.alert(\"This can only operate on a single flow!\")\n else:\n flow = flows[0]\n curl = get_curl_formatted()\n code = get_status_code()\n time = get_time_from_timestamps()\n response = flow.response.content\n\n if len(response) == 0:\n pyperclip.copy(curl + \"\\nTook \" +\n \"{:.2f}\".format(time) + \"s with \" +\n \"status code \" + code +\n \" with no response body.\")\n ctx.log.alert(\"Copied cURL and status code to clipboard. 
\" +\n \"There was no response body!\")\n else:\n # noinspection PyBroadException\n try:\n as_json = \"```json\\n\" + trim_response_content(\n json.dumps(json.loads(response), indent=2)) + \\\n \"\\n```\"\n except Exception:\n as_json = \"```\\n\" + \\\n trim_response_content(response) + \\\n \"```\"\n pyperclip.copy(curl + \"\\n\" + \"Took \" +\n \"{:.2f}\".format(time) + \"s with \" +\n \"status code \" + code + \" to return:\\n\\n\" +\n as_json)\n ctx.log.alert(\n \"Copied cURL, response body and status code to clipboard\")\n\n\nclass AllResponseBodyKey:\n\n @command.command(\"a\")\n def do(self) -> None:\n ctx.master.commands.execute(\"copyall @focus\")\n\n\nclass AllResponseWithoutBodyAddon:\n\n @command.command(\"ab\")\n def do(self) -> None:\n curl = get_curl_formatted()\n code = get_status_code()\n time = get_time_from_timestamps()\n pyperclip.copy(curl + \"\\nTook \" +\n \"{:.2f}\".format(time) + \"s with \" + \"status code \" +\n code + \".\")\n ctx.log.alert(\"Copied cURL and status code to clipboard\")\n\n\n# if is_request is false, it is assumed that this is a request\ndef copy_content_as_json(content, is_request):\n if content == b'':\n ctx.log.alert(\"The flow does not have a request body!\")\n else:\n # noinspection PyBroadException\n try:\n as_json = json.dumps(json.loads(content), indent=2)\n pyperclip.copy(as_json)\n if is_request:\n ctx.log.alert(\"Copied JSON request body to clipboard\")\n else:\n ctx.log.alert(\"Copied JSON request body to clipboard\")\n except Exception:\n pyperclip.copy(content.decode('utf-8'))\n if is_request:\n ctx.log.alert(\"Copied non-JSON request body to clipboard\")\n else:\n ctx.log.alert(\"Copied non-JSON response body to clipboard\")\n\n\nclass RequestBodyAddon:\n\n @command.command(\"copyrequest\")\n def do(self, flows: Sequence[Flow]) -> None:\n if len(flows) != 1:\n ctx.log.alert(\"This can only operate on a single flow!\")\n else:\n flow = flows[0]\n content = flow.request.content\n copy_content_as_json(content, is_request=True)\n\n\nclass RequestBodyKey:\n\n @command.command(\"req\")\n def do(self) -> None:\n ctx.master.commands.execute(\"copyrequest @focus\")\n\n\nclass ResponseBodyAddon:\n\n @command.command(\"copyresponse\")\n def do(self, flows: Sequence[Flow]) -> None:\n if len(flows) != 1:\n ctx.log.alert(\"This can only operate on a single flow!\")\n else:\n flow = flows[0]\n content = flow.response.content\n copy_content_as_json(content, is_request=False)\n\n\nclass ResponseBodyKey:\n\n @command.command(\"resp\")\n def do(self) -> None:\n ctx.master.commands.execute(\"copyresponse @focus\")\n\n\nclass KeyBindingAddon:\n\n @command.command(\"k\")\n def do(self) -> None:\n ctx.master.commands.execute(\"console.view.keybindings\")\n\n\nclass FlowResumeAddon:\n\n @command.command(\"r\")\n def do(self) -> None:\n ctx.master.commands.execute(\"flow.resume @focus\")\n\n\nclass InterceptAddon:\n\n @command.command(\"intercept.inner\")\n def do(self, flows: Sequence[Flow]) -> None:\n for f in flows:\n url = re.escape(f.request.url)\n ctx.master.commands.execute(\n f\"console.command set intercept '~u {url} & ~s'\")\n\n\nclass InterceptKey:\n\n @command.command(\"cept\")\n def do(self) -> None:\n ctx.master.commands.execute(\"intercept.inner @focus\")\n\n\ndef get_url_without_parameters(url):\n path = urljoin(url, urlparse(url).path)\n # drop /v1/ for a cleaner file system\n path = path.replace(\"/index.php/v1/\", \"/index.php/\")\n # strip trailing / to be able to create a file on file system\n if path.endswith('/'):\n return 
path[:-1]\n    return path\n\n\ndef create_url_parameters_hash(url):\n    query = urlparse(url).query\n    if query:\n        return ' ' + hashlib.sha256(query.encode('utf-8')).hexdigest()[0:12]\n    return ''\n\n\ndef append_to_debug(data):\n    home_dir = os.path.expanduser(\"~\")\n    debug_file = os.path.join(home_dir, \"debug.py\")\n    with open(debug_file, \"a\") as df:\n        df.write(data + '\\n')\n\n\n# noinspection PyShadowingNames\ndef map_flow_to_local_path(flow):\n    url = flow.request.pretty_url\n\n    if 'stripUrlParts' in json_config:\n        query_params = parse_qs(urlparse(url).query)\n        for key in json_config['stripUrlParts']:\n            query_params.pop(key, None)\n\n        new_query_string = \"&\".join(f\"{key}={value[0]}\" for key, value in query_params.items())\n        # noinspection PyProtectedMember\n        url = urlparse(url)._replace(query=new_query_string).geturl()\n\n    without_parameters = get_url_without_parameters(url)\n    after_base = get_after_base_url_from_local_base(without_parameters)\n    base_name = os.path.basename(after_base)\n    return os.path.join(map_local_dir, os.path.dirname(after_base),\n                        flow.request.method + ' ' +\n                        base_name) + create_url_parameters_hash(url) + \".json\"\n\n\nclass MapLocalRequests:\n    \"\"\" Map to local response bodies from a directory. \"\"\"\n\n    @staticmethod\n    def request(flow: http.HTTPFlow) -> None:\n        local_file = map_flow_to_local_path(flow)\n\n        if os.path.exists(local_file):\n            with open(local_file) as f:\n                data = json.loads(f.read())\n                flow.response = http.Response.make(\n                    data['statusCode'], json.dumps(data['response'], indent=2),\n                    data['headers'])\n\n\ndef format_any_content(content):\n    if content == b'':\n        return None\n    try:\n        return json.loads(content)\n    except (json.JSONDecodeError, UnicodeDecodeError):\n        try:\n            return content.decode('utf-8')\n        except UnicodeDecodeError:\n            # note that this is the case for POST files\n            return \"unmappable content for JSON\"\n\n\ndef strip_base_url_prefix(url):\n    for base_url in map_local_base_urls:\n        if url.startswith(base_url):\n            return url[len(base_url):]\n    return url\n\n\ndef check_if_flow_ignored(flow):\n    if 'ignoreForLocalMapping' in json_config:\n        url = strip_base_url_prefix(flow.request.pretty_url)\n        for ignore in json_config['ignoreForLocalMapping']:\n            parts = ignore.split(\" \")\n            if parts[0] == flow.request.method and parts[1] == url:\n                return True\n    return False\n\n\nclass CreateLocal:\n\n    @command.command(\"local\")\n    def do(self, flows: Sequence[Flow]) -> None:\n        has_error = False\n        single_flow = len(flows) == 1\n        no_response_count = 0\n        unmappable_response_count = 0\n\n        for flow in flows:\n            if flow.response:\n                url = flow.request.pretty_url\n                if is_url_in_map_local_base_urls(url):\n                    if check_if_flow_ignored(flow):\n                        continue\n\n                    local_file = map_flow_to_local_path(flow)\n                    local_dir = os.path.dirname(local_file)\n\n                    if not os.path.exists(local_dir):\n                        os.makedirs(local_dir)\n\n                    response_headers = {}\n                    for k, v in flow.response.headers.items():\n                        response_headers[k] = v\n\n                    request_content = format_any_content(flow.request.content)\n                    resp = format_any_content(flow.response.content)\n\n                    if request_content == \"unmappable content for JSON\" or \\\n                            resp == \"unmappable content for JSON\":\n                        unmappable_response_count += 1\n                        continue\n\n                    req = flow.request\n                    data = {\n                        'response': resp,\n                        'headers': response_headers,\n                        'statusCode': flow.response.status_code,\n                        'url': req.method + ' ' + req.pretty_url,\n                        'requestBody': request_content,\n                    }\n\n                    json_string = json.dumps(data, indent=2)\n\n                    if single_flow:\n                        if 'startCommandOnLocalMap' in json_config 
and \\\n                                len(json_config['startCommandOnLocalMap']) >= 1:\n\n                            with open(local_file, 'w+') as f1:\n                                f1.write(json_string)\n\n                            command = json_config['startCommandOnLocalMap']\n                            cmd = map(lambda x: x.replace('###', local_file), command)\n                            subprocess.Popen(cmd,\n                                             stdout=subprocess.PIPE,\n                                             stderr=subprocess.PIPE,\n                                             stdin=subprocess.PIPE)\n                        else:\n                            c = ctx.master.spawn_editor(json_string)\n                            with open(local_file, 'w+') as f2:\n                                f2.write(c)\n                    else:\n                        with open(local_file, 'w+') as f3:\n                            f3.write(json_string)\n\n            else:\n                if single_flow:\n                    has_error = True\n                    ctx.log.alert(\n                        'Configured base URLs are not present: ' +\n                        str(map_local_base_urls))\n                else:\n                    no_response_count += 1\n\n        if not has_error:\n            if no_response_count == 0 and unmappable_response_count == 0:\n                ctx.log.alert('Saved ' + str(len(flows)) + ' flow(s) to ' +\n                              map_local_dir)\n            else:\n                ctx.log.alert('Saved ' + str(\n                    len(flows) - no_response_count -\n                    unmappable_response_count) + ' flow(s) to ' +\n                              map_local_dir + ' --- ' +\n                              str(no_response_count) +\n                              ' flow(s) without response --- ' +\n                              str(unmappable_response_count) +\n                              ' flow(s) unmappable to JSON')\n\n\nclass CreateAllLocalKey:\n\n    @command.command(\"la\")\n    def do(self) -> None:\n        ctx.master.commands.execute(\"local @shown\")\n\n\nclass CreateLocalKey:\n\n    @command.command(\"l\")\n    def do(self) -> None:\n        ctx.master.commands.execute(\"local @focus\")\n\n\nclass DeleteLocalRequest:\n\n    @command.command(\"localdelete\")\n    def do(self, flows: Sequence[Flow]) -> None:\n        for flow in flows:\n            url = flow.request.pretty_url\n\n            if is_url_in_map_local_base_urls(url):\n                local_file = map_flow_to_local_path(flow)\n\n                try:\n                    os.remove(local_file)\n                    ctx.log.alert('Deleted ' + local_file)\n                except FileNotFoundError:\n                    ctx.log.alert('Local mapping files do not exist!')\n            else:\n                ctx.log.alert('Configured base URLs are not present: ' +\n                              str(map_local_base_urls))\n\n\nclass DeleteLocalRequestKey:\n\n    @command.command(\"ld\")\n    def do(self) -> None:\n        ctx.master.commands.execute(\"localdelete @focus\")\n\n\nclass ClearMappedLocalRequests:\n\n    @command.command(\"lc\")\n    def do(self) -> None:\n        shutil.rmtree(map_local_dir)\n        os.mkdir(map_local_dir)\n        ctx.log.alert('Deleted everything under ' + map_local_dir)\n\n\nclass ClearViewFilterKey:\n\n    @command.command(\"fc\")\n    def do(self) -> None:\n        ctx.master.commands.execute(\"set view_filter ''\")\n        ctx.log.alert('Cleared view_filter')\n\n\nclass SetupViewFilterKey:\n\n    @command.command(\"f\")\n    def do(self) -> None:\n        ctx.master.commands.execute(\"console.command set view_filter\")\n\n\naddons = [\n    MapLocalRequests(),\n    CreateLocal(),\n    DeleteLocalRequest(),\n    ClearMappedLocalRequests(),\n    # other functionality\n    CurlAddon(),\n    UrlAddon(),\n    ShortUrlAddon(),\n    ShortUrlMarkdownAddon(),\n    RequestBodyAddon(),\n    ResponseBodyAddon(),\n    KeyBindingAddon(),\n    InterceptAddon(),\n    FlowResumeAddon(),\n    AllResponseBodyAddon(),\n    AllResponseWithoutBodyAddon(),\n    # keys\n    SetupViewFilterKey(),\n    ClearViewFilterKey(),\n    DeleteLocalRequestKey(),\n    CreateLocalKey(),\n    CreateAllLocalKey(),\n    ResponseBodyKey(),\n    RequestBodyKey(),\n    AllResponseBodyKey(),\n    InterceptKey(),\n]\n","repo_name":"Dima-369/mitmproxy-config","sub_path":"addon-commands.py","file_name":"addon-commands.py","file_ext":"py","file_size_in_byte":19094,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"36076463380","text":"from bottle import default_app, response, route\nimport os\nimport json\nimport 
redis\n\n\nREDIS_HOST = os.environ['REDIS_HOST']\nredis_db = redis.Redis(host=REDIS_HOST, port=6379, db=0)\n\n\n@route('/redis-info')\ndef redis_info():\n    response.headers['Content-Type'] = 'application/json'\n    response.headers['Cache-Control'] = 'no-cache'\n    info = redis_db.info()\n    if info is not None:\n        return info\n    response.status = 500\n    return 'Failed Redis Connection'\n\n\n@route('/add/<key>=<value>')\ndef add(key, value):\n    redis_db.set(key, value)\n    response.headers['Content-Type'] = 'text/html'\n    response.headers['Cache-Control'] = 'no-cache'\n    return f'Key-Value: {key}:{value} has been added.'\n\n\n@route('/remove/<key>')\ndef remove(key):\n    value = redis_db.get(key)\n    if value is not None:\n        redis_db.delete(key)\n        return f'{key}:{value} has been deleted.'\n    return f'Key:{key} not found.'\n\n\n@route('/list/keys')\ndef list_keys():\n    redis_keys = [key.decode('utf-8') for key in redis_db.keys('*')]\n    response.headers['Content-Type'] = 'application/json'\n    response.headers['Cache-Control'] = 'no-cache'\n    return json.dumps(redis_keys)\n\n\n@route('/list/key-values')\ndef list_key_values():\n    redis_keys = [key.decode('utf-8') for key in redis_db.keys('*')]\n    key_values = {}\n    for key in redis_keys:\n        value = redis_db.get(key)\n        key_values[key] = value.decode('utf-8')\n    response.headers['Content-Type'] = 'application/json'\n    response.headers['Cache-Control'] = 'no-cache'\n    return json.dumps(key_values)\n\n\napp = default_app()\n","repo_name":"tomowatt/flask-bottle-redis-demo","sub_path":"bottle/src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"2856058303","text":"# Write a Python script that will:\n#\n# * Load the data from the employees.csv file and store it in a DataFrame\n# * Display the employees who have a commission\n# * Display Jennifer Whalen's salary\n# * Display the full name of the employees who are in department 50 and have a salary > 3000\n# * Display the full name of the managers\n\nimport pandas as pd\n\nwith open('C:\\\\Users\\\\Administrateur\\\\PycharmProjects\\\\Yassine\\\\m2i\\\\data\\\\hr\\\\employees.csv') as f:\n\n    # Build a list of column names from the header line with split(); it is used in the loop below\n    columns = 'EMPLOYEE_ID;FIRST_NAME;LAST_NAME;EMAIL;PHONE_NUMBER;HIRE_DATE;JOB_ID;SALARY;COMMISSION_PCT;MANAGER_ID;DEPARTMENT_ID'.split(';')\n    # >>>> ['EMPLOYEE_ID', 'FIRST_NAME', 'LAST_NAME', 'EMAIL', 'PHONE_NUMBER', 'HIRE_DATE', 'JOB_ID', 'SALARY', 'COMMISSION_PCT', 'MANAGER_ID', 'DEPARTMENT_ID']\n\n    # Build a list whose elements are the lines of the file\n    lines = f.readlines()\n\ndata_salaries = {j:[line[1:-3].split(';')[i] for line in lines if 'EMPLOYEE_ID' not in line] for i,j in enumerate(columns) }\n    #print(data)\n\ndf = pd.DataFrame(data_salaries)\n# print(df [df['COMMISSION_PCT'] != '' ])\n#\n# print(df[df['LAST_NAME']=='Whalen' ]['SALARY'])\n\n# * Display the full name of the employees who are in department 50 and have a salary > 3000\n# print(df['SALARY'].astype(float))\nemp_selected = df[(df['DEPARTMENT_ID'] =='50') & (df['SALARY'].astype(float) > 3000)]\nprint(emp_selected['FIRST_NAME']+' '+emp_selected['LAST_NAME'])\n\n# * Display the full name of the managers\n#\n# df4=df['EMPLOYEE_ID','FIRST_NAME','LAST_NAME']\n# df4['EMPLOYEE_ID']=df4['EMPLOYEE_ID']+''\n# df5 = df['MANAGER_ID','FIRST_NAME','LASTNAME']\n# df5.columns=['EMPLOYEE_ID','FIRST_NAME','LAST_NAME']\n#\n# print(df4.join(df5, on='EMPLOYEE_ID'))\n\n# Does not work yet, try again with pd.Series\n\n","repo_name":"adelinedepaepe/perso_m2i","sub_path":"3-Python/Pandas/pandas_practice3.py","file_name":"pandas_practice3.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"9838755006","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\"\"\"\n    @Time: 19-6-12 3:12 PM\n    @Author: hezhiqiang\n    @FileName: douban_spider.py\n    @IDE: PyCharm\n\n    Crawl all the TV shows listed on Douban\n\n\"\"\"\nimport requests\nimport json\n\n\nclass DoubanSpider(object):\n\n    def __init__(self):\n\n        self.url_temp = \"https://m.douban.com/rexxar/api/v2/subject_collection/tv_domestic/items?start={}&count=18&loc_id=108288\"\n        self.headers = {\n            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\n            'Referer': 'https://m.douban.com/tv/chinese'\n        }\n\n    def parse_url(self, url):\n        print(url) # print the URL we are about to request\n        response = requests.get(url, headers=self.headers)\n        return response.content.decode('utf-8').replace(\";jsonp1(\", \"\").replace(\");\", \"\")\n        # returns a json_str; the replace calls strip the JSONP wrapper\n\n    def get_content_list(self, json_str):\n        dict_ret = json.loads(json_str) # convert the JSON data into a dict\n        content_list = dict_ret[\"subject_collection_items\"]\n        return content_list\n\n    def save_content_list(self, content_list):\n        with open(\"douban3.txt\", 'a', encoding='utf-8') as f: # open in append mode\n            for content in content_list:\n                f.write(json.dumps(content, ensure_ascii=False))\n                f.write(\"\\n\") # write a newline after each record\n            print(\"Saved successfully...........\")\n\n    def run(self): # main logic\n        num = 0 # starting offset\n\n        while True:\n            # 1.start url\n            url = self.url_temp.format(num)\n            # 2. send the request and get the response\n            json_str = self.parse_url(url)\n            # 3. extract the data\n            content_list = self.get_content_list(json_str)\n            # 4. save\n            self.save_content_list(content_list)\n\n            if len(content_list) < 18: # the last page has fewer than 18 records\n                break\n\n            # 5. build the next page's URL and keep looping\n            num += 18\n\n\nif __name__ == '__main__':\n\n    douban_spider = DoubanSpider()\n    
douban_spider.run()\n\n","repo_name":"Jianglinhe/Spider","sub_path":"remote/douban_spider.py","file_name":"douban_spider.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"3175242845","text":"def repeat_string(s, req_len):\n    return (s * req_len)[:req_len]\n\ndef szyfruj(plain_text, klucz):\n    plain_text = plain_text.replace(\" \",\"\")\n    klucz = repeat_string(klucz, len(plain_text))\n    s_tab = []\n    for i in range(0, len(plain_text)):\n        pt_ascii = ord(plain_text[i])-64\n        k_ascii = ord(klucz[i])-64\n        if (pt_ascii+k_ascii)%26 == 0:\n            s_tab.append(chr((pt_ascii+k_ascii)%26+64+26)) # wrap around the alphabet\n        else: \n            s_tab.append(chr((pt_ascii+k_ascii)%26+64))\n    print(''.join(s_tab))\n\nplain_text = input(\"Enter the text to encrypt: \")\nklucz = input(\"Enter the key: \")\nszyfruj(plain_text, klucz)\n","repo_name":"chixPL/matura-probna-infr-operon-2023","sub_path":"Zadanie 3/3_2.py","file_name":"3_2.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72172373033","text":"import json\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.autograd import Variable\n\nfrom collections import defaultdict\nimport os\nimport cv2\nimport numpy as np\n\nimport random\n\nimport grid_distortion\n\nPADDING_CONSTANT = 0\n\n# This creates the batches and zero pads the images that are smaller than the longest one,\n# so every one in the batch is the same size. line_imgs is a 4d tensor\n# with shape (batch_size, num_channels, img_height, img_width)\ndef collate(batch):\n    batch = [b for b in batch if b is not None]\n    #These all should be the same size or error\n    assert len(set([b['line_img'].shape[0] for b in batch])) == 1\n    assert len(set([b['line_img'].shape[2] for b in batch])) == 1\n\n    # dim0 = img_height\n    dim0 = batch[0]['line_img'].shape[0]\n    # dim1 = img_width\n    dim1 = max([b['line_img'].shape[1] for b in batch])\n    # dim2 = num of color channels\n    dim2 = batch[0]['line_img'].shape[2]\n    \n\n    # zero pad small images\n    input_batch = np.full((len(batch), dim0, dim1, dim2), PADDING_CONSTANT).astype(np.float32)\n\n    for i in range(len(batch)):\n        b_img = batch[i]['line_img']\n        input_batch[i,:,:b_img.shape[1],:] = b_img\n\n    # (batch_size, num_color_channels, img_height, img_width)\n    line_imgs = input_batch.transpose([0,3,1,2])\n    line_imgs = torch.from_numpy(line_imgs)\n\n    return {\n        \"line_imgs\": line_imgs,\n        \"gt\": [b['gt'] for b in batch]\n    }\n\nclass HwDataset(Dataset):\n    def __init__(self, data, img_height=32, augmentation=False):\n        # data is a list of dicts holding the image path ('im') and ground truth ('gt')\n        self.data = data\n        self.img_height = img_height # target height for resized line images
\n        self.augmentation = augmentation\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, idx):\n        item = self.data[idx]\n\n        img = cv2.imread(item['im'])\n\n        if img is None:\n            print(\"Warning: image is None:\", item['im'])\n            return None\n\n        img = cv2.resize(img, (self.img_height, self.img_height), interpolation=cv2.INTER_CUBIC) # resize to a square img_height x img_height patch (cv2.resize takes (width, height))\n\n        # image augmentation (this basically distorts the image so we can have more training data\n        # without hand-generating it)\n        if self.augmentation:\n            img = grid_distortion.warp_image(img) \n\n        img = img.astype(np.float32)\n        img = img / 128.0 - 1.0 # normalize pixel values to roughly [-1, 1]\n\n        gt = item['gt']\n\n        # return a dict object with these elements\n        return {\n            \"line_img\": img,\n            \"gt\": gt,\n            'filename' : item['im']\n        }\n","repo_name":"jsw800/hwr","sub_path":"recognition/classifier/hw_dataset.py","file_name":"hw_dataset.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"27459785620","text":"'''\nCreate a function that converts a temperature from Celsius to Fahrenheit.\nThe lowest possible temperature that physical matter can reach is -273.15 C.\nWith that in mind, please improve the function by making it print out a message in case a number lower than -273.15 is passed as input when calling the function.\n'''\ndef cel_to_fahr(c):\n    if c < -273.15:\n        return \"Absurd Temperature Value\"\n    else:\n        f = float(c) * 9/5 + 32\n        return f\n\nc = float(input(\"Enter Temperature in Celsius: \"))\nprint(cel_to_fahr(c))\n","repo_name":"nirmalnishant645/Python-Programming","sub_path":"Functions-and-Conditionals/Functions-and-Conditionals-Exercise-02.py","file_name":"Functions-and-Conditionals-Exercise-02.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"69889600552","text":"import random\nimport time\n\nclass Player:\n    def __init__(self, name, wallet=0, card_count=0):\n        self.name = name\n        self.wallet = wallet\n        self.card_count = card_count\n\n    def update_wallet(self, wallet):\n        self.wallet = self.wallet + wallet\n\n    def update_card_count(self, card_number):\n        self.card_count = self.card_count + card_number\n\ndef deal_card():\n    deck = []\n    suit = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}\n    for i in suit.items():\n        deck.append(i[1])\n    random.shuffle(deck)\n    dealt_card = deck.pop()\n    return dealt_card\n\ndef place_bet(w):\n    bet = int(input(\"How much would you like to bet:\"))\n    while bet > w:\n        bet = int(input(f\"You do not have enough funds, you have ${w} and bet ${bet}, please enter another bet: \"))\n    print(f\"You have bet ${bet}.\")\n    return bet\n\ndef game_begin(w, p, d):\n    print(f\"You have ${w}!\")\n    time.sleep(1)\n    bet = place_bet(w)\n    time.sleep(1)\n    p.update_card_count(deal_card())\n    time.sleep(1)\n    d.update_card_count(deal_card())\n    print(f\"Your total card count is {p.card_count}. 
My total card count is {d.card_count}\")\n    return bet\n    \ndef deal_another(p, d):\n    p.update_card_count(deal_card())\n    time.sleep(1)\n    if d.card_count < 16:\n        d.update_card_count(deal_card())\n        time.sleep(1)\n    print(f\"Your total card count is {p.card_count}\")\n    \ndef lost_bet(bet, p):\n    return p.update_wallet(-bet)\n    \ndef won_bet(bet, p):\n    return p.update_wallet(bet * 1.5)\n    \ndef win_or_lose(p, d, bet):\n    if p.card_count > 21:\n        print(\"Dealer Wins\")\n        return lost_bet(bet, p)\n    elif p.card_count < d.card_count:\n        if d.card_count <=21:\n            print(\"Dealer Wins!\")\n            return lost_bet(bet, p)\n        else:\n            print(\"You win!\")\n            return won_bet(bet, p)\n    elif p.card_count > d.card_count:\n        print(\"You win!\")\n        return won_bet(bet, p)\n    else:\n        print(\"Push! It's a tie.\")\n\ndef main():\n    name = input(\"What is your name?\").strip()\n    wallet = int(input(\"How much money would you like to add to your wallet?\"))\n    player_one = Player(name)\n    player_one.update_wallet(wallet)\n    dealer = Player('dealer')\n    play_again = 'yes'\n    print(\"Let's play BlackJack!\")\n    while play_again == 'yes':\n        # reset both hands and allow new cards before each round\n        player_one.card_count = 0\n        dealer.card_count = 0\n        another_card = 'yes'\n        bet = game_begin(player_one.wallet, player_one, dealer)\n        while another_card == 'yes':\n            deal_another(player_one, dealer)\n            another_card = input('Would you like another card?').lower()\n        win_or_lose(player_one, dealer, bet)\n        print(player_one.card_count, dealer.card_count, player_one.wallet)\n        play_again = input(\"Would you like to play again?(Yes or No)\").lower()\n        \n        \nmain()\n\n\n\n\n\n\n\n\n","repo_name":"christianbrotherson/CasinoGames","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"71784285672","text":"import numpy as np\nimport plotly.offline as pyo\nimport plotly.graph_objs as go\n\nnp.random.seed(42)\nrandom_x = np.random.randint(1,101,100)\nrandom_y = np.random.randint(1,101,100)\n\ndata = [go.Scatter(x = random_x, \n                   y = random_y,\n                   mode = 'markers', \n                   marker = dict(\n                       size = 12,\n                       color = 'rgb(51,204,153)',\n                       symbol = 'pentagon',\n                       line = {'width':2}\n                   ))]\n#this will allow you to name the plot and axes, and will let you choose what happens when hovering over a plot\nlayout = go.Layout(title = \"hello first plot\",\n                   #these do the same thing but plotly docs use the \"yaxis\" syntax\n                   xaxis = {'title': 'random x'},\n                   yaxis = dict(title = 'random y'),\n                   hovermode = 'closest')\nfig = go.Figure(data=data, layout=layout)\npyo.plot(fig, filename = 'scatter.html')","repo_name":"JPcalkins213/plotly_notes","sub_path":"scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"29164387415","text":"def two_sum(target, iterable):\n    s2 = set(map(lambda x: target - x, iterable))\n    res = tuple(set(iterable).intersection(s2))\n    return res\n\nwith open(\"day_9_input.txt\", 'r') as f:\n    buffer = [int(f.__next__().strip()) for _ in range(25)]\n    for line in f:\n        t = int(line.strip())\n        if not two_sum(t, buffer):\n            print(t)\n        buffer.pop(0)\n        buffer.append(t)\n","repo_name":"RyanSchaefer/AdventOfCode2020","sub_path":"day_9/day_9_part_1.py","file_name":"day_9_part_1.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73412238953","text":"import 
os\nfrom unittest import TestCase\n\nfrom foster.init import Init\n\n\nclass InitTestCase(TestCase):\n\n def setUp(self):\n root = os.path.join(os.path.dirname(__file__), 'frames', 'init')\n self.target = os.path.join(root, 'package.py')\n os.chdir(root)\n\n def tearDown(self):\n if os.path.isfile(self.target):\n os.unlink(self.target)\n\n def test_create_package_file(self):\n Init().run()\n self.assertTrue(os.path.isfile(self.target))\n with open(self.target, 'rb') as f:\n self.assertTrue(f.read())\n\n def test_create_package_dont_overwrite_existing_file(self):\n open(self.target, 'a').close()\n with self.assertRaises(SystemExit):\n Init().run()\n with open(self.target, 'rb') as f:\n self.assertEqual(f.read(), b'')\n","repo_name":"hugollm/foster","sub_path":"tests/init_tests.py","file_name":"init_tests.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"447413560","text":"from __future__ import annotations\n\nimport numpy as np\nimport pytest\n\nfrom sentinelhub.testing_utils import assert_statistics_match\n\n\n@pytest.mark.parametrize(\n (\"data\", \"expected_statistics\"),\n [\n (\n np.arange(100).reshape(10, 10, 1),\n {\"exp_shape\": (10, 10, 1), \"exp_min\": 0, \"exp_max\": 99, \"exp_mean\": 49.5, \"exp_median\": 49.5},\n ),\n (\n np.arange(100).reshape(10, 10, 1),\n {\"exp_shape\": (10, 10, 1), \"exp_max\": 99, \"exp_mean\": 50, \"exp_median\": 50, \"rel_delta\": 0.05},\n ),\n (\n np.arange(100).reshape(10, 10, 1),\n {\"exp_shape\": (10, 10, 1), \"exp_max\": 99, \"exp_mean\": 50, \"exp_median\": 50, \"abs_delta\": 0.5},\n ),\n ],\n)\ndef test_assert_statistics_match(data: np.array, expected_statistics: dict) -> None:\n assert_statistics_match(data, **expected_statistics)\n\n\n@pytest.mark.parametrize(\n (\"data\", \"expected_statistics\"),\n [\n (\n np.arange(100).reshape(10, 10, 1),\n {\n \"exp_shape\": (10, 10, 1),\n \"exp_min\": 0,\n \"exp_max\": 100,\n },\n ),\n (\n np.arange(100).reshape(10, 10, 1),\n {\n \"exp_shape\": (10, 10, 1),\n \"exp_mean\": 50,\n \"exp_median\": 50,\n \"abs_delta\": 0.05,\n },\n ),\n ],\n)\ndef test_assert_statistics_match_fa(data: np.array, expected_statistics: dict) -> None:\n with pytest.raises(AssertionError):\n assert_statistics_match(data, **expected_statistics)\n","repo_name":"sentinel-hub/sentinelhub-py","sub_path":"tests/test_testing_utils.py","file_name":"test_testing_utils.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":740,"dataset":"github-code","pt":"72"} +{"seq_id":"34931053399","text":"#!/usr/bin/env python3\n############################################################\n# This script is designed to brute force the key needed\n# to execute the program pulled from the FTP server without\n# error, extract the decoded message from the output, and\n# decode the message for the user. \n#\n# This will require files pulled down from the target after\n# completing certain tasks. 
Two specific files are needed\n# in order for this script to do its job: (1) the program\n# from the FTP server, (2) a wordlist to use for the brute\n# force attack.\n#\n# The user must also put the program in the same directory\n# as this script and mark the program as executable, so it\n# can run and be brute forced.\n############################################################\n\nimport argparse\nimport os\nimport platform\nimport re\nimport requests\n\nANSI_CLRLN = \"\\r\\x1b[2K\\r\"\nANSI_RST = \"\\x1b[0m\"\nANSI_GRN = \"\\x1b[32;1m\"\nANSI_RED = \"\\x1b[31;1m\"\nANSI_BLU = \"\\x1b[34;1m\"\nANSI_YLW = \"\\x1b[33;1m\"\n\ndef SucMsg(msg):\n    print(f\"{ANSI_CLRLN}[{ANSI_GRN}+{ANSI_RST}] {msg}\")\n    return\n\ndef ErrMsg(msg):\n    print(f\"{ANSI_CLRLN}[{ANSI_RED}-{ANSI_RST}] {msg}\")\n    return\n\ndef InfoMsg(msg):\n    print(f\"{ANSI_CLRLN}[{ANSI_BLU}i{ANSI_RST}] {msg}\")\n    return\n\ndef InfoMsgNB(msg):\n    print(f\"{ANSI_CLRLN}[{ANSI_BLU}i{ANSI_RST}] {msg}\", end=\"\")\n    return\n\ndef SysMsg(msg):\n    print(f\"{ANSI_CLRLN}[{ANSI_YLW}*{ANSI_RST}] {msg}\")\n    return\n\ndef SysMsgNB(msg):\n    print(f\"{ANSI_CLRLN}[{ANSI_YLW}*{ANSI_RST}] {msg}\", end=\"\")\n    return\n\n\ndef gen_words(wordlist):\n    message = str()\n    success = bool()\n\n    try:\n        with open(wordlist) as fptr:\n            for word in fptr:\n                yield word\n        message = \"wordlist looped through successfully\"\n        success = True\n    except Exception as ex:\n        message = str(ex)\n        success = False\n\n    return (success, message)\n\n\ndef port_type(portno):\n    portno = int(portno)\n\n    if (portno < 1) or (portno > 65535):\n        # ArgumentTypeError is the exception argparse expects from a type= validator\n        raise argparse.ArgumentTypeError(\"Port must be within range 1 - 65535.\")\n\n    return portno\n\ndef brute_program(wlgen):\n    decoded = str()\n    encoded = str()\n    message = str()\n    success = bool()\n\n    try:\n        for word in wlgen:\n            word = word.replace(\"\\n\",\"\")\n            SysMsgNB(f\"Attempting {word}\")\n            output = os.popen(f\"./program {word}\").read()\n            if \"Incorrect\" not in output:\n                SucMsg(f\"Password Found: {word}\")\n                message = \"password found\"\n                success = True\n                break\n\n        if not(success):\n            raise ValueError(\"unable to find password\")\n\n        SysMsgNB(\"Searching for encoded message\")\n        pattern = \"Decode This.*\".lower()\n        r = re.compile(pattern)\n        matches = r.findall(output.lower())\n        encoded = matches[0].split(\"=>\")[1]\n\n        message = \"message successfully discovered\"\n        success = True\n    except Exception as ex:\n        encoded = \"\"\n        message = str(ex)\n        success = False\n\n    return (encoded, success, message)\n\ndef decode_message(message):\n    decodeDict = {\n        \"2\": [\"a\",\"b\",\"c\"],\n        \"3\": [\"d\",\"e\",\"f\"],\n        \"4\": [\"g\",\"h\",\"i\"],\n        \"5\": [\"j\",\"k\",\"l\"],\n        \"6\": [\"m\",\"n\",\"o\"],\n        \"7\": [\"p\",\"q\",\"r\",\"s\"],\n        \"8\": [\"t\",\"u\",\"v\"],\n        \"9\": [\"w\",\"x\",\"y\",\"z\"]\n    }\n\n    decodedLst = list()\n\n    if isinstance(message,str):\n        message = message.split(\" \")\n        message.remove(\"\")\n\n    try:\n        for seq in message:\n            key = seq[0]\n            tap = len(seq) - 1\n            decodedLst.append(decodeDict[key][tap])\n        decoded = \"\".join(decodedLst)\n        message = \"message successfully decoded\"\n        success = True\n    except Exception as ex:\n        decoded = str()\n        message = str(ex)\n        success = False\n\n    return (decoded, success, message)\n\ndef main():\n    if platform.system().lower() == \"windows\":\n        os.system(\"\")\n    \n    parser = argparse.ArgumentParser()\n\n    ############################################################\n    # Setup required command-line arguments.\n    ############################################################\n    parser.add_argument(\"wordlist\", 
help=\"worslist to use for brute force.\", type=str)\n\n args = parser.parse_args()\n\n wordlist = args.wordlist\n\n InfoMsg(f\"Wordlist: {wordlist}\")\n\n try:\n wordgen = gen_words(wordlist)\n encoded, success, message = brute_program(wordgen)\n if not(success):\n raise ValueError(message)\n SucMsg(f\"Message: {encoded}\")\n\n decoded, success, message = decode_message(encoded)\n if not(success):\n raise ValueError(message)\n SysMsg(f\"Decoded Message: {decoded}\")\n except Exception as ex:\n ErrMsg(str(ex))\n\n return\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"thomas-osgood/TryHackMe","sub_path":"PsychoBreak/get_ssh_creds.py","file_name":"get_ssh_creds.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"759084943","text":"# import itertools\n#\n# n, m = map(int, input().split())\n# city = []\n# home_index = []\n# chicken_index = []\n# save_chicken = []\n# answer=[]\n#\n# for _ in range(n):\n# city.append(list(map(int, input().split())))\n#\n# for x in range(n):\n# for y in range(n):\n# if city[x][y] == 1:\n# home_index.append([x, y])\n# elif city[x][y] == 2:\n# chicken_index.append([x, y])\n#\n# save_chicken=list(itertools.combinations(chicken_index, m))\n#\n# for i in range(len(save_chicken)):\n# chicken_load=[]\n# for x, y in home_index:\n# length = n*2\n# for j in range(len(save_chicken[0])):\n# length = min(length, abs(x-save_chicken[i][j][0])+abs(y-save_chicken[i][j][1]))\n# chicken_load.append(length)\n# answer.append(sum(chicken_load))\n# answer.sort()\n# print(answer[0])\n\nfrom itertools import combinations\n\nn, m = map(int, input().split())\nchicken, house = [], []\n\nfor r in range(n):\n data = list(map(int, input().split()))\n for c in range(n):\n if data[c] == 1:\n house.append((r, c))\n elif data[c] == 2:\n chicken.append((r, c))\n\ncandidates = list(combinations(chicken, m))\n\ndef get_sum(candidate):\n result = 0\n for hx, hy in house:\n temp = 1e9\n for cx, cy in candidate:\n temp = min(temp, abs(hx-cx)+abs(hy-cy))\n result+=temp\n return result\n\nresult = 1e9\nfor candidate in candidates:\n result = min(result, get_sum(candidate))\nprint(result)\n","repo_name":"HongGunWoo/Python-Programming-Team-Notes","sub_path":"구현/치킨 배달.py","file_name":"치킨 배달.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6552952108","text":"import numpy as np\nimport re\nfrom keras.layers import Input, Dense, SimpleRNN\nfrom keras.models import Model\nfrom keras.optimizers import SGD\nfrom keras.utils import to_categorical\nfrom keras import backend as K\n\n# Unificar y ordenar todos los nombres\narchivos_nombres = ['./RNR/RNN_county.txt']\ntodos_los_nombres = []\nfor archivo in archivos_nombres:\n with open(archivo, 'r') as f:\n todos_los_nombres.extend(f.readlines())\ntodos_los_nombres = sorted(todos_los_nombres)\n\nwith open('todos_los_nombres.txt', 'w') as f:\n f.writelines(todos_los_nombres)\n\n# Leer el set de datos\nnombres = open('todos_los_nombres.txt', 'r').read().lower()\n\n# Crear diccionario\nalfabeto = list(set(nombres))\ntam_datos, tam_alfabeto = len(nombres), len(alfabeto)\ncar_a_ind = {car: ind for ind, car in enumerate(sorted(alfabeto))}\nind_a_car = {ind: car for ind, car in enumerate(sorted(alfabeto))}\n\n# Inicializar variables\nn_a = 25 # Número de unidades en la capa oculta\n\n# Definir modelo\nentrada = Input(shape=(None, tam_alfabeto))\na0 = 
Input(shape=(n_a,))\ncelda_recurrente = SimpleRNN(n_a, activation='tanh', return_state=True)\ncapa_salida = Dense(tam_alfabeto, activation='softmax')\n\nsalida = []\nhs, _ = celda_recurrente(entrada, initial_state=a0)\nsalida.append(capa_salida(hs))\nmodelo = Model([entrada, a0], salida)\nopt = SGD(learning_rate=0.0005)\nmodelo.compile(optimizer=opt, loss='categorical_crossentropy')\n\n# Generate training examples\ndef train_generator():\n    while True:\n        ejemplo = todos_los_nombres[np.random.randint(0, len(todos_los_nombres))].strip().lower()\n        X = [None] + [car_a_ind[c] for c in ejemplo]\n        Y = X[1:] + [car_a_ind['\\n']]\n        x = np.zeros((len(X), 1, tam_alfabeto))\n        onehot = to_categorical(X[1:], tam_alfabeto).reshape(len(X) - 1, 1, tam_alfabeto)\n        x[1:, :, :] = onehot\n        y = to_categorical(Y, tam_alfabeto).reshape(len(X), tam_alfabeto)\n        a = np.zeros((len(X), n_a))\n        yield [x, a], y\n\n# Train the model\nBATCH_SIZE = 80\nNITS = 10000\nfor j in range(NITS):\n    historia = modelo.fit(train_generator(), steps_per_epoch=BATCH_SIZE, epochs=1, verbose=0)\n    if j % 1000 == 0:\n        print(f'Iteration: {j}, Loss: {historia.history[\"loss\"][0]}')\n\n# Generate names with the trained model\ndef generar_nombre():\n    x = np.zeros((1, 1, tam_alfabeto))\n    a = np.zeros((1, n_a))\n    nombre_generado = ''\n    fin_linea = '\\n'\n    car = -1\n    contador = 0\n\n    while (car != fin_linea and contador != 50):\n        a, _ = celda_recurrente(K.constant(x), initial_state=K.constant(a))\n        y = capa_salida(a)\n        prediccion = K.eval(y)\n        ix = np.random.choice(list(range(tam_alfabeto)), p=prediccion.ravel())\n        car = ind_a_car[ix]\n        nombre_generado += car\n        x = to_categorical(ix, tam_alfabeto).reshape(1, 1, tam_alfabeto)\n        a = K.eval(a)\n        contador += 1\n\n    if contador == 50:\n        nombre_generado += '\\n'\n\n    if len(nombre_generado.strip()) <= 4:\n        return None\n\n    if re.search(r\"[aeiou]{3,}|[bcdfghjklmnpqrstvwxyz]{3,}\", nombre_generado):\n        return None\n\n    return nombre_generado\n\nfor i in range(100):\n    nombre = None\n    while nombre is None:\n        nombre = generar_nombre()\n    print(nombre)\n","repo_name":"fitomx19/Temas_Selectos_IA","sub_path":"Practica_5/RNR/RNN_generacion_nombres.py","file_name":"RNN_generacion_nombres.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13272542516","text":"from os import system\nfrom Cryptodome.Hash import SHA256\nfrom Cryptodome.PublicKey import RSA\nfrom Cryptodome.Signature import PKCS1_v1_5\n\n\n# signature key generation function\n# takes the paths where the generated output keys should be saved\ndef generarFirma(clave_privada_out, clave_publica_out):\n    key=RSA.generate(1024)\n\n    with open(clave_privada_out, 'wb') as f:\n        f.write(key.exportKey('PEM'))\n        f.close()\n    with open(clave_publica_out, 'wb') as f:\n        f.write(key.publickey().exportKey('PEM'))\n        f.close()\n    return 0\n\n''' # example\nclave_privada_out1='chemapriv.PEM'\nclave_publica_out1='chemapub.PEM'\ngenerarFirma(clave_privada_out1,clave_publica_out1)\n'''","repo_name":"mausanch/crypto-service-flask","sub_path":"Gen_Key.py","file_name":"Gen_Key.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25833564855","text":"\"\"\"Contains the Undertaker Character class\"\"\"\n\nimport datetime\nimport json\nimport random\n\nimport discord\nimport toml\n\nimport botutils\nimport globvars\nfrom botc import (BOTCUtils, 
Character, Demon, Minion, NonRecurringAction,\n Outsider, Townsfolk, emojis, game_text)\nfrom botc.gamemodes.troublebrewing.Drunk import Drunk\n\nfrom . import character_texts\nfrom ._utils import TBRole, TroubleBrewing\n\ncharacter_text = character_texts[TBRole.undertaker.value.lower()]\n\nundertaker_nightly = game_text[\"gameplay\"][\"undertaker_nightly\"]\ncopyrights_str = game_text[\"misc\"][\"copyrights\"]\n\nfrom config import DISABLE_DMS\n\n\nclass Undertaker(Townsfolk, TroubleBrewing, Character, NonRecurringAction):\n \"\"\"Undertaker: Each night, you learn which character died by execution today.\n\n ===== UNDERTAKER =====\n\n true_self = undertaker\n ego_self = undertaker\n social_self = undertaker\n\n commands:\n - None\n\n initialize setup? -> NO\n initialize role? -> NO\n\n ----- First night\n START:\n override first night instruction? -> NO # default is to send instruction string only\n\n ----- Regular night\n START:\n override regular night instruction? -> YES # default is to send nothing\n => Send passive nightly information\n \"\"\"\n\n def __init__(self):\n\n Character.__init__(self)\n TroubleBrewing.__init__(self)\n Townsfolk.__init__(self)\n\n self._desc_string = character_text[\"description\"]\n self._examp_string = character_text[\"examples\"]\n self._instr_string = character_text[\"instruction\"]\n self._lore_string = character_text[\"lore\"]\n self._brief_string = character_text[\"brief\"]\n self._action = character_text[\"action\"]\n\n self._art_link = \"https://bloodontheclocktower.com/wiki/images/f/fe/Undertaker_Token.png\"\n self._art_link_cropped = \"https://imgur.com/3CpqHsL.png\"\n self._wiki_link = \"https://bloodontheclocktower.com/wiki/Undertaker\"\n\n self._role_enum = TBRole.undertaker\n self._emoji = emojis[\"troublebrewing\"][\"undertaker\"]\n\n def create_n1_instr_str(self):\n \"\"\"Create the instruction field on the opening dm card\"\"\"\n\n # First line is the character instruction string\n msg = f\"{self.emoji} {self.instruction}\"\n addendum = character_text[\"n1_addendum\"]\n\n # Some characters have a line of addendum\n if addendum:\n scroll_emoji = botutils.BotEmoji.scroll\n msg += f\"\\n{scroll_emoji} {addendum}\"\n\n return msg\n\n def __create_droisoned_info(self):\n \"\"\"Create drunk/poisoned information for the undertaker info\"\"\"\n\n executed_player = globvars.master_state.game.today_executed_player\n\n # If someone has been executed\n if executed_player:\n\n tb_townsfolk_all = BOTCUtils.get_role_list(TroubleBrewing, Townsfolk)\n tb_outsider_all = BOTCUtils.get_role_list(TroubleBrewing, Outsider)\n tb_minion_all = BOTCUtils.get_role_list(TroubleBrewing, Minion)\n tb_demon_all = BOTCUtils.get_role_list(TroubleBrewing, Demon)\n\n executed_player.role.set_new_social_self(executed_player)\n\n # The executed player has a good role. The droisoned undertaker will see a\n # bad role.\n if executed_player.role.social_self.is_good():\n pool = tb_minion_all + tb_demon_all\n ret = random.choice(pool)\n return ret\n\n # The executed player has a bad role. 
The droisoned undertaker will see\n # a good role.\n else:\n pool = tb_townsfolk_all + tb_outsider_all\n pool = [character for character in pool if character.name != Undertaker().name]\n ret = random.choice(pool)\n return ret\n\n # If no one is executed, then send none\n else:\n return None\n\n async def send_n1_end_message(self, recipient):\n \"\"\"Send the character of the executed player today.\"\"\"\n await self.send_regular_night_end_dm(recipient)\n\n async def send_regular_night_end_dm(self, recipient):\n \"\"\"Send the character of the executed player today.\"\"\"\n\n if DISABLE_DMS:\n return\n\n player = BOTCUtils.get_player_from_id(recipient.id)\n\n # Dead players do not receive anything\n if not player.is_alive():\n return\n\n executed_player = globvars.master_state.game.today_executed_player\n if player.is_droisoned():\n # Poisoned info\n character_of_executed = self.__create_droisoned_info()\n else:\n if executed_player:\n executed_player.role.set_new_social_self(executed_player)\n character_of_executed = executed_player.role.social_self\n else:\n character_of_executed = None\n\n if not character_of_executed:\n return\n\n # Add information to replay\n drunk = \"Drunk \" if player.role.true_self.name == Drunk().name else \"\"\n globvars.master_state.game.replay += f\"- {recipient.name} ({drunk}Undertaker) learns that \"\\\n f\"{executed_player.user.name} is {character_of_executed}\\n\"\n executed_player.add_reminder_token('botc/assets/tb_reminder_tokens_cropped/undertaker_dead.png')\n\n msg = f\"***{recipient.name}#{recipient.discriminator}***, the **{self.name}**:\"\n msg += \"\\n\"\n msg += self.emoji + \" \" + self.instruction\n msg += \"\\n\"\n msg += undertaker_nightly.format(character_of_executed.name)\n\n embed = discord.Embed(description = msg)\n embed.set_thumbnail(url = character_of_executed._art_link_cropped)\n embed.set_footer(text = copyrights_str)\n embed.timestamp = datetime.datetime.utcnow()\n\n try:\n await recipient.send(embed = embed)\n except discord.Forbidden:\n pass\n","repo_name":"0xallie/Blood-on-the-Clocktower-Storyteller-Discord-Bot","sub_path":"botc/gamemodes/troublebrewing/Undertaker.py","file_name":"Undertaker.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"32047626162","text":"\"\"\"male names not required\n\nRevision ID: 7cb63055cd27\nRevises: 7de96aea0f36\nCreate Date: 2023-05-15 21:05:40.227339\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '7cb63055cd27'\ndown_revision = '7de96aea0f36'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('user', 'first_name',\n existing_type=sa.VARCHAR(length=256),\n nullable=True)\n op.alter_column('user', 'last_name',\n existing_type=sa.VARCHAR(length=256),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.alter_column('user', 'last_name',\n               existing_type=sa.VARCHAR(length=256),\n               nullable=False)\n    op.alter_column('user', 'first_name',\n               existing_type=sa.VARCHAR(length=256),\n               nullable=False)\n    # ### end Alembic commands ###\n","repo_name":"Melevir/learn_bot","sub_path":"migrations/versions/7cb63055cd27_male_names_not_required.py","file_name":"7cb63055cd27_male_names_not_required.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"33151976167","text":"class Node:\r\n    def __init__(self,datava1=None):\r\n        self.datava1=datava1\r\n        self.nextval=None\r\nclass sl:\r\n    def __init__(self):\r\n        self.headval =None\r\n    def listp(self):\r\n        prival=self.headval\r\n        while prival is not None:\r\n            print(prival.datava1)\r\n            prival=prival.nextval\r\n    def atbiagning(self,newdata):\r\n        # insert the new node at the head of the list\r\n        newnode= Node(newdata)\r\n        newnode.nextval=self.headval\r\n        self.headval=newnode\r\nlist = sl()\r\nlist.headval=Node(\"mon\")\r\ne2 =Node(\"tue\")\r\ne3=Node('wed')\r\nlist.headval.nextval=e2\r\ne2.nextval=e3\r\nlist.atbiagning(\"sun\")\r\nlist.listp()\r\n\r\n    ","repo_name":"AhmadAlshammri/Data-Structure","sub_path":"new lap.py","file_name":"new lap.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15174017621","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom PIL import Image\r\nimport time\r\nimport pytesseract\r\n\r\nclass GetCode():\r\n    def __init__(self,driver):\r\n        self.driver = driver\r\n\r\n    def save_image(self,file_name):\r\n        self.driver.save_screenshot(file_name)\r\n        image_element = self.driver.find_element_by_id('getcode_num')\r\n        left = image_element.location['x']\r\n        top = image_element.location['y']\r\n        right = image_element.size['width'] + left\r\n        height = image_element.size['height'] + top\r\n        im = Image.open(file_name)\r\n        img = im.crop((left,top,right,height))\r\n        img.save(file_name)\r\n        time.sleep(2)\r\n        #return img\r\n\r\n\r\n    def code_online(self,file_name):\r\n        self.save_image(file_name)\r\n        # pytesseract expects a PIL Image (or a path), not a raw file object\r\n        image = Image.open(file_name)\r\n        #image = image.convert('RGB')\r\n        code_text = pytesseract.image_to_string(image)\r\n        time.sleep(2)\r\n        if code_text == '':\r\n            return '123'\r\n        else:\r\n            return code_text\r\n","repo_name":"ganyusheng/seleniumPython","sub_path":"web_auto_test/util/get_code.py","file_name":"get_code.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"43277596247","text":"from data_analysis.parallel_statistics.err_mem_size_statistics import prlp_tag, rlp_tag, mem_size_tag\nfrom paper_figures.tkde.data_legacy.static_data_loader import *\n\n\ndef get_algorithm_indexing_time_lst(tag):\n    if tag is vldbj_reasd_tag:\n        return [0.00522976, 0.00612305, 0.00374263, 0.00103439, 0.00722572, 0.00366369,\n                # 0.00847435,\n                0.00673668, 0.020272, 0.0117233, 0.00976721, 0.0828088, 0,\n                0.00699347, 0.0430287]\n    elif tag is vldbj_readrq_tag:\n        return [0.000235797, 0.000275629, 0.00020318, 7.15575e-05, 0.000356041, 0.000165915,\n                # 0.000449374,\n                0.000430675, 0.00149114, 0.000618879, 0.000762481, 0.00199481, 0.0045728,\n                0.000455444, 0.00191903]\n    # local_push\n    else:\n        return [0.00130415, 0.00241406, 0.00237629, 0.00163426, 0.0139743, 0.0493718,\n                # 0.124753,\n                0.102021, 0.271308, 0.268973, 1.25391, 2.47118, 9.09415,\n                0.56654, 8.8932]\n\n\ndef get_algorithm_mem_usage_lst(tag):\n    relative_data_root_dir_path = 
'../../..'\n\n if tag in [prlp_tag, rlp_tag]:\n with open('{}/data_analysis/data-json/parallel_exp/err_mem_size04_24.json'.format(\n relative_data_root_dir_path)) as ifs:\n our_algo_dict = json.load(ifs)[tag]\n mem_lst = [our_algo_dict[data_set][mem_size_tag] for data_set in local_data_set_lst]\n else:\n with open('{}/data_analysis/data-json/parallel_exp/seq_mem_previous.json'.format(\n relative_data_root_dir_path)) as ifs:\n seq_other_algo_dict = json.load(ifs)[tag]\n mem_lst = [seq_other_algo_dict[data_set] for data_set in local_data_set_lst]\n return list(map(lambda mem_size: mem_size / 1024., mem_lst))\n\n\nif __name__ == '__main__':\n logger = get_logger('/home/yche/logs/tmp.log', __name__)\n data_set_lst = [\n \"ca-GrQc\", \"ca-HepTh\", \"p2p-Gnutella06\", \"wiki-Vote\",\n \"email-Enron\", \"email-EuAll\", \"digg-friends\", \"web-Stanford\",\n \"web-BerkStan\", \"web-Google\", \"flickr-growth\", \"cit-Patents\",\n \"soc-LiveJournal1\", \"wiki-Link\"\n ]\n local_data_set_lst = [\n \"ca-GrQc\", \"ca-HepTh\", \"p2p-Gnutella06\", \"wiki-Vote\",\n \"email-Enron\", \"email-EuAll\", \"web-Stanford\",\n \"web-BerkStan\", \"web-Google\", \"cit-Patents\",\n \"soc-LiveJournal1\",\n ]\n with open('inc-sr.json') as ifs:\n inc_sr_dict = json.load(ifs)\n\n # 1st: time\n time_dict = dict()\n for tag in [vldbj_reasd_tag, vldbj_readrq_tag, vldbj_dlp_tag]:\n time_dict[tag] = dict(zip(data_set_lst, get_algorithm_indexing_time_lst(tag)))\n time_dict[icde_inc_sr_tag] = inc_sr_dict['time']\n time_dict[tkde_pdlp_tag] = dict(\n zip(data_set_lst, [x / 15. for x in get_algorithm_indexing_time_lst(vldbj_dlp_tag)]))\n with open('vldbj-icde-dynamic-time.json', 'w') as ofs:\n ofs.write(json.dumps(time_dict, indent=4))\n\n with open('vldbj-mem-size.json') as ifs:\n mem_size_dict = json.load(ifs)\n mem_size_dict[icde_inc_sr_tag] = inc_sr_dict['mem']\n mem_size_dict.pop(vldbj_probesim_tag)\n mem_size_dict.pop(vldbj_sling_tag)\n mem_size_dict[tkde_pdlp_tag] = dict(zip(local_data_set_lst, get_algorithm_mem_usage_lst(prlp_tag)))\n mem_size_dict[vldbj_dlp_tag] = dict(zip(local_data_set_lst, get_algorithm_mem_usage_lst(rlp_tag)))\n with open('vldbj-icde-dynamic-mem.json', 'w') as ofs:\n ofs.write(json.dumps(mem_size_dict, indent=4))\n","repo_name":"RapidsAtHKUST/SimRank","sub_path":"python_experiments/paper_figures/tkde/data_legacy/dynamic_time_mem.py","file_name":"dynamic_time_mem.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"5660412887","text":"#AJAY NAIK 1901me14\ndef get_memory_score(input_list):\n l_2=[]\n count=0\n for x in input_list:\n if x in l_2:\n count+=1\n else:\n l_2.append(x)\n if len(l_2)==6:\n l_2.remove(l_2[0])\n return count\n\ndef is_digit(input_list2):\n invalid_l2=[]\n ok=True\n for x in input_list2:\n if type(x)==int and x<10 and x>=0:\n continue\n else:\n invalid_l2.append(x)\n ok=False\n \n if ok:\n return True\n else:\n print (\"Please enter a valid input list\")\n print (\"Invalid inputs detected:\", invalid_l2)\n return False\n\ninput_list=[3,4,5,3,2,1,6,6,6,9,5]\ns=is_digit(input_list)\nif s:\n print (\"Score:\",get_memory_score(input_list))\n\n\n ","repo_name":"ajay123101/1901ME14_2021","sub_path":"tut02/tut02.py","file_name":"tut02.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19317892675","text":"import os\nimport numpy as np\nfrom collections import OrderedDict\nimport 
scipy.stats\nimport matplotlib.pyplot as plt\nimport sys\nfrom statistics import mean\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nfrom get_data_for_bayes import CommonFunctions\n\nDataDetailsFolder = '/home/sheffieldlab/Desktop/NoReward/Scripts/AnimalDetails/'\nsys.path.append(DataDetailsFolder)\nimport DataDetails\n\n# For plotting styles\nPlottingFormat_Folder = '/home/sheffieldlab/Desktop/NoReward/Scripts/PlottingTools/'\nsys.path.append(PlottingFormat_Folder)\nimport plottingfunctions as pf\n\n\ndef compiled_decoding_accuracy_by_cells(DataFolder, taskstoplot, datatype, ax_numcell, legendflag=0):\n cmd = CompileModelData(DataFolder, taskstoplot, datatype)\n cmd.compile_numcells(ax_numcell, taskstoplot, legendflag)\n\n\ndef compile_confusion_matrix_bytask(DataFolder, taskdict, fighandle, ax_cm, datatype):\n cmd = CompileModelData(DataFolder, taskdict, datatype)\n cmd.compile_confusion_matrix(fighandle, ax_cm)\n\n\ndef compile_meanbderror_bytask(DataFolder, taskdict, ax_er, datatype):\n cmd = CompileModelData(DataFolder, taskdict, datatype)\n cmd.compile_mean_bderror(ax_er)\n\n\ndef compile_error_bytrack(DataFolder, taskdict, ax_er, datatype):\n cmd = CompileModelData(DataFolder, taskdict, datatype)\n cmd.compile_meanerror_bytrack(ax_er)\n\n\nclass CompileModelData(object):\n def __init__(self, DataFolder, taskdict, datatype):\n self.DataFolder = DataFolder\n self.animals = [f for f in os.listdir(self.DataFolder) if\n f not in ['LickData', 'BayesResults_All', 'SaveAnalysed']]\n self.datatype = datatype\n self.tracklength = 200\n self.trackbins = 5\n self.taskdict = taskdict\n\n def compile_numcells(self, ax, taskstoplot, legendflag=0):\n percsamples = [1, 5, 10, 20, 50, 80, 100]\n percsamples = [f'%d%%' % p for p in percsamples]\n numcells_combined = pd.DataFrame([])\n for a in self.animals:\n print(a)\n animalinfo = DataDetails.ExpAnimalDetails(a)\n if self.datatype == 'endzonerem':\n bayesmodel = np.load(\n os.path.join(animalinfo['saveresults'], 'modeloneachtask_withendzonerem.npy'),\n allow_pickle=True).item()\n else:\n bayesmodel = np.load(os.path.join(animalinfo['saveresults'], 'modeloneachtask.npy'),\n allow_pickle=True).item()\n\n for t in animalinfo['task_dict']:\n numcells_dataframe = bayesmodel[t]['Numcells_Dataframe']\n numcells_dataframe['Task'] = t\n numcells_dataframe['animalname'] = a\n numcells_combined = pd.concat((numcells_combined, numcells_dataframe), ignore_index=True)\n g = numcells_combined.groupby(['SampleSize', 'Task', 'animalname']).agg([np.mean]).reset_index()\n g.columns = g.columns.droplevel(1)\n sns.pointplot(x='SampleSize', y='R2_angle', data=g[g.Task.isin(taskstoplot)], order=percsamples, hue='Task',\n ax=ax)\n if legendflag:\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n else:\n ax.get_legend().remove()\n ax.set_xlabel('Percentage of active cells used')\n ax.set_ylabel('R-squared')\n # ax.set_aspect(aspect=1.6)\n pf.set_axes_style(ax, numticks=4)\n\n for t in self.taskdict:\n if t != 'Task1':\n d, p = Stats.significance_test(g[g.Task == t]['R2'], g[g.Task == 'Task1']['R2'], type_of_test='KStest')\n print(f'%s: KStest : p-value %0.4f' % (t, p))\n\n def compile_confusion_matrix(self, fs, ax):\n cm_all = {k: np.zeros((int(self.tracklength / self.trackbins), int(self.tracklength / self.trackbins))) for k in\n self.taskdict.keys()}\n for a in self.animals:\n animalinfo = DataDetails.ExpAnimalDetails(a)\n if self.datatype == 'endzonerem':\n bayesmodel = np.load(\n os.path.join(animalinfo['saveresults'], 
'modeloneachtask_withendzonerem.npy'),\n allow_pickle=True).item()\n else:\n bayesmodel = np.load(os.path.join(animalinfo['saveresults'], 'modeloneachtask.npy'),\n allow_pickle=True).item()\n\n for t in animalinfo['task_dict']:\n cm = bayesmodel[t]['cm']\n cm_all[t] += cm\n\n for n, t in enumerate(self.taskdict.keys()):\n cm_all[t] = cm_all[t].astype('float') / cm_all[t].sum(axis=1)[:, np.newaxis]\n img = ax[n].imshow(cm_all[t], cmap=\"Blues\", vmin=0, vmax=0.4, interpolation='nearest')\n ax[n].plot(ax[n].get_xlim()[::-1], ax[n].get_ylim(), ls=\"--\", c=\".3\", lw=1)\n pf.set_axes_style(ax[n], numticks=3, both=True)\n\n if n == len(self.taskdict) - 1:\n CommonFunctions.create_colorbar(fighandle=fs, axis=ax[n], imghandle=img, title='Probability',\n ticks=[0, 0.4])\n\n ax[0].set_ylabel('Actual')\n ax[0].set_xlabel('Predicted')\n\n def compile_mean_bderror(self, ax):\n mean_error = {k: [] for k in ['R2', 'Task', 'animalname', 'BD error (cm)', 'BD accuracy']}\n for a in self.animals:\n animalinfo = DataDetails.ExpAnimalDetails(a)\n if self.datatype == 'endzonerem':\n bayesmodel = np.load(\n os.path.join(animalinfo['saveresults'], 'modeloneachtask_withendzonerem.npy'),\n allow_pickle=True).item()\n else:\n bayesmodel = np.load(os.path.join(animalinfo['saveresults'], 'modeloneachtask.npy'),\n allow_pickle=True).item()\n for t in animalinfo['task_dict'].keys():\n kfold = np.max(bayesmodel[t]['K-foldDataframe']['CVIndex'])\n mean_error['R2'].extend(bayesmodel[t]['K-foldDataframe']['R2_angle'])\n mean_error['BD accuracy'].extend(bayesmodel[t]['K-foldDataframe']['ModelAccuracy'])\n for k in np.arange(kfold):\n mean_error['BD error (cm)'].append(\n np.mean(bayesmodel[t]['K-foldDataframe']['Y_ang_diff'][k] * self.trackbins))\n mean_error['Task'].append(t)\n mean_error['animalname'].append(a)\n\n mean_error_df = pd.DataFrame.from_dict(mean_error)\n for n, i in enumerate(['R2', 'BD accuracy', 'BD error (cm)']):\n sns.boxplot(x='Task', y=i, data=mean_error_df, palette='Blues', ax=ax[n], showfliers=False)\n for t in self.taskdict.keys():\n if t != 'Task1':\n d, p = Stats.significance_test(mean_error_df[mean_error_df.Task == t][i],\n mean_error_df[mean_error_df.Task == 'Task1'][i],\n type_of_test='KStest')\n print(f'%s: %s: KStest: p-value %0.4f' % (i, t, p))\n ax[n].set_xlabel('')\n pf.set_axes_style(ax[n], numticks=4)\n\n def compile_meanerror_bytrack(self, ax):\n numbins = int(self.tracklength / self.trackbins)\n numanimals = np.size(self.animals)\n kfold = 10\n Y_diff_by_track = {k: np.zeros((numanimals, kfold, numbins)) for k in ['Task1', 'Task2']}\n Y_diff_by_track_mean = {k: [] for k in ['Task1', 'Task2']}\n\n for n, a in enumerate(self.animals):\n print(a)\n animalinfo = DataDetails.ExpAnimalDetails(a)\n if self.datatype == 'endzonerem':\n bayesmodel = np.load(\n os.path.join(animalinfo['saveresults'], 'modeloneachtask_withendzonerem.npy'),\n allow_pickle=True).item()\n else:\n bayesmodel = np.load(os.path.join(animalinfo['saveresults'], 'modeloneachtask.npy'),\n allow_pickle=True).item()\n for t in ['Task1', 'Task2']:\n for k in np.arange(kfold):\n y_diff = np.asarray(bayesmodel[t]['K-foldDataframe']['Y_ang_diff'][k]) * self.trackbins\n y_test = np.asarray(bayesmodel[t]['K-foldDataframe']['y_test'][k])\n for i in np.arange(numbins):\n Y_indices = np.where(y_test == i)[0]\n Y_diff_by_track[t][n, k, i] = np.mean(y_diff[Y_indices])\n\n for t in ['Task1', 'Task2']:\n Y_diff_by_track_mean[t] = Y_diff_by_track[t].reshape(numanimals * kfold, numbins)\n meandiff, semdiff = 
np.nanmean(Y_diff_by_track_mean[t], 0), scipy.stats.sem(Y_diff_by_track_mean[t], 0,\n nan_policy='omit')\n error1, error2 = meandiff - semdiff, meandiff + semdiff\n ax.plot(np.arange(numbins), meandiff)\n ax.fill_between(np.arange(numbins), error1, error2, alpha=0.5)\n ax.set_xlabel('Track Length (cm)')\n ax.set_ylabel('BD error (cm)')\n ax.set_xlim((0, numbins))\n\n d, p = Stats.significance_test(np.mean(Y_diff_by_track_mean['Task2'], 0),\n np.mean(Y_diff_by_track_mean['Task1'], 0),\n type_of_test='KStest')\n print(f'KStest: p-value %0.4f' % p)\n\n CommonFunctions.convertaxis_to_tracklength(ax, self.tracklength, self.trackbins, convert_axis='x')\n ax.set_xlim((0, self.tracklength / self.trackbins))\n pf.set_axes_style(ax, numticks=4)\n\n\nclass Stats:\n @staticmethod\n def significance_test(x, y, type_of_test='KStest'):\n if type_of_test == 'KStest':\n d, p = scipy.stats.ks_2samp(x, y)\n return d, p\n elif type_of_test == 'Wilcoxon':\n s, p = scipy.stats.ranksums(x, y)\n return s, p\n elif type_of_test == 'ttest':\n s, p = scipy.stats.ttest_rel(x, y)\n return s, p\n","repo_name":"seethakris/HPCrewardpaper","sub_path":"BayesDecoder/CompiledBayesplots.py","file_name":"CompiledBayesplots.py","file_ext":"py","file_size_in_byte":10087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43277596247","text":"from recommendedSpotifyPlaylist import *\n\n# Enter your username here\nusername = \"\"\n\n# Enter the playlist name here\nplaylistName = \"\"\n\n# Enter the playlist description here\nplaylistDescription = \"\"\n\n# Enter the number of songs you wish to add to your playlist here\nnumSongs = 5\n\nif authenticate(username) == True:\n try:\n createAPlaylist(username, playlistName, playlistDescription, numSongs)\n print(\"Success!\")\n except:\n print(\"Something went wrong. 
Try again\")","repo_name":"TylerKay/Spotify-Playlist-Generator","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27928687502","text":"# ---------------------------------------------------------------------------------------------------------------------\n# 3d surface reconstruction from point-cloud with EikoNet\n# ---------------------------------------------------------------------------------------------------------------------\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n# imports\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport array as arr\nimport sys\nsys.path.append(\"./eikonal\")\nimport temp\nfrom plyfile import PlyData\nfrom torch.autograd import grad\nimport os\nfrom datetime import datetime\nimport GPUtil\nimport argparse\n\n# ---------------------------------------------------------------------------------------------------------------------\ndef mkdir_ifnotexists(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\ndef set_available_gpu():\n deviceIDs = GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, maxMemory=0.5, includeNan=False, excludeID=[],\n excludeUUID=[])\n gpu = deviceIDs[0]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '{0}'.format(gpu)\n\n# regularization coefficient\nALPHA = 0.1\n# normals regularization coefficient\nALPHA2 = 1.0\n# sofplus coefficient\nBETA = 100\n# ---------------------------------------------------------------------------------------------------------------------\n\nclass ImplicitNetwork(nn.Module):\n def __init__(\n self,\n latent_size,\n d_in,\n d_out,\n dims,\n skip_in=(),\n weight_norm=False,\n geometric_init=False,\n bias=1.0,\n ):\n super().__init__()\n\n dims = [d_in + latent_size] + dims + [d_out]\n\n self.pc_dim = d_in #\n self.d_in = d_in + latent_size\n self.num_layers = len(dims)\n self.skip_in = skip_in\n\n for l in range(0, self.num_layers - 1):\n if l + 1 in self.skip_in:\n out_dim = dims[l + 1] - dims[0]\n else:\n out_dim = dims[l + 1]\n\n # lin = nng.LinearGrad(dims[l], out_dim)\n lin = nn.Linear(dims[l], out_dim)\n\n if geometric_init:\n if l == self.num_layers - 2:\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, -bias)\n else:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n # self.softplus = nng.SoftplusGrad(beta=100)\n self.softplus = nn.Softplus(beta=100)\n # self.softplus = nn.Softplus()\n\n # def forward(self, input, compute_grad=False):\n def forward(self, input):\n '''\n :param input: [shape: (N x d_in)]\n :param compute_grad: True for computing the input gradient. 
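The Spotify driver above wraps playlist creation in a bare `except`, which also swallows genuine programming errors. A hedged sketch of narrower handling; `SpotifyError` is a hypothetical stand-in name, since the real exception type would live in the `recommendedSpotifyPlaylist` module that is not shown:

class SpotifyError(Exception):
    """Hypothetical stand-in for the library's actual error type."""

def create_playlist_safely(create_fn, *args):
    try:
        create_fn(*args)
        print("Success!")
    except SpotifyError as exc:   # catch only expected failures, not every bug
        print("Something went wrong ({}). Try again".format(exc))

create_playlist_safely(lambda *a: None, "username", "My Playlist")  # prints Success!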
default=False\n :return: x: [shape: (N x d_out)]\n x_grad: input gradient if compute_grad=True [shape: (N x d_in x d_out)]\n None if compute_grad=False\n '''\n x = input\n # x_grad = None\n\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n if l in self.skip_in:\n x = torch.cat([x, input], 1) / np.sqrt(2)\n # if compute_grad:\n # skip_grad = torch.eye(self.d_in, device=x.device)[:, -self.pc_dim:].repeat(input.shape[0], 1, 1)#\n # x_grad = torch.cat([x_grad, skip_grad], 1) / np.sqrt(2)\n\n # x, x_grad = lin(x, x_grad, compute_grad, l == 0, self.pc_dim)\n x = lin(x)\n\n if l < self.num_layers - 2:\n # x, x_grad = self.softplus(x, x_grad, compute_grad)\n x = self.softplus(x)\n\n # return x, x_grad\n return x\n\n def gradient(self, x):\n x.requires_grad_(True)\n y = self.forward(x)\n d_output = torch.ones_like(y, requires_grad=False, device=y.device)\n gradients = torch.autograd.grad(\n outputs=y,\n inputs=x,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradients.unsqueeze(1)\n\n\n# --------------------------------------------------------------------------------------------------------------------#\n# --------------------------------------------------------------------------------------------------------------------#\n# --------------------------------------------------------------------------------------------------------------------#\n\nif __name__ == '__main__':\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', type=str, default='debug_3d')\n parser.add_argument('--pc_path', type=str)\n args = parser.parse_args()\n\n device = 'gpu'\n set_available_gpu()\n\n # output path+name\n exps_folder_name = 'exps'\n expname = args.name\n expdir = os.path.join(os.path.dirname(__file__), exps_folder_name, expname)\n mkdir_ifnotexists(os.path.join('.', exps_folder_name))\n mkdir_ifnotexists(expdir)\n timestamp = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.now())\n mkdir_ifnotexists(os.path.join(expdir, timestamp))\n expdir = os.path.join(expdir, timestamp)\n\n # model parameters\n d_in = 3\n d_out = 1\n dims = [512, 512, 512] #, 512, 512, 512, 512, 512]\n skips = [] # [4] # before which layers we do the skip connection\n bias = 1.0\n N = 128 **2 # batch size\n\n # training parameters\n max_epochs = 100001\n learning_rate = 1.0 * 1e-4\n # learning_rate_decay = 0.95\n decrease_lr_every = 0 # 1000\n decrease_lr_by = 1.0 # 0.5\n sigma_nn = 10 #50\n\n # output surface every\n output_surface_every = 1000\n\n # create our MLP model\n model = ImplicitNetwork(0, d_in, d_out, dims, skip_in=skips, geometric_init=True)\n\n if (device == 'gpu'):\n model = model.cuda()\n\n # optimize model\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n # # load point cloud\n with open(args.pc_path, \"rb\") as f:\n plydata = PlyData.read(f)\n vertices = np.concatenate([plydata[\"vertex\"][\"x\"].reshape(-1, 1), plydata[\"vertex\"][\"y\"].reshape(-1, 1), plydata[\"vertex\"][\"z\"].reshape(-1, 1)], axis=1).astype(\"float64\")\n point_set = torch.tensor(vertices, dtype=torch.float32)\n \n # normalize shape to 1.0 radius ball\n point_set = point_set-point_set.mean(dim=0)\n scale = np.max(np.linalg.norm(point_set,np.inf,axis=1))\n point_set = 1.0*point_set/scale\n point_set_np = point_set.cpu().numpy()\n sigma_set = torch.ones(len(point_set)) * 0.01\n\n for t in range(max_epochs):\n # use per point sigma defined above to sample 
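The `gradient` method above is the standard autograd recipe for the spatial gradient of a scalar field: mark the input as requiring gradients, run the forward pass, then differentiate the output back to the input with a ones vector as `grad_outputs`. The same pattern in miniature, on a throwaway MLP:

import torch

f = torch.nn.Sequential(
    torch.nn.Linear(3, 16),
    torch.nn.Softplus(beta=100),   # same smooth activation family as the network above
    torch.nn.Linear(16, 1),
)
x = torch.randn(5, 3, requires_grad=True)
y = f(x)
grad_x = torch.autograd.grad(
    outputs=y, inputs=x,
    grad_outputs=torch.ones_like(y),  # one gradient per sample in the batch
    create_graph=True,                # keep the graph so a loss on |grad| is trainable
)[0]
print(grad_x.shape)  # torch.Size([5, 3])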
points\n random_idx = torch.randperm(point_set.shape[0])[:N]\n S = torch.index_select(point_set, 0, random_idx).cuda()\n sigmas = torch.index_select(sigma_set, 0, random_idx).cuda()\n X_normal = (torch.randn(1, S.shape[0], d_in).cuda() * sigmas.unsqueeze(0).unsqueeze(2).repeat(\n 1, 1, 3) + S.unsqueeze(0).repeat(1, 1, 1)).reshape(1 * S.shape[0], d_in).cuda()\n\n X_general = torch.empty(S.shape[0] // 2, d_in).uniform_(-1.0, 1.0).cuda()\n X = torch.cat([X_normal, X_general], 0)\n\n # compute loss\n Y = model(S)\n grad = model.gradient(X)\n\n grad_norm = grad[:,0,:].norm(2, dim=1)\n grad_loss = ((grad_norm - 1) ** 2).mean()\n\n loss_fn = (torch.abs(Y)).mean() + ALPHA * grad_loss #+ ALPHA2 * surface_normals_loss\n\n # print loss and grad loss every 500 epochs\n if divmod(t, 100)[1] == 0:\n print(expname, timestamp, t, 'loss =', loss_fn.item(), 'grad_loss =', grad_loss.item())\n\n # backward pass\n optimizer.zero_grad()\n loss_fn.backward()\n optimizer.step()\n\n # output surface in middle epochs, if required\n if (t > 0) and (output_surface_every > 0) and (np.mod(t, output_surface_every) == 0):\n temp.plot_surface(with_points=True,\n points=S.detach(),\n eik_points=X.detach(),\n decoder=model,\n latent=None,\n path=expdir,\n epoch='iteration__' + str(t),\n in_epoch=0,\n shapefile='',\n resolution=100,\n mc_value=0,\n is_uniform_grid=False, verbose=True, save_html=True, save_ply=True)\n temp.plot_cuts(sdf=model,\n path=expdir,\n epoch=t,\n latent=None)\n torch.save(\n {\"model\": model.state_dict()},\n os.path.join(expdir + \"/network_{0}.pth\".format(t)))\n\n # update learning rate, if required\n if (decrease_lr_every > 0) and (np.mod(t, decrease_lr_every) == 0) and (t > 1):\n learning_rate = learning_rate * decrease_lr_by\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n torch.save(\n {\"model\": model.state_dict()},\n os.path.join(expdir + \"/network_{0}.pth\".format(t)))\n\n # plot the zero level set surface\n temp.plot_surface(with_points=True,\n points=S.detach(),\n eik_points=X.detach(),\n decoder=model,\n latent=None,\n path=expdir,\n epoch='iteration__' + str(t),\n in_epoch=0,\n shapefile='',\n resolution=500,\n mc_value=0,\n is_uniform_grid=False, verbose=True, save_html=True, save_ply=True)\n\n print('end')\n","repo_name":"noamroze/moser_flow","sub_path":"eikonal/implicit_network_3d.py","file_name":"implicit_network_3d.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"19487650419","text":"lst=eval(input())\r\n\r\ndef Sum(lst,level,s):\r\n for i in lst:\r\n if type(i)==list:\r\n s+=Sum(i,level+1,0)\r\n else:\r\n s+=10-level\r\n return s\r\n\r\nprint(Sum(lst,0,0))\r\n","repo_name":"Yif1999/Programming-in-Python","sub_path":"code in python/7-4 列表元素的个数加权和.py","file_name":"7-4 列表元素的个数加权和.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8810413554","text":"# 1/15\n# 문제 유형: 정렬\n# 추천 풀이 시간: 15분\n\n# 나이 이름\n# 나이: 오름차순 \n# 나이 같으면) 먼저 가입한 순서대로\n\nimport sys\nx = int(sys.stdin.readline())\n\ndata = []\n\nfor _ in range(x):\n a, b = sys.stdin.readline().rstrip().split(' ')\n data.append((int(a),b))\n\ndata.sort(key=lambda x: x[0]) \n# 나이로 sort하겠다. 
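The weighted-sum exercise above gives each scalar element 10 minus its nesting depth. The same rule restated with an `isinstance` check and a literal input instead of `eval(input())`, which would execute arbitrary code:

def weighted_count(lst, level=0):
    # Scalars contribute (10 - nesting level); sublists recurse one level deeper.
    total = 0
    for item in lst:
        if isinstance(item, list):
            total += weighted_count(item, level + 1)
        else:
            total += 10 - level
    return total

print(weighted_count([1, [2, [3]], 4]))  # 10 + 9 + 8 + 10 = 37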
그 외 요소는 들어온 순서대로 됨.\n\nfor i in data:\n print(i[0], i[1])\n\n# 동빈나\n\n# (나이, 이름)의 정보를 입력받은 뒤에 나이를 기준으로 정렬\n# 기본 정렬 라이브러리를 이용\n# 나이 동일) key속성을 설정\n\nn = int(input())\narray = []\n\nfor _ in range(n):\n input_data = input().split(' ')\n array.append((int(input_data[0]), input_data[1]))\n\narray.sort(key=lambda x: x[0])\n\nprint(array)","repo_name":"Minsik113/Algorithm-practice","sub_path":"문제풀이/01_정렬/05_10814_나이순 정렬.py","file_name":"05_10814_나이순 정렬.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32067690878","text":"\n# l-CLUSTERING\nfrom d07_kruskal import kruskal\nfrom b12_comp import comp\n\ndef cluster(m,d,l):\n # 1. vollstaendigen Graph mit m Knoten, m^2 Kanten und Kosten d[u,v] konstruieren\n G = [{j:d[i,j] for j in range(m) if i!=j} for i in range(m)] # O(m^2)\n\n # 2. Alg. kruskal unveraendert aufrufen\n (mst,_) = kruskal(G) # O(k*log(m))\n\n # 3. die l-1 teuersten Kanten löschen\n # Liste mit Kanten und dazugehörigen Kosten erstellen \n li = []\n for e in mst: # O(k)\n (u,v) = e \n dist = d[u,v]\n li.append((dist,(u,v)))\n\n # Liste reversed sortieren und ersten l-1 Elemente \"entfernen\"\n edges = sorted(li, reverse=True)[l-1:] # O(k*log(k))\n\n\n # 4. Zush.komponenten bestimmen, via B.12 (comp)\n # dazu Graph mit allen Knoten konstuieren und Kanten einf.\n Graph = [set() for _ in range(m)]\n for e in edges: # O(k)\n (_,(u,v)) = e\n Graph[u].add(v)\n Graph[v].add(u)\n \n # Zusammenhangskomponenten bestimmen\n comps = comp(Graph) # O(m+k)\n\n return comps\n\n\n# Distanzfunktion symmetrisch, Diag. 0\ndef define_d():\n d = {}\n d[0,0]= 0\n d[0,1]= 11\n d[0,2]= 7\n d[0,3]= 15\n d[0,4]= 8\n d[1,0]= 11\n d[1,1]= 0\n d[1,2]= 12\n d[1,3]= 9\n d[1,4]= 2\n d[2,0]= 7\n d[2,1]= 12\n d[2,2]= 0\n d[2,3]= 13\n d[2,4]= 14\n d[3,0]= 15\n d[3,1]= 9\n d[3,2]= 13\n d[3,3]= 0\n d[3,4]= 6\n d[4,0]= 8\n d[4,1]= 2\n d[4,2]= 14\n d[4,3]= 6\n d[4,4]= 0 \n\n return d \n\nif __name__ == \"__main__\":\n d = define_d()\n m = 5\n for l in range(1,6):\n print(\"=========================================================\")\n print(f\"cluster: {cluster(m,d,l)}\")\n \n \n \n\n\n\n\n","repo_name":"MMueller98/Algorithmen-Design-Python","sub_path":"Code/d08_cluster.py","file_name":"d08_cluster.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22083677959","text":"from SnakeGameColors import *\nimport random\nimport pygame\npygame.init()\n\nclock = pygame.time.Clock()\nfont = pygame.font.SysFont(\"ariel\", 30)\n\nscreen_width = 650\nscreen_height = 450\n\ngameWindow = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Snake Game\")\n\n\ndef show_on_screen(text, color, x, y):\n screen_text = font.render(text, True, color)\n gameWindow.blit(screen_text, [x, y])\n\n\ndef grow_snake(base, color, ordinates, size):\n for x, y in ordinates:\n pygame.draw.rect(base, color, [x, y, size, size])\n\n\ndef game_loop():\n food_x = random.randint(20, screen_width - 20)\n food_y = random.randint(75, screen_height - 20)\n food_radius = 5\n velocity_x = 0\n velocity_y = 0\n snake_size = 10\n snake_y = 220\n snake_x = 320\n score = 0\n fps = 50\n\n snake_length = 1\n snake_list = []\n\n exit_game = False\n game_over = False\n\n while not exit_game:\n if game_over:\n gameWindow.fill(white)\n show_on_screen(\"Game Over!!! 
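Both solutions above lean on the fact that Python's `list.sort` is stable, so members with equal ages keep their input (registration) order without any extra key. A two-line demonstration:

members = [(21, "alice"), (19, "bob"), (21, "carol")]
members.sort(key=lambda m: m[0])   # Timsort is stable: equal ages keep input order
print(members)                     # [(19, 'bob'), (21, 'alice'), (21, 'carol')]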
Press Return to Continue\", red, 130, 180)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n game_over = False\n game_loop()\n\n else:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit_game = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n velocity_y = -5\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_x = 0\n velocity_y = 5\n if event.key == pygame.K_RIGHT:\n velocity_y = 0\n velocity_x = 5\n if event.key == pygame.K_LEFT:\n velocity_y = 0\n velocity_x = -5\n\n snake_x = snake_x + velocity_x\n snake_y = snake_y + velocity_y\n\n if abs(snake_x - food_x) < 5 and abs(snake_y - food_y) < 5:\n score = score + 10\n snake_length = snake_length + 3\n food_x = random.randint(20, screen_width - 20)\n food_y = random.randint(50, screen_height - 20)\n elif abs(food_x - snake_x) < 5 and abs(food_y - snake_y) < 5:\n score = score + 10\n snake_length = snake_length + 3\n food_x = random.randint(20, screen_width - 20)\n food_y = random.randint(50, screen_height - 20)\n\n gameWindow.fill(white)\n show_on_screen(\"Score : \" + str(score), purple, 10, 10)\n pygame.draw.line(gameWindow, brown, (0, 40), (screen_width, 40), 3)\n pygame.draw.circle(gameWindow, red, [food_x, food_y], food_radius)\n\n snake_head = list()\n snake_head.append(snake_x)\n snake_head.append(snake_y)\n snake_list.append(snake_head)\n\n if len(snake_list) > snake_length:\n del snake_list[0]\n\n if snake_head in snake_list[:-1]:\n game_over = True\n\n if snake_x < 0 or snake_x > screen_width or snake_y < 40 or snake_y > screen_height:\n game_over = True\n\n grow_snake(gameWindow, black, snake_list, snake_size)\n pygame.display.update()\n clock.tick(fps)\n\n pygame.quit()\n quit()\n\n\ngame_loop()\n","repo_name":"omlondhe/Python-Pygame","sub_path":"#11 GameOver.py","file_name":"#11 GameOver.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15433615974","text":"import numpy as np;\nimport cv2;\n\nMIN_MATCH_COUNT = 30\n\ndef getImage(imgName):\n imgReaded = cv2.imread(imgName)\n return cv2.resize(imgReaded,(800, 600), interpolation=cv2.INTER_CUBIC)\n\ndef closeWindows():\n cv2.destroyAllWindows()\n \ndef siftImage(firstImg, secondImg):\n global MIN_MATCH_COUNT\n dscr = cv2.SIFT_create() # Inicializamos el detector y el descriptor\n kp1, des1 = dscr.detectAndCompute(firstImg, None) # Encontramos los puntos clave y los descriptores con SIFT en la imagen 1\n kp2, des2 = dscr.detectAndCompute(secondImg, None) # Encontramos los puntos clave y los descriptores con SIFT en la imagen 2\n matcher = cv2.BFMatcher(cv2.NORM_L2)\n matches = matcher.knnMatch(des1, des2, k=2)\n \n # Guardamos los buenos matches usando el test de razón de Lowe\n good = []\n\n for m, n in matches:\n if(m.distance < (0.60 * n.distance)):\n good.append(m)\n \n if(len(good) > MIN_MATCH_COUNT):\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n H, mask = cv2.findHomography(dst_pts,src_pts,cv2.RANSAC,5.0) #Computamos la homografía con RANSAC\n matchesMask = mask.ravel().tolist()\n \n wimg2 = cv2.warpPerspective(secondImg, H, (800,600)) # Aplicamos la transformación perspectiva H a la imagen 2\n \n # Mezclamos ambas imágenes\n alpha = 0.5\n return np.array(wimg2 * alpha + firstImg * (1 - alpha), 
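The stitching code above keeps a descriptor match only when the best distance is clearly smaller than the runner-up (Lowe's ratio test with a 0.60 threshold). The filter isolated on synthetic distance pairs, no OpenCV required:

# (m.distance, n.distance) pairs for best and second-best candidate matches.
matches = [(0.20, 0.90), (0.50, 0.55), (0.10, 0.80)]
good = [m for m, n in matches if m < 0.60 * n]
print(good)  # [0.2, 0.1] -- the ambiguous 0.50/0.55 pair is rejected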
dtype=np.uint8), drawPointMatches(firstImg, kp1, secondImg, kp2, good, matchesMask)\n\ndef drawPointMatches(firstImg, kp1, secondImg, kp2, good, matchesMask):\n draw_params = dict(matchColor = None, # Dibujar puntos en común\n matchesMask = matchesMask, # Dibujar lineas entre los puntos\n singlePointColor = None,\n flags = 2)\n\n return cv2.drawMatches(firstImg, kp1 , secondImg, kp2, good, None, **draw_params)\n\n\ndef init():\n firstImg = getImage(\"img1.jpeg\")\n secondImg = getImage(\"img2.jpeg\")\n blend, imgWithMatches = siftImage(firstImg, secondImg)\n cv2.imshow(\"Puntos en comun\", imgWithMatches)\n cv2.imshow(\"Resultado\", blend)\n \n while (1):\n key = cv2.waitKey(1) & 0xFF\n if key == 27:\n closeWindows()\n break\n \ninit()\n","repo_name":"RomeroAlexisM/vision_2022","sub_path":"practico-12/practico-12.py","file_name":"practico-12.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9348486277","text":"\n# Contains the functions for downloading the contents of a manifest. \n#\n# Author: James Matsumura\n# Contact: jmatsumura@som.umaryland.edu\n\n# base 3.6 lib(s)\nimport urllib,hashlib,os,shutil,sys\nfrom ftplib import FTP\n# additional dependencies (get from pip) \nimport boto\nfrom boto.utils import get_instance_metadata\n\n# S3 connection.\nS3_CONNS = {}\n\ndef get_s3_connection():\n if 'S3' not in S3_CONNS:\n S3_CONNS['S3'] = boto.connect_s3(anon=True)\n return S3_CONNS['S3']\n\n# FTP connections.\nFTP_CONNS = {}\n\ndef get_ftp_connection(host):\n if host not in FTP_CONNS:\n ftp = FTP(host)\n ftp.login('hmp_client')\n FTP_CONNS[host] = ftp\n return FTP_CONNS[host]\n\ndef parse_ftp_url(url):\n dest = url.split('//')[1]\n host = dest.split('/')[0]\n file_path = url.split(host)[1]\n return { 'dest': dest, 'host': host, 'file_path': file_path }\n\n# Function to download each URL from the manifest.\n# Arguments:\n# manifest = manifest list created by functions in convert_to_manifest.py\n# destination = set destination to place output declared when calling client.py\n# priorities = endpoint priorities established by get_prioritized_endpoint\n# block_sz = the byte size to break the file into to allow for interrupted downloads\ndef download_manifest(manifest,destination,priorities,block_sz):\n \n # build a list of elements to indicate how many and why the files failed\n # 1 = no valid URL in manifest\n # 2 = URL exists, but not accessible at the location specified\n # 3 = MD5 check failed for file (file is corrupted or the wrong MD5 is attached to the file)\n failed_files = [] \n\n # iterate over the manifest data structure, one ID/file at a time\n for mfile in manifest: \n\n url_list = get_prioritized_endpoint(mfile['urls'],priorities)\n\n # Handle private data or simply nodes that are not correct and lack \n # endpoint data\n if not url_list:\n print(\"No valid URL found in the manifest for file ID {0}\".format(mfile['id']))\n failed_files.append(1)\n continue\n\n file_name = \"{0}/{1}\".format(destination,url_list[0].split('/')[-1])\n\n if not os.path.exists(file_name): # only need to download if the file is not present\n\n tmp_file_name = \"{0}.partial\".format(file_name)\n\n # If we only have part of a file, get the new start position\n current_byte = 0\n if os.path.exists(tmp_file_name):\n current_byte = os.path.getsize(tmp_file_name)\n\n # Need to try get the others to work like this, but for now HTTP \n # is the only one that can pull bytes in chunks without needing to\n # 
drop the connection.\n http_header = {} \n http_header['Range'] = 'bytes={0}-'.format(current_byte)\n \n res,endpoint = (\"\" for i in range(2))\n eps = []\n\n for url in url_list:\n endpoint = url.split(':')[0].upper()\n eps.append(endpoint)\n res = get_url_obj(url,endpoint,http_header)\n\n if res != \"error\": # if we get back a network object, continue\n break\n\n if res == \"error\": # if all attempts resulted in no object, move on to next file\n print(\"skipping file ID {0} as none of the URLs checked {1} yielded a valid file\".format(mfile['id'],eps))\n failed_files.append(2)\n continue\n\n with open(tmp_file_name,'ab') as file:\n\n # Need to pull the size without the potential bytes buffer\n file_size = get_file_size(url,endpoint)\n print(\"downloading file (via {0}): {1} | total bytes = {2}\".format(endpoint, file_name, file_size))\n\n while True:\n\n if block_sz > file_size:\n generate_status_message(\"block size greater than total file size, pulling entire file in one go\")\n\n buffer = get_buffer(res,endpoint,block_sz,current_byte,file_size,file)\n \n if not buffer: # note that only HTTP/S3 make it beyond this point\n break\n\n file.write(buffer)\n\n current_byte += len(buffer)\n generate_status_message(\"{0} [{1:.2f}%]\".format(current_byte, current_byte * 100 / file_size))\n\n # If the download is complete, establish the final file\n if checksum_matches(tmp_file_name,mfile['md5']):\n shutil.move(tmp_file_name,file_name)\n failed_files.append(0)\n else:\n print(\"\\rMD5 check failed for the file ID {0} , data may be corrupted\".format(key))\n failed_files.append(3)\n\n else: # file already done downloading\n failed_files.append(0)\n\n return failed_files\n\n# Function to get a network object of the file that can be iterated over.\n# Arguments:\n# url = path to location of file on the web\n# endpoint = HTTP/FTP/S3\n# http_header = HTTP range to pull from the file, the other endpoints require \n# this processing in the get_buffer() function.\ndef get_url_obj(url,endpoint,http_header):\n\n if endpoint == \"HTTP\":\n res = \"\"\n\n try:\n req = urllib.request.Request(url,headers=http_header)\n res = urllib.request.urlopen(req)\n except:\n res = \"\"\n\n if res:\n return res\n\n if endpoint == \"FTP\":\n p = parse_ftp_url(url)\n ftp = get_ftp_connection(p['host'])\n if list(ftp.mlsd(p['file_path'])): # make sure there's something there\n file_str = \"RETR {0}\".format(p['file_path'])\n def get_data(callback, blocksize, start_pos):\n ftp.retrbinary(file_str, callback, blocksize=blocksize, rest=start_pos)\n return get_data\n\n elif endpoint == \"S3\":\n res = s3_get_key(url)\n if res:\n return res\n\n # If made it here, no network object established\n return \"error\"\n \n# Function to retrieve the file size.\n# Arguments:\n# url = path to location of file on the web\n# endpoint = HTTP/FTP/S3\ndef get_file_size(url,endpoint):\n if endpoint == 'HTTP':\n return int(urllib.request.urlopen(url).info()['Content-Length'])\n\n elif endpoint == 'FTP':\n p = parse_ftp_url(url)\n ftp = get_ftp_connection(p[\"host\"])\n return ftp.size(p[\"file_path\"])\n\n elif endpoint == 'S3':\n k = s3_get_key(url)\n return k.size \n\n# Function to retrieve a particular set of bytes from the file.\n# Arguments:\n# res = network object created by get_url_obj()\n# endpoint = HTTP/FTP/S3\n# block_sz = number of bytes to be considered a chunk to allow interrupts/resumes\n# start_pos = position to start at\n# max_range = maximum value to use for the range, same as the file's size\n# file = file handle to write out 
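Resuming in the downloader above works by asking the server for everything from the current byte onward. A minimal sketch of that request with `urllib` alone; `url` and `start` are placeholders, and only servers that honor `Range` reply with 206 Partial Content rather than the whole file:

import urllib.request

def open_from_byte(url, start):
    # Request bytes [start, EOF]; a compliant server answers 206 Partial Content.
    req = urllib.request.Request(url, headers={"Range": "bytes={0}-".format(start)})
    return urllib.request.urlopen(req)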
to\ndef get_buffer(res,endpoint,block_sz,start_pos,max_range,file):\n if endpoint == \"HTTP\":\n return res.read(block_sz)\n\n elif endpoint == \"FTP\":\n current_byte = start_pos\n\n # The Python ftplib requires transfer to pass to a callback function,\n # using this to break up the download into pieces. Unfortunately this\n # function by default accepts just the byte-block being pulled by \n # .retrbinary() so we need a nonlocal variable to help with printing\n # out the progress.\n def callback(data):\n nonlocal current_byte\n\n file.write(data)\n\n current_byte += len(data)\n generate_status_message(\"{0} [{1:.2f}%]\".format(current_byte, current_byte * 100 / max_range))\n\n res(callback,block_sz,start_pos)\n\n return None\n\n elif endpoint == \"S3\":\n if start_pos >= max_range:\n return None # exit the while loop\n headers = {}\n range_end = start_pos+block_sz-1 # offset by 1 since bytes are 0-based\n headers['Range'] = 'bytes={0}-'.format(start_pos)\n if range_end <= max_range:\n headers['Range'] += \"{0}\".format(range_end)\n return res.get_contents_as_string(headers=headers)\n\n# Function to get the Key object from S3.\n# Arguments:\n# url = path to location of file on the web\ndef s3_get_key(url):\n url = url.lstrip('s3://')\n bucket = url.split('/',1)[0]\n key = url.split('/',1)[1]\n s3_conn = get_s3_connection()\n b = s3_conn.get_bucket(bucket)\n return b.get_key(key)\n\n# Function to get the URL for the prioritized endpoint that the user requests.\n# Note that priorities can be a list of ordered priorities.\n# Arguments:\n# manifest_urls = the CSV set of endpoint URLs\n# priorities = priorities declared when calling client.py\ndef get_prioritized_endpoint(manifest_urls,priorities):\n\n url_list = []\n\n urls = manifest_urls.split(',')\n eps = priorities.split(',')\n\n # If the user didn't provide a set of priorities, then prioritize based on\n # whether on an EC2 instance.\n if eps[0] == \"\":\n\n md = get_instance_metadata(timeout=0.5,num_retries=1)\n\n if len(md.keys()) > 0:\n eps = ['S3','HTTP','FTP']\n else:\n eps = ['HTTP','FTP','S3'] # if none provided, use this order\n\n # Go through and build a list starting with the higher priorities first.\n for ep in eps:\n for url in urls:\n if url.startswith(ep.lower()):\n\n # Quick fix until the correct endpoints for the demo data (bucket+key) are established on S3. 
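The S3 branch above builds inclusive, zero-based byte ranges by hand (`range_end = start_pos + block_sz - 1`). The same arithmetic isolated, with a clamp at end-of-file; the original instead drops the end bound when the chunk would overshoot, which is equally valid:

def range_header(start, block_sz, file_size):
    # Inclusive, zero-based byte range for one chunk, clamped at end of file.
    end = min(start + block_sz - 1, file_size - 1)
    return {"Range": "bytes={0}-{1}".format(start, end)}

print(range_header(0, 4096, 10000))     # {'Range': 'bytes=0-4095'}
print(range_header(8192, 4096, 10000))  # {'Range': 'bytes=8192-9999'}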
\n if 's3://' in url and 'HMDEMO' in url:\n elements = url.split('/')\n url = \"s3://{0}/DEMO/{1}/{2}\".format(elements[2],elements[4],\"/\".join(elements[-4:]))\n\n url_list.append(url)\n\n return url_list\n\n# This function failing is largely telling that the data in OSDF for the\n# particular file's MD5 is not correct.\n# Arguments:\n# file_path = location of the file just downloaded which requires an integrity check\n# original_md5 = MD5 provided by OSDF data\ndef checksum_matches(file_path,original_md5):\n\n md5 = hashlib.md5()\n\n # Read the file in chunks and build a final MD5\n with open(file_path,'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n md5.update(chunk)\n\n if md5.hexdigest() == original_md5:\n return True\n else:\n return False\n\n# Function to output a status message to the user.\n# Argument:\n# message = the string to temporarily output to the user\ndef generate_status_message(message):\n status = message\n status = status + chr(8)*(len(status)+1) # backspace everything\n print(\"\\r{0}\".format(status),end=\"\")\n","repo_name":"michbur/hmp_client","sub_path":"bin/process_manifest.py","file_name":"process_manifest.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"987748390","text":"import sqlite3\nfrom sqlite3 import Error\n\n\ndef get_weight(user_id):\n results_lst = []\n try:\n con = sqlite3.connect(\"wtracker.db\")\n cursorObj = con.cursor()\n cursorObj.execute(\n \"SELECT * FROM weights where user_id = :user_id\", {\"user_id\": user_id}\n )\n rows = cursorObj.fetchall()\n for row in rows:\n results_dict = {}\n results_dict[\"weight\"] = row[2]\n results_dict[\"time_stamp\"] = row[3]\n results_lst.append(results_dict)\n except Error:\n print(Error)\n finally:\n con.close()\n return results_lst\n\n\ndef get_user(username):\n results_lst = []\n try:\n con = sqlite3.connect(\"wtracker.db\")\n cursorObj = con.cursor()\n cursorObj.execute(\n \"SELECT * FROM users where username = :username\", {\"username\": username}\n )\n rows = cursorObj.fetchall()\n for row in rows:\n results_dict = {}\n results_dict[\"id\"] = row[0]\n results_dict[\"username\"] = row[1]\n results_dict[\"hash\"] = row[2]\n results_lst.append(results_dict)\n except Error:\n print(Error)\n finally:\n con.close()\n return results_lst\n\n\ndef register_user(username, hash):\n try:\n con = sqlite3.connect(\"wtracker.db\")\n cursorObj = con.cursor()\n sql_str = \"INSERT INTO users(username, hash) VALUES (:username, :hash)\"\n\n cursorObj.execute(sql_str, {\"username\": username, \"hash\": hash})\n con.commit()\n except Error:\n print(Error)\n finally:\n con.close()\n\n\ndef add_weight(user_id, weight):\n try:\n con = sqlite3.connect(\"wtracker.db\")\n cursorObj = con.cursor()\n sql_str = \"INSERT INTO weights(user_id, weight) VALUES (:user_id, :weight)\"\n\n cursorObj.execute(sql_str, {\"user_id\": user_id, \"weight\": weight})\n con.commit()\n except Error:\n print(Error)\n finally:\n con.close()\n\n\nif __name__ == \"__main__\":\n rows = get_weight(2)\n weights = [row[\"weight\"] for row in rows]\n","repo_name":"jesusrevilla/weight_tracker","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69833576552","text":"# coding: utf-8\n\nimport os\nimport numpy as np\n\nimport torch\nimport torch.utils.data\n\n\nclass 
MPIIGazeDataset(torch.utils.data.Dataset):\n def __init__(self, subject_id, dataset_dir):\n path = os.path.join(dataset_dir, '{}.npz'.format(subject_id))\n with np.load(path) as fin:\n self.images = fin['image']\n self.poses = fin['pose']\n self.gazes = fin['gaze']\n self.length = len(self.images)\n\n self.images = torch.unsqueeze(torch.from_numpy(self.images), 1)\n self.poses = torch.from_numpy(self.poses)\n self.gazes = torch.from_numpy(self.gazes)\n\n def __getitem__(self, index):\n return self.images[index], self.poses[index], self.gazes[index]\n\n def __len__(self):\n return self.length\n\n def __repr__(self):\n return self.__class__.__name__\n\n\ndef get_loader(dataset_dir, test_subject_id, batch_size, num_workers, use_gpu):\n assert os.path.exists(dataset_dir)\n assert test_subject_id in range(15)\n subject_ids = ['p{:02}'.format(index) for index in range(15)]\n test_subject_id = subject_ids[test_subject_id]\n\n train_dataset = torch.utils.data.ConcatDataset([\n MPIIGazeDataset(subject_id, dataset_dir) for subject_id in subject_ids\n if subject_id != test_subject_id\n ])\n test_dataset = MPIIGazeDataset(test_subject_id, dataset_dir)\n\n assert len(train_dataset) == 42000\n assert len(test_dataset) == 3000\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=use_gpu,\n drop_last=True,\n )\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False,\n pin_memory=use_gpu,\n drop_last=False,\n )\n return train_loader, test_loader\n","repo_name":"xiamenwcy/pictorial_net","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"33598801396","text":"import re\nimport subprocess\n\n\ndef is_running(process):\n s = subprocess.Popen([\"ps\", \"-faux\"], stdout=subprocess.PIPE)\n\n for x in s.stdout:\n if re.search(process, x):\n print(\"Process \" + str(process) + \" Is running\")\n\n elif not re.search:\n print(str(process) + \" not running\")\n\n\nis_running(b\"code\")\n","repo_name":"peacengell/Udemy-python-tutorial-for-beginners","sub_path":"process_check.py","file_name":"process_check.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15213609037","text":"''' Controller para fornecer dados da CEE '''\nfrom flask import request\nfrom flask_restful_swagger_2 import swagger\nfrom resources.base import BaseResource\n\nclass CensoAgroMunicipiosResource(BaseResource):\n ''' Classe de múltiplas incidências '''\n CUSTOM_SWAGGER_PARAMS = [\n {\"name\": \"categorias\", \"required\": True, \"type\": 'string', \"in\": \"query\",\n \"description\": \"Informações que devem ser trazidas no dataset. \\\n Campos disponíveis: cod_mun, qt_ocupados, qt_ocupados_menores14, \\\n percent_ocupados_men_14. 
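`get_loader` above implements leave-one-subject-out evaluation by concatenating every subject's dataset except the held-out one. The split in miniature with synthetic tensors standing in for the gaze data:

import torch
from torch.utils.data import ConcatDataset, DataLoader, TensorDataset

subjects = {"p{:02}".format(i): TensorDataset(torch.randn(4, 3), torch.randn(4))
            for i in range(3)}
held_out = "p01"
train = ConcatDataset([ds for sid, ds in subjects.items() if sid != held_out])
loader = DataLoader(train, batch_size=2, shuffle=True)
print(len(train), len(subjects[held_out]))  # 8 4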
\" + BaseResource.CAT_DETAIL}\n ]\n\n @swagger.doc({\n 'tags':['censo_agro'],\n 'description':'Obtém todos os dados do censo de áreas rurais, de acordo \\\n com os parâmetros informados.',\n 'parameters': CUSTOM_SWAGGER_PARAMS + BaseResource.DEFAULT_SWAGGER_PARAMS,\n 'responses': {\n '200': {'description': 'Censo Agro'}\n }\n })\n def get(self):\n ''' Obtém os registros de Censo Rural, conforme parâmetros informados '''\n options = self.build_options(request.args)\n options['theme'] = 'censoagromunicipal'\n return self.get_domain().find_dataset(options)\n","repo_name":"smartlab-br/datahub-api","sub_path":"app/resources/v1/ti/censo_agro.py","file_name":"censo_agro.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38969550485","text":"'''\n 접근법\n 재귀를 통해 구현\n \n'''\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\narr = [list(map(int,input().split())) for _ in range(n)]\n\ndef cnn(i,arr):\n # 1x1 \n if i == 1:\n return arr[0][0]\n\n # 222-풀링\n next = []\n for r in range(0,i,2):\n next_r = []\n for c in range(0,i,2):\n two = [arr[r][c],arr[r][c+1],arr[r+1][c],arr[r+1][c+1]]\n two.sort()\n next_r.append(two[2])\n next.append(next_r)\n return cnn(i//2,next)\n\nprint(cnn(n,arr))","repo_name":"chaemj97/Algorithm","sub_path":"2023년/백준_실버2_17829_222_풀링.py","file_name":"백준_실버2_17829_222_풀링.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16654251900","text":"\n# ? Write a function max_of_three(a, b, c) which returns the biggest of the three numbers. \n# ? Then print the result calling the function\n\n\n# ! Rezolvarea 1 - folosind functia max()\ndef max_of_three(a, b, c):\n return max(a,b,c)\n\nprint(max_of_three(-2, 2, 3))\n\n# ! Rezolvarea 2 - folosinf if statement\ndef max_of_three2(a, b, c):\n if a > b and a > c:\n return a\n elif b > a and b > c:\n return b\n elif c > a and c > b:\n return c\nprint(max_of_three2(-2, 2, 3))\n\n# ! Rezolvarea 3 - folosind un for loop\ndef max_of_three3(a, b, c):\n max_num = 0\n for num in [a,b,c]:\n if num > max_num:\n max_num = num\n return max_num\n\nprint(max_of_three3(-2, 2, 3))\n\n\n# * Aceiasi functie facuta sa functioneze cu orice numar de argumente\n# ! Varinata 1 \ndef get_max(*args):\n return max(args)\nprint(get_max(-2, 2, 3, 5, 9, 10))\n\n# ! 
Varianta 2\ndef get_max2(*args):\n max_num = args[0]\n for arg in args:\n if arg > max_num:\n max_num = arg\n return max_num\n\nprint(get_max2(-2, 2, 3, 55, 12, 32, -54, 29))","repo_name":"solomoniosif/SDA_Python_Exercises","sub_path":"20_decembrie_2020/20-12-ex9-max_of_three.py","file_name":"20-12-ex9-max_of_three.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31821211557","text":"#製作5->新增會員電話, 6->刪除會員電話\nimport pymysql, os, prettytable\n\nos.system(\"cls\")\nsetting_pass = input(\"請輸入資料庫的密碼:\")\nsetting_port = input(\"請輸入資料庫的port:\")\n\nif setting_port ==\"\":\n setting_port = 3306\nelse:\n setting_port = int(setting_port)\n\nlink = pymysql.connect(\n host =\"localhost\",\n user = \"root\",\n passwd = setting_pass,\n db=\"python_ai\",\n charset =\"utf8\",\n port = setting_port\n)\n\ncmd = -1\nos.system(\"cls\") #cmd下清空畫面\nc =link.cursor()\n\nwhile True:\n if cmd == \"0\":\n break\n elif cmd ==\"1\":\n t = prettytable.PrettyTable([\"id\", \"name\", \"birthday\", \"address\", \"tel\"], encoding=\"utf8\")\n # LEFT JOIN是以member資料庫為主要顯示, 並且member資料庫的id連結tel資料庫的member_id\n c.execute(\"SELECT * FROM `member` LEFT JOIN `tel` ON `member`.`id`= `tel`.`member_id`\")\n n = c.rowcount #算出總共有幾筆資訊\n for i in range(0,n):\n r = c.fetchone()\n t.add_row([str(r[0]), r[1], r[2], r[3],r[6]])\n t.align[\"name\"]=\"l\"\n t.align[\"birthday\"]=\"l\"\n t.align[\"address\"]=\"l\"\n t.align[\"tel\"]=\"l\"\n print(t)\n\n elif cmd == \"2\":\n name_ = input(\"請輸入name:\")\n bir_ = input(\"請輸入birthday:\")\n address_ = input(\"請輸入address:\")\n\n c.execute(\n \"INSERT INTO `member`(`name`, `birthday`, `address`) \"+\n \"VALUES(%s,%s,%s)\",(name_, bir_, address_) #讀取外面資訊到%s裏面時的格式\n )\n link.commit()\n elif cmd ==\"3\":\n par = {\n \"id\":input(\"輸入你要修改資料的id:\"),\n \"nam\":input(\"輸入名字:\"),\n \"bir\":input(\"輸入生日:\"),\n \"add\":input(\"輸入地址:\")}\n c.execute(\n \"UPDATE `member` SET \"+\n \"`id`=%(id)s, `name`=%(nam)s, `birthday`=%(bir)s, `address`=%(add)s WHERE `id`=%(id)s;\"\n , par)\n link.commit()\n elif cmd ==\"4\":\n par = {\"id\":input(\"輸入要刪除的id:\")}\n c.execute(\"DELETE FROM `member` WHERE `id`=%(id)s\", par)\n link.commit()\n elif cmd ==\"5\":\n t = prettytable.PrettyTable([\"id\", \"name\", \"birthday\", \"address\", \"tel\"], encoding=\"utf8\")\n c.execute(\"SELECT * FROM `member` LEFT JOIN `tel` ON `member`.`id`= `tel`.`tel`\")\n n =c.rowcount\n for i in range(0,n):\n r = c.fetchone()\n t.add_row([str(r[0]), r[1], r[2], r[3],r[4]])\n t.align[\"name\"]=\"l\"\n t.align[\"birthday\"]=\"l\"\n t.align[\"address\"]=\"l\"\n t.align[\"tel\"]=\"l\"\n print(t)\n num = input(\"選擇要添加的會員編號:\")\n ph = input(\"請輸入電話號碼:\")\n c.execute(\n \"INSERT INTO `tel`(`member_id`, `tel`) \" +\n \"VALUES(%s,%s)\", (num, ph)) # 讀取外面資訊到%s裏面時的格式\n link.commit()\n elif cmd ==\"6\":\n t = prettytable.PrettyTable([\"id\", \"name\", \"birthday\", \"address\", \"tel\"], encoding=\"utf8\")\n c.execute(\"SELECT * FROM `member` LEFT JOIN `tel` ON `member`.`id`= `tel`.`member_id`\")\n n = c.rowcount\n for i in range(0, n):\n r = c.fetchone()\n t.add_row([str(r[0]), r[1], r[2], r[3], r[6]])\n t.align[\"name\"] = \"l\"\n t.align[\"birthday\"] = \"l\"\n t.align[\"address\"] = \"l\"\n t.align[\"tel\"] = \"l\"\n print(t)\n num = input(\"選擇要刪除的會員編號:\")\n t2 = prettytable.PrettyTable([\"id\", \"tel\"])\n c.execute(\"SELECT * FROM `tel` WHERE `member_id`=%s\", (num)) #由於member_id =id 所以當輸入id時可由member_id把所有資訊撈出來\n n = c.rowcount\n for i in range(0, 
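Of the variadic versions above, the loop variant correctly seeds its running maximum from `args[0]`; seeding from 0, as the three-argument loop earlier in the file does, silently fails on all-negative input. A small side-by-side check:

def get_max_seeded_from_zero(*args):
    m = 0                      # bug: wrong whenever every argument is negative
    for a in args:
        if a > m:
            m = a
    return m

def get_max_seeded_from_first(*args):
    m = args[0]                # correct: start from an actual argument
    for a in args:
        if a > m:
            m = a
    return m

print(get_max_seeded_from_zero(-5, -2, -9))   # 0  (wrong)
print(get_max_seeded_from_first(-5, -2, -9))  # -2 (right)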
n):\n r = c.fetchone()\n t2.add_row([r[0], r[2]])\n print(t2)\n #刪除動作\n ph = input(\"選擇要刪除的電話號碼編號:\")\n c.execute(\"DELETE `tel` FROM `tel` WHERE `id`=%s\", (ph))\n link.commit()\n\n print(\"(0) 離開程式:\")\n print(\"(1) 顯示會員列表:\")\n print(\"(2) 新增會員資料:\")\n print(\"(3) 更新會員資料:\")\n print(\"(4) 刪除會員資料:\")\n print(\"(5) 新增會員電話:\")\n print(\"(6) 刪除會員電話:\")\n\n cmd =input(\"指令\")\n os.system(\"cls\")\n\nlink.close()","repo_name":"Nier1112/tibame_AI02","sub_path":"MYSQL/XAMMP_0407-1.py","file_name":"XAMMP_0407-1.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37950696758","text":"from django.shortcuts import render,redirect\nfrom .models import Person,Group,Membership,MessageForm,Messageship,Message_seen\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import PersonSerializer,GroupSerializer,MessageSerializer,Mess\nfrom rest_framework import generics\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import permissions\nfrom .forms import SimpleForm\nfrom django.utils import timezone\nfrom phaseone.forms import TempForm\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom pyfcm import FCMNotification\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\n\ndef manage(request,ide):\n if not request.user.is_authenticated:\n return redirect('/login')\n print(\"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\n profname=request.user\n form1 = SimpleForm()\n form2 = SimpleForm()\n l = []\n l2=[]\n grp1=Group.objects.filter(grp_name=ide,prof=request.user).exists()\n if grp1:\n grp=Group.objects.get(grp_name=ide,prof=request.user)\n else:\n return redirect('/blog')\n if grp.bool == 1 :\n if request.method =='POST': \n dict=request.POST\n print(dict)\n a=dict.getlist('student')\n if 'add' in dict:\n for studentid in a:\n per=Person.objects.get(userid=studentid)\n if grp.members.all().filter(userid=studentid).exists() :\n print(\"exists\")\n else :\n Membership.objects.create(person=per,group=grp)\n if 'remove' in dict:\n for studentid in a:\n per=Person.objects.get(userid=studentid)\n grp.members.remove(per)\n\n a=Person.objects.all()\n members=grp.members.all()\n for x in a:\n if x not in members:\n l2.append((x.userid,str(x.userid)+\" \"+str(x.name)))\n form2.fields['student'].choices = l2\n for x in members:\n l.append((x.userid,str(x.userid)+\" \"+str(x.name)))\n form1.fields['student'].choices = l\n return render(request,'manage_students.html',{'form1':form1,'form2':form2,'course':ide})\n else :\n return render(request,'manager.html')\ndef ta(request,ide):\n if not request.user.is_authenticated:\n return redirect('/login')\n print(\"********************************************\")\n profname=request.user\n form1 = SimpleForm()\n form2 = SimpleForm()\n l = []\n l2=[]\n grp1=Group.objects.filter(grp_name=ide,prof=request.user).exists()\n if grp1:\n grp=Group.objects.get(grp_name=ide,prof=request.user)\n else:\n return redirect('/blog')\n if grp.bool == 1 :\n if request.method =='POST': \n dict=request.POST\n print(dict)\n a=dict.getlist('student')\n if 'add' in dict:\n for studentid in a:\n per=Person.objects.get(userid=studentid)\n name=str(ide)+str(per.userid)\n if Group.objects.filter(grp_name=ide,prof=per.userid).exists() :\n print(\"exists\")\n else:\n Group.objects.create(bool=0,prof=studentid,grp_name=ide,name=name)\n 
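The menu script above consistently passes user input through `%s` placeholders instead of concatenating it into the SQL string, so the driver escapes the values. The pattern isolated in DB-API / pymysql style; "Alice", "2000-01-01", and "Taipei" are placeholder values, and the `execute` call is commented out because it needs a live connection:

# SQL text and values travel separately, so input like "x'); DROP TABLE member;--"
# arrives as inert data rather than executable SQL.
sql = "INSERT INTO `member`(`name`, `birthday`, `address`) VALUES (%s, %s, %s)"
params = ("Alice", "2000-01-01", "Taipei")
# With a live connection: cursor.execute(sql, params); link.commit()
print(sql, "<-", params)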
if User.objects.filter(username=studentid).exists():\n print(\"exists1\")\n else :\n User.objects.create_user(studentid, per.email, per.password)\n if 'remove' in dict:\n for studentid in a:\n Group.objects.filter(grp_name=ide,prof=studentid).delete()\n \n\n a=Person.objects.all()\n l=[]\n l2=[]\n \n for x in a:\n l2.append((x.userid,str(x.userid)+\" \"+str(x.name)))\n form2.fields['student'].choices = l2\n\n for x in Group.objects.filter(grp_name=ide):\n if Person.objects.filter(userid=x.prof).exists():\n per=Person.objects.get(userid=x.prof)\n print(per)\n l.append((per.userid,str(per.userid)+\" \"+str(per.name)))\n\n form1.fields['student'].choices = l\n\n return render(request,'manage_ta.html',{'form1':form1,'form2':form2,'course':ide})\n else :\n return render(request,'manager.html')\n\ndef course(request,ide):\n if not request.user.is_authenticated:\n return redirect('/login')\n \n\n grp1=Group.objects.filter(grp_name=ide,prof=request.user).exists()\n if grp1:\n grp=Group.objects.get(grp_name=ide,prof=request.user)\n else:\n return redirect('/blog')\n \n b=request.POST\n print(b)\n form = TempForm(request.POST or None, request.FILES or None)\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1\")\n\n if request.method =='POST': \n if \"delete\" in b:\n MessageForm.objects.get(printer=b['delete']).delete()\n else:\n if form.is_valid(): \n timezone.activate('Asia/Kolkata')\n time=datetime.utcnow()\n messenger = request.user\n print(messenger)\n printer=str(messenger)+\" \"+str(time)\n MessageForm.objects.create(time=time,header=b['header'],text=b['text'],priority=b['priority'],printer=printer)\n print(timezone.now())\n\n message=MessageForm.objects.get(printer=printer)\n x = Group.objects.get(name=ide)\n Messageship.objects.create(group=x,form2=message)\n\n push_service = FCMNotification(api_key=\"AAAAff-npZk:APA91bEYHRssroImVg-vG_RlUrRyn-bSps85URjpiNBNcYpJ4ijjSlnsc1NmmftqO1G0pp_TbCS07PGXL5Fx7vDC2uttICAUeCE_bwB_r5aHuH3wwWcmZQxgoekbbX9JPO3hXlnWKW_X\")\n registration_ids = []\n for x in grp.members.all():\n if x.Token_key != '0' :\n registration_ids.append(x.Token_key)\n print(registration_ids)\n message_title = b['header']\n print(message_title)\n message_body = b['text']\n priority = b['priority']\n if (priority == '1' or priority == '2'):\n print(priority)\n result = push_service.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body,sound=\"Default\",content_available=True)\n else:\n result = push_service.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body,sound=\"Default\",low_priority=True)\n print(result)\n form = TempForm() \n \n bool = grp.bool\n if bool :\n messages= grp.messages.all()\n else :\n grp = Group.objects.get(name=ide)\n messages=grp.messages.all()\n return render(request,'courses.html',{ 'course' : ide , 'messages' : messages ,'form':form,'bool':bool})\ndef seen(request,ide,msg):\n if not request.user.is_authenticated:\n return redirect('/login')\n grp1=Group.objects.filter(grp_name=ide,prof=request.user).exists()\n if grp1:\n a1=Group.objects.get(name=ide).messages.filter(printer=msg).exists()\n if a1 :\n a=MessageForm.objects.get(printer=msg)\n else :\n return redirect('/blog') \n else:\n return redirect('/blog')\n a=MessageForm.objects.get(printer=msg)\n seen = a.seen.all()\n print(seen)\n return render(request,'seen.html',{'seen':seen})\n\ndef home(request):\n if not request.user.is_authenticated:\n return redirect('/login')\n msg=\"\"\n if 
request.method =='POST': \n dict=request.POST\n grpname=dict['group_name']\n grpname=str(grpname)+\" \"+str(request.user)\n a=Group.objects.filter(prof=request.user).filter(name=grpname).exists()\n if a :\n msg=\"course name already exists\"\n elif grpname!=\"\":\n Group.objects.create(name=grpname,prof=request.user,grp_name=grpname)\n a=Group.objects.filter(prof=request.user)\n print(a)\n bool=0\n for x in a:\n bool=x.bool\n\n return render(request,'home.html',{'courses':a,'msg':msg,'bool' : bool}) \n\nclass PersonList(generics.ListCreateAPIView):\n queryset = Person.objects.all()\n serializer_class = PersonSerializer\n\n\nclass PersonDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Person.objects.all()\n serializer_class = PersonSerializer\n\nclass GroupList(generics.ListCreateAPIView):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\nclass GroupDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\nclass MessageList(generics.ListCreateAPIView):\n queryset = MessageForm.objects.all()\n serializer_class = MessageSerializer\n\nclass MessageDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = MessageForm.objects.all()\n serializer_class = MessageSerializer\n\n\nclass MessList(generics.ListCreateAPIView):\n queryset = Message_seen.objects.all()\n serializer_class = Mess\n\nclass MessDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Message_seen.objects.all()\n serializer_class = Mess","repo_name":"s0nnandh/Mini-Moodle","sub_path":"project/bogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34089403509","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nimport numpy as np\nimport os\nimport time\nimport argparse\nfrom utils import Therm_Dataset, plot_losses\nfrom torchviz import make_dot\nfrom torch.autograd import Variable\nfrom torchsummary import summary\nimport torchvision\nfrom sklearn.svm import SVC\nfrom sklearn.svm import LinearSVC\nfrom sklearn import metrics\n\nparser = argparse.ArgumentParser(description='Simple Autoencoder')\nparser.add_argument('--lr', default=0.001, type=float, help='learning rate')\nparser.add_argument('--batch_size', default=1, type=int, help='batch size')\nparser.add_argument('--nepoch', default=25, type=int, help='Number of epochs')\n\nargs = parser.parse_args()\n\nclass AutoEncoder(nn.Module):\n def __init__(self): \n super(AutoEncoder,self).__init__() \n self.encoder = nn.Sequential(\n nn.Conv2d(300, 16, 3, stride=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, stride=2),\n nn.Conv2d(16, 8, 3, stride=2, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, stride=1),\n nn.Conv2d(8, 4, 3, stride=2, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d(2, stride=1)\n )\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(4, 8, 4, stride=2),\n nn.ReLU(True),\n nn.ConvTranspose2d(8, 16, 3, stride=2),\n nn.ReLU(True),\n nn.ConvTranspose2d(16, 8, 5, stride=3, padding=1),\n nn.ReLU(True),\n nn.ConvTranspose2d(8, 300, 6, stride=2, padding=1),\n nn.Tanh()\n )\n\n def forward(self, x):\n enc = self.encoder(x)\n dec = self.decoder(enc)\n return dec, enc\n \nclass TrainTest():\n \n def __init__(self,args, model, trainset, testset):\n self.model = model\n self.criterion = nn.MSELoss()\n self.optimizer = 
torch.optim.Adam(model.parameters(),lr=args.lr)\n self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True)\n self.testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False)\n \n self.model.to('cuda') \n\n self.args = args\n self.chkpath = './results/autoencoder.pt'\n\n if not os.path.exists('results/'):\n os.mkdir('results/')\n \n print(self.chkpath)\n if os.path.exists(self.chkpath) == True:\n print('load from results', end=' ')\n self.state = torch.load(self.chkpath)\n self.model.load_state_dict(self.state['model'])\n best_acc = self.state['acc']\n start_epoch = self.state['epoch']\n print('Epoch {}'.format(start_epoch))\n if start_epoch == self.args.nepoch:\n print('existing as epoch is max.')\n plot_losses(self.state['details'],'autoencoder')\n self.details = self.state['details'] \n self.best_acc = best_acc\n self.start_epoch = start_epoch + 1\n self.model.to('cuda') \n else:\n self.best_acc = -1.\n self.details = [] \n self.start_epoch = 0\n \n cudnn.benchmark = True\n \n def test(self):\n self.model.eval()\n correct = 0\n count = 0\n running_loss = 0.0\n latent_vals = np.empty([0,256])\n latent_labels = np.empty([0])\n with torch.no_grad():\n for data in self.testloader:\n inputs, labels = data\n inputs, labels = inputs.to('cuda'), labels.to('cuda')\n _, outputs = self.model(inputs)\n numpy_outputs = outputs.cpu().detach().numpy()\n numpy_labels = labels.cpu().detach().numpy()\n latent_vals = np.concatenate([latent_vals,numpy_outputs.reshape((1,-1))])\n latent_labels = np.concatenate([latent_labels,numpy_labels])\n count += outputs.shape[0] \n \n indices = [15,16,17,18,19,20,21,22,23,9,10,11,12,13,14]\n x_train = latent_vals[indices]\n y_train = latent_labels[indices]\n\n indices = [0,1,2,3,4,5,6,7,8,24,25,26,27,28,29]\n x_test = latent_vals[indices]\n y_test = latent_labels[indices]\n\n # Linear Kernel Classifier\n classifier = SVC(kernel='linear')\n classifier.fit(x_train, y_train)\n y_pred = classifier.predict(x_test)\n\n accuracy = metrics.accuracy_score(y_test, y_pred)\n precision = metrics.precision_score(y_test, y_pred)\n recall = metrics.recall_score(y_test, y_pred)\n\n print('Accuracy for {} classifier:'.format('linear'), accuracy)\n print('Precision for {} classifier:'.format('linear'), precision)\n print('Recall for {} classifier:'.format('linear'), recall)\n return 0., running_loss/count, 0.\n\n def train(self): \n for epoch in range(self.start_epoch,self.args.nepoch): \n self.model.train()\n start_time = time.time() \n running_loss = 0.0\n correct = 0.\n count = 0.\n self.optimizer.zero_grad() \n for i, data in enumerate(self.trainloader, 0): \n inputs, labels = data \n inputs, labels = inputs.to('cuda'), labels.to('cuda')\n outputs, _ = self.model(inputs)\n loss = self.criterion(outputs, inputs)\n loss.backward()\n if i%1==0 and i>1:\n self.optimizer.step()\n self.optimizer.zero_grad() \n loss = loss.item() \n running_loss = running_loss + loss\n count = count + outputs.shape[0]\n TRAIN_LOSS = running_loss/count \n TRAIN_ACC = 0\n TEST_ACC, TEST_LOSS, TEST_COUNT = 0, 0, 0\n \n self.details.append((TRAIN_LOSS,TRAIN_ACC,TEST_LOSS,TEST_ACC))\n\n if TEST_ACC >= self.best_acc: \n self.state = {\n 'model': self.model.state_dict(),\n 'acc': TEST_ACC,\n 'epoch': epoch,\n 'details':self.details, \n 'args_list':self.args,\n } \n torch.save(self.state, self.chkpath)\n self.best_acc = TEST_ACC\n else:\n self.state['epoch'] = epoch\n torch.save(self.state, self.chkpath)\n elapsed_time = time.time() - 
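The `TrainTest` class above persists a dict of model weights plus bookkeeping (epoch, accuracy, history) and reloads it to resume. The round trip in miniature, using an in-memory buffer instead of a results directory:

import io
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
state = {"model": model.state_dict(), "epoch": 3, "acc": 0.91}
buf = io.BytesIO()
torch.save(state, buf)          # torch.save/load accept any file-like object
buf.seek(0)

restored = nn.Linear(4, 2)
ckpt = torch.load(buf)
restored.load_state_dict(ckpt["model"])
print(ckpt["epoch"], ckpt["acc"])  # 3 0.91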
start_time\n print('[{}] [{:.1f}] [Loss {:.3f}]'.format(epoch, elapsed_time, TRAIN_LOSS)) \n plot_losses(self.state['details'],'autoencoder')\n\n\ndef main(args): \n \n trainset = Therm_Dataset('./npy_dataset',is_train=True, auto=True)\n testset = trainset\n model = AutoEncoder()\n \n summary(model, (300, 224, 224))\n \n EXEC = TrainTest(args,model=model, trainset=trainset, testset = testset)\n EXEC.train()\n EXEC.test()\n \n \nmain(args)","repo_name":"vinojjayasundara/thermal","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25720520302","text":"\r\n\r\naltura = input(\"insira a altura do triangulo \\n\")\r\nlargura = input(\"insira a largura do triangulo \\n\")\r\n\r\nvalorlarg = float(largura)\r\nvalorlalt = float(altura)\r\n\r\n\r\n\r\nprint(\"A área do seu triângulo é\", (valorlarg * valorlalt)/2 )\r\n\r\n\r\n\r\n#mecanismo de saida\r\ninput(\"\\naperte enter para sair \\n\")\r\n","repo_name":"miguelista/exercicios_python","sub_path":"EXEC 3 - ALTURA TRIANGULO.py","file_name":"EXEC 3 - ALTURA TRIANGULO.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7451013984","text":"from baselib.database.sqlserver import SQLServer\nfrom baselib.utils.Logger import Logger\nimport time\nimport datetime\nimport pandas\n\n_LOG = Logger(\"Blossom Process\").getlog()\n\n\ndef _get_query():\n query = \"\"\"\n SELECT\n DISTINCT\n Client_Id,\n Client_Type,\n Client_Name,\n Client_Name_Uni,\n ContactName,\n ClientCategory,\n ContactPhone,\n ContactCity,\n ContactEmail,\n ResultType,\n ClientIndustry\n FROM\n dbo.{0}('{1}', '{2}', '{3}', '{4}', {5})\n ORDER BY\n ResultType,\n Client_Name,\n Client_Name_Uni,\n Client_Type,\n Client_Id,\n ContactName,\n ClientCategory,\n ContactPhone,\n ContactCity,\n ContactEmail\n \"\"\"\n return query\n\n\ndef execute_blossom(procname, section, username, clientname, optype,\n nametype, limit=None):\n passphrase = \"This is my message\"\n # procname = \"GK_Tst1_GetClientsForUser\"\n query = _get_query().format(\n procname,\n username,\n clientname,\n optype,\n nametype,\n limit\n )\n with SQLServer(log=_LOG) as dbhandler:\n alldata = dbhandler.sql_to_df_timed(\n query=query,\n sectionname=section,\n passphrase=passphrase\n )\n df = alldata[0]\n starttime = alldata[1]\n endtime = alldata[2]\n deltatime = alldata[3]\n print(df.to_string())\n time.sleep(1)\n elapsed = int(deltatime.total_seconds() * 1000)\n _LOG.info(\"Start Time: {}\".format(str(starttime)))\n _LOG.info(\"End Time: {}\".format(str(endtime)))\n _LOG.info(\"Total Elapsed Time: {} milliseconds\".format(str(elapsed)))\n\n\ndef test_blossom_time(procname):\n username = \"jerry.majewski\"\n # clientname = \"Accent\"\n clientname = \"Black\"\n # clientname = \"Hew\"\n optype = \"add\"\n nametype = \"en\"\n limit = 100\n section = \"BLOSSOM_DATABASE\"\n _LOG.info(\"*****Executing Function : {}\".format(procname))\n execute_blossom(\n procname=procname, section=section, username=username,\n clientname=clientname, optype=optype, nametype=nametype,\n limit=limit\n )\n _LOG.info(\"*************************************************\")\n _LOG.info(\"\\n\")\n 
_LOG.info(\"\\n\")\n\n\ntest_blossom_time(\"GK_Tst1_GetClientsForUser\")\ntest_blossom_time(\"GK_Test_GetClientsForUser\")\ntest_blossom_time(\"GetClientsForUser\")\n","repo_name":"kumarkgu/gdimindialocal","sub_path":"jobs/test/test_blossom.py","file_name":"test_blossom.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7451024744","text":"import speech_recognition as sr\nimport pyaudio as pa\n\n\ndef check_audio():\n paudio = pa.PyAudio()\n for idx in range(0, paudio.get_device_count()):\n devinfo = paudio.get_device_info_by_index(idx)\n if str(devinfo[\"name\"]) == \"Microphone (Jabra Link 370)\":\n print(devinfo)\n\n\ndef speech_test():\n robj = sr.Recognizer()\n istimeout = 0\n with sr.Microphone(device_index=1) as source:\n try:\n audio = robj.listen(source=source, timeout=5)\n except sr.WaitTimeoutError:\n print(\"No Sound coming\")\n istimeout = 1\n if istimeout == 0:\n try:\n text_us = robj.recognize_google(audio_data=audio, language='en-US')\n text_in = robj.recognize_google(audio_data=audio, language='en-IN')\n print(\"You said (US): {}\".format(text_us))\n print(\"You said (IN): {}\".format(text_in))\n except LookupError:\n print(\"Could not understand Audio\")\n\n\ndef list_microphone():\n for idx, micname in enumerate(sr.Microphone.list_microphone_names()):\n print(\"Index: {}. Name is: {}\".format(str(idx), micname))\n\n\n# check_audio()\nspeech_test()\n# list_microphone()\n\n# {'index': 1, 'structVersion': 2, 'name': 'Microphone (Jabra Link 370)', 'hostApi': 0, 'maxInputChannels': 1, 'maxOutputChannels': 0, 'defaultLowInputLatency': 0.09, 'defaultLowOutputLatency': 0.09, 'defaultHighInputLatency': 0.18, 'defaultHighOutputLatency': 0.18, 'defaultSampleRate': 44100.0}\n# {'index': 7, 'structVersion': 2, 'name': 'Microphone (Jabra Link 370)', 'hostApi': 1, 'maxInputChannels': 1, 'maxOutputChannels': 0, 'defaultLowInputLatency': 0.12, 'defaultLowOutputLatency': 0.0, 'defaultHighInputLatency': 0.24, 'defaultHighOutputLatency': 0.0, 'defaultSampleRate': 44100.0}\n# {'index': 14, 'structVersion': 2, 'name': 'Microphone (Jabra Link 370)', 'hostApi': 3, 'maxInputChannels': 1, 'maxOutputChannels': 0, 'defaultLowInputLatency': 0.003, 'defaultLowOutputLatency': 0.0, 'defaultHighInputLatency': 0.01, 'defaultHighOutputLatency': 0.0, 'defaultSampleRate': 16000.0}\n# {'index': 21, 'structVersion': 2, 'name': 'Microphone (Jabra Link 370)', 'hostApi': 4, 'maxInputChannels': 1, 'maxOutputChannels': 0, 'defaultLowInputLatency': 0.01, 'defaultLowOutputLatency': 0.01, 'defaultHighInputLatency': 0.08533333333333333, 'defaultHighOutputLatency': 0.08533333333333333, 'defaultSampleRate': 16000.0}","repo_name":"kumarkgu/gdimindialocal","sub_path":"jobs/test_hardware/test_speech.py","file_name":"test_speech.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17441305453","text":"import zmq\nimport base64\nimport numpy as np\nfrom cscore import CameraServer\n\ndef main():\n\t#set up camera server\n\tcs=CameraServer.getInstance()\n\tcs.enableLogging()\n\toutputStream = cs.putVideo(\"Sandstream\", 320, 240)\n\t#set up zmq and init an empty image frame\n\tcontext = zmq.Context()\n\tfootage_socket = context.socket(zmq.SUB)\n\tfootage_socket.bind('tcp://*:5555')\n\tfootage_socket.setsockopt_string(zmq.SUBSCRIBE, np.unicode(''))\n\timg = np.zeros(shape=(240, 320, 3), dtype=np.uint8)\n\twhile 
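The benchmark above converts a `timedelta` to whole milliseconds via `total_seconds() * 1000`. The same conversion on its own, with a short sleep standing in for the timed query:

import time
from datetime import datetime

start = datetime.utcnow()
time.sleep(0.05)                    # stand-in for the timed database call
delta = datetime.utcnow() - start
print("Total Elapsed Time: {} milliseconds".format(int(delta.total_seconds() * 1000)))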
True:\n\t\tframe = footage_socket.recv_string()\n\t\timg = base64.b64decode(frame)\n\t\tnpimg = np.frombuffer(img, dtype=np.uint8) #1-d array of the encoded JPEG bytes\n\t\tsource = cv2.imdecode(npimg, 1) #decode the JPEG into a 2d BGR image\n\t\toutputStream.putFrame(source)","repo_name":"FRC6332/2019TankDrive","sub_path":"vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28899148131","text":"\"\"\" \nCreates an Xception Model as defined in:\n\nFrancois Chollet\nXception: Deep Learning with Depthwise Separable Convolutions\nhttps://arxiv.org/pdf/1610.02357.pdf\n\nThese weights were ported from the Keras implementation. Achieves the following performance on the validation set:\n\nLoss:0.9173 Prec@1:78.892 Prec@5:94.292\n\nREMEMBER to set your image size to 3x299x299 for both test and validation\n\nnormalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n\nThe resize parameter of the validation transform should be 333, and make sure to center crop at 299x299\n\"\"\"\nimport math\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom torch.nn import init\nimport torch\n\n__all__ = ['xception']\n\nmodel_urls = {\n 'xception': 'https://www.dropbox.com/s/1hplpzet9d7dv29/xception-c0a72b38.pth.tar?dl=1'\n}\n\n\nclass SeparableConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):\n super(SeparableConv2d, self).__init__()\n # note: groups=in_channels turns this into a depthwise convolution\n self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels,\n bias=bias)\n self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n # slightly different from Inception-style blocks: here the depthwise conv runs first, then the 1*1 (pointwise) conv\n x = self.conv1(x)\n # after conv1 we have a feature map with the same channel count as in_channels\n x = self.pointwise(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True):\n # reps is the number of times the SeparableConv is repeated\n # self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)\n super(Block, self).__init__()\n\n # in the middle flow a block keeps the same in/out channels and uses stride 1, so no skip projection is needed\n if out_filters != in_filters or strides != 1:\n self.skip = nn.Conv2d(in_filters, out_filters, 1, stride=strides, bias=False)\n self.skipbn = nn.BatchNorm2d(out_filters)\n else:\n self.skip = None\n\n self.relu = nn.ReLU(inplace=True)\n rep = []\n\n filters = in_filters # this line takes effect when grow_first is False\n if grow_first: # grow_first is True for every block except the last one\n rep.append(self.relu)\n rep.append(SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False))\n rep.append(nn.BatchNorm2d(out_filters))\n filters = out_filters\n\n for i in range(reps - 1): # one separable conv is already added above/below, so only reps - 1 are needed here\n rep.append(self.relu)\n rep.append(SeparableConv2d(filters, filters, 3, stride=1, padding=1, bias=False))\n rep.append(nn.BatchNorm2d(filters))\n\n if not grow_first: # in the last block the first conv keeps 728 -> 728; only the second conv grows 728 -> 1024\n rep.append(self.relu)\n rep.append(SeparableConv2d(in_filters, out_filters, 3, stride=1, padding=1, bias=False))\n rep.append(nn.BatchNorm2d(out_filters))\n\n if not start_with_relu: # the first block does not start with a ReLU, so slice it off\n rep = rep[1:]\n else:\n rep[0] = nn.ReLU(inplace=False) # strictly speaking this line is unnecessary\n\n if strides != 1: # outside the middle flow strides is 2, so append a max pooling at the end of the block\n rep.append(nn.MaxPool2d(3, strides, 1))\n 
self.rep = nn.Sequential(*rep)\n\n def forward(self, inp):\n x = self.rep(inp)\n\n if self.skip is not None: # residual connection\n skip = self.skip(inp)\n skip = self.skipbn(skip)\n else:\n skip = inp # middle flow\n\n x += skip\n return x\n\n\nclass Xception(nn.Module):\n \"\"\"\n Xception optimized for the ImageNet dataset, as specified in\n https://arxiv.org/pdf/1610.02357.pdf\n \"\"\"\n\n def __init__(self, num_classes=1000):\n \"\"\" Constructor\n Args:\n num_classes: number of classes\n \"\"\"\n super(Xception, self).__init__()\n\n self.num_classes = num_classes\n\n self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.relu = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(32, 64, 3, bias=False)\n self.bn2 = nn.BatchNorm2d(64)\n # do relu here\n\n # the 3 blocks of the entry flow, all with strides=2\n self.block1 = Block(64, 128, reps=2, strides=2, start_with_relu=False, grow_first=True)\n self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)\n self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)\n\n # the 8 blocks of the middle flow, all with strides=1\n self.block4 = Block(728, 728, reps=3, strides=1, start_with_relu=True, grow_first=True)\n self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)\n\n # exit flow; note grow_first=False, which presumably controls whether the first conv grows the channel count\n self.block12 = Block(728, 1024, reps=2, strides=2, start_with_relu=True, grow_first=False)\n\n self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)\n self.bn3 = nn.BatchNorm2d(1536)\n # do relu here\n\n self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1)\n self.bn4 = nn.BatchNorm2d(2048)\n # do relu here\n\n self.fc = nn.Linear(2048, num_classes)\n\n # ------- init weights --------\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n)) # Kaiming (He) initialization: std = sqrt(2/n) suits ReLU networks\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n # -----------------------------\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.block5(x)\n x = self.block6(x)\n x = self.block7(x)\n x = self.block8(x)\n x = self.block9(x)\n x = self.block10(x)\n x = self.block11(x)\n x = self.block12(x)\n\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n\n x = self.conv4(x)\n x = self.bn4(x)\n x = self.relu(x)\n\n x = F.adaptive_avg_pool2d(x, (1, 1)) # global average pooling; window size and stride are chosen adaptively\n x = x.view(x.size(0), -1) # Flatten\n x = self.fc(x)\n\n return x\n\n\ndef xception(pretrained=False, **kwargs): # **kwargs: key-value pairs (dict)\n \"\"\"\n Construct Xception.\n \"\"\"\n model = Xception(**kwargs)\n if pretrained:\n model.load_state_dict(torch.load(\"/data1/Huaming-Wang/Print_Aug/xception-c0a72b38.pth.tar\"))\n # models.load_state_dict(torch.load(r\"C:\\Users\\Wang Huaming\\.cache\\torch\\hub\\checkpoints\\xception-c0a72b38.pth.tar\"))\n # models.load_state_dict(model_zoo.load_url(model_urls['xception']))\n return model\n","repo_name":"MSFG/FDA","sub_path":"models/XceptionNet.py","file_name":"XceptionNet.py","file_ext":"py","file_size_in_byte":7958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13556789528","text":"import json\nimport os\nimport pickle\nimport sys\nimport urllib\nfrom pathlib import Path\nfrom typing import IO, Any, Generator, cast\n\nimport arcp\nimport bagit\nimport pytest\nfrom rdflib import Graph, Namespace, URIRef\nfrom rdflib.namespace import DC, DCTERMS, RDF\nfrom rdflib.term import Literal\n\nimport cwltool.cwlprov as provenance\nfrom cwltool.cwlprov import provenance_constants\nfrom cwltool.cwlprov.ro import ResearchObject\nfrom cwltool.cwlprov.writablebagfile import close_ro, write_bag_file\nfrom cwltool.main import main\nfrom cwltool.stdfsaccess import StdFsAccess\n\nfrom .util import get_data, needs_docker, working_directory\n\n# RDF namespaces we'll query for later\nORE = Namespace(\"http://www.openarchives.org/ore/terms/\")\nPROV = Namespace(\"http://www.w3.org/ns/prov#\")\nRO = Namespace(\"http://purl.org/wf4ever/ro#\")\nWFDESC = Namespace(\"http://purl.org/wf4ever/wfdesc#\")\nWFPROV = Namespace(\"http://purl.org/wf4ever/wfprov#\")\nSCHEMA = Namespace(\"http://schema.org/\")\nCWLPROV = Namespace(\"https://w3id.org/cwl/prov#\")\nOA = Namespace(\"http://www.w3.org/ns/oa#\")\n\n\ndef cwltool(tmp_path: Path, *args: Any) -> Path:\n prov_folder = tmp_path / \"provenance\"\n prov_folder.mkdir()\n new_args = [\"--provenance\", str(prov_folder)]\n new_args.extend(args)\n # Run within a temporary directory to not pollute git checkout\n tmp_dir = tmp_path / \"cwltool-run\"\n tmp_dir.mkdir()\n with working_directory(tmp_dir):\n status = main(new_args)\n assert status == 0, f\"Failed: cwltool.main({args})\"\n return prov_folder\n\n\n@needs_docker\ndef test_hello_workflow(tmp_path: Path) -> None:\n check_provenance(\n cwltool(\n tmp_path,\n get_data(\"tests/wf/hello-workflow.cwl\"),\n \"--usermessage\",\n \"Hello workflow\",\n )\n )\n\n\n@needs_docker\ndef test_hello_single_tool(tmp_path: Path) -> None:\n check_provenance(\n cwltool(\n tmp_path,\n get_data(\"tests/wf/hello_single_tool.cwl\"),\n \"--message\",\n \"Hello tool\",\n ),\n single_tool=True,\n )\n\n\n@needs_docker\ndef 
test_revsort_workflow(tmp_path: Path) -> None:\n folder = cwltool(\n tmp_path,\n get_data(\"tests/wf/revsort.cwl\"),\n get_data(\"tests/wf/revsort-job.json\"),\n )\n check_output_object(folder)\n check_provenance(folder)\n\n\n@needs_docker\ndef test_revsort_workflow_shortcut(tmp_path: Path) -> None:\n \"\"\"Confirm that using 'cwl:tool' shortcut still snapshots the CWL files.\"\"\"\n folder = cwltool(\n tmp_path,\n get_data(\"tests/wf/revsort-job-shortcut.json\"),\n )\n check_output_object(folder)\n check_provenance(folder)\n assert not (folder / \"snapshot\" / \"revsort-job-shortcut.json\").exists()\n assert len(list((folder / \"snapshot\").iterdir())) == 4\n\n\n@needs_docker\ndef test_nested_workflow(tmp_path: Path) -> None:\n check_provenance(cwltool(tmp_path, get_data(\"tests/wf/nested.cwl\")), nested=True)\n\n\n@needs_docker\ndef test_secondary_files_implicit(tmp_path: Path) -> None:\n file1 = tmp_path / \"foo1.txt\"\n file1idx = tmp_path / \"foo1.txt.idx\"\n\n with open(str(file1), \"w\", encoding=\"ascii\") as f:\n f.write(\"foo\")\n with open(str(file1idx), \"w\", encoding=\"ascii\") as f:\n f.write(\"bar\")\n\n # secondary will be picked up by .idx\n folder = cwltool(tmp_path, get_data(\"tests/wf/sec-wf.cwl\"), \"--file1\", str(file1))\n check_provenance(folder, secondary_files=True)\n check_secondary_files(folder)\n\n\n@needs_docker\ndef test_secondary_files_explicit(tmp_path: Path) -> None:\n # Deliberately do NOT have common basename or extension\n file1dir = tmp_path / \"foo\"\n file1dir.mkdir()\n file1 = file1dir / \"foo\"\n file1idxdir = tmp_path / \"bar\"\n file1idxdir.mkdir()\n file1idx = file1idxdir / \"bar\"\n\n with open(file1, \"w\", encoding=\"ascii\") as f:\n f.write(\"foo\")\n with open(file1idx, \"w\", encoding=\"ascii\") as f:\n f.write(\"bar\")\n\n # explicit secondaryFiles\n job = {\n \"file1\": {\n \"class\": \"File\",\n \"path\": str(file1),\n \"basename\": \"foo1.txt\",\n \"secondaryFiles\": [\n {\n \"class\": \"File\",\n \"path\": str(file1idx),\n \"basename\": \"foo1.txt.idx\",\n }\n ],\n }\n }\n\n jobJson = tmp_path / \"job.json\"\n with open(jobJson, \"wb\") as fp:\n j = json.dumps(job, ensure_ascii=True)\n fp.write(j.encode(\"ascii\"))\n\n folder = cwltool(tmp_path, get_data(\"tests/wf/sec-wf.cwl\"), str(jobJson))\n check_provenance(folder, secondary_files=True)\n check_secondary_files(folder)\n\n\n@needs_docker\ndef test_secondary_files_output(tmp_path: Path) -> None:\n # secondary will be picked up by .idx\n folder = cwltool(tmp_path, get_data(\"tests/wf/sec-wf-out.cwl\"))\n check_provenance(folder, secondary_files=True)\n # Skipped, not the same secondary files as above\n # self.check_secondary_files()\n\n\n@needs_docker\ndef test_directory_workflow(tmp_path: Path) -> None:\n dir2 = tmp_path / \"dir2\"\n dir2.mkdir()\n sha1 = {\n # Expected hashes of ASCII letters (no linefeed)\n # as returned from:\n # for x in a b c ; do echo -n $x | sha1sum ; done\n \"a\": \"86f7e437faa5a7fce15d1ddcb9eaeaea377667b8\",\n \"b\": \"e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98\",\n \"c\": \"84a516841ba77a5b4648de2cd0dfcb30ea46dbb4\",\n }\n for x in \"abc\":\n # Make test files with predictable hashes\n with open(dir2 / x, \"w\", encoding=\"ascii\") as f:\n f.write(x)\n\n folder = cwltool(tmp_path, get_data(\"tests/wf/directory.cwl\"), \"--dir\", str(dir2))\n check_provenance(folder, directory=True)\n\n # Output should include ls stdout of filenames a b c on each line\n file_list = (\n folder\n / \"data\"\n / \"3c\"\n / \"3ca69e8d6c234a469d16ac28a4a658c92267c423\"\n # 
checksum as returned from:\n # echo -e \"a\\nb\\nc\" | sha1sum\n # 3ca69e8d6c234a469d16ac28a4a658c92267c423 -\n )\n assert file_list.is_file()\n\n # Input files should be captured by hash value,\n # even if they were inside a class: Directory\n for letter, l_hash in sha1.items():\n prefix = l_hash[:2] # first 2 letters\n p = folder / \"data\" / prefix / l_hash\n assert p.is_file(), f\"Could not find {letter} as {p}\"\n\n\n@needs_docker\ndef test_no_data_files(tmp_path: Path) -> None:\n folder = cwltool(\n tmp_path,\n get_data(\"tests/wf/conditional_step_no_inputs.cwl\"),\n )\n check_bagit(folder)\n\n\ndef check_output_object(base_path: Path) -> None:\n output_obj = base_path / \"workflow\" / \"primary-output.json\"\n compare_checksum = \"sha1$b9214658cc453331b62c2282b772a5c063dbd284\"\n compare_location = \"../data/b9/b9214658cc453331b62c2282b772a5c063dbd284\"\n with open(output_obj) as fp:\n out_json = json.load(fp)\n f1 = out_json[\"sorted_output\"]\n assert f1[\"checksum\"] == compare_checksum\n assert f1[\"location\"] == compare_location\n\n\ndef check_secondary_files(base_path: Path) -> None:\n foo_data = (\n base_path\n / \"data\"\n / \"0b\"\n / \"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33\"\n # checksum as returned from:\n # $ echo -n foo | sha1sum\n # 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 -\n )\n bar_data = base_path / \"data\" / \"62\" / \"62cdb7020ff920e5aa642c3d4066950dd1f01f4d\"\n assert foo_data.is_file(), \"Did not capture file.txt 'foo'\"\n assert bar_data.is_file(), \"Did not capture secondary file.txt.idx 'bar'\"\n\n primary_job = base_path / \"workflow\" / \"primary-job.json\"\n with open(primary_job) as fp:\n job_json = json.load(fp)\n # TODO: Verify secondaryFile in primary-job.json\n f1 = job_json[\"file1\"]\n assert f1[\"location\"] == \"../data/0b/0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33\"\n assert f1[\"basename\"] == \"foo1.txt\"\n\n secondaries = f1[\"secondaryFiles\"]\n assert secondaries\n f1idx = secondaries[0]\n assert f1idx[\"location\"] == \"../data/62/62cdb7020ff920e5aa642c3d4066950dd1f01f4d\"\n assert f1idx[\"basename\"] == \"foo1.txt.idx\"\n\n\ndef check_provenance(\n base_path: Path,\n nested: bool = False,\n single_tool: bool = False,\n directory: bool = False,\n secondary_files: bool = False,\n) -> None:\n check_folders(base_path)\n check_bagit(base_path)\n check_ro(base_path, nested=nested)\n check_prov(\n base_path,\n nested=nested,\n single_tool=single_tool,\n directory=directory,\n secondary_files=secondary_files,\n )\n\n\ndef check_folders(base_path: Path) -> None:\n required_folders = [\n \"data\",\n \"snapshot\",\n \"workflow\",\n \"metadata\",\n os.path.join(\"metadata\", \"provenance\"),\n ]\n\n for folder in required_folders:\n assert (base_path / folder).is_dir()\n\n\ndef check_bagit(base_path: Path) -> None:\n # check bagit structure\n required_files = [\n \"bagit.txt\",\n \"bag-info.txt\",\n \"manifest-sha1.txt\",\n \"tagmanifest-sha1.txt\",\n \"tagmanifest-sha256.txt\",\n ]\n\n for basename in required_files:\n assert (base_path / basename).is_file()\n\n bag = bagit.Bag(str(base_path))\n assert bag.has_oxum()\n (only_manifest, only_fs) = bag.compare_manifests_with_fs()\n assert not list(only_manifest), \"Some files only in manifest\"\n assert not list(only_fs), \"Some files only on file system\"\n missing_tagfiles = bag.missing_optional_tagfiles()\n assert not list(missing_tagfiles), \"Some files only in tagmanifest\"\n bag.validate()\n # TODO: Check other bag-info attributes\n assert arcp.is_arcp_uri(cast(str, 
bag.info.get(\"External-Identifier\")))\n\n\ndef find_arcp(base_path: Path) -> str:\n # First try to find External-Identifier\n bag = bagit.Bag(str(base_path))\n ext_id = cast(str, bag.info.get(\"External-Identifier\"))\n if arcp.is_arcp_uri(ext_id):\n return ext_id\n raise Exception(\"Can't find External-Identifier\")\n\n\ndef _arcp2file(base_path: Path, uri: str) -> Path:\n parsed = arcp.parse_arcp(uri)\n # arcp URIs, ensure they are local to our RO\n assert (\n parsed.uuid == arcp.parse_arcp(find_arcp(base_path)).uuid\n ), \"arcp URI must be local to the research object\"\n\n path = parsed.path[1:] # Strip first /\n return base_path / Path(path)\n\n\ndef check_ro(base_path: Path, nested: bool = False) -> None:\n manifest_file = base_path / \"metadata\" / \"manifest.json\"\n assert manifest_file.is_file(), f\"Can't find {manifest_file}\"\n arcp_root = find_arcp(base_path)\n base = urllib.parse.urljoin(arcp_root, \"metadata/manifest.json\")\n g = Graph()\n\n # Avoid resolving JSON-LD context https://w3id.org/bundle/context\n # so this test works offline\n context = Path(get_data(\"tests/bundle-context.jsonld\")).as_uri()\n with open(manifest_file, encoding=\"UTF-8\") as fh:\n jsonld = fh.read()\n # replace with file:/// URI\n jsonld = jsonld.replace(\"https://w3id.org/bundle/context\", context)\n g.parse(data=jsonld, format=\"json-ld\", publicID=base)\n if os.environ.get(\"DEBUG\"):\n print(\"Parsed manifest:\\n\\n\")\n g.serialize(cast(IO[bytes], sys.stdout), format=\"ttl\")\n _ro = None\n\n for _ro in g.subjects(ORE.isDescribedBy, URIRef(base)):\n break\n assert _ro is not None, \"Can't find RO with ore:isDescribedBy\"\n\n profile = None\n for dc in g.objects(_ro, DCTERMS.conformsTo):\n profile = dc\n break\n assert profile is not None, \"Can't find profile with dct:conformsTo\"\n assert profile == URIRef(\n provenance_constants.CWLPROV_VERSION\n ), f\"Unexpected cwlprov version {profile}\"\n\n paths = []\n externals = []\n for aggregate in g.objects(_ro, ORE.aggregates):\n if not arcp.is_arcp_uri(cast(str, aggregate)):\n externals.append(aggregate)\n # Won't check external URIs existence here\n # TODO: Check they are not relative!\n continue\n lfile = _arcp2file(base_path, cast(str, aggregate))\n paths.append(os.path.relpath(lfile, base_path))\n assert os.path.isfile(lfile), f\"Can't find aggregated {lfile}\"\n\n assert paths, \"Didn't find any arcp aggregates\"\n assert externals, \"Didn't find any data URIs\"\n\n for ext in [\"provn\", \"xml\", \"json\", \"jsonld\", \"nt\", \"ttl\"]:\n f = \"metadata/provenance/primary.cwlprov.%s\" % ext\n assert f in paths, \"provenance file missing \" + f\n\n for f in [\n \"workflow/primary-job.json\",\n \"workflow/packed.cwl\",\n \"workflow/primary-output.json\",\n ]:\n assert f in paths, \"workflow file missing \" + f\n # Can't test snapshot/ files directly as their name varies\n\n # TODO: check urn:hash::sha1 thingies\n # TODO: Check OA annotations\n\n packed = urllib.parse.urljoin(arcp_root, \"/workflow/packed.cwl\")\n primary_job = urllib.parse.urljoin(arcp_root, \"/workflow/primary-job.json\")\n primary_prov_nt = urllib.parse.urljoin(arcp_root, \"/metadata/provenance/primary.cwlprov.nt\")\n uuid = arcp.parse_arcp(arcp_root).uuid\n\n highlights = set(g.subjects(OA.motivatedBy, OA.highlighting))\n assert highlights, \"Didn't find highlights\"\n for h in highlights:\n assert (h, OA.hasTarget, URIRef(packed)) in g\n\n describes = set(g.subjects(OA.motivatedBy, OA.describing))\n for d in describes:\n assert (d, OA.hasBody, URIRef(arcp_root)) in 
g\n assert (d, OA.hasTarget, URIRef(uuid.urn)) in g\n\n linked = set(g.subjects(OA.motivatedBy, OA.linking))\n for link in linked:\n assert (link, OA.hasBody, URIRef(packed)) in g\n assert (link, OA.hasBody, URIRef(primary_job)) in g\n assert (link, OA.hasTarget, URIRef(uuid.urn)) in g\n\n has_provenance = set(g.subjects(OA.hasBody, URIRef(primary_prov_nt)))\n for p in has_provenance:\n assert (p, OA.hasTarget, URIRef(uuid.urn)) in g\n assert (p, OA.motivatedBy, PROV.has_provenance) in g\n # Check all prov elements are listed\n formats = set()\n for prov in g.objects(p, OA.hasBody):\n assert (\n prov,\n DCTERMS.conformsTo,\n URIRef(provenance_constants.CWLPROV_VERSION),\n ) in g\n # NOTE: DC.format is a Namespace method and does not resolve like other terms\n formats.update(set(g.objects(prov, DC[\"format\"])))\n assert formats, \"Could not find media types\"\n expected = {\n Literal(f)\n for f in (\n \"application/json\",\n \"application/ld+json\",\n \"application/n-triples\",\n 'text/provenance-notation; charset=\"UTF-8\"',\n 'text/turtle; charset=\"UTF-8\"',\n \"application/xml\",\n )\n }\n assert formats == expected, \"Did not match expected PROV media types\"\n\n if nested:\n # Check for additional PROVs\n # Let's try to find the other wf run ID\n otherRuns = set()\n for p in g.subjects(OA.motivatedBy, PROV.has_provenance):\n if (p, OA.hasTarget, URIRef(uuid.urn)) in g:\n continue\n otherRuns.update(set(g.objects(p, OA.hasTarget)))\n assert otherRuns, \"Could not find nested workflow run prov annotations\"\n\n\ndef check_prov(\n base_path: Path,\n nested: bool = False,\n single_tool: bool = False,\n directory: bool = False,\n secondary_files: bool = False,\n) -> None:\n prov_file = base_path / \"metadata\" / \"provenance\" / \"primary.cwlprov.nt\"\n assert prov_file.is_file(), f\"Can't find {prov_file}\"\n arcp_root = find_arcp(base_path)\n # Note: We don't need to include metadata/provnance in base URI\n # as .nt always use absolute URIs\n g = Graph()\n with open(prov_file, \"rb\") as f:\n g.parse(file=f, format=\"nt\", publicID=arcp_root)\n if os.environ.get(\"DEBUG\"):\n print(\"Parsed %s:\\n\\n\" % prov_file)\n g.serialize(cast(IO[bytes], sys.stdout), format=\"ttl\")\n runs = set(g.subjects(RDF.type, WFPROV.WorkflowRun))\n\n # main workflow run URI (as urn:uuid:) should correspond to arcp uuid part\n uuid = arcp.parse_arcp(arcp_root).uuid\n main_run = URIRef(uuid.urn)\n assert main_run in runs, f\"Can't find run {main_run} in {runs}\"\n # TODO: we should not need to parse arcp, but follow\n # the has_provenance annotations in manifest.json instead\n\n # run should have been started by a wf engine\n\n engines = set(g.subjects(RDF.type, WFPROV.WorkflowEngine))\n assert engines, \"Could not find WorkflowEngine\"\n assert len(engines) == 1, \"Found too many WorkflowEngines: %s\" % engines\n engine = engines.pop()\n\n assert (\n main_run,\n PROV.wasAssociatedWith,\n engine,\n ) in g, \"Wf run not associated with wf engine\"\n assert (\n engine,\n RDF.type,\n PROV.SoftwareAgent,\n ) in g, \"Engine not declared as SoftwareAgent\"\n\n if single_tool:\n activities = set(g.subjects(RDF.type, PROV.Activity))\n assert len(activities) == 1, \"Too many activities: %s\" % activities\n # single tool exec, there should be no other activities\n # than the tool run\n # (NOTE: the WorkflowEngine is also activity, but not declared explicitly)\n else:\n # Check all process runs were started by the main worklow\n stepActivities = set(g.subjects(RDF.type, WFPROV.ProcessRun))\n # Although semantically a 
WorkflowEngine is also a ProcessRun,\n # we don't declare that,\n # thus only the step activities should be in this set.\n assert main_run not in stepActivities\n assert stepActivities, \"No steps executed in workflow\"\n for step in stepActivities:\n # Let's check it was started by the main_run. Unfortunately, unlike PROV-N\n # in PROV-O RDF we have to check through the n-ary qualifiedStart relation\n starts = set(g.objects(step, PROV.qualifiedStart))\n assert starts, \"Could not find qualifiedStart of step %s\" % step\n assert len(starts) == 1, \"Too many qualifiedStart for step %s\" % step\n start = starts.pop()\n assert (\n start,\n PROV.hadActivity,\n main_run,\n ) in g, \"Step activity not started by main activity\"\n # Tip: Any nested workflow step executions should not be in this prov file,\n # but in separate file\n if nested:\n # Find some cwlprov.nt the nested workflow is described in\n prov_ids = set(g.objects(predicate=PROV.has_provenance))\n # FIXME: The above is a bit naive and does not check the subject is\n # one of the steps -- OK for now as this is the only case of prov:has_provenance\n assert prov_ids, \"Could not find prov:has_provenance from nested workflow\"\n\n nt_uris = [uri for uri in prov_ids if uri.endswith(\"cwlprov.nt\")]\n # TODO: Look up manifest conformsTo and content-type rather than assuming magic filename\n assert nt_uris, \"Could not find *.cwlprov.nt\"\n # Load into new graph\n g2 = Graph()\n nt_uri = nt_uris.pop()\n with open(_arcp2file(base_path, nt_uri), \"rb\") as f:\n g2.parse(file=f, format=\"nt\", publicID=nt_uri)\n # TODO: Check g2 statements that it's the same UUID activity inside\n # as in the outer step\n if directory:\n directories = set(g.subjects(RDF.type, RO.Folder))\n assert directories\n\n for d in directories:\n assert (d, RDF.type, PROV.Dictionary) in g\n assert (d, RDF.type, PROV.Collection) in g\n assert (d, RDF.type, PROV.Entity) in g\n assert len(list(g.objects(d, CWLPROV.basename))) == 1\n\n files = set()\n for entry in g.objects(d, PROV.hadDictionaryMember):\n assert (entry, RDF.type, PROV.KeyEntityPair) in g\n # We don't check what that filename is here\n assert set(g.objects(entry, PROV.pairKey))\n\n # RO:Folder aspect\n assert set(g.objects(entry, RO.entryName))\n assert (d, ORE.aggregates, entry) in g\n assert (entry, RDF.type, RO.FolderEntry) in g\n assert (entry, RDF.type, ORE.Proxy) in g\n assert (entry, ORE.proxyIn, d) in g\n assert (entry, ORE.proxyIn, d) in g\n\n # Which file?\n entities = set(g.objects(entry, PROV.pairEntity))\n assert entities\n ef = entities.pop()\n files.add(ef)\n assert (entry, ORE.proxyFor, ef) in g\n assert (ef, RDF.type, PROV.Entity) in g\n\n if not files:\n assert (d, RDF.type, PROV.EmptyCollection) in g\n assert (d, RDF.type, PROV.EmptyDictionary) in g\n if secondary_files:\n derivations = set(g.subjects(RDF.type, CWLPROV.SecondaryFile))\n assert derivations\n for der in derivations:\n sec = set(g.subjects(PROV.qualifiedDerivation, der)).pop()\n prim = set(g.objects(der, PROV.entity)).pop()\n\n # UUID specializes a hash checksum\n assert set(g.objects(sec, PROV.specializationOf))\n # extensions etc.\n sec_basename = set(g.objects(sec, CWLPROV.basename)).pop()\n sec_nameroot = set(g.objects(sec, CWLPROV.nameroot)).pop()\n sec_nameext = set(g.objects(sec, CWLPROV.nameext)).pop()\n assert str(sec_basename) == f\"{sec_nameroot}{sec_nameext}\"\n # TODO: Check hash data file exist in RO\n\n # The primary entity should have the same, but different values\n assert set(g.objects(prim, 
PROV.specializationOf))\n prim_basename = set(g.objects(prim, CWLPROV.basename)).pop()\n prim_nameroot = set(g.objects(prim, CWLPROV.nameroot)).pop()\n prim_nameext = set(g.objects(prim, CWLPROV.nameext)).pop()\n assert str(prim_basename) == f\"{prim_nameroot}{prim_nameext}\"\n\n\n@pytest.fixture\ndef research_object(tmp_path: Path) -> Generator[ResearchObject, None, None]:\n re_ob = ResearchObject(StdFsAccess(str(tmp_path / \"ro\")), temp_prefix_ro=str(tmp_path / \"tmp\"))\n yield re_ob\n close_ro(re_ob)\n\n\ndef test_absolute_path_fails(research_object: ResearchObject) -> None:\n with pytest.raises(ValueError):\n write_bag_file(research_object, \"/absolute/path/fails\")\n\n\ndef test_climboutfails(research_object: ResearchObject) -> None:\n with pytest.raises(ValueError):\n write_bag_file(research_object, \"../../outside-ro\")\n\n\ndef test_writable_string(research_object: ResearchObject) -> None:\n with write_bag_file(research_object, \"file.txt\") as fh:\n assert fh.writable()\n fh.write(\"Hello\\n\")\n\n sha1 = os.path.join(research_object.folder, \"tagmanifest-sha1.txt\")\n assert os.path.isfile(sha1)\n\n with open(sha1, encoding=\"UTF-8\") as sha_file:\n stripped_sha = sha_file.readline().strip()\n assert stripped_sha.endswith(\"file.txt\")\n # stain@biggie:~/src/cwltool$ echo Hello | sha1sum\n # 1d229271928d3f9e2bb0375bd6ce5db6c6d348d9 -\n assert stripped_sha.startswith(\"1d229271928d3f9e2bb0375bd6ce5db6c6d348d9\")\n\n sha256 = os.path.join(research_object.folder, \"tagmanifest-sha256.txt\")\n assert os.path.isfile(sha256)\n\n with open(sha256, encoding=\"UTF-8\") as sha_file:\n stripped_sha = sha_file.readline().strip()\n\n assert stripped_sha.endswith(\"file.txt\")\n # stain@biggie:~/src/cwltool$ echo Hello | sha256sum\n # 66a045b452102c59d840ec097d59d9467e13a3f34f6494e539ffd32c1bb35f18 -\n assert stripped_sha.startswith(\n \"66a045b452102c59d840ec097d59d9467e13a3f34f6494e539ffd32c1bb35f18\"\n )\n\n sha512 = os.path.join(research_object.folder, \"tagmanifest-sha512.txt\")\n assert os.path.isfile(sha512)\n\n\ndef test_writable_unicode_string(research_object: ResearchObject) -> None:\n with write_bag_file(research_object, \"file.txt\") as fh:\n assert fh.writable()\n fh.write(\"Here is a snowman: \\u2603 \\n\")\n\n\ndef test_writable_bytes(research_object: ResearchObject) -> None:\n string = \"Here is a snowman: \\u2603 \\n\".encode()\n with write_bag_file(research_object, \"file.txt\", encoding=None) as fh:\n fh.write(string) # type: ignore\n\n\ndef test_data(research_object: ResearchObject) -> None:\n with write_bag_file(research_object, \"data/file.txt\") as fh:\n assert fh.writable()\n fh.write(\"Hello\\n\")\n\n # Because this is under data/ it should add to manifest\n # rather than tagmanifest\n sha1 = os.path.join(research_object.folder, \"manifest-sha1.txt\")\n assert os.path.isfile(sha1)\n with open(sha1, encoding=\"UTF-8\") as fh2:\n stripped_sha = fh2.readline().strip()\n assert stripped_sha.endswith(\"data/file.txt\")\n\n\ndef test_not_seekable(research_object: ResearchObject) -> None:\n with write_bag_file(research_object, \"file.txt\") as fh:\n assert not fh.seekable()\n with pytest.raises(OSError):\n fh.seek(0)\n\n\ndef test_not_readable(research_object: ResearchObject) -> None:\n with write_bag_file(research_object, \"file.txt\") as fh:\n assert not fh.readable()\n with pytest.raises(OSError):\n fh.read()\n\n\ndef test_truncate_fails(research_object: ResearchObject) -> None:\n with write_bag_file(research_object, \"file.txt\") as fh:\n fh.write(\"Hello there\")\n 
fh.truncate() # OK as we're always at end\n # Will fail because the checksum can't rewind\n with pytest.raises(OSError):\n fh.truncate(0)\n\n\nmod_validness = [\n # Taken from \"Some sample ORCID iDs\" on\n # https://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier\n (\"0000-0002-1825-0097\", True),\n (\"0000-0001-5109-3700\", True),\n (\"0000-0002-1694-233X\", True),\n # dashes optional\n (\"0000000218250097\", True),\n (\"0000000151093700\", True),\n (\"000000021694233X\", True),\n # do not fail on missing digits\n (\"0002-1694-233X\", True),\n # Swap check-digits around to force error\n (\"0000-0002-1825-009X\", False),\n (\"0000-0001-5109-3707\", False),\n (\"0000-0002-1694-2330\", False),\n]\n\n\n@pytest.mark.parametrize(\"mod11,valid\", mod_validness)\ndef test_check_mod_11_2(mod11: str, valid: bool) -> None:\n assert provenance._check_mod_11_2(mod11) == valid\n\n\norcid_uris = [\n # https://orcid.org/ (Expected form)\n (\"https://orcid.org/0000-0002-1694-233X\", \"https://orcid.org/0000-0002-1694-233X\"),\n # orcid.org\n (\"http://orcid.org/0000-0002-1694-233X\", \"https://orcid.org/0000-0002-1694-233X\"),\n # just the number\n (\"0000-0002-1825-0097\", \"https://orcid.org/0000-0002-1825-0097\"),\n # lower-case X is OK (and fixed)\n (\"https://orcid.org/0000-0002-1694-233x\", \"https://orcid.org/0000-0002-1694-233X\"),\n # upper-case ORCID.ORG is OK.. (and fixed)\n (\"https://ORCID.ORG/0000-0002-1694-233X\", \"https://orcid.org/0000-0002-1694-233X\"),\n]\n\n\n@pytest.mark.parametrize(\"orcid,expected\", orcid_uris)\ndef test_valid_orcid(orcid: str, expected: str) -> None:\n assert provenance._valid_orcid(orcid) == expected\n\n\ninvalid_orcids = [\n # missing digit fails (even if checksum is correct)\n \"0002-1694-2332\",\n # Wrong checkdigit fails\n \"https://orcid.org/0000-0002-1694-2332\",\n \"0000-0002-1694-2332\",\n # Missing dashes fails (although that's OK for checksum)\n \"https://orcid.org/000000021694233X\",\n \"000000021694233X\",\n # Wrong hostname fails\n \"https://example.org/0000-0002-1694-233X\",\n # Wrong protocol fails\n \"ftp://orcid.org/0000-0002-1694-233X\",\n # Trying to be clever fails (no URL parsing!)\n \"https://orcid.org:443/0000-0002-1694-233X\",\n \"http://orcid.org:80/0000-0002-1694-233X\",\n # Empty string is not really valid\n \"\",\n]\n\n\n@pytest.mark.parametrize(\"orcid\", invalid_orcids)\ndef test_invalid_orcid(orcid: str) -> None:\n with pytest.raises(ValueError):\n provenance._valid_orcid(orcid)\n\n\ndef test_whoami() -> None:\n username, fullname = provenance._whoami()\n assert username and isinstance(username, str)\n assert fullname and isinstance(fullname, str)\n\n\ndef test_research_object() -> None:\n # TODO: Test ResearchObject methods\n pass\n\n\ndef test_research_object_picklability(research_object: ResearchObject) -> None:\n \"\"\"Research object may need to be pickled (for Toil).\"\"\"\n assert pickle.dumps(research_object) is not None\n","repo_name":"common-workflow-language/cwltool","sub_path":"tests/test_provenance.py","file_name":"test_provenance.py","file_ext":"py","file_size_in_byte":28315,"program_lang":"python","lang":"en","doc_type":"code","stars":316,"dataset":"github-code","pt":"72"} +{"seq_id":"20117823177","text":"def divide(x, y):\n return x / y\n\n\nnums = [1, 2, 3, 4, 5]\n\nprint(nums)\n# print(new_nums)\n\n# new_nums_comprehension = [num for num in nums]\n\n# print(new_nums_comprehension)\n\n# # Mesma coisa com FOR\n\n# new_nums_for = []\n\n# for nums in nums:\n# 
new_nums_for.append(nums)\n\n# print(new_nums_for)\n\n\"\"\"\nWhy use list comprehension?\nIn some cases it lets you do what a for loop does\nin fewer lines\n\"\"\"\n\ndivide_by_2 = [num / 2 for num in nums]\nprint(divide_by_2)\n\ndouble = [num * 2 for num in nums]\nprint(double)\n\ndivided = [divide(num, 2) for num in nums]\nprint(divided)\n\n\nnew_nums = range(1, 11)\n\nnums_in_comprehension_filter = [num for num in new_nums if num >= 5]\nprint(nums_in_comprehension_filter)\n\nodd_numbers = [num for num in new_nums if num % 2 != 0]\npair_numbers = [num for num in new_nums if num % 2 == 0]\n\nprint(odd_numbers, '\\n', pair_numbers)\n","repo_name":"Augustohfa/Python","sub_path":"AulasModulo2/ListComprehension/ListComprehensionOtavioMiranda.py","file_name":"ListComprehensionOtavioMiranda.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28109896660","text":"import numpy as np\nfrom scipy.interpolate import interp1d, BSpline, make_interp_spline\nimport matplotlib.pyplot as plt\n\n\ndef smooth(x, y, N=1, tau=0.3):\n \"\"\" Smooths a function y(x) N times using tau as the smoothing factor\n \n Larger values of tau produce more smoothing\n\n Returns an interpolation function\n \"\"\"\n for _ in range(N):\n b = make_interp_spline(x, y, k=3)\n\n y = b(x)\n dx = np.diff(x)[0]\n alpha = dx/(tau+dx)\n z = [y[0]]\n for yi in y[1:]:\n z.append(z[-1] + alpha*(yi-z[-1]))\n y = z\n return interp1d(x, z, kind='cubic', axis=-1, copy=True, bounds_error=False, fill_value=(y[0], y[-1]), assume_sorted=True)\n\n\ndef test():\n import sys\n sys.path.append(\"./EntryGuidance\")\n from ParametrizedPlanner import profile\n\n switches = [40,120,160]\n banks = [0.06,-1,1,-0.1]\n bankProfile = lambda t: profile(t, switch=switches, bank=banks, order=0)\n\n t1 = np.linspace(0,210,2001)\n bankprof = bankProfile(t1)\n\n\n smoothprof = smooth(t1,bankprof,5)\n t = np.linspace(0,200,8001)\n smoothprof = smoothprof(t)\n # smoothprof = smooth(t, smoothprof, 2)(t)\n plt.plot(t1,bankprof,'r--')\n plt.plot(t,smoothprof,label='Smoothed')\n plt.legend()\n plt.show()\n\n\ndef test_1():\n x = np.linspace(0,1,15000)\n y = np.hstack((x,1+0.2*x,1.2-0.8*x))\n t = np.linspace(0,1,y.size)\n \n ys = smooth(t,y,1,tau=0.009)(t)\n ys2 = smooth(t,y,3,tau=0.004)(t)\n \n plt.plot(t,y,'k--')\n plt.plot(t,ys,label=\"Smoothed Once\")\n plt.plot(t,ys2,label=\"MultiSmoothed\")\n plt.legend()\n \n plt.figure()\n n = 2\n dt = np.diff(t)[n-1:]\n t = t[n:]\n plt.plot(t,np.diff(y,n)/dt**n,'k--')\n plt.plot(t,np.diff(ys,n)/dt**n,label=\"Smoothed Once\")\n plt.plot(t,np.diff(ys2,n)/dt**n,label=\"MultiSmoothed\")\n plt.title('Second Derivatives') # n = 2, so these are second derivatives\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n test_1()\n # test()\n","repo_name":"CDNoyes/EDL-Py","sub_path":"Utils/smooth.py","file_name":"smooth.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"14825902029","text":"import dataclasses\nimport tempfile\nfrom collections import defaultdict\n\nimport torch\nfrom torch.autograd import DeviceType\nfrom .utils import create_bandwidth_info_str, do_bench, get_num_bytes\n\n_kernel_category_choices = [\n \"pointwise\",\n \"reduction\",\n \"persistent_reduction\",\n \"template\",\n \"foreach\",\n]\n\n\ndef get_kernel_category_by_source_code(src_code):\n \"\"\"\n Similar to get_kernel_category but use the source code. 
Call this API\n if we have not compile the src_code to module yet.\n \"\"\"\n choices = [ch for ch in _kernel_category_choices if f\"@{ch}\" in src_code]\n if len(choices) == 1:\n return choices[0]\n else:\n return \"unknown\"\n\n\ndef get_kernel_category(kernel_mod):\n \"\"\"\n Given the module defining a triton kernel, return the category of the kernel.\n Category can be one of:\n - pointwise\n - reduction\n - persistent_reduction\n\n Currently we simply decide the category depending on what decorator is imported\n by the kernel.\n \"\"\"\n choices = [ch for ch in _kernel_category_choices if ch in kernel_mod.__dict__]\n if len(choices) == 1:\n return choices[0]\n else:\n return \"unknown\"\n\n\ndef benchmark_all_kernels(benchmark_name, benchmark_all_configs):\n \"\"\"\n An experimental API used only when config.benchmark_kernel is true.\n\n Run the kernel benchmarks for all the kernels cached in PyCodeCache.\n Used in the compiled modules.\n\n Put this method here rather than codegen it for convenience since its implementation\n does not change based on different graph modules being compiled.\n \"\"\"\n from torch._inductor.codecache import PyCodeCache\n\n def get_triton_kernel(mod):\n from torch._inductor.triton_heuristics import CachingAutotuner\n\n cand_list = [\n v\n for k, v in mod.__dict__.items()\n if k.startswith(\"triton_\") and isinstance(v, CachingAutotuner)\n ]\n assert len(cand_list) == 1\n return cand_list[0]\n\n nfound = 0\n for kernel_key, kernel_mod in PyCodeCache.cache.items():\n if not hasattr(kernel_mod, \"get_args\") or not hasattr(kernel_mod, \"call\"):\n continue\n\n triton_kernel = get_triton_kernel(kernel_mod)\n kernel_category = get_kernel_category(kernel_mod)\n args = kernel_mod.get_args()\n num_in_out_ptrs = len(\n [\n arg_name\n for arg_name in triton_kernel.fn.arg_names\n if arg_name.startswith(\"in_out_ptr\")\n ]\n )\n num_gb = get_num_bytes(*args, num_in_out_args=num_in_out_ptrs) / 1e9\n\n def get_info_str(ms, n_regs, n_spills, shared, prefix=\"\"):\n if not any(x is None for x in [n_regs, n_spills, shared]):\n kernel_detail_str = (\n f\" {n_regs:3} regs {n_spills:3} spills {shared:8} shared mem\"\n )\n else:\n kernel_detail_str = \"\"\n\n gb_per_s = num_gb / (ms / 1e3)\n return create_bandwidth_info_str(\n ms, num_gb, gb_per_s, prefix=prefix, suffix=kernel_detail_str\n )\n\n kernel_desc = (\n f\"{benchmark_name:20} {kernel_category[:3].upper()} {kernel_key[:10]}\"\n )\n if benchmark_all_configs:\n assert hasattr(kernel_mod, \"benchmark_all_configs\")\n bench_result = kernel_mod.benchmark_all_configs(args)\n print(kernel_desc)\n for launcher, ms in bench_result.items():\n print(\n f\" {get_info_str(ms, launcher.n_regs, launcher.n_spills, launcher.shared)} @ {launcher.config}\"\n )\n else:\n ms = do_bench(lambda: kernel_mod.call(args), rep=40, fast_flush=True)\n assert (\n len(triton_kernel.launchers) == 1\n ), \"Autotuner should have selected the best config\"\n launcher = triton_kernel.launchers[0]\n print(\n get_info_str(\n ms,\n launcher.n_regs,\n launcher.n_spills,\n launcher.shared,\n prefix=f\"{kernel_desc} \",\n )\n )\n\n nfound += 1\n if nfound == 0:\n print(\n \"No kernel with benchmark functionality found. Make sure you run inductor with config.benchmark_kernel being True\"\n )\n\n\n@dataclasses.dataclass\nclass ProfileEvent:\n category: str\n key: str\n self_cuda_time_ms: float\n # the benchmark is run multiple times and we average the count across all the\n # runs. 
It should be an integer but define a float just in case.\n count: float\n\n\ndef parse_profile_event_list(benchmark_name, event_list, wall_time_ms, nruns):\n def get_self_cuda_time(ev):\n \"\"\"\n ev.self_cuda_time_total is in microsecond. Convert to millisecond.\n \"\"\"\n return ev.self_cuda_time_total / 1000 / nruns\n\n all_events = defaultdict(list)\n\n def add_event(ev, category):\n profile_ev = ProfileEvent(\n category=category,\n key=ev.key,\n self_cuda_time_ms=get_self_cuda_time(ev),\n count=ev.count / nruns, # average across all runs\n )\n all_events[category].append(profile_ev)\n\n for ev in event_list:\n assert not ev.is_legacy, \"Don't support the legacy profiler\"\n if ev.device_type == DeviceType.CPU:\n # ignore the event on CPU side\n continue\n\n category = \"unknown\"\n if ev.key.startswith(\"triton_\"):\n if ev.key.startswith(\"triton_poi\"):\n category = \"triton_pointwise\"\n elif ev.key.startswith(\"triton_red\"):\n category = \"triton_reduction\"\n elif ev.key.startswith(\"triton_per\"):\n category = \"triton_persistent_reduction\"\n else:\n category = \"triton_unknown\"\n\n add_event(ev, category)\n\n def report_category(category, profile_events):\n from tabulate import tabulate\n\n profile_events.sort(key=lambda ev: ev.self_cuda_time_ms, reverse=True)\n\n rows = []\n total_time = 0.0\n print(f\"\\n == {category} category kernels == \")\n for ev in profile_events:\n total_time += ev.self_cuda_time_ms\n percent = f\"{ev.self_cuda_time_ms / wall_time_ms * 100:.2f}%\"\n rows.append([ev.key[:120], ev.self_cuda_time_ms, ev.count, percent])\n rows.append(\n [\"Total\", total_time, \"\", f\"{total_time / wall_time_ms * 100:.2f}%\"]\n )\n print(\n tabulate(\n rows, headers=[\"Kernel\", \"Self CUDA TIME (ms)\", \"Count\", \"Percent\"]\n )\n )\n return total_time\n\n def report():\n category_list = [\n \"triton_pointwise\",\n \"triton_reduction\",\n \"triton_persistent_reduction\",\n \"triton_unknown\",\n \"unknown\",\n ]\n assert set(all_events.keys()).issubset(\n set(category_list)\n ), f\"{list(all_events.keys())}\"\n\n per_category_wall_time = {}\n total_cuda_ms = 0.0\n for category in category_list:\n if category in all_events:\n _time = report_category(category, all_events[category])\n per_category_wall_time[category] = _time\n total_cuda_ms += _time\n\n gpu_busy_percent = f\"{total_cuda_ms / wall_time_ms * 100:.2f}%\"\n print(f\"\\nPercent of time when GPU is busy: {gpu_busy_percent}\")\n print(f\"Total wall time {wall_time_ms:.3f} ms\")\n\n # output such a line so we can gather such line from all compiled modules from all\n # benchmarks and tabulate it!\n # Columns: benchmark_name, pointwise_percent, reduction_percent, persistent_reduction_percent,\n # unknown_category_percent, GPU_busy_percent, wall_time_ms\n tabulate_line = f\"Output for tabulate: {benchmark_name}\"\n for category in category_list:\n percent = (\n f\"{per_category_wall_time.get(category, 0.0) / wall_time_ms * 100:.2f}%\"\n )\n tabulate_line += f\", {percent}\"\n tabulate_line += f\", {gpu_busy_percent}, {wall_time_ms:.3f}ms\"\n\n print(tabulate_line)\n\n report()\n\n\ndef compiled_module_main(benchmark_name, benchmark_compiled_module_fn):\n \"\"\"\n This is the function called in __main__ block of a compiled module.\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--benchmark-kernels\",\n \"-k\",\n action=\"store_true\",\n help=\"Whether to benchmark each individual kernels\",\n )\n parser.add_argument(\n \"--benchmark-all-configs\",\n \"-c\",\n 
action=\"store_true\",\n help=\"Whether to benchmark each individual config for a kernel\",\n )\n parser.add_argument(\n \"--profile\",\n \"-p\",\n action=\"store_true\",\n help=\"Whether to profile the compiled module\",\n )\n args = parser.parse_args()\n\n if args.benchmark_kernels:\n benchmark_all_kernels(benchmark_name, args.benchmark_all_configs)\n else:\n times = 10\n repeat = 10\n wall_time_ms = (\n benchmark_compiled_module_fn(times=times, repeat=repeat) / times * 1000\n )\n\n if not args.profile:\n return\n\n with torch.profiler.profile(record_shapes=True) as p:\n benchmark_compiled_module_fn(times=times, repeat=repeat)\n\n path = f\"{tempfile.gettempdir()}/compiled_module_profile.json\"\n p.export_chrome_trace(path)\n print(f\"Profiling result for a compiled module of benchmark {benchmark_name}:\")\n print(f\"Chrome trace for the profile is written to {path}\")\n event_list = p.key_averages(group_by_input_shape=True)\n print(event_list.table(sort_by=\"self_cuda_time_total\", row_limit=10))\n parse_profile_event_list(\n benchmark_name, event_list, wall_time_ms, times * repeat\n )\n","repo_name":"pytorch/pytorch","sub_path":"torch/_inductor/wrapper_benchmark.py","file_name":"wrapper_benchmark.py","file_ext":"py","file_size_in_byte":9980,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"37552767749","text":"import os\n\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms\n\nBATCH_SIZE = 100\n\n\nclass PunksDataset(Dataset):\n def __init__(self, path=\"./data/punks.png\", transform=transforms.ToTensor(), start_idx=0, stop_idx=9999):\n self.punks = np.array(Image.open(path).convert('RGB'))\n # 10,000 crypto punks, 24x24 each\n self.transform = transform\n self.start_idx = start_idx\n self.stop_idx = stop_idx\n\n def __getitem__(self, idx):\n # 100 punks per row\n idx = self.start_idx + idx\n i = int((idx//100) * 24)\n j = int(idx % 100 * 24)\n\n img = self.punks[i:i+24, j:j+24, :]\n\n if self.transform:\n img = self.transform(img)\n\n return img, idx # img, label\n\n def __len__(self):\n return self.stop_idx - self.start_idx + 1\n\n\ndef load_train_and_valid_data(batch_size=BATCH_SIZE):\n train_dataset = PunksDataset(start_idx=0, stop_idx=5999)\n valid_dataset = PunksDataset(start_idx=6000, stop_idx=7999)\n\n return [\n DataLoader(train_dataset, batch_size=batch_size,\n shuffle=True, num_workers=os.cpu_count()),\n DataLoader(valid_dataset, batch_size=batch_size,\n shuffle=False, num_workers=os.cpu_count())\n ]\n\n\ndef load_test_data(batch_size=BATCH_SIZE, shuffle=False):\n test_dataset = PunksDataset(start_idx=8000, stop_idx=9999)\n return DataLoader(test_dataset, batch_size=batch_size, shuffle=shuffle)\n","repo_name":"TimothyDelille/variational-auto-encoder-pytorch","sub_path":"data_utils/punks_utils.py","file_name":"punks_utils.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10196325857","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nimport requests as req\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom urllib.request import urlopen\n\n# Create the browser as Chrome and instantiate it.\nbrowser = webdriver.Chrome()\n# Give the browser some time when opening.\nbrowser.implicitly_wait(3)\n\ncount = 0\nkeywords = '화보'\n\nphoto_list = []\nbefore_src = \"\"\n\n# As explained in the overview, this scrapes from naver rather than google,\n# and since I additionally needed high-quality images with a 
resolution of at least 1027x760, extra query options are appended below.\npath_keyword = \"https://search.naver.com/search.naver?where=image&section=image&query=\" + keywords + \"&res_fr=786432&res_to=100000000&sm=tab_opt&face=0&color=0&ccl=0&nso=so%3Ar%2Ca%3Aall%2Cp%3Aall&datetype=0&startdate=0&enddate=0&start=1\"\n\n# Open the browser at this URL.\nbrowser.get(path_keyword)\ntime.sleep(1)\n\n# The tag below was taken from the source shown for each photo.\n# Note that find_elements (not find_element) is used here, so all span tags\n# with the img_border class are fetched.\nsrc_list = []\nfor i in range(100):\n photo_list = browser.find_elements_by_css_selector(\"span.img_border\")\n for index, img in enumerate(photo_list[count:]):\n # Click each tag in the list to bring up the enlarged image.\n img.click()\n\n # Crawling too many images at once gets our IP blocked by the naver/google\n # servers for about 10-20 minutes, so a delay between requests is added to avoid the IP ban.\n time.sleep(1)\n\n # The enlarged image's info is held in an img tag with the _image_source class.\n html_objects = browser.find_element_by_css_selector('img._image_source')\n current_src = html_objects.get_attribute('src')\n print(\"=============================================================\")\n print(\"Current src: \" + current_src)\n if current_src in src_list:\n continue\n else:\n t = urlopen(current_src).read()\n if count < 4000:\n filename = \"./downloads/화보/img_\" + str(count) + \".jpg\"\n with open(filename, \"wb\") as f:\n f.write(t)\n count += 1\n print(\"Img Save Success\")\n src_list.append(current_src)\n else:\n # browser.close()\n break\n","repo_name":"skyc5423/study_and_exercise","sub_path":"GANField/google_image_downloader.py","file_name":"google_image_downloader.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70391757033","text":"\"\"\"\n[[1,2,3,4,5], [6,7,8,9,10]] => [1,2,3,4,5,6,7,8,9,10]\n\n\"\"\"\n\nclass ZigZag:\n def __init__(self, ls1, ls2):\n self.queue = [ls1, ls2]\n\n def next(self):\n v = self.queue.pop(0)\n s = v.pop(0)\n if v:\n self.queue.append(v)\n return s\n\n def has_next(self):\n return True if self.queue else False\n\nlist1 = [1, 3, 5, 7, 9]\nlist2 = [2, 4, 6, 8, 10]\n\nz = ZigZag(list1, list2)\nlist_nums = []\n\nwhile z.has_next():\n list_nums.append(z.next())\n\nprint(list_nums)","repo_name":"pyr01ken/TheAlgorithms","sub_path":"algorithms/zigzag.py","file_name":"zigzag.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13666415099","text":"import graphene\nfrom graphene import (Mutation, Connection, InputObjectType, String,\n Field, Boolean, Node)\nfrom graphene_sqlalchemy import (SQLAlchemyObjectType)\nfrom app.database import Location as LocationModel\nfrom app import DB\nfrom helpers.utils import input_to_dictionary\nfrom .helpers import TotalCount\nfrom app.filters import FilterConnectionField\nfrom .state_province import check_state_province\n\n\ndef check_location(data):\n result = DB.session.query(LocationModel). 
\\\n filter_by(street_address=data['street_address'],\n city=data['city']).first()\n if result is None:\n result = LocationModel(**data)\n DB.session.add(result)\n DB.session.commit()\n return result\n\n\nclass LocationAttribute:\n street_address = String(description=\"Street Address\")\n city = String(description=\"City Location\")\n postal_code = String(description=\"Zip or Postal Code\")\n state_province = String(description=\"Name of state or province\")\n\n\nclass LocationNode(SQLAlchemyObjectType):\n class Meta:\n model = LocationModel\n interfaces = (Node,)\n connection_field_factory = FilterConnectionField.factory\n\n\nclass LocationConnection(Connection):\n \"\"\" Location Connection \"\"\"\n class Meta:\n \"\"\" Location Connection \"\"\"\n node = LocationNode\n interfaces = (TotalCount,)\n\n\nclass CreateLocationInput(graphene.InputObjectType,\n LocationAttribute):\n pass\n\n\nclass CreateLocation(Mutation):\n Location = Field(\n lambda: LocationNode,\n description=\"Location created by this mutation.\")\n\n class Arguments:\n input = CreateLocationInput(required=True)\n\n def mutate(self, info, input_value):\n data = input_to_dictionary(input_value)\n state_data = {'name': input_value['state_province']}\n state_province = check_state_province(state_data)\n data['state_province'] = state_province\n\n return CreateLocation(Location=check_location(data))\n\n\nclass UpdateLocationInput(InputObjectType,\n LocationAttribute):\n id = graphene.ID(required=True,\n description=\"Global Id of the Location.\")\n\n\nclass UpdateLocation(Mutation):\n Location = Field(lambda: LocationNode,\n description=\"Location updated by this mutation.\")\n\n class Arguments:\n input = UpdateLocationInput(required=True)\n\n def mutate(self, info, input_value):\n data = input_to_dictionary(input_value)\n\n location = DB.session.query(LocationModel).\\\n filter_by(id=data['id'])\n location.update(data)\n DB.session.commit()\n location = DB.session.query(LocationModel).\\\n filter_by(id=data['id']).first()\n\n return UpdateLocation(Location=location)\n\n\nclass DeleteLocation(Mutation):\n ok = Boolean()\n\n class Arguments:\n input = UpdateLocationInput(required=True)\n\n def mutate(self, info, input_value):\n data = input_to_dictionary(input_value)\n\n location = DB.session.query(LocationModel).filter_by(id=data['id'])\n location.delete()\n DB.session.commit()\n\n return DeleteLocation(ok=True)\n","repo_name":"BeeRaspberry/bee_api","sub_path":"app/schemas/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10662381717","text":"import argparse\n\nimport tensorflow.compat.v1 as tf\n\ntf.enable_eager_execution()\n\nfrom tensorflow.python.platform import gfile\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\n\n\n# import argparse\n# parser = argparse.ArgumentParser()\n# parser.add_argument('--data_dir', default='', help='base directory to save processed data')\n# opt = parser.parse_args()\n\nFRAMES_PER_VIDEO = 30\nIMG_SHAPE = (64, 64, 3)\n\ndef get_seq(dname):\n data_dir = '%s/%s' % (\"data\", dname)\n\n filenames = gfile.Glob(os.path.join(data_dir, '*'))\n if not filenames:\n raise RuntimeError('No data files found.')\n\n for f in filenames:\n k = 0\n for serialized_example in tf.python_io.tf_record_iterator(f):\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n image_seq = []\n for i in range(30):\n image_name = str(i) + 
'/image_aux1/encoded'\n byte_str = example.features.feature[image_name].bytes_list.value[0]\n # img = Image.open(io.BytesIO(byte_str))\n img = Image.frombytes('RGB', (64, 64), byte_str)\n arr = np.array(img.getdata()).reshape(img.size[1], img.size[0], 3)\n image_seq.append(arr.reshape(1, 64, 64, 3))\n image_seq = np.concatenate(image_seq, axis=0)\n k = k + 1\n yield f, k, image_seq\n\n\ndef generate_examples(filedir):\n files = tf.gfile.Glob(os.path.join(filedir, \"*.tfrecords\"))\n # For each file\n file_id = 0\n for filepath in sorted(files):\n # For each video inside the file\n for video_id, example_str in enumerate(\n tf.io.tf_record_iterator(filepath)):\n example = tf.train.SequenceExample.FromString(example_str)\n\n # Merge all frames together\n all_frames = {'image': [],\n 'action': [],\n 'endeffector_pos': [],\n 'frame_ind': [],\n 'file_idx': []}\n\n for frame_id in range(FRAMES_PER_VIDEO):\n # Extract all features from the original proto context field\n frame_feature = { # pylint: disable=g-complex-comprehension\n out_key: example.context.feature[in_key.format(frame_id)] # pylint: disable=g-complex-comprehension\n for out_key, in_key in [\n #(\"image_main\", \"{}/image_main/encoded\"),\n (\"image\", \"{}/image_aux1/encoded\"),\n (\"endeffector_pos\", \"{}/endeffector_pos\"),\n (\"action\", \"{}/action\"),\n ]\n }\n\n all_frames['frame_ind'].append(frame_id)\n all_frames['file_idx'].append(file_id)\n # Decode float\n for key in (\"endeffector_pos\", \"action\"):\n values = frame_feature[key].float_list.value\n frame_feature[key] = [values[i] for i in range(len(values))]\n all_frames[key].append(frame_feature[key])\n\n # Decode images (from encoded string)\n for key in [\"image\"]:\n img = frame_feature[key].bytes_list.value[0]\n img = np.frombuffer(img, dtype=np.uint8)\n img = np.reshape(img, IMG_SHAPE)\n frame_feature[key] = img\n\n all_frames[key].append(frame_feature[key])\n\n for key in all_frames:\n all_frames[key] = np.stack(all_frames[key])\n\n yield \"%s_%s\" % (filepath.split(\".\")[0], file_id), all_frames\n\n file_id += 1\n\ndef save_seq(args):\n if not os.path.isdir(args.save_dir): os.makedirs(args.save_dir)\n\n gen = generate_examples(args.data_dir)\n\n for f, data_seq in gen:\n f_save = f.split(\"/\")[-1] + \".npz\"\n np.savez(os.path.join(args.save_dir, f_save), **data_seq)\n print(f_save)\n\n#gen = get_seq(\"bair_push\")\n\n#print(tf.gfile.Glob(\"data/bair_push/*.tfrecords\"))\n#save_seq(\"data/bair_push/raw_test\", \"data/bair_push/test\")\n\ndef play_video():\n gen = generate_examples(\"data/bair_push/raw\")\n _, d = next(gen)\n img_seq = d['image']\n print(img_seq.shape, d['action'].shape, d['endeffector_pos'].shape)\n from visualizer import viz_imgseq\n viz_imgseq(img_seq, unnormalize=False)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n # environment\n parser.add_argument('--domain_name', default='bair')\n parser.add_argument('--task_name', default='push')\n parser.add_argument('--seed', type=int, default=1)\n\n parser.add_argument(\"--data_dir\", default='data/bair_push/raw')\n parser.add_argument(\"--save_dir\", default='data/bair_push/orig')\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parse_args()\n save_seq(args)\n","repo_name":"thegyro/unsup_keyp_torch","sub_path":"get_bair_push.py","file_name":"get_bair_push.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8685202312","text":"import cv2\nimport glob\n#finds 
path name of files given a certain pattern\n\nimages=glob.glob(\"*.jpg\")\n#creates a list of file path names\n\nfor image in images:\n img=cv2.imread(image,0)\n re=cv2.resize(img,(100,100))\n cv2.imshow(\"Hey\",re)\n cv2.waitKey(500)\n cv2.destroyAllWindows()\n cv2.imwrite(\"resized_\"+image,re)","repo_name":"jonchuasoonkiat/OpenCVProj","sub_path":"batch_resizing.py","file_name":"batch_resizing.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42536829248","text":"'''Routes used to get a list of available signatures, outcomes and models for dropdown options.'''\nfrom flask import request\nfrom flask_restful import Resource\nfrom sqlalchemy.orm import load_only\nfrom db.models.signature_meta import Meta\n\nclass ExploreDropdownOption(Resource):\n def get(self):\n\n # fetch data from the database\n result = {\n 'signatures': [],\n 'outcome': [],\n 'model': []\n }\n options = Meta.query.with_entities(Meta.signature, Meta.outcome, Meta.model).distinct().filter().all()\n\n for option in options:\n result['signatures'].append(option[0])\n result['outcome'].append(option[1])\n result['model'].append(option[2])\n \n result['signatures'] = list(set(result['signatures']))\n result['outcome'] = list(set(result['outcome']))\n result['model'] = list(set(result['model']))\n\n result['signatures'].sort()\n result['outcome'].sort()\n result['model'].sort()\n\n return result, 200\n \n def post(self):\n return \"Only get method is allowed\", 400\n","repo_name":"bhklab/PredictIO-webapp","sub_path":"resources/dropdown_explore.py","file_name":"dropdown_explore.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"27415457105","text":"\nfrom multiprocessing import Value\nfrom scipy.interpolate.fitpack2 import InterpolatedUnivariateSpline, RectBivariateSpline\nfrom ecrad_pylib.Equilibrium_Utils_AUG import EQData\nfrom ecrad_pylib.Shotfile_Handling_AUG import load_IDA_data\nfrom ecrad_pylib.ece_optics_2019 import get_ECE_launch_v2, plot1DECE\nfrom ecrad_pylib.TB_Communication import launch, make_TORBEAM_no_data_load\nfrom ecrad_pylib.ECRad_Scenario import ECRadScenario\nfrom ecrad_pylib.ECRad_Results import ECRadResults\nimport numpy as np\nimport os\nfrom scipy import constants as cnst\nfrom plasma_math_tools.data_fitting import make_fit, gauss_fit_func\nfrom ecrad_pylib.Plotting_Configuration import plt\n\ndef CECE_workflow(working_dir, Results_filename, time, diag_name=\"CEC\"):\n Results = ECRadResults()\n Results.load(Results_filename)\n logfile = open(os.path.join(working_dir, f\"CECE_params_{Results.Scenario['shot']}\"), \"w\")\n get_BPD_width(Results, 0, logfile=logfile)\n run_ECE_TORBEAM_ECRad_Scenario(working_dir, Results, time, logfile=logfile, diag_name=diag_name)\n logfile.close()\n plt.show()\n\ndef get_BPD_width(Results, itime=0, logfile=None):\n # Gaussian fit of all BPD\n plt.figure()\n for ich in range(len(Results.Scenario[\"diagnostic\"][\"f\"][itime])):\n params = np.zeros(3)\n params[0] = np.max(Results[\"BPD\"][\"BPD\"][itime][ich][0])\n params[1] = Results[\"resonance\"][\"rhop_warm\"][0][itime][ich]\n params[2] = 0.05\n fitted_params, unc = make_fit(\"gauss\", Results[\"BPD\"][\"rhop\"][itime][ich][0], Results[\"BPD\"][\"BPD\"][itime][ich][0], \n p_est = params)\n plt.plot(Results[\"BPD\"][\"rhop\"][itime][ich][0], Results[\"BPD\"][\"BPD\"][itime][ich][0], label=f\"original ch. 
{ich+1}\")\n plt.plot(Results[\"BPD\"][\"rhop\"][itime][ich][0], \n gauss_fit_func(Results[\"BPD\"][\"rhop\"][itime][ich][0], fitted_params[0], \n fitted_params[1], fitted_params[2]), linestyle=\"--\", label=f\"fit ch. {ich+1}\")\n if(logfile is not None):\n logfile.write(\"Channel {0:d} rho_pol position: {1:1.3f}\\n\".format(ich + 1, fitted_params[1]))\n logfile.write(\"Channel {0:d} rho_pol width: {1:1.3f}\\n\".format(ich + 1, fitted_params[2]))\n else:\n print(\"Channel {0:d} rho_pol width: {1:1.3f}\".format(ich + 1, fitted_params[2]))\n mask = Results[\"ray\"][\"rhop\"][itime][ich][0][0] > 0.0\n R_center_spl = InterpolatedUnivariateSpline(Results[\"ray\"][\"s\"][itime][ich][0][0][mask], \n Results[\"ray\"][\"rhop\"][itime][ich][0][0][mask]\n - fitted_params[1])\n R_in_spl = InterpolatedUnivariateSpline(Results[\"ray\"][\"s\"][itime][ich][0][0][mask], \n Results[\"ray\"][\"rhop\"][itime][ich][0][0][mask]\n - fitted_params[1] + fitted_params[2]/2.0)\n R_out_spl = InterpolatedUnivariateSpline(Results[\"ray\"][\"s\"][itime][ich][0][0][mask], \n Results[\"ray\"][\"rhop\"][itime][ich][0][0][mask]\n - fitted_params[1] - fitted_params[2]/2.0)\n R_spl = InterpolatedUnivariateSpline(Results[\"ray\"][\"s\"][itime][ich][0][0], \n Results[\"ray\"][\"R\"][itime][ich][0][0]) \n s_center_roots = R_center_spl.roots()\n s_center = s_center_roots[np.argmin(np.abs(s_center_roots - Results[\"resonance\"][\"s_cold\"][0][itime][ich]))] \n s_in_roots = R_in_spl.roots()\n s_in = s_in_roots[np.argmin(np.abs(s_in_roots - Results[\"resonance\"][\"s_cold\"][0][itime][ich]))]\n s_out_roots = R_out_spl.roots()\n s_out = s_out_roots[np.argmin(np.abs(s_out_roots - Results[\"resonance\"][\"s_cold\"][0][itime][ich]))]\n width = np.abs(R_spl(s_in)-R_spl(s_out))\n if(logfile is not None):\n logfile.write(\"Channel {0:d} R position: {1:1.3f} cm\\n\".format(ich + 1, R_spl(s_center)))\n logfile.write(\"Channel {0:d} R width: {1:1.3f} mm\\n\".format(ich + 1, width*1.e3))\n else:\n print(\"Channel {0:d} R width: {1:1.3f} cm\\n\".format(ich + 1, width*1.e2))\n plt.legend()\n plt.gca().set_xlabel(r\"$\\rho_\\mathrm{pol}$\")\n plt.gca().set_ylabel(r\"$D_\\omega$\")\n plt.suptitle(f\"\\# {Results.Scenario['shot']}\")\n\n\ndef run_ECE_TORBEAM_ECRad_Scenario(working_dir, Results, time, logfile=None, imode=0, diag_name=\"CEC\"):\n itime = np.argmin(np.abs(Results.Scenario[\"time\"] - time))\n plasma_dict = Results.Scenario[\"plasma\"]\n eq_slice = Results.Scenario[\"plasma\"][\"eq_data_2D\"].GetSlice(time)\n diag = Results.Scenario[\"used_diags_dict\"][diag_name]\n f = Results.Scenario[\"diagnostic\"][\"f\"][itime]\n df = Results.Scenario[\"diagnostic\"][\"df\"][itime]\n if(diag_name is not \"CEC\"):\n wg = 8\n dtoECESI = 0.055\n corr = 0.84\n else:\n wg = diag.wg\n dtoECESI = diag.dtoECESI\n corr = diag.corr\n ece_launch = get_ECE_launch_v2(wgIn=wg, antenna=\"CECE\", dtoECESI=dtoECESI,\n freqsSI=f, dfreqsSI=df, R_start=2.5, corr=corr)\n TB_results = torbeam_interface(working_dir, Results.Scenario[\"shot\"], time, itime, \n plasma_dict, eq_slice, ece_launch, \n R_res = Results[\"resonance\"][\"R_warm\"][itime][imode], \n z_res = Results[\"resonance\"][\"z_warm\"][itime][imode], \n logfile=logfile, wg=wg, dtoECESI=dtoECESI, corr=corr)\n\n\n# def run_ECE_TORBEAM_AUG(working_dir, shot, time, frequencies, launch_override=None, EQ_exp=\"AUGD\", \n# EQ_diag=\"EQH\", EQ_ed=0, bt_vac_correction = 1.005, \n# IDA_exp=\"AUGD\", IDA_ed=0, wg=8,dtoECESI=0.055, corr=0.85):\n# # NOT TESTED!\n# IDA_dict = load_IDA_data(shot, [time], IDA_exp, 
IDA_ed)\n#     itime = 0\n#     EQ_obj = EQData(shot, EQ_exp=EQ_exp, EQ_diag=EQ_diag, EQ_ed=EQ_ed)\n#     eq_slice = EQ_obj.GetSlice(time, bt_vac_correction=bt_vac_correction)\n#     if(launch_override is not None):\n#         ece_launch = launch_override\n#     else:\n#         ece_launch = get_ECE_launch_v2(wg, \"CECE\", dtoECESI, frequencies, np.zeros(len(frequencies)), R=2.5, corr=corr)\n#     TB_results = torbeam_interface(working_dir, shot, time, itime, IDA_dict, eq_slice, ece_launch)\n\ndef torbeam_interface(working_dir, shot, time, itime, plasma_dict, eq_slice, ece_launch, R_res, z_res,\n                      wg=8, dtoECESI=0.055, corr=0.85, logfile=None, TB_plot=True, only_TB=False):\n    cece_launches = []\n    for ich in range(len(ece_launch[\"f\"])):\n        cece_launches.append(launch())\n        cece_launches[-1].parse_custom(ece_launch[\"f\"][ich], ece_launch[\"R\"][ich] * np.cos(np.deg2rad(ece_launch[\"phi\"][ich])), \n                                       ece_launch[\"R\"][ich] * np.sin(np.deg2rad(ece_launch[\"phi\"][ich])), ece_launch[\"z\"][ich], \n                                       ece_launch[\"phi_tor\"][ich], ece_launch[\"theta_pol\"][ich], ece_launch[\"width\"][ich], ece_launch[\"dist_focus\"][ich])\n    make_TORBEAM_no_data_load(working_dir, shot, time, plasma_dict[\"rhop_prof\"][itime], plasma_dict[\"Te\"][itime], plasma_dict[\"ne\"][itime], \n                              eq_slice.R, eq_slice.z, eq_slice.Psi, eq_slice.Br, eq_slice.Bt, eq_slice.Bz,\n                              eq_slice.Psi_ax, eq_slice.Psi_sep, cece_launches, ITM=False, \n                              ITER=False, mode = -1)\n    for ich in range(len(ece_launch[\"f\"])):\n        TB_failed = False\n        Rz_data = np.loadtxt(os.path.join(working_dir, \"{0:d}_{1:1.3f}_rays\".format(shot, time), \n                                          \"Rz_beam_{0:1d}.dat\".format(ich + 1).replace(\",\", \"\")))\n        R_center = Rz_data.T[0] * 1.e-2\n        mask = np.logical_and(R_center > np.min(eq_slice.R), R_center < np.max(eq_slice.R))\n        z_center = Rz_data.T[1] * 1.e-2\n        mask[np.logical_or(z_center < np.min(eq_slice.z), z_center > np.max(eq_slice.z))] = False\n        x = None\n        z = None\n        try:\n            if(not np.any(mask)):\n                raise ValueError(\"No points inside flux Matrix!\")\n            if(TB_plot):\n                plt.figure()\n                plt.plot(R_center, z_center)\n                plt.plot(Rz_data.T[2]*1.e-2, Rz_data.T[3]*1.e-2, \"--\")\n                plt.plot(Rz_data.T[4]*1.e-2, Rz_data.T[5]*1.e-2, \"--\")\n            x, z = plot1DECE(wgIn=wg, freq=ece_launch[\"f\"][ich]/1.e9, dtoECE=dtoECESI*1.e3, project='poloidal', doPlot=False)\n            plt.plot(R_center, z_center)\n            plt.plot(x, z.T[0], \":\")\n            plt.plot(x, z.T[1], \":\")\n            plt.plot(x, z.T[2], \"-.\")\n            plt.plot(x, z.T[3], \":\")\n            plt.plot(x, z.T[4], \":\")\n            v_min = min(np.min(z), np.min(Rz_data.T[1:][::2]*1.e-2))\n            v_max = max(np.max(z), np.max(Rz_data.T[1:][::2]*1.e-2))\n            plt.vlines([R_res[ich]], v_min, v_max, linestyles=\"--\", colors=\"k\")\n            plt.gca().set_xlabel(r\"$R$ [m]\")\n            plt.gca().set_ylabel(r\"$z$ [m]\")\n            plt.gca().set_aspect(\"equal\")\n            i_min = np.argmin(np.abs((R_center[mask]-R_res[ich])**2 + (z_center[mask]-z_res[ich])**2))\n            width = np.sqrt((Rz_data.T[2][i_min]*1.e-2 - R_center[i_min])**2 \n                            + (Rz_data.T[3][i_min]*1.e-2 - z_center[i_min])**2)\n            if(logfile is not None):\n                logfile.write(\"TORBEAM width channel {0:d}: {1:1.3f} cm\\n\".format(ich+1, width*1.e2))\n            else:\n                print(\"TORBEAM width channel {0:d}: {1:1.3f} cm\\n\".format(ich+1, width*1.e2))\n            if only_TB:\n                return\n        except ValueError:\n            print(\"Failed to run TORBEAM. 
Using vacuum values!\")\n TB_failed = True\n if(not only_TB or TB_failed):\n if(x is None or z is None):\n x, z = plot1DECE(wgIn=wg, freq=ece_launch[\"f\"][ich]/1.e9, dtoECE=dtoECESI*1.e3, project='poloidal', doPlot=False)\n waist = np.zeros(len(x))\n waist = np.sqrt( ((z[:,2]-z[:,0])**2)+((z[:,2]-z[:, 1])**2) ) \n waist += np.sqrt( ((z[:,2]-z[:,3])**2)+((z[:,2]-z[:, 4])**2) )\n waist /= 2\n R_center = x\n mask = np.logical_and(R_center > np.min(eq_slice.R), R_center < np.max(eq_slice.R))\n z_center = z.T[2]\n mask[np.logical_or(z_center < np.min(eq_slice.z), z_center > np.max(eq_slice.z))] = False\n if(not np.any(mask)):\n raise ValueError(\"No resonance found!\")\n i_min = np.argmin(np.abs((R_center-R_res[ich])**2 + (z_center-z_res[ich])**2))\n if(logfile is not None):\n logfile.write(\"Vacuum width channel {0:d}: {1:1.3f} cm\\n\".format(ich + 1, waist[i_min] * 100.0))\n else:\n print(\"Vacuum width channel {0:d}: {1:1.3f} cm\\n\".format(ich + 1, waist[i_min] * 100.0))\n\n\n\nif __name__ == \"__main__\":\n CECE_workflow(\"/mnt/c/Users/Severin/ECRad/AUG_CECE/\", \n \"/mnt/c/Users/Severin/ECRad/AUG_CECE/ECRad_38423_CEC_ed4.nc\", 3.0, diag_name=\"CEC\")","repo_name":"AreWeDreaming/ECRad_PyLib","sub_path":"src/ecrad_pylib/CECE_tools.py","file_name":"CECE_tools.py","file_ext":"py","file_size_in_byte":11101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6168502868","text":"import numpy as np\nimport scipy as sc\nfrom scipy.sparse.linalg import eigs\nfrom sklearn.metrics import f1_score\nfrom sklearn.cluster import KMeans\nimport networkx as nx\n\ntrue_labels = np.array([1,1,1,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0])\n\ndef k_means(X, k):\n return KMeans(n_clusters=k).fit(X).labels_\n\ndef spectral_clustering(A, k):\n d = np.sum(A, axis=0)\n d = np.array(d)\n if len(d.shape) == 2:\n d = d[0]\n D = np.diag(d)\n D = np.sqrt(np.linalg.inv(D))\n L = D.dot(A).dot(D)\n\n # Calculate eigenvalues and eigenvectors\n eigenvalues, eigenvectors = eigs(L, k)\n X = eigenvectors.real\n # rows_norm = sc.linalg.norm(X, axis=1, ord=2)\n # Y = (X.T / rows_norm).T\n labels = k_means(X, k)\n return labels\n\nif __name__ == '__main__':\n G = nx.karate_club_graph()\n adj = nx.to_numpy_matrix(G)\n pred_labels = spectral_clustering(adj, 2)\n f1 = f1_score(true_labels, pred_labels)\n if f1 < 0.5:\n # this is done since in binary it can go either way.\n true_labels = [1-x for x in true_labels]\n f1 = f1_score(true_labels, pred_labels)\n # print('true', true_labels)\n c1, c2 = [], []\n for i in range(0, len(pred_labels)):\n if pred_labels[i] == 0:\n c1.append(i+1)\n else:\n c2.append(i+1)\n print('Communities are C1 =', c1, ', C2 =', c2, '}')\n print(\"F1 score\", f1)","repo_name":"mohitsudhakar/clustering","sub_path":"spectral_clustering.py","file_name":"spectral_clustering.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70539086953","text":"# 문제 : https://www.acmicpc.net/problem/11279\n\nimport sys\nimport heapq\ninput = sys.stdin.readline\n\nn = int(input())\nnum = []\nfor _ in range(n):\n tmp = int(input())\n if tmp:\n heapq.heappush(num, -tmp)\n else:\n if num:\n print(-heapq.heappop(num))\n else:\n 
print(0)","repo_name":"fbghgus123/algorithm","sub_path":"python/백준/Heap/11278_최대힙.py","file_name":"11278_최대힙.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72340722792","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom multiprocessing import Manager\nimport re\nimport traceback\n\nfrom libcnmc.utils import CODIS_TARIFA, CODIS_ZONA, CINI_TG_REGEXP\nfrom libcnmc.utils import get_ine, get_comptador\nfrom libcnmc.core import MultiprocessBased\n\n\nclass F1(MultiprocessBased):\n def __init__(self, **kwargs):\n super(F1, self).__init__(**kwargs)\n self.codi_r1 = kwargs.pop('codi_r1')\n self.year = kwargs.pop('year', datetime.now().year - 1)\n manager = Manager()\n self.cts = manager.dict()\n self.base_object = 'CUPS'\n self.report_name = 'F1 - CUPS'\n\n def get_codi_tarifa(self, codi_tarifa):\n return CODIS_TARIFA.get(codi_tarifa, '')\n\n def get_sequence(self):\n search_params = []\n return self.connection.GiscedataCupsPs.search(search_params)\n\n def get_zona_qualitat(self, codi_ct):\n zona_qualitat = ''\n if codi_ct:\n if codi_ct in self.cts:\n return self.cts[codi_ct]\n else:\n ct_ids = self.connection.GiscedataCts.search(\n [('name', '=', codi_ct)])\n if ct_ids:\n dades_ct = self.connection.GiscedataCts.read(\n ct_ids[0], ['zona_id'])\n zona_desc = dades_ct['zona_id'][1].upper().replace(' ', '')\n if zona_desc in CODIS_ZONA:\n zona_qualitat = CODIS_ZONA[zona_desc]\n self.cts[codi_ct] = zona_qualitat\n return zona_qualitat\n\n def get_tipus_connexio(o, id_escomesa):\n bloc = o.GiscegisBlocsEscomeses.search(\n [('escomesa', '=', id_escomesa)]\n )\n tipus = ''\n if bloc:\n bloc = o.GiscegisBlocsEscomeses.read(bloc[0], ['node'])\n if bloc['node']:\n node = bloc['node'][0]\n edge = o.GiscegisEdge.search(\n ['|', ('start_node', '=', node), ('end_node', '=', node)]\n )\n if edge:\n edge = o.GiscegisEdge.read(edge[0], ['id_linktemplate'])\n if edge['id_linktemplate']:\n bt = o.GiscedataBtElement.search(\n [('id', '=', edge['id_linktemplate'])]\n )\n if bt:\n bt = o.GiscedataBtElement.read(\n bt[0], ['tipus_linia']\n )\n if bt['tipus_linia']:\n if bt['tipus_linia'][0] == 1:\n tipus = 'A'\n else:\n tipus = 'S'\n return tipus\n\n def consumer(self):\n o_codi_r1 = 'R1-%s' % self.codi_r1[-3:]\n O = self.connection\n ultim_dia_any = '%s-12-31' % self.year\n search_glob = [\n ('state', 'not in', ('esborrany', 'validar')),\n ('data_alta', '<=', ultim_dia_any),\n '|',\n ('data_baixa', '>=', ultim_dia_any),\n ('data_baixa', '=', False)\n ]\n context_glob = {'date': ultim_dia_any, 'active_test': False}\n while True:\n try:\n item = self.input_q.get()\n self.progress_q.put(item)\n fields_to_read = [\n 'name', 'id_escomesa', 'id_municipi', 'cne_anual_activa',\n 'cne_anual_reactiva', 'cnmc_potencia_facturada', 'et'\n ]\n\n cups = O.GiscedataCupsPs.read(item, fields_to_read)\n if not cups or not cups.get('name'):\n self.input_q.task_done()\n continue\n o_name = cups['name'][:22]\n o_codi_ine = ''\n o_codi_prov = ''\n o_zona = ''\n o_potencia_facturada = cups['cnmc_potencia_facturada'] or ''\n if 'et' in cups:\n o_zona = self.get_zona_qualitat(cups['et'])\n if cups['id_municipi']:\n municipi = O.ResMunicipi.read(\n cups['id_municipi'][0], ['ine']\n )\n ine = get_ine(self.connection, municipi['ine'])\n o_codi_ine = ine[1]\n o_codi_prov = ine[0]\n\n o_utmx = ''\n o_utmy = ''\n o_utmz = ''\n o_nom_node = ''\n o_tensio = ''\n o_connexio = ''\n if cups and cups['id_escomesa']:\n o_connexio = 
self.get_tipus_connexio(O,\n                        cups['id_escomesa'][0]\n                    )\n                    search_params = [('escomesa', '=', cups['id_escomesa'][0])]\n                    bloc_escomesa_id = O.GiscegisBlocsEscomeses.search(\n                        search_params\n                    )\n                    if bloc_escomesa_id:\n                        bloc_escomesa = O.GiscegisBlocsEscomeses.read(\n                            bloc_escomesa_id[0], ['node', 'vertex']\n                        )\n                        if bloc_escomesa['vertex']:\n                            vertex = O.GiscegisVertex.read(\n                                bloc_escomesa['vertex'][0], ['x', 'y']\n                            )\n                            o_utmx = round(vertex['x'], 3)\n                            o_utmy = round(vertex['y'], 3)\n                        if bloc_escomesa['node']:\n                            search_params = [('start_node', '=',\n                                              bloc_escomesa['node'][0])]\n                            edge_id = O.GiscegisEdge.search(search_params)\n                            if not edge_id:\n                                search_params = [('end_node', '=',\n                                                  bloc_escomesa['node'][0])]\n                                edge_id = O.GiscegisEdge.search(search_params)\n                            if edge_id:\n                                edge = O.GiscegisEdge.read(\n                                    edge_id[0], ['name']\n                                )\n                                o_nom_node = edge['name']\n\n                search_params = [('cups', '=', cups['id'])] + search_glob\n                polissa_id = O.GiscedataPolissa.search(\n                    search_params, 0, 1, 'data_alta desc', context_glob)\n                o_potencia = ''\n                o_cnae = ''\n                o_pot_ads = ''\n                o_equip = 'MEC'\n                o_cod_tfa = ''\n                o_estat_contracte = 0\n                if polissa_id:\n                    fields_to_read = [\n                        'potencia', 'cnae', 'tarifa', 'butlletins', 'tensio'\n                    ]\n\n                    polissa = O.GiscedataPolissa.read(\n                        polissa_id[0], fields_to_read, context_glob\n                    )\n                    if polissa['tensio']:\n                        o_tensio = polissa['tensio'] / 1000.0\n                    o_potencia = polissa['potencia']\n                    if polissa['cnae']:\n                        o_cnae = polissa['cnae'][1]\n                    # Check whether the butlletins (meter reports) have been updated\n                    if polissa.get('butlletins', []):\n                        butlleti = O.GiscedataButlleti.read(\n                            polissa['butlletins'][-1], ['pot_max_admisible']\n                        )\n                        o_pot_ads = butlleti['pot_max_admisible']\n                    comptador_actiu = get_comptador(\n                        self.connection, polissa['id'], self.year)\n                    if comptador_actiu:\n                        comptador_actiu = comptador_actiu[0]\n                        comptador = O.GiscedataLecturesComptador.read(\n                            comptador_actiu, ['cini', 'tg']\n                        )\n                        if not comptador['cini']:\n                            comptador['cini'] = ''\n                        if comptador.get('tg', False):\n                            o_equip = 'SMT'\n                        elif re.findall(CINI_TG_REGEXP, comptador['cini']):\n                            o_equip = 'SMT'\n                        else:\n                            o_equip = 'MEC'\n                    if polissa['tarifa']:\n                        o_cod_tfa = self.get_codi_tarifa(polissa['tarifa'][1])\n                else:\n                    # If no active contract is found, treat it as\n                    # \"Contrato no activo (CNA)\"\n                    o_equip = 'CNA'\n                    o_estat_contracte = 1\n\n                #consumed energies\n                o_anual_activa = cups['cne_anual_activa'] or 0.0\n                o_anual_reactiva = cups['cne_anual_reactiva'] or 0.0\n                o_any_incorporacio = self.year + 1\n                self.output_q.put([\n                    o_nom_node,\n                    o_utmx,\n                    o_utmy,\n                    o_utmz,\n                    o_cnae,\n                    o_equip,\n                    o_cod_tfa,\n                    o_zona,\n                    o_name,\n                    o_codi_r1,\n                    o_codi_ine,\n                    o_codi_prov,\n                    o_connexio,\n                    o_tensio,\n                    o_estat_contracte,\n                    o_potencia,\n                    o_potencia_facturada,\n                    o_pot_ads or o_potencia,\n                    o_anual_activa,\n                    o_anual_reactiva,\n                    o_any_incorporacio\n                ])\n            except:\n                traceback.print_exc()\n                if self.raven:\n                    self.raven.captureException()\n            finally:\n                self.input_q.task_done()\n","repo_name":"gisce/libCNMC","sub_path":"libcnmc/cir_4_2014/F1.py","file_name":"F1.py","file_ext":"py","file_size_in_byte":9847,"program_lang":"python","lang":"ca","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"16686314100","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import (\n    ElementNotVisibleException,\n    InvalidElementStateException,\n    ElementNotSelectableException,\n    TimeoutException\n)\nimport base\nimport common\nfrom base import 
BasePage\nfrom controls.text import Text\nimport time\n\nlocators = {\n\t'menu': 'class=iconContainer-0-7',\n\t'iam_btn': \"css=button[data-e2e='tn-overview']\",\n\t'create_bucket': \"css=button[data-e2e='bucket-cta']\",\n\t'option_button':'id=topNavOptions',\n\t'profile':'id=profile',\n\t'switch_role':'id=switchRoles',\n\t'logout':'id=logout',\n\n\t'bucketname': \"name=Bucket\",\n\t'versionradio': \"name=Versioning\",\n\t'loggingradio': 'name=Logging',\n\t'nextbutton':'css=button[data-e2e=\"bucket-wiz-next\"]',\n\n\t'users':\"css=a[href='#/users']\",\n\t'groups':\"css=a[href='#/groups']\",\n\t'acccess_key':\"css=a[href='#/access_keys']\",\n\t'policies':\"css=a[href='#/policies']\",\n\t'roles':\"css=a[href='#/roles']\",\n\n\t'alias_btn': 'css=.Flex span.material-icons',\n\t'input_alias': 'css=.Flex input',\n\t'save_alias': 'css=.Flex button',\n\t}\n\nclass Overview(BasePage):\n\turl = common.URL + '#/overview'\n\tBucketName = Text(locators['bucketname'])\n\tinputAlias = Text(locators['input_alias'])\n\t#versioning = RadioButton(locators['versionradio'])\n\t#logging = RadioButton(locators['loggingradio'])\n\n\n\n\tdef wait_until_loaded(self):\n\t\tself.wait_for_available(locators['option_button'])\n\t\treturn self\n\n\n\tdef open(self):\n\t\tself.driver.get(self.url)\n\t\treturn self.wait_until_loaded()\n\n\n\tdef createBucket(self,bucketName):\n\t\tself.find_element_by_locator(locators['create_bucket']).click()\n\t\tself.BucketName = bucketName\n\n\t\tself.find_element_by_locator(locators['nextbutton']).click()\n\n\t\tself.find_element_by_locator(locators['nextbutton']).click()\n\n\t\tself.find_element_by_locator(locators['nextbutton']).click()\n\t\tprint('done')\n\t\t\t\t\n\n\t\n\tdef editAlias(self,alias):\n\t\t#click on alias\n\t\ttime.sleep(1)\n\t\tself.find_elements_by_locator(locators['alias_btn'])[0].click()\n\t\ttime.sleep(1)\n\t\tself.inputAlias = alias\n\t\ttime.sleep(1)\n\t\t#click save\n\t\tself.find_element_by_locator(locators['save_alias']).click()\n\n\n\n","repo_name":"varunbtr/wasabi-selenium","sub_path":"po/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72393056552","text":"from bisect import bisect_left\n\n\ndef find_idx(s):\n if s == '-': return 0\n if s == 'python': return 3\n if s in ['cpp', 'backend', 'junior', 'chicken']: return 1\n if s in ['java', 'frontend', 'senior', 'pizza']: return 2\n return int(s)\n\n\ndef solution(info, queries):\n answer = []\n score_table = [[[[[] for _ in range(3)] for _ in range(3)] for _ in range(3)] for _ in range(4)]\n\n for appli in info:\n appli = appli.split()\n lang, job, rud, food, score = [find_idx(x) for x in appli]\n\n for i in [0, lang]:\n for j in [0, job]:\n for k in [0, rud]:\n for p in [0, food]:\n score_table[i][j][k][p].append(score)\n for i in range(4):\n for j in range(3):\n for k in range(3):\n for p in range(3):\n score_table[i][j][k][p].sort()\n\n for query in queries:\n query = query.replace('and', '')\n query = query.split()\n lang, job, rud, food, score = [find_idx(x) for x in query]\n ret = len(score_table[lang][job][rud][food]) - bisect_left(score_table[lang][job][rud][food], score)\n answer.append(ret)\n\n return answer\n\n\nprint(solution(\n [\"java backend junior pizza 150\", \"python frontend senior chicken 210\", \"python frontend senior chicken 150\",\n \"cpp backend senior pizza 260\", \"java backend junior chicken 80\", \"python backend senior chicken 50\"],\n 
[\"java and backend and junior and pizza 100\", \"python and frontend and senior and chicken 200\",\n \"cpp and - and senior and pizza 250\", \"- and backend and senior and - 150\", \"- and - and - and chicken 100\",\n \"- and - and - and - 150\"]))\n","repo_name":"112224/algorithm","sub_path":"python3/P_순위 검색.py","file_name":"P_순위 검색.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38285733655","text":"def juni_latin(word):\n punc = ''\n if word[-1] in \"?'.;,\":\n punc = word[-1]\n word = word[:-1]\n first = word[0]\n word = word[1:]\n new = \" \"\n if punc == \".\" or punc == \"?\":\n new = \"\\n\"\n word = word + first + \"ay\" + punc + new\n\n return word\n\n\nf = open(\"input_punctuation.txt\", \"r\")\na = open(\"juni_latin.txt\", \"w\")\n\nfile_list = f.readlines()\ninput_list = []\n\nfor i in range(len(file_list)):\n for j in file_list[i].split():\n input_list.append(j)\n\nfor i in range(len(input_list)):\n a.write(juni_latin(input_list[i].strip()))","repo_name":"AuritroSaha/auritro_coding_portfolio","sub_path":"JUNI/Python Level 3/AM12/Juni Latin With File IO.py","file_name":"Juni Latin With File IO.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30941640739","text":"import csv\nimport random\nimport math\nimport numpy as np\n\nn_sample = 1000 # number of sampling \nn_ben = 900 # number of benign alerts in each sample\nn_mal = 100 # number of attacks in each sample\nn_attack = 8\nn_alert = 5\n\ndata = []\nfilename = '../data/credit.csv'\nwith open(filename) as f:\n reader = csv.reader(f)\n data = list(reader)\n\n# 1. Get distributions of background false positive alerts\nFP_alert = np.zeros((n_alert, n_sample))\nfor i in range(n_sample):\n\t# Get the benigns\n\tben = random.sample(data, n_ben)\n\tfor j in range(n_ben):\n\t\tif ben[j][0] == 'A14':\n\t\t\tFP_alert[0][i] += 1\n\t\tif ben[j][0] == 'A11' and (ben[j][3] == 'A40' or ben[j][3] == 'A46'):\n\t\t\tFP_alert[1][i] += 1\n\t\tif (ben[j][0] == 'A12' or ben[j][0] == 'A13') and ben[j][3] == 'A40' and (ben[j][16] == 'A171' or ben[j][16] == 'A172'): \n\t\t\tFP_alert[2][i] += 1\n\t\tif (ben[j][0] == 'A12' or ben[j][0] == 'A13') and (ben[j][3] == 'A42' or ben[j][3] == 'A43' or ben[j][3] == 'A44') and (ben[j][16] == 'A171' or ben[j][16] == 'A172'):\n\t\t\tFP_alert[3][i] += 1\n\t\tif (ben[j][0] == 'A12' or ben[j][0] == 'A13') and ben[j][2] == 'A34' and ben[j][3] == 'A49':\n\t\t\tFP_alert[4][i] += 1\nprint(\"The mean of each type of alert\")\nfor i in range(n_alert):\n\tprint(\"Alert type\", i+1, \":\", math.ceil(sum(FP_alert[i])/n_sample))\n\n# 2. 
Get probability of triggering an alert for various alert types\nmal = random.sample(data, n_mal)\npr_alert = np.zeros((n_attack, n_alert))\nattack_type = ['A40','A41','A42','A43','A44','A45','A46','A49']\nfor i in range(n_attack):\n\tfor j in range(n_mal):\n\t\tmal[j][3] = attack_type[i]\n\t\tif mal[j][0] == 'A14':\n\t\t\tpr_alert[i][0] += 1\n\t\tif mal[j][0] == 'A11' and (mal[j][3] == 'A40' or mal[j][3] == 'A46'):\n\t\t\tpr_alert[i][1] += 1\n\t\tif (mal[j][0] == 'A12' or mal[j][0] == 'A13') and mal[j][3] == 'A40' and (mal[j][16] == 'A171' or mal[j][16] == 'A172'): \n\t\t\tpr_alert[i][2] += 1\n\t\tif (mal[j][0] == 'A12' or mal[j][0] == 'A13') and (mal[j][3] == 'A42' or mal[j][3] == 'A43' or mal[j][3] == 'A44') and (mal[j][16] == 'A171' or mal[j][16] == 'A172'):\n\t\t\tpr_alert[i][3] += 1\n\t\tif (mal[j][0] == 'A12' or mal[j][0] == 'A13') and mal[j][2] == 'A34' and mal[j][3] == 'A49':\n\t\t\tpr_alert[i][4] += 1\npr_alert /= 100.0\nprint(\"Probability of triggering an alert for various alert types\")\nprint(pr_alert)\n","repo_name":"aronlaszka/AlertPrioritization","sub_path":"code/v1/src/credit_data_process.py","file_name":"credit_data_process.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73169601512","text":"from abc import ABC, abstractmethod\nfrom typing import Optional, Tuple, List, Union, Dict\n\nfrom ivirse.server.client_manager import ClientManager\nfrom ivirse.common.typing import Parameters, FitRes\nfrom ivirse.server.client_proxy import ClientProxy\n\n\nclass Strategy(ABC):\n    \"\"\"Abstract base class for server strategy implementations\"\"\"\n    \n    \n    @abstractmethod\n    def initialize_parameters(\n        self, client_manager: ClientManager\n    ) -> Optional[Parameters]:\n        \"\"\"Initialize the (global) model parameters\n\n        Args:\n            client_manager (ClientManager):\n                The client manager which holds all currently connected clients\n\n        Returns:\n            Optional[Parameters]:\n                If parameters are returned, then the server will treat these\n                as the initial global model parameters\n        \"\"\"\n        \n    @abstractmethod\n    def configure_fit(\n        self, server_round: int, parameters: Parameters, client_manager: ClientManager\n    ):\n        \"\"\"Configure the next round of training\n\n        Args:\n            server_round (int): The current round of federated learning\n            parameters (Parameters): The current (global) model parameters\n            client_manager (ClientManager): The client manager which holds all currently connected clients\n        \"\"\"\n        \n    @abstractmethod\n    def aggregate_fit(\n        self,\n        server_round: int,\n        results: List[Tuple[ClientProxy, FitRes]],\n        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]]\n    ) -> Optional[Parameters]:\n        \"\"\"Aggregate training results\n\n        Args:\n            server_round (int): \n                The current round of federated learning.\n            results (List[Tuple[ClientProxy, FitRes]]):\n                Successful updates from the previously selected and configured\n                clients. Each pair of `(ClientProxy, FitRes)` constitutes a \n                successful update from one of the previously selected clients. Note\n                that not all previously selected clients are necessarily included in\n                this list: a client might drop out and not submit a result. 
For each client\n                that did not submit an update, there should be an `Exception`\n                in `failures`\n            failures (List[Union[Tuple[ClientProxy, FitRes], BaseException]]):\n                Exceptions that occurred while the server was waiting for client\n                updates.\n\n        Returns:\n            Optional[Parameters]:\n                If parameters are returned, then the server will treat these as the\n                new global model parameters (i.e., it will replace the previous parameters\n                with the ones returned from this method). If `None` is returned\n                (e.g., because there were only failures and no viable results)\n                then the server will not update the previous model parameters,\n                the updates received in this round are discarded, and the global\n                model parameters remain the same.\n        \"\"\"","repo_name":"sonhm3029/Basic-Federated-Learning-gRPC-Server","sub_path":"ivirse/server/strategy/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"17551299736","text":"#Day10_input\ninput_data=[int(i.strip(\"\n\")) for i in open(\"Day10_input.txt\",\"r\").readlines()]\nbuilt_in_device=max(input_data)+3\n#input_data.append(built_in_device)\ninput_data.insert(0,0)\nsorted_input=sorted(input_data)\nspace=[]\nmultiplicator_list=[]\nvariations=1\ndiff1_row=0\n\n#get the spaces between the elements of the list\nfor index,item in enumerate(sorted_input):\n    if index<len(sorted_input)-1:\n        space.append(sorted_input[index+1]-item)\n\n#count each run of consecutive 1-spaces and turn it into a multiplier\nfor diff in space:\n    if diff==1:\n        diff1_row+=1\n    else:\n        if diff1_row==2:\n            multiplicator_list.append(2)\n        elif diff1_row>=3:\n            multiplicator=(2**(diff1_row-3))*3+1\n            multiplicator_list.append(multiplicator)\n        diff1_row=0\nif diff1_row==2:\n    multiplicator_list.append(2)\nelif diff1_row>=3:\n    multiplicator=(2**(diff1_row-3))*3+1\n    multiplicator_list.append(multiplicator)\ndiff1_row=0\n\n\nfor multiplyers in multiplicator_list:\n    variations*=multiplyers\nprint(variations)\n 
","repo_name":"JaySalma/Python_Advent_of_Code","sub_path":"Day10_2.py","file_name":"Day10_2.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"185630649","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.types import (\n DeliveryBucket,\n DeliveryOption,\n Model,\n Offer,\n Outlet,\n PickupBucket,\n PickupOption,\n Promo,\n PromoType,\n RegionalDelivery,\n Shop,\n)\nfrom core.testcase import TestCase, main\nfrom core.matcher import NoKey\n\n\nclass T(TestCase):\n @classmethod\n def prepare(cls):\n cls.settings.default_search_experiment_flags += ['market_filter_offers_with_model_without_sku=0']\n cls.index.shops += [\n Shop(fesh=1, priority_region=213, regions=[213], name='Котиковый магаз', delivery_service_outlets=[1]),\n Shop(fesh=2, priority_region=213, regions=[213], name='Магаз котов'),\n Shop(fesh=3, priority_region=213, regions=[213], name='Магаз котеек'),\n Shop(fesh=4, priority_region=213, regions=[213], name='У кота'),\n Shop(fesh=5, priority_region=213, regions=[213], name='Котодом'),\n Shop(fesh=6, priority_region=111, regions=[111], name='Оптовый дом Барские Наковальни'),\n ]\n\n cls.index.outlets += [\n Outlet(fesh=1, region=213, point_id=1),\n ]\n\n cls.index.pickup_buckets += [\n PickupBucket(\n bucket_id=1, options=[PickupOption(outlet_id=1)], delivery_program=DeliveryBucket.REGULAR_PROGRAM\n )\n ]\n\n cls.index.delivery_buckets += [\n DeliveryBucket(\n bucket_id=1,\n regional_options=[\n RegionalDelivery(rid=213, options=[DeliveryOption(price=123)]),\n ],\n ),\n ]\n\n cls.index.offers += [\n Offer(\n title='offer 1',\n price=100000,\n price_old=150000,\n price_history=200000,\n hyperid=100,\n fesh=1,\n pickup_buckets=[1],\n ),\n Offer(\n title='offer 2',\n price=110000,\n price_old=150000,\n price_history=200000,\n hyperid=100,\n fesh=1,\n pickup_buckets=[1],\n ),\n Offer(\n title='offer 3',\n price=100000,\n price_old=150000,\n price_history=200000,\n hyperid=101,\n fesh=2,\n delivery_buckets=[1],\n ),\n Offer(\n title='offer 4',\n hyperid=102,\n price=105000,\n promo=Promo(promo_type=PromoType.PROMO_CODE, key='promo1034701_01_key001', discount_value=50),\n fesh=3,\n delivery_buckets=[1],\n ),\n Offer(\n title='offer 5',\n hyperid=102,\n price=170000,\n promo=Promo(promo_type=PromoType.PROMO_CODE, key='promo1034701_01_key002', discount_value=50),\n fesh=3,\n delivery_buckets=[1],\n ),\n Offer(title='offer 6', hyperid=103, price=115000, fesh=4, delivery_buckets=[1]),\n Offer(\n title='offer 7',\n hyperid=103,\n price=185000,\n promo=Promo(promo_type=PromoType.PROMO_CODE, key='promo1034701_01_key003', discount_value=50),\n fesh=4,\n delivery_buckets=[1],\n ),\n Offer(title='offer 8', hyperid=101, price=110000, fesh=2, delivery_buckets=[1]),\n Offer(title='offer 9', hyperid=101, price=110000, fesh=2, delivery_buckets=[1]),\n Offer(\n title='offer 10', price=100000, price_old=150000, price_history=200000, hyperid=104, fesh=5\n ), # no delivery\n # test_prime_promo_price\n Offer(\n title='promo price 11',\n price=100000,\n benefit_price=90000,\n promo=Promo(promo_type=PromoType.PROMO_CODE, key='p11', discount_value=50),\n hyperid=105,\n fesh=6,\n ), # cheapest benefit_price for this model, good!\n Offer(\n title='promo price 12',\n price=100000,\n benefit_price=96000,\n promo=Promo(promo_type=PromoType.PROMO_CODE, key='p12', discount_value=50),\n hyperid=105,\n fesh=6,\n ), # not so cheap benefit_price for this model, bad!\n 
Offer(title='promo price 13', price=95000, hyperid=105, fesh=6),  # just creating stats\n            Offer(\n                title='promo price 14',\n                price=100000,\n                benefit_price=90000,\n                promo=Promo(promo_type=PromoType.GIFT_WITH_PURCHASE, key='p15'),\n                hyperid=106,\n                fesh=6,\n            ),  # good\n            Offer(\n                title='promo price 15',\n                price=90000,\n                benefit_price=0,\n                promo=Promo(promo_type=PromoType.GIFT_WITH_PURCHASE, key='p16'),\n                hyperid=106,\n                fesh=6,\n            ),  # no benefit_price -> use main price\n            Offer(title='promo price 16', price=95000, hyperid=106, fesh=6),  # just creating stats\n            Offer(\n                title='promo price 17',\n                price=100000,\n                benefit_price=90000,\n                promo=Promo(promo_type=PromoType.N_PLUS_ONE, key='p17'),\n                hyperid=107,\n                fesh=6,\n            ),  # good\n            Offer(\n                title='promo price 18',\n                price=90000,\n                benefit_price=0,\n                promo=Promo(promo_type=PromoType.N_PLUS_ONE, key='p18'),\n                hyperid=107,\n                fesh=6,\n            ),  # no benefit_price -> use main price\n            Offer(title='promo price 19', price=95000, hyperid=107, fesh=6),  # just creating stats\n        ]\n\n        cls.index.models += [\n            Model(hyperid=100, hid=100, title='model with offers 1'),\n            Model(hyperid=101, hid=100, title='model with offers 2'),\n            Model(hyperid=102, hid=100, title='model with offers 3'),\n            Model(hyperid=103, hid=100, title='model with offers 4'),\n            Model(hyperid=104, hid=100, title='model with offers 5'),\n            Model(hyperid=105, hid=100, title='model with offers 6'),\n            Model(hyperid=106, hid=100, title='model with offers 7'),\n            Model(hyperid=107, hid=100, title='model with offers 8'),\n        ]\n\n    def test_promo_code_factor(self):\n        response = self.report.request_json('place=prime&rids=213&text=offer&promo-check-min-price=5&debug=da')\n        self.assertFragmentIn(\n            response,\n            {\n                \"titles\": {\"raw\": \"offer 4\"},\n                \"debug\": {\n                    \"factors\": {\n                        \"GIFTS_WITH_PURCHASE_PROMO_TYPE\": NoKey(\"GIFTS_WITH_PURCHASE_PROMO_TYPE\"),\n                        \"N_PLUS_M_PROMO_TYPE\": NoKey(\"N_PLUS_M_PROMO_TYPE\"),\n                        \"PROMO_CODE_PROMO_TYPE\": \"1\",\n                    }\n                },\n            },\n        )\n\n    def test_check_prime(self):\n        response = self.report.request_json('place=prime&rids=213&text=offer&promo-check-min-price=5')\n        self.assertFragmentIn(\n            response,\n            [\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 1\"}},\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 3\"}},\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 4\"}},\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 6\"}},\n            ],\n        )\n        self.assertFragmentNotIn(response, [{\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 2\"}}])\n        self.assertFragmentNotIn(response, [{\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 5\"}}])\n        self.assertFragmentNotIn(response, [{\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 7\"}}])\n        self.assertFragmentNotIn(response, [{\"entity\": \"product\"}])\n\n        # if a promo filter is added, offer 6 should disappear, but everything else should stay intact\n        response = self.report.request_json('place=prime&rids=213&text=offer&promo-check-min-price=5&promo-type=market')\n        self.assertFragmentIn(\n            response,\n            [\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 1\"}},\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 3\"}},\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"offer 4\"}},\n            ],\n        )\n\n    def test_prime_promo_price(self):\n        response = self.report.request_json('place=prime&rids=111&text=promo+price&promo-check-min-price=1')\n        # promo-code\n        self.assertFragmentIn(\n            response,\n            [\n                {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 11\"}},\n            ],\n        )\n        self.assertFragmentNotIn(\n            response,\n            [\n                {\"entity\": 
\"offer\", \"titles\": {\"raw\": \"promo price 12\"}},\n ],\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 13\"}},\n ],\n )\n\n # gift with purchase\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 14\"}},\n ],\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 15\"}},\n ],\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 16\"}},\n ],\n )\n\n # n plus m\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 17\"}},\n ],\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 18\"}},\n ],\n )\n self.assertFragmentIn(\n response,\n [\n {\"entity\": \"offer\", \"titles\": {\"raw\": \"promo price 19\"}},\n ],\n )\n\n def test_check_promo_list(self):\n response = self.report.request_json('place=promo_list&rids=213')\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"promo\",\n }\n ]\n * 5,\n allow_different_len=False,\n )\n\n response = self.report.request_json('place=promo_list&rids=213&promo-check-min-price=5')\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"promo\",\n }\n ]\n * 3,\n allow_different_len=False,\n )\n\n def test_n_offers(self):\n response = self.report.request_json('place=promo_list&rids=213&promo-check-min-price=5')\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"promo\",\n }\n ]\n * 3,\n allow_different_len=False,\n )\n\n response = self.report.request_json(\n 'place=promo_list&rids=213&promo-check-min-price=5&promo-check-min-price-n-offers=3'\n )\n self.assertFragmentIn(\n response,\n [\n {\n \"entity\": \"promo\",\n }\n ],\n allow_different_len=False,\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_promo_check_min_price.py","file_name":"test_promo_check_min_price.py","file_ext":"py","file_size_in_byte":11811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18025393222","text":"from collections import defaultdict\nfrom nltk.corpus import wordnet as wn\n\nfrom backend.common.common import info, save_pickle\nfrom backend.common.global_variables import lemmas_to_senses_py_file, pos_map\n\nassert wn.get_version() == '3.0'\n\ninfo('Extracting')\nlemmas_to_senses = defaultdict(set)\nfor synset in wn.all_synsets():\n pos = pos_map[synset.pos()]\n\n for word in synset.lemmas():\n sense_id = word.key()\n wordform = word.name()\n index = 1\n\n lemmas_to_senses[f'{wordform.lower()}:{pos}:{index}'].add(sense_id)\n\ninfo('Ordering')\nlemmas_to_senses_ordered = {}\nfor lemma_id, sense_ids in lemmas_to_senses.items():\n wordform, pos, index = lemma_id.split(':')\n synsets = wn.synsets(wordform)\n sense_ids_ordered = []\n for sense in wn.lemmas(wordform):\n sense_id = sense.key()\n if sense_id in sense_ids and sense_id not in sense_ids_ordered:\n sense_ids_ordered.append(sense_id)\n assert set(sense_ids_ordered) == sense_ids, f'{sense_ids_ordered} != {sense_ids}'\n assert len(sense_ids_ordered) == len(set(sense_ids_ordered))\n lemmas_to_senses_ordered[lemma_id] = sense_ids_ordered\n\ninfo('Saving')\nsave_pickle(lemmas_to_senses_py_file, 
lemmas_to_senses_ordered)\n","repo_name":"rowanhm/metaphor-annotation","sub_path":"backend/s4_build_lemma_to_sense_dict.py","file_name":"s4_build_lemma_to_sense_dict.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27599146449","text":"import argparse\n\nAFFIRMATIVE_CONFIRMATION = \"y\"\nNEGATIVE_CONFIRMATION = \"n\"\n\n\ndef normalize_input_confirmations(input):\n lower = input.lower()\n if lower in [\"yes\", \"y\"]:\n return AFFIRMATIVE_CONFIRMATION\n elif lower in [\"no\", \"n\"]:\n return NEGATIVE_CONFIRMATION\n else:\n return False\n\n\ndef build_arg_parser():\n parser = argparse.ArgumentParser()\n # study = parser.add_mutually_exclusive_group()\n\n lists = parser.add_mutually_exclusive_group()\n lists.add_argument(\"--create\", help=\"Create a new List\")\n\n add_words = parser.add_mutually_exclusive_group()\n add_words.add_argument(\"--add\", help=\"Add a word to a list\")\n\n parser.add_argument(\"-l\", \"--list\",\n help=\"The list to add words to. If not provided, will use a list named default\",\n default=\"default\")\n return parser.parse_args()\n","repo_name":"naudo/pomegranate-cli","sub_path":"core/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8662603531","text":"# Binary Search\nfrom typing import List\nfrom os import system\n\n# List must be:\n# - Sorted (Least to greatest)\n# - Have no repeats\ndef binary_search(find: int, numbers: List[int]) -> int:\n first = 0\n last = len(numbers) - 1\n # mid = (last + first) // 2\n mid = last\n\n while True:\n\n not_found = (last - first) <= 0\n\n if not_found:\n return -1\n \n if numbers[mid] == find:\n return mid\n elif numbers[mid] > find:\n last = mid\n mid = (last + first) // 2\n elif numbers[mid] < find:\n first = mid\n mid = (last + first) // 2\n\n\n# test stuff\n\ndef test_empty_list():\n assert binary_search(5, []) == -1\n\n\ndef test_number_not_found():\n dataset = list(range(100))\n assert binary_search(100, dataset) == -1\n\n dataset = list(range(3))\n assert binary_search(100, dataset) == -1\n\n\ndef test_found_last_index():\n dataset = list(range(100))\n assert binary_search(99, dataset) == 99\n\n\ndef test_found_middle_index():\n dataset = list(range(3))\n assert binary_search(1, dataset) == 1\n\n dataset = list(range(100, 200))\n assert binary_search(134, dataset) == 34\n\n\ndef test_found_first_index():\n dataset = list(range(100))\n assert binary_search(0, dataset) == 0","repo_name":"srisaipog/ICS4U-Classwork","sub_path":"Learning/Algorithms/Search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13320147631","text":"from decimal import Decimal\nfrom typing import Dict\nfrom typing import List\n\nfrom rich.console import Console\nfrom tabulate import tabulate\n\nfrom aura_voter.cast_vote import FailedToVoteException\nfrom aura_voter.cast_vote import cast_weighed_vote\nfrom aura_voter.constants import BOT_USERNAME\nfrom aura_voter.constants import GRAVIAURA\nfrom aura_voter.data_collectors.data_processors import extract_pools_with_target_token_included\nfrom aura_voter.data_collectors.data_processors import get_bribes\nfrom aura_voter.data_collectors.graph_collectors import get_all_aura_bribes\nfrom 
aura_voter.data_collectors.graph_collectors import get_all_balancer_pools\nfrom aura_voter.data_collectors.on_chain_collectors import does_pool_have_gauge\nfrom aura_voter.data_collectors.on_chain_collectors import get_balancer_pool_token_balance\nfrom aura_voter.data_collectors.on_chain_collectors import get_locked_graviaura_amount\nfrom aura_voter.data_collectors.on_chain_collectors import get_treasury_controlled_naked_graviaura\nfrom aura_voter.data_collectors.snapshot_collectors import get_current_hh_proposal_round\nfrom aura_voter.data_collectors.snapshot_collectors import get_gauge_weight_snapshot\nfrom aura_voter.discord import send_code_block_to_discord\nfrom aura_voter.discord import send_message_to_discord\nfrom aura_voter.utils import map_choice_id_to_pool_name\nfrom aura_voter.utils import reverse_choice_to_pool_name\nfrom aura_voter.voting_algorithms.poc_algorithm import POCVoter\n\nconsole = Console(width=100000, height=10000)\n\n\ndef collect_and_vote(dry_run=True):\n snapshot = get_gauge_weight_snapshot()\n if not snapshot:\n send_message_to_discord(\"> No active proposal found\", username=BOT_USERNAME)\n return\n send_message_to_discord(\n f\"> Fetched gauge proposal snapshot: {snapshot['id']}\",\n username=BOT_USERNAME,\n )\n send_message_to_discord(\n \"🗳️🗳️🗳️🗳️ New voting round AURA 🗳️🗳️🗳️🗳️\",\n username=BOT_USERNAME\n )\n amount_of_locked_aura = get_locked_graviaura_amount()\n amount_of_treasury_owned_naked_graviaura = get_treasury_controlled_naked_graviaura()\n send_message_to_discord(\n f\"> Locked AURA amount is: {round(amount_of_locked_aura, 2)}\",\n username=BOT_USERNAME\n )\n send_message_to_discord(\n f\"> Treasury owned naked graviAURA \"\n f\"amount is: {round(amount_of_treasury_owned_naked_graviaura, 2)}\",\n username=BOT_USERNAME\n )\n choices = map_choice_id_to_pool_name(snapshot['choices'])\n all_balancer_pools = get_all_balancer_pools() # type: List[Dict]\n # Extract only pools that have target token\n target_pools = extract_pools_with_target_token_included(\n token_addr=GRAVIAURA,\n subgraph_pool_data=all_balancer_pools\n )\n # Filter out pools without gauges\n pools_with_gauges = [pool for pool in target_pools if does_pool_have_gauge(pool['id'])]\n target_pools_with_balances = []\n for pool in pools_with_gauges:\n target_pools_with_balances.append(\n get_balancer_pool_token_balance(GRAVIAURA, pool['id'])\n )\n # Get all aura bribes\n bribes = get_all_aura_bribes()\n current_proposal_index = get_current_hh_proposal_round()\n # Filter our only the bribes that we are interested in for the given snapshot\n if bribes and current_proposal_index:\n bribes = get_bribes(snapshot, bribes, current_proposal_index)\n console.print(bribes)\n # TODO: Before passing pools to algorithm we have to map it to the pool names on Snapsot\n voter = POCVoter(\n Decimal(amount_of_locked_aura), target_pools_with_balances,\n )\n # TODO: Fix me after figuring out gauges\n votes = voter.propose_voting_choices_stable()\n if not votes:\n send_message_to_discord(\"> Nothing to vote for now\", username=BOT_USERNAME)\n return\n\n reversed_choices = reverse_choice_to_pool_name(choices)\n snapshot_formatted_votes = {reversed_choices[pool]: vote for pool, vote in votes.items()}\n console.print(snapshot_formatted_votes)\n suggesting_votes_table = []\n for pool, voting_weight in votes.items():\n vote_power = amount_of_locked_aura * (voting_weight / 100)\n suggesting_votes_table.append(\n [\n pool,\n f\"{round(voting_weight, 2)}%\",\n f\"{round(vote_power, 2)}\",\n ]\n )\n table = 
tabulate(suggesting_votes_table,\n tablefmt=\"grid\",\n headers=[\n \"Pool name\", \"Suggested vote\", \"AURA To Vote\",\n ])\n console.print(table, style=\"bold\")\n send_code_block_to_discord(msg=table, username=BOT_USERNAME)\n if not dry_run:\n try:\n cast_weighed_vote(snapshot_formatted_votes, snapshot['id'])\n except (FailedToVoteException, Exception) as e:\n send_message_to_discord(f\"> Voting failed with reason: {str(e)}\", username=BOT_USERNAME)\n else:\n send_message_to_discord(\"> 🤑🤑🤑🤑 Voting Succeeded 🤑🤑🤑🤑\", username=BOT_USERNAME)\n","repo_name":"cryptobuks/Badger-Finance-aura-autovoter","sub_path":"aura_voter/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13490327701","text":"from django.conf.urls import url\n\nfrom game_room.views import games, banners, room, room_info, apply_history, room_apply, room_apply_balance, upload_img, winner, create_room\nfrom tz_user.views import register, phone_code, login, check_token\n\nurlpatterns = [\n url(r'^games/$', games),\n url(r'^rooms/$', room),\n url(r'^room/$', room_info),\n url(r'^room/create/$', create_room),\n url(r'^banners/$', banners),\n url(r'^apply/history/$', apply_history),\n url(r'^apply/$', room_apply),\n url(r'^room_apply/balance/$', room_apply_balance),\n url(r'^upload_img/$', upload_img),\n url(r'^winner/$', winner),\n]\n","repo_name":"robot-nan/TZGame","sub_path":"game_room/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32468043756","text":"# pretty much just copied from last year with some changes\n\nfrom wpilib import Joystick\n\nclass RotaryJoystick(Joystick):\n def __init__(self, id : int):\n # give methods and properties of parent (Joystick) class\n super().__init__(id)\n\n # initialize some variables\n self.angle_offset = 0.0\n self.setTwistChannel(4)\n\n\n def generate_angle_midmax(self, mid, _min, _max):\n midmax = ((mid - _min) / (_max - _min))\n return midmax\n\n\n def rotary_inputs(self):\n x = self.getX()\n y = self.getY()\n z = self.getZ()\n\n _max = max(x, y, z)\n _min = min(x, y, z)\n\n if (_max - _min) == 0:\n print(\"Joystick is not connected\")\n\n else:\n if ((x <= y) and (y <= z)):\n # mid = y since it is greater that x but less than z\n mid = y\n\n angle = 60 - self.generate_angle_midmax(mid, _min, _max) * 60\n\n if ((y <= x) and (x <= z)):\n # mid = x since it is greater than y but less than z\n mid = x\n\n angle = 60 + self.generate_angle_midmax(mid, _min, _max) * 60\n\n if ((y <= z) and (z <= x)):\n # mid = z since it is greater than y but less than x\n mid = z\n\n angle = 180 - self.generate_angle_midmax(mid, _min, _max) * 60\n\n if ((z <= y) and (y <= x)):\n # mid = y since it is greater than z but less than x\n mid = y\n\n angle = 180 + self.generate_angle_midmax(mid, _min, _max) * 60\n\n if ((z <= x) and (x <= y)):\n # mid = x since it is greater than z but less than y\n mid = x\n\n angle = 300 - self.generate_angle_midmax(mid, _min, _max) * 60\n\n if ((x <= z) and (z <= y)):\n # mid = z since it is greater than x but less than y\n mid = z\n\n angle = 300 + self.generate_angle_midmax(mid, _min, _max) * 60\n\n angle += self.angle_offset\n\n return angle\n\n\n def reset_angle(self, angle):\n self.angle_offset = 0.0\n self.angle_offset = angle - self.rotary_inputs()\n #print(f\"calling reset angle, offset = 
{self.angle_offset}\")\n \n\n\n\n ","repo_name":"FRCTeam2984/ChargedUp2023","sub_path":"src/subsystems/rotary_controller.py","file_name":"rotary_controller.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5032453998","text":"#Initial list\r\nres = []\r\n\r\n# Input lengths\r\nlengths = int(input())\r\n\r\n# Add element\r\nfor i in range(lengths):\r\n # Input elements\r\n n = int(input())\r\n res.append(n)\r\n\r\nstrings = [str(re) for re in res ]\r\n\r\na_res = \"\".join(strings)\r\nnum = int(a_res)\r\n\r\nprint(num)","repo_name":"VoHinhF/GitHub_Vinh","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10262214717","text":"\"\"\"Add Name\n\nRevision ID: a775bd26132e\nRevises: 5f306060b731\nCreate Date: 2020-07-07 02:08:00.874532\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a775bd26132e'\ndown_revision = '5f306060b731'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('did', sa.Column('name', sa.String(length=128), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('did', 'name')\n # ### end Alembic commands ###\n","repo_name":"Kanika1799/nucypher-did-covidcreds","sub_path":"api/migrations/versions/a775bd26132e_add_name.py","file_name":"a775bd26132e_add_name.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"23131370993","text":"import os\nimport shutil\n\n# Get the current repository (so that it can run in VS code run button)\nworkspace = __file__.replace('\\\\', '/').replace('/app.py', '')\n\nINPUT_DIRECTORY = workspace + '/input/'\nOUTPUT_DIRECTORY = workspace + '/output/'\n\n\n# Empties the output directory\ndef reset_directory():\n try:\n shutil.rmtree(OUTPUT_DIRECTORY)\n except:\n pass\n os.mkdir(OUTPUT_DIRECTORY)\n return\n\n# Convert the dir to text via pytesseract\ndef convert(dir, file = \"\"):\n import pytesseract\n from PIL import Image\n\n print(\"Starting conversion of \" + file)\n print(dir)\n text = pytesseract.image_to_string(Image.open(dir), lang=\"chi_tra\")\n f = open(OUTPUT_DIRECTORY + file.rsplit(\".\", maxsplit=1)[0] + '.txt', 'w', encoding=\"utf-8\")\n f.write(text)\n f.close()\n\n\n# Loop through all the directories\ndef loop_directories():\n for subdir, dirs, files in os.walk(INPUT_DIRECTORY):\n for file in files:\n if '.png' in file or '.jpg' in file:\n convert(os.path.join(subdir, file), file)\n\n\n# Main function\ndef main():\n reset_directory()\n loop_directories()\n\n\n\nmain()","repo_name":"Gameboy612/gef_project","sub_path":"code/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38445374212","text":"#!/usr/bin/python3\n# -*- encoding=utf8 -*-\n\n# This is pretty naive and simple spell checker for Russian texts.\n#\n# The code was taken from this article:\n# https://habr.com/ru/company/singularis/blog/358664/\n# https://gist.github.com/Kwentar/a957d29f7370f896b691c82ff9ebe7d2\n#\n# Many thanks to the author of this 
code!\n# I've modified it a little bit for my case:\n# 1) Education now based on Russian words dictionary instead of some\n# Russian text.\n# 2) Increased the size of the model\n# 3) Removed unnecessary Keras dependency.\n#\n\nimport os\nimport requests\nfrom termcolor import colored\n\nimport gensim\nimport pymorphy2\nfrom tqdm import tqdm\n\n\nspell_check_model_name = 'ru_spellcheck.model'\nnecessary_part = {'NOUN', 'ADJF', 'ADJS', 'VERB', 'INFN',\n 'PRTF', 'PRTS', 'GRND'}\n\n# Wrong Russian words to check the model:\nwrong_words = ['челавек', 'стулент', 'студечнеский', 'чиловенчость',\n 'учавствовать', 'тактка', 'вообщем', 'симпотичный', 'зделать',\n 'сматреть', 'алгаритм', 'ложить', 'проверка']\n# Correct words:\nwords_correct = ['человек', 'студент', 'студенческий', 'человечность',\n 'участвовать', 'тактика', 'вообще', 'симпатичный', 'сделать',\n 'смотреть', 'алгоритм', 'положить', 'проверка']\n\n# Download Russian dictionary:\nrussian_words_link = ('https://raw.githubusercontent.com/danakt/'\n 'russian-words/master/russian.txt')\nres = requests.get(russian_words_link)\nres.encoding = 'windows-1251'\nwords_for_eduction = res.text\n\n# Prepare dictionary words for eduction:\n# Note: here we use only first 100000 words, but\n# for real cases we need to use the whole list of words!\nsentences = words_for_eduction.split('\\n')[:10000] + words_correct\nall_dict = sentences\n\nif not os.path.isfile(spell_check_model_name):\n\n morph = pymorphy2.MorphAnalyzer()\n\n for i in tqdm(range(len(sentences))):\n\n word = sentences[i]\n sentence = []\n\n p = morph.parse(word)[0]\n if p.tag.POS in necessary_part:\n sentence.append(p.normal_form)\n\n sentences[i] = sentence\n sentences = [x for x in sentences if x]\n\n # Training the model (it can take 2+ hours):\n model = gensim.models.FastText(sentences, size=300,\n window=1, min_count=1, sg=1, iter=100,\n min_n=1, max_n=6)\n model.init_sims(replace=True)\n\n # Save model to the disk (it will take 200+ Mb on disk)\n print('Saving model to the disk...')\n model.save(spell_check_model_name)\n\n print('Neuron network model is ready!')\n\nelse:\n # Load model from file:\n model = gensim.models.FastText.load(spell_check_model_name)\n model.init_sims(replace=True)\n\n\nspell_checker = model.wv\n\n# Check what options model will suggest to correct each wrong word:\nfor index, word in enumerate(wrong_words):\n\n if word in all_dict:\n print('\\n ---- \\n\\n ', colored(word, 'green'))\n print('The word is correct!')\n else:\n print('\\n ---- \\n\\n ', colored(word, 'red'))\n\n print('Suggested options:')\n for i in spell_checker.most_similar(positive=[word])[:5]:\n print(colored(i[0], 'yellow'), i[1])\n\nprint('\\n ---- \\n\\n ')\n","repo_name":"TimurNurlygayanov/test-tasks-example","sub_path":"telegram/spellchecker.py","file_name":"spellchecker.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"321489192","text":"__all__ = ['Graph']\n\nimport sys, subprocess, tempfile, collections\n\ndef dot_val(obj):\n if isinstance(obj, (int, float, bool)):\n return str(obj)\n if isinstance(obj, unicode):\n obj = obj.encode('utf-8')\n if isinstance(obj, str):\n if obj.isalnum() and obj[0].isalpha():\n return obj\n return '\"' + (obj.replace('\\\\', '\\\\\\\\')\n .replace('\\n', '\\\\l')\n .replace('\"', '\\\\\"')) + '\"'\n raise TypeError(\"Don't know how to convert %r to dot\" % (obj,))\n\ndef dot_attrs(attrs):\n return '[%s]' % ','.join('%s=%s' % (k, dot_val(v)) for 
k, v in attrs.items()\n if v is not None)\n\nclass GNode(object):\n def __init__(self, obj, unique=False, **attrs):\n self.__obj = obj\n self.__unique = unique\n self.__attrs = tuple(attrs.iteritems())\n\n def __hash__(self):\n if self.__unique:\n return super(GNode, self).__hash__()\n return hash((self.__obj, self.__attrs))\n\n def __eq__(self, o):\n if self.__unique or o.__unique:\n return self is o\n return self.__obj == o.__obj and self.__attrs == o.__attrs\n\n def _to_dot(self, graph, nid):\n attrs = dict(self.__attrs)\n attrs.update(graph.obj_attrs(self.__obj))\n return '%s %s;' % (dot_val(nid), dot_attrs(attrs))\n\nclass Graph(object):\n def __init__(self):\n self.__graph_attrs = {}\n self.__node_attrs = {}\n self.__edge_attrs = {}\n # Use an ordered dict to keep the nodes in order\n self.__nodes = collections.OrderedDict()\n self.__edges = set()\n self.__ranks = collections.defaultdict(set)\n\n def graph_attrs(self, **attrs):\n self.__graph_attrs.update(attrs)\n return self\n\n def node_attrs(self, **attrs):\n self.__node_attrs.update(attrs)\n return self\n\n def edge_attrs(self, **attrs):\n self.__edge_attrs.update(attrs)\n return self\n\n def node(self, obj, unique=False, rank=None, **attrs):\n node = GNode(obj, unique, **attrs)\n self.__nodes[node] = True\n if rank is not None:\n self.__ranks[rank].add(node)\n return node\n\n def edge(self, n1, n2, **attrs):\n self.__edges.add((n1, n2, tuple(attrs.iteritems())))\n\n def show(self):\n try:\n with tempfile.NamedTemporaryFile(suffix=\".pdf\") as f:\n p = subprocess.Popen([\"dot\", \"-Tpdf\"], stdin=subprocess.PIPE,\n stdout=f)\n self.to_dot(p.stdin)\n p.stdin.close()\n p.wait()\n subprocess.check_call([\"evince\", f.name])\n except Exception as e:\n print >> sys.stderr, \"Suppressing exception from Graph.show():\"\n import traceback; traceback.print_exc()\n\n def to_dot(self, fp=sys.stdout):\n print >>fp, 'digraph G {'\n for name, attrs in [('graph', self.__graph_attrs),\n ('node', self.__node_attrs),\n ('edge', self.__edge_attrs)]:\n if attrs:\n print >>fp, '%s %s;' % (name, dot_attrs(attrs))\n nids = {}\n for i, node in enumerate(self.__nodes):\n nids[node] = 'n%d' % i\n print >>fp, node._to_dot(self, nids[node])\n for src, dst, attrs in self.__edges:\n print >>fp, '%s -> %s %s;' % (nids[src], nids[dst],\n dot_attrs(dict(attrs)))\n for rank_id, same_rank in self.__ranks.items():\n print >>fp, \"// %s\" % (rank_id,)\n print >>fp, \"{rank=same;%s;}\" % \\\n \";\".join(nids[node] for node in same_rank)\n print >>fp, '}'\n\n def obj_attrs(self, obj):\n return {'label': unicode(obj)}\n","repo_name":"aclements/commuter","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"72"} +{"seq_id":"35259209095","text":"import nibabel as nib\nimport argparse\nimport numpy as np\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--ori',help='The original data path that want to segment')\n parser.add_argument('--mask', help = 'The mask from segmentation')\n parser.add_argument('--out',help='out path of the segmented image')\n args = parser.parse_args()\n mask = nib.load(args.mask)\n img = nib.load(args.ori)\n mask_3d = mask.get_data()\n mask_3d = np.array([mask_3d >= 1][0]) \n img_3d = img.get_data()\n print (img_3d.shape, mask_3d.shape)\n img_new = img_3d * mask_3d\n out = nib.Nifti1Image(img_new, img.affine, img.header)\n print (args.out)\n nib.save(out, args.out)\n \nif __name__ == '__main__':\n 
main()","repo_name":"OliverrrD/lobe_seg_downsampled","sub_path":"func/tools/get_new_lung.py","file_name":"get_new_lung.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17106268828","text":"import torch\nimport torchvision\nfrom torch.utils.data import DataLoader\nimport os\nimport sys\nimport argparse\nimport numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\n# sys.path.insert(1, 'D:/uni/MS/_MS_thesis/codes/our_prequential_learning')\nfrom dataset import ChunkDataset\nfrom samplers.pt_sampler import PtSampler\n\n## == Params ==========================\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='permuted_mnist', help='') #[permuted_mnist, permuted_fmnist, rotated_mnist, rotated_fmnist]\nparser.add_argument('--n_drift', type=int, default=3, help='')\nparser.add_argument('--saved', type=str, default='./dataset/', help='')\nparser.add_argument('--change_points', type=str, default='1,2,3', help='')\nparser.add_argument('--seed', type=int, default=1, help='')\nargs = parser.parse_args()\n\n## == additional params ===============\nargs.dataset_path = 'dataset/{}.csv'.format(args.dataset)\nargs.n_chunk = 70\nargs.chunk_size = 1000\nargs.n_classes = 10\n\n## == Apply seed ======================\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\n\ndef imshow(imgs):\n # imgs *= 255.0\n grid_imgs = torchvision.utils.make_grid(torch.tensor(imgs), nrow=10)\n plt.imshow(grid_imgs.permute(1, 2, 0))\n plt.show()\n\n\nif __name__ == '__main__':\n # change_drift_points = np.random.choice(np.arange(5, args.n_chunk-5), args.n_drift, replace=False)\n # change_drift_points = list(np.sort(change_drift_points))\n change_drift_points = [int(item) for item in args.change_points.split(',')]\n print('Change drift points: {}'.format(change_drift_points))\n \n fig, axs = plt.subplots(args.n_drift+1,)\n\n for idx, current_point in enumerate(change_drift_points + [args.n_chunk]):\n if idx == 0: pervious_point = 0\n else: pervious_point = change_drift_points[idx-1]\n \n # == Define Dataset & test Dataloder ========\n data = read_csv(args.dataset_path, sep=',', header=None).values \n chunk_data = data[pervious_point*args.chunk_size:current_point*args.chunk_size]\n dataset = ChunkDataset(chunk_data, args)\n\n sampler = PtSampler(\n dataset,\n n_way=args.n_classes,\n n_shot=1,\n n_query=0,\n n_tasks=1)\n dataloader = DataLoader(\n dataset,\n batch_sampler=sampler,\n num_workers=1,\n pin_memory=True,\n collate_fn=sampler.episodic_collate_fn)\n\n batch = next(iter(dataloader))\n support_images, support_labels, _, _ = batch\n support_images = torch.squeeze(support_images, 1)\n \n # imshow(support_images)\n grid_imgs = torchvision.utils.make_grid(torch.tensor(support_images), nrow=10)\n \n # For rotation\n angles = [0, 20, 40, 60, 80, 100, 120]\n axs[idx].set_title('rotation {}$^\\circ$'.format(angles[idx]), fontsize=11)\n # For permutation\n # axs[idx].set_title('permutation {}'.format(idx+1), fontsize=11)\n\n axs[idx].set_xticks([])\n axs[idx].set_yticks([])\n axs[idx].imshow(grid_imgs.permute(1, 2, 0))\n print('Change {} done!'.format(idx+1))\n \n fig.savefig('samples.png', format='png', dpi=800)\n 
plt.show()\n","repo_name":"HeydarSoudani/pt_prequential_learning","sub_path":"plot_chunk_samples.py","file_name":"plot_chunk_samples.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72376298154","text":"import sys\r\nsys.path.append('../')\r\n\r\nfrom Lstmndt.detector import Detector\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(description='Parse path to anomaly labels if provided.')\r\nparser.add_argument('-l', '--labels_path', default=None, required=False)\r\nargs = parser.parse_args()\r\n\r\nif __name__ == '__main__':\r\n detector = Detector(labels_path=args.labels_path)\r\n\r\n file_name = 'SMAP/A-1.pkl'\r\n detector.run(file_name)","repo_name":"xuanhao-chen/anomaly-detection","sub_path":"Lstmndt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"31926686459","text":"from pedal import *\n# ########################\n# # 함수 매칭 유무 확인\n# ########################\n# match = find_match(\"\"\"\n# def summate():\n# __expr__\n# \"\"\")\n\n# if not match:\n# explain(\"함수가 정의되지 않았습니다.\", label=\"function_missing\")\n\n\n# ########################\n# # sum VS count\n# ########################\n\n# assert_equal(call('summate', [1,3,5]), 9) # 임의로 backend에서 인자를 넣어서 맞으면 True, 틀리면 False\n\n\n\n\n# ########################\n# # 다음으로 할 내용은...?\n# ########################\n\n\n# # Make sure they don't embed the answer\n# prevent_literal(10)\n# # Make sure they DO use these integers\n# ensure_literal(7)\n# ensure_literal(3)\n# # Make sure they print\n# ensure_function_call('print')\n# # Make sure they used addition operator\n# ensure_operation(\"+\")\n# # Give message if they try to use strings instead of numbers\n# prevent_ast(\"Str\")\n# # Check the actual output\n# assert_output(student, \"7\")\n\n\n# Partial credit for good progress\nif find_asts(\"For\"):\n compliment(\"Good, you have a `for` loop!\", score=\"+10%\")\n\n# Give feedback by finding a common problem-specific mistake\nmatches = find_matches(\"for _expr_ in _list_:\\n\"\n \" ___ = ____ + _list_\")\nif matches:\n explain(\"You shouldn't use the list inside the for loop.\",\n label=\"list_inside_loop\", title=\"List Inside Loop\")\n\n# Check they defined the function with the right type\n# 함수 정의와 리턴 타입 올바른 지 확인\n# ensure_function('add_prices', returns=int)\n# Unit test their function\n\n# 함수 결과가 입력과 출력과 같은지 확인 -> 메시지 출력\n# assert_equal(call('add_prices', [1,2,3]), 6)\n# ensure_function_call('add_prices')","repo_name":"Seongju-Lee/code-analysis","sub_path":"code_analysis.py","file_name":"code_analysis.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70052801192","text":"import numpy as np\nfrom task import Task\n\nclass RedBaron_Agent():\n def __init__(self, task):\n # Task (environment) information\n self.task = task\n self.state_size = task.state_size\n self.action_size = task.action_size\n self.action_low = task.action_low\n self.action_high = task.action_high\n self.action_range = self.action_high - self.action_low\n\n self.w = np.random.normal(\n size=(self.state_size, self.action_size), # weights for simple linear policy: state_space x action_space\n scale=(self.action_range / (2 * self.state_size))) # start producing actions in a decent range\n\n # Score tracker and learning 
parameters\n        self.best_w = None\n        self.best_score = -np.inf\n        self.score = self.best_score\n        self.noise_scale = 0.1\n\n        # Episodes group used to find new best\n        self.episodes_used_for_each_learning_step = 10\n        self.current_try = []\n\n        # Discretize actions\n        self.num_possible_actions = 50\n        self.action_unit_size = self.action_range / self.num_possible_actions\n\n        # Episode variables\n        self.reset_episode()\n\n    def reset_episode(self):\n        self.total_reward = 0.0\n        self.count = 0\n        state = self.task.reset()\n        return state\n\n    def step(self, reward, done):\n        # Save experience / reward\n        self.total_reward += reward\n        self.count += 1\n\n        # Learn, if at end of episode\n        if done:\n            # add experience to buffer\n            self.current_try.append([self.w, self.total_reward])\n\n            # change current weights\n            self.reset_episode()\n            if self.best_score == -np.inf:\n                self.w = self.noise_scale * np.random.normal(size=self.w.shape)\n            else:\n                self.w = self.best_w + self.noise_scale * np.random.normal(size=self.w.shape)\n\n            if len(self.current_try) == self.episodes_used_for_each_learning_step:\n                self.learn()\n\n    def act(self, state):\n        # Choose action based on given state and policy\n        action = np.dot(state, self.w) # simple linear policy\n        return self.discreteAct(action)\n\n    def discreteAct(self, action):\n        integer_action = action / self.action_unit_size\n        action = self.action_unit_size * integer_action\n        return action\n\n    def learn(self):\n        # Learn by random policy search, using a reward-based score\n        current_best_reward = -np.inf\n        current_best_weights = []\n\n        for weights, reward in self.current_try:\n            if reward > current_best_reward:\n                current_best_reward = reward\n                current_best_weights = weights\n        old_best = max(self.best_score, -1000)\n        self.score = current_best_reward\n        if self.score > self.best_score:\n            self.best_score = self.score\n            self.best_w = current_best_weights\n        # set noise based on improvement, low improvement high noise\n        print(\"\\n\")\n        print(\"current best: \", current_best_reward)\n        print(\"old best: \", old_best)\n        print(\"diff: \", (current_best_reward - old_best))\n        print(\"Best weights: \", self.best_w)\n        print(\"\\n\")\n\n        self.noise_scale = min(10, abs(current_best_reward-old_best)+0.1)\n        self.current_try = []\n","repo_name":"domangi/rl-quadcopter","sub_path":"agents/redbaron.py","file_name":"redbaron.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"23131370993","text":"# 1. nyc_weather.csv contains new york city weather for first few days in the month of January. \n# Write a program that can answer following:\n# a. What was the average temperature in first week of Jan\n# b. What was the maximum temperature in first 10 days of Jan\n# Figure out data structure that is best for this problem\n\n#a\n'''Read csv file'''\nlistTemperatures = []\nfile = open(\"HashTable/HashTableProblems/nyc_weather.csv\", \"r\")\nnext(file)\nfor line in file:\n    elements = line.split(\",\")\n    temperature = int(elements[1])\n    listTemperatures.append(temperature)\nsum = 0\nfor i in range(0,7):\n    sum += listTemperatures[i]\nprint()\nprint(\"First Week Average: \" + str(round(sum/7, 2)))\nprint()\n\n#b.\nprint(\"Max Temperature 10 days: \" + str(max(listTemperatures[0:10])))\nprint()\n\n#Best data structure is a list","repo_name":"ishaansathaye/DataStructAlgorithms","sub_path":"HashTable/HashTableProblems/hashTableProb1.py","file_name":"hashTableProb1.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29062804948","text":"# -*- coding: utf-8 -*-\nfrom alibabacloud_credentials import providers, credentials\nfrom alibabacloud_credentials.models import Config\nfrom alibabacloud_credentials.utils import auth_constant as ac\nfrom functools import wraps\n\n\ndef attribute_error_return_none(f):\n    @wraps(f)\n    def i(*args, **kwargs):\n        try:\n            return f(*args, **kwargs)\n        except AttributeError:\n            return\n\n    return i\n\n\nclass Client(object):\n    cloud_credential = None\n\n    def __init__(self, config=None):\n        if config is None:\n            config = Config()\n            provider = providers.DefaultCredentialsProvider()\n            self.cloud_credential = provider.get_credentials()\n        self.cloud_credential = self.get_credential(config)\n\n    def get_credential(self, config):\n        if config.type == ac.ACCESS_KEY:\n            return credentials.AccessKeyCredential(config.access_key_id, config.access_key_secret)\n        elif config.type == ac.STS:\n            return credentials.StsCredential(config.access_key_id, config.access_key_secret, config.security_token)\n        elif config.type == ac.BEARER:\n            return credentials.BearerTokenCredential(config.bearer_token)\n        else:\n            return self.get_provider(config).get_credentials()\n\n    @staticmethod\n    def get_provider(config):\n        if config.type == ac.ECS_RAM_ROLE:\n            return providers.EcsRamRoleCredentialProvider(config=config)\n        elif config.type == ac.RAM_ROLE_ARN:\n            return providers.RamRoleArnCredentialProvider(config=config)\n        elif config.type == ac.RSA_KEY_PAIR:\n            return providers.RsaKeyPairCredentialProvider(config=config)\n        elif config.type == ac.OIDC_ROLE_ARN:\n            return providers.OIDCRoleArnCredentialProvider(config=config)\n        return providers.DefaultCredentialsProvider()\n\n    @attribute_error_return_none\n    def get_access_key_id(self):\n        return self.cloud_credential.get_access_key_id()\n\n    @attribute_error_return_none\n    def get_access_key_secret(self):\n        return self.cloud_credential.get_access_key_secret()\n\n    @attribute_error_return_none\n    def get_security_token(self):\n        return self.cloud_credential.get_security_token()\n\n    @attribute_error_return_none\n    def get_type(self):\n        return self.cloud_credential.credential_type\n\n    @attribute_error_return_none\n    def get_bearer_token(self):\n        return self.cloud_credential.bearer_token\n","repo_name":"aliyun/credentials-python2","sub_path":"alibabacloud_credentials/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39980234512","text":"\"\"\" This file is part of metpx-sarracenia.\n\nmetpx-sarracenia\nDocumentation: 
https://github.com/MetPX/sarracenia\n\nsr_sarra_unit_test.py : test utility tool used for sr_sarra\n\n\nCode contributed by:\n Benoit Lapointe - Shared Services Canada\n\"\"\"\nimport unittest\n\ntry:\n from sr_sarra import *\nexcept ImportError:\n from sarra.sr_sarra import *\n\n\nclass SrSarraTestCase(unittest.TestCase):\n \"\"\" The parent class of all sr_sarra test cases \"\"\"\n sarra = None\n orig_exit = None\n\n @classmethod\n def setUpClass(cls) -> None:\n \"\"\" Setup basic config file with basic include\n\n :return: None\n \"\"\"\n f = open(\"./aaa.conf\", \"w\")\n f.write(\"broker amqp://tfeed@${FLOWBROKER}/\\n\")\n f.write(\"exchange xsarra\\n\")\n f.write(\"notify_only\\n\")\n f.close()\n cls.sarra = sr_sarra(\"aaa\")\n cls.orig_exit = os._exit\n os._exit = sys.exit\n\n @classmethod\n def tearDownClass(cls) -> None:\n \"\"\" Remove configs for sr_sarra tests\n\n :return: None\n \"\"\"\n os.unlink(\"./aaa.conf\")\n os.removedirs(cls.sarra.user_cache_dir)\n os._exit = cls.orig_exit\n\n def test_notify_only_exit(self):\n self.assertTrue(self.sarra.notify_only,\n \"notify_only option does not work properly\")\n\n\ndef suite():\n \"\"\" Create the test suite that include all sr_sarra test cases\n\n :return: sr_sarra test suite\n \"\"\"\n sr_sarra_suite = unittest.TestSuite()\n sr_sarra_suite.addTests(\n unittest.TestLoader().loadTestsFromTestCase(SrSarraTestCase))\n return sr_sarra_suite\n\n\nif __name__ == 'main':\n runner = unittest.TextTestRunner()\n runner.run(suite())\n","repo_name":"MetPX/sarracenia","sub_path":"obsolete/test/unit_tests/sr_sarra_unit_test.py","file_name":"sr_sarra_unit_test.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"72"} +{"seq_id":"73518614633","text":"#### THIS WILL DO THE MAXIMUM LIKELIHOOD FITTING\n#\n# and assorted other things related to that\n#\n# It's kind of awful code. \n#\n# Separate classes for\n# - periodograms (distributed as chi^2_2 or chi^2_2m, for averaged periodograms)\n# - light curves (not really accurate, use scipy.optimize.curve_fit if you can)\n# - Gaussian Processes (for MAP estimates of GPs)\n#\n# Note: This script has grown over three years. It's not very optimised and doesn't\n# necessarily make sense to someone who is not me. Continue to read on your own peril.\n#\n#\n#\n\n#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\n\n#### GENERAL IMPORTS ###\nimport numpy as np\nimport scipy\nimport scipy.optimize\nimport scipy.stats\nimport scipy.signal\nimport copy\n\nfrom BayesPSD.parametricmodels import combine_models\n\ntry:\n from statsmodels.tools.numdiff import approx_hess\n comp_hessian = True\nexcept ImportError:\n comp_hessian = False\n\n\n### own imports\nfrom BayesPSD import posterior\nfrom BayesPSD import powerspectrum\n\n### global variables ####\nlogmin = -100.0\n\n\n#### CLASS THAT FITS POWER SPECTRA USING MLE ##################\n#\n# This class provides functionality for maximum likelihood fitting\n# of periodogram data to a set of models defined above.\n#\n# It draws heavily on the various optimization routines provided in\n# scipy.optimize, and additionally has the option to use R functionality\n# via rPy and a given set of functions defined in an R-script.\n#\n# Note that many different optimization routines are available, and not all\n# may be appropriate for a given problem. 
\n# Constrained optimization is available via the constrained BFGS and TNC routines.\n#\n#\nclass MaxLikelihood(object):\n\n ### x = x-coordinate of data\n ### y = y-coordinate of data\n ### obs= if True, compute covariances and print summary to screen\n ### \n ### fitmethod = choose optimization method\n ### options are:\n ### 'simplex': use simplex downhill algorithm\n ### 'powell': use modified Powell's algorithm\n ### 'gradient': use nonlinear conjugate gradient\n ### 'bfgs': use BFGS algorithm\n ### 'newton': use Newton CG \n ### 'leastsq' : use least-squares method\n ### 'constbfgs': constrained BFGS algorithm\n ### 'tnc': constrained optimization via a truncated Newton algorithm\n ### 'nlm': optimization via R's non-linear minimization routine\n ### 'anneal': simulated annealing for convex problems\n def __init__(self, x, y, obs=True, fitmethod='powell'):\n\n ### save power spectrum in attributes\n self.x= x\n self.y= y\n ### Is this a real observation or a fake periodogram to be fitted?\n self.obs = obs\n\n ### smooth data by three different factors\n self.smooth3 = scipy.signal.wiener(self.y, 3)\n self.smooth5 = scipy.signal.wiener(self.y, 5)\n self.smooth11 = scipy.signal.wiener(self.y, 11)\n\n self._set_fitmethod(self, fitmethod)\n\n def _set_fitmethod(self, fitmethod):\n ### select fitting method \n if fitmethod.lower() in ['simplex']:\n self.fitmethod = scipy.optimize.fmin\n elif fitmethod.lower() in ['powell']:\n self.fitmethod = scipy.optimize.fmin_powell\n elif fitmethod.lower() in ['gradient']:\n self.fitmethod = scipy.optimize.fmin_cg\n elif fitmethod.lower() in ['bfgs']:\n self.fitmethod = scipy.optimize.fmin_bfgs\n ### this one breaks because I can't figure out the syntax for fprime\n elif fitmethod.lower() in ['newton']:\n self.fitmethod = scipy.optimize.fmin_ncg\n elif fitmethod.lower() in ['leastsq']:\n self.fitmethod = scipy.optimize.leastsq\n elif fitmethod.lower() in ['constbfgs']:\n self.fitmethod = scipy.optimize.fmin_l_bfgs_b\n elif fitmethod.lower() in ['tnc']:\n self.fitmethod = scipy.optimize.fmin_tnc\n\n else:\n print(\"Minimization method not recognized. 
Using standard (Powell's) method.\")\n self.fitmethod = scipy.optimize.fmin_powell\n\n\n ### Do a maximum likelihood fitting with function func and \n ### initial parameters ain\n ### if residuals are to be fit, put a list of residuals into keyword 'residuals'\n\n ### func = function to be fitted\n ### ain = list with set of initial parameters\n ### obs = if True, compute covariance and print summary to screen\n ### noise = if True, the last parameter in ain is noise and will be renormalized\n ### residuals = put list of residuals here if they should be fit rather than self.y\n\n def mlest(self, func, ain, obs=True, noise=None, neg=True, functype='posterior'):\n\n fitparams = self._fitting(func, ain, obs=True)\n\n if functype in ['p', 'post', 'posterior']:\n fitparams['deviance'] = 2.0*func.loglikelihood(fitparams['popt'], neg=True)\n elif functype in ['l', 'like', 'likelihood']:\n fitparams['deviance'] = -2.0*func(fitparams['popt'])\n\n print(\"Fitting statistics: \")\n print(\" -- number of frequencies: \" + str(len(self.x)))\n print(\" -- Deviance [-2 log L] D = \" + str(fitparams['deviance']))\n\n return fitparams\n\n\n ### Fitting Routine\n ### optfunc: function to be minimized\n ### ain: initial parameter set\n ### optfuncprime: analytic derivative of optfunc (if required)\n ### neg: bool keyword for MAP estimation (if done):\n ### if True: compute the negative of the posterior\n def _fitting(self, optfunc, ain, optfuncprime=None, neg = True, obs=True):\n\n lenpower = float(len(self.y))\n\n if neg == True:\n if scipy.__version__ < \"0.10.0\":\n args = [neg]\n else:\n args = (neg,)\n else:\n args = ()\n\n ### different commands for different fitting methods,\n ### at least until scipy 0.11 is out\n \n funcval = 100.0\n while funcval == 100 or funcval == 200 or funcval == 0.0 or funcval == np.inf or funcval == -np.inf:\n ## constrained minimization with truncated newton or constrained bfgs\n\n ## Newton conjugate gradient, which doesn't work\n if self.fitmethod == scipy.optimize.fmin_ncg:\n aopt = self.fitmethod(optfunc, ain, optfuncprime, disp=0,args=args)\n\n ### BFGS algorithm\n elif self.fitmethod == scipy.optimize.fmin_bfgs:\n aopt = self.fitmethod(optfunc, ain, disp=0,full_output=True, args=args)\n\n warnflag = aopt[6]\n if warnflag == 1 :\n print(\"*** ACHTUNG! Maximum number of iterations exceeded! 
***\")\n                #elif warnflag == 2:\n                    #print(\"Gradient and/or function calls not changing!\")\n\n\n            ## all other methods: Simplex, Powell, Gradient\n            else:\n                aopt = self.fitmethod(optfunc, ain, disp=0,full_output = True, args=args)\n\n            funcval = aopt[1]\n            ain = np.array(ain)*((np.random.rand(len(ain))-0.5)*4.0)\n    \n        ### make a dictionary with best-fit parameters:\n        ## popt: best fit parameters (list)\n        ## result: value of ML function at minimum\n        ## model: the model used\n\n        fitparams = {'popt':aopt[0], 'result':aopt[1]}\n\n\n        ### calculate model power spectrum from optimal parameters\n        #fitparams['mfit'] = func(self.x, *fitparams['popt'])\n        ### degrees of freedom\n        fitparams['dof'] = lenpower - float(len(fitparams['popt']))\n        ### Akaike Information Criterion\n        fitparams['aic'] = fitparams['result']+2.0*len(ain)\n        ### Bayesian Information Criterion\n        fitparams['bic'] = fitparams['result'] + len(ain)*len(self.x)\n    \n        ### compute deviance\n        try:\n            fitparams['deviance'] = 2.0*optfunc.loglikelihood(fitparams['popt'])\n        except AttributeError:\n            fitparams['deviance'] = 2.0*optfunc(fitparams['popt'])\n\n        fitparams['sexp'] = 2.0*len(self.x)*len(fitparams['popt'])\n        fitparams['ssd'] = np.sqrt(2.0*fitparams['sexp'])\n\n        fitparams['smooth3'] = scipy.signal.wiener(self.y, 3)\n        fitparams['smooth5'] = scipy.signal.wiener(self.y, 5)\n        fitparams['smooth11'] = scipy.signal.wiener(self.y, 11)\n\n        ### if this is an observation (not fake data), compute the covariance matrix\n        if obs == True:\n            ### for BFGS, get covariance from algorithm output\n            if self.fitmethod == scipy.optimize.fmin_bfgs:\n                print(\"Approximating covariance from BFGS: \")\n                covar = aopt[3]\n                stderr = np.sqrt(np.diag(covar))\n\n            else:\n                ### calculate Hessian approximating with finite differences\n                print(\"Approximating Hessian with finite differences ...\")\n                if comp_hessian:\n                    phess = approx_hess(aopt[0], optfunc, neg=args)\n\n                    ### covariance is the inverse of the Hessian\n                    print(\"Hessian (empirical): \" + str(phess))\n\n                    covar = np.linalg.inv(phess)\n                    stderr = np.sqrt(np.diag(covar))\n\n                else:\n                    print(\"Cannot compute hessian! Use BFGS or install statsmodels!\")\n                    covar = None\n                    stderr = None\n\n            print(\"Covariance (empirical): \" + str(covar))\n\n            fitparams['cov'] = covar\n\n            ### errors of parameters are on the diagonal of the covariance\n            ### matrix; take square root to get standard deviation\n            fitparams['err'] = stderr\n\n            ### Print results to screen\n            print(\"The best-fit model parameters plus errors are:\")\n            for i,(x,y) in enumerate(zip(fitparams['popt'], stderr)):\n                print(\"Parameter \" + str(i) + \": \" + str(x) + \" +/- \" + str(y))\n            print(\"The Akaike Information Criterion of the power law model is: \"+ str(fitparams['aic']) + \".\")\n\n        return fitparams\n\n    #### This function computes the Likelihood Ratio Test between two nested models\n    ### \n    ### mod1: model 1 (simpler model)\n    ### ain1: list of input parameters for model 1\n    ### mod2: model 2 (more complex model)\n    ### ain2: list of input parameters for model 2\n    def compute_lrt(self, mod1, ain1, mod2, ain2, noise1 = -1, noise2 = -1, nmax=1):\n\n        print(\"I AM USING THIS CODE!\")\n        ### fit data with both models\n        par1 = self.mlest(mod1, ain1, obs=self.obs, noise=noise1, nmax=nmax)\n        par2 = self.mlest(mod2, ain2, obs=self.obs, noise=noise2, nmax=nmax)\n\n        ### extract dictionaries with parameters for each\n        varname1 = \"model1fit\"\n        varname2 = \"model2fit\"\n\n        self.__setattr__(varname1, par1)\n        self.__setattr__(varname2, par2)\n    \n        ### compute log likelihood ratio as difference between the deviances\n        self.lrt = par1['deviance'] - par2['deviance']\n\n        if self.obs == True: \n            print(\"The Likelihood Ratio for models %s and %s is: LRT = %.4f\"%(varname1, varname2, self.lrt))\n\n        return self.lrt\n\n\n    ### auxiliary function that makes a Lorentzian with a fixed centroid frequency\n    ### needed for QPO search algorithm\n    def __make_lorentzians(self,x):\n       ### loop creates many function definitions lorentz, each differs only by the value\n       ### of the centroid frequency f used in computing the spectrum\n       for f in x:\n           def create_my_func(f):\n               def lorentz(x, a, b, e):\n                   result = qpo(x, a, b, f, e)\n                   return result\n               return lorentz\n           yield(create_my_func(f))\n\n\n    #### Fit Lorentzians at each frequency in the spectrum\n    #### and return a list of log-likelihoods at each value\n    ### fitpars = parameters of broadband noise fit\n    ### residuals: if true: divide data by best-fit model in fitpars\n    def fitqpo(self, fitpars=None, residuals=False):\n\n        if residuals:\n            ### extract model fit\n            mfit = fitpars['mfit']\n\n            ### compute residuals: data/model\n            residuals = np.array(fitpars[\"smooth5\"])/mfit\n\n        else:\n            residuals = np.array(fitpars[\"smooth5\"])\n\n        ### constraint on width of QPO: must be bigger than 2*frequency resolution\n        gamma_min = 2.0*(self.x[2]-self.x[1])\n\n        ### empty list for log-likelihoods\n        like_rat = []\n    \n        ### fit a Lorentzian at every frequency\n        for f, func, res in zip(self.x[3:-3], self.__make_lorentzians(self.x[3:-3]), residuals[3:-3]):\n            ### constraint on width of QPO: must be narrower than the centroid frequency/2\n            gamma_max = f/2.0\n            norm = np.mean(residuals)+np.var(residuals)\n            ain = [gamma_min, norm, 0.0]\n    \n            ### fit QPO to data \n            #pars = self.mlest(func, ain, noise = True, obs=False, residuals=None)\n            pars = self.mlest(func, ain, noise = -1, obs=False, residuals=residuals)\n\n            ### save fitted frequency and data residuals in parameter dictionary\n            pars['fitfreq'] = f\n            pars['residuals'] = residuals\n            like_rat.append(pars) \n\n        ### returns a list of parameter dictionaries\n        return like_rat\n    \n    #### Find QPOs in Periodogram data\n    ### 
func = broadband noise model\n ### ain = input parameters for broadband noise model\n ### fitmethod = which method to use for fitting the QPOs \n ### plot = if True, save a plot with log-likelihoods \n ### plotname = string used in filename if plot == True\n ### obs = if True, compute covariances and print out stuff\n def find_qpo(self, func, ain,\n fitmethod='nlm',\n plot=False,\n plotname=None,\n obs = False):\n\n ### fit broadband noise model to the data\n optpars = self.mlest(func, ain, obs=obs, noise=-1)\n\n ### fit a variable Lorentzian to every frequency and return parameter values\n lrts = self.fitqpo(fitpars=optpars, residuals=True)\n\n ### list of likelihood ratios\n like_rat = np.array([x['deviance'] for x in lrts])\n\n\n ### find minimum likelihood ratio\n minind = np.where(like_rat == min(like_rat))\n minind = minind[0][0]+3\n #print(minind)\n minfreq = self.x[minind] ### ... maybe not! Needs to be +1 because first frequency left out in psfit\n\n print(\"The frequency of the tentative QPO is: \" + str(minfreq))\n\n residuals = self.smooth5/optpars['mfit']\n\n best_lorentz = self.__make_lorentzians([minfreq]) \n\n noiseind = len(optpars['popt']) - 1\n\n\n ### minimum width of QPO\n gamma_min = np.log((self.x[1]-self.x[0])*3.0)\n ### maximum width of QPO\n gamma_max = minfreq/1.5\n\n print('combmod first component: ' + str(func))\n ### create a combined model of broadband noise model + QPO\n combmod = combine_models((func, len(optpars['popt'])), (qpo, 3), mode='add')\n\n ### make a list of input parameters\n inpars = list(optpars['popt'].copy())\n inpars.extend(lrts[minind-3]['popt'][:2])\n inpars.extend([minfreq])\n\n #qpobounds = [[None, None] for x in range(len(inpars)-3)]\n #qpobounds.extend([[gamma_min, gamma_max], [None, None], [None,None]])\n\n\n ### fit broadband QPO + noise model, using best-fit parameters as input\n qpopars = self.mlest(combmod, inpars,obs=obs, noise=noiseind, smooth=0)\n\n ### likelihood ratio of func+QPO to func\n lrt = optpars['deviance'] - qpopars['deviance']\n\n like_rat_norm = like_rat/np.mean(like_rat)*np.mean(self.y)*100.0\n\n if plot:\n plt.figure()\n axL = plt.subplot(1,1,1)\n plt.plot(self.x, self.y, lw=3, c='navy')\n plt.plot(self.x, qpopars['mfit'], lw=3, c='MediumOrchid') \n plt.xscale(\"log\") \n plt.yscale(\"log\")\n plt.xlabel('Frequency')\n plt.ylabel('variance normalized power')\n\n axR = plt.twinx()\n axR.yaxis.tick_right()\n axR.yaxis.set_label_position(\"right\")\n plt.plot(self.x[3:-3], like_rat, 'r--', lw=2, c=\"DeepSkyBlue\")\n plt.ylabel(\"-2*log-likelihood\")\n\n plt.axis([min(self.x), max(self.x), min(like_rat)-np.var(like_rat), max(like_rat)+np.var(like_rat)])\n\n plt.savefig(plotname+'.png', format='png')\n plt.close()\n\n return lrt, optpars, qpopars\n\n\n ### plot two fits of broadband models against each other\n def plotfits(self, par1, par2 = None, namestr='test', log=False):\n\n ### make a figure\n f = plt.figure(figsize=(12,10))\n ### adjust subplots such that the space between the top and bottom of each are zero\n plt.subplots_adjust(hspace=0.0, wspace=0.4)\n\n\n ### first subplot of the grid, twice as high as the other two\n ### This is the periodogram with the two fitted models overplotted\n s1 = plt.subplot2grid((4,1),(0,0),rowspan=2)\n\n if log:\n logx = np.log10(self.x)\n logy = np.log10(self.y)\n logpar1 = np.log10(par1['mfit']) \n logpar1s5 = np.log10(par1['smooth5'])\n\n p1, = plt.plot(logx, logy, color='black', linestyle='steps-mid')\n p1smooth = plt.plot(logx, logpar1s5, lw=3, color='orange')\n p2, = 
plt.plot(logx, logpar1, color='blue', lw=2)\n\n else:\n p1, = plt.plot(self.x, self.y, color='black', linestyle='steps-mid')\n p1smooth = plt.plot(self.x, par1['smooth5'], lw=3, color='orange')\n p2, = plt.plot(self.x, par1['mfit'], color='blue', lw=2)\n if par2:\n if log:\n logpar2 = np.log10(par2['mfit'])\n p3, = plt.plot(logx, logpar2, color='red', lw=2)\n else:\n p3, = plt.plot(self.x, par2['mfit'], color='red', lw=2)\n plt.legend([p1, p2, p3], [\"observed periodogram\", par1['model'] + \" fit\", par2['model'] + \" fit\"])\n else:\n plt.legend([p1, p2], [\"observed periodogram\", par1['model'] + \" fit\"])\n\n if log:\n plt.axis([min(logx), max(logx), min(logy)-1.0, max(logy)+1])\n plt.ylabel('log(Leahy-Normalized Power)', fontsize=18)\n\n else:\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n\n plt.axis([min(self.x), max(self.x), min(self.y)/10.0, max(self.y)*10.0])\n plt.ylabel('Leahy-Normalized Power', fontsize=18)\n plt.title(\"Periodogram and fits for burst \" + namestr, fontsize=18)\n\n \n ### second subplot: power/model for Power law and straight line\n s2 = plt.subplot2grid((4,1),(2,0),rowspan=1)\n pldif = self.y/par1['mfit']\n if par2:\n bpldif = self.y/par2['mfit']\n\n if log:\n plt.plot(logx, pldif, color='black', linestyle='steps-mid')\n plt.plot(logx, np.ones(len(self.x)), color='blue', lw=2)\n\n else:\n plt.plot(self.x, pldif, color='black', linestyle='steps-mid')\n plt.plot(self.x, np.ones(len(self.x)), color='blue', lw=2)\n plt.ylabel(\"Residuals, \\n\" + par1['model'] + \" model\", fontsize=18)\n\n if log:\n plt.axis([min(logx), max(logx), min(pldif), max(pldif)])\n\n else:\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.axis([min(self.x), max(self.x), min(pldif), max(pldif)])\n\n if par2:\n bpldif = self.y/par2['mfit']\n\n ### third subplot: power/model for bent power law and straight line\n s3 = plt.subplot2grid((4,1),(3,0),rowspan=1)\n\n if log:\n plt.plot(logx, bpldif, color='black', linestyle='steps-mid')\n plt.plot(logx, np.ones(len(self.x)), color='red', lw=2)\n plt.axis([min(logx), max(logx), min(bpldif), max(bpldif)])\n\n else:\n plt.plot(self.x, bpldif, color='black', linestyle='steps-mid')\n plt.plot(self.x, np.ones(len(self.x)), color='red', lw=2)\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.axis([min(self.x), max(self.x), min(bpldif), max(bpldif)])\n\n plt.ylabel(\"Residuals, \\n\" + par2['model'] + \" model\", fontsize=18)\n \n ax = plt.gca()\n \n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontsize(14)\n\n if log:\n plt.xlabel(\"log(Frequency) [Hz]\", fontsize=18)\n else:\n plt.xlabel(\"Frequency [Hz]\", fontsize=18)\n\n ### make sure xticks are taken from first plots, but don't appear there\n plt.setp(s1.get_xticklabels(), visible=False)\n \n ### save figure in png file and close plot device\n plt.savefig(namestr + '_ps_fit.png', format='png')\n plt.close()\n\n return\n\n\n\n##########################################################\n##########################################################\n##########################################################\n\n\n#### PERIODOGRAM FITTING SUBCLASS ################\n#\n# Compute Maximum A Posteriori (MAP) parameters\n# for periodograms via Maximum Likelihood\n# using the \n# posterior class above\n#\n#\n#\n#\n#\n#\n#\n#\n\n\nclass PerMaxLike(MaxLikelihood):\n\n ### ps = PowerSpectrum object with periodogram\n ### obs= if True, compute covariances and print summary to screen\n ### \n ### fitmethod = choose optimization method\n ### options are:\n ### 'simplex': use simplex downhill 
algorithm\n ### 'powell': use modified Powell's algorithm\n ### 'gradient': use nonlinear conjugate gradient\n ### 'bfgs': use BFGS algorithm\n ### 'newton': use Newton CG \n ### 'leastsq' : use least-squares method\n ### 'constbfgs': constrained BFGS algorithm\n ### 'tnc': constrained optimization via a truncated Newton algorithm\n ### 'nlm': optimization via R's non-linear minimization routine\n ### 'anneal': simulated annealing for convex problems\n def __init__(self, ps, obs=True, fitmethod='powell'):\n\n ### ignore first elements in ps.freq and ps.ps (= no. of photons)\n #ps.freq = np.array(ps.freq[1:])\n self.x = ps.freq[1:]\n #ps.ps = np.array(ps.ps[1:])\n self.y = ps.ps[1:]\n\n self.ps = ps\n\n ### Is this a real observation or a fake periodogram to be fitted?\n self.obs = obs\n\n ### set fitmethod\n self._set_fitmethod(fitmethod)\n\n\n def mlest(self, func, ain, obs=True, noise=None, nmax=1, residuals = None, smooth=0, m=1, map=True):\n\n if smooth == 0 :\n power = self.y\n elif smooth == 3:\n power = self.smooth3\n elif smooth == 5:\n power = self.smooth5\n elif smooth == 11:\n power = self.smooth11\n else:\n raise Exception('No valid option for kwarg \"smooth\". Options are 0,3,5 and 11!')\n\n if not residuals is None:\n power = residuals\n\n lenpower = float(len(power))\n\n ### renormalize normalization so it's in the right range\n varobs = np.sum(power)\n varmod = np.sum(func(self.x, *ain))\n renorm = varobs/varmod\n\n if len(ain) > 1:\n ain[1] = ain[1] + np.log(renorm)\n\n ### If last parameter is noise level, renormalize noise level\n ### to something useful:\n if not noise is None:\n #print(\"Renormalizing noise level ...\")\n ### take the last 50 elements of the power spectrum\n noisepower = power[-51:-1]\n meannoise = np.log(np.mean(noisepower))\n ain[noise] = meannoise\n\n ### set function to be minimized: posterior density for periodograms:\n\n pstemp = powerspectrum.PowerSpectrum()\n pstemp.freq = self.x\n pstemp.ps = power\n pstemp.df = self.ps.df\n\n if m == 1:\n lposterior = posterior.PerPosterior(pstemp, func)\n elif m > 1:\n lposterior = posterior.StackPerPosterior(pstemp, func, m)\n\n else: \n raise Exception(\"Number of power spectra is not a valid number!\")\n\n if not map:\n lpost = lposterior.loglikelihood\n else:\n lpost = lposterior\n\n fitparams = self._fitting(lpost, ain, neg = True, obs=obs)\n\n fitparams[\"model\"] = str(func).split()[1]\n fitparams[\"mfit\"] = func(self.x, *fitparams['popt']) \n\n ### calculate model power spectrum from optimal parameters\n #fitparams['mfit'] = func(self.x, *fitparams['popt'])\n ### figure-of-merit (SSE)\n fitparams['merit'] = np.sum(((power-fitparams['mfit'])/fitparams['mfit'])**2.0)\n\n\n ### find highest outlier\n plrat = 2.0*(self.y/fitparams['mfit'])\n #print(plrat)\n fitparams['sobs'] = np.sum(plrat)\n\n if nmax ==1:\n ### plmaxpow is the maximum of 2*data/model \n plmaxpow = max(plrat[1:])\n #print('plmaxpow: ' + str(plmaxpow))\n plmaxind = np.where(plrat == plmaxpow)[0]\n #print('plmaxind: ' + str(plmaxind))\n if len(plmaxind) > 1:\n plmaxind = plmaxind[0]\n elif len(plmaxind) == 0:\n plmaxind = -2\n plmaxfreq = self.x[plmaxind]\n\n else:\n\n plratsort = copy.copy(plrat)\n plratsort.sort()\n plmaxpow = plratsort[-nmax:]\n\n plmaxind, plmaxfreq = [], []\n for p in plmaxpow:\n try:\n plmaxind_temp = np.where(plrat == p)[0]\n if len(plmaxind_temp) > 1:\n plmaxind_temp = plmaxind_temp[0]\n elif len(plmaxind_temp) == 0:\n plmaxind_temp = -2\n plmaxind.append(plmaxind_temp)\n 
plmaxfreq.append(self.x[plmaxind_temp])\n\n except TypeError:\n plmaxind.append(None)\n plmaxfreq.append(None)\n\n\n\n fitparams['maxpow'] = plmaxpow\n fitparams['maxind'] = plmaxind\n fitparams['maxfreq'] = plmaxfreq\n\n\n s3rat = 2.0*(fitparams['smooth3']/fitparams['mfit'])\n fitparams['s3max'] = max(s3rat[1:])\n try:\n s3maxind = np.where(s3rat == fitparams['s3max'])[0]\n if len(s3maxind) > 1:\n s3maxind = s3maxind[0]\n fitparams['s3maxfreq'] = self.x[s3maxind]\n except TypeError:\n fitparams[\"s3maxfreq\"] = None\n s5rat = 2.0*(fitparams['smooth5']/fitparams['mfit'])\n fitparams['s5max'] = max(s5rat[1:])\n try:\n s5maxind = np.where(s5rat == fitparams['s5max'])[0]\n if len(s5maxind) > 1:\n s5maxind = s5maxind[0]\n fitparams['s5maxfreq'] = self.x[s5maxind]\n except TypeError:\n fitparams['s5maxfreq'] = None\n\n s11rat = 2.0*(fitparams['smooth11']/fitparams['mfit'])\n fitparams['s11max'] = max(s11rat[1:])\n try:\n s11maxind = np.where(s11rat == fitparams['s11max'])[0]\n if len(s11maxind) > 1:\n s11maxind = s11maxind[0]\n fitparams['s11maxfreq'] = self.x[s11maxind]\n except TypeError:\n fitparams['s11maxfreq'] = None\n\n ### compute binned periodograms and find highest outlier in those:\n df = (self.x[1]-self.x[0])\n ### first, compute the maximum binning that would even make sense\n bmax = int(self.x[-1]/(2.0*(self.x[1]-self.x[0])))\n #print('bmax: ' + str(bmax))\n bins = [1,3,5,7,10,15,20,30,50,70,100,200,300,500]\n\n bindict = {}\n\n for b in bins:\n if b < bmax:\n if b == 1:\n binps = self.ps\n else:\n binps = self.ps.rebinps(b*df)\n binpsname = \"bin\" + str(b)\n bindict[binpsname] = binps\n binpl = func(binps.freq, *fitparams[\"popt\"])\n binratio = 2.0*np.array(binps.ps)/binpl\n maxind = np.where(binratio[1:] == max(binratio[1:]))[0]\n if len(maxind) > 1:\n maxind = maxind[0]\n elif len(maxind) == 0 :\n maxind = -2\n binmaxpow = \"bmax\" + str(b)\n bindict[binmaxpow] = max(binratio[1:])\n binmaxfreq = \"bmaxfreq\" + str(b)\n bindict[binmaxfreq] = binps.freq[maxind+1]\n bindict['binpl' + str(b)] = binpl\n\n fitparams[\"bindict\"] = bindict\n\n ## do a KS test comparing residuals to the exponential distribution\n plks = scipy.stats.kstest(plrat/2.0, 'expon', N=len(plrat))\n fitparams['ksp'] = plks[1]\n\n if obs == True:\n print(\"The figure-of-merit function for this model is: \" + str(fitparams['merit']) + \" and the fit for \" + str(fitparams['dof']) + \" dof is \" + str(fitparams['merit']/fitparams['dof']) + \".\")\n\n print(\"Fitting statistics: \")\n print(\" -- number of frequencies: \" + str(len(self.x)))\n print(\" -- Deviance [-2 log L] D = \" + str(fitparams['deviance']))\n print(\" -- Highest data/model outlier 2I/S = \" + str(fitparams['maxpow']))\n print(\" at frequency f_max = \" + str(fitparams['maxfreq']))\n\n print(\" -- Highest smoothed data/model outlier for smoothing factor [3] 2I/S = \" + str(fitparams['s3max']))\n print(\" at frequency f_max = \" + str(fitparams['s3maxfreq']))\n print(\" -- Highest smoothed data/model outlier for smoothing factor [5] 2I/S = \" + str(fitparams['s5max']))\n print(\" at frequency f_max = \" + str(fitparams['s5maxfreq']))\n print(\" -- Highest smoothed data/model outlier for smoothing factor [11] 2I/S = \" + str(fitparams['s11max']))\n print(\" at frequency f_max = \" + str(fitparams['s11maxfreq']))\n\n print(\" -- Summed Residuals S = \" + str(fitparams['sobs']))\n print(\" -- Expected S ~ \" + str(fitparams['sexp']) + \" +- \" + str(fitparams['ssd']))\n print(\" -- KS test p-value (use with caution!) 
p = \" + str(fitparams['ksp']))\n print(\" -- merit function (SSE) M = \" + str(fitparams['merit']))\n\n return fitparams\n\n\n def compute_lrt(self, mod1, ain1, mod2, ain2, noise1=-1, noise2=-1, m=1, map=True, nmax=1):\n\n\n ### fit data with both models\n par1 = self.mlest(mod1, ain1, obs=self.obs, noise=noise1, m = m, map = map, nmax=nmax)\n par2 = self.mlest(mod2, ain2, obs=self.obs, noise=noise2, m = m, map = map, nmax=nmax)\n\n ### extract dictionaries with parameters for each\n varname1 = \"model1fit\"\n varname2 = \"model2fit\"\n\n self.__setattr__(varname1, par1)\n self.__setattr__(varname2, par2)\n\n ### compute log likelihood ratio as difference between the deviances\n self.lrt = par1['deviance'] - par2['deviance']\n\n if self.obs == True:\n print(\"The Likelihood Ratio for models %s and %s is: LRT = %.4f\"%(varname1, varname2, self.lrt))\n\n\n return self.lrt\n\n\n\n","repo_name":"dhuppenkothen/BayesPSD","sub_path":"BayesPSD/mle.py","file_name":"mle.py","file_ext":"py","file_size_in_byte":30986,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"17912243941","text":"import pygame as pg\nimport neat\nimport sys\nimport pickle\n\nfrom tiles.tiles import load_map, SCREEN_WIDTH, SCREEN_HEIGHT\nfrom car.car import Car\n\nWHITE = pg.Color(255, 255, 255)\n\nrace_map = \"map.json\"\nconfig_file_path = \"config-feedforward.txt\"\ntiles, collisions = load_map(race_map)\nclock = pg.time.Clock()\nscreen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n\ndef train(genomes, config):\n nets, cars = [], []\n rotate_car_angle = 0\n\n \"\"\" \n Initialize all nets and cars \n Build radars for the cars so neat would be able to know collisions' distance\n Update the position of the cars on the map \n \"\"\"\n\n for _, gen in genomes:\n net = neat.nn.FeedForwardNetwork.create(gen, config)\n car = Car()\n car.update(380, 45) # Initial position\n car.build_radars(collisions, rotate_car_angle)\n\n nets.append(net)\n cars.append(car)\n gen.fitness = 0.0\n\n def build_map():\n for tile in tiles:\n if tile:\n screen.blit(tile.image, tile.rect)\n\n done = False\n while not done:\n build_map()\n #screen.blit(carTest.rotate_image , carTest.rect)\n #carTest.build_radars(collisions , rotate_car_angle , screen)\n\n if (rotate_car_angle == 360 or rotate_car_angle == -360):\n rotate_car_angle = 0\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n cars.clear()\n pg.quit()\n sys.exit(0)\n\n if event.type == pg.KEYDOWN:\n\n if event.key == pg.K_ESCAPE: # Exit on escape key pressed\n cars.clear()\n pg.quit()\n sys.exit(0)\n\n remainders = 0\n for (_, gen), net, car in zip(genomes, nets, cars):\n\n if car.isAlive:\n remainders += 1\n output = net.activate(car.radars)\n ind = output.index(max(output))\n\n if(ind == 0):\n rotate_car_angle -= 1\n car.rotate(rotate_car_angle)\n\n else:\n rotate_car_angle += 1\n car.rotate(rotate_car_angle)\n\n car.calculate_new_xy(10, rotate_car_angle)\n car.build_radars(collisions, rotate_car_angle)\n\n if car.is_colliding(collisions):\n car.isAlive = False\n continue\n\n gen.fitness = car.distance / 100\n\n # Draw cars even if it is not alive anymore\n screen.blit(car.rotate_image, car.rect)\n\n pg.display.flip()\n clock.tick(60) # 60 FPS\n\n if not remainders:\n done = True\n\n\ndef run_training():\n # Set configuration file\n config_path = config_file_path\n config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)\n\n # Create 
core evolution algorithm class\n p = neat.Population(config)\n\n # Add reporter for fancy statistical result\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n\n #Init pygame\n pg.init()\n\n # Run NEAT\n winner = p.run(train)\n\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n with open(\"winner.pkl\", \"wb\") as file:\n pickle.dump(winner, file)\n\n\nif __name__ == \"__main__\":\n run_training()\n","repo_name":"caiovini/Neat-racing","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39966655390","text":"# Program to take sample phishing mail and count the most coomonly occuring word\npmail = [\n \"winlottery@indianloot.com\",\n \"luckywinner@lottery.com\",\n \"spinwheel@flipkart.com\",\n \"luckydeal@snapdeal.com\" , \n \"youarethewinner@myprize.com\",\n \"luckywinner@mymoney.com\", \n \"Claimyourprize@money.com\",\n \"luckyjackpot@americanlottery.com\"\n ]\nmyd = {}\nfor e in pmail :\n z = e.split('@')\n for w in z :\n if w not in myd:\n myd[w] = 1\n else:\n myd[w] += 1\n\nmaxw = max(myd, key = myd.get)\nprint(\"Most common Occuring Word is : \", maxw)\n","repo_name":"mht009/Programs","sub_path":"Python/Email coomon word.py","file_name":"Email coomon word.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28804669862","text":"import win32gui\nimport win32con\nimport pycodestyle\nimport random\nimport pygame as pg\nfrom ctypes import windll\n\nfrom class_snake import Snake\nfrom neural_network_snake import NeuralNetwork1\n\n\nclass Board:\n def __init__(self, num_row, num_col, num_players, len_snake=5, training=False, save_file='test'):\n # set the width and height of the board\n self.num_row = num_row\n self.num_col = num_col\n self.training = training\n self.NN_playing = False\n self.food = set()\n\n # draw screen without a snake or food in PyGame, only when the neural network is NOT being trained\n self.square = None\n self.x_left = None\n self.y_top = None\n self.scr = None\n\n if not self.training:\n self.draw_empty_scr()\n\n # make set of all coordinates to be able to place food\n self.all_coords = {(x, y) for y in range(self.num_row) for x in range(self.num_col)}\n\n # check if a NN is playing\n if num_players < 0:\n num_players *= -1\n self.NN_playing = True\n self.NN_list = [NeuralNetwork1(1, 1, save_file + f'_{i+1}') for i in range(num_players)]\n\n # make snake(s) and place the first food\n self.snakes_list = [Snake(self, len_snake, index, num_players) for index in range(num_players)]\n self.place_food()\n\n def draw_empty_scr(self):\n # start PyGame\n pg.init()\n\n # set the pygame window in fullscreen\n windll.user32.SetProcessDPIAware()\n true_res = (windll.user32.GetSystemMetrics(0), windll.user32.GetSystemMetrics(1))\n self.scr = pg.display.set_mode(true_res, pg.FULLSCREEN)\n\n # set the PyGame window in focus\n hwnd = pg.display.get_wm_info()['window']\n win32gui.ShowWindow(hwnd, win32con.SW_MINIMIZE)\n win32gui.ShowWindow(hwnd, win32con.SW_RESTORE)\n win32gui.SetForegroundWindow(hwnd)\n\n # get the dimensions of the screen\n x_max = pg.Surface.get_width(self.scr)\n y_max = pg.Surface.get_height(self.scr)\n\n # make the complete screen white\n self.scr.fill((255, 255, 255))\n\n # find size of 1 tile\n self.square = min((x_max - 
100)//self.num_col, (y_max - 70)//self.num_row)\n\n # find corners of the board\n self.x_left = int((x_max - self.num_col*self.square)/2)\n self.y_top = int((y_max - self.num_row*self.square)/2)\n\n x_right = int(self.x_left + self.num_col*self.square)\n y_bottom = int(self.y_top + self.num_row*self.square)\n\n # draw the lines\n for i in range(self.num_row+1):\n pg.draw.line(self.scr, (0, 0, 0), (self.x_left, self.y_top + i*self.square),\n (x_right, self.y_top + i*self.square), 1)\n\n for j in range(self.num_col+1):\n pg.draw.line(self.scr, (0, 0, 0), (self.x_left + j*self.square, self.y_top),\n (self.x_left + j*self.square, y_bottom), 1)\n\n def place_food(self):\n available_coords = self.all_coords.copy()\n\n # subtract the coordinates of the snake(s) from all the coordinates\n for s in self.snakes_list:\n available_coords = list(set(available_coords) - set(s.snake))\n\n # randomly choose one coordinate out of the remaining coordinates and add it to the list with food\n random_coord = random.choice(available_coords)\n self.food.add(random_coord)\n\n # draw the food\n if not self.training:\n self.draw_tile(random_coord, (255, 0, 0))\n\n def draw_tile(self, coord, color):\n pg.draw.rect(self.scr, color,\n (self.x_left + coord[0]*self.square + 1, self.y_top + coord[1]*self.square + 1,\n self.square - 1, self.square - 1))\n\n pg.display.flip()\n\n\nif __name__ == '__main__':\n # Run PEP 8 check on this file\n pycodestyle.StyleGuide().check_files([__file__])\n\n # Test the list_of_board() function\n b = Board(5, 6, 2, 3)\n print(b.snakes_list[0].list_of_board())\n","repo_name":"Bavo2002/Snake-RL","sub_path":"class_board.py","file_name":"class_board.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40993075910","text":"import serial\nfrom struct import *\nimport collections\nimport time\nimport math\n\nSITRACE_PORT = 'COM13'\nSITRACE_BAUDRATE = 921600/2\nband = 1\nadd_A000_static_para=[]\nadd_5007_static_para = []\ndef int2bin(n, count=24):\n return \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])\n \ndef send_cli_cmd(ser, cmd):\n cmd_len = len(cmd) + 1;\n data = pack('HHBBHL', 0xFACE, 0x10, 0, 0xFF, cmd_len, 0) + cmd + '\\x00';\n ser.write(data)\n\ndef write_reg_tonemask(tone_mask_file):\n add_5007 = []\n add_base = 0x500710C0\n add_cur = add_base - 4\n strc = ''\n fp_obj = open(tone_mask_file)\n for line in fp_obj.readlines():\n strc += str(int(line.strip(), 2) ^ 1)\n\n ss = ''\n ss1 = []\n for i in range(0, int(512 / 32)):\n for j in range(0, 32):\n ss += strc[j + i * 32]\n ss = ss[::-1]\n add_cur += 4\n add_str = hex(add_cur)\n s = '{:x}'.format(int(ss, 2))\n cmd_str_tone = 'memwrite 32 ' + add_str + ' 0x' + s\n add_5007.append(cmd_str_tone + '\\r\\n')\n ss1.append(s)\n ss = ''\n return add_5007\n\ndef phy_static_para(ValidCarrierNum):\n global add_A000_static_para\n global add_5007_static_para\n add_A000 = []\n add_5007 = []\n tmi_dict = collections.OrderedDict()\n tmi_dict['B_tmi0']=[520,4,'qpsk',1./2]\n tmi_dict['B_tmi1']=[520,2,'qpsk',1./2]\n tmi_dict['B_tmi2']=[136,5,'qpsk',1./2]\n tmi_dict['B_tmi3']=[136,11,'bpsk',1./2]\n tmi_dict['B_tmi4']=[136,7,'bpsk',1./2]\n tmi_dict['B_tmi5']=[136,11,'qpsk',1./2]\n tmi_dict['B_tmi6']=[136,7,'qpsk',1./2]\n tmi_dict['B_tmi7']=[520,7,'bpsk',1./2]\n tmi_dict['B_tmi8']=[520,4,'bpsk',1./2]\n tmi_dict['B_tmi9']=[520,7,'qpsk',1./2]\n tmi_dict['B_tmi10']=[520,2,'bpsk',1./2]\n tmi_dict['B_tmi11']=[264,7,'qpsk',1./2]\n 
tmi_dict['B_tmi12']=[264,7,'bpsk',1./2]\n tmi_dict['B_tmi13']=[72,7,'qpsk',1./2]\n tmi_dict['B_tmi14']=[72,7,'bpsk',1./2]\n tmi_dict['E_tmi1']=[520,1,'16qam',16./18]\n tmi_dict['E_tmi2']=[520,2,'16qam',16./18]\n tmi_dict['E_tmi3']=[520,1,'16qam',1./2]\n tmi_dict['E_tmi4']=[520,2,'16qam',1./2]\n tmi_dict['E_tmi5']=[520,4,'16qam',1./2]\n tmi_dict['E_tmi6']=[520,1,'qpsk',1./2]\n tmi_dict['E_tmi10']=[136,5,'16qam',1./2]\n tmi_dict['E_tmi11']=[136,2,'qpsk',1./2]\n tmi_dict['E_tmi12']=[136,2,'16qam',1./2]\n tmi_dict['E_tmi13']=[136,1,'qpsk',1./2]\n tmi_dict['E_tmi14']=[136,1,'16qam',1./2]\n InterNumVec = [0,1,8,0,8,10,0,14,0,0,0,11]\n InterNumGVec =[0,1,4,0,2, 2,0, 2,0,0,0, 1]\n cmd_list=[]\n add_list=[]\n add_base=0xA0003080\n add_cur=add_base-4\n add_cur_140 = 0x50071140\n add_cur_ofdm = 0x5007120c\n ofdm_print_flag = 0\n for key in tmi_dict:\n #print(key)\n #fp_log.write(key+'\\n')\n DataBitsLen = int(tmi_dict[key][0]*8/tmi_dict[key][3])\n CopyNum = tmi_dict[key][1]\n InterNum = InterNumVec[tmi_dict[key][1]]\n UsedCarrierNum = InterNum*math.floor(ValidCarrierNum/InterNum)\n CarrierNumPerGroup = math.floor(UsedCarrierNum/tmi_dict[key][1])\n CarrierNumPerInter = math.floor(ValidCarrierNum/InterNum)\n if tmi_dict[key][2]=='bpsk':\n BPC = 1\n elif tmi_dict[key][2]=='qpsk':\n BPC = 2\n elif tmi_dict[key][2]=='16qam':\n BPC = 4\n BitsPerOFDM = BPC*UsedCarrierNum\n BitsPerGroup = BPC*CarrierNumPerGroup\n BitsInLastOFDM = DataBitsLen-BitsPerOFDM*math.floor(DataBitsLen/BitsPerOFDM)\n if BitsInLastOFDM==0:\n BitsInLastOFDM = BitsPerOFDM\n BitsInLastGroup = BitsPerGroup\n else:\n BitsInLastGroup = BitsInLastOFDM-BitsPerGroup*math.floor((BitsInLastOFDM-1)/BitsPerGroup)\n PadBitsNum = int(BitsPerGroup-BitsInLastGroup)\n InterNumPerCopy = int((PadBitsNum+DataBitsLen)/(BPC*CarrierNumPerInter))\n OFDMSymPerPB = int((PadBitsNum+DataBitsLen)/(BitsPerOFDM)*CopyNum)\n SamplePiontNum=(OFDMSymPerPB-2)*(264+1024)+2*(458+1024)\n\n\n if CopyNum==1:\n InterNumPerCopy=1\n \"\"\n tmi_dict[key].append(DataBitsLen)\n tmi_dict[key].append(InterNum)\n tmi_dict[key].append(UsedCarrierNum)\n tmi_dict[key].append(CarrierNumPerGroup)\n tmi_dict[key].append(CarrierNumPerInter)\n tmi_dict[key].append(BPC)\n tmi_dict[key].append(BitsPerOFDM)\n tmi_dict[key].append(BitsPerGroup)\n tmi_dict[key].append(BitsInLastOFDM)\n tmi_dict[key].append(BitsInLastGroup)\n tmi_dict[key].append(PadBitsNum)\n tmi_dict[key].append(InterNumPerCopy)\n tmi_dict[key].append(OFDMSymPerPB)\n tmi_dict[key].append(SamplePiontNum)\n \"\"\n if CopyNum==1:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n elif CopyNum==2:\n if BitsInLastOFDM<=BitsPerGroup:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n else:\n GroupShiftNum = [1,0,0,0,0,0,0,0,0,0]\n elif CopyNum==4:\n if BitsInLastOFDM<=BitsPerGroup:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n elif BitsInLastOFDM<=2*BitsPerGroup:\n GroupShiftNum = [0,1,1,0,0,0,0,0,0,0]\n elif BitsInLastOFDM<=3*BitsPerGroup:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n else:\n GroupShiftNum = [1,2,3,0,0,0,0,0,0,0]\n elif CopyNum==5:\n if BitsInLastOFDM<=4*BitsPerGroup:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n else:\n GroupShiftNum = [1,2,3,4,0,0,0,0,0,0]\n elif CopyNum==7:\n if BitsInLastOFDM<=6*BitsPerGroup:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n else:\n GroupShiftNum = [1,2,3,4,5,6,0,0,0,0]\n elif CopyNum==11:\n if BitsInLastOFDM<=10*BitsPerGroup:\n GroupShiftNum = [0,0,0,0,0,0,0,0,0,0]\n else:\n GroupShiftNum = [1,2,3,4,5,6,7,8,9,10]\n \"\"\n GroupShiftNum_L = GroupShiftNum[0:2]\n GroupShiftNum_H = GroupShiftNum[2:11]\n 
tmi_dict[key].append(GroupShiftNum)\n InterShiftStep = math.floor(CarrierNumPerInter/(2*InterNum))\n if InterShiftStep<1:\n InterShiftStep=0\n elif InterShiftStep<2:\n InterShiftStep=1\n elif InterShiftStep<4:\n InterShiftStep=2\n elif InterShiftStep<8:\n InterShiftStep=4\n elif InterShiftStep<16:\n InterShiftStep=8\n if CopyNum==1:\n InterShiftStep=1\n tmi_dict[key].append(InterShiftStep)\n \"\"\n PARA1=''\n PARA1+=int2bin(GroupShiftNum_L[1],4)\n PARA1+=int2bin(GroupShiftNum_L[0],4)\n PARA1+=int2bin(InterShiftStep,4)\n PARA1+=int2bin(int(PadBitsNum),11)\n PARA1+=int2bin(int(UsedCarrierNum),9)\n cmd_list.append(PARA1)\n add_cur = add_cur+4\n add_list.append(add_cur)\n add_str = hex(add_cur)\n cmd_str = 'memwrite 32 '+add_str[:-1]+' 0x'+'{:x}'.format(int(PARA1,2))\n #print(cmd_str)\n #fp_log.write(cmd_str+'\\n')\n add_A000.append(cmd_str+'\\r\\n')\n\n PARA2=''\n PARA2+=int2bin(GroupShiftNum_H[7],4)\n PARA2+=int2bin(GroupShiftNum_H[6],4)\n PARA2+=int2bin(GroupShiftNum_H[5],4)\n PARA2+=int2bin(GroupShiftNum_H[4],4)\n PARA2+=int2bin(GroupShiftNum_H[3],4)\n PARA2+=int2bin(GroupShiftNum_H[2],4)\n PARA2+=int2bin(GroupShiftNum_H[1],4)\n PARA2+=int2bin(GroupShiftNum_H[0],4)\n cmd_list.append(PARA2)\n add_cur = add_cur+4\n add_list.append(add_cur)\n add_str = hex(add_cur)\n cmd_str = 'memwrite 32 '+add_str[:-1]+' 0x'+'{:x}'.format(int(PARA2,2))\n #print(cmd_str)\n #fp_log.write(cmd_str+'\\n')\n add_A000.append(cmd_str+'\\r\\n')\n\n PARA3=''\n PARA3+=int2bin(int(CarrierNumPerInter),6)\n PARA3+='0'\n PARA3+=int2bin(int(BitsPerGroup),11)\n PARA3+=int2bin(InterNumPerCopy,14)\n cmd_list.append(PARA3)\n add_cur = add_cur+4\n add_list.append(add_cur)\n add_str = hex(add_cur)\n cmd_str = 'memwrite 32 '+add_str[:-1]+' 0x'+'{:x}'.format(int(PARA3,2))\n #print(cmd_str)\n #fp_log.write(cmd_str+'\\n')\n add_A000.append(cmd_str+'\\r\\n')\n\n add_cur_140+=4\n add_cur_140_str = hex(add_cur_140)\n cmd_str1 = 'memwrite 32 ' +add_cur_140_str + ' 0x'+'{:x}'.format(int(int2bin(SamplePiontNum,23),2))\n #print(cmd_str1)\n #fp_log.write(cmd_str1+'\\n')\n add_5007.append(cmd_str1+'\\r\\n')\n\n if ofdm_print_flag==0:\n ofdm_print_flag = 1\n last_ofdm_num = OFDMSymPerPB\n else:\n ofdm_print_flag = 0\n ofdm_para = ''\n ofdm_para+=int2bin(int(OFDMSymPerPB),13)\n ofdm_para+=int2bin(int(last_ofdm_num),13)\n add_cur_ofdm_str = hex(add_cur_ofdm)\n cmd_str2 = 'memwrite 32 ' +add_cur_ofdm_str + ' 0x'+'{:x}'.format(int(ofdm_para,2))\n #print(cmd_str2)\n #fp_log.write(cmd_str2+'\\n')\n add_5007.append(cmd_str2+'\\r\\n')\n add_cur_ofdm+=4\n\n val_rp = int(round((1./ValidCarrierNum)*2**15))\n if val_rp>=255:\n val_rp=255\n cmd_140 = '0000'+int2bin(val_rp,8)+'00000000000'+int2bin(int(ValidCarrierNum),9)\n #cmd_140 = '0000'+int2bin(int(round((1/UsedCarrierNum)*2**15)),8)+'00000000000'+int2bin(int(UsedCarrierNum),9)\n cmd_str2 = 'memwrite 32 ' + '0x50071140' + ' 0x'+'{:x}'.format(int(cmd_140,2))\n add_5007.append(cmd_str2+'\\r\\n')\n\n add_A000_static_para = add_A000\n add_5007_static_para = add_5007\n #logger.info('static para OK')\n\n\n#######################################################\n\ndut_ser = serial.Serial()\ndut_ser.port = SITRACE_PORT\ndut_ser.baudrate = SITRACE_BAUDRATE\ndut_ser.open()\n\nif band==0:\n ValidCarrierNum = 411;\nelif band==1:\n ValidCarrierNum = 131;\nelif band==2:\n ValidCarrierNum = 89;\nelif band==3:\n ValidCarrierNum = 49;\n\ncmd_write = collections.OrderedDict()\n#cmd_write['1'] ='1240 0x83906826'\ncmd_write['1'] ='1240 0x83fc6a76'\n# 1240: [31] notch_zero_check enable, [30:25]: notch zero thres; [12:4]: 
fdmeanpwr_start_idx; [3:0]: fdmeanpwr_len\n# [24:18]: fdpwr_start_idx\n# 0x83fc6a76: [31]:1, [30:25]:1, [12:4]:167, [3:0]:6, [24:18]: 127\n#cmd_write['2'] ='11ac 0x60000854'\ncmd_write['2'] ='11ac 0x20080852'\n# 11ac, [19]:par check,[18:16]:par; [30]: power saving mode enable; [2:0]: fd_notch_ratio\n# [23]:max2sub check, [22:20]: thres:1.5/2/2.5/3/3.5/4\n#cmd_write['3'] ='0040 0x73190100'\ncmd_write['3'] ='0040 0x73191100'\n# 0040: [12]: sync&ce tone mask mode enable\n#cmd_write['4'] ='1110 0x10010333'\ncmd_write['4'] ='1110 0x08008333'\n# 1110: [31:22]: 1st coarse sync thres; [21:10]: 2nd coarse sync thres\n#cmd_write['5'] ='1100 0x000024E5'\ncmd_write['5'] ='1100 0x000036E5'\n# 1100: [15:8]: agc ref pwr, default 0x24; [7:0]: aft min gain\n#cmd_write['6'] ='111c 0x20C05233'\ncmd_write['6'] ='111c 0x60C05233'\n# 111c: [30]: iir bypass; [29]: 1st fine-sync symbol re-tuned peak position 1024+/-1; [27:20]: convex thres; [19:8]: fine-sync thres; [\n# [7:0]: iir alpha coef\nnarrow_mask_file = \"E:\\\\lichao\\\\reg_write\\\\tonemask_narrow_b1_3m.txt\"\ntonemask = write_reg_tonemask(narrow_mask_file)\nphy_static_para(ValidCarrierNum)\n\nfor key in cmd_write:\n send_cli_cmd(dut_ser,'memwrite 32 0x5007'+cmd_write[key]+'\\r\\n')\n print('memwrite 32 0x5007'+cmd_write[key]+'\\r\\n')\ntime.sleep(1)\nfor cmd in tonemask:\n send_cli_cmd(dut_ser,cmd)\n print(cmd)\ntime.sleep(2)\n\nfor cmd in add_A000_static_para:\n send_cli_cmd(dut_ser,cmd)\n print(cmd)\ntime.sleep(2)\nfor cmd in add_5007_static_para:\n send_cli_cmd(dut_ser,cmd)\n print(cmd)\ntime.sleep(2)\n\ndut_ser.close()","repo_name":"siwaveliu/PLC_SYSTEM_TEST","sub_path":"lib/narrow_tonemask_reg_write.py","file_name":"narrow_tonemask_reg_write.py","file_ext":"py","file_size_in_byte":11689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35633772855","text":"import requests\nimport json\nfrom guessit import guessit\nimport config\n\nclass Movie:\n def __init__(self, data):\n self.set_torrent(data)\n self.set_search_title()\n self.set_omdb_link()\n self.set_movie_info()\n \n def set_torrent(self, data):\n torrent = ''\n for item in data.find_all('a', class_=\"index\"):\n torrent = item.get('href')\n self.torrent = torrent\n\n def set_search_title(self):\n torrent_title = self.torrent.split('name=')[1].replace('%20', '.')\n clean_title = guessit(torrent_title)['title']\n self.search_title = clean_title.replace(' ', '+')\n\n def set_omdb_link(self):\n self.omdb_link = config.omdb_url + self.search_title + config.omdb_api\n\n def set_movie_info(self):\n response = requests.get(self.omdb_link)\n info = json.loads(response.text)\n self.movie_info = info\n self.movie_id = info['imdbID']\n self.title = info['Title']\n self.rating = float(info['imdbRating'])","repo_name":"spukas/site-scraper","sub_path":"movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37328101641","text":"class Solution:\n def makeSmallestPalindrome(self, s: str) -> str:\n ans = list(s)\n cmt = s[::-1]\n n = len(s)\n for i in range(n // 2 + 1):\n if s[i] != cmt[i]:\n if s[i] <= cmt[i]:\n ans[n - i - 1] = s[i]\n else:\n ans[i] = cmt[i]\n return 
''.join(ans)\n\n","repo_name":"Kan19990810/Python","sub_path":"weekly/2023.5.21/maskSmallestPalindrome.py","file_name":"maskSmallestPalindrome.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72933498153","text":"import os\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndata_path = os.path.expanduser(\"~/projects/SeqDemote/data/ATAC/mouse_asa/mouse_asa_2k.h5\")\n\n### training params\nbatch_size = 128\nsubsequence_size = 200\nmomentum = 0.9\ncuda=False\n\n# set schedule for learning rate decreases\nlearning_rate_schedule = {\n 0: 0.01,\n 10: 0.001,\n 20: 0.0001,\n}\n\nvalidate_every = 2\nsave_every = 5\n\n# This is the DeepLIFT simulation arch for testing\n#Convolution with 50 filters of width 11\n#• ReLU\n#• Convolution with 50 filters of width 11\n#• ReLU\n#• Global Average Pooling\n#• Fully connected layer with 50 neurons\n#• ReLU\n#• Dropout layer with probability 0.5 of leaving out units\n#• Fully connected layer with 3 neurons\n#• Sigmoid output\n\nclass DeepLiftRegressor(nn.Module):\n \n def __init__(self):\n super(DeepLiftRegressor, self).__init__()\n \n self.conv1 = nn.Conv2d(in_channels=4, out_channels=50, kernel_size=(1,11))\n self.conv2 = nn.Conv2d(in_channels=50, out_channels=50, kernel_size=(1,11))\n \n self.fc1 = nn.Linear(50, 50)\n self.dropout1 = nn.Dropout2d(p=0.5)\n self.fc2 = nn.Linear(50, 5)\n \n def forward(self, input):\n #l0 = nn.layers.InputLayer((batch_size, data_rows, 1, data_cols))\n \n x = F.relu(self.conv1(input))\n x = F.relu(self.conv2(x))\n \n # Global pooling here\n x = torch.mean(x.view(x.size(0), x.size(1), -1), dim=2)\n \n x = F.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n return x\n \n \n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\nnet = DeepLiftRegressor()\n\n\n\n","repo_name":"lzamparo/SeqDemote","sub_path":"src/models/torch_models/torch_asa_simple_multitask.py","file_name":"torch_asa_simple_multitask.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24908721428","text":"import logging\nimport re\nfrom copy import copy\n\nimport rispy\n\nfrom .authors import get_author_short_text, normalize_authors\n\nlogger = logging.getLogger(__name__)\n\n\nclass RisImporter:\n @classmethod\n def get_mapping(cls):\n mapping = copy(rispy.TAG_KEY_MAPPING)\n mapping.update(\n {\"AT\": \"accession_type\", \"PM\": \"pubmed_id\", \"N2\": \"abstract2\", \"SV\": \"serial_volume\"}\n )\n return mapping\n\n @classmethod\n def file_readable(cls, f):\n # ensure that file can be successfully parsed\n try:\n reader = rispy.load(f, mapping=cls.get_mapping())\n [content for content in reader]\n f.seek(0)\n return True\n except OSError as err:\n logger.warning(err)\n return False\n\n def __init__(self, f, encoding=\"utf-8\"):\n if isinstance(f, str):\n f = open(f, encoding=encoding)\n else:\n f = f\n reader = rispy.load(f, mapping=self.get_mapping(), encoding=encoding)\n contents = [content for content in reader]\n f.close()\n self.raw_references = contents\n\n @property\n def references(self):\n if not hasattr(self, \"_references\"):\n self._references = self._format()\n return self._references\n\n def _format(self):\n formatted_content = []\n for content in 
self.raw_references:\n parser = ReferenceParser(content)\n formatted_content.append(parser.format())\n return formatted_content\n\n\nclass ReferenceParser:\n ID_MISSING = \"ID field not found for all references\"\n\n PLACEHOLDER_TEXT = \"\"\n\n # field types\n TITLE_FIELDS = (\n \"translated_title\",\n \"title\",\n \"primary_title\",\n \"secondary_title\",\n \"tertiary_title\",\n \"short_title\",\n )\n\n AUTHOR_LIST_FIELDS = (\n \"authors\",\n \"first_authors\",\n \"secondary_authors\",\n \"tertiary_authors\",\n \"subsidiary_authors\",\n )\n\n ABSTRACT_FIELDS = (\"abstract\", \"abstract2\")\n\n YEAR_FIELDS = (\"year\", \"publication_year\")\n\n # Extract the scopus EID\n re_scopus_eid = re.compile(r\"eid=([-\\.\\w]+)(?:&|$)\", flags=re.UNICODE)\n\n EXTRACTED_FIELDS = [\n \"authors_short\",\n \"authors\",\n \"title\",\n \"year\",\n \"citation\",\n \"abstract\",\n \"PMID\",\n \"doi\",\n \"accession_number\",\n \"accession_db\",\n \"reference_type\",\n \"id\",\n \"json\",\n ]\n\n # Match any number (some PMIDs contain number and/or URL)\n re_pmid = re.compile(r\"([\\d]+)\")\n\n def __init__(self, content):\n self.content = content\n\n def format(self):\n \"\"\"Format RIS annotations for processing in HAWC\n\n Raises:\n ValueError: if data is in incorrect format\n\n Returns:\n Dictionary of reference metadata; saved to self._formatted\n \"\"\"\n if not hasattr(self, \"_formatted\"):\n self._formatted = dict(\n authors_short=self._get_authors_short(),\n authors=self._authors,\n title=self._get_field(self.TITLE_FIELDS, self.PLACEHOLDER_TEXT),\n year=self.get_year(self._get_field(self.YEAR_FIELDS, None)),\n citation=self._get_citation(),\n abstract=self._get_field(self.ABSTRACT_FIELDS, \"\"),\n PMID=self._get_pmid(),\n doi=self.get_doi(self.content.get(\"doi\")),\n accession_number=self._get_accession_number(),\n accession_db=self.content.get(\"name_of_database\", None),\n reference_type=self.content.get(\"type_of_reference\", None),\n id=self.get_id(self.content.get(\"id\")),\n json=self.content,\n )\n return self._formatted\n\n def get_year(self, value: str | None) -> int | None:\n if value is None or value.strip() == \"\":\n return None\n try:\n return int(value)\n except ValueError:\n raise ValueError(f\"Invalid year: {value}\")\n\n def get_doi(self, val) -> str | None:\n doi = self.content.get(\"doi\", None)\n if doi and len(doi) > 256:\n raise ValueError(f\"DOI too long: {doi}\")\n return doi\n\n def get_id(self, val) -> int:\n try:\n return int(val)\n except (TypeError, ValueError):\n raise ValueError(self.ID_MISSING)\n\n def _get_field(self, fields, default):\n for fld in fields:\n if fld in self.content:\n return self.content.get(fld)\n return default\n\n def _get_pmid(self) -> int | None:\n # get PMID if specified in that field\n if \"pubmed_id\" in self.content:\n pubmed_id = self.content[\"pubmed_id\"]\n if type(pubmed_id) is int:\n return pubmed_id\n else:\n m = self.re_pmid.findall(pubmed_id)\n if len(m) > 0:\n # no try/catch req'd; return first matching int\n return int(m[0])\n\n # get value accession number is NLM\n if self.content.get(\"name_of_database\", \"\") == \"NLM\" and \"accession_number\" in self.content:\n try:\n return int(self.content[\"accession_number\"])\n except ValueError:\n pass\n\n return None\n\n def _get_accession_number(self):\n number = self.content.get(\"accession_number\", None)\n\n if number and isinstance(number, str):\n number = number.strip()\n if \"eid=\" in number:\n # extract the Scopus EID\n m = self.re_scopus_eid.findall(number)\n if len(m) > 
0:\n number = m[0]\n elif number.lower() == \"null\":\n # sometimes a \"null\" is returned\n return None\n\n return number\n\n def _clean_authors(self):\n authors = []\n for fld in self.AUTHOR_LIST_FIELDS:\n if fld in self.content:\n authors.extend([author for author in self.content[fld]])\n self._authors = normalize_authors(authors)\n\n def _get_authors_short(self):\n if not hasattr(self, \"_authors\"):\n self._clean_authors()\n return get_author_short_text(self._authors)\n\n def _get_journal_citation(self):\n # volume is sometimes blank; only add parens if non-blank\n volume = str(self.content.get(\"volume\", \"\"))\n if len(volume) > 0:\n volume = f\"; {volume}\"\n\n # issue is sometimes blank; only add parens if non-blank\n issue = str(self.content.get(\"note\", \"\"))\n if len(issue) > 0:\n issue = f\" ({issue})\"\n\n # pages is sometimes blank; only add colon if non-blank\n pages = str(self.content.get(\"start_page\", \"\"))\n if len(pages) > 0:\n pages = f\":{pages}\"\n\n sec_title = str(self.content.get(\"secondary_title\", \"\")) # journal\n year = self.content.get(\"year\", \"\") # year\n return f\"{sec_title} {year}{volume}{issue}{pages}\"\n\n def _get_book_citation(self):\n vals = []\n if \"secondary_title\" in self.content:\n vals.append(f\"{self.content['secondary_title']}.\")\n if \"year\" in self.content:\n vals.append(f\"{self.content['year']}.\")\n if \"start_page\" in self.content:\n vals.append(f\"Pages {self.content['start_page']}.\")\n if \"issn\" in self.content:\n vals.append(f\"{self.content['issn']}\")\n return \" \".join(vals)\n\n def _get_citation(self):\n refType = self.content.get(\"type_of_reference\", \"\")\n citation = self.PLACEHOLDER_TEXT\n if refType in (\"JFULL\", \"JOUR\"):\n citation = self._get_journal_citation()\n elif refType in (\"BOOK\", \"CHAP\"):\n citation = self._get_book_citation()\n elif refType == \"SER\":\n citation = self.content.get(\"alternate_title1\", \"\")\n elif refType == \"CONF\":\n citation = self.content.get(\"short_title\", \"\")\n else:\n id_ = self.content.get(\"id\", None)\n logger.warning(f'Unknown type: \"{refType}\", id=\"{id_}\"')\n return citation\n","repo_name":"shapiromatron/hawc","sub_path":"hawc/services/utils/ris.py","file_name":"ris.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"10531838890","text":"si_mappings = dict(\n g=dict(symbol=\"kg\", factor=0.001),\n hg=dict(symbol=\"kg\", factor=0.01),\n dg=dict(symbol=\"kg\", factor=0.1),\n kg=dict(symbol=\"kg\", factor=1),\n ml=dict(symbol=\"l\", factor=0.001),\n hl=dict(symbol=\"l\", factor=0.01),\n dl=dict(symbol=\"l\", factor=0.1),\n l=dict(symbol=\"l\", factor=1),\n)\npiece_units = [\n \"stk\",\n \"boks\",\n \"flaske\",\n]\n\nquantity_units = [key for key, _ in si_mappings.items()]\n\nquantity_value_units = [\"/{}\".format(x) for x in quantity_units]\nquantity_value_units.extend([\"kr/{}\".format(x) for x in quantity_units])\nquantity_value_units.extend([\"kr{}\".format(x) for x in quantity_units])\npiece_value_units = [\"/{}\".format(x) for x in piece_units]\npiece_value_units.extend([\"kr/{}\".format(x) for x in piece_units])\npiece_value_units.extend([\"kr{}\".format(x) for x in piece_units])\n","repo_name":"viktorfa/mpn-serverless","sub_path":"python-grocery-functions/util/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"73750805994","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\n# Redefine function such as nllloss if you wish to use them as loss. See the example below\n# Note that the shape of the given input is [mxn] and the target is [m] (just as for NLLLoss).\n# See Accuracy for example to see how to handle the specific format.\n\nclass NLLLoss(nn.NLLLoss):\n \"\"\"\n Redefinition of NLLLoss, just to take into account specificities in the forward pass\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Take exactly the same arguments as the nn.NLLLoss (see:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html)\n \"\"\"\n super(NLLLoss, self).__init__(*args, **kwargs)\n\n def forward(self, input_, target, *args, **kwargs):\n \"\"\"\n Exactly as the nll loss, we simply ignore the other attributes\n \"\"\"\n return super().forward(input_, target)\n\n\nclass AccuracyLoss(nn.Module):\n \"\"\"\n Compute the accuracy loss, in a compatible way as nn.Module (Mostly for CUDA)\n \"\"\"\n\n def __init__(self, device=\"cpu\"):\n super(AccuracyLoss, self).__init__()\n\n def forward(self, input_, target, *args, **kwargs):\n \"\"\"\n Compute the accuracy\n \"\"\"\n # Selecting the outputs that should be maximized (equal to 1)\n out = input_[range(len(target)), target]\n # computing accuracy loss (distance to 1\n return 1 - out.mean()\n\n\nclass MSELoss(nn.MSELoss):\n \"\"\"\n Wrapper around the torch mse loss to facilitate integration with other custom losses\n \"\"\"\n\n def __init__(self, device=\"cpu\"):\n super(MSELoss, self).__init__()\n\n def forward(self, input_, target, *args, **kwargs):\n \"\"\" Just ignore args and kwargs\"\"\"\n return super().forward(input_, target)\n\n\nclass CircularAccuracyLoss(nn.Module):\n \"\"\"\n Compute a circular accuracy loss: the target is bins away from the real target value. If a value is above a maximum,\n then we map it back to the minimum value.\n \"\"\"\n\n def __init__(self, max_=3, step=2, device=\"cpu\"):\n \"\"\"\n Initialization\n :param max_: the maximum value of all possible targets\n :param step: the difference between the real value and the wished target\n :param device: the device on which to compute the results.\n \"\"\"\n super(CircularAccuracyLoss, self).__init__()\n self.max_ = max_ + 1\n self.step = step\n self.device = device\n self.fn = AccuracyLoss(device)\n\n def forward(self, input_, target, *args, **kwargs):\n \"\"\"\n Compute the loss\n :param input_: the predicted values\n :param target: the real value without any modifications, straight out of the dataset\n \"\"\"\n t = (target + self.step) % self.max_\n t.to(self.device)\n return self.fn(input_, t, *args, **kwargs)\n\n\nclass BalancedErrorRateLoss(nn.Module):\n \"\"\"\n Compute the balanced error rate loss.\n \"\"\"\n\n def __init__(self, targetBer=1 / 2, device=\"cpu\"):\n \"\"\"\n :param targetBer: the value of the BER to be closed to\n \"\"\"\n super(BalancedErrorRateLoss, self).__init__()\n self.targetBer = targetBer\n self.device = device\n\n def get_true_value(self, computed_ber):\n \"\"\"\n Return the true value of the computed BER, not the distance from target. BER is between [0,1/2]. Impossible to\n go beyond that interval unless wrongly implemented.\n :param computed_ber: the distance from the target BER. 
Must be of type numpy array.\n \"\"\"\n if isinstance(computed_ber, list):\n computed_ber = np.array(computed_ber)\n b = -computed_ber + self.targetBer\n return np.abs(b)\n\n def forward(self, input_, target, group, *args, **kwargs):\n \"\"\"\n Compute the balanced error rate\n :param input_: the input\n :param target: the target values\n :param group: the attribute whose values allow group constructions\n :param args: the argument to ignore\n :param kwargs: the keyword arguments to ignore\n :return: the computed loss\n \"\"\"\n # Selecting the right predictions,\n out = input_[range(len(target)), target]\n # Computing errors\n out = torch.abs(1 - out)\n # Reshaping data\n group.to(self.device)\n group = group.view(-1)\n out = out.view(-1, 1)\n l = len(out)\n # Summing by mask values\n # From: https://discuss.pytorch.org/t/groupby-aggregate-mean-in-pytorch/45335\n m = torch.zeros(group.max() + 1, l).to(self.device)\n m[group, torch.arange(l)] = 1\n m = nn.functional.normalize(m, p=1, dim=1)\n # Computing the mean of each group\n out = torch.mm(m, out)\n out = out.mean()\n # k = torch.abs(input.argmax(1) - target).type(torch.FloatTensor).to(self.device)\n # k = (k[sens==1].mean() + k[sens==0].mean())/2\n return torch.abs(self.targetBer - out)\n\n\nclass MaeSacLoss(nn.Module):\n \"\"\"\n Mae Sac loss. First loss is the reconstruction Loss, second is the sensitive attribute one.\n \"\"\"\n\n def __init__(self, alpha_=0.25, device=\"cpu\", ae_reduction=\"mean\", *args, **kwargs):\n \"\"\"\n Initialisation\n :param alpha_: the coefficient for fairness. reconstruction will be (1-alpha_)\n :param device: the device on which to compute the loss\n :param ae_reduction: the type of computation wanted for the reconstruction loss. Either sum, mean or none.\n :param ber_target: the target of the sensitive attribute loss.\n \"\"\"\n super(MaeSacLoss, self).__init__()\n self.post_rec = lambda x: x\n self.rec_loss = nn.L1Loss(reduction=ae_reduction)\n if ae_reduction == \"none\":\n self.post_rec = lambda x: x.mean(0)\n self.group_loss = AccuracyLoss(device=device)\n self.alpha_ = alpha_\n\n def forward(self, data_, data_target, disc_pred, disc_target, group, *args, **kwargs):\n rec = self.rec_loss(data_, data_target)\n rec = self.post_rec(rec)\n gr = self.group_loss(disc_pred, disc_target, group)\n return (1 - self.alpha_) * rec, self.alpha_ * gr\n\n\nclass MaeBerLoss(nn.Module):\n \"\"\"\n Mae Ber loss. First loss is the reconstruction Loss, second is the sensitive attribute one.\n \"\"\"\n\n def __init__(self, alpha_=0, device=\"cpu\", ae_reduction=\"mean\", ber_target=1 / 2, *args, **kwargs):\n \"\"\"\n Initialisation\n :param alpha_: the coefficient for fairness. reconstruction will be (1-alpha_)\n :param device: the device on which to compute the loss\n :param ae_reduction: the type of computation wanted for the reconstruction loss. 
Either sum, mean or none.\n :param ber_target: the target of the sensitive attribute loss.\n \"\"\"\n super(MaeBerLoss, self).__init__()\n self.post_rec = lambda x: x\n self.rec_loss = nn.L1Loss(reduction=ae_reduction)\n if ae_reduction == \"none\":\n self.post_rec = lambda x: x.mean(0)\n # self.group_loss = BalancedErrorRateLoss(targetBer=ber_target, device=device)\n self.alpha_ = alpha_\n\n def forward(self, data_, data_target, *args, **kwargs):\n rec = self.rec_loss(data_, data_target)\n rec = self.post_rec(rec)\n # gr = self.group_loss(disc_pred, disc_target, group)\n # First loss is the reconstruction Loss, second is the sensitive attribute one.\n return (1 - self.alpha_) * rec #, self.alpha_ * gr\n\n# Reconstruction + KL divergence losses summed over all elements and batch\ndef vae_loss(recon_x, x, mu, logvar):\n loss_fn = torch.nn.L1Loss(reduction='none')\n l1_loss = loss_fn(recon_x, x)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n # KLD = (KLD-mu)/logvar\n\n return l1_loss + KLD\n\nclass DamageAttributeLoss(nn.Module):\n \"\"\" Compute the loss per attribute and return it as a vector. \"\"\"\n\n def __init__(self, categorical_indexes, numerical_indexes, hard=False, alpha_=0.25, device=\"cpu\", ber_target=1 / 2,\n *args, **kwargs):\n \"\"\"\n Initialisation. We assume that for numerical columns, we will use the l1-loss. Change accordingly\n :param categorical_indexes: indexes of the categorical columns in list of dict\n :param numerical_indexes: index of numerical columns as list.\n :param hard: Should we be hard on the categorical error or should we just maximise the right element ?\n See hard_categorical for a more in depth explanation\n :param alpha_: the coefficient for fairness. reconstruction will be (1-alpha_)\n :param device: the device on which to compute the loss\n :param ber_target: the target of the sensitive attribute loss.\n \"\"\"\n super(DamageAttributeLoss, self).__init__()\n self.l1_none = nn.L1Loss(reduction=\"none\")\n self.c_l1_mean = lambda x, target: self.l1_none(x, target).mean(0)\n self.cat_indexes = categorical_indexes\n self.num_indexes = numerical_indexes\n self.hard = hard\n if self.hard:\n self.process_target = lambda x: x\n self.categorical_loss_func = self.hard_categorical\n else:\n self.process_target = lambda x: x.argmax(1)\n self.categorical_loss_func = AccuracyLoss(device=device)\n self.numerical_loss_func = self.c_l1_mean\n self.group_loss = BalancedErrorRateLoss(targetBer=ber_target, device=device)\n # self.alpha_ = alpha_\n\n def hard_categorical(self, data, target):\n \"\"\"\n As the model output tensor contains non zero elements (it is not always\n o_perfect =\n 1, 0, 0\n 0, 1, 0\n but instead (sometimes better values if a softmax is applied on cat output ?)\n o =\n 0.46, 0.64, 0.001\n 0.01, 0.02, 0.003\n\n We can not use argmax as it is not differentiable. 
We can not use equality input == target as we have decimals\n and we will most of the time end up with not even a single equality in the matrix, whereas doing\n input.argmax() == True might give some good results (remember that argmax is not differentiable).\n\n We can therefore do two solutions:\n \n hard = False: We pick the right element and we maximise the probability of the element, hence the accuracy\n overall: suppose target is\n t =\n 1, 0, 0\n 0, 0, 1\n We convert the target into indexes, yielding:\n 0\n 2\n And using the model output above, we obtain the vector\n v =\n 0.46\n 0.003\n which we compute the mean vm = v.mean() == accuracy, and we minimise the accuracy error 1 - vm\n\n hard = True, we want to impose some threshold on other values as well, not only on the specific target\n we compute the l1 per index, yielding\n 0.54, 0.64, 0.001\n 0.001, 0.002, 0.997\n We compute the mean per column\n cm =\n 0.2705, 0.321, 0.499\n then we sum and divide by the number of element in cm different from 0. Yielding the proportion of mistakes if\n o were equal to o_perfect\n\n \"\"\"\n all_wrong = self.c_l1_mean(data, target)\n denominator = (all_wrong != 0).sum()\n denominator = denominator if denominator != 0 else 1\n all_wrong = all_wrong.sum() / denominator\n return all_wrong\n\n def forward(self, data_, data_target, disc_pred=1, disc_target=1, group=1, *args, **kwargs):\n rec = self.numerical_loss_func(data_[:, self.num_indexes], data_target[:, self.num_indexes])\n rec = rec.view(-1)\n for c in self.cat_indexes:\n # if len(c) != 0:\n target = self.process_target(data_target[:, c])\n l = self.categorical_loss_func(data_[:, c], target)\n if len(rec) != 0:\n rec = torch.cat((rec, l.view(-1)), 0)\n else:\n rec = l.view(-1)\n\n # gr = self.group_loss(disc_pred, disc_target, group)\n return rec #(1 - self.alpha_) * rec, self.alpha_ * gr\n\n def __str__(self):\n return \"{}(\\n numerical Loss: {}\\n Group Loss: {}\\n Hard on Categorical: {}\\n)\" \\\n .format(self.__class__.__name__, str(self.numerical_loss_func), str(self.group_loss), self.hard)\n","repo_name":"jctaillandier/reconstructor","sub_path":"Modules/customLosses.py","file_name":"customLosses.py","file_ext":"py","file_size_in_byte":12628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42468589416","text":"import unittest\nimport jSonParser\n\n\nclass jSonParser_Test(unittest.TestCase):\n def test_load(self):\n callback = str\n assert_iteself(jSonParser.load, callback, ('[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]'))\n assert_iteself(jSonParser.load, callback, '[\"\\\\\"foo\\\\bar\"]')\n\n long_text = \"\"\"{\n \"programmers\": [{\n \"firstName\": \"Brett\",\n \"lastName\": \"McLaughlin\",\n \"email\": \"aaaa\"\n }, {\n \"firstName\": \"Jason\",\n \"lastName\": \"Hunter\",\n \"email\": \"bbbb\"\n }, {\n \"firstName\": \"Elliotte\",\n \"lastName\": \"Harold\",\n \"email\": \"cccc\"\n }],\n \"authors\": [{\n \"firstName\": \"Isaac\",\n \"lastName\": \"Asimov\",\n \"genre\": \"sciencefiction\"\n }, {\n \"firstName\": \"Tad\",\n \"lastName\": \"Williams\",\n \"genre\": \"fantasy\"\n }, {\n \"firstName\": \"Frank\",\n \"lastName\": \"Peretti\",\n \"genre\": \"christianfiction\"\n }],\n \"musicians\": [{\n \"firstName\": \"Eric\",\n \"lastName\": \"Clapton\",\n \"instrument\": \"guitar\"\n }, {\n \"firstName\": \"Sergei\",\n \"lastName\": \"Rachmaninoff\",\n \"instrument\": \"piano\"\n }]\n }\n \"\"\"\n assert_iteself(jSonParser.load, callback, long_text)\n\n def 
test_update(self):\n long_text = \"\"\"{\n \"programmers\": [{\n \"test\": \"test\"\n }],\n \"authors\": [{\n \"firstName\": \"Isaac\",\n \"lastName\": \"Asimov\"\n }, {\n \"firstName\": \"Tad\",\n \"lastName\": \"Williams\"\n }]\n }\n \"\"\"\n r = jSonParser.load(long_text)\n r[\"authors\"][0][\"firstName\"] = jSonParser.JString(\"Test_F\")\n r[\"authors\"][0][\"midlleName\"] = jSonParser.JString(\"Test_M\")\n r[\"authors\"].append(jSonParser.JNull())\n r[\"authors\"].append(jSonParser.JBool(True))\n r[\"authors\"].append(jSonParser.JString(\"xxx\"))\n assert str(r[\"authors\"][0]) == '{\"firstName\":\"Test_F\",\"lastName\":\"Asimov\",\"midlleName\":\"Test_M\"}'\n assert str(r[\"authors\"][2]) == 'null'\n assert str(r[\"authors\"][3]) == 'true'\n assert str(r[\"authors\"][4]) == '\"xxx\"'\n\n def test_parse_object(self):\n callback = lambda x: str(x[0])\n assert_iteself(jSonParser.parse_obj, callback, '{\"1\":true}')\n assert_iteself(jSonParser.parse_obj, callback, '{\"1\":true, \"wa\":[1,2,3,4]}')\n assert_iteself(jSonParser.parse_obj, callback, '{\"name\":\"test\",\"age\":\"18\"}')\n assert_iteself(jSonParser.parse_obj, callback, '{\"people\":null}')\n\n def test_parse_array(self):\n callback = lambda x: str(x[0])\n assert_iteself(jSonParser.parse_array, callback, '[1,\\\"w]a\\\"]haha')\n assert_iteself(jSonParser.parse_array, callback, \"[1, true]\")\n assert_iteself(jSonParser.parse_array, callback, \"[1, true ]\")\n\n\ndef assert_iteself(fn, callback, input):\n r1 = callback(fn(input))\n r2 = callback(fn(r1))\n assert r1 == r2\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"leehoawki/jSonParser","sub_path":"jSonParser_Test.py","file_name":"jSonParser_Test.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35355715632","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\nfrom cryptography.fernet import Fernet\nfrom flask import jsonify\nimport os\nimport requests\nimport base64\nfrom flask import render_template\napp = Flask(__name__)\n\n\n# Specify the path to the key file\nkey_file_path = 'BlockShare\\\\back-end\\\\python-backend\\\\keys\\\\key.key'\n\n\n\nwith open(key_file_path, 'rb') as filekey:\n key = filekey.read()\n\n# Create the Fernet class instance\ncipher_suite = Fernet(key)\nprint(key)\nCORS(app)\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n if 'file' not in request.files:\n return jsonify({'error': 'No file part'})\n\n file = request.files['file']\n \n if file.filename == '':\n return jsonify({'error': 'No selected file'})\n\n if file:\n filename = secure_filename(file.filename)\n\n # Read the uploaded file\n file_data = file.read()\n\n # Encrypt the file data\n encrypted_data = cipher_suite.encrypt(file_data)\n\n # Pin the encrypted file to Pinata\n headers = {\n 'pinata_api_key': '17c77d008bc1cd3adef3',\n 'pinata_secret_api_key': '354b0d3623035dab594d7b8e5a47c0fc9adfc16a2d92f48a8f48f4fb29ddfb24'\n }\n files = {\n 'file': (filename, encrypted_data)\n }\n response = requests.post('https://api.pinata.cloud/pinning/pinFileToIPFS', headers=headers, files=files)\n\n if response.status_code == 200:\n return jsonify({'message': 'File uploaded and pinned successfully'})\n\n return jsonify({'error': 'Failed to upload and pin the file'})\n\n@app.route('/getfiles', methods=['GET'])\ndef get_files():\n headers = {\n 'pinata_api_key': '17c77d008bc1cd3adef3',\n 
'pinata_secret_api_key': '354b0d3623035dab594d7b8e5a47c0fc9adfc16a2d92f48a8f48f4fb29ddfb24'\n }\n response = requests.get('https://api.pinata.cloud/data/pinList?status=pinned', headers=headers)\n\n print(response.json())\n\n if response.status_code == 200:\n pinned_files = response.json().get('rows', [])\n\n # print(pinned_files)\n # Decrypt files before returning\n decrypted_files = []\n for file_info in pinned_files:\n file_hash = file_info.get('ipfs_pin_hash')\n file_name = file_info.get('metadata').get('name')\n print(file_hash)\n\n # Fetch file content from Pinata\n file_response = requests.get(f'https://gateway.pinata.cloud/ipfs/{file_hash}')\n\n if file_response.status_code == 200:\n encrypted_data = file_response.content\n print(key)\n decrypted_data = cipher_suite.decrypt(encrypted_data)\n\n decrypted_data = base64.b64encode(decrypted_data).decode('utf-8')\n\n\n # Create a dictionary with 'name' and 'content'\n decrypted_files.append({\n 'name': file_name, \n 'content': decrypted_data,\n 'hash': file_hash, \n \n })\n print(\"a\",decrypted_files)\n\n return jsonify(files=decrypted_files)\n\n return jsonify({'error': 'Failed to retrieve files from Pinata'})\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"RohitBhandare/blockshare","sub_path":"back-end/python-backend/FileED.py","file_name":"FileED.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22715123087","text":"# https://scipython.com/book/chapter-6-numpy/examples/fitting-the-beer-lambert-law-with-numpy/\n\nimport numpy as np\nimport pylab\n\n# Path length, cm\npath = 0.8\n# The data: concentrations (M) and It/I0\nc = np.array([0.4, 0.6, 0.8, 1.0, 1.2])\nIt_over_I0 = np.array([ 0.891 , 0.841, 0.783, 0.744, 0.692])\n\nn = len(c)\nA = np.vstack((c, np.ones(n))).T\nT = np.log(It_over_I0)\n\nx, resid, _, _ = np.linalg.lstsq(A, T)\nm, k = x\nalpha = - m / path\nprint('alpha = {:.3f} M-1.cm-1'.format(alpha))\nprint('k', k)\nprint('rms residual = ', np.sqrt(resid[0]))\n\npylab.plot(c, T, 'o')\npylab.plot(c, m*c + k)\npylab.xlabel('$c\\;/\\mathrm{M}$')\npylab.ylabel('$\\ln(I_\\mathrm{t}/I_0)$')\npylab.show()\n","repo_name":"j3ffyang/ai","sub_path":"scripts/numpy/12beer_lambert.py","file_name":"12beer_lambert.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38400663910","text":"from __main__ import vtk, qt, ctk, slicer\nimport os\nimport random\n\n\nclass VtkObjConverter:\n def __init__(self, parent):\n parent.title = \"VTK to OBJ converter\"\n parent.categories = [\"MyModule\"]\n parent.dependencies = []\n parent.contributors = [\"Alessandro Di Girolamo (as.digirolamo@gmail.com).\"]\n parent.helpText = \"\"\"VTK to OBJ format converter. Drag and drop the VTK file you want to convert, then press \n the Apply button. A random color is given to the OBJ model and saved in a separated MTL \n file. 
Both the OBJ and the MTL files are saved in the same directory of the input VTK file.\n \"\"\"\n # parent.acknowledgementText = \"\"\"Many thanks to...\"\"\"\n\n # Set module icon\n iconPath = os.path.dirname(os.path.realpath(__file__)) + '/icon.jpeg'\n if os.path.isfile(iconPath):\n parent.icon = qt.QIcon(iconPath)\n\n self.parent = parent\n\n\nclass VtkObjConverterWidget:\n def __init__(self, parent=None):\n if not parent:\n self.parent = slicer.qMRMLWidget()\n self.parent.setLayout(qt.QVBoxLayout())\n self.parent.setMRMLScene(slicer.mrmlScene)\n else:\n self.parent = parent\n self.layout = self.parent.layout()\n if not parent:\n self.setup()\n self.parent.show()\n\n def setup(self):\n\n # Instantiate and connect widgets ...\n\n #\n # Parameters Area\n #\n parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n parametersCollapsibleButton.text = \"Input\"\n self.layout.addWidget(parametersCollapsibleButton)\n\n # Layout within the collapsible button\n parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n\n #\n # Input model selector\n #\n self.inputModelSelector = slicer.qMRMLNodeComboBox()\n self.inputModelSelector.nodeTypes = [\"vtkMRMLModelNode\"]\n self.inputModelSelector.addEnabled = False\n self.inputModelSelector.removeEnabled = True\n self.inputModelSelector.renameEnabled = True\n self.inputModelSelector.noneEnabled = False\n self.inputModelSelector.showHidden = False\n self.inputModelSelector.showChildNodeTypes = False\n self.inputModelSelector.setMRMLScene(slicer.mrmlScene)\n self.inputModelSelector.setToolTip(\"Drag and drop the VTK file you want to convert\")\n parametersFormLayout.addRow(\"Model: \", self.inputModelSelector)\n\n #\n # Apply Button\n #\n self.applyButton = qt.QPushButton(\"Apply\")\n self.applyButton.toolTip = \"Start the conversion\"\n self.applyButton.enabled = False\n parametersFormLayout.addRow(self.applyButton)\n\n # Connections\n self.inputModelSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelect)\n self.applyButton.connect('clicked(bool)', self.onApplyButton)\n\n # Add vertical spacer\n self.layout.addStretch(1)\n\n # Refresh Apply button state\n self.onSelect()\n\n def onSelect(self):\n self.applyButton.enabled = self.inputModelSelector.currentNode()\n\n def onApplyButton(self):\n logic = VtkObjLogic()\n logic.vtk_to_obj_converter(self.inputModelSelector.currentNode())\n\n\nclass VtkObjLogic:\n\n def vtk_to_obj_converter(self, node, radius=0.1, number_of_sides=3):\n\n qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)\n\n polydata = node.GetPolyData()\n tuber = vtk.vtkTubeFilter()\n tuber.SetNumberOfSides(int(number_of_sides))\n tuber.SetRadius(radius)\n tuber.SetInputData(polydata)\n tuber.Update()\n\n tubes = tuber.GetOutputDataObject(0)\n # scalars = tubes.GetPointData().GetArray(0)\n # scalars.SetName(\"scalars\")\n\n triangles = vtk.vtkTriangleFilter()\n triangles.SetInputData(tubes)\n triangles.Update()\n\n tripolydata = vtk.vtkPolyData()\n tripolydata.ShallowCopy(triangles.GetOutput())\n\n # Decrease the number of triangle of 30% to reduce Blender loading costs\n decimate = vtk.vtkDecimatePro()\n decimate.SetInputData(tripolydata)\n decimate.SetTargetReduction(.30)\n decimate.Update()\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(decimate.GetOutputPort())\n\n r = random.randrange(0, 256, 1)\n g = random.randrange(0, 256, 1)\n b = random.randrange(0, 256, 1)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n # actor.GetProperty().SetColor(0, 0, 0) # Ka: ambient color of the material (r, 
g, b)\n # VTK expects color components as doubles in [0, 1], so scale the random 0-255 values\n actor.GetProperty().SetDiffuseColor(r / 255.0, g / 255.0, b / 255.0) # Kd: diffuse color of the material (r, g, b)\n # actor.GetProperty().SetSpecularColor(0, 0, 0) # Ks: specular color of the material (r, g, b)\n\n renderer = vtk.vtkRenderer()\n renderer.AddActor(actor)\n\n window = vtk.vtkRenderWindow()\n window.AddRenderer(renderer)\n\n # Get output path\n storageNode = node.GetStorageNode()\n filepath = storageNode.GetFullNameFromFileName()\n filename = filepath.rsplit('.', 1)\n\n writer = vtk.vtkOBJExporter()\n writer.SetFilePrefix(filename[0])\n writer.SetInput(window)\n writer.Write()\n\n qt.QApplication.restoreOverrideCursor()\n\n # export once and verify via the output file; a second Write() call would run the whole export again\n if not os.path.isfile(filename[0] + '.obj'):\n qt.QMessageBox.critical(None, \"Conversion\", \"Conversion failed\")\n else:\n qt.QMessageBox.information(None, \"Conversion\", \"Conversion done\")\n","repo_name":"aledigi/vtk_to_obj_converter","sub_path":"VtkObjConverter.py","file_name":"VtkObjConverter.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15110579169","text":"N,K = map(int,input().split())\r\nh = [int(input()) for _ in range(N)]\r\n\r\nh = sorted(h)\r\nhK = h[0:K]\r\nans = hK[K-1]-h[0]\r\nfor i in range(N-K+1):\r\n ans = min(ans,h[i+K-1]-h[i])\r\n\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc115/C/4896644.py","file_name":"4896644.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"25896327849","text":"\"\"\"\nGO Enrichment Analysis\n----------------------\n\n\"\"\"\nimport sys\nimport math\nimport gc\nimport webbrowser\nimport operator\n\nfrom collections import defaultdict\nfrom functools import reduce\n\nfrom AnyQt.QtWidgets import (\n QTreeWidget, QTreeWidgetItem, QTreeView, QMenu, QCheckBox, QSplitter,\n QDialog, QVBoxLayout, QLabel, QDialogButtonBox, QLayout, QItemDelegate\n)\nfrom AnyQt.QtGui import QBrush\nfrom AnyQt.QtCore import Qt, QSize\n\nimport numpy\n\nimport Orange.data\nfrom Orange.widgets.utils.datacaching import data_hints\n\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.widgets.utils.concurrent import ThreadExecutor\n\nfrom requests.exceptions import ConnectTimeout\n\nfrom .. 
import gene, go, taxonomy\nfrom ..utils import serverfiles\nfrom ..utils import stats\nfrom ..widgets.utils.download import EnsureDownloaded\n\n\ndef isstring(var):\n return isinstance(var, Orange.data.StringVariable)\n\n\ndef listAvailable():\n files = serverfiles.listfiles(\"GO\")\n ret = {}\n for file in files:\n tags = serverfiles.info(\"GO\", file)[\"tags\"]\n td = dict([tuple(tag.split(\":\")) for tag in tags\n if tag.startswith(\"#\") and \":\" in tag])\n if \"association\" in file.lower():\n ret[td.get(\"#organism\", file)] = file\n\n essential = [\"gene_association.%s.tar.gz\" % go.from_taxid(taxid)\n for taxid in taxonomy.common_taxids()\n if go.from_taxid(taxid)]\n essentialNames = [taxonomy.name(taxid)\n for taxid in taxonomy.common_taxids()\n if go.from_taxid(taxid)]\n ret.update(zip(essentialNames, essential))\n return ret\n\n\nclass TreeNode(object):\n def __init__(self, value, children):\n self.value = value\n self.children = children\n\n\nclass GOTreeWidget(QTreeWidget):\n def contextMenuEvent(self, event):\n super().contextMenuEvent(event)\n term = self.itemAt(event.pos()).term\n self._currMenu = QMenu()\n self._currAction = self._currMenu.addAction(\"View term on AmiGO website\")\n self._currAction.triggered.connect(lambda: self.BrowserAction(term))\n self._currMenu.popup(event.globalPos())\n\n def BrowserAction(self, term):\n if isinstance(term, go.Term):\n term = term.id\n webbrowser.open(\"http://amigo.geneontology.org/cgi-bin/amigo/term-details.cgi?term=\"+term)\n\n\nclass OWGOEnrichmentAnalysis(widget.OWWidget):\n name = \"GO Browser\"\n description = \"Enrichment analysis for Gene Ontology terms.\"\n icon = \"../widgets/icons/GOBrowser.svg\"\n priority = 2020\n\n inputs = [(\"Cluster Data\", Orange.data.Table,\n \"setDataset\", widget.Single + widget.Default),\n (\"Reference Data\", Orange.data.Table,\n \"setReferenceDataset\")]\n\n outputs = [(\"Data on Selected Genes\", Orange.data.Table),\n (\"Data on Unselected Genes\", Orange.data.Table),\n (\"Data on Unknown Genes\", Orange.data.Table),\n (\"Enrichment Report\", Orange.data.Table)]\n\n settingsHandler = settings.DomainContextHandler()\n\n annotationIndex = settings.ContextSetting(0)\n geneAttrIndex = settings.ContextSetting(0)\n useAttrNames = settings.ContextSetting(False)\n geneMatcherSettings = settings.Setting([True, False, False, False])\n useReferenceDataset = settings.Setting(False)\n aspectIndex = settings.Setting(0)\n\n useEvidenceType = settings.Setting(\n {et: True for et in go.evidenceTypesOrdered})\n\n filterByNumOfInstances = settings.Setting(False)\n minNumOfInstances = settings.Setting(1)\n filterByPValue = settings.Setting(True)\n maxPValue = settings.Setting(0.2)\n filterByPValue_nofdr = settings.Setting(False)\n maxPValue_nofdr = settings.Setting(0.01)\n probFunc = settings.Setting(0)\n\n selectionDirectAnnotation = settings.Setting(0)\n selectionDisjoint = settings.Setting(0)\n selectionAddTermAsClass = settings.Setting(0)\n\n Ready, Initializing, Running = 0, 1, 2\n\n def __init__(self, parent=None):\n super().__init__(self, parent)\n\n self.clusterDataset = None\n self.referenceDataset = None\n self.ontology = None\n self.annotations = None\n self.loadedAnnotationCode = \"---\"\n self.treeStructRootKey = None\n self.probFunctions = [stats.Binomial(), stats.Hypergeometric()]\n self.selectedTerms = []\n\n self.selectionChanging = 0\n self.__state = OWGOEnrichmentAnalysis.Initializing\n\n self.annotationCodes = []\n\n #############\n ## GUI\n #############\n self.tabs = 
gui.tabWidget(self.controlArea)\n ## Input tab\n self.inputTab = gui.createTabPage(self.tabs, \"Input\")\n box = gui.widgetBox(self.inputTab, \"Info\")\n self.infoLabel = gui.widgetLabel(box, \"No data on input\\n\")\n\n gui.button(box, self, \"Ontology/Annotation Info\",\n callback=self.ShowInfo,\n tooltip=\"Show information on loaded ontology and annotations\")\n\n box = gui.widgetBox(self.inputTab, \"Organism\")\n self.annotationComboBox = gui.comboBox(\n box, self, \"annotationIndex\", items=self.annotationCodes,\n callback=self._updateEnrichment, tooltip=\"Select organism\")\n\n genebox = gui.widgetBox(self.inputTab, \"Gene Names\")\n self.geneAttrIndexCombo = gui.comboBox(\n genebox, self, \"geneAttrIndex\", callback=self._updateEnrichment,\n tooltip=\"Use this attribute to extract gene names from input data\")\n self.geneAttrIndexCombo.setDisabled(self.useAttrNames)\n\n cb = gui.checkBox(genebox, self, \"useAttrNames\", \"Use column names\",\n tooltip=\"Use column names for gene names\",\n callback=self._updateEnrichment)\n cb.toggled[bool].connect(self.geneAttrIndexCombo.setDisabled)\n\n gui.button(genebox, self, \"Gene matcher settings\",\n callback=self.UpdateGeneMatcher,\n tooltip=\"Open gene matching settings dialog\")\n\n self.referenceRadioBox = gui.radioButtonsInBox(\n self.inputTab, self, \"useReferenceDataset\",\n [\"Entire genome\", \"Reference set (input)\"],\n tooltips=[\"Use entire genome for reference\",\n \"Use genes from Reference Examples input signal as reference\"],\n box=\"Reference\", callback=self._updateEnrichment)\n\n self.referenceRadioBox.buttons[1].setDisabled(True)\n gui.radioButtonsInBox(\n self.inputTab, self, \"aspectIndex\",\n [\"Biological process\", \"Cellular component\", \"Molecular function\"],\n box=\"Aspect\", callback=self._updateEnrichment)\n\n ## Filter tab\n self.filterTab = gui.createTabPage(self.tabs, \"Filter\")\n box = gui.widgetBox(self.filterTab, \"Filter GO Term Nodes\")\n gui.checkBox(box, self, \"filterByNumOfInstances\", \"Genes\",\n callback=self.FilterAndDisplayGraph, \n tooltip=\"Filter by number of input genes mapped to a term\")\n ibox = gui.indentedBox(box)\n gui.spin(ibox, self, 'minNumOfInstances', 1, 100,\n step=1, label='#:', labelWidth=15,\n callback=self.FilterAndDisplayGraph,\n callbackOnReturn=True,\n tooltip=\"Min. 
number of input genes mapped to a term\")\n\n gui.checkBox(box, self, \"filterByPValue_nofdr\", \"p-value\",\n callback=self.FilterAndDisplayGraph,\n tooltip=\"Filter by term p-value\")\n\n gui.doubleSpin(gui.indentedBox(box), self, 'maxPValue_nofdr', 1e-8, 1,\n step=1e-8, label='p:', labelWidth=15,\n callback=self.FilterAndDisplayGraph,\n callbackOnReturn=True,\n tooltip=\"Max term p-value\")\n\n #use filterByPValue for FDR, as it was the default in prior versions\n gui.checkBox(box, self, \"filterByPValue\", \"FDR\",\n callback=self.FilterAndDisplayGraph,\n tooltip=\"Filter by term FDR\")\n gui.doubleSpin(gui.indentedBox(box), self, 'maxPValue', 1e-8, 1,\n step=1e-8, label='p:', labelWidth=15,\n callback=self.FilterAndDisplayGraph,\n callbackOnReturn=True,\n tooltip=\"Max term p-value\")\n\n box = gui.widgetBox(box, \"Significance test\")\n\n gui.radioButtonsInBox(box, self, \"probFunc\", [\"Binomial\", \"Hypergeometric\"],\n tooltips=[\"Use binomial distribution test\",\n \"Use hypergeometric distribution test\"],\n callback=self._updateEnrichment)\n box = gui.widgetBox(self.filterTab, \"Evidence codes in annotation\",\n addSpace=True)\n self.evidenceCheckBoxDict = {}\n for etype in go.evidenceTypesOrdered:\n ecb = QCheckBox(\n etype, toolTip=go.evidenceTypes[etype],\n checked=self.useEvidenceType[etype])\n ecb.toggled.connect(self.__on_evidenceChanged)\n box.layout().addWidget(ecb)\n self.evidenceCheckBoxDict[etype] = ecb\n\n ## Select tab\n self.selectTab = gui.createTabPage(self.tabs, \"Select\")\n box = gui.radioButtonsInBox(\n self.selectTab, self, \"selectionDirectAnnotation\",\n [\"Directly or Indirectly\", \"Directly\"],\n box=\"Annotated genes\",\n callback=self.ExampleSelection)\n\n box = gui.widgetBox(self.selectTab, \"Output\", addSpace=True)\n gui.radioButtonsInBox(\n box, self, \"selectionDisjoint\",\n btnLabels=[\"All selected genes\",\n \"Term-specific genes\",\n \"Common term genes\"],\n tooltips=[\"Outputs genes annotated to all selected GO terms\",\n \"Outputs genes that appear in only one of selected GO terms\", \n \"Outputs genes common to all selected GO terms\"],\n callback=[self.ExampleSelection,\n self.UpdateAddClassButton])\n\n self.addClassCB = gui.checkBox(\n box, self, \"selectionAddTermAsClass\", \"Add GO Term as class\",\n callback=self.ExampleSelection)\n\n # ListView for DAG, and table for significant GOIDs\n self.DAGcolumns = ['GO term', 'Cluster', 'Reference', 'p-value',\n 'FDR', 'Genes', 'Enrichment']\n\n self.splitter = QSplitter(Qt.Vertical, self.mainArea)\n self.mainArea.layout().addWidget(self.splitter)\n\n # list view\n self.listView = GOTreeWidget(self.splitter)\n self.listView.setSelectionMode(QTreeView.ExtendedSelection)\n self.listView.setAllColumnsShowFocus(1)\n self.listView.setColumnCount(len(self.DAGcolumns))\n self.listView.setHeaderLabels(self.DAGcolumns)\n\n self.listView.header().setSectionsClickable(True)\n self.listView.header().setSortIndicatorShown(True)\n self.listView.setSortingEnabled(True)\n self.listView.setItemDelegateForColumn(\n 6, EnrichmentColumnItemDelegate(self))\n self.listView.setRootIsDecorated(True)\n\n self.listView.itemSelectionChanged.connect(self.ViewSelectionChanged)\n\n # table of significant GO terms\n self.sigTerms = QTreeWidget(self.splitter)\n self.sigTerms.setColumnCount(len(self.DAGcolumns))\n self.sigTerms.setHeaderLabels(self.DAGcolumns)\n self.sigTerms.setSortingEnabled(True)\n self.sigTerms.setSelectionMode(QTreeView.ExtendedSelection)\n self.sigTerms.setItemDelegateForColumn(\n 6, 
EnrichmentColumnItemDelegate(self))\n\n self.sigTerms.itemSelectionChanged.connect(self.TableSelectionChanged)\n\n self.sigTableTermsSorted = []\n self.graph = {}\n\n self.inputTab.layout().addStretch(1)\n self.filterTab.layout().addStretch(1)\n self.selectTab.layout().addStretch(1)\n\n self.setBlocking(True)\n self._executor = ThreadExecutor()\n self._init = EnsureDownloaded(\n [(taxonomy.Taxonomy.DOMAIN, taxonomy.Taxonomy.FILENAME),\n (\"GO\", \"taxonomy.pickle\")]\n )\n self._init.finished.connect(self.__initialize_finish)\n self._executor.submit(self._init)\n\n\n def sizeHint(self):\n return QSize(1000, 700)\n\n def __initialize_finish(self):\n self.setBlocking(False)\n\n try:\n self.annotationFiles = listAvailable()\n except ConnectTimeout:\n self.error(2, \"Internet connection error, unable to load data. \" + \\\n \"Check connection and create a new GO Browser widget.\")\n self.filterTab.setEnabled(False)\n self.inputTab.setEnabled(False)\n self.selectTab.setEnabled(False)\n self.listView.setEnabled(False)\n self.sigTerms.setEnabled(False)\n else:\n self.annotationCodes = sorted(self.annotationFiles.keys())\n self.annotationComboBox.clear()\n self.annotationComboBox.addItems(self.annotationCodes)\n self.annotationComboBox.setCurrentIndex(self.annotationIndex)\n self.__state = OWGOEnrichmentAnalysis.Ready\n\n def __on_evidenceChanged(self):\n for etype, cb in self.evidenceCheckBoxDict.items():\n self.useEvidenceType[etype] = cb.isChecked()\n self._updateEnrichment()\n\n def UpdateGeneMatcher(self):\n \"\"\"Open the Gene matcher settings dialog.\"\"\"\n dialog = GeneMatcherDialog(self, defaults=self.geneMatcherSettings, modal=True)\n if dialog.exec_() != QDialog.Rejected:\n self.geneMatcherSettings = [getattr(dialog, item[0]) for item in dialog.items]\n if self.annotations:\n self.SetGeneMatcher()\n self._updateEnrichment()\n\n def clear(self):\n self.infoLabel.setText(\"No data on input\\n\")\n self.warning(0)\n self.warning(1)\n self.geneAttrIndexCombo.clear()\n self.ClearGraph()\n\n self.send(\"Data on Selected Genes\", None)\n self.send(\"Data on Unselected Genes\", None)\n self.send(\"Data on Unknown Genes\", None)\n self.send(\"Enrichment Report\", None)\n\n def setDataset(self, data=None):\n if self.__state == OWGOEnrichmentAnalysis.Initializing:\n self.__initialize_finish()\n\n self.closeContext()\n self.clear()\n self.clusterDataset = data\n\n if data is not None:\n domain = data.domain\n allvars = domain.variables + domain.metas\n self.candidateGeneAttrs = [var for var in allvars if isstring(var)]\n\n self.geneAttrIndexCombo.clear()\n for var in self.candidateGeneAttrs:\n self.geneAttrIndexCombo.addItem(*gui.attributeItem(var))\n taxid = data_hints.get_hint(data, \"taxid\", \"\")\n code = None\n try:\n code = go.from_taxid(taxid)\n except KeyError:\n pass\n except Exception as ex:\n print(ex)\n\n if code is not None:\n filename = \"gene_association.%s.tar.gz\" % code\n if filename in self.annotationFiles.values():\n self.annotationIndex = \\\n [i for i, name in enumerate(self.annotationCodes) \\\n if self.annotationFiles[name] == filename].pop()\n\n self.useAttrNames = data_hints.get_hint(data, \"genesinrows\",\n self.useAttrNames)\n self.openContext(data)\n\n self.geneAttrIndex = min(self.geneAttrIndex,\n len(self.candidateGeneAttrs) - 1)\n if len(self.candidateGeneAttrs) == 0:\n self.useAttrNames = True\n self.geneAttrIndex = -1\n elif self.geneAttrIndex < len(self.candidateGeneAttrs):\n self.geneAttrIndex = len(self.candidateGeneAttrs) - 1\n\n self._updateEnrichment()\n\n 
def setReferenceDataset(self, data=None):\n self.referenceDataset = data\n self.referenceRadioBox.buttons[1].setDisabled(not bool(data))\n self.referenceRadioBox.buttons[1].setText(\"Reference set\")\n if self.clusterDataset is not None and self.useReferenceDataset:\n self.useReferenceDataset = 0 if not data else 1\n graph = self.Enrichment()\n self.SetGraph(graph)\n elif self.clusterDataset:\n self.__updateReferenceSetButton()\n\n def handleNewSignals(self):\n super().handleNewSignals()\n\n def _updateEnrichment(self):\n if self.clusterDataset is not None and \\\n self.__state == OWGOEnrichmentAnalysis.Ready:\n pb = gui.ProgressBar(self, 100)\n self.Load(pb=pb)\n graph = self.Enrichment(pb=pb)\n self.FilterUnknownGenes()\n self.SetGraph(graph)\n\n def __updateReferenceSetButton(self):\n allgenes, refgenes = None, None\n if self.referenceDataset:\n try:\n allgenes = self.genesFromTable(self.referenceDataset)\n except Exception:\n allgenes = []\n refgenes, unknown = self.FilterAnnotatedGenes(allgenes)\n self.referenceRadioBox.buttons[1].setDisabled(not bool(allgenes))\n self.referenceRadioBox.buttons[1].setText(\"Reference set \" + (\"(%i genes, %i matched)\" % (len(allgenes), len(refgenes)) if allgenes and refgenes else \"\"))\n\n def genesFromTable(self, data):\n if self.useAttrNames:\n genes = [v.name for v in data.domain.variables]\n else:\n attr = self.candidateGeneAttrs[min(self.geneAttrIndex, len(self.candidateGeneAttrs) - 1)]\n genes = [str(ex[attr]) for ex in data if not numpy.isnan(ex[attr])]\n if any(\",\" in gene for gene in genes):\n self.information(0, \"Separators detected in gene names. Assuming multiple genes per example.\")\n genes = reduce(operator.iadd, (genes.split(\",\") for genes in genes), [])\n return genes\n\n def FilterAnnotatedGenes(self, genes):\n matchedgenes = self.annotations.get_gene_names_translator(genes).values()\n return matchedgenes, [gene for gene in genes if gene not in matchedgenes]\n\n def FilterUnknownGenes(self):\n if not self.useAttrNames and self.candidateGeneAttrs:\n geneAttr = self.candidateGeneAttrs[min(self.geneAttrIndex, len(self.candidateGeneAttrs)-1)]\n indices = []\n for i, ex in enumerate(self.clusterDataset):\n if not any(self.annotations.genematcher.match(n.strip()) for n in str(ex[geneAttr]).split(\",\")):\n indices.append(i)\n if indices:\n data = self.clusterDataset[indices]\n else:\n data = None\n self.send(\"Data on Unknown Genes\", data)\n else:\n self.send(\"Data on Unknown Genes\", None)\n\n def Load(self, pb=None):\n\n if self.__state == OWGOEnrichmentAnalysis.Ready:\n go_files, tax_files = serverfiles.listfiles(\"GO\"), serverfiles.listfiles(\"Taxonomy\")\n calls = []\n pb, finish = (gui.ProgressBar(self, 0), True) if pb is None else (pb, False)\n count = 0\n if not tax_files:\n calls.append((\"Taxonomy\", \"ncbi_taxnomy.tar.gz\"))\n count += 1\n org = self.annotationCodes[min(self.annotationIndex, len(self.annotationCodes)-1)]\n if org != self.loadedAnnotationCode:\n count += 1\n if self.annotationFiles[org] not in go_files:\n calls.append((\"GO\", self.annotationFiles[org]))\n count += 1\n\n if \"gene_ontology_edit.obo.tar.gz\" not in go_files:\n calls.append((\"GO\", \"gene_ontology_edit.obo.tar.gz\"))\n count += 1\n if not self.ontology:\n count += 1\n pb.iter += count * 100\n\n for args in calls:\n serverfiles.localpath_download(*args, **dict(callback=pb.advance))\n\n i = len(calls)\n if not self.ontology:\n self.ontology = go.Ontology(progress_callback=lambda value: pb.advance())\n i += 1\n\n if org != 
self.loadedAnnotationCode:\n self.annotations = None\n gc.collect() # Force run garbage collection\n code = self.annotationFiles[org].split(\".\")[-3]\n self.annotations = go.Annotations(code, genematcher=gene.GMDirect(), progress_callback=lambda value: pb.advance())\n i += 1\n self.loadedAnnotationCode = org\n count = defaultdict(int)\n geneSets = defaultdict(set)\n\n for anno in self.annotations.annotations:\n count[anno.evidence] += 1\n geneSets[anno.evidence].add(anno.geneName)\n for etype in go.evidenceTypesOrdered:\n ecb = self.evidenceCheckBoxDict[etype]\n ecb.setEnabled(bool(count[etype]))\n ecb.setText(etype + \": %i annots(%i genes)\" % (count[etype], len(geneSets[etype])))\n if finish:\n pb.finish()\n\n def SetGeneMatcher(self):\n if self.annotations:\n taxid = self.annotations.taxid\n matchers = []\n for matcher, use in zip([gene.GMGO, gene.GMKEGG, gene.GMNCBI, gene.GMAffy], self.geneMatcherSettings):\n if use:\n try:\n if taxid == \"352472\":\n matchers.extend([matcher(taxid), gene.GMDicty(),\n [matcher(taxid), gene.GMDicty()]])\n # The reason machers are duplicated is that we want `matcher` or `GMDicty` to\n # match genes by them self if possible. Only use the joint matcher if they fail. \n else:\n matchers.append(matcher(taxid))\n except Exception as ex:\n print(ex)\n self.annotations.genematcher = gene.matcher(matchers)\n self.annotations.genematcher.set_targets(self.annotations.gene_names)\n\n def Enrichment(self, pb=None):\n assert self.clusterDataset is not None\n\n pb = gui.ProgressBar(self, 100) if pb is None else pb\n if not self.annotations.ontology:\n self.annotations.ontology = self.ontology\n\n if isinstance(self.annotations.genematcher, gene.GMDirect):\n self.SetGeneMatcher()\n self.error(1)\n self.warning([0, 1])\n\n if self.useAttrNames:\n clusterGenes = [v.name for v in self.clusterDataset.domain.attributes]\n self.information(0)\n elif 0 <= self.geneAttrIndex < len(self.candidateGeneAttrs):\n geneAttr = self.candidateGeneAttrs[self.geneAttrIndex]\n clusterGenes = [str(ex[geneAttr]) for ex in self.clusterDataset\n if not numpy.isnan(ex[geneAttr])]\n if any(\",\" in gene for gene in clusterGenes):\n self.information(0, \"Separators detected in cluster gene names. Assuming multiple genes per example.\")\n clusterGenes = reduce(operator.iadd, (genes.split(\",\") for genes in clusterGenes), [])\n else:\n self.information(0)\n else:\n self.error(1, \"Failed to extract gene names from input dataset!\")\n return {}\n\n genesSetCount = len(set(clusterGenes))\n\n self.clusterGenes = clusterGenes = self.annotations.get_gene_names_translator(clusterGenes).values()\n\n self.infoLabel.setText(\"%i unique genes on input\\n%i (%.1f%%) genes with known annotations\" % (genesSetCount, len(clusterGenes), 100.0*len(clusterGenes)/genesSetCount if genesSetCount else 0.0))\n\n referenceGenes = None\n if not self.useReferenceDataset or self.referenceDataset is None:\n self.information(2)\n self.information(1)\n referenceGenes = self.annotations.gene_names\n\n elif self.referenceDataset is not None:\n if self.useAttrNames:\n referenceGenes = [v.name for v in self.referenceDataset.domain.attributes]\n self.information(1)\n elif geneAttr in (self.referenceDataset.domain.variables +\n self.referenceDataset.domain.metas):\n referenceGenes = [str(ex[geneAttr]) for ex in self.referenceDataset\n if not numpy.isnan(ex[geneAttr])]\n if any(\",\" in gene for gene in clusterGenes):\n self.information(1, \"Separators detected in reference gene names. 
Assuming multiple genes per example.\")\n referenceGenes = reduce(operator.iadd, (genes.split(\",\") for genes in referenceGenes), [])\n else:\n self.information(1)\n else:\n self.information(1)\n referenceGenes = None\n\n if referenceGenes is None:\n referenceGenes = list(self.annotations.gene_names)\n self.referenceRadioBox.buttons[1].setText(\"Reference set\")\n self.referenceRadioBox.buttons[1].setDisabled(True)\n self.information(2, \"Unable to extract gene names from reference dataset. Using entire genome for reference\")\n self.useReferenceDataset = 0\n else:\n refc = len(referenceGenes)\n referenceGenes = self.annotations.get_gene_names_translator(referenceGenes).values()\n self.referenceRadioBox.buttons[1].setText(\"Reference set (%i genes, %i matched)\" % (refc, len(referenceGenes)))\n self.referenceRadioBox.buttons[1].setDisabled(False)\n self.information(2)\n else:\n self.useReferenceDataset = 0\n\n if not referenceGenes:\n self.error(1, \"No valid reference set\")\n return {}\n\n self.referenceGenes = referenceGenes\n evidences = []\n for etype in go.evidenceTypesOrdered:\n if self.useEvidenceType[etype]:\n evidences.append(etype)\n aspect = [\"P\", \"C\", \"F\"][self.aspectIndex]\n\n if clusterGenes:\n self.terms = terms = self.annotations.get_enriched_terms(\n clusterGenes, referenceGenes, evidences, aspect=aspect,\n prob=self.probFunctions[self.probFunc], use_fdr=False,\n progress_callback=lambda value: pb.advance())\n ids = []\n pvals = []\n for i, d in self.terms.items():\n ids.append(i)\n pvals.append(d[1])\n for i, fdr in zip(ids, stats.FDR(pvals)): # save FDR as the last part of the tuple\n terms[i] = tuple(list(terms[i]) + [ fdr ])\n\n else:\n self.terms = terms = {}\n if not self.terms:\n self.warning(0, \"No enriched terms found.\")\n else:\n self.warning(0)\n\n pb.finish()\n self.treeStructDict = {}\n ids = self.terms.keys()\n\n self.treeStructRootKey = None\n\n parents = {}\n for id in ids:\n parents[id] = set([term for _, term in self.ontology[id].related])\n\n children = {}\n for term in self.terms:\n children[term] = set([id for id in ids if term in parents[id]])\n\n for term in self.terms:\n self.treeStructDict[term] = TreeNode(self.terms[term], children[term])\n if not self.ontology[term].related and not getattr(self.ontology[term], \"is_obsolete\", False):\n self.treeStructRootKey = term\n return terms\n\n def FilterGraph(self, graph):\n if self.filterByPValue_nofdr:\n graph = go.filterByPValue(graph, self.maxPValue_nofdr)\n if self.filterByPValue: #FDR\n graph = dict(filter(lambda item: item[1][3] <= self.maxPValue, graph.items()))\n if self.filterByNumOfInstances:\n graph = dict(filter(lambda item: len(item[1][0]) >= self.minNumOfInstances, graph.items()))\n return graph\n\n def FilterAndDisplayGraph(self):\n if self.clusterDataset:\n self.graph = self.FilterGraph(self.originalGraph)\n if self.originalGraph and not self.graph:\n self.warning(1, \"All found terms were filtered out.\")\n else:\n self.warning(1)\n self.ClearGraph()\n self.DisplayGraph()\n\n def SetGraph(self, graph=None):\n self.originalGraph = graph\n if graph:\n self.FilterAndDisplayGraph()\n else:\n self.graph = {}\n self.ClearGraph()\n\n def ClearGraph(self):\n self.listView.clear()\n self.listViewItems=[]\n self.sigTerms.clear()\n\n def DisplayGraph(self):\n fromParentDict = {}\n self.termListViewItemDict = {}\n self.listViewItems = []\n enrichment = lambda t: len(t[0]) / t[2] * (len(self.referenceGenes) / len(self.clusterGenes))\n maxFoldEnrichment = max([enrichment(term) for term in 
self.graph.values()] or [1])\n\n def addNode(term, parent, parentDisplayNode):\n if (parent, term) in fromParentDict:\n return\n if term in self.graph:\n displayNode = GOTreeWidgetItem(self.ontology[term], self.graph[term], len(self.clusterGenes), len(self.referenceGenes), maxFoldEnrichment, parentDisplayNode)\n displayNode.goId = term\n self.listViewItems.append(displayNode)\n if term in self.termListViewItemDict:\n self.termListViewItemDict[term].append(displayNode)\n else:\n self.termListViewItemDict[term] = [displayNode]\n fromParentDict[(parent, term)] = True\n parent = term\n else:\n displayNode = parentDisplayNode\n\n for c in self.treeStructDict[term].children:\n addNode(c, parent, displayNode)\n\n if self.treeStructDict:\n addNode(self.treeStructRootKey, None, self.listView)\n\n terms = self.graph.items()\n terms = sorted(terms, key=lambda item: item[1][1])\n self.sigTableTermsSorted = [t[0] for t in terms]\n\n self.sigTerms.clear()\n for i, (t_id, (genes, p_value, refCount, fdr)) in enumerate(terms):\n item = GOTreeWidgetItem(self.ontology[t_id],\n (genes, p_value, refCount, fdr),\n len(self.clusterGenes),\n len(self.referenceGenes),\n maxFoldEnrichment,\n self.sigTerms)\n item.goId = t_id\n\n self.listView.expandAll()\n for i in range(5):\n self.listView.resizeColumnToContents(i)\n self.sigTerms.resizeColumnToContents(i)\n self.sigTerms.resizeColumnToContents(6)\n width = min(self.listView.columnWidth(0), 350)\n self.listView.setColumnWidth(0, width)\n self.sigTerms.setColumnWidth(0, width)\n\n # Create and send the enrichemnt report table.\n termsDomain = Orange.data.Domain(\n [], [],\n # All is meta!\n [Orange.data.StringVariable(\"GO Term Id\"),\n Orange.data.StringVariable(\"GO Term Name\"),\n Orange.data.ContinuousVariable(\"Cluster Frequency\"),\n Orange.data.ContinuousVariable(\"Genes in Cluster\", number_of_decimals=0),\n Orange.data.ContinuousVariable(\"Reference Frequency\"),\n Orange.data.ContinuousVariable(\"Genes in Reference\", number_of_decimals=0),\n Orange.data.ContinuousVariable(\"p-value\"),\n Orange.data.ContinuousVariable(\"FDR\"),\n Orange.data.ContinuousVariable(\"Enrichment\"),\n Orange.data.StringVariable(\"Genes\")])\n\n terms = [[t_id,\n self.ontology[t_id].name,\n len(genes) / len(self.clusterGenes),\n len(genes),\n r_count / len(self.referenceGenes),\n r_count,\n p_value,\n fdr,\n len(genes) / len(self.clusterGenes) * \\\n len(self.referenceGenes) / r_count,\n \",\".join(genes)\n ]\n for t_id, (genes, p_value, r_count, fdr) in terms]\n\n if terms:\n X = numpy.empty((len(terms), 0))\n M = numpy.array(terms, dtype=object)\n termsTable = Orange.data.Table.from_numpy(termsDomain, X, metas=M)\n else:\n termsTable = Orange.data.Table(termsDomain)\n self.send(\"Enrichment Report\", termsTable)\n\n def ViewSelectionChanged(self):\n if self.selectionChanging:\n return\n\n self.selectionChanging = 1\n self.selectedTerms = []\n selected = self.listView.selectedItems()\n self.selectedTerms = list(set([lvi.term.id for lvi in selected]))\n self.ExampleSelection()\n self.selectionChanging = 0\n\n def TableSelectionChanged(self):\n if self.selectionChanging:\n return\n\n self.selectionChanging = 1\n self.selectedTerms = []\n selectedIds = set([self.sigTerms.itemFromIndex(index).goId for index in self.sigTerms.selectedIndexes()])\n\n for i in range(self.sigTerms.topLevelItemCount()):\n item = self.sigTerms.topLevelItem(i)\n selected = item.goId in selectedIds\n term = item.goId\n\n if selected:\n self.selectedTerms.append(term)\n\n for lvi in 
self.termListViewItemDict[term]:\n try:\n lvi.setSelected(selected)\n if selected:\n lvi.setExpanded(True)\n except RuntimeError: # Underlying C/C++ object deleted\n pass\n\n self.ExampleSelection()\n self.selectionChanging = 0\n\n def UpdateAddClassButton(self):\n self.addClassCB.setEnabled(self.selectionDisjoint == 1)\n\n def ExampleSelection(self):\n self.commit()\n\n def commit(self):\n if self.clusterDataset is None:\n return\n\n terms = set(self.selectedTerms)\n genes = reduce(operator.ior,\n (set(self.graph[term][0]) for term in terms), set())\n\n evidences = []\n for etype in go.evidenceTypesOrdered:\n if self.useEvidenceType[etype]:\n# if getattr(self, \"useEvidence\" + etype):\n evidences.append(etype)\n allTerms = self.annotations.get_annotated_terms(\n genes, direct_annotation_only=self.selectionDirectAnnotation,\n evidence_codes=evidences)\n\n if self.selectionDisjoint > 0:\n count = defaultdict(int)\n for term in self.selectedTerms:\n for g in allTerms.get(term, []):\n count[g] += 1\n ccount = 1 if self.selectionDisjoint == 1 else len(self.selectedTerms)\n selectedGenes = [gene for gene, c in count.items()\n if c == ccount and gene in genes]\n else:\n selectedGenes = reduce(\n operator.ior,\n (set(allTerms.get(term, [])) for term in self.selectedTerms),\n set())\n\n if self.useAttrNames:\n vars = [self.clusterDataset.domain[gene]\n for gene in set(selectedGenes)]\n domain = Orange.data.Domain(\n vars, self.clusterDataset.domain.class_vars,\n self.clusterDataset.domain.metas)\n newdata = self.clusterDataset.from_table(domain, self.clusterDataset)\n\n self.send(\"Data on Selected Genes\", newdata)\n self.send(\"Data on Unselected Genes\", None)\n elif self.candidateGeneAttrs:\n selectedExamples = []\n unselectedExamples = []\n\n geneAttr = self.candidateGeneAttrs[min(self.geneAttrIndex, len(self.candidateGeneAttrs)-1)]\n\n if self.selectionDisjoint == 1:\n goVar = Orange.data.DiscreteVariable(\n \"GO Term\", values=list(self.selectedTerms))\n newDomain = Orange.data.Domain(\n self.clusterDataset.domain.variables, goVar,\n self.clusterDataset.domain.metas)\n goColumn = []\n for i, ex in enumerate(self.clusterDataset):\n if not numpy.isnan(ex[geneAttr]) and any(gene in selectedGenes for gene in str(ex[geneAttr]).split(\",\")):\n if self.selectionDisjoint == 1 and self.selectionAddTermAsClass:\n terms = filter(lambda term: any(gene in self.graph[term][0] for gene in str(ex[geneAttr]).split(\",\")) , self.selectedTerms)\n term = sorted(terms)[0]\n goColumn.append(goVar.values.index(term))\n selectedExamples.append(i)\n else:\n unselectedExamples.append(i)\n\n if selectedExamples:\n selectedExamples = self.clusterDataset[selectedExamples]\n if self.selectionDisjoint == 1 and self.selectionAddTermAsClass:\n selectedExamples = Orange.data.Table.from_table(newDomain, selectedExamples)\n view, issparse = selectedExamples.get_column_view(goVar)\n assert not issparse\n view[:] = goColumn\n else:\n selectedExamples = None\n\n if unselectedExamples:\n unselectedExamples = self.clusterDataset[unselectedExamples]\n else:\n unselectedExamples = None\n\n self.send(\"Data on Selected Genes\", selectedExamples)\n self.send(\"Data on Unselected Genes\", unselectedExamples)\n\n def ShowInfo(self):\n dialog = QDialog(self)\n dialog.setModal(False)\n dialog.setLayout(QVBoxLayout())\n label = QLabel(dialog)\n label.setText(\"Ontology:\\n\" + self.ontology.header\n if self.ontology else \"Ontology not loaded!\")\n dialog.layout().addWidget(label)\n\n label = QLabel(dialog)\n 
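# second label: shows the annotations file header beneath the ontology header\n        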
label.setText(\"Annotations:\\n\" + self.annotations.header.replace(\"!\", \"\")\n if self.annotations else \"Annotations not loaded!\")\n dialog.layout().addWidget(label)\n dialog.show()\n\n def onDeleteWidget(self):\n \"\"\"Called before the widget is removed from the canvas.\n \"\"\"\n self.annotations = None\n self.ontology = None\n gc.collect() # Force collection\n\nfmtp = lambda score: \"%0.5f\" % score if score > 10e-4 else \"%0.1e\" % score\nfmtpdet = lambda score: \"%0.9f\" % score if score > 10e-4 else \"%0.5e\" % score\n\n\nclass GOTreeWidgetItem(QTreeWidgetItem):\n def __init__(self, term, enrichmentResult, nClusterGenes, nRefGenes,\n maxFoldEnrichment, parent):\n super().__init__(parent)\n self.term = term\n self.enrichmentResult = enrichmentResult\n self.nClusterGenes = nClusterGenes\n self.nRefGenes = nRefGenes\n self.maxFoldEnrichment = maxFoldEnrichment\n\n querymapped, pvalue, refmappedcount, fdr = enrichmentResult\n\n querymappedcount = len(querymapped)\n if refmappedcount > 0 and nRefGenes > 0 and nClusterGenes > 0:\n enrichment = (querymappedcount / refmappedcount) * (nRefGenes / nClusterGenes)\n else:\n enrichment = numpy.nan\n\n self.enrichment = enrichment # = lambda t: len(t[0]) / t[2] * (nRefGenes / (nClusterGenes or 1))\n\n self.setText(0, term.name)\n\n fmt = \"%\" + str(-int(math.log(max(nClusterGenes, 1)))) + \"i (%.2f%%)\"\n self.setText(1, fmt % (querymappedcount,\n 100.0 * querymappedcount / (nClusterGenes or 1)))\n\n fmt = \"%\" + str(-int(math.log(max(nRefGenes, 1)))) + \"i (%.2f%%)\"\n self.setText(2, fmt % (refmappedcount,\n 100.0 * refmappedcount / (nRefGenes or 1)))\n\n self.setText(3, fmtp(pvalue))\n self.setToolTip(3, fmtpdet(pvalue))\n self.setText(4, fmtp(fdr)) # FDR\n self.setToolTip(4, fmtpdet(fdr))\n self.setText(5, \", \".join(querymapped))\n self.setText(6, \"%.2f\" % (enrichment))\n self.setToolTip(6, \"%.2f\" % (enrichment))\n self.setToolTip(0, \"
\" + term.__repr__()[6:].strip().replace(\"\\n\", \"
\"))\n self.sortByData = [term.name, querymappedcount, refmappedcount,\n pvalue, fdr, \", \".join(querymapped), enrichment]\n\n def data(self, col, role):\n if role == Qt.UserRole:\n if self.maxFoldEnrichment > 0:\n return self.enrichment / self.maxFoldEnrichment\n else:\n return numpy.nan\n else:\n return super().data(col, role)\n\n def __lt__(self, other):\n col = self.treeWidget().sortColumn()\n return self.sortByData[col] < other.sortByData[col]\n\n\nclass EnrichmentColumnItemDelegate(QItemDelegate):\n def paint(self, painter, option, index):\n self.drawBackground(painter, option, index)\n value = index.data(Qt.UserRole)\n if isinstance(value, float) and numpy.isfinite(value):\n painter.save()\n painter.setBrush(QBrush(Qt.blue, Qt.SolidPattern))\n painter.drawRect(option.rect.x(), option.rect.y(),\n int(value * (option.rect.width() - 1)),\n option.rect.height() - 1)\n painter.restore()\n else:\n super().paint(painter, option, index)\n\n\nclass GeneMatcherDialog(QDialog):\n items = [(\"useGO\", \"Use gene names from Gene Ontology annotations\"),\n (\"useKEGG\", \"Use gene names from KEGG Genes database\"),\n (\"useNCBI\", \"Use gene names from NCBI Gene info database\"),\n (\"useAffy\", \"Use Affymetrix platform reference ids\")]\n\n def __init__(self, parent=None, defaults=[True, False, False, False],\n enabled=[False, True, True, True], **kwargs):\n super().__init__(parent, **kwargs)\n self.setLayout(QVBoxLayout())\n for item, default in zip(self.items, defaults):\n setattr(self, item[0], default)\n\n for item, enable, checked in zip(self.items, enabled, defaults):\n cb = QCheckBox(text=item[1], checked=checked, enabled=enable)\n cb.toggled[bool].connect(\n lambda state, name=item[0]:\n setattr(self, name, state)\n )\n self.layout().addWidget(cb)\n\n bbox = QDialogButtonBox(\n Qt.Horizontal,\n standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel\n )\n bbox.accepted.connect(self.accept)\n bbox.rejected.connect(self.reject)\n\n self.layout().addWidget(bbox)\n\n self.layout().setSizeConstraint(QLayout.SetFixedSize)\n\n\ndef test_main(argv=sys.argv):\n from AnyQt.QtWidgets import QApplication\n app = QApplication(list(argv))\n argv = app.arguments()\n if len(argv) > 1:\n data = Orange.data.Table(argv[1])\n else:\n data = None\n\n w = OWGOEnrichmentAnalysis()\n w.show()\n w.raise_()\n w.setDataset(data)\n w.handleNewSignals()\n rval = app.exec_()\n w.setDataset(None)\n w.handleNewSignals()\n w.saveSettings()\n w.onDeleteWidget()\n return rval\n\nif __name__ == \"__main__\":\n sys.exit(test_main())\n","repo_name":"yofayed/orange-bio","sub_path":"orangecontrib/bio/widgets3/OWGOEnrichmentAnalysis.py","file_name":"OWGOEnrichmentAnalysis.py","file_ext":"py","file_size_in_byte":43684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"10492398018","text":"import time\nimport requests\n\n\ndef questions(tag, fromdate, todate):\n site = 'https://api.stackexchange.com/2.3/questions'\n params = {'fromdate': fromdate, 'todate': todate, 'order': 'desc', 'sort': 'activity', 'tagged': tag,\n 'site': 'stackoverflow'}\n response = requests.get(site, params=params)\n dict_json = response.json()\n print(f\"Посты за последние 2 дня с тэгом - {tag}:\\n\")\n for question in dict_json['items']:\n print(f\"- {question['title']}\")\n print(question['link'], \"\\n\")\n\n\ncurrent_time = int(time.time())\ntwo_days = 172800\nstart_time = current_time - two_days\ntag = 'Python'\n\nquestions(tag, start_time, 
current_time)\n","repo_name":"Nikolaymelni/requests","sub_path":"3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73051466793","text":"import mysql.connector\nfrom mysql.connector import Error\nfrom mysql.connector import errorcode\nimport serial\ntry:\n connection = mysql.connector.connect(host='localhost', database='SmartAtt', user='root', password='12345678')\n cursor = connection.cursor()\n ser = serial.Serial('/dev/cu.wchusbserial14130', 9600)\n sql_update_query = \"\"\" UPDATE `Student` SET CardId=%s WHERE StudentId=%s\"\"\"\n\n while True:\n CardId = ser.readline() # type: str\n StudentId = 3\n if CardId:\n print(CardId)\n input = (CardId, StudentId)\n cursor.execute(sql_update_query, input)\n connection.commit()\n\n print(\"Record Updated successfully with prepared statement\")\nexcept mysql.connector.Error as error :\n #connection.rollback() #rollback if any exception occured\n print(\"Failed to update record to database: {}\".format(error))\nfinally:\n #closing database connection.\n if connection.is_connected():\n #cursor.close()\n connection.close()\n print(\"connection is closed\")\n\n","repo_name":"berilbaltaci/SmartAttendanceSystemArduino","sub_path":"listenWriteDB.py","file_name":"listenWriteDB.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35582617215","text":"import boto3\nimport json\nimport os\nfrom boto3.dynamodb.conditions import Attr\nfrom boto3.dynamodb.conditions import Key\n\nIDENTITY_POOL_ID = os.environ['IDENTITY_POOL_ID']\nCOGNITO_USER_POOL = os.environ['COGNITO_USER_POOL'] \nCOGNITO_POOL_ID = os.environ['COGNITO_POOL_ID'] \n\ndynamodb = boto3.resource('dynamodb')\ntables = [\n dynamodb.Table(os.environ[\"RESULTS\"]),\n dynamodb.Table(os.environ[\"DATASETS\"]),\n dynamodb.Table(os.environ[\"NETWORKS\"]),\n dynamodb.Table(os.environ[\"OTHERS\"])\n]\ns3 = boto3.resource('s3')\nbucket = s3.Bucket(os.environ[\"S3_BUCKET\"])\ncognito = boto3.client('cognito-identity')\ncognitoIdp = boto3.client('cognito-idp')\n\n \ndef lambda_handler(event, context):\n \"\"\"Delete user and everything he owns\"\"\"\n \n token = event[\"headers\"][\"Authorization\"].replace(\"Bearer \", \"\", 1)\n event = event[\"body\"]\n \n for table in tables:\n if table == tables[0]:\n expression = \"userId, requestId\"\n else:\n expression = \"userId, datasetId\"\n \n datasets = table.query(\n KeyConditionExpression = Key('userId').eq(event['userId']),\n ProjectionExpression = expression\n )\n \n with table.batch_writer() as batch:\n for item in datasets['Items']:\n batch.delete_item(Key=item)\n \n \n Logins = {\n COGNITO_USER_POOL: token\n }\n \n response = cognito.get_id(\n IdentityPoolId=IDENTITY_POOL_ID,\n Logins=Logins\n )\n \n toDelete = s3.meta.client.list_objects(Bucket=os.environ[\"S3_BUCKET\"], Prefix='private/' + response[\"IdentityId\"] + '/')\n \n deleteKeys = {'Objects' : []}\n deleteKeys['Objects'] = [{'Key' : k} for k in [obj['Key'] for obj in toDelete.get('Contents', [])]]\n \n if deleteKeys[\"Objects\"]: bucket.delete_objects(Delete=deleteKeys)\n\n response = cognito.delete_identities(\n IdentityIdsToDelete=[\n response[\"IdentityId\"]\n ]\n )\n \n response = cognitoIdp.admin_delete_user(\n UserPoolId=COGNITO_POOL_ID,\n Username=event[\"userId\"]\n )\n \n return {\n 'statusCode': 200\n 
}","repo_name":"vascocandeias/maestro-cloud","sub_path":"deleteUser.py","file_name":"deleteUser.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39716902336","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport subprocess\n\nclass ImportWSL:\n\n def __init__(self, root):\n self.root = root\n\n self.root.title('Import WSL Image')\n self.root.bind('', self.quit)\n\n self.name = StringVar()\n self.path = StringVar()\n self.image = StringVar()\n self.done = BooleanVar()\n\n self.done.set(False)\n\n if (True == self.done.get()):\n self.draw_done()\n\n elif (False == self.done.get() and ( not self.name.get() or not self.path.get() or not self.image.get() )):\n self.draw_fields()\n\n def validate_name(self, inStr):\n return ' ' not in inStr\n\n def draw_done(self):\n mainframe = ttk.Frame(self.root, padding=\"3 3 12 12\")\n mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n mainframe.columnconfigure(0, weight=4)\n mainframe.rowconfigure(0, weight=1)\n\n success_text = Label(mainframe, text=\"Successfully imported.\",\n fg=\"green\", font=\"none 18 bold\")\n success_text.config(anchor=CENTER, padx=30, pady=50)\n success_text.grid(column=1, row=2, sticky=(N, W, E, S))\n\n def draw_fields(self):\n mainframe = ttk.Frame(self.root, padding=\"3 3 12 12\")\n mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n mainframe.columnconfigure(0, weight=4)\n mainframe.rowconfigure(0, weight=1)\n\n validation = self.root.register(self.validate_name)\n name_label = ttk.Label(mainframe, text=\"Name\")\n name_entry = ttk.Entry(mainframe, textvariable=self.name,\n validate=\"key\", validatecommand=(validation, '%S'))\n\n path_label = ttk.Label(mainframe, text=\"Path\")\n path_entry = ttk.Entry(mainframe, textvariable=self.path)\n path_browser = ttk.Button(mainframe, text=\"...\", command=self.browsepath)\n\n image_label = ttk.Label(mainframe, text=\"Image\")\n image_entry = ttk.Entry(mainframe, textvariable=self.image)\n image_browser = ttk.Button(mainframe, text=\"...\", command=self.browseimage)\n\n submit_button = ttk.Button(mainframe, text=\"Import\", command=self.process)\n\n name_entry.config(width=20)\n path_entry.config(width=20)\n\n name_label.grid(column=0, row=1, sticky=W)\n name_entry.grid(column=1, row=1, columnspan=3, sticky=(W, E))\n\n path_label.grid(column=0, row=2, sticky=W)\n path_entry.grid(column=1, row=2, columnspan=2, sticky=(W, E))\n path_browser.grid(column=3, row=2, sticky=(W))\n\n image_label.grid(column=0, row=3, sticky=W)\n image_entry.grid(column=1, row=3, columnspan=2, sticky=(W, E))\n image_browser.grid(column=3, row=3, sticky=(W))\n\n submit_button.grid(column=2, row=4, sticky=(W))\n\n for child in mainframe.winfo_children():\n child.grid_configure(padx=5, pady=5)\n\n if (len(self.name.get()) and len(self.path.get()) and len(self.image.get()) and not self.run.get()):\n self.root.bind(\"\", self.process)\n\n def browsepath(self):\n Tk().withdraw()\n filepath = filedialog.askdirectory()\n self.path.set(filepath)\n\n def browseimage(self):\n Tk().withdraw()\n filepath = filedialog.askopenfilename()\n self.image.set(filepath)\n\n def process(self):\n cmd = 'wsl --import {} \"{}\" \"{}\"'.format(self.name.get(), self.path.get(), self.image.get())\n completed = subprocess.run([\"powershell\", \"-Command\", cmd], capture_output=True)\n if completed.returncode != 0:\n print(\"An error occured: %s\", completed.stderr)\n else:\n 
self.done.set(True)\n            self.draw_done()\n        return completed\n\n    def quit(self, event):\n        self.root.quit()\n\n\nroot = Tk()\nImportWSL(root)\nroot.mainloop()\n","repo_name":"danieliser/wsl-python-gui","sub_path":"src/wsl-import.py","file_name":"wsl-import.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33906206525","text":"# Followed this tutorial: https://nextjournal.com/gkoehler/pytorch-mnist\nimport datetime\nimport time\nimport os\nimport pickle\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\nTrains and tests a torch Module using the provided Optimizer and DataLoaders.\n\"\"\"\n\nclass MLTest():\n    log_interval = 10000\n\n\n    def __init__(self):\n        random_seed = 1\n        torch.backends.cudnn.enabled = False\n        torch.manual_seed(random_seed)\n        self.cnn = False\n\n\n    # Entry point into training a module, called from main.\n    def run(self, n_epochs, model, optimizer,\n            train_loader, test_loader, loss, sgd,\n            save_model=False, log=False, trials=1, tag=''):\n        self.optimizer = optimizer\n        self.train_loader = train_loader\n        self.test_loader = test_loader\n        # bind both attribute names; methods in this class refer to the model\n        # as both self.model and self.network\n        self.model = self.network = model\n        self.loss = loss\n        self.sgd = sgd\n        self.train_losses = [[] for i in range(trials)]\n        self.test_losses = [[] for i in range(trials)]\n        idt = lambda x: x\n        idt.__name__ = ''\n        # report rmse if doing matrix factorization\n        self.loss_metric = math.sqrt if self.loss == F.mse_loss else idt\n\n        print(f'Model: {type(self.model).__name__}\\nParams:{self.optimizer.get_params()}\\n')\n\n        start = time.time()\n        self.epoch_start = start\n\n        print(f'Training for {n_epochs} epochs! sgd? {sgd} / logging? 
{log}')\n\n for i in range(trials):\n print(f'Trial: {i}')\n self.optimizer.initialize()\n if i > 0:\n self.network.reset_parameters()\n for epoch in range(1, n_epochs + 1):\n self.epoch_start = time.time()\n self.train(epoch, i)\n if not self.cnn:\n self.test(i)\n print()\n print(f'Training took {time.time() - start} seconds.')\n if save_model:\n self._save_model()\n\n if log:\n return self._log_training(tag)\n\n\n # Saves the model/optimizer info and the training/test losses to a pickle for later plots.\n def _log_training(self, tag=''):\n fname = f'./log/{type(self.model).__name__}-{tag}-{datetime.datetime.now().strftime(\"%m-%dT%H:%M:%S\")}'\n\n if not os.path.exists('./log'):\n os.makedirs('./log')\n\n with open(fname, 'wb') as logfile:\n print(f'Saving training log to {fname}')\n d = {'name': type(self.model).__name__,\n 'loss': f'{self.loss_metric.__name__} {self.loss.__name__}',\n 'params': self.optimizer.get_params(),\n 'train_losses': self.train_losses,\n 'test_losses': self.test_losses}\n pickle.dump(d, logfile)\n return fname\n\n # Implement in subclass if needed.\n def _save_model(self):\n raise NotImplementedError()\n\n\n # Backprops gradients for one epoch and prints the training loss.\n def train(self, epoch, trial):\n total_loss = 0\n n_train = len(self.train_loader[0])\n self.model.train()\n\n data = self.train_loader[0]\n target = self.train_loader[1]\n def closure():\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.loss(output, target)\n loss.backward()\n return loss\n\n loss = self.optimizer.step(closure)\n total_loss += loss.item()\n\n if not self.sgd:\n avg_loss = total_loss / (self.log_interval if self.sgd else 1)\n\n print('Epoch: {} \\t{}: {:.4f}'.format(\n epoch,\n f'{self.loss_metric.__name__} {self.loss.__name__}',\n self.loss_metric(avg_loss)))\n\n self.train_losses[trial].append(self.loss_metric(avg_loss))\n total_loss = 0\n\n\n # Calculates the score (e.g. accuracy or rmse) and prints the test loss.\n def test(self, trial):\n self.model.eval()\n test_loss = 0\n correct = 0\n\n with torch.no_grad():\n for data, target in self.test_loader:\n output = self.model(data)\n test_loss += self.loss(output, target, reduction='sum').item()\n if self.loss == F.nll_loss:\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).sum()\n\n test_loss = self.loss_metric(test_loss / len(self.test_loader.dataset))\n self.test_losses[trial].append(test_loss)\n\n if self.loss == F.nll_loss:\n print('Test set: {}: {:.4f}, Accuracy: {}/{} ({:.0f}%), dur: {:.2f}'.format(\n f'{self.loss_metric.__name__} {self.loss.__name__}',\n test_loss,\n correct,\n len(self.test_loader.dataset),\n 100. 
* correct / len(self.test_loader.dataset),\n                time.time() - self.epoch_start))\n        else:\n            print(f'Test set: {self.loss_metric.__name__} {self.loss.__name__}: {test_loss:.4f}, dur: {time.time() - self.epoch_start:.2f}\\n')\n","repo_name":"chsanford/opt-ml-project","sub_path":"ml_testing/ml_testing.py","file_name":"ml_testing.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"41146470650","text":"import re\nfrom datetime import datetime, timedelta, date\n\nfrom BasicLibrary import ObjectHelper\n\n\n# +--------------------------------------------------------------------------\n# |::Note:| Notes on two built-in Python methods\n# |·······| strptime(): the p stands for parse, it parses a string into a datetime value\n# |·······| strftime(): the f stands for format, it formats a datetime value into a string of a given format\n# +--------------------------------------------------------------------------\n\nclass DateTimeHelper:\n    @staticmethod\n    def convert_from_string(date_time_string, formatter=\"\"):\n        \"\"\"\n        Convert a string to a datetime value\n        :param date_time_string:\n        :param formatter: the datetime format contained in the string to be parsed; when \"\" the format is inferred automatically (only common formats can be inferred)\n        :return:\n        \"\"\"\n        if formatter == \"\":\n            formatter = DateTimeHelper.get_format(date_time_string)\n\n        _type = type(date_time_string)\n        if _type is str:\n            return datetime.strptime(date_time_string, formatter)\n        else:\n            if _type is datetime:\n                return date_time_string\n            else:\n                return None\n\n    @staticmethod\n    def convert_from_compact_string(date_time_string):\n        \"\"\"\n        Convert a string like “20210221” into a date value (commonly used in stock trading)\n        :param date_time_string:\n        :return:\n        \"\"\"\n        return DateTimeHelper.convert_from_string(date_time_string, \"%Y%m%d\")\n\n    @staticmethod\n    def get_compact_string(date_time_value=None):\n        \"\"\"\n        Get a date string like “20210221” (commonly used in stock trading); same behavior as get_compact_date_string\n        :param date_time_value:\n        :return:\n        \"\"\"\n        return DateTimeHelper.get_string(date_time_value, \"%Y%m%d\")\n\n    @classmethod\n    def get_compact_date_string(cls, date_time_value=None):\n        \"\"\"\n        Get a date string like “20210221” (commonly used in stock trading); an alias of get_compact_string\n        :param date_time_value:\n        :return:\n        \"\"\"\n        return cls.get_compact_string(date_time_value)\n        pass\n\n    @classmethod\n    def get_compact_date_time_string(cls, date_time_value=None):\n        \"\"\"\n        Get a date-time string like “20210221030526”\n        :param date_time_value:\n        :return:\n        \"\"\"\n        return DateTimeHelper.get_string(date_time_value, \"%Y%m%d%H%M%S\")\n        pass\n\n    @staticmethod\n    def get_short_string(date_time_value=None, formatter=\"%Y-%m-%d\"):\n        \"\"\"\n        Get a short-format date string like “2021-02-21”\n        :param date_time_value:\n        :param formatter:\n        :return:\n        \"\"\"\n        return DateTimeHelper.get_string(date_time_value, formatter)\n\n    @staticmethod\n    def get_standard_string(date_time_value=None):\n        \"\"\"\n        Get the standard-format date string representation of the given time (like 2021-10-11 03:34:25)\n        :param date_time_value:\n        :return:\n        \"\"\"\n        return DateTimeHelper.get_string(date_time_value, \"%Y-%m-%d %H:%M:%S\")\n\n    @classmethod\n    def get_string(cls, date_time_value=None, formatter=\"%Y-%m-%d %H:%M:%S\"):\n        \"\"\"\n        Get the string representation of the given time (returns the current time by default)\n        :param formatter:\n        :param date_time_value: the datetime value to render; when this argument is omitted, the current time is used\n        :return: the string representation of the given time\n        \"\"\"\n        if date_time_value is None:\n            date_time_value = datetime.now()\n\n        if ObjectHelper.get_type(date_time_value) is str:\n            date_time_value = cls.convert_from_string(date_time_value)\n\n        result = date_time_value.strftime(formatter)\n        return result\n\n    @staticmethod\n    def add_days(original_date=None, delta=1, original_date_formatter=\"%Y%m%d\"):\n        \"\"\"\n\n        :param original_date: can be a date/datetime value or a string\n        :param delta: the number of days to add or subtract\n        :param original_date_formatter: the date format; needs to be given when original_date is a string\n        :return:\n        \"\"\"\n        _delta = timedelta(days=delta)\n        if original_date is None:\n            original_date = datetime.now()\n\n        if type(original_date) is str:\n            original_date = DateTimeHelper.convert_from_string(original_date, original_date_formatter)\n\n        _type = type(original_date)\n        if _type is datetime or _type is date:\n            return original_date + _delta\n        else:\n            return None\n\n    @staticmethod\n    def get_format(datetime_string):\n        \"\"\"\n        Infer the date-time format from a string\n        :param datetime_string:\n        :return:\n        \"\"\"\n        regex = r'(\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2})'\n        if re.search(regex, datetime_string):\n            return \"%Y-%m-%d %H:%M:%S\"\n\n        regex = r'(\\d{2}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2})'\n        if re.search(regex, datetime_string):\n            return \"%y-%m-%d %H:%M:%S\"\n\n        regex = r'(\\d{4}\\d{1,2}\\d{1,2}\\d{1,2}\\d{1,2}\\d{1,2})'\n        if re.search(regex, datetime_string):\n            return \"%Y%m%d%H%M%S\"\n\n        regex = r'(\\d{4}-\\d{1,2}-\\d{1,2})'\n        if re.search(regex, datetime_string):\n            return \"%Y-%m-%d\"\n\n        regex = r'(\\d{4}\\d{2}\\d{2})'\n        if re.search(regex, datetime_string):\n            return \"%Y%m%d\"\n\n        regex = r'(\\d{2}\\d{2}\\d{2})'\n        if re.search(regex, datetime_string):\n            return \"%y%m%d\"\n\n        return \"\"\n","repo_name":"notinmood/BasicLibrary.PY","sub_path":"BasicLibrary/data/dateTimeHelper.py","file_name":"dateTimeHelper.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"9720537492","text":"import os\nimport sys\nimport csv\nfrom datetime import datetime\nimport argparse\n\nfrom modules.output_module import print_results\nfrom modules.categorisation_module import categorize_transaction\nfrom modules.keywords_module import add_tegenrekening_keywords, add_omschrijving_keywords\nfrom modules.classes import Transaction, Category, DatetimeRange\nfrom modules.timespan_module import get_timespan\n\n# Required for relative imports to also work when called\n# from project root directory.\nsys.path.append(os.path.dirname(__file__))\n\ndef initialize_categories(category_list, category):\n    # Create categories\n    for cat in category_list:\n        category[cat] = Category(cat)\n\n    # Add keywords to categories\n    add_tegenrekening_keywords(category)\n    add_omschrijving_keywords(category)\n\ndef main():\n    # Create help\n    parser = argparse.ArgumentParser(description='Goes through .csv file with banking details and categorizes transactions.')\n    parser.add_argument('--category', action='store_true', help='Prints more detailed information about a category.')\n    parser.add_argument('--csv', action='store_true', help='Generates .csv file with transaction history of the category specified with --category.')\n    parser.add_argument('--transactions', action='store_true', help='Prints every transaction in the category specified with --category. 
Useful for a quick view.')\n parser.add_argument('--dates', action='store_true', help='Specify transactions per day.')\n parser.add_argument('--intervals', action='store_true', help='Specify transactions per time interval (inclusive).')\n args = parser.parse_args()\n\n # Create category instances based on category list\n category = {}\n category_list = [\n 'Totaal',\n 'Eigen rekeningen',\n 'Bijdrage familie',\n 'DUO',\n 'Boodschappen',\n 'Kleding',\n 'Media',\n 'Reiskosten',\n 'Woning',\n 'Drugs',\n 'Cadeaus',\n 'Terugbetalingen',\n 'Loon',\n 'Belastingen',\n 'Horeca',\n 'Goede doelen',\n 'Vaste lasten',\n 'Online bestellingen',\n 'Zorg',\n 'Feesten',\n 'Hobbies',\n 'Vakanties',\n 'Spaar',\n 'Gepind',\n 'Geen categorie']\n\n initialize_categories(category_list, category)\n\n # Timespan that is considered\n requested_dates = get_timespan()\n\n # Declare variables\n transaction_number = 0\n saldo_start = 0\n acceptable_uncategorized_percentage = 5\n\n # Read in .csv file\n FILE_NAME_IN = 'betaal_2.csv'\n current_dir = os.path.dirname(os.path.realpath(__file__))\n path_in = os.path.join(current_dir + '/inputs', FILE_NAME_IN)\n\n with open(path_in) as infile:\n reader = csv.reader(infile, delimiter = ';')\n\n # Create transaction object\n for row in reader:\n\n attributes = {\n 'transaction_number' : transaction_number,\n 'boekingsdatum': datetime.strptime(row[0], '%d-%m-%Y'),\n 'opdrachtgeversrekening': row[1],\n 'tegenrekening': row[2],\n 'naam_tegenrekening': row[3],\n 'adres': row[4],\n 'postcode': row[5],\n 'plaats': row[6],\n 'valuta': row[7],\n 'saldo_voor': float(row[8]),\n 'valuta_bedrag': row[9],\n 'bedrag': float(row[10]),\n 'journaaldatum': datetime.strptime(row[11], '%d-%m-%Y'),\n 'valutadatum': datetime.strptime(row[12], '%d-%m-%Y'),\n 'code_intern': row[13],\n 'code_globaal': row[14],\n 'volgnummer': row[15],\n 'kenmerk': row[16],\n 'omschrijving': row[17],\n 'afschriftnummer': row[18]\n }\n\n for interval in requested_dates:\n if interval.__contains__(attributes['boekingsdatum']):\n transaction = Transaction(attributes)\n if transaction.attributes['transaction_number'] == 0:\n saldo_start = transaction.attributes['saldo_voor']\n categorize_transaction(transaction, category)\n transaction_number += 1\n\n #Print results\n print_results(category, saldo_start,acceptable_uncategorized_percentage)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"WesselPoorthuis/bookkeeper","sub_path":"bookkeeper.py","file_name":"bookkeeper.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28598432849","text":"# flake8: noqa\n# type: ignore\n\nimport os\nimport random\nfrom typing import List\n\nimport pandas as pd\nfrom manim import *\nfrom scipy.interpolate import interp1d\n\nfrom codenames.solvers.sna_hinter import ( # , opponent_force, friendly_force\n ForceNode,\n step_from_forces,\n)\nfrom codenames.solvers.utils.algebra import (\n cosine_distance,\n geodesic,\n normalize_vector,\n single_gram_schmidt,\n)\n\n\ndef text2color(text):\n if text == \"blue\":\n return BLUE\n if text == \"red\":\n return RED\n if text == \"black\":\n return DARK_GRAY\n if text == \"gray\":\n return GRAY\n\n\ndef random_ints(n: int, k: int) -> List[int]:\n ints_list = [random.randint(0, n) for i in range(k)]\n return ints_list\n\n\ndef geodesic_object(v, u):\n geodesic_function = geodesic(v, u)\n return ParametricFunction(geodesic_function, t_range=[0, 1], color=CONNECTIONS_COLOR)\n\n\ndef 
surrounding_circle_object(centroid, radius_radians):\n circle_function = surrounding_circle(centroid, radius_radians)\n return ParametricFunction(circle_function, t_range=[0, 2 * PI], color=SURROUNDING_CIRC_COLOR)\n\n\ndef generate_random_subsets(elements_list: List, average_subset_size: float) -> List[List]:\n k = len(elements_list)\n n = int(k / average_subset_size)\n subsets_map = random_ints(n, k)\n zip_iterator = zip(elements_list, subsets_map)\n mapper = list(zip_iterator)\n subsets_list = []\n for s in set(subsets_map):\n subset_elements = [element[0] for element in mapper if element[1] == s]\n subsets_list.append(subset_elements)\n return subsets_list\n\n\ndef generate_cluster_connections(cluster_vectors: List[np.ndarray]) -> List[ParametricFunction]:\n n = len(cluster_vectors)\n connections_list = []\n if n < 2:\n return connections_list\n for i in range(n - 1):\n for j in range(i + 1, n):\n geodesic_function = geodesic_object(cluster_vectors[i], cluster_vectors[j])\n connections_list.append(geodesic_function)\n return connections_list\n\n\ndef generate_random_connections(vectors_list, average_cluster_size):\n vectors_clusters = generate_random_subsets(vectors_list, average_subset_size=average_cluster_size)\n connections_list = []\n for cluster in vectors_clusters:\n cluster_connections = generate_cluster_connections(cluster)\n connections_list.extend(cluster_connections)\n return connections_list\n\n\ndef polar_to_cartesian(r, phi, theta):\n x = r * np.sin(phi) * np.cos(theta)\n y = r * np.sin(phi) * np.sin(theta)\n z = r * np.cos(phi)\n return np.array([x, y, z])\n\n\ndef text_len_to_time(text, min_time=2, seconds_per_char=0.05):\n n = len(text)\n return np.max([min_time, n * seconds_per_char])\n\n\ndef enrich_nodes(centroid, nodes_list): # :List[ForceNode,...]\n nodes = []\n for node in nodes_list:\n d = cosine_distance(centroid, node.force_origin)\n if node.force_sign:\n node.force_size = attractor_force(d)\n else:\n node.force_size = repeller_force(d)\n nodes.append(node)\n return nodes\n\n\ndef record_trajectory(starting_point, nodes_list, num_of_iterations=50, arc_radians=0.01):\n trajectory = [starting_point]\n centroid = starting_point\n for i in range(num_of_iterations):\n nodes = enrich_nodes(centroid, nodes_list)\n centroid = step_from_forces(centroid, nodes, arc_radians=arc_radians)\n trajectory.append(centroid)\n return trajectory\n\n\ndef attractor_force(d):\n return -d\n\n\ndef repeller_force(d):\n return 0.4 * (1 / (d + 1) - 0.5)\n\n\ndef generate_progression_dict(titles_texts):\n n = len(titles_texts)\n progression_line = Line(\n start=ORIGIN + DOWN + PROGRESSION_LINE_LENGTH / 2 * LEFT,\n end=ORIGIN + DOWN + PROGRESSION_LINE_LENGTH / 2 * RIGHT,\n )\n progression_dots = VGroup()\n for i in range(4):\n dot = AnnotationDot(\n point=ORIGIN + DOWN + (i - (n - 1) / 2) * (PROGRESSION_LINE_LENGTH / (n - 1)) * RIGHT,\n fill_color=FUTURE_TOPICS_COLOR,\n stroke_width=PROGRESSION_DOTS_S_WIDTH,\n )\n progression_dots.add(dot)\n\n progression_marker = Arrow(start=ORIGIN, end=ORIGIN + UP * 1.5, color=GREEN).next_to(progression_dots[0], DOWN)\n\n titles = VGroup()\n titles.add(*[Text(titles_texts[i]) for i in range(n)])\n\n progression_dict = VDict(\n [(\"line\", progression_line), (\"dots\", progression_dots), (\"marker\", progression_marker), (\"titles\", titles)]\n )\n return progression_dict\n\n\ndef vec_arbitrary_rotation(v, radians, rotation_seed=np.array([0, 0, 1])):\n v_normed, orthogonal_vector = single_gram_schmidt(v, rotation_seed)\n rotation_mat = 
rotation_matrix(radians, orthogonal_vector) # , vec_to_rotation(orthogonal_vector, radians)\n rotated = rotation_mat @ v\n return rotated\n\n\ndef surrounding_circle(centroid, radians_radius, rotation_seed=np.array([0, 0, 1])):\n centroid = normalize_vector(centroid)\n rotated_from_centroid = vec_arbitrary_rotation(centroid, radians_radius, rotation_seed=rotation_seed)\n\n def circle_trail(s):\n rotation_from_centroid = rotation_matrix(s, centroid) # vec_to_rotation(centroid, s)\n return SPHERE_RADIUS * rotation_from_centroid @ rotated_from_centroid\n\n return circle_trail\n\n\n# def connecting_line(v, u):\n# c = v.T @ u\n# f = lambda t: (t*v+(1-t)*u) / np.sqrt(t**2+(1-t**2) + 2*t*(1-t)*c)\n# return f\n\nBOARD_WORDS = [\n \"cloak\",\n \"kiss\",\n \"flood\", ####\n \"mail\",\n \"skates\",\n \"paper\",\n \"frog\",\n \"house\",\n \"moon\", ####\n \"egypt\",\n \"teacher\",\n \"storm\", ####\n \"newton\",\n \"violet\",\n \"drill\",\n \"fever\",\n \"ninja\",\n \"jupiter\",\n \"ski\",\n \"attic\",\n \"beach\", ####\n \"lock\",\n \"earth\",\n \"park\", ####\n \"gymnast\",\n]\n\nCARDS_ANNOTATION_DURATION = 1\nCARDS_SCALING_FACTOR = 1.3\nRED_CARDS_INDICES = [0, 4, 9, 10, 14, 18, 22, 24]\nBLUE_CARDS_INDICES = [1, 2, 6, 8, 11, 16, 17, 20, 23]\nBLACK_CARD_INDEX = [19]\nGRAY_CARDS_INDICES = list(set(range(25)) - set(RED_CARDS_INDICES) - set(BLUE_CARDS_INDICES) - set(BLACK_CARD_INDEX))\nHINT_COLOR = GREEN\nHINT_RADIUS = 0.3\nSURROUNDING_CIRC_COLOR = HINT_COLOR\nFUTURE_TOPICS_COLOR = GRAY\nPAST_TOPICS_COLOR = GREEN\nCURRENT_TOPICS_COLOR = YELLOW\nPROGRESSION_DOTS_S_WIDTH = 2\nCARDS_HEIGHT = 1.0\nPROGRESSION_LINE_LENGTH = 8\nCARDS_WIDTH = 1.6\nCARDS_FILL_COLOR = GOLD_E\nCARDS_FILL_OPACITY = 0.3\nCARDS_HORIZONTAL_SPACING = 0.15\nCARDS_VERTICAL_SPACING = 0.3\nCARDS_FONT_SIZE = 25.0\nSPHERE_RADIUS = 3\nFONT_SIZE_LABELS = 20\nFONT_SIZE_TEXT = 25\nARROWS_THICKNESS = 0.001\nARROWS_COLOR = LIGHT_BROWN\nDOT_SIZE = 0.2\nLABELS_COLOR = PURE_RED\nCONNECTIONS_COLOR = GREEN\nSKI_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.5 * PI, 0)\nWATER_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.5 * PI, 0.2 * PI)\nBEACH_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.70 * PI, 0.27 * PI)\nPARK_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.75 * PI, 0.1 * PI)\nJUPITER_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.2 * PI, -0.3 * PI)\nNEWTON_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.27 * PI, -0.11 * PI)\nTEACHER_VEC = polar_to_cartesian(SPHERE_RADIUS, 0.35 * PI, -0.2 * PI)\n\nvectors_list = [SKI_VEC, WATER_VEC, BEACH_VEC, PARK_VEC, JUPITER_VEC, NEWTON_VEC, TEACHER_VEC]\nlabels_list = [\"ski\", \"water\", \"beach\", \"park\", \"jupiter\", \"newton\", \"teacher\"]\nwords_list_len = len(vectors_list)\n\nsna_connections_list = [\n ParametricFunction(geodesic(SKI_VEC, WATER_VEC), t_range=[0, 1]).set_color(CONNECTIONS_COLOR),\n ParametricFunction(geodesic(BEACH_VEC, PARK_VEC), t_range=[0, 1]).set_color(CONNECTIONS_COLOR),\n ParametricFunction(geodesic(JUPITER_VEC, NEWTON_VEC), t_range=[0, 1]).set_color(CONNECTIONS_COLOR),\n ParametricFunction(geodesic(TEACHER_VEC, JUPITER_VEC), t_range=[0, 1]).set_color(CONNECTIONS_COLOR),\n ParametricFunction(geodesic(NEWTON_VEC, TEACHER_VEC), t_range=[0, 1]).set_color(CONNECTIONS_COLOR),\n]\n\nTITLES_TEXTS = [\n \"Part 1: The Codenames board game\",\n \"Part 2: The Word2Vec Model\",\n \"Part 3: The Algorithm\",\n \"Part 4: Examples\",\n]\n\n\ndef get_svg(name: str) -> SVGMobject:\n path = os.path.join(\"visualizer\", \"svg\", f\"{name}.svg\")\n return SVGMobject(path)\n\n\nclass SVG:\n @staticmethod\n def blue_hinter():\n 
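# loads visualizer/svg/blue_hinter.svg via get_svg(); the other helpers in this class follow the same pattern\n        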
return get_svg(\"blue_hinter\")\n\n @staticmethod\n def red_hinter():\n return get_svg(\"red_hinter\")\n\n @staticmethod\n def blue_guesser():\n return get_svg(\"blue_guesser\")\n\n @staticmethod\n def red_guesser():\n return get_svg(\"red_guesser\")\n\n @staticmethod\n def bubble():\n return get_svg(\"centered_bubble\")\n\n\nclass KalirmozExplanation(ThreeDScene):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.simple_mode = False\n self.waitings = True\n\n def construct(self):\n\n progression_dict = generate_progression_dict(TITLES_TEXTS)\n\n # self.scene_intro()\n #\n # self.advance_progress_markers(progression_dict, 0, 0)\n #\n # self.scene_game_rules()\n\n # self.advance_progress_markers(progression_dict, 1)\n\n # self.scene_word2vec_explanation()\n\n # self.advance_progress_markers(progression_dict, 2, extra_waiting_time=5)\n\n # self.scene_sphere()\n\n # self.add_fixed_orientation_mobjects(progression_dict)\n # self.remove(progression_dict)\n self.advance_progress_markers(progression_dict, 3)\n\n self.scene_guesser_views()\n\n self.scene_ending_title()\n\n def scene_intro(self):\n t1 = Text(\"Code Names Algorithm\", color=BLUE)\n t2 = Text(\"by the Kali brothers\", color=RED).scale(0.8).next_to(t1, DOWN)\n self.write_3d_text(t1)\n self.write_3d_text(t2)\n self.play(FadeOut(t1), FadeOut(t2))\n\n def scene_game_rules(self):\n blue_hinter = SVG.blue_hinter().to_corner(UR).shift(DOWN).scale(0.8)\n red_hinter = SVG.red_hinter().to_corner(DR).scale(0.8)\n blue_guesser = SVG.blue_guesser().to_corner(UL).align_to(blue_hinter, DOWN).shift(LEFT * 0.2)\n red_guesser = SVG.red_guesser().to_corner(DL).shift(LEFT * 0.2)\n\n board = self.generate_board(BOARD_WORDS)\n self.play(\n FadeIn(blue_hinter, shift=DOWN),\n FadeIn(blue_guesser, shift=DOWN),\n FadeIn(red_hinter, shift=DOWN),\n FadeIn(red_guesser, shift=DOWN),\n )\n # self.add(board, blue_hinter, blue_guesser, red_hinter, red_guesser)\n self.dynamic_wait(4)\n self.annotate_objects([blue_hinter, blue_guesser], 1.3, 1)\n self.annotate_objects([red_hinter, red_guesser], 1.3, 1)\n self.dynamic_wait(2.5)\n self.annotate_objects([blue_hinter, red_hinter], 1.3, 1.5)\n self.dynamic_wait(1.5)\n self.annotate_objects([blue_guesser, red_guesser], 1.3, 1.5)\n self.dynamic_wait(5)\n self.play(DrawBorderThenFill(board))\n self.dynamic_wait(3)\n\n self.expose_board_colors(board)\n\n blue_hinter_bubble, blue_hinter_text = self.animate_hint(blue_hinter, \"planets\", 2)\n self.dynamic_wait(10)\n\n blue_guesser_bubble = self.animate_guess(\n board=board,\n card_color=BLUE,\n word=\"earth\",\n guesser_obj=blue_guesser,\n guesser_bubble=None,\n finish_turn=False,\n guess_reveal_wait_time=5,\n )\n\n self.animate_guess(\n board=board, card_color=BLUE, word=\"jupiter\", guesser_bubble=blue_guesser_bubble, finish_turn=True\n )\n\n self.play(FadeOut(blue_hinter_bubble), FadeOut(blue_hinter_text))\n\n red_hinter_bubble, red_hinter_text = self.animate_hint(red_hinter, \"water\", 4, final_wait=1.5)\n\n red_guesser_bubble = self.animate_guess(\n board=board,\n card_color=RED,\n word=\"flood\",\n guesser_obj=red_guesser,\n guesser_bubble=None,\n quick_mode=True,\n finish_turn=False,\n )\n\n self.animate_guess(\n board=board,\n card_color=LIGHT_GRAY,\n word=\"moon\",\n guesser_bubble=red_guesser_bubble,\n finish_turn=True,\n quick_mode=True,\n )\n\n self.play(FadeOut(red_hinter_bubble), FadeOut(red_hinter_text))\n\n self.animate_hint(blue_hinter, \"costume\", 2)\n\n self.animate_guess(\n board=board,\n card_color=DARK_GRAY,\n word=\"ninja\",\n 
guesser_obj=blue_guesser,\n finish_turn=True,\n quick_mode=True,\n winning_title=True,\n )\n\n self.dynamic_wait(3)\n\n self.remove_everything()\n\n def animate_hint(self, hinter_obj, hint_word, hint_number, final_wait=3):\n hinter_bubble = SVG.bubble().scale(0.5).next_to(hinter_obj, UP)\n hinter_text = Text(f'\"{hint_word}\", {hint_number}', font_size=18).move_to(hinter_bubble).shift(UP * 0.2)\n self.play(FadeIn(hinter_bubble, shift=DOWN))\n self.play(Write(hinter_text))\n self.dynamic_wait(final_wait)\n return hinter_bubble, hinter_text\n\n def animate_guess(\n self,\n board,\n card_color,\n word,\n guesser_obj=None,\n guesser_bubble=None,\n finish_turn=False,\n guess_reveal_wait_time=1,\n quick_mode=False,\n winning_title=False,\n ):\n if quick_mode is True:\n quick_factor = 0.5\n else:\n quick_factor = 1\n if guesser_bubble is None:\n guesser_bubble = SVG.bubble().scale(0.6).next_to(guesser_obj, UP).shift(DOWN * 0.3)\n self.play(FadeIn(guesser_bubble, shift=DOWN), run_time=quick_factor)\n guesser_text = Text(f'I Guess: \"{word}\"', font_size=17).move_to(guesser_bubble).shift(UP * 0.2)\n self.play(Write(guesser_text), run_time=quick_factor)\n self.dynamic_wait(guess_reveal_wait_time * quick_factor)\n self.change_card_color(board, word, card_color, annotate=True, winning_title=winning_title)\n self.wait(quick_factor)\n if finish_turn:\n self.play(FadeOut(guesser_text), FadeOut(guesser_bubble), run_time=quick_factor)\n else:\n self.play(FadeOut(guesser_text), run_time=quick_factor)\n return guesser_bubble\n\n def advance_progress_markers(self, progression_dict, i, previous_i=None, extra_waiting_time=1):\n if previous_i is None:\n previous_i = i - 1\n\n progression_dict[\"dots\"][previous_i].set_fill(CURRENT_TOPICS_COLOR)\n for j in range(previous_i):\n progression_dict[\"dots\"][j].set_fill(PAST_TOPICS_COLOR)\n for j in range(previous_i + 1, len(progression_dict[\"dots\"])):\n progression_dict[\"dots\"][j].set_fill(FUTURE_TOPICS_COLOR)\n progression_dict[\"marker\"].next_to(progression_dict[\"dots\"][previous_i], DOWN)\n\n self.play(\n FadeIn(progression_dict[\"marker\"]),\n FadeIn(progression_dict[\"line\"]),\n FadeIn(progression_dict[\"dots\"]),\n FadeIn(progression_dict[\"titles\"][previous_i]),\n )\n self.dynamic_wait(1)\n\n if previous_i != i:\n self.play(\n progression_dict[\"marker\"].animate.next_to(progression_dict[\"dots\"][i], DOWN),\n *[progression_dict[\"dots\"][j].animate.set_fill(PAST_TOPICS_COLOR) for j in range(i)],\n *[\n progression_dict[\"dots\"][j].animate.set_fill(FUTURE_TOPICS_COLOR)\n for j in range(i + 1, len(progression_dict[\"dots\"]))\n ],\n progression_dict[\"dots\"][i].animate.set_fill(CURRENT_TOPICS_COLOR),\n FadeOut(progression_dict[\"titles\"][previous_i], shift=DOWN),\n FadeIn(progression_dict[\"titles\"][i], shift=DOWN),\n )\n self.dynamic_wait(1)\n self.wait(extra_waiting_time)\n\n self.play(*[FadeOut(mob) for mob in self.mobjects])\n\n def change_card_color(self, board, word, color, annotate=False, winning_title=False):\n card_index = BOARD_WORDS.index(word)\n if annotate:\n self.play(board[card_index].animate.scale(1.2), rate_func=there_and_back, run_time=2)\n if winning_title:\n title = Text(\"Red team Wins!\", color=RED).scale(0.8).to_edge(UP).shift(UP * 0.15)\n self.play(board[card_index][0].animate.set_color(color), Write(title), run_time=0.5)\n else:\n self.play(board[card_index][0].animate.set_color(color))\n\n def scene_sphere(self):\n theta = 240 * DEGREES\n phi = 75 * DEGREES\n axes = ThreeDAxes(\n x_range=[-1.5, 1.5, 1], y_range=[-1.5, 1.5, 
1], z_range=[-1.5, 1.5, 1], x_length=8, y_length=8, z_length=8\n )\n sphere = Sphere(\n center=(0, 0, 0), radius=SPHERE_RADIUS, resolution=(20, 20), u_range=[0.001, PI - 0.001], v_range=[0, TAU]\n ).set_opacity(0.2)\n\n self.set_camera_orientation(phi=phi, theta=theta) # 75 * DEGREES, theta=30 * DEGREES\n if not self.simple_mode:\n self.renderer.camera.light_source.move_to(3 * IN)\n self.begin_ambient_camera_rotation(rate=0.1)\n self.play(Create(axes), Create(sphere))\n self.dynamic_wait(1)\n\n words_labels_list = [\n self.generate_card(\n text=labels_list[i],\n height=0.4,\n width=1.4,\n fill_color=CARDS_FILL_COLOR,\n fill_opacity=0.4,\n font_size=CARDS_FONT_SIZE,\n stroke_color=GOLD_E,\n ).move_to(vectors_list[i] * 1.1)\n # Text(labels_list[i], font_size=FONT_SIZE_LABELS, color=LABELS_COLOR).move_to(vectors_list[i])\n for i in range(words_list_len)\n ]\n arrows_list = [\n Line(start=[0, 0, 0], end=vector, color=ARROWS_COLOR) for vector in vectors_list\n ] # stroke_width=ARROWS_THICKNESS\n self.play(*[Create(arrows_list[i]) for i in range(words_list_len)])\n for i in range(words_list_len):\n self.add_fixed_orientation_mobjects(words_labels_list[i])\n # words_labels_list[i].add_updater(lambda x, i=i: x.move_to(self.coords_to_point(vectors_list[i])))\n self.play(*[FadeIn(words_labels_list[i]) for i in range(words_list_len)])\n self.dynamic_wait(24)\n # self.add(*arrows_list, *words_labels_list)\n\n self.play(*[Create(connection, run_time=3) for connection in sna_connections_list])\n self.dynamic_wait(17)\n self.play(*[Uncreate(connection, run_time=1) for connection in sna_connections_list])\n self.dynamic_wait(2)\n\n self.animate_random_connections(vectors_list=vectors_list, number_of_examples=15, example_length=0.3)\n self.dynamic_wait(19)\n\n starting_point = normalize_vector(BEACH_VEC + PARK_VEC) * SPHERE_RADIUS\n nodes_list = [ForceNode(BEACH_VEC, True), ForceNode(PARK_VEC, True), ForceNode(WATER_VEC, False)]\n self.animate_physical_system(\n starting_point=starting_point,\n nodes_list=nodes_list,\n num_of_iterations=1000,\n arc_radians=0.01,\n run_time=5,\n first_waiting_time=20,\n )\n\n starting_point = normalize_vector(SKI_VEC + WATER_VEC) * SPHERE_RADIUS\n nodes_list = [\n ForceNode(SKI_VEC, True),\n ForceNode(WATER_VEC, True),\n ForceNode(PARK_VEC, False),\n ForceNode(TEACHER_VEC, False),\n ]\n self.animate_physical_system(\n starting_point=starting_point,\n nodes_list=nodes_list,\n num_of_iterations=1000,\n arc_radians=0.01,\n run_time=5,\n first_waiting_time=2,\n )\n\n starting_point = normalize_vector(NEWTON_VEC + JUPITER_VEC) * SPHERE_RADIUS\n nodes_list_b = [ForceNode(JUPITER_VEC, True), ForceNode(NEWTON_VEC, True), ForceNode(TEACHER_VEC, False)]\n self.animate_physical_system(\n starting_point=starting_point,\n nodes_list=nodes_list_b,\n num_of_iterations=1000,\n arc_radians=0.01,\n run_time=5,\n first_waiting_time=8,\n )\n\n starting_point = normalize_vector(WATER_VEC + TEACHER_VEC + SKI_VEC) * SPHERE_RADIUS\n nodes_list = [\n ForceNode(WATER_VEC, True),\n ForceNode(TEACHER_VEC, True),\n ForceNode(SKI_VEC, True),\n ForceNode(NEWTON_VEC, False),\n ForceNode(JUPITER_VEC, False),\n ]\n self.animate_physical_system(\n starting_point=starting_point,\n nodes_list=nodes_list,\n num_of_iterations=1000,\n arc_radians=0.01,\n run_time=5,\n first_waiting_time=5,\n )\n self.dynamic_wait(2)\n\n self.play(\n Uncreate(axes),\n Uncreate(sphere),\n *[Uncreate(arrows_list[i]) for i in range(words_list_len)],\n *[FadeOut(words_labels_list[i]) for i in range(words_list_len)],\n ) # , 
FadeOut(text_box)\n if not self.simple_mode:\n self.stop_ambient_camera_rotation()\n\n def scene_guesser_views(self):\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\planets.csv\", \"planets (2 cards)\", waiting_time=26)\n self.plot_guesser_view_chart(\n r\"visualizer\\graphs_data\\redevelopment_fine.csv\", \"redevelopment (2 cards)\", waiting_time=28\n )\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\dark_bad_choose_it.csv\", 'dark (2 cards)', waiting_time=21)\n self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\apollo_bad.csv\", \"apollo (2 cards)\", waiting_time=18)\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\rhino_bad.csv\", \"rhino (2 cards)\", waiting_time=21)\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\rhino_bad.csv\", 'rhino')\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\advice.csv\", \"advice\")\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\beneath_good.csv\", 'beneath')\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\ceilings_good.csv\", 'ceillings')\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\father.csv\", 'father')\n # self.plot_guesser_view_chart(r\"visualizer\\graphs_data\\gear_good.csv\", 'gear')\n\n self.play(*[FadeOut(mob) for mob in self.mobjects])\n\n def scene_ending_title(self):\n ending_title = Text(\"Thanks for watching!\")\n self.write_3d_text(ending_title)\n self.dynamic_wait(2)\n self.play(FadeOut(ending_title))\n self.dynamic_wait(1)\n\n @staticmethod\n def specific_color_map(i):\n if i in RED_CARDS_INDICES:\n return RED\n elif i in BLUE_CARDS_INDICES:\n return BLUE\n elif i in BLACK_CARD_INDEX:\n return DARK_GRAY\n else:\n return LIGHT_GRAY\n\n def annotate_objects(self, objects, scale_factor, run_time):\n self.play(*[obj.animate.scale(scale_factor) for obj in objects], rate_func=there_and_back, run_time=run_time)\n\n def expose_board_colors(self, board):\n self.play(*[board[i][0].animate.set_color(self.specific_color_map(i)) for i in range(25)])\n self.annotate_objects([board[i] for i in BLUE_CARDS_INDICES], CARDS_SCALING_FACTOR, CARDS_ANNOTATION_DURATION)\n self.annotate_objects([board[i] for i in RED_CARDS_INDICES], CARDS_SCALING_FACTOR, CARDS_ANNOTATION_DURATION)\n self.annotate_objects([board[i] for i in BLACK_CARD_INDEX], CARDS_SCALING_FACTOR, CARDS_ANNOTATION_DURATION)\n self.annotate_objects([board[i] for i in GRAY_CARDS_INDICES], CARDS_SCALING_FACTOR, CARDS_ANNOTATION_DURATION)\n self.dynamic_wait(7)\n self.play(*[board[i][0].animate.set_color(CARDS_FILL_COLOR) for i in range(25)])\n self.dynamic_wait(14)\n\n def generate_board(self, words_list):\n board = VGroup()\n for r in range(5):\n for c in range(5):\n current_card = self.generate_card(words_list[5 * r + c])\n current_card.move_to(\n ORIGIN\n + (r - 2.5) * CARDS_HEIGHT * (1 + CARDS_VERTICAL_SPACING) * UP\n + (c - 2.5) * CARDS_WIDTH * (1 + CARDS_HORIZONTAL_SPACING) * RIGHT\n )\n board.add(current_card)\n board.move_to(ORIGIN)\n return board\n\n @staticmethod\n def generate_card(\n text,\n height=CARDS_HEIGHT,\n width=CARDS_WIDTH,\n fill_color=CARDS_FILL_COLOR,\n fill_opacity=CARDS_FILL_OPACITY,\n font_size=CARDS_FONT_SIZE,\n stroke_color=None,\n ):\n card = VGroup()\n rectangle = Rectangle(\n height=height, width=width, fill_color=fill_color, fill_opacity=fill_opacity, stroke_color=stroke_color\n )\n text = Text(text, font_size=font_size).move_to(rectangle)\n card.add(rectangle, text)\n return card\n\n # def animate_game_explanation(self):\n # 
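note: this method is left commented out in the source, seemingly a stub for a game-explanation scene\n # 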
self.play(FadeIn(SVGMobject(r\"visualizer\\svg\\hinter.svg\")))\n\n def scene_word2vec_explanation(self):\n spacing_constant = 3\n lion_color = BLUE\n deer_color = RED\n national_color = GREEN\n arrows_scale = 1\n words_horizontal_shift = 2\n word2vec_title = Text(\"Word2Vec\").scale(1.2).to_edge(UP)\n self.play(Write(word2vec_title))\n\n lion_text = Text(\"lion:\", color=lion_color).shift(words_horizontal_shift * LEFT)\n deer_text = Text(\"deer:\", color=deer_color).shift(words_horizontal_shift * LEFT)\n nationalism_text = Text(\"nationalism:\", color=national_color).shift(words_horizontal_shift * LEFT)\n lion_theta = 0.45 * PI\n deer_theta = 0.54 * PI\n nationalism_theta = 3 / 2 * PI\n lion_vec = arrows_scale * (np.cos(lion_theta) * RIGHT + np.sin(lion_theta) * UP)\n deer_vec = arrows_scale * (np.cos(deer_theta) * RIGHT + np.sin(deer_theta) * UP)\n nationalism_vec = arrows_scale * (np.cos(nationalism_theta) * RIGHT + np.sin(nationalism_theta) * UP)\n lion_arrow = (\n Arrow(start=ORIGIN, end=lion_vec).next_to(lion_text, spacing_constant * RIGHT).set_color(lion_color)\n )\n lion_arrow.add_updater(lambda x: x.next_to(lion_text, RIGHT))\n deer_arrow = (\n Arrow(start=ORIGIN, end=deer_vec).next_to(deer_text, spacing_constant * RIGHT).set_color(deer_color)\n )\n deer_arrow.add_updater(lambda x: x.next_to(deer_text, RIGHT))\n nationalism_arrow = (\n Arrow(start=ORIGIN, end=nationalism_vec)\n .next_to(nationalism_text, spacing_constant * RIGHT)\n .set_color(national_color)\n )\n nationalism_arrow.add_updater(lambda x: x.next_to(nationalism_text, RIGHT))\n\n self.wait(7)\n\n self.play(FadeIn(lion_text))\n self.play(Create(lion_arrow))\n\n self.wait(2)\n\n self.play(lion_text.animate.shift(1.5 * UP))\n deer_text.next_to(lion_text, spacing_constant * DOWN)\n nationalism_text.next_to(deer_text, spacing_constant * DOWN)\n self.play(FadeIn(deer_text, shift=UP), FadeIn(deer_arrow, shift=UP))\n self.play(FadeIn(nationalism_text, shift=UP), FadeIn(nationalism_arrow, shift=UP))\n lion_arrow.clear_updaters()\n deer_arrow.clear_updaters()\n nationalism_arrow.clear_updaters()\n arrows_anchor = 4 * RIGHT\n self.dynamic_wait(5)\n self.play(\n lion_arrow.animate.put_start_and_end_on(start=arrows_anchor, end=arrows_anchor + lion_vec),\n deer_arrow.animate.put_start_and_end_on(start=arrows_anchor, end=arrows_anchor + deer_vec),\n nationalism_arrow.animate.put_start_and_end_on(start=arrows_anchor, end=arrows_anchor + nationalism_vec),\n )\n self.dynamic_wait(4)\n small_angle = Angle(lion_arrow, deer_arrow, radius=0.6 * arrows_scale)\n big_angle = Angle(nationalism_arrow, lion_arrow, radius=0.3 * arrows_scale)\n self.play(Create(small_angle))\n self.play(Create(big_angle))\n self.dynamic_wait(3)\n self.remove_everything()\n\n def animate_physical_system(\n self,\n starting_point: np.array,\n nodes_list,\n guess_word=None,\n num_of_iterations=10,\n arc_radians=0.01,\n run_time=7,\n first_waiting_time=1,\n ): # :List[np.array,...]\n trajectory = record_trajectory(\n starting_point=starting_point,\n nodes_list=nodes_list,\n num_of_iterations=num_of_iterations,\n arc_radians=arc_radians,\n )\n x = np.linspace(0, 1, len(trajectory))\n y = np.stack(trajectory)\n trajectory_interp = interp1d(x, y, axis=0)\n t = ValueTracker(0)\n centroid = Line(start=[0, 0, 0], end=starting_point)\n centroid_dot = AnnotationDot(point=starting_point, stroke_width=4, stroke_color=DARK_GRAY)\n centroid_label = self.generate_card(\n text=\"centroid\",\n height=0.35,\n width=1.2,\n fill_color=LIGHT_GRAY,\n fill_opacity=0.4,\n 
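# the moving \"centroid\" marker reuses the board-card factory at a smaller size\n 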
font_size=CARDS_FONT_SIZE * 0.8,\n stroke_color=DARK_GRAY,\n ).move_to(starting_point * 1.2)\n centroid.add_updater(lambda x: x.become(Line(start=[0, 0, 0], end=trajectory_interp(t.get_value()))))\n centroid_dot.add_updater(\n lambda x: x.become(\n AnnotationDot(point=trajectory_interp(t.get_value()), stroke_width=4, stroke_color=DARK_GRAY)\n )\n )\n centroid_label.add_updater(\n lambda x: x.become(\n self.generate_card(\n text=\"centroid\",\n height=0.32,\n width=1.2,\n fill_color=LIGHT_GRAY,\n fill_opacity=0.4,\n font_size=CARDS_FONT_SIZE * 0.8,\n stroke_color=DARK_GRAY,\n ).move_to(trajectory_interp(t.get_value()) * 1.2)\n )\n )\n\n # forces = []\n # for i, node in enumerate(nodes_list):\n # force = geodesic_object(node.force_origin, normalize_vector(centroid.get_end()))\n # if node.force_sign:\n # force.set_color(BLUE)\n # else:\n # force.set_color(RED)\n # force.add_updater(lambda x, i=i: x.become(geodesic_object(node.force_origin, normalize_vector(trajectory_interp(t.get_value())))))\n # forces.append(force)\n # This is an explicit version of the forl\n\n park_force = geodesic_object(nodes_list[0].force_origin, normalize_vector(centroid.get_end())).set_color(\n self.sign_to_color(nodes_list[0].force_sign)\n )\n park_force.add_updater(\n lambda x: x.become(\n geodesic_object(nodes_list[0].force_origin, normalize_vector(trajectory_interp(t.get_value())))\n ).set_color(self.sign_to_color(nodes_list[0].force_sign))\n )\n teacher_force = geodesic_object(nodes_list[1].force_origin, normalize_vector(centroid.get_end())).set_color(\n self.sign_to_color(nodes_list[1].force_sign)\n )\n teacher_force.add_updater(\n lambda x: x.become(\n geodesic_object(nodes_list[1].force_origin, normalize_vector(trajectory_interp(t.get_value())))\n ).set_color(self.sign_to_color(nodes_list[1].force_sign))\n )\n ski_force = geodesic_object(nodes_list[2].force_origin, normalize_vector(centroid.get_end())).set_color(\n self.sign_to_color(nodes_list[2].force_sign)\n )\n ski_force.add_updater(\n lambda x: x.become(\n geodesic_object(nodes_list[2].force_origin, normalize_vector(trajectory_interp(t.get_value())))\n ).set_color(self.sign_to_color(nodes_list[2].force_sign))\n )\n if len(nodes_list) > 3:\n water_force = geodesic_object(nodes_list[3].force_origin, normalize_vector(centroid.get_end())).set_color(\n self.sign_to_color(nodes_list[3].force_sign)\n )\n water_force.add_updater(\n lambda x: x.become(\n geodesic_object(nodes_list[3].force_origin, normalize_vector(trajectory_interp(t.get_value())))\n ).set_color(self.sign_to_color(nodes_list[3].force_sign))\n )\n\n self.add_fixed_orientation_mobjects(centroid_dot, centroid_label)\n self.play(Create(centroid), Create(centroid_dot), Create(centroid_label))\n if len(nodes_list) > 3:\n cluster_forces = [\n force\n for force in [water_force, ski_force, teacher_force, park_force]\n if force.color.hex == str.lower(BLUE)\n ]\n non_cluster_forces = [\n force\n for force in [water_force, ski_force, teacher_force, park_force]\n if force.color.hex == str.lower(RED)\n ]\n else:\n cluster_forces = [\n force for force in [ski_force, teacher_force, park_force] if force.color.hex == str.lower(BLUE)\n ]\n non_cluster_forces = [\n force for force in [ski_force, teacher_force, park_force] if force.color.hex == str.lower(RED)\n ]\n self.play(*[Create(force) for force in cluster_forces])\n self.dynamic_wait(first_waiting_time)\n self.play(*[Create(force) for force in non_cluster_forces])\n # if len(nodes_list) > 3:\n # self.play(Create(park_force), Create(ski_force), 
Create(water_force),\n # Create(teacher_force))\n # else:\n # self.play(Create(park_force), Create(ski_force),\n # Create(teacher_force))\n self.play(t.animate.set_value(1), run_time=run_time, rate_func=linear)\n\n hint_boundaries = surrounding_circle_object(trajectory[-1], HINT_RADIUS)\n self.play(Create(hint_boundaries))\n hint_vec = vec_arbitrary_rotation(trajectory[-1], HINT_RADIUS * 0.7)\n hint_vec_obj = Line(start=[0, 0, 0], end=hint_vec, color=HINT_COLOR)\n hint_dot = AnnotationDot(point=hint_vec, fill_color=HINT_COLOR, stroke_width=4, stroke_color=DARK_GRAY)\n self.add_fixed_orientation_mobjects(hint_dot)\n hint_word = self.generate_card(\n text=\"hint\",\n height=0.32,\n width=1.0,\n fill_color=LIGHT_GRAY,\n fill_opacity=0.4,\n font_size=CARDS_FONT_SIZE * 0.8,\n stroke_color=DARK_GRAY,\n ).move_to(hint_vec * 1.2)\n self.add_fixed_orientation_mobjects(hint_word)\n self.play(Create(hint_vec_obj), Create(hint_dot), Create(hint_word))\n centroid_label.clear_updaters()\n centroid.clear_updaters()\n centroid_dot.clear_updaters()\n park_force.clear_updaters()\n ski_force.clear_updaters()\n teacher_force.clear_updaters()\n if len(nodes_list) > 3:\n water_force.clear_updaters()\n self.dynamic_wait(2)\n if len(nodes_list) > 3:\n self.play(\n FadeOut(centroid_label),\n FadeOut(hint_vec_obj),\n FadeOut(hint_boundaries),\n FadeOut(hint_dot),\n FadeOut(hint_word),\n FadeOut(centroid),\n FadeOut(centroid_dot),\n FadeOut(park_force),\n FadeOut(ski_force),\n FadeOut(teacher_force),\n FadeOut(water_force),\n )\n else:\n self.play(\n FadeOut(centroid_label),\n FadeOut(hint_vec_obj),\n FadeOut(hint_boundaries),\n FadeOut(hint_dot),\n FadeOut(hint_word),\n FadeOut(centroid),\n FadeOut(centroid_dot),\n FadeOut(park_force),\n FadeOut(ski_force),\n FadeOut(teacher_force),\n )\n\n @staticmethod\n def sign_to_color(sign):\n if sign:\n return BLUE\n else:\n return RED\n\n def animate_random_connections(self, vectors_list, number_of_examples, example_length):\n for i in range(number_of_examples):\n connections = generate_random_connections(vectors_list, average_cluster_size=1.2)\n self.add(*connections)\n self.wait(example_length)\n self.remove(*connections)\n\n def coords_to_point(self, coords):\n theta = -self.camera.get_theta()\n phi = -self.camera.get_phi()\n coords_np = np.array(coords)\n constant_rotation = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n theta_rotation = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])\n phi_rotation = np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])\n rotated = phi_rotation @ theta_rotation @ constant_rotation @ coords_np\n return rotated\n\n def plot_guesser_view_chart(self, data_path, title, waiting_time=1):\n df = pd.read_csv(data_path)\n df = df.loc[0:10, :]\n colors = df[\"colors\"].apply(text2color).to_list()\n bar_names = df.iloc[:, 0].to_list()\n horizontal_factor = 1\n CONFIG_dict = {\n \"height\": 5,\n \"width\": 9 * horizontal_factor,\n # \"n_ticks\": 4,\n # \"tick_width\": 0.2,\n \"label_y_axis\": \"Kaki\",\n # \"y_axis_label\": \"Kaki\",\n # \"y_axis_label_height\": 0.25,\n \"max_value\": 0.5,\n \"bar_colors\": colors,\n # \"bar_fill_opacity\": 0.8,\n # \"bar_stroke_width\": 0,\n \"bar_names\": bar_names,\n \"bar_label_scale_val\": 0,\n }\n\n chart_position_x = 0\n chart_position_y = 0.2\n labels_size = 0.55\n labels_separation = 0.78 * horizontal_factor\n labels_shift_x = chart_position_x - 3.7\n labels_shift_y = chart_position_y - 3\n bar_labels = VGroup()\n for i in 
range(len(bar_names)):\n label = Text(bar_names[i])\n label.scale(labels_size)\n label.move_to(UP * labels_shift_y + (i * labels_separation + labels_shift_x) * RIGHT)\n label.rotate(np.pi * (1.5 / 6))\n bar_labels.add(label)\n\n chart = BarChart(values=df[\"distance_to_centroid\"].to_list(), **CONFIG_dict)\n chart.shift(chart_position_y * UP + chart_position_x * RIGHT)\n title = Text(f\"Hint word: {title}\")\n y_label = Text(\"Cosine distance to hinted word\")\n x_label = Text(\"Board words\")\n title.next_to(chart, UP).shift(RIGHT - chart_position_y * UP)\n y_label.rotate(angle=TAU / 4, axis=OUT).next_to(chart, LEFT).scale(0.6)\n x_label.next_to(bar_labels, DOWN).scale(0.6).shift(0.2 * UP)\n\n self.add_fixed_in_frame_mobjects(title)\n self.play(Write(title))\n self.add_fixed_in_frame_mobjects(chart, y_label, x_label, bar_labels)\n self.play(\n DrawBorderThenFill(chart),\n FadeIn(bar_labels, shift=UP),\n FadeIn(y_label, shift=UP),\n FadeIn(x_label, shift=UP),\n run_time=1,\n )\n self.dynamic_wait(waiting_time)\n self.play(FadeOut(chart), FadeOut(bar_labels), FadeOut(title), FadeOut(y_label), FadeOut(x_label), run_time=1)\n\n def update_position_to_camera(self, mob, coordinate):\n mob.move_to(self.coords_to_point(coordinate))\n\n def write_3d_text(self, text_object, fade_out=False, waiting_time=None):\n if waiting_time is None:\n waiting_time = text_len_to_time(text_object.original_text)\n self.add_fixed_in_frame_mobjects(text_object)\n self.play(Write(text_object))\n self.wait(waiting_time)\n if fade_out:\n self.play(FadeOut(text_object))\n\n def remove_3d_text(self, *text_objects):\n self.play(*[FadeOut(text_object) for text_object in text_objects])\n\n def remove_everything(self):\n self.play(*[FadeOut(mob) for mob in self.mobjects])\n\n def dynamic_wait(self, duration):\n if self.waitings:\n self.wait(duration)\n\n\n# test_scene = KalirmozExplanation()\n# test_scene.scene_sphere(simple_mode=True)\n# starting_point = polar_to_cartesian(1, 0.52 * PI, 1.95 * PI)\n# nodes_list = [ForceNode(SKI_VEC, True), ForceNode(WATER_VEC, True), ForceNode(PARK_VEC, False)]\n# test_scene.animate_physical_system(\n# starting_point=starting_point, nodes_list=nodes_list, num_of_iterations=5, arc_radians=0.01\n# )\n# hint_word = \"planets\"\n# test_scene.plot_guesser_view_chart(f\"visualizer\\graphs_data\\{hint_word}.csv\", hint_word)\n","repo_name":"mkali-personal/codenames","sub_path":"visualizer/explanation.py","file_name":"explanation.py","file_ext":"py","file_size_in_byte":40481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"32716669520","text":"def solution(tickets):\n graph = dict()\n for a, b in tickets:\n graph[a] = graph.get(a, []) + [b]\n print(graph)\n for i in graph.keys():\n graph[i].sort(reverse=True)\n q = ['ICN']\n answer = []\n while q:\n top = q[-1]\n if top not in graph or len(graph[top]) == 0:\n answer.append(q.pop())\n else:\n q.append(graph[top][-1])\n graph[top] = graph[top][:-1]\n \n return answer[::-1]","repo_name":"ohilikeit/Coding_Test_Practice","sub_path":"프로그래머스/lv3/43164. 
여행경로/여행경로.py","file_name":"여행경로.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"32847002710","text":"import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter - ')\nhtml = urllib.request.urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, 'html.parser')\n\n# Retrieve all of the anchor tags\ntags = soup('a')\nfor tag in tags:\n print(tag.get('href', None))\n\n# Code: http://www.py4e.com/code3/urllinks.py\n","repo_name":"onurcanust/py4e","sub_path":"4/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"41166583692","text":"import numpy as np\n\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.svm import OneClassSVM\n\n\nclass KNN:\n def __init__(self, n_neighbors=5, out=0.05, algorithm=\"ball_tree\"):\n self.n_neighbors = n_neighbors\n self.out = out\n self.model = False\n self.algorithm = algorithm\n self.distances = False\n self.indices = False\n self.threshold = False\n self.len_data = False\n\n def fit(self, X):\n self.len_data = len(X)\n self.model = NearestNeighbors(\n n_neighbors=self.n_neighbors, algorithm=self.algorithm\n ).fit(X)\n self.distances, self.indices = self.model.kneighbors(X)\n self.threshold = sorted(self.distances[:, self.n_neighbors - 1])[\n int((self.len_data - 1) * (1 - self.out))\n ]\n\n def transform(self, x):\n self.distances, self.indices = self.model.kneighbors(x)\n return self.distances[:, self.n_neighbors - 1]\n\n def transform_bin(self, x):\n self.transform(x)\n self.Z = self.distances[:, self.n_neighbors - 1]\n return np.where(self.Z >= self.threshold, 0, 1)\n\n\nclass OCSVM:\n def __init__(self, out=0.05):\n self.model = OneClassSVM()\n self.out = out\n self.threshold = False\n self.len_data = False\n\n def fit(self, X):\n self.len_data = len(X)\n self.model.fit(X)\n self.threshold = sorted(self.model.decision_function(X))[\n int((self.len_data - 1) * self.out)\n ]\n\n def transform(self, x):\n return self.model.decision_function(x)\n\n def transform_bin(self, x):\n self.Z = self.model.decision_function(x)\n return np.where(self.Z >= self.threshold, 1, 0)\n","repo_name":"maskot1977/scikitallstars","sub_path":"scikitallstars/avd.py","file_name":"avd.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"27317099763","text":"import argparse\nimport os\nimport sys\n\nimport numpy as np\nimport cv2\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\nfrom tqdm import tqdm\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--output_dir', type=str, default='/mnt/fs2/2019/Takamuro/m2_research/weather_transferV2/results/c_UNet/tensorboard')\nparser.add_argument('--log_path', type=str,\n default='runs/Nov24_13-08-43_647e57fb8e44_lr-2*0.0001_bs-32_ne-150_name-Flickr_cUNet_w-c_res101-1122e15_Wosampler-BS32_sampler-False_loss_lamda-c1-w1-CE_b1-0.5_b2-0.9_GDratio-8_amp-True_MGpu-True'\n )\nargs = parser.parse_args()\n\nsys.path.append(os.getcwd())\noutput_path = os.path.join(args.output_dir, args.log_path.split('/')[-1])\nprint(output_path)\nprint('please press enter')\ninput()\nos.makedirs(output_path, exist_ok=True)\n\npath = args.log_path # Tensorboard ログファイル\nevent_acc = EventAccumulator(path, size_guidance={'images': 0})\nevent_acc.Reload() # ログファイルのサイズによっては非常に時間がかかる\n\nfor tag in event_acc.Tags()['images']:\n events = 
event_acc.Images(tag)\n tag_name = tag.replace('/', '_')\n for index, event in tqdm(enumerate(events), total=len(events)):\n # 画像はエンコードされているので戻す\n s = np.frombuffer(event.encoded_image_string, dtype=np.uint8)\n image = cv2.imdecode(s, cv2.IMREAD_COLOR) # カラー画像の場合\n # 保存\n output_path_ = os.path.join(output_path, '{}_{:04}.jpg'.format(tag_name, index))\n cv2.imwrite(output_path_, image)\n","repo_name":"Sota0726/Weather_UNet_v2","sub_path":"sh/write_image_summary.py","file_name":"write_image_summary.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17959426445","text":"#Memoization for smaller subproblems\nglobaldict={}\n\n#n is the number of villagers initially\ndef solve(n):\n #Editing the dictionary within a method\n global globaldict\n #Base case with 0 villagers\n if (n == 0):\n #0 people survive on average, it doesn't matter how many take poison\n return (0,None)\n #Base case with 1 villager\n elif (n==1):\n #0 people survive on average, it doesn't matter how many take poison\n return (0,None)\n #Case where we have already solved this subproblem\n elif (n in globaldict.keys()):\n #The return value is the value associated with the argument\n return globaldict.get(n)\n #Case of new problem\n else:\n #ls is the number of people we expect to survive\n ls=0\n #lp is the number of people who should take poison\n lp=None\n #We want to test if anywhere from 1 to n-1 villagers inclusive should take poison\n for i in range(n-1):\n #Check if the number of villagers that would survive if i+1 villagers took poison is greater than the current amount that would survive\n if ((i+1)/n)*(n-(i+1)) + ((n-(i+1))/n)*solve(n-(i+1)-1)[0] > ls:\n #If it is, update the expected number that survive\n ls = ((i+1)/n)*(n-(i+1)) + ((n-(i+1))/n)*solve(n-(i+1)-1)[0]\n #Also if it is, update the number that should take poison\n lp = (i+1)\n #After solving, put our solution in the dictionary for memoization\n globaldict[n]=(ls,lp)\n #Return out solution\n return (ls,lp)\n\n#Solve the problem for when the number of villagers ranges from 0 to 49\nfor j in range(50):\n #Print the number of villagers followed by a tuple containing the expected number to survive and the amount that should take poison\n print(j,solve(j))\n","repo_name":"ayelet-w/Mobileye-Project-Traffic-lights","sub_path":"Villager Werewolf GT Problem.py","file_name":"Villager Werewolf GT Problem.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72529194153","text":"import numpy as np\n\n# Create the arrays\narray1 = np.random.random(1000).astype(np.float32)\narray2 = np.random.random(2000).astype(np.float32)\narray3 = np.random.random(3000).astype(np.float32)\narray4 = np.random.random(4000).astype(np.float32)\n\n# Save the arrays to a binary file\nwith open(\"arrays.bin\", \"wb\") as file:\n file.write(array1.tobytes())\n file.write(array2.tobytes())\n file.write(array3.tobytes())\n file.write(array4.tobytes())\n\n\n\nprint(array2[:10])\n","repo_name":"hassanfv/SPH_2","sub_path":"11_Dec_2022/GPU_sph/Analytical_models/SPH/GPU/01/test0.py","file_name":"test0.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21915383400","text":"\"\"\"Utilities for pandas dataframes\"\"\"\nfrom __future__ import annotations\n\nimport sys\nimport pandas as pd\n\nfrom 
io import StringIO\n\n\ndef print_dataframe(df: pd.DataFrame,\n title=None, precision=6, sortby=None, file=sys.stdout, end=None, display=None) -> None:\n \"\"\"\n Print entire pandas DataFrame.\n\n Args:\n df: pandas DataFrame.\n title: Optional string to print as initial title.\n precision: Floating point output precision (number of significant digits).\n This is only a suggestion [default: 6] [currently: 6]\n sortby: string name or list of names which refer to the axis items to be sorted (dataframe is not changed)\n file: a file-like object (stream); defaults to the current sys.stdout.\n If file == \"string\", a temporary stream is created and a string is returned.\n end: End string.\n display: Use ipython rich display protocol by invoking _repr_`display_ and returning the result.\n Use e.g. display=\"html\" to get HTML table.\n \"\"\"\n return_string = file == \"string\"\n if return_string:\n file = StringIO()\n\n if title is not None: print(title, file=file)\n if sortby is not None and sortby in df:\n df = df.sort_values(sortby, inplace=False)\n\n with pd.option_context(\"display.max_rows\", len(df),\n \"display.max_columns\", len(list(df.keys())),\n \"display.precision\", precision,\n ):\n if display is None:\n print(df, file=file)\n print(\" \", file=file)\n if end is not None:\n print(end, file=file)\n if return_string: return file.getvalue()\n else:\n from IPython.core.display import HTML\n output = getattr(df, \"_repr_%s_\" % display)()\n return HTML(output)\n","repo_name":"abinit/abipy","sub_path":"abipy/tools/printing.py","file_name":"printing.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"72"} +{"seq_id":"23739526199","text":"\"\"\"Analyse set of graphs using simple degree-based filtration.\"\"\"\n\nimport argparse\nimport pickle\nimport torch\n\nimport igraph as ig\nimport numpy as np\n\nfrom pyper.persistent_homology.graphs import calculate_persistence_diagrams\nfrom pyper.persistent_homology.graphs import extend_filtration_to_edges\nfrom pyper.vectorisation import featurise_distances\nfrom pyper.vectorisation import featurise_pairwise_distances\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom sklearn.svm import SVC\n\n\ndef build_graph_from_edge_list(x, edge_list):\n \"\"\"Build graph from edge list and return it.\"\"\"\n n_vertices = edge_list.max().numpy() + 1\n g = ig.Graph(n_vertices)\n\n x = x.numpy()\n x = np.linalg.norm(x, axis=1)\n\n for u, v in edge_list.numpy().transpose():\n g.add_edge(u, v)\n\n if args.random:\n g.vs['attribute'] = np.random.normal(size=len(g.degree()))\n elif args.norm_filtration:\n g.vs['attribute'] = x\n else:\n g.vs['attribute'] = g.degree()\n\n if args.perturb:\n g.vs['attribute'] += np.random.normal(size=len(g.degree()))\n\n g = extend_filtration_to_edges(\n g,\n vertex_attribute='attribute',\n edge_attribute='attribute',\n )\n\n return g\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('INPUT', type=str, nargs='+', help='Input file(s)')\n parser.add_argument(\n '-l', '--labels',\n type=str,\n help='Path to labels'\n )\n\n parser.add_argument(\n '-r', '--random',\n action='store_true',\n help='If set, uses random node features instead of degrees.'\n )\n\n parser.add_argument(\n '-p', '--perturb',\n action='store_true',\n help='If set, 
perturbs filtration values.'\n )\n\n parser.add_argument(\n '-N', '--norm-filtration',\n action='store_true',\n help='If set, use norm-based filtration.'\n )\n\n args = parser.parse_args()\n\n # Will contain all graphs in `igraph` format. They will form the\n # basis for the analysis in terms of a fixed filtration later on\n graphs = []\n\n # Ditto for labels, but this is optional.\n labels = []\n\n if args.labels is not None:\n labels = torch.load(args.labels).numpy()\n\n for filename in args.INPUT:\n with open(filename, 'rb') as f:\n x_list, edge_lists = pickle.load(f)\n\n for x, edge_list in zip(x_list, edge_lists):\n graphs.append(build_graph_from_edge_list(x, edge_list))\n\n persistence_diagrams = [\n calculate_persistence_diagrams(\n graph,\n vertex_attribute='attribute',\n edge_attribute='attribute',\n unpaired=100,\n ) for graph in graphs\n ]\n\n X0 = []\n X1 = []\n\n for (D0, D1) in persistence_diagrams:\n x0 = featurise_distances(D0)\n x1 = featurise_distances(D1)\n\n # Simple padding...this one might bite us at some point.\n x0 += [0] * (50 - len(x0))\n x1 += [0] * (50 - len(x1))\n\n X0.append(x0)\n X1.append(x1)\n\n X0 = np.asarray(X0)\n X1 = np.asarray(X1)\n X = np.hstack((X0, X1))\n\n scores = []\n\n for i in range(10):\n cv = StratifiedKFold(n_splits=3, shuffle=True)\n\n # Could also be done in a stratified fashion...\n scaler = StandardScaler()\n X = scaler.fit_transform(X)\n\n param_grid = {\n 'C': 10. ** np.arange(-3, 4), # 10^{-3}..10^{3}\n 'gamma': ['auto'],\n }\n\n svm = SVC(kernel='rbf')\n clf = GridSearchCV(svm, param_grid, scoring='accuracy', cv=3)\n clf.fit(X, labels)\n\n scores.append(np.mean(cross_val_score(clf, X, labels, cv=cv)))\n print(f'Iteration {i}: {100 * scores[-1]:.2f}')\n\n print(f'{100 * np.mean(scores):.2f} +- {100 * np.std(scores):.2f}')\n","repo_name":"BorgwardtLab/TOGL","sub_path":"topognn/analyse_graphs_ph.py","file_name":"analyse_graphs_ph.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"72"} +{"seq_id":"20350930511","text":"import numpy as np\n\n\ndef fsolve(func, x0):\n epsfcn = np.finfo(np.dtype(float)).eps\n eps = epsfcn**0.5\n delta_x = 1.0\n x = x0\n while abs(delta_x) > eps:\n f_x = func(x)\n d_x = max(eps, eps * abs(x))\n df_x = (func(x + d_x) - f_x) / d_x\n delta_x = -f_x / df_x\n x += delta_x\n return x\n\n\ndef crea_max_array(arr):\n _list = []\n for val in arr:\n _list.append(max(arr))\n _arr_resu = np.array(_list)\n return _arr_resu\n\n\nclass PostRocheAnalytic:\n \"\"\"classe pour calcul analytique des valeurs de post roche\"\"\"\n\n # loi ramberg osgood\n _K = 0.01\n _n = 2\n _E = 2.0e11\n # Z : module d'inertie pour les poutres circulaires : Iy / R\n _Z = 7.36311e-09 / 0.01\n # B_2 = 1. 
pour les tronçons de partie droite\n _B_2 = 1.0\n # coefficients de contraintes\n _D_21 = 0.712\n _D_22 = 0.429 * (0.02 / 0.005) ** 0.16\n _D_23 = _D_22\n\n def __init__(\n self,\n mfy_poids_propre,\n mt_deplacement_impose,\n mfy_sismique_dyn,\n mfz_sismique_dyn,\n mfy_sismique_qs,\n mfz_sismique_qs,\n mfy_dilatation_thermique,\n mt_sismique_dyn,\n mt_sismique_qs,\n mt_sismique,\n mfy_sismique,\n mfz_sismique,\n pression=0.0,\n ):\n\n ep = 0.005\n rayon = 0.01\n self._sigpres = 0.87 * pression * (rayon - ep) / ep\n self._mfy_poids_propre = mfy_poids_propre\n self._mt_deplacement_impose = mt_deplacement_impose\n self._mfy_sismique_dyn = mfy_sismique_dyn\n self._mfz_sismique_dyn = mfz_sismique_dyn\n self._mfy_sismique_qs = mfy_sismique_qs\n self._mfz_sismique_qs = mfz_sismique_qs\n\n self._mfy_dilatation_thermique = mfy_dilatation_thermique\n self._mt_sismique_dyn = mt_sismique_dyn\n self._mt_sismique_qs = mt_sismique_qs\n\n self._mt_sismique = mt_sismique\n self._mfy_sismique = mfy_sismique\n self._mfz_sismique = mfz_sismique\n\n def _epsi_p(self, sigma):\n \"\"\"epsi plastique pour Ranberg osgood\"\"\"\n return self._K * (sigma / self._E) ** (1 / self._n)\n\n def calcul_ressort(self):\n \"\"\"calcule self._sigma_deplacement_ref, self._sigma_sismique_ref, self._t, self._ts, self._T, self._T_s, self._r_m et self._r_s\"\"\"\n # moments en deplacements à abattres\n mt_deplacement = np.abs(self._mt_deplacement_impose)\n mf_delpacement = np.abs(self._mfy_dilatation_thermique)\n mf_sismique = np.sqrt(self._mfy_sismique**2 + self._mfz_sismique**2)\n # sigma ref\n self._sigma_deplacement_ref = (\n (0.87 * mt_deplacement / self._Z) ** 2\n + (0.79 * self._B_2 * mf_delpacement / self._Z) ** 2\n ) ** 0.5\n self._sigma_sismique_ref = (\n (0.87 * self._mt_sismique / self._Z) ** 2\n + (0.79 * self._B_2 * mf_sismique / self._Z) ** 2\n ) ** 0.5\n # reversibilites locales\n sig_over_E = self._sigma_deplacement_ref / self._E\n sig_over_E = (sig_over_E > 1e-6) * sig_over_E\n if np.count_nonzero(sig_over_E) == 0:\n self._t = np.zeros([6])\n else:\n self._t = sig_over_E / self._epsi_p(self._sigma_deplacement_ref)\n sig_over_E = self._sigma_sismique_ref / self._E\n sig_over_E = (sig_over_E > 1e-6) * sig_over_E\n if np.count_nonzero(sig_over_E) == 0:\n self._t_s = np.zeros([6])\n else:\n self._t_s = sig_over_E / self._epsi_p(self._sigma_sismique_ref)\n\n if np.count_nonzero(self._t) == 0:\n # reversibilité totales\n T_1 = 0.0\n T_2 = 0.0\n self._T = np.array([T_1] * 4 + [T_2] * 2)\n # ressorts\n self._r_m = np.zeros([6])\n r_mmax1 = np.amax(self._r_m[:4])\n r_mmax2 = np.amax(self._r_m[4:])\n self._r_mmax = np.array([r_mmax1] * 4 + [r_mmax2] * 2)\n else:\n\n # reversibilité totales\n T_1 = sum(self._sigma_deplacement_ref[:4][self._t[:4] != 0] ** 2) / sum(\n self._sigma_deplacement_ref[:4][self._t[:4] != 0] ** 2\n / self._t[:4][self._t[:4] != 0]\n )\n T_2 = sum(self._sigma_deplacement_ref[4:][self._t[4:] != 0] ** 2) / max(\n sum(\n self._sigma_deplacement_ref[4:][self._t[4:] != 0] ** 2\n / self._t[4:][self._t[4:] != 0]\n ),\n 1.0e-6,\n )\n self._T = np.array([T_1] * 4 + [T_2] * 2)\n self._r_m = np.maximum(\n np.array([T / t - 1 if t != 0.0 else 0.0 for t, T in zip(self._t, self._T)]),\n np.zeros([6]),\n )\n\n r_mmax1 = np.amax(self._r_m[:4])\n r_mmax2 = np.amax(self._r_m[4:])\n self._r_mmax = np.array([r_mmax1] * 4 + [r_mmax2] * 2)\n\n if np.count_nonzero(self._t_s) == 0:\n T_s_1 = 0.0\n T_s_2 = 0.0\n self._T_s = np.array([T_s_1] * 4 + [T_s_2] * 2)\n # ressorts\n self._r_s = np.zeros([6])\n\n r_smax1 = 
np.amax(self._r_s[:4])\n r_smax2 = np.amax(self._r_s[4:])\n self._r_smax = np.array([r_smax1] * 4 + [r_smax2] * 2)\n\n else:\n\n # reversibilité totales\n T_s_1 = sum(self._sigma_sismique_ref[:4][self._t_s[:4] != 0] ** 2) / sum(\n self._sigma_sismique_ref[:4][self._t_s[:4] != 0] ** 2\n / self._t_s[:4][self._t_s[:4] != 0]\n )\n T_s_2 = sum(self._sigma_sismique_ref[4:][self._t_s[4:] != 0] ** 2) / max(\n sum(\n self._sigma_sismique_ref[4:][self._t_s[4:] != 0] ** 2\n / self._t_s[4:][self._t_s[4:] != 0]\n ),\n 1.0e-6,\n )\n self._T_s = np.array([T_s_1] * 4 + [T_s_2] * 2)\n # ressorts\n self._r_s = np.array(\n [T / t - 1 if t != 0.0 else 0.0 for t, T in zip(self._t_s, self._T_s)]\n )\n\n r_smax1 = np.amax(self._r_s[:4])\n r_smax2 = np.amax(self._r_s[4:])\n self._r_smax = np.array([r_smax1] * 4 + [r_smax2] * 2)\n\n return self._r_m, self._r_s, self._r_mmax, self._r_smax\n\n def calcul_abattement(self):\n \"\"\"calcule self._sigma_deplacement, self._sigma_sismique, self._g, self._g_s\"\"\"\n\n def func(x):\n \"\"\"equation de contrainte vrai\"\"\"\n if x < 0:\n x = 0.0\n return (\n (x / self._E + self._epsi_p(x) - (sigma_ref + self._sigpres) / self._E)\n - self._epsi_p(self._sigpres)\n + r * (x - sigma_ref - self._sigpres) / self._E\n )\n\n # resolution sigma_deplacement\n sigma_deplacement = []\n\n for sigma_ref, r in zip(self._sigma_deplacement_ref, self._r_m):\n if sigma_ref == 0.0:\n root = 0.0\n else:\n root = fsolve(func, sigma_ref)\n sigma_deplacement.append(root)\n self._sigma_deplacement = np.array(sigma_deplacement)\n\n # pour g +opt\n sigma_deplacement_opt = []\n for sigma_ref, r in zip(self._sigma_deplacement_ref, self._r_mmax):\n if sigma_ref == 0.0:\n root = 0.0\n else:\n root = fsolve(func, sigma_ref)\n sigma_deplacement_opt.append(root)\n self._sigma_deplacement_opt = np.array(sigma_deplacement_opt)\n\n # resolution sigma_sismique\n sigma_sismique = []\n for sigma_ref, r in zip(self._sigma_sismique_ref, self._r_s):\n if sigma_ref == 0.0:\n root = 0.0\n else:\n root = fsolve(func, sigma_ref)\n sigma_sismique.append(root)\n self._sigma_sismique = np.array(sigma_sismique)\n\n # pour gsopt\n sigma_sismique_opt = []\n for sigma_ref, r in zip(self._sigma_sismique_ref, self._r_smax):\n if sigma_ref == 0.0:\n root = 0.0\n else:\n root = fsolve(func, sigma_ref)\n sigma_sismique_opt.append(root)\n self._sigma_sismique_opt = np.array(sigma_sismique_opt)\n\n # abbatements g\n self._g = []\n for sigma_ref, sigma_vrai in zip(self._sigma_deplacement_ref, self._sigma_deplacement):\n if sigma_vrai < self._sigpres or sigma_vrai == 0:\n g = 1.0\n else:\n g = (sigma_vrai - self._sigpres) / sigma_ref\n self._g.append(g)\n self._g = np.array(self._g)\n\n # abbatements gopt\n self._gopt = []\n for sigma_ref, sigma_vrai in zip(self._sigma_deplacement_ref, self._sigma_deplacement_opt):\n if sigma_vrai < self._sigpres or sigma_vrai == 0:\n g = 1.0\n else:\n g = (sigma_vrai - self._sigpres) / sigma_ref\n self._gopt.append(g)\n self._gopt = np.array(self._gopt)\n\n # abbatements gs\n self._g_s = []\n for sigma_ref, sigma_vrai in zip(self._sigma_sismique_ref, self._sigma_sismique):\n if sigma_vrai < self._sigpres or sigma_vrai == 0:\n g = 1.0\n else:\n g = (sigma_vrai - self._sigpres) / sigma_ref\n self._g_s.append(g)\n self._g_s = np.array(self._g_s)\n\n # abbatements gsopt\n self._g_sopt = []\n for sigma_ref, sigma_vrai in zip(self._sigma_sismique_ref, self._sigma_sismique_opt):\n if sigma_vrai < self._sigpres or sigma_vrai == 0:\n g = 1.0\n else:\n g = (sigma_vrai - self._sigpres) / sigma_ref\n 
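# store the per-component abatement computed with the enveloping springs r_smax\n 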
self._g_sopt.append(g)\n self._g_sopt = np.array(self._g_sopt)\n\n return self._g, self._g_s, self._gopt, self._g_sopt\n\n def calcul_sigma_eq(self):\n \"\"\"calcule le sigma equivalent\"\"\"\n\n self._sigma_eq = (\n 1\n / self._Z\n * (\n self._D_21**2\n * (\n abs(self._mt_sismique_qs)\n + self._g * abs(self._mt_deplacement_impose)\n + self._g_s * abs(self._mt_sismique_dyn)\n )\n ** 2\n + self._D_22**2\n * (\n abs(self._mfy_poids_propre)\n + abs(self._mfy_sismique_qs)\n + self._g * abs(self._mfy_dilatation_thermique)\n + self._g_s * abs(self._mfy_sismique_dyn)\n )\n ** 2\n + self._D_23**2\n * (abs(self._mfz_sismique_qs) + self._g_s * abs(self._mfz_sismique_dyn)) ** 2\n )\n ** 0.5\n )\n\n self._sigma_eq_opt = (\n 1\n / self._Z\n * (\n self._D_21**2\n * (\n abs(self._mt_sismique_qs)\n + self._gopt * abs(self._mt_deplacement_impose)\n + self._g_sopt * abs(self._mt_sismique_dyn)\n )\n ** 2\n + self._D_22**2\n * (\n abs(self._mfy_poids_propre)\n + abs(self._mfy_sismique_qs)\n + self._gopt * abs(self._mfy_dilatation_thermique)\n + self._g_sopt * abs(self._mfy_sismique_dyn)\n )\n ** 2\n + self._D_23**2\n * (abs(self._mfz_sismique_qs) + self._g_sopt * abs(self._mfz_sismique_dyn)) ** 2\n )\n ** 0.5\n )\n\n return self._sigma_eq, self._sigma_eq_opt\n","repo_name":"Krande/code-aster-copy","sub_path":"astest/sdll156a_fonctions.py","file_name":"sdll156a_fonctions.py","file_ext":"py","file_size_in_byte":11557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25321413156","text":"from typing import Union, Dict, List\nfrom server.util import APIError, to_float\nfrom server.logging import get_logger\nimport numpy as np\n\nLOGGER = get_logger(__name__)\n\n\nclass Endpoint:\n def __init__(self, endpoint_data:Dict):\n self.route = endpoint_data[\"route\"]\n self.model_name = endpoint_data[\"model_name\"]\n self.inputs = endpoint_data[\"inputs\"]\n self.outputs = endpoint_data[\"outputs\"]\n\n def _normalize_input(self, value: Union[str, int, float], input_info: Dict):\n try:\n return (float(value) - input_info.get(\"offset\", 0)) / input_info.get(\n \"scaling\", 1\n )\n except ValueError:\n message = f\"Input could not be coerced to float: {input_info} = {value}\"\n LOGGER.error(message)\n raise APIError(message, 400)\n\n def _denormalize_output(self, value: Union[float, np.floating], output_info: Dict):\n return (to_float(value) * output_info.get(\"scaling\", 1)) + output_info.get(\n \"offset\", 0\n )\n\n def process_input(self, data:Dict) -> List[float]:\n sample = []\n for input_info in self.inputs:\n value = data.get(input_info[\"name\"])\n if value is not None:\n sample.append(self._normalize_input(value, input_info))\n else:\n sample.append(0.0)\n return sample\n\n def process_output(self, results:List[List[Union[float, np.floating]]]) -> Dict[str,Union[float, np.floating]]:\n if len(self.outputs) == 1:\n return {\n self.outputs[0][\"name\"]: self._denormalize_output(\n results, self.outputs[0]\n )\n }\n return {\n b[\"name\"]: self._denormalize_output(a, b)\n for a, b in zip(results[0], self.outputs)\n }\n","repo_name":"fitle-dev/scikit-deploy","sub_path":"scikit_deploy/scikit_deploy_server/server/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"}
{"seq_id":"20701109325","text":"# -*- coding: utf-8 -*-\n\n\nimport MySQLdb\n\n\nclass GetUid:\n def __init__(self):\n self.conn = MySQLdb.connect(\n host=\"xxxxx\",\n 
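# placeholder connection settings: the real host/user/password are masked as \"xxxxx\" in the published source\n 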
port=3306,\n user=\"xxxxx\",\n passwd=\"xxxxx\",\n database=\"xxxxx\",\n charset='utf8',\n )\n self.cur = self.conn.cursor()\n self.dic = dict()\n self.lis = []\n\n def execute_sql(self, s, e):\n sql = 'select wb_url, wb_name from t_media_wb where id ' \\\n 'between %d and %d' % (s, e)\n try:\n self.cur.execute(sql)\n data = self.cur.fetchall()\n for row in data:\n self.dic['url'] = row[0]\n self.dic['username'] = row[1]\n self.lis.append(dict(self.dic))\n return self.lis\n except:\n print('Error: unable to fetch data from db')\n self.conn.close()\n\n def save_content(self, content):\n sql = 'insert into wb_content(wb_id, content_id, content_url, content, pic, forward, comment, `like`, `type`, `from`, pub_time, grab_time) ' \\\n 'values(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' \\\n % (content['uid'], content['id'], content['url'], content['content'], content['pic'], content['forward'],\n content['comment'], content['like'], content['type'], content['from'], content['pub_time'], content['grab_time'])\n\n try:\n self.cur.execute(sql)\n self.conn.commit()\n except:\n self.conn.rollback()\n print(sql)\n print('fail to insert %s' % content['url'])\n","repo_name":"LichMscy/Weibo","sub_path":"Weibo_v2/searchuid.py","file_name":"searchuid.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73746228393","text":"unit_name_list = [\n \"Alice\",\n \"Becky\",\n \"Cathy\",\n \"Daisy\",\n \"Emilia\",\n \"Flora\",\n \"Grace\",\n\n \"Reina\",\n \"Selena\",\n \"Tina\",\n \"Vivian\",\n \"Willow\",\n \"Yvonne\",\n \"Zara\"\n ]\n\n# unit_name_listから、\n# class SkillOfName(Skill):\n# def __init__(self):\n# super().__init__()\n # def effect(\n # self,\n # ALL_UNIT_LIST,\n # FIELD,\n # ALL_AREA_LIST,\n # ALL_BENCH_LIST,\n # ALL_OUTSIDE_LIST,\n # STARTING_MEMBER_LIST,\n # SKILL_PROCESSOR\n # ):\n# をつくる\nfor name in unit_name_list:\n print(\n \"class\", \"SkillOf{}(Skill):\".format(name)\n )\n print(\" def __init__(self):\")\n print(\" super().__init__()\")\n print(\"\"\" def effect(\n self,\n ALL_UNIT_LIST,\n FIELD,\n ALL_AREA_LIST,\n ALL_BENCH_LIST,\n ALL_OUTSIDE_LIST,\n STARTING_MEMBER_LIST,\n SKILL_PROCESSOR\n ):\n pass\"\"\")\n print(\"\")\n\n\n# # unit_name_listから、\n# # skill_of_Name = SkillOfName()\n# # をつくる\n# for name in unit_name_list:\n# print(\n# \"skill_of_{} = SkillOf{}()\".format(name, name)\n# )","repo_name":"qoolaaq/summon_qoolaaq_2020","sub_path":"test_for_effect_write.py","file_name":"test_for_effect_write.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25854035849","text":"from pyspark.sql import SparkSession, functions, types\nspark = SparkSession.builder.appName('example application').getOrCreate()\nsc = spark.sparkContext\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.sql import Row\nimport pandas as pd\n\n\n\ntrainfile = 'train_rating.txt'\ntestfile = 'test_rating.txt'\n\n# trainfile = '/home/ravibisla/PycharmProjects/DataScience/train_rating.txt'\n# testfile = '/home/ravibisla/PycharmProjects/DataScience/test_rating.txt'\n\ntrainDf=spark.read.csv(trainfile,header='true')\ntestDf=spark.read.csv(testfile,header='true')\n# trainDf1=trainDf.select('user_id','business_id','rating')\n# testDf1=testDf.select('user_id','business_id','rating')\n\ntrainDf2 = 
trainDf.select(trainDf.user_id.cast('int').alias('userid'),trainDf.business_id.cast('int').alias('business_id'),trainDf.rating.cast('float').alias('rating'))\ntestDf2 = testDf.select(testDf.user_id.cast('int').alias('userid'),testDf.business_id.cast('int').alias('business_id'))\n\n\n(training, test) = trainDf2.randomSplit([0.8, 0.2])\n# Build the recommendation model using ALS on the training data\n\n\n# als = ALS(maxIter=20, regParam=0.1,rank=20, userCol=\"userid\", itemCol=\"business_id\", ratingCol=\"rating\",\n# coldStartStrategy=\"drop\")\n# als = ALS(maxIter=20, regParam=0.1,rank=20, userCol=\"userid\", itemCol=\"business_id\", ratingCol=\"rating\",\n# coldStartStrategy=\"drop\") # 1.6653266320794866\nals = ALS(maxIter=20, regParam=0.1,rank=20, userCol=\"userid\", itemCol=\"business_id\", ratingCol=\"rating\",\n coldStartStrategy=\"drop\") # 1.6653266320794866\nmodel = als.fit(training)\n\n# Evaluate the model by computing the RMSE on the test data\n\n\npredictions = model.transform(test)\n#Masking\n\npredpand=predictions.toPandas()\nmask = predpand.prediction > 5\nmask1 = predpand.prediction < 0\ncolumn_name = 'prediction'\npredpand.loc[mask, column_name] = 5\npredpand.loc[mask1, column_name] = 1\nevall= spark.createDataFrame(predpand)\n\n# End Masking\nevaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"rating\",\n predictionCol=\"prediction\")\n\nrmse = evaluator.evaluate(evall)\nprint(\"Root-mean-square error = \" + str(rmse))","repo_name":"bislaravi/Recommender_System","sub_path":"ALS_Spark.py","file_name":"ALS_Spark.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21837588601","text":"# Import models and utility functions\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.datasets import load_digits\nfrom sklearn import metrics\nfrom scipy.ndimage import shift\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom keras.datasets import mnist\n\n\n# Setting SEED for reproducibility\nSEED = 23\n\n# Importing the dataset\n# X, y = load_digits(return_X_y=True)\n\n# Splitting dataset\n# train_X, test_X, train_y, test_y = train_test_split(\n# X, y, test_size=0.25, random_state=SEED\n# )\n\n(train_X, train_y), (test_X, test_y) = mnist.load_data()\ntrain_X = train_X.reshape(train_X.shape[0], -1)\ntest_X = test_X.reshape(test_X.shape[0], -1)\n\n\n# Creating Augmented Dataset\n\n\n# Method to shift the image by given dimension\ndef shift_image(image, dx, dy):\n image = image.reshape((28, 28))\n shifted_image = shift(image, [dy, dx], cval=0, mode=\"constant\")\n return shifted_image.reshape([-1])\n\n\nX_train_augmented = [image for image in train_X]\ny_train_augmented = [image for image in train_y]\n\nprint(\"creating augmented data source\")\nfor dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n for image, label in zip(train_X, train_y):\n X_train_augmented.append(shift_image(image, dx, dy))\n y_train_augmented.append(label)\n\n\n# Shuffle the dataset\nshuffle_idx = np.random.permutation(len(X_train_augmented))\ntrain_X = np.array(X_train_augmented)[shuffle_idx]\ntrain_y = np.array(y_train_augmented)[shuffle_idx]\n\n# Instantiate Gradient Boosting Classifier\ngbc = HistGradientBoostingClassifier(\n max_iter=2000, learning_rate=0.05, random_state=100, max_bins=10, verbose=1\n)\n# Fit to training set\nresult = gbc.fit(train_X, 
train_y)\nprint(result.n_iter_)\n\n# Predict on test set\npred_y = gbc.predict(test_X)\nprint(pred_y)\nprint(test_y)\n\n# accuracy\nacc = accuracy_score(test_y, pred_y)\nprint(\"Gradient Boosting Classifier accuracy is : {:.3f}\".format(acc))\n\ndisp = metrics.ConfusionMatrixDisplay.from_predictions(test_y, pred_y)\ndisp.figure_.suptitle(\"Confusion Matrix\")\nprint(f\"Confusion matrix:\\n{disp.confusion_matrix}\")\n\nplt.show()\n","repo_name":"delabroj/python-templates","sub_path":"handwritten-digits-gradient-boost/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16223806075","text":"def opera(x,y,oper):\n if oper == \"+\":\n z = x+y\n elif oper == \"-\":\n z = x-y \n elif oper == \"*\":\n z = x*y \n elif oper == \"/\":\n if y != 0:\n z = x/y\n else:\n z = None\n else:\n z = None\n return z\n\nx = int(input(\"Digite o primero número: \"))\ny = int(input(\"Digite o segundo número: \"))\noper = str(input(\"Escolha a operação:\\n(+) para SOMA\\n(-) para SUBTRAÇÃO\\n(*) para MULTIPLICAÇÃO\\n(/) para DIVISÃO\\n \"))\nif (opera(x,y,oper)) == None:\n print(\"ERRO\")\nelse:\n print(opera(x,y,oper))","repo_name":"GabGomes16/Algoritmos-e-Logica-de-Programacao-II","sub_path":"trimestre-1/calculadora-simples.py","file_name":"calculadora-simples.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42251065163","text":"import pytest\n\nfrom projectq.cengines import MainEngine, DummyEngine\nfrom projectq.ops import H, CNOT, X, Measure\n\nfrom projectq.backends import ResourceCounter\n\n\nclass MockEngine(object):\n def is_available(self, cmd):\n return False\n\n\ndef test_resource_counter_isavailable():\n resource_counter = ResourceCounter()\n resource_counter.next_engine = MockEngine()\n assert not resource_counter.is_available(\"test\")\n resource_counter.next_engine = None\n resource_counter.is_last_engine = True\n\n assert resource_counter.is_available(\"test\")\n\n\ndef test_resource_counter():\n resource_counter = ResourceCounter()\n backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend, [resource_counter])\n\n qubit1 = eng.allocate_qubit()\n qubit2 = eng.allocate_qubit()\n H | qubit1\n X | qubit2\n del qubit2\n\n qubit3 = eng.allocate_qubit()\n CNOT | (qubit1, qubit3)\n\n Measure | (qubit1, qubit3)\n\n assert int(qubit1) == int(qubit3)\n assert int(qubit1) == 0\n\n assert resource_counter.max_width == 2\n\n str_repr = str(resource_counter)\n assert str_repr.count(\"H\") == 1\n assert str_repr.count(\"X\") == 2\n assert str_repr.count(\"CX\") == 1\n assert str_repr.count(\"Allocate : 3\") == 1\n assert str_repr.count(\"Deallocate : 1\") == 1\n\n sent_gates = [cmd.gate for cmd in backend.received_commands]\n assert sent_gates.count(H) == 1\n assert sent_gates.count(X) == 2\n assert sent_gates.count(Measure) == 1\n\n\ndef test_resource_counter_str_when_empty():\n assert isinstance(str(ResourceCounter()), str)\n","repo_name":"thomascherickal/ProjectQ","sub_path":"Quantum Algorithms in Python/ProjectQ/ProjectQ-develop/projectq/backends/_resource_test.py","file_name":"_resource_test.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"22573699207","text":"#오픈채팅방\n\ndef solution(record):\n userDict = {}\n answer = []\n for i in record:\n userList = 
list(i.split())\n if userList[0] == \"Enter\":\n userDict[userList[1]] = userList[2]\n answer.append(\"%s님이 들어왔습니다.\" %userList[1])\n elif userList[0] == \"Leave\":\n answer.append(\"%s님이 나갔습니다.\" %userList[1])\n else:\n userDict[userList[1]] = userList[2]\n \n for i in range(len(answer)):\n a, b = answer[i].split()\n a = a[:-2]\n answer[i] = answer[i].replace(a, userDict[a])\n\n return answer\n\n\nrecord = [\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\"Leave uid1234\",\"Enter uid1234 Prodo\",\"Change uid4567 Ryan\"]\n\nprint(solution(record))\n","repo_name":"jjiwoning/Code_Test","sub_path":"python_algo/Programmers/openchat.py","file_name":"openchat.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26842812416","text":"import time\nfrom telebot import types\n\nimport commands\nfrom api.endpoint_list import list_properties\nfrom api.get_offers import get_hotel_offers\nfrom api.get_photo import hotel_photo\nfrom calenar import select_from_calenar_check_in, MyState, user_dates\nfrom commands.start import get_start\n\nfrom database.database import logging\nfrom main import bot\nimport re\n\nfrom telebot.handler_backends import State, StatesGroup # States\n\nuser_req = dict()\nuser_response = dict()\nsticer_id = 'CAACAgIAAxkBAAEG8PpjpCz9oGtfQoPyYSYH_bQhlR_a-gACIwADKA9qFCdRJeeMIKQGLAQ'\n\n\n@bot.message_handler(commands=['lowprice'])\n@logging\ndef get_lowprice(foo_message):\n msg = bot.send_message(foo_message.from_user.id, \"Команда /lowprice. Введи город на латинице\")\n bot.set_state(foo_message.from_user.id, MyState.city, foo_message.from_user.id)\n\n\n@bot.message_handler(state=MyState.city)\ndef get_city(foo_message):\n print('выполняется get city')\n city = foo_message.text\n user_req[foo_message.from_user.id] = {'city': city}\n print('Буду искать в городе', city)\n\n bot.set_state(foo_message.from_user.id, MyState.check_in, foo_message.from_user.id)\n select_from_calenar_check_in(foo_message)\n\n\n@bot.message_handler(state=MyState.count_hotel)\ndef get_resultsSize(foo_message):\n print('Исходный словарь', user_req)\n user_req[foo_message.from_user.id].update(user_dates[foo_message.from_user.id])\n print('обновил словарь', user_req)\n\n try:\n resultsSize = int(foo_message.text)\n user_req[foo_message.from_user.id].update({'resultsSize': resultsSize})\n print('Буду искать {} отелей'.format(resultsSize))\n\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n item_yes = types.KeyboardButton('Да')\n item_no = types.KeyboardButton('Нет')\n keyboard.add(item_yes, item_no)\n\n bot.send_message(foo_message.from_user.id, text='Выводим фотки отеля? (Да/Нет)', reply_markup=keyboard)\n\n bot.register_next_step_handler(foo_message, print_photo)\n\n\n except ValueError:\n bot.send_message(foo_message.from_user.id, \"Проблема с количеством отелей, необходимо ввести целое число? 
\")\n bot.register_next_step_handler(foo_message, get_resultsSize)\n\n\ndef print_photo(foo_message):\n if foo_message.text.lower() == 'да':\n bot.send_message(foo_message.from_user.id, \"Введи кол-во фоток \")\n bot.register_next_step_handler(foo_message, get_hotel_photo)\n\n elif foo_message.text.lower() == 'нет':\n make_request(foo_message, flag=False)\n\n\ndef get_hotel_photo(foo_message): # Если надо выводить фотки, то мы вызываем функцию make_request с флагом True\n cont_photo = foo_message.text\n if cont_photo.isdigit():\n cont_photo = int(cont_photo)\n make_request(foo_message, flag=True, count=cont_photo)\n\n\n else:\n bot.send_message(foo_message.from_user.id, \"Проблема с вводом. Введи целое число\")\n bot.register_next_step_handler(foo_message, get_hotel_photo)\n\n\ndef checkdate(date: str) -> bool:\n \"\"\"\nФункция, которая проверяет дату на формат dd.mm.yyyy\n :param date: str\n :return: bool\n \"\"\"\n try:\n time.strptime(date, '%d.%m.%Y')\n print('Date ok')\n return True\n except ValueError:\n print('Invalid date!')\n return False\n\n\ndef make_request(foo_message, flag, count=0):\n print(user_req)\n msg = foo_message\n\n city = user_req[foo_message.from_user.id]['city']\n resultsSize = user_req[foo_message.from_user.id]['resultsSize']\n checkindate = user_req[foo_message.from_user.id]['checkindate']\n checkoutdate = user_req[foo_message.from_user.id]['checkoutdate']\n\n print('Дата заезда: ', checkindate)\n print('Дата выезда', checkoutdate)\n print('Делаю запрос по городу', city)\n bot.send_sticker(foo_message.from_user.id, sticer_id)\n hotels, region_id = list_properties(city=city, resultsSize=resultsSize, check_in_date=checkindate,\n check_out_date=checkoutdate)\n print('Найденные отели: ', hotels)\n\n check_in_day = int(checkindate.split('.')[0])\n check_in_month = int(checkindate.split('.')[1])\n check_in_year = int(checkindate.split('.')[2])\n\n check_out_day = int(checkoutdate.split('.')[0])\n check_out_month = int(checkoutdate.split('.')[1])\n check_out_year = int(checkoutdate.split('.')[2])\n\n if len(hotels) > 0:\n\n user_response[msg.from_user.id] = {}\n for name_hotel, data in hotels.items():\n # bot.send_message(foo_message.from_user.id, name_hotel)\n hotel_id = data.get('id')\n url = data.get('url_photo')\n\n print('name', name_hotel)\n print('id', hotel_id)\n print('Фото', url)\n\n photos, hotel_latitude, hotel_longitude, hotel_reviews_rating, hotel_address_line = hotel_photo(hotel_id)\n price_message = get_hotel_offers(property_id=hotel_id, in_day=check_in_day, in_month=check_in_month,\n in_year=check_in_year, out_day=check_out_day, out_month=check_out_month,\n out_year=check_out_year, latitude=hotel_latitude,\n longitude=hotel_longitude, region_id=region_id)\n\n user_response[msg.from_user.id][hotel_id] = {'data': [], 'photo': []}\n # собираем инфу по отелю в словарь\n user_response[msg.from_user.id][hotel_id]['data'].append(\n 'Название отеля: {name}\\nАдрес отеля: {adress}\\nРейтинг отеля: {id}\\nСтоимость отеля: {price}\\n'.format(\n name=name_hotel, adress=hotel_address_line, id=hotel_reviews_rating,\n price=price_message))\n\n if flag is True:\n\n for index, url_photo in enumerate(iter(photos)):\n if index < count:\n # bot.send_photo(msg.from_user.id, url_photo)\n user_response[msg.from_user.id][hotel_id]['photo'].append(url_photo)\n\n else:\n break\n\n for i_hotel in user_response[msg.from_user.id]:\n bot.send_message(msg.from_user.id, user_response[msg.from_user.id][f'{i_hotel}']['data'])\n if flag is True:\n for i_url_photo in 
user_response[msg.from_user.id][f'{i_hotel}']['photo']:\n bot.send_photo(msg.from_user.id, i_url_photo)\n\n else:\n bot.send_message(foo_message.from_user.id, 'Ничего не найдено /start')\n\n print(user_req)\n get_start(foo_message)\n","repo_name":"gistr1/hotel_telegram_bot","sub_path":"commands/lowprice.py","file_name":"lowprice.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73842684391","text":"def compress(chars):\r\n # anchor为起始位置\r\n anchor = write = 0\r\n for read, c in enumerate(chars):\r\n # 当读到最后一个字符,或者下一个下一个字符与当前不同时\r\n if read + 1 == len(chars) or chars[read + 1] != c: \r\n chars[write] = chars[anchor] # write character\r\n write += 1\r\n if read > anchor:\r\n for digit in str(read - anchor + 1):\r\n chars[write] = digit # write digits\r\n write += 1\r\n anchor = read + 1\r\n return write\r\n\r\n\r\ndef compress(self, chars: List[str]) -> int:\r\n n = len(chars)\r\n if n == 1:\r\n return 1\r\n fast, slow = 0, 0\r\n \r\n while fast < len(chars):\r\n if chars[fast] == chars[slow]:\r\n fast += 1\r\n elif chars[fast] != chars[slow]:\r\n if fast - slow != 1:\r\n length = str(fast - slow)\r\n tmp = [chars[slow]]\r\n for i in length:\r\n tmp.append(i)\r\n chars[slow:fast]= tmp\r\n slow += len(tmp)\r\n else:\r\n chars[slow:fast]= [chars[slow]]\r\n slow += 1\r\n fast = slow + 1\r\n \r\n if fast - slow != 1:\r\n length = str(fast - slow)\r\n tmp = [chars[slow]]\r\n for i in length:\r\n tmp.append(i)\r\n chars[slow:fast]= tmp\r\n else:\r\n chars[slow:fast]= [chars[slow]]\r\n return len(chars)","repo_name":"YuanyuanQiu/LeetCode","sub_path":"0443 String Compression.py","file_name":"0443 String Compression.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21653708446","text":"import re\n\nimport urwid\nimport pyperclip\nimport webbrowser\nfrom sclack.store import Store\nfrom sclack.component.time import Time\n\n\nclass Message(urwid.AttrMap):\n __metaclass__ = urwid.MetaSignals\n signals = [\n 'delete_message',\n 'edit_message',\n 'get_permalink',\n 'go_to_profile',\n 'go_to_sidebar',\n 'quit_application',\n 'set_insert_mode',\n 'mark_read',\n ]\n\n def __init__(self, ts, channel_id, user, text, indicators, reactions=(), attachments=()):\n self.ts = ts\n self.channel_id = channel_id\n self.user_id = user.id\n self.markdown_text = text\n self.original_text = text.original_text\n self.text_widget = urwid.WidgetPlaceholder(text)\n main_column = [urwid.Columns([('pack', user), self.text_widget])]\n main_column.extend(attachments)\n self._file_index = len(main_column)\n if reactions:\n main_column.append(urwid.Columns([\n ('pack', reaction) for reaction in reactions\n ]))\n self.main_column = urwid.Pile(main_column)\n columns = [\n ('fixed', 7, Time(ts)),\n self.main_column,\n ('fixed', indicators.size, indicators)\n ]\n self.contents = urwid.Columns(columns)\n super(Message, self).__init__(self.contents, None, {\n None: 'active_message',\n 'message': 'active_message'\n })\n\n def keypress(self, size, key):\n keymap = Store.instance.config['keymap']\n\n if key == keymap['delete_message']:\n urwid.emit_signal(self, 'delete_message', self, self.user_id, self.ts)\n return True\n elif key == keymap['edit_message']:\n urwid.emit_signal(self, 'edit_message', self, self.user_id, self.ts, self.original_text)\n return True\n elif key == keymap['go_to_profile']:\n urwid.emit_signal(self, 
'go_to_profile', self.user_id)\n return True\n elif key == keymap['go_to_sidebar'] or key == keymap['cursor_left']:\n urwid.emit_signal(self, 'go_to_sidebar')\n return True\n elif key == keymap['quit_application']:\n urwid.emit_signal(self, 'quit_application')\n return True\n elif key == keymap['set_insert_mode']:\n urwid.emit_signal(self, 'set_insert_mode')\n return True\n elif key == keymap['yank_message']:\n try:\n pyperclip.copy(self.original_text)\n except pyperclip.PyperclipException:\n pass\n return True\n elif key == keymap['get_permalink']:\n # FIXME\n urwid.emit_signal(self, 'get_permalink', self, self.channel_id, self.ts)\n elif key == 'enter':\n browser_name = Store.instance.config['features']['browser']\n\n for item in self.markdown_text.markup:\n type, value = item\n\n if type == 'link' and re.compile(r'^https?://').search(value):\n browser_instance = webbrowser if browser_name == '' else webbrowser.get(browser_name)\n browser_instance.open(value, new=2)\n break\n\n return super(Message, self).keypress(size, key)\n\n def set_text(self, text):\n self.text_widget.original_widget = text\n\n def set_edit_mode(self):\n self.set_attr_map({\n None: 'editing_message',\n 'message': 'editing_message'\n })\n\n def unset_edit_mode(self):\n self.set_attr_map({\n None: None,\n 'message': None\n })\n\n def selectable(self):\n return True\n\n @property\n def file(self):\n return None\n\n @file.setter\n def file(self, file):\n self.main_column.contents.insert(self._file_index, (file, ('pack', 1)))\n","repo_name":"haskellcamargo/sclack","sub_path":"sclack/component/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":2436,"dataset":"github-code","pt":"72"} +{"seq_id":"6732504406","text":"import json\nfrom pprint import pprint\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nBASE_URL = \"https://datki.net/komplimenti/podruge/prikolnie/\"\n\n\ndef get_soup(url: str) -> BeautifulSoup:\n \"\"\"Get soup of page by it's url.\"\"\"\n html = requests.get(url).text\n return BeautifulSoup(html, \"html.parser\")\n\n\ndef replace_br(text):\n return str(text).replace(\"
<br/>\", \"\\n\").replace(\"<p>\", \"\").replace(\"</p>
\", \"\")\n\n\ndef get_compliments(url: str = BASE_URL):\n soup = get_soup(url)\n\n result = []\n\n wrapper = soup.find(\"div\", class_=\"content-entry-wrap\")\n articles = wrapper.find_all(\"div\", class_=\"entry-summary\")\n inner_items = [item.find_all(\"p\") for item in articles]\n for items in inner_items:\n wrap = []\n for item in items:\n wrap.append(replace_br(item))\n result.append(wrap)\n\n return result\n\n\ncompliments = get_compliments() + get_compliments(\"https://datki.net/komplimenti\")\n\nwith open(\"compliments.json\", mode=\"w\", encoding=\"utf-8\") as file:\n json.dump(\n compliments,\n file,\n indent=4,\n ensure_ascii=False,\n )\n","repo_name":"IldarSaygafarov2/compliments_bot","sub_path":"helpers/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42294783048","text":"import numpy as np\nimport pandas as pd\n\ndef predict(movie):\n\n credits = pd.read_csv(\"tmdb_5000_credits.csv\")\n\n movies= pd.read_csv(\"tmdb_5000_movies.csv\")\n\n movies\n\n movies=movies.merge(credits,on='title')\n movies.shape\n\n movies = movies [['genres','id','keywords','title','overview','cast','crew']]\n movies.head(1)\n\n movies.isnull().sum()\n\n movies=movies.dropna()\n movies.isnull().sum()\n\n movies.duplicated().sum()\n\n movies.iloc[0].genres\n\n import ast\n\n def convert(obj):\n l=[]\n for i in ast.literal_eval(obj):\n l.append(i['name'])\n return l\n\n movies['genres']=movies['genres'].apply(convert)\n movies['genres']\n\n movies.head(1)\n\n movies['keywords']=movies['keywords'].apply(convert)\n movies.head(1)\n\n movies['cast']\n\n\n def convert3(obj):\n l=[]\n count=0\n for i in ast.literal_eval(obj):\n if (count<3):\n l.append(i['character'])\n count+=1\n return l\n\n\n\n movies['cast']=movies['cast'].apply(convert3)\n\n movies.head(1)\n\n import ast\n\n def fetch_director(obj):\n l=[]\n\n for i in ast.literal_eval(obj):\n if i['job']=='Director':\n l.append(i['name'])\n break\n return l\n\n movies['crew']=movies['crew'].apply(fetch_director)\n\n movies['crew']\n\n movies['overview']=movies['overview'].apply(lambda x:x.split())\n\n movies['overview']\n\n movies['genres']=movies['genres'].apply(lambda x : [i.replace(\" \",\"\") for i in x])\n movies['overview']=movies['overview'].apply(lambda x : [i.replace(\" \",\"\") for i in x])\n movies['crew']=movies['crew'].apply(lambda x : [i.replace(\" \",\"\") for i in x])\n movies['cast']=movies['cast'].apply(lambda x : [i.replace(\" \",\"\") for i in x])\n movies['keywords']=movies['keywords'].apply(lambda x : [i.replace(\" \",\"\") for i in x])\n\n movies.head(1)\n\n movies['tags']=movies['genres']+movies['overview']+movies['crew']+movies['cast']+movies['keywords']\n movies.head(1)\n\n new_df = movies[['id','title','tags']]\n\n new_df\n\n new_df['tags']=new_df['tags'].apply(lambda x:\" \".join(x))\n\n new_df['tags']\n\n new_df['tags']=new_df['tags'].apply(lambda x:x.lower())\n\n new_df['tags']\n\n new_df['tags'][0]\n\n new_df[['tags']]\n\n from nltk.stem.porter import PorterStemmer\n ps=PorterStemmer()\n\n def stem(text):\n y=[]\n for i in text.split():\n y.append(ps.stem(i))\n return \" \".join(y)\n\n #pd.options.mode.chained_assignment = None\n new_df['tags']=new_df['tags'].apply(stem)\n\n new_df['tags'][1300]\n\n\n from sklearn.feature_extraction.text import CountVectorizer\n cv=CountVectorizer(max_features=5000,stop_words='english')\n\n vectors=cv.fit_transform(new_df['tags']).toarray()\n\n 
vectors\n\n #cv.get_feature_names()\n\n from sklearn.metrics.pairwise import cosine_similarity\n\n\n similarity=cosine_similarity(vectors)\n\n def recommed(movie):\n movie_index = new_df[new_df['title']==movie].index[0]\n x=similarity[movie_index]\n l = sorted(list(enumerate(x)),reverse=True,key=lambda x:x[1])[0:6]\n\n ans=[]\n\n for i in l:\n ans.append(new_df.iloc[i[0]].title)\n\n return ans\n\n ans=recommed(movie)\n return ans\n\n\n","repo_name":"Ajitphulmante/movie_recommend_system","sub_path":"pratice.py","file_name":"pratice.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37207221019","text":"import os\n\nfrom celery import Celery\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\ncelery_app = Celery('application')\ncelery_app.config_from_object('django.conf:settings', namespace='CELERY')\ncelery_app.autodiscover_tasks()\ncelery_app.conf.enable_utc = False\n","repo_name":"PlagerX-Group/django-automation-telegram-bot","sub_path":"source/application/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41734539615","text":"\"\"\"Descrição\nVocê deve ler uma sequência de caracteres e imprimir na ordem inversa da leitura.\n\nFormato de entrada\n\nConsiste de um inteiro n, indicando quantos caracteres devem ser lidos e uma sequencia de n caracteres. A entrada termina quando n=0.\n\nConsidere que n é sempre n<=100000.\n\nFormato de saída\n\nn caracteres impressos na ordem inversa\"\"\"\n\n\nwhile True:\n numer_de_caracteres = int (input())\n if numer_de_caracteres == 0:\n break\n\n caracteres = input()\n caracteres_invertidos = []\n\n posicao = -1\n\n for i in range (len(caracteres)):\n caracteres_invertidos.append(caracteres[posicao])\n posicao += -1\n \n caracteres_invertidos = \"\".join(caracteres_invertidos)\n\n print (caracteres_invertidos)","repo_name":"Andreza-S/Codando","sub_path":"códigos python/Caracteres.py","file_name":"Caracteres.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43942951607","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'tddtutor.views.home', name='home'),\n # url(r'^tddtutor/', include('tddtutor.foo.urls')),\n url(r'^polls/$', 'polls.views.show_all_pools'),\n url(r'^poll/(?P\\d+)$', 'polls.views.poll'),\n\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"akava/django-testing-tutorial","sub_path":"tddtutor/tddtutor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27752296218","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\na = list(map(int, input().split()))\r\nhalf = sum(a)//2 # 이 값보단 커야됨\r\na = list(enumerate(a))\r\na.sort(key= lambda x: -x[1])\r\ndp = [[] for _ in range(half*2 + 2)]\r\ndp[0].append(0)\r\nIdx = 0\r\nfor idx, val in a:\r\n for i in range(half,-1,-1):\r\n if (dp[i]) and (not dp[i+val]):\r\n dp[i+val] = list(dp[i])\r\n dp[i+val].append(idx+1)\r\n Idx = max(Idx, 
i+val)\r\ndp[Idx].remove(0)\r\nprint(len(dp[Idx]))\r\nprint(*dp[Idx])\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"nube-net/baekjoon-nube-net-gytjdttop-","sub_path":"백준/Platinum/1226. 국회/국회.py","file_name":"국회.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39811670942","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n###-------------------------------------------------------------------\n### File : pde.py\n### Author : Oleg Baskakov\n### Description : Partial Differencial Equation\n###\n### 2011. Written for Moscow Aviation Institute.\n###-------------------------------------------------------------------\n\nfrom math import sqrt,sin,cos,tan,pi,log,exp\nfrom copy import copy, deepcopy\nfrom functools import reduce\nfrom itertools import product as dekart_product\nimport matrix_2\n\neps = 0.0001\n\ndef frange(x0, x1, d):\n\treturn [ x0 + d*k for k in range(int((x1-x0+eps/10)/d)+1)]\n\ndef print_mat(mat):\n\tprint(\"matrix:\")\n\tfor row in mat:\n\t\tfor elem in row:\n\t\t\tprint(\" %.2f \"%elem, end = '\\t')\n\t\tprint()\n\tprint(\"--------\")\n\ndef print_vec(row):\n\tprint(\"vector:\")\n\tfor elem in row:\n\t\tprint(round(elem, 4), end = \"\\t\")\n\tprint()\n\tprint(\"--------\")\n\ndef el_func(x, el, grid):\n\tif not( 0 <= el <= len(grid)): return 0\n\t\n\tz = -abs(x - grid[el])/(grid[1] - grid[0]) + 1.0\n\tif z<0: z = 0\n\treturn z\n\npoints = [(0.5, 1.5),\n\t\t (0.0, 2.0),\n\t\t (1.0, 2.0),\n\t\t (0.0, 1.0),\n\t\t (1.0, 1.0),\n\t\t (0.5, 0.5),\n\t\t (0.0, 0.0),\n\t\t (1.0, 0.0),\n\t\t (1.5, 0.5),\n\t\t (2.0, 1.0),\n\t\t (2.0, 0.0)]\n\nlim_point_tmp = [1,2,4,9,10,7,6,3,1]\n\nlim_points = list(zip(lim_point_tmp, lim_point_tmp[1:]))\nlim_points += [(b,a) for a,b in lim_points]\n\n\n\n\n\nelems_tmp = [(1,2,3),\n\t\t (1,2,4),\n\t\t (1,3,5),\n\t\t (1,4,5),\n\t\t (6,4,5),\n\t\t (6,7,4),\n\t\t (6,5,8),\n\t\t (6,8,7),\n\t\t (9,5,10),\n\t\t (9,8,5),\n\t\t (9,10,11),\n\t\t (9,11,8)]\n\t\t \nelems = [(a-1,b-1,c-1) for (a,b,c) in elems_tmp]\n\n\ndef elem_fun(koef):\n\treturn lambda x,y: koef[0] + koef[1]*x + koef[2]*y\n\n# F = elem_plane(elems[0],0)\n\ndef elem_fun_all(el, i):\n\tA = [k for k in [0,1,2] if elems[el][k] == i]\n\tif A:\n\t\treturn elem_fun(el, A[0])\n\telse:\n\t\treturn lambda x,y: 0.0\n\n\ndef gen_elems():\n\t\"\"\"undefined\"\"\"\n\treturn [(a-1,b-1,c-1) for (a,b,c) in elems_tmp]\n\ndef gen_lim_points():\n\t\"\"\"undefined\"\"\"\n\tlim_point_tmp = [1,2,4,9,10,7,6,3,1]\n\tlim_points = list(zip(lim_point_tmp, lim_point_tmp[1:]))\n\tlim_points += [(b,a) for a,b in lim_points]\n\treturn lim_points\n\n\n\ndef el_func(x, el, grid):\n\tif not( 0 <= el <= len(grid)): return 0\n\t\n\tz = -abs(x - grid[el])/(grid[1] - grid[0]) + 1.0\n\tif z<0: z = 0\n\treturn z\n\ndef gen_points():\n\treturn [(0.5, 1.5),\n\t\t\t (0.0, 2.0),\n\t\t\t (1.0, 2.0),\n\t\t\t (0.0, 1.0),\n\t\t\t (1.0, 1.0),\n\t\t\t (0.5, 0.5),\n\t\t\t (0.0, 0.0),\n\t\t\t (1.0, 0.0),\n\t\t\t (1.5, 0.5),\n\t\t\t (2.0, 1.0),\n\t\t\t (2.0, 0.0)]\n\n\n\nalpha = 1\nphi = lambda x,y: 1.0\nfunc_start = lambda x,y: 1.0\n\n\nelems_tmp = [(1,2,3),\n\t\t (1,2,4),\n\t\t (1,3,5),\n\t\t (1,4,5),\n\t\t (6,4,5),\n\t\t (6,7,4),\n\t\t (6,5,8),\n\t\t (6,8,7),\n\t\t (9,5,10),\n\t\t (9,8,5),\n\t\t (9,10,11),\n\t\t (9,11,8)]\n\t\t \n\ndef elem_plane(elem, p):\n# elem - num of element, p - point of element\n\tif not(p in elem): return None\n\tsystem = [ [1.0, points[p][0], points[p][1]] for p in elem ]\n\tb = [0.0] * 3\n\tb[elem.index(p)] = 
1.0\n\t\n\tm = matrix_2.Matrix(m = 3, n = 3)\n\tm.M = system\n\tm.build_LU()\n#\tprint (\"Solve:\")\n\tkoef = m.solve(m.shift_b(b))\n\tkoef = [round(xx, 2) for xx in koef]\n#\tprint(\"koef = \", koef)\n\treturn koef\n\ndef elem_fun(koef):\n\treturn lambda x,y: koef[0] + koef[1]*x + koef[2]*y\n\nF = elem_plane(elems[0],0)\n\ndef elem_fun_all(el, i):\n\tA = [k for k in [0,1,2] if elems[el][k] == i]\n\tif A:\n\t\treturn elem_fun(el, A[0])\n\telse:\n\t\treturn lambda x,y: 0.0\n\n\ndef generate():\n\t\"\"\"Generate params for FEM solver\"\"\"\n\tpoints = []\n\tborder = []\n\telems = []\n\t\n\tlx = 1.0\n\tly = 1.0\n\thx = 0.25\n\thy = 0.25\n\t\n\tfield = None\n\t\n\tfield = [[(y,x) for x in frange(0, lx, hx)]\n\t for y in frange(0, ly, hy)]\n\t\n\tm = len(field)\n\tn = len(field[0])\n\t\n\tprint(\"n =\", n)\n\tprint(\"m =\", m)\n\t\n\t#correct last string\n\tnum = lambda i,j: i*(2*m-1) + ((i!=n-1) + 1)*j\n\n\tprint (\"test\", num(0,0), num(1,0), num(0,1), num(2,1))\n\n\tfor i in range(n):\n\t\tborder.append(field[0][i])\n\tfor i in range(m):\n\t\tborder.append(field[i][-1])\n\tfor i in range(n):\n\t\tborder.append(field[-1][n-1-i])\n\tfor i in range(m):\n\t\tborder.append(field[m-1-i][0])\n\n#\tborder.extend(field[0])\n#\tborder.extend(list(zip(*field))[0])\n#\ttmp = list(field[-1])\n#\ttmp.reverse()\n#\tborder.extend(tmp)\n#\ttmp = list(list(zip(*field))[-1])\n#\ttmp.reverse()\n#\tborder.extend(tmp)\n\t\n\tpoints = [(0.0,0.0)] * (n*m + (n-1)*(m-1))\n\t\n\tprint(\"len\", len(points))\n\t\n\tfor i in range(n-1):\n\t\tfor j in range(m-1):\n\t\t\t# add point's coord\n\t\t\tpoints[num(i+0,j+0)] = field[i+0][j+0]\n\t\t\tpoints[num(i+0,j+1)] = field[i+0][j+1]\n\t\t\tpoints[num(i+1,j+0)] = field[i+1][j+0]\n\t\t\tpoints[num(i+1,j+1)] = field[i+1][j+1]\n\t\t\t\n\t\t\t#add central point with shift = 1\n\t\t\tzx,zy = field[i][j]\n\t\t\tpoints[num(i, j) + 1] = (zx + hx/2, zy + hx/2)\n\t\t\t\n\t\t\t#add elem index\t\t\t\n\t\t\telems.append((num(i,j), num(i,j+1), num(i,j)+1))\n\t\t\telems.append((num(i,j), num(i+1,j), num(i,j)+1))\n\t\t\telems.append((num(i,j+1), num(i+1,j+1), num(i,j)+1))\n\t\t\telems.append((num(i+1,j), num(i+1,j+1), num(i,j)+1))\n\t\n\tfrom pprint import pprint\n\tpprint(field)\n\tprint('points')\n\tprint(list(enumerate(points))[:8])\n\tprint('border')\n\tpprint(list(enumerate(border)))\n\t#convert coord to index\n\tborder = [points.index(p) for p in border]\n\n\n\tborder.append(border[0])\n\t\n\tlim_points = [(a,b) for a,b in zip(border, border[1:]) if a!=b]\n#\tlim_points += [(b,a) for a,b in lim_points]\n\n\tprint('border2', border)\n\t\n\t\n\treturn elems, lim_points, points\n\ndef main():\n\te, p, b = generate()\n\n\tprint(b)\n\t\n#=====================================================================\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n","repo_name":"spetz911/CM","sub_path":"FEM/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12797068877","text":"import os\nimport re\nimport tifffile\nimport click\nimport cv2\nimport dill\nimport h5py\nimport ruamel.yaml as yaml\nimport caiman_pipeline.motion_correct as mc\nimport caiman_pipeline.util as util\nimport caiman_pipeline.extract as extract\nimport caiman as cm\nfrom glob import glob\nfrom tqdm import tqdm\nfrom os.path import join\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command(name='mat-to-tiff')\n@click.argument('input-file', type=click.Path(resolve_path=True, 
exists=True))\n@click.option('--output', '-o', default=None, type=click.Path(resolve_path=True))\ndef mat_to_tiff(input_file, output):\n output = mc.handle_mat_file(input_file, output)\n print('Data converted to tif: {}'.format(output))\n\n\n@cli.command(name='downsample-isxd')\n@click.argument('input-file', type=click.Path(exists=True, resolve_path=True))\n@click.option('--downsample', '-d', default=4, type=float)\ndef downsample_isxd(input_file, downsample):\n fname = input_file[:-5] + '-dnsmpl-{}x-concat.tif'.format(downsample)\n import sys\n sys.path.append('/home/wg41/code/Inscopix Data Processing.linux/Contents/API/Python')\n import isx\n movie = isx.Movie.read(input_file)\n with tifffile.TiffWriter(fname, bigtiff=True) as tif:\n for i in tqdm(range(movie.timing.num_samples), desc='Downsampling',\n total=movie.timing.num_samples):\n resized = cv2.resize(movie.get_frame_data(i), None, fx=1 / downsample,\n fy=1 / downsample, interpolation=cv2.INTER_AREA)\n tif.save(resized)\n\n\n@cli.command(name='concat-tiffs')\n@click.option('--input-dir', '-i', default=os.getcwd(), type=click.Path(resolve_path=True, exists=True))\n@click.option('--downsample', '-d', default=4, type=int)\ndef concat_tiffs(input_dir, downsample):\n # TODO: add a failsafe so multiple recordings in the same dir don't get concatenated\n regex_pat = r'-[0-9]{3}\\.tif$'\n # get tif files\n files = glob(join(input_dir, '*.tif'))\n # remove any files with 'concat' in the name - it means it's already concatenated\n files = [f for f in files if 'concat' not in os.path.basename(f)]\n processed = []\n for f in files:\n # append a 0 to the name if it doesn't have numbers\n if not re.findall(regex_pat, f):\n f2 = re.sub(r'\\.tif$', '-000.tif', f)\n # rename the actual file - it's easier for everyone\n os.rename(f, f2)\n f = f2\n processed += [f]\n processed = sorted(processed)\n # go through each file one-by-one and write to a larger tiffile\n fname = re.sub(regex_pat, '-dnsmpl-{}x-concat.tif'.format(downsample), processed[0])\n with tifffile.TiffWriter(fname, bigtiff=True) as out_file:\n for file in tqdm(processed, desc='Concatenating data'):\n # read in data frame by frame\n with tifffile.TiffFile(file) as in_file:\n for img in in_file.pages:\n resized = cv2.resize(img.asarray(), None, fx=1 / downsample,\n fy=1 / downsample, interpolation=cv2.INTER_AREA)\n out_file.save(data=resized.reshape(1, *resized.shape))\n\n\n@cli.command(name='extract')\n@click.argument('input_file', type=click.Path(exists=True, resolve_path=True))\n@click.option('--params', '-p', default=None, type=click.Path(exists=True, resolve_path=True))\n@click.option('--out-file', '-o', default=None, type=str)\n@click.option('--n-procs', default=9, type=int)\ndef extract_pipeline(input_file, params, out_file, n_procs):\n assert input_file.endswith('mmap'), 'Input file needs to be a memmap file!'\n dir = os.path.dirname(input_file)\n if out_file is None:\n search = re.search('_d1_[0-9]{3}_d2_[0-9]{3}_d3', input_file)\n if search is not None:\n out_file = input_file[:search.start()] + '-extract.h5'\n else:\n out_file = os.path.splitext(input_file)[0] + '-extract.h5'\n if params is None:\n params = glob(join(os.path.dirname(input_file), '*caiman*yaml'))[0]\n # TODO: if no params are found use default params\n with open(params, 'r') as f:\n cnmf_options = yaml.load(f, yaml.Loader)\n dview = util.create_dview(n_procs=n_procs)\n ca_traces, masks, cnmf, dims = extract.extract(input_file, cnmf_options, dview=dview)\n with h5py.File(out_file, 'w') as f:\n 
f.create_dataset('ca', data=ca_traces, compression='lzf')\n f.create_dataset('masks', data=masks, compression='lzf')\n util.plot_neurons(ca_traces, masks, join(dir, 'caiman-neurons'), dims)\n del cnmf.dview\n with open(os.path.splitext(out_file)[0] + '-estimates.dill', 'wb') as f:\n dill.dump(cnmf.estimates, f)\n with open(os.path.splitext(out_file)[0] + '-cnmf.dill', 'wb') as f:\n dill.dump(cnmf, f)\n # cnmf.save(os.path.splitext(out_file)[0] + '-cnmf.h5')\n\n print('There are {} neurons, baby!'.format(ca_traces.shape[0]))\n cm.stop_server(dview=dview)\n\n\n@cli.command(name='mc')\n@click.argument('input_file', type=click.Path(resolve_path=True))\n@click.option('--gsig', default=3, type=int)\n@click.option('--max-shifts', default=10, type=int)\n@click.option('--rigid-splits', default=50, type=int)\n@click.option('--save-movie', default=True, type=bool)\n@click.option('--nprocs', default=8, type=int)\ndef motion_correct(input_file, gsig, max_shifts, rigid_splits, save_movie, nprocs):\n import matplotlib.pyplot as plt\n if input_file.endswith('mat'):\n tif = mc.handle_mat_file(input_file)\n elif input_file.endswith('tif') or input_file.endswith('tiff'):\n tif = input_file\n mc_params = dict(gSig_filt=[gsig]*2, max_shifts=[max_shifts]*2, splits_rig=rigid_splits,\n save_movie=save_movie)\n dview = util.create_dview(n_procs=nprocs)\n corrected = mc.motion_correct(tif, mc_params, dview)\n mmapped = util.memmap_file(corrected.fname_tot_rig, dview=dview, basename=re.sub(r'\\.tif$', '', tif))\n # remove unnecessary intermediate\n if isinstance(corrected.fname_tot_rig, (list, tuple)):\n os.remove(corrected.fname_tot_rig[0])\n else:\n os.remove(corrected.fname_tot_rig)\n\n Yr, dims, T = cm.load_memmap(mmapped)\n Y = Yr.T.reshape((T,) + dims, order='F')\n cn_filt, pnr = cm.summary_images.correlation_pnr(Y[:3000, max_shifts:-max_shifts, max_shifts:-max_shifts], gSig=gsig, swap_dim=False)\n cm.utils.visualization.inspect_correlation_pnr(cn_filt, pnr)\n plt.savefig(re.sub(r'\\.tif$', '', tif) + '-caiman-corr.png')\n cm.stop_server(dview=dview)\n","repo_name":"wingillis/caiman-pipeline","sub_path":"caiman_pipeline/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3357810461","text":"# -*- coding: utf-8 -*-\nimport json\nimport urllib2\nimport sys\n\nclass meteo:\n\n def __init__(self,ville):\n \n self.ville = ville\n self.temps = ''\n self.temp_min = ''\n self.temp_max = ''\n self.longitude = 0\n self.latitude = 0\n self.dico_today = {'Thunderstorm':'le temps est orageux','Rain':'il pleut','Clouds':'le ciel est couvert','Clear':'il fait beau'}\n self.dico_forecast = {'Thunderstorm':'le temps sera orageux','Rain':'il pleuvra','Clouds':'le ciel sera couvert','Clear':'il fera beau'}\n self.list_day = ['Aujoud\\'hui','Demain','Après-demain']\n\n if self.ville == '':\n self.getCoord()\n\n def annonce(self):\n \n phrase = ''\n\n for i in range(0,3):\n\n self.getMeteo(i)\n if i==0:\n if self.temps != '':\n phrase += self.list_day[i] + ', '\n phrase += self.dico_today[self.temps]\n phrase += ', et la temperature est comprise entre '\n phrase += str(self.temp_min)\n phrase += ' et '\n phrase += str(self.temp_max)\n phrase += ' degre Celsius. 
'\n else:\n if self.temps != '':\n phrase += self.list_day[i] + ', '\n phrase += self.dico_forecast[self.temps]\n phrase += ', et la temperature sera comprise entre '\n phrase += str(self.temp_min)\n phrase += ' et '\n phrase += str(self.temp_max)\n phrase += ' degre Celsius. '\n\n return(phrase)\n\n def getMeteo(self,jour):\n\n self.temps = ''\n self.temp_min = ''\n self.temp_max = ''\n\n if self.ville != '':\n \n self.getWithCity(jour)\n\n else:\n \n self.getWithCoord(jour)\n\n def getWithCity(self,jour): \n\n if jour == 0:\n\n url = 'http://api.openweathermap.org/data/2.5/weather?q='\n url += self.ville\n url += '&units=metric'\n\n parsed_json = self.recupJson(url)\n\n self.temps = parsed_json['weather'][0]['main']\n self.temp_min = int(parsed_json['main']['temp_min'])\n self.temp_max = int(parsed_json['main']['temp_max']) + 1\n\n else:\n\n url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q='\n url += self.ville\n url += '&cnt=2&units=metric'\n\n parsed_json = self.recupJson(url)\n\n self.temps = parsed_json['list'][jour-1]['weather'][0]['main']\n self.temp_min = int(parsed_json['list'][jour-1]['temp']['min'])\n self.temp_max = int(parsed_json['list'][jour-1]['temp']['max']) + 1 \n\n def getWithCoord(self,jour):\n\n if jour == 0:\n\n url = 'http://api.openweathermap.org/data/2.5/weather?lat='\n url += str(self.latitude)\n url += '&lon='\n url += str(self.longitude)\n url += '&units=metric'\n\n parsed_json = self.recupJson(url)\n\n self.temps = parsed_json['weather'][0]['main']\n self.temp_min = int(parsed_json['main']['temp_min'])\n self.temp_max = int(parsed_json['main']['temp_max']) + 1\n\n else:\n\n url = 'http://api.openweathermap.org/data/2.5/forecast/daily?lat='\n url += str(self.latitude)\n url += '&lon='\n url += str(self.longitude)\n url += '&units=metric&cnt=2'\n\n parsed_json = self.recupJson(url)\n\n self.temps = parsed_json['list'][jour-1]['weather'][0]['main']\n self.temp_min = int(parsed_json['list'][jour-1]['temp']['min'])\n self.temp_max = int(parsed_json['list'][jour-1]['temp']['max']) + 1 \n\n\n\n def getCoord(self):\n url = 'http://www.telize.com/geoip'\n\n parsed_json = self.recupJson(url)\n\n self.longitude = parsed_json['longitude']\n self.latitude = parsed_json['latitude']\n\n def recupJson(self,url):\n try:\n page_json = urllib2.urlopen(url)\n json_string = page_json.read()\n parsed_json = json.loads(json_string)\n page_json.close()\n\n return parsed_json\n\n except:\n print('Je n\\'arrive pas à récupérer les données')\n sys.exit(2)\n\n\n","repo_name":"alxckn/ralf_controller","sub_path":"met2.py","file_name":"met2.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15119509969","text":"a,b=map(int,input().split())\r\nif a<=0 and b>=0:\r\n print('Zero')\r\nelif a>0 and b>0:\r\n print('Positive')\r\nelse:\r\n num=b-a+1\r\n if num%2==1:\r\n print('Negative')\r\n else:\r\n print('Positive')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc002/A/4683625.py","file_name":"4683625.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"7476660536","text":"import pickle\nimport numpy as np\n\n\nclass Join:\n feature_name = 'join'\n\n def __init__(self, id_list=None, group_features_dic_list=None, single_feature_dic_list=None):\n self.id_list = id_list\n self.group_features_dic_list = group_features_dic_list\n self.single_features_dic_list 
= single_feature_dic_list\n self.feature_dic = {}\n\n def join_feature(self):\n self.feature_dic = {}\n for _id in self.id_list:\n if self.group_features_dic_list:\n group_feature_list = []\n for group_features_dic in self.group_features_dic_list:\n feature = None\n if _id in group_features_dic:\n feature = group_features_dic[_id]\n group_feature_list.append(feature)\n if None in group_feature_list:\n continue\n feature_concatenated = np.stack(group_feature_list, axis=1).reshape(1,-1)\n\n if self.single_features_dic_list:\n single_feature_list = []\n for single_features_dic in self.single_features_dic_list:\n feature = None\n if _id in single_features_dic:\n feature = single_features_dic[_id]\n single_feature_list.append(feature)\n if None in single_feature_list:\n continue\n feature_concatenated = np.concatenate([feature_concatenated, np.array(single_feature_list)])\n\n if self.group_features_dic_list or self.single_features_dic_list:\n self.feature_dic[_id] = feature_concatenated[0]\n\n def get_feature_dic(self):\n return self.feature_dic\n\n def save(self, file_name=None):\n save_file_name = self.feature_name + '.pkl'\n if file_name: save_file_name = file_name\n\n with open(save_file_name, 'wb') as f:\n pickle.dump(self.__dict__, f)\n\n def load(self, file_name=None):\n load_file_name = self.feature_name + '.pkl'\n if file_name: load_file_name = file_name\n with open(load_file_name, 'rb') as f:\n tmp_obj = pickle.load(f)\n self.__dict__.update(tmp_obj)\n","repo_name":"sheepover96/dorper","sub_path":"feature/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42426213843","text":"\nimport STARTG01_AVLmodel\nimport numpy as np\nimport Drag_Build_Up # python file that calculates individual components zero lift drag\nimport MiscDrag\nimport matplotlib.pyplot as plt\nfrom FlapDrag import flapDrag\n# import FlapDrag\n\nclass dragpolar:\n def __init__(self,AR=17.51):\n '''\n Legend for flight_stg:\n\n 1 = clean\n 2 = takeoff flaps, gear up\n 3 = takeoff flaps, gear down\n 4 = landing flaps, gear up\n 5 = landing flaps, gear down\n\n '''\n\n CL1=[]\n CDi1=[]\n for i in range(-15,15+1):\n CL,CDi = STARTG01_AVLmodel.STARTG01_AVL(1,i,1)\n CL1 += [CL]\n CDi1 += [CDi]\n\n CL2=[]\n CDi2=[]\n for i in range(-15,15+1):\n CL,CDi = STARTG01_AVLmodel.STARTG01_AVL(2,i,1)\n CL2 += [CL]\n CDi2 += [CDi]\n\n CL3=[]\n CDi3=[]\n for i in range(-15,15+1):\n CL,CDi = STARTG01_AVLmodel.STARTG01_AVL(3,i,1)\n CL3 += [CL]\n CDi3 += [CDi]\n\n CL4=[]\n CDi4=[]\n for i in range(-15,15+1):\n CL,CDi = STARTG01_AVLmodel.STARTG01_AVL(4,i,1)\n CL4 += [CL]\n CDi4 += [CDi]\n\n CL5=[]\n CDi5=[]\n for i in range(-15,15+1):\n CL,CD = STARTG01_AVLmodel.STARTG01_AVL(5,i,1)\n CL5 += [CL]\n CDi5 += [CDi]\n\n\n\n # Define parameters\n S_ref = 826.13454 # reference area\n\n \n flight_stages = [1, 2, 3, 4, 5]\n\n # Flight conditions for each flight condition\n M = [0.457, 0.183, 0.183, 0.15, 0.15] # Mach Numbers\n rho = [0.1152e-02, 0.0023769, 0.0023769, 0.0023769, 0.0023769] # Densities\n V = [275*1.6878099, 1.3*157.3, 1.3*157.3, 1.3*128.4, 1.3*128.4] # Velocities\n mu = [3.246e-7, 3.784e-7, 3.784e-7, 3.784e-7, 3.784e-7] # Dynamic Viscosities\n\n\n\n '''\n We need to put the following code into a for loop to get an array of Cd0 values that correspond \n to the five flight stages. 
Then we can combine the CD0 values with the avl values to get our full \n drag polars \n '''\n\n\n # Loop that ouputs an array of CD0 values corresponding to our 5 flight stages\n CD0s = []\n for i in range(len(flight_stages)):\n all_components = Drag_Build_Up.dragDragComponents(M[i], rho[i], V[i], mu[i])\n # print([component.CalculateDrag() for component in all_components])\n Sumcomps = (1/S_ref) * sum([component.CalculateDrag() for component in all_components])\n C_D_leakpro = 0.07 * Sumcomps\n C_D_missing = MiscDrag.miscDrag(M[i], flight_stages[i])\n\n # print(\"Flight Stage\", flight_stages[i])\n # print(\"Wing: \", all_components[0].CalculateDrag() / S_ref)\n # print(\"hTail: \", all_components[1].CalculateDrag() / S_ref)\n # print(\"vTail: \", all_components[2].CalculateDrag() / S_ref)\n # print(\"Fuselage: \", all_components[3].CalculateDrag() / S_ref)\n # print(\"Nacelle: \", all_components[4].CalculateDrag() / S_ref)\n\n CD0s.append(Sumcomps + C_D_leakpro + C_D_missing)\n # print(CD0s)\n\n # for i in range(len(flight_stages)):\n # Cdflap = FlapDrag.flapDrag(flight_stages[i])\n\n # Cl's corresponding to the flight stages defined above\n # cl1 = \n # cl2 = \n # cl3 = \n # cl4 = \n # cl5 = \n\n\n\n\n\n # plt.plot(CL1,CD1)\n # plt.plot(CL2,CD2)\n # plt.plot(CL3,CD3)\n # plt.plot(CL4,CD4)\n # plt.plot(CL5,CD5)\n\n # plt.figure()\n\n # CD1=CD0s[0]+np.array(CDi1)*17.51/AR\n # CD2=CD0s[1]+np.array(CDi2)*17.51/AR\n # CD3=CD0s[2]+np.array(CDi3)*17.51/AR\n # CD4=CD0s[3]+np.array(CDi4)*17.51/AR\n # CD5=CD0s[4]+np.array(CDi5)*17.51/AR\n\n CD1=0.02+np.array(CDi1)*17.51/AR+flapDrag(1)\n CD2=0.02+np.array(CDi2)*17.51/AR+flapDrag(2)\n CD3=0.02+np.array(CDi3)*17.51/AR+flapDrag(3)\n CD4=0.02+np.array(CDi4)*17.51/AR+flapDrag(4)\n CD5=0.02+np.array(CDi5)*17.51/AR+flapDrag(5)\n self.CLs = [CL1,CL2,CL3,CL4,CL5]\n self.CDs = [CD1,CD2,CD3,CD4,CD5]\n\n\n def CD(self,flight_stg_req=1,CL_req=0):\n return np.interp(CL_req,self.CLs[flight_stg_req-1],self.CDs[flight_stg_req-1])\n \n\n\nif __name__ == \"__main__\":\n# print('PING')\n dpobj = dragpolar()\n CD = dpobj.CD(flight_stg_req=1,CL_req=0)\n # print(CD)\n # plt.plot(CL1,(CD1),label='Clean')\n # plt.plot(CL2,(CD2),label='Takeoff w/ Landing Gear Up')\n # plt.plot(CL3,(CD3),'--',label='Takeoff w/ Landing Gear Down')\n # plt.plot(CL4,(CD4),label='Landing w/ Landing Gear Up')\n # plt.plot(CL5,(CD5),'--',label='Landing w/ Landing Gear Down')\n # plt.xlabel('$C_L$')\n # plt.ylabel('$C_D$')\n # plt.title('Drag Polar')\n # plt.legend()\n\n\n # plt.figure()\n # plt.plot(range(-15,15+1),CL1/(CD1),label='Clean')\n # plt.plot(range(-15,15+1),CL2/(CD2),label='Takeoff w/ Landing Gear Up')\n # plt.plot(range(-15,15+1),CL3/(CD3),'--',label='Takeoff w/ Landing Gear Down')\n # plt.plot(range(-15,15+1),CL4/(CD4),label='Landing w/ Landing Gear Up')\n # plt.plot(range(-15,15+1),CL5/(CD5),'--',label='Landing w/ Landing Gear Down')\n # plt.xlabel('$\\\\alpha$')\n # plt.ylabel('$C_L/C_D$')\n # plt.title('$C_L/C_D$ Polar')\n # plt.legend()\n\n\n # plt.show()","repo_name":"jucobee/START","sub_path":"Code Merging/dragpolar.py","file_name":"dragpolar.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22574022007","text":"n = int(input())\na = list(map(int, input().split()))\nm = int(input())\n\nresult = m\n\na.sort(reverse = True)\n\ndef dfs(level, sum):\n global result\n if level > result:\n return\n if sum > m:\n return\n if sum == m:\n if level < result:\n result = level\n 
return\n else:\n for i in range(n):\n dfs(level + 1, sum + a[i])\n\ndfs(0, 0)\n\nprint(result)","repo_name":"jjiwoning/Code_Test","sub_path":"python_algo/bfs_dfs/dfs5_1.py","file_name":"dfs5_1.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8129477896","text":"from typing import List\n\n\ndef maximumWhiteTiles(tiles: List[List[int]], carpetLen: int) -> int:\n prefix = [0] * tiles[-1][-1]\n for s, e in tiles:\n prefix[s - 1:e] = [1] * (e + 1 - s)\n \n n = len(prefix)\n if carpetLen > n: return sum(prefix)\n \n max_ = cur_ = sum(prefix[:carpetLen])\n \n for i in range(carpetLen, len(prefix)): # 滑动窗口\n \n cur_ -= prefix[i - carpetLen]\n cur_ += prefix[i]\n max_ = max(max_, cur_)\n \n return max_\n\n\nprint(maximumWhiteTiles(\n [[1, 5], [10, 11], [12, 18], [20, 25], [30, 32]], 10\n))\n","repo_name":"YiandLi/rebel_pretrain","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19428569859","text":"import torch\nfrom typing import List\n\ndef compute_category_metrics(category):\n\n for key in category:\n TP = category[key][\"TP\"]\n FP = category[key][\"FP\"]\n FN = category[key][\"FN\"]\n category[key][\"support\"] = TP + FN\n\n precision = 0\n if TP + FP != 0:\n precision = TP / (TP + FP)\n \n recall = 0\n if TP + FN != 0:\n recall = TP / (TP + FN)\n \n f1 = 0\n if precision + recall != 0:\n f1 = (2 * precision * recall) / (precision + recall)\n \n category[key][\"precision\"] = precision\n category[key][\"recall\"] = recall\n category[key][\"f1\"] = f1\n \n return category\n\ndef compute_overall_metrics(category):\n TP = 0\n FP = 0\n FN = 0\n \n macro_precision = 0\n macro_recall = 0\n macro_f1 = 0\n\n weighted_precision = 0\n weighted_recall = 0\n weighted_f1 = 0\n\n for key in category:\n TP += category[key][\"TP\"]\n FP += category[key][\"FP\"]\n FN += category[key][\"FN\"]\n \n macro_precision += category[key][\"precision\"]\n macro_recall += category[key][\"recall\"]\n macro_f1 += category[key][\"f1\"]\n \n num = category[key][\"TP\"] + category[key][\"FN\"]\n weighted_precision += category[key][\"precision\"] * num\n weighted_recall += category[key][\"recall\"] * num\n weighted_f1 += category[key][\"f1\"] * num\n \n macro_precision /= len(category)\n macro_recall /= len(category)\n macro_f1 /= len(category)\n\n support = TP + FN\n if support != 0:\n weighted_precision /= support\n weighted_recall /= support\n weighted_f1 /= support\n\n micro_precision = 0\n if TP + FP != 0:\n micro_precision = TP / (TP + FP)\n \n micro_recall = 0\n if TP + FN != 0:\n micro_recall = TP / (TP + FN)\n \n micro_f1 = 0\n if micro_precision + micro_recall != 0:\n micro_f1 = (2 * micro_precision * micro_recall) / (micro_precision + micro_recall)\n\n overall = {\n \"micro-precision\": micro_precision,\n \"micro-recall\": micro_recall,\n \"micro-f1\": micro_f1,\n \"macro-precision\": macro_precision,\n \"macro-recall\": macro_recall,\n \"macro-f1\": macro_f1,\n \"weighted-precision\": weighted_precision,\n \"weighted-recall\": weighted_recall,\n \"weighted-f1\": weighted_f1,\n \"TP\": TP,\n \"FP\": FP,\n \"FN\": FN,\n \"support\": support,\n \"num_pred\": TP + FP,\n }\n\n return overall\n\n\n\nclass EntityRelationMetrics():\n def __init__(self, entity_types, id2rel, decoder):\n self.entity_types = entity_types\n self.id2rel = id2rel\n self.decoder = decoder\n 
self.relation_types = [t for _, t in id2rel.items()]\n\n self._init_entity_category()\n self._init_relation_category()\n \n def clear(self):\n self._init_entity_category()\n self._init_relation_category()\n\n def _init_relation_category(self):\n relation_types = self.id2rel.values()\n self.relation_category = self._create_category(relation_types)\n\n def _init_entity_category(self):\n self.entity_category = self._create_category(self.entity_types)\n\n def _create_category(self, types):\n category = {}\n for t in types:\n category[t] = {\"TP\": 0, \"FP\": 0, \"FN\": 0}\n return category\n \n def add_batch(self, entity_predictions, entity_references,\n relation_predictions, relation_references,\n mask):\n self._add_batch_entity(entity_predictions, entity_references)\n self._add_batch_relation(relation_predictions, relation_references, mask)\n \n def _add_batch_entity(self, predictions: List[List[str]], references: List[List[str]]):\n predictions = self.decoder.decode(predictions)\n references = self.decoder.decode(references)\n entity_category = self._compute_entity_category_count(predictions, references)\n self._update_entity_category_count(entity_category)\n \n def _add_batch_relation(self, predictions: torch.Tensor, references: torch.Tensor, mask: torch.Tensor):\n category = self._create_category(self.relation_types)\n mask = mask.unsqueeze(2) * mask.unsqueeze(1)\n predictions = predictions.argmax(-1)\n for rel_id, rel_type in self.id2rel.items():\n TP = ((predictions == rel_id) & (references == rel_id)).sum().item()\n FP = ((predictions == rel_id) & (references != rel_id) & mask).sum().item() # 只统计有效位置,不统计填充位置\n FN = ((predictions != rel_id) & (references == rel_id)).sum().item()\n category[rel_type][\"TP\"] += TP\n category[rel_type][\"FP\"] += FP\n category[rel_type][\"FN\"] += FN\n self._update_relation_category_count(category)\n \n def compute(self):\n entity_metrics = self._compute_metrics(self.entity_category)\n relation_metrics = self._compute_metrics(self.relation_category)\n metrics = {\n \"entity\": entity_metrics,\n \"relation\": relation_metrics,\n }\n return metrics\n \n def _compute_metrics(self, category):\n category = compute_category_metrics(category)\n overall = compute_overall_metrics(category)\n metrics = {\n \"category\": category,\n \"overall\": overall,\n }\n return metrics\n\n \n def _update_relation_category_count(self, category):\n for key in category:\n self.relation_category[key][\"TP\"] += category[key][\"TP\"]\n self.relation_category[key][\"FP\"] += category[key][\"FP\"]\n self.relation_category[key][\"FN\"] += category[key][\"FN\"]\n \n def _update_entity_category_count(self, category):\n for key in category:\n self.entity_category[key][\"TP\"] += category[key][\"TP\"]\n self.entity_category[key][\"FP\"] += category[key][\"FP\"]\n self.entity_category[key][\"FN\"] += category[key][\"FN\"]\n \n\n def _compute_entity_category_count(self, predictions, references):\n category = self._create_category(self.entity_types)\n new_predictions = []\n for idx, preds in enumerate(predictions):\n for span in preds:\n new_span = (idx,) + span\n new_predictions.append(new_span)\n\n new_references = [] \n for idx, refs in enumerate(references):\n for span in refs:\n new_span = (idx,) + span\n new_references.append(new_span)\n \n pred_set = set(new_predictions)\n true_set = set(new_references)\n\n tp_set = pred_set.intersection(true_set)\n fp_set = pred_set.difference(true_set)\n fn_set = true_set.difference(pred_set)\n\n for span in tp_set:\n ent_type = span[-1]\n 
category[ent_type][\"TP\"] += 1\n for span in fp_set:\n ent_type = span[-1]\n category[ent_type][\"FP\"] += 1\n for span in fn_set:\n ent_type = span[-1]\n category[ent_type][\"FN\"] += 1\n \n return category","repo_name":"wflying000/MultiHeadJointEntityRelationExtraction","sub_path":"src/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30254197789","text":"import math\n# Insertion_sort metoden tar inn Array 'A' og bruker insertion_sort algoritmen for aa sortere den.\ndef insertion_sort(A):\n for i in range(1, len(A)):\n j = i\n while (j > 0 and A[j-1] > A[j]):\n A[j-1], A[j] = A[j], A[j-1]\n j = j-1\n return A\n\ndef merge_sort(A):\n if len(A) <= 1:\n return A\n i = math.floor((len(A)/2))\n A1 = merge_sort(A[:i])\n A2 = merge_sort(A[i:])\n return merge(A, A1, A2)\n\ndef merge(A, A1, A2):\n i = 0\n j = 0\n while (i < len(A1) and j < len(A2)):\n if (A1[i] <= A2[j]):\n A[i + j] = A1[i]\n i += 1\n else:\n A[i + j] = A2[j]\n j += 1\n while (i < len(A1)):\n A[i + j] = A1[i]\n i += 1\n while (j < len(A2)):\n A[i + j] = A2[j]\n j += 1\n return A\n\n\"\"\"\nfileName = input()\nA = []\nwith open(fileName, 'r') as file:\n for line in file:\n A.append(int(line))\n\"\"\"\n\n\n\nexample = [2,3,6,1,4,5]\nprint(insertion_sort(example))\nprint(merge_sort(example))","repo_name":"Olavvv/IN2010","sub_path":"Obliger/Oblig1/sortering.py","file_name":"sortering.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22741138168","text":"import sys\nimport os \ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, dir_path)\nimport control.routes\n\ndef get_http_data(value):\n parts = value.split('&')\n parameters = {}\n for value in parts:\n pair = value.split('=')\n if len(pair) == 2:\n parameters[pair[0]] = pair[1]\n return parameters\n\ndef application(env, start_response):\n start_response('200 OK', [('Content-Type','text/html')])\n get = env['QUERY_STRING']\n http_data = get_http_data(get)\n if 'route' not in http_data:\n route = ''\n else:\n route = http_data['route']\n result = control.routes.get_response(route,http_data)\n return [result.encode('ascii')]\n ","repo_name":"joaofelipecb/hackathonCCR2","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21617191611","text":"# -*- coding: utf-8 -*-\n\nfrom vec import Vec\nfrom mat import Mat\nimport math\n\ndef matId(D): return Mat( (D,D) , { (x,y) : 1 for x in D for y in D if x == y} ) \nprint('matId')\nprint(matId({'a', 'b', 'c', 'd'}))\n \ndef matTransform(a,b,z): \n M = matId({'a','b','c'})\n M['a','c'] = a\n M['b','c'] = b\n return M\nv = Vec({'a','b','c'},{'a':1,'b':1, 'c':1})\nprint('matTransform')\nprint(matTransform(2,3,1) * v)\n \ndef matScale(i,j):\n M = matId({'a','b','c'})\n M['a','a'] = i\n M['b','b'] = j\n return M\nprint('matScale')\nprint(matScale(2,3))\n \ndef matReflect(k):\n M = matId({'a','b','c'})\n if k == 'x':\n M['b','b'] = -1\n elif k == 'y':\n M['a','a'] = -1\n else:\n print('Invalid argument (must be \"x\" or \"y\")')\n return M\nprint('matReflect')\nprint(matReflect('x'))\nprint(matReflect('y'))\nprint(matReflect('z'))\n \ndef matRotate(r):\n M = matId({'a','b','c'})\n M['a','a'] = math.cos(r)\n M['a','b'] = -math.sin(r)\n M['b','a'] 
= math.sin(r)\n M['b','b'] = math.cos(r)\n return M\nprint('matRotate')\nprint(matRotate(0))\n \n \n \n \n ","repo_name":"SamPoff/229","sub_path":"Labs_Mat_Vec_etc/lab8_229.py","file_name":"lab8_229.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12276088595","text":"def leiaDinheiro(texto):\n din=''\n c=0\n while c==0:\n din = input(texto)\n if ',' in din:\n din = din.replace(',','.')\n try:\n float(din)\n c=1\n except:\n print('Digite um valor número valido\\n')\n \n \n\n f = float(din)\n\n return f\n\n","repo_name":"lucas-silvs/Curso-Guanabara","sub_path":"Mundo - 3/utilidadesCeV/dados/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20585019781","text":"import time\nimport os\nfrom colorama import Fore, Back, Style, init\nimport sys\nfrom subprocess import call\nfrom tqdm import tqdm\ninit()\nimport keyboard\nimport pwinput\n\ndef flush_input():\n import msvcrt\n while msvcrt.kbhit():\n msvcrt.getch()\n\nclear = lambda: os.system('cls')\nclear()\n\nprint('1) Appearance')\ntime.sleep(0.1)\nprint('2) Return to main menu')\nprint('\\nWhat would you like to change?')\n\nwhile True:\n if keyboard.is_pressed('1'):\n clear()\n s = open('appearance.txt','w')\n print('1) Light theme')\n time.sleep(0.1)\n print('2) Grey dark theme')\n time.sleep(0.1)\n print('3) Magenta dark theme')\n time.sleep(0.1)\n print('4) Blue dark theme')\n time.sleep(0.1)\n print('5) Fully purple theme')\n time.sleep(0.1)\n print('6) High contrast theme\\n')\n while True:\n try:\n flush_input()\n choice = int(input('Select a theme: '))\n if choice > 6 or choice < 1:\n choice = int('f')\n else:\n s.write(str(choice))\n clear()\n s.close()\n call(['python','firstLogOn.py'])\n sys.exit()\n break\n except:\n print('\\nOopsy that is invalid')\n \n \n elif keyboard.is_pressed('2'):\n clear()\n call(['python', 'password_Manager.py'])\n exit()\n \n \n \n \n ","repo_name":"Sulaiman-Gaskell/Password_Manager","sub_path":"themes.py","file_name":"themes.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"11856577626","text":"from openalea.container.tree import PropertyTree\r\n\r\nclass nAdmissibleTree(PropertyTree):\r\n \"\"\"\r\n A class that adds some methods to PropertyTree to construct and explore an n-admissible tree.\r\n \"\"\"\r\n def __init__(self):\r\n self.maxLevel = -1\r\n PropertyTree.__init__(self)\r\n \r\n def addChild(self, parent, value, level, pbAngle = None, logPbAngle = None, questionMark = False, orderIndex = None):\r\n \"\"\"\r\n Add child with properties to the tree.\r\n \"\"\"\r\n for childId in self.children(parent):\r\n prop = self.get_vertex_property(childId)\r\n if ( prop[\"value\"] == value) and (prop[\"level\"] == level) :\r\n# print \"Already in the tree\"\r\n return childId\r\n break\r\n else:\r\n if level > self.maxLevel:\r\n self.maxLevel = level\r\n return self.add_child(parent, value = value, level = level, nodePrb = pbAngle, nodeLogProb = logPbAngle, questionMark = questionMark, orderIndex = orderIndex)\r\n \r\n def addListValueLevelProb(self, currentId, ListValueLevel):\r\n \"\"\"\r\n add vertexes given their ids and values to a vertex and updates the probability and order index of the vetex\r\n \r\n :Parameters:\r\n - currentID - vertex id to which 
new vertexes should be added \r\n - ListValueLevel - list of values, levels, probabilities, log of probabilities, and order indexes of children \r\n \"\"\"\r\n for node in ListValueLevel: \r\n currentId = self.addChild(parent = currentId, value = node[0], level = node[1], pbAngle = node[2], logPbAngle = node[3], orderIndex = node[4])\r\n \r\n def leaves(self):\r\n \"\"\"\r\n return the list of leaves of the n-admisisble tree.\r\n \"\"\"\r\n return [vid for vid in self.vertices() if self.is_leaf(vid)]\r\n \r\n def leavesNotLast(self, seqLen):\r\n \"\"\"\r\n return the leaves of the tree whose level is different from a given value.\r\n \r\n :Parameters:\r\n - `seqlen` - an integer\r\n \r\n :Returns:\r\n - list of leaves whose level is not equal to `seqLen`\r\n \"\"\"\r\n return [vid for vid in self.vertices() if ( self.is_leaf(vid) and (self.get_vertex_property(vid)[\"level\"] != seqLen )) ]\r\n \r\n def leavesLast(self, seqLen):\r\n \"\"\"\r\n return the leaves of the tree whose level is equal to a given value.\r\n \r\n :Parameters:\r\n - `seqlen` - an integer\r\n \r\n :Returns:\r\n - list of leaves whose level is equal to `seqLen`\r\n \"\"\"\r\n return [vid for vid in self.vertices() if (vid != self.root) and ( self.get_vertex_property(vid)[\"level\"] == seqLen ) ]\r\n \r\n def getMaxLevel(self):\r\n \"\"\"\r\n get the maximum level of leaves.\r\n \"\"\"\r\n return self.maxLevel \r\n \r\n def pathToRoot(self, vid):\r\n \"\"\"\r\n return list of values of vertexes on the path from a given vertex to the root.\r\n \r\n :Parameters:\r\n - `vid` - a vertex id \r\n \r\n :Returns:\r\n `valueList` - list of values of vertexes on the path from the `vertex` to the root.\r\n \"\"\"\r\n \r\n valueList = list()\r\n while not (vid == self.root):\r\n valueList.append(self.get_vertex_property(vid)[\"value\"]) \r\n vid = self.parent(vid)\r\n valueList.reverse()\r\n return valueList\r\n\r\n def allPathsAll(self):\r\n \"\"\"\r\n `allPathsAll` returns a list of values, probabilities , question marks, as well as the total logarithmic probability of all the vertexes on the paths from leaves to the root.\r\n \r\n :Returns:\r\n - `[valueList, probList ,questionList, logProb]` - list of lists of values, probability , question marks, as well as the total logarithmic probability of all the vertexes on the paths from leaves to the root\r\n \"\"\"\r\n for vid in self.vertices():\r\n if self.is_leaf(vid):\r\n valueList = list()\r\n probList = list()\r\n questionList = list()\r\n logProb = 0.0\r\n while not (vid == self.root):\r\n valueList.append(self.get_vertex_property(vid)[\"value\"])\r\n probList.append(self.get_vertex_property(vid)[\"nodePrb\"])\r\n logProb += self.get_vertex_property(vid)[\"nodeLogProb\"]\r\n if self.get_vertex_property(vid)[\"questionMark\"]:\r\n questionList.append([self.get_vertex_property(vid)[\"value\"], self.get_vertex_property(vid)[\"level\"] ])\r\n vid = self.parent(vid)\r\n valueList.reverse()\r\n probList.reverse()\r\n questionList.reverse()\r\n yield [valueList, probList ,questionList, logProb]\r\n \r\n","repo_name":"jldinh/vplants","sub_path":"phyllotaxis_analysis/src/vplants/phyllotaxis_analysis/nAdmissibleTree_implementation.py","file_name":"nAdmissibleTree_implementation.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38489019557","text":"from builtins import object\nfrom rest_framework import serializers\nfrom .workout_generator import stringify_target_workout\nfrom .models 
import Exercise, WorkoutExercise, Workout, UserExercise\n\nclass ExerciseSerializer(object):\n def __init__(self, workout_exercises, workout, rounds, ab_exercises=None):\n self.workout_exercises = workout_exercises\n self.workout = workout\n self.rounds = rounds\n self.ab_exercises = ab_exercises\n\n @property\n def all_exercises(self):\n output = {\n 'workout_id': self.workout.id,\n 'exercises': [],\n 'workout_target': stringify_target_workout(self.workout.workout_target.split('-')),\n 'rounds': self.rounds,\n 'ab_exercises': [],\n }\n\n for idx, exercise in enumerate(self.workout_exercises):\n output['exercises'].append({\n 'order': idx + 1,\n 'name': exercise.name,\n 'muscle_target': exercise.muscle_target,\n 'secondary_target': exercise.secondary_target,\n 'push_pull': exercise.push_pull,\n 'muscle_group': exercise.muscle_group,\n 'difficulty_level': exercise.difficulty_level,\n 'equipment': exercise.equipment,\n 'resistance_type': exercise.resistance_type,\n 'image_url': exercise.demo_src,\n 'id': exercise.id,\n 'quantity': exercise.quantity\n })\n if self.ab_exercises:\n for idx, ab_exercise in enumerate(self.ab_exercises):\n output['ab_exercises'].append({\n 'order': idx + 1,\n 'name': ab_exercise.name,\n 'muscle_target': ab_exercise.muscle_target,\n 'secondary_target': ab_exercise.secondary_target,\n 'push_pull': ab_exercise.push_pull,\n 'muscle_group': ab_exercise.muscle_group,\n 'difficulty_level': ab_exercise.difficulty_level,\n 'equipment': ab_exercise.equipment,\n 'resistance_type': ab_exercise.resistance_type,\n 'image_url': ab_exercise.demo_src,\n 'id': ab_exercise.id,\n 'quantity': ab_exercise.quantity\n })\n return output\n# class ExerciseSerializer(serializers.ModelSerializer):\n# class Meta:\n# model = Exercise\n# fields = ['id', 'name', 'muscle_target', 'secondary_target', 'push_pull', 'muscle_group', 'equipment', 'resistance_type', 'demo_src']\n\n# class WorkoutExerciseSerializer(serializers.ModelSerializer):\n# exercise = ExerciseSerializer(many=True, read_only=True)\n# class Meta:\n# model = WorkoutExercise\n# fields = ['id', 'order', 'exercise']\n\n\nclass WorkoutSerializer(serializers.ModelSerializer):\n # exercises = ExerciseSerializer(many=True, read_only=True)\n class Meta:\n model = Workout\n fields = ['id', 'total_rounds', 'workout_target', 'workout_date']\n\nclass UserExerciseSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserExercise\n fields = ['id', 'repetitions', 'resistance', 'exercise']\n","repo_name":"TomPrete/workout-today-back-end","sub_path":"exercises/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2515926629","text":"import numpy as np\nimport pandas as pd\nimport scipy as sc\nfrom sklearn.neural_network import MLPClassifier\nimport matplotlib.pyplot as plt\n\n\ndf = pd.read_csv(\"datavalidation.csv\", index_col=0)\n\nX = np.asarray(df.index)\nY = np.asarray(df.columns)\nZ = df.as_matrix()\n\nprint(Y, X)\nfig, ax = plt.subplots()\n\nfig = plt.contourf(Y, X, Z, 20, cmap='RdGy')\nfig = plt.colorbar();\n\nax.set_xticks(Y)\nax.set_xticklabels(Y)\nax.set_yticks(X)\nax.set_yticklabels(X)\n\nax.set_xlabel('alpha in the form of j: (10**((-1)*j))')\nax.set_ylabel('number of hidden 
layers')\n\nplt.show(block=True)\nplt.interactive(False)","repo_name":"mathmood/Hand_Writing_Recognition","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"14319364267","text":"from django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import FileResponse, HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.generic import FormView, TemplateView\nfrom enum import Enum\nimport os\n\nfrom ..conf import settings\nfrom .forms import FileUploadForm, FileDetailsForm\nfrom .models import File, FileQuota, FileSummary\nfrom .signals import *\n\nclass FileIndexView(TemplateView):\n\n    class ResponseDict(str, Enum):\n        FILES = 'files'\n        FORM = 'form'\n        QUOTA = 'quota'\n        SUMMARY = 'summary'\n\n    template_name = settings.DEMOS_FILE_INDEX_TEMPLATE\n    form_class = FileUploadForm\n    success_page = 'demos:file:index'\n    error_page = success_page\n\n    def __init__(self, *args, **kwargs):\n        self.response_dict = {}\n\n    def get(self, request, *args, **kwargs):\n        form = self.form_class(request.user)\n        self.response_dict[self.ResponseDict.FORM] = form\n\n        if request.user.id:\n            summaries = FileSummary.objects.filter(owner=request.user)\n        else:\n            summaries = FileSummary.objects.filter(owner=None)\n\n        if len(summaries) < 1: # first time uploader\n            self.response_dict[self.ResponseDict.SUMMARY] = FileSummary(files=0, size=0)\n        elif len(summaries) == 1:\n            self.response_dict[self.ResponseDict.SUMMARY] = summaries[0]\n        else:\n            print(\"Too many summaries for {}, found {}. Using first result.\".format(request.user, len(summaries)))\n            self.response_dict[self.ResponseDict.SUMMARY] = summaries[0]\n\n        if request.user.id:\n            files = File.objects.filter(owner=request.user)\n        else:\n            files = File.objects.filter(owner=None)\n\n        self.response_dict[self.ResponseDict.FILES] = files\n\n        quotas = FileQuota.objects.filter(id=1)\n        if len(quotas) == 1:\n            self.response_dict[self.ResponseDict.QUOTA] = quotas[0]\n        else:\n            # string keys; bare names here previously raised NameError\n            self.response_dict[self.ResponseDict.QUOTA] = {\n                'max_files': 'n/a',\n                'max_filesize': 'n/a',\n                'max_total_filesize': 'n/a'\n            }\n\n        return render(request, self.template_name, self.response_dict)\n\n    def post(self, request, *args, **kwargs):\n        form = self.form_class(request.user, data=request.POST, files=request.FILES)\n        self.response_dict[self.ResponseDict.FORM] = form\n        if form.is_valid():\n            try:\n                form.save()\n                messages.info(request, \"{} was successfully uploaded.\".format(form.cleaned_data['file'].name))\n                file_uploaded.send(sender=request.user.__class__, request=request, id=form.file.id, name=form.file.name)\n                return HttpResponseRedirect(reverse(self.success_page))\n            except Exception as e:\n                print(str(e))\n                messages.error(request, str(e))\n                return HttpResponseRedirect(reverse(self.error_page))\n        else:\n            # cleaned_data may lack 'file' on an invalid form, so report generically\n            messages.error(request, \"The file could not be uploaded.\")\n            for error in form.non_field_errors():\n                messages.error(request, error)\n            return HttpResponseRedirect(reverse(self.error_page))\n\n\nclass FileDetailView(TemplateView):\n    '''\n    Display the details of an uploaded file\n    '''\n\n    class ResponseDict(str, Enum):\n        FILE = 'file'\n        FORM = 'form'\n\n    template_name = settings.DEMOS_FILE_DETAILS_TEMPLATE\n    success_page = 'demos:file:details'\n    error_page = 'demos:file:index'\n\n    def __init__(self, *args, 
**kwargs):\n self.response_dict = {}\n\n\n def get(self, request, *args, **kwargs):\n path = request.GET.get('path')\n\n if path:\n try:\n if request.user and request.user.id:\n file = File.objects.get(owner=request.user, path=path)\n else:\n file = File.objects.get(owner=None, path=path)\n form = FileDetailsForm(instance=file)\n self.response_dict[self.ResponseDict.FILE] = file\n self.response_dict[self.ResponseDict.FORM] = form\n except Exception as e:\n print(str(e))\n messages.error(request, str(e))\n return HttpResponseRedirect(reverse(self.error_page))\n else:\n messages.error(request, 'A file path was not specified.')\n return HttpResponseRedirect(reverse(self.error_page))\n\n return render(request, self.template_name, self.response_dict)\n\n def post(self, request, *args, **kwargs):\n if request.user and request.user.id:\n file = File.objects.get(owner=request.user, path=request.POST['path'])\n else:\n file = File.objects.get(owner=None, path=request.POST['path'])\n\n self.response_dict[self.ResponseDict.FILE] = file\n form = FileDetailsForm(instance=file, data=request.POST)\n if form.is_valid():\n form.save()\n messages.info(request, \"{} was successfully updated.\".format(file.name))\n query_string = \"?path={}\".format(file.path)\n file_updated.send(sender=request.user.__class__, request=request, id=file.id, name=file.name)\n return HttpResponseRedirect(reverse(self.success_page) + query_string)\n else:\n return render(request, self.template_name, self.response_dict)\n\n\nclass FileDeleteView(TemplateView):\n '''\n Deletes an uploaded file\n '''\n\n success_page = 'demos:file:index'\n error_page = 'demos:file:details'\n\n\n def __init__(self, *args, **kwargs):\n self.response_dict = {}\n\n\n def get(self, request, *args, **kwargs):\n path = request.GET.get('path')\n\n if path:\n try:\n if request.user and request.user.id:\n file = File.objects.get(owner=request.user, path=path)\n else:\n file = File.objects.get(owner=None, path=path)\n # update summary\n if request.user and request.user.id:\n summaries = FileSummary.objects.filter(owner=request.user)\n else:\n summaries = FileSummary.objects.filter(owner=None)\n if len(summaries) == 1:\n summary = summaries[0]\n summary.files -= 1\n summary.size -= file.size\n summary.save()\n else:\n pass # shouldn't happen!\n # remove from filesystem\n os.remove(settings.DEMOS_FILE_UPLOAD_DIR + '/' + file.path)\n # remove file record\n id = file.id # set to None after delete\n file.delete()\n file_deleted.send(sender=request.user.__class__, request=request, id=id, name=file.name)\n except Exception as e:\n print(str(e))\n messages.error(request, str(e))\n return HttpResponseRedirect(reverse(self.error_page))\n else:\n messages.error(request, 'A file path was not specified.')\n return HttpResponseRedirect(reverse(self.error_page), self.response_dict)\n\n messages.warning(request, \"{} was deleted\".format(file.name))\n return HttpResponseRedirect(reverse(self.success_page), self.response_dict)\n\n\nclass FileDownloadView(TemplateView):\n '''\n Downloads an uploaded file\n '''\n\n success_page = 'demos:file:index'\n error_page = success_page\n\n def get(self, request, *args, **kwargs):\n path = request.GET.get('path')\n\n if path:\n try:\n file = File.objects.get(owner=request.user, path=path)\n except Exception as e:\n print(str(e))\n messages.error(request, str(e))\n return HttpResponseRedirect(reverse(self.error_page))\n else:\n messages.error(request, 'A file path was not specified.')\n return HttpResponseRedirect(reverse(self.error_page), 
self.response_dict)\n\n content = open(settings.DEMOS_FILE_UPLOAD_DIR + '/' + file.path, 'rb')\n response = FileResponse(content)\n response['Content-Type'] = file.content_type\n response['Content-Length'] = file.size\n response['Content-Disposition'] = 'attachment; filename={}'.format(file.name)\n return response\n\n\nclass FileOpenView(TemplateView):\n '''\n Opens an uploaded file\n '''\n\n success_page = 'demos:file:index'\n error_page = success_page\n\n def get(self, request, *args, **kwargs):\n path = request.GET.get('path')\n\n if path:\n try:\n file = File.objects.get(owner=request.user, path=path)\n except Exception as e:\n print(str(e))\n messages.error(request, str(e))\n return HttpResponseRedirect(reverse(self.error_page))\n else:\n messages.error(request, 'A file path was not specified.')\n return HttpResponseRedirect(reverse(self.error_page), self.response_dict)\n\n content = open(settings.DEMOS_FILE_UPLOAD_DIR + '/' + file.path, \"rb\")\n response = FileResponse(content, filename=file.name)\n response['Content-Type'] = file.content_type\n response['Content-Length'] = file.size\n return response\n","repo_name":"dwiest/django-demos","sub_path":"dwiest/django/demos/file/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"41168558057","text":"import torch\nimport torch.nn as nn\n\n\nclass MaskNet(nn.Module):\n def __init__(self):\n super(MaskNet, self).__init__()\n self.features = darknet53()\n self.bbox = nn.Sequential(\n nn.Conv2d(1024, 5, kernel_size=3, padding=1, stride=1),\n nn.BatchNorm2d(5),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Sigmoid()\n )\n\n def forward(self, image: torch.Tensor):\n features = self.features(image)\n out = self.bbox(features).flatten(start_dim=1)\n mask = out[:, 0]\n bbox = out[:, 1:] * image.size(2)\n bbox[:, :2] = bbox[:, :2] - bbox[:, 2:] / 2\n return bbox, mask\n\n\ndef darknet_conv(in_channels, out_channels, kernel_size=3, padding=1, stride=1):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.LeakyReLU(inplace=True)\n )\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_channels):\n super(Bottleneck, self).__init__()\n out_channels = in_channels // 2\n self.layer1 = darknet_conv(in_channels, out_channels, kernel_size=1, padding=0)\n self.layer2 = darknet_conv(out_channels, in_channels)\n\n def forward(self, x):\n return self.layer2(self.layer1(x)) + x\n\n\ndef darknet_residual(in_channels, n_residual):\n return nn.Sequential(*[Bottleneck(in_channels) for _ in range(n_residual)])\n\n\ndef darknet53():\n return nn.Sequential(\n darknet_conv(3, 32),\n darknet_conv(32, 64, stride=2),\n darknet_residual(64, 1),\n darknet_conv(64, 128, stride=2),\n darknet_residual(128, 2),\n darknet_conv(128, 256, stride=2),\n darknet_residual(256, 8),\n darknet_conv(256, 512, stride=2),\n darknet_residual(512, 8),\n darknet_conv(512, 1024, stride=2),\n darknet_residual(1024, 4)\n )\n","repo_name":"benpili/Mask-Prediction","sub_path":"src/masknet.py","file_name":"masknet.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26833206518","text":"import sqlite3\nfrom typing import List\n\nfrom malory.classes.attribute import Attribute\nfrom settings import DB_LOCATION\n\n\ndef get_attribute(name: str) -> 
Attribute:\n \"\"\"Returns an attribute based on a name, raising an AttributeError if there is no matching attribute\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n c = conn.cursor()\n c.execute(\"SELECT * FROM attrs WHERE name=?\", (name,))\n tup = c.fetchone()\n if not tup:\n raise AttributeError(f\"No attribute named {name}.\")\n idx, name, desc = tup\n return Attribute(name, desc, idx)\n\n\ndef get_all_attributes() -> List[Attribute]:\n \"\"\"Returns a list of all attributes\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n c = conn.cursor()\n c.execute(\"SELECT * FROM attrs\")\n lst = c.fetchall()\n return [Attribute(name, desc, idx) for (idx, name, desc) in lst]\n","repo_name":"ItayElf/Malory-backend","sub_path":"malory/orm/attribute_orm.py","file_name":"attribute_orm.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2302370310","text":"import pandas as pd\nimport sys, os \nsys.path.insert(0, os.path.abspath('.'))\n# from main import importer, display\n# importer(['S', 'results2df'], globals())\n\nfrom automatize.main import display\nfrom automatize.results import results2df\n\nimport argparse\n\ndef parse_args():\n \"\"\"[This is a function used to parse command line arguments]\n\n Returns:\n args ([object]): [Parse parameter object to get parse object]\n \"\"\"\n parse = argparse.ArgumentParser(description='Check and print results as human readable')\n parse.add_argument('results-path', type=str, help='path for the results folder')\n \n parse.add_argument('-f', '--method', type=str, default='*', help='method folder')\n parse.add_argument('-m', '--modelfolder', type=str, default='model', help='model folder')\n\n args = parse.parse_args()\n config = vars(args)\n return config\n \nconfig = parse_args()\n#print(config)\n\nresults_path = config[\"results-path\"]\nmethod = config[\"method\"]\nmodelfolder = config[\"modelfolder\"]\n\ndirr = os.path.join(results_path)\n#coringa = \"\"\n\ndf = results2df(dirr, '', method, modelfolder=modelfolder)\ndisplay(df)\n\n# with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also\n# print(df)\n","repo_name":"ttportela/automatize","sub_path":"automatize/scripts/MAT-PrintResults.py","file_name":"MAT-PrintResults.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73271664873","text":"from django.db import models\nfrom profiles.models import UserProfile\nfrom django_countries.fields import CountryField\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass Tiers(models.Model):\n \"\"\"\n Tiers model for subscription\n \"\"\"\n user = models.ForeignKey(\n UserProfile, on_delete=models.SET_NULL,\n null=True, blank=True, related_name='tier')\n tier = models.BooleanField(default=False)\n\n class Meta:\n ordering = ['id']\n\n def __str__(self):\n _id = str(self.user)\n return _id\n\n\nclass Snack(models.Model):\n f_name = models.CharField(max_length=50, null=False, blank=False)\n l_name = models.CharField(\n max_length=50, null=True, blank=True)\n email = models.EmailField(max_length=254, null=False, blank=False)\n country = CountryField(\n blank_label='Country', null=False, blank=False, default='Country')\n county = models.CharField(max_length=80, null=True, blank=True)\n town_or_city = models.CharField(max_length=40, null=False, 
blank=False)\n    street_address1 = models.CharField(max_length=80, null=False, blank=False)\n    street_address2 = models.CharField(max_length=80, null=True, blank=True)\n    postcode = models.CharField(max_length=20, null=True, blank=True)\n    date = models.DateTimeField(auto_now_add=True)\n    snack_qty = models.IntegerField(null=False, default=0)\n    total = models.IntegerField(null=False, default=0)\n    snack_data = models.TextField(null=True, blank=True, default='')\n    stripe_pid = models.CharField(\n        max_length=254, null=True, blank=True, default='')\n\n    class Meta:\n        ordering = ['id']\n\n    def __str__(self):\n        return self.f_name\n\n\n@receiver(post_save, sender=UserProfile)\ndef create_or_update_user_profile(sender, instance, created, **kwargs):\n    \"\"\"\n    Create or update the user profile\n    \"\"\"\n    if created:\n        Tiers.objects.create(user=instance)\n","repo_name":"Code-Institute-Submissions/thiagoluizfb-tag_albums","sub_path":"subscription/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9664421311","text":"from flask import Flask, request, url_for, redirect, render_template\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask import *\r\napp=Flask(__name__)\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///students.sqlite3'\r\napp.config['SECRET_KEY'] = \"random string\"\r\n\r\ndb = SQLAlchemy(app)\r\n\r\nclass Blog(db.Model):\r\n    user = db.Column(db.String(50),nullable = False,primary_key=True)\r\n    post = db.Column(db.String(100),nullable =False)\r\n    \r\n    def __init__(self,user,post):\r\n        self.user = user\r\n        self.post = post\r\n\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n    content = Blog.query.all()\r\n    return render_template(\"home.html\",content=content)\r\n\r\n@app.route('/add',methods = ['POST'])\r\ndef add():\r\n    user = request.form.get('user')\r\n    post = request.form.get('post')\r\n    entry = Blog(user = user,post=post)\r\n    db.session.add(entry)\r\n    db.session.commit()\r\n    return redirect(\"/\")\r\n\r\n@app.route('/delete/<content_user>')\r\ndef delete(content_user):\r\n    content=Blog.query.get(content_user)\r\n    if not content:\r\n        return redirect(\"/\")\r\n    db.session.delete(content)\r\n    db.session.commit()\r\n    return redirect(\"/\")\r\n\r\nif __name__ == '__main__':\r\n    db.create_all()\r\n    app.run(debug=True)","repo_name":"abhishekpatil10/Blog-Website","sub_path":"blog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17304796207","text":"import json\nimport os\nimport sys\nimport urllib.parse\nimport urllib.request\n\nclass Config:\n    ''' simple configuration loader '''\n    def __init__(self, path):\n        self.path = path\n        self.load()\n\n    def get(self, element, scope = 'access_token'):\n        if element not in self.config[scope]:\n            return None\n        return self.config[scope][element]\n\n    def load(self):\n        try:\n            with open(self.path, 'r') as f:\n                self.config = json.load(f)\n        except:\n            traceback = sys.exc_info()[2]\n            raise Exception('no configuration found; you can generate one with tools/generator.py.').with_traceback(traceback)\n","repo_name":"loveratory/tootStdlibPy3","sub_path":"tools/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"28343103604","text":"from flask import Flask, request, jsonify, url_for,
Blueprint\nfrom api.app.incident.controller import create_incident,get_common,get_owner_incidents,delete_incident,modify_incident, get_all_particular\nfrom api.app.user.controller import get_user_by_id\nfrom flask_jwt_extended import jwt_required\nfrom flask_jwt_extended import get_jwt_identity\nfrom api.models.index import Incident\n\nincidents=Blueprint('incidents',__name__)\n\n#route to create common incidents\n@incidents.route('/common/<community_id>',methods=['POST'])\n@jwt_required()\ndef create_common_incidents(community_id):\n    body=request.get_json()\n    user_id=get_jwt_identity()\n    new_incident=create_incident(body,community_id,user_id[\"id\"], common=True)\n    if new_incident is None:\n        return jsonify('Internal server error'),500\n    elif new_incident==False:\n        return jsonify('Bad Request'),400\n    else:\n        return jsonify(new_incident),201\n\n#route to create particular incidents\n@incidents.route('/owner/<community_id>',methods=['POST'])\n@jwt_required()\ndef create_owner_incidents(community_id):\n    body=request.get_json()\n    user_id=get_jwt_identity()\n    new_incident=create_incident(body,community_id,user_id[\"id\"], common=False)\n    if new_incident is None:\n        return jsonify('Internal server error'),500\n    elif new_incident==False:\n        return jsonify('Bad Request'),400\n    else:\n        return jsonify(new_incident),201\n\n#route to get particular incidents\n@incidents.route('/particular',methods=['GET'])\n@jwt_required()\ndef get_particular_incidents():\n    user_id=get_jwt_identity()\n    user=get_user_by_id(user_id[\"id\"])\n    user=user.serialize()\n    incidents=get_owner_incidents(user[\"id\"])\n    return incidents\n\n#route to get all common incidents\n@incidents.route('/common',methods=['GET'])\ndef get_common_incidents():\n    return get_common()\n\n#route to get all particular incidents\n@incidents.route('/allParticular',methods=['GET'])\ndef get_allParticular_incidents():\n    return get_all_particular()\n    \n#route to delete incidents\n@incidents.route('/<incident_id>',methods=['DELETE'])\n@jwt_required()\ndef delete(incident_id):\n    user_id=get_jwt_identity()\n    user=get_user_by_id(user_id[\"id\"])\n    user=user.serialize()\n    return delete_incident(user[\"role\"][\"role_id\"],incident_id)\n\n\n#route to update incidents\n@incidents.route('/<incident_id>',methods=['PUT'])\n@jwt_required()\ndef update(incident_id):\n    user_id=get_jwt_identity()\n    user=get_user_by_id(user_id[\"id\"])\n    user=user.serialize()\n    body=request.get_json()\n    return modify_incident(user[\"role\"][\"role_id\"],incident_id,body)","repo_name":"Sergiogtz10/Gestion-de-comunidades","sub_path":"src/api/app/incident/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"36639432137","text":"from .halo_model import HaloModel\nfrom hmf import get_hmf\n\n\ndef get_halomodel(\n    required_attrs,\n    get_label=True,\n    kls=HaloModel,\n    fast_kwargs={\n        \"transfer_fit\": \"BBKS\",\n        \"lnk_min\": -4,\n        \"lnk_max\": 2,\n        \"dlnk\": 1,\n        \"Mmin\": 13,\n        \"dlog10m\": 0.5,\n        \"rmin\": 10,\n        \"rmax\": 20,\n        \"rnum\": 4,\n        \"halo_exclusion\": \"None\",\n        \"nonlinear\": False,\n        \"scale_dependent_bias\": False,\n        \"hod_model\": \"Zehavi05\",\n    },\n    **kwargs\n):\n    return get_hmf(required_attrs, get_label, kls, fast_kwargs, **kwargs)\n","repo_name":"jensenlawrence/CMASS-WISE-HOD","sub_path":"halomod/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15649367521","text":"import fix_yahoo_finance as yf \nimport datetime\n\ndef recent_quote(sym):\n    start_date = datetime.datetime.today() - datetime.timedelta(days=1000)\n    start_date = start_date.strftime('%Y-%m-%d')\n    \n    end_date = datetime.datetime.today().strftime('%Y-%m-%d')\n    data = yf.download(sym, start_date, end_date)\n    \n    # print data.tail()\n\n    print( data )\n\n    l = data[\"Close\"].tolist()\n    return l[-1]\n    \n\n##########################\n\nprint (\"USO:\" , recent_quote(\"USO\"))\n\n# print (\"IBM:\" , recent_quote(\"IBM\"))\n# print (\"GOOG:\" , recent_quote(\"GOOG\"))\n","repo_name":"GGTechGuru/myBin","sub_path":"PYTHON/InvestingWizards/BackTesting/StockAlert.py","file_name":"StockAlert.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22058677099","text":"\"Data loading pipeline for structured data support. Loads from pandas DataFrame\"\nfrom ..torch_core import *\nfrom .transform import *\nfrom ..basic_data import *\nfrom ..data_block import *\nfrom ..basic_train import *\nfrom .models import *\nfrom pandas.api.types import is_numeric_dtype, is_categorical_dtype\n\n__all__ = ['TabularDataBunch', 'TabularLine', 'TabularList', 'TabularProcessor', 'get_tabular_learner']\n\nOptTabTfms = Optional[Collection[TabularProc]]\n\ndef def_emb_sz(df, n, sz_dict):\n    col = df[n]\n    n_cat = len(col.cat.categories)+1 # extra cat for NA\n    sz = sz_dict.get(n, min(50, (n_cat//2)+1)) # rule of thumb\n    return n_cat,sz\n\ndef _text2html_table(items:Collection[Collection[str]], widths:Collection[int])->str:\n    html_code = f\"<table>\"\n    for w in widths: html_code += f\"  <col width='{w}%'>\"\n    for line in items:\n        html_code += \"  <tr>\\n\"\n        html_code += \"\\n\".join([f\"    <td>{o}</td>\" for o in line if len(o) >= 1])\n        html_code += \"\\n  </tr>\\n\"\n    return html_code + \"</table>
\\n\"\n\nclass TabularLine(ItemBase):\n def __init__(self, cats, conts, classes, names):\n self.cats,self.conts,self.classes,self.names = cats,conts,classes,names\n self.data = [tensor(cats), tensor(conts)]\n\n def __str__(self):\n res = ''\n for c, n in zip(self.cats, self.names[:len(self.cats)]):\n res += f\"{n} {(self.classes[n][c-1] if c != 0 else 'nan')}\\n\"\n for c,n in zip(self.conts, self.names[len(self.cats):]):\n res += f'{n} {c:.4f}\\n'\n return res\n\n def show_batch(self, idxs:Collection[int], rows:int, ds:Dataset, **kwargs)->None:\n \"Show the data in `idxs` on a few `rows` from `ds`.\"\n from IPython.display import display, HTML\n x,y = ds[0]\n items = [x.names]\n for i in idxs[:rows]:\n x,y = ds[i]\n res = []\n for c, n in zip(x.cats, self.names[:len(x.cats)]):\n res.append(str(x.classes[n][c-1]) if c != 0 else 'nan')\n res += [f'{c:.4f}' for c in x.conts]\n items.append(res)\n display(HTML(_text2html_table(items, [10] * len(items[0]))))\n\nclass TabularList(ItemList):\n def __init__(self, items:Iterator, cat_names:OptStrList=None, cont_names:OptStrList=None,\n processor=None, procs=None, **kwargs):\n #dataframe is in xtra, items is just a range of index\n if processor is None: processor=TabularProcessor(procs)\n super().__init__(range_of(items), processor=processor, **kwargs)\n self.cat_names,self.cont_names = cat_names,cont_names\n\n @classmethod\n def from_df(cls, df:DataFrame, cat_names:OptStrList=None, cont_names:OptStrList=None, **kwargs)->'ItemList':\n \"Get the list of inputs in the `col` of `path/csv_name`.\"\n return cls(items=range(len(df)), cat_names=cat_names, cont_names=cont_names, xtra=df, **kwargs)\n\n def new(self, items:Iterator, **kwargs)->'TabularList':\n return super().new(items=items, cat_names=self.cat_names, cont_names=self.cont_names, **kwargs)\n\n def get(self, o):\n return TabularLine(self.codes[o], self.conts[o], self.classes, self.col_names)\n\n def get_emb_szs(self, sz_dict): \n \"Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`.\"\n return [def_emb_sz(self.xtra, n, sz_dict) for n in self.cat_names]\n\nclass TabularProcessor(PreProcessor):\n def __init__(self, procs=None):\n self.procs = listify(procs)\n\n def process_one(self, item):\n df = pd.DataFrame([item,item])\n for proc in self.procs: proc(df, test=True)\n codes = np.stack([c.cat.codes.values for n,c in df[self.cat_names].items()], 1).astype(np.int64) + 1\n conts = np.stack([c.astype('float32').values for n,c in df[self.cont_names].items()], 1)\n classes = None\n col_names = list(df[self.cat_names].columns.values) + list(df[self.cont_names].columns.values)\n return TabularLine(codes[0], conts[0], classes, col_names)\n \n def process(self, ds):\n for i,proc in enumerate(self.procs):\n if isinstance(proc, TabularProc): proc(ds.xtra, test=True)\n else:\n #cat and cont names may have been changed by transform (like Fill_NA)\n proc = proc(ds.cat_names, ds.cont_names)\n proc(ds.xtra)\n ds.cat_names,ds.cont_names = proc.cat_names,proc.cont_names\n self.procs[i] = proc\n self.cat_names,self.cont_names = ds.cat_names,ds.cont_names\n ds.codes = np.stack([c.cat.codes.values for n,c in ds.xtra[ds.cat_names].items()], 1).astype(np.int64) + 1\n ds.conts = np.stack([c.astype('float32').values for n,c in ds.xtra[ds.cont_names].items()], 1)\n ds.classes = {n:c.cat.categories.values for n,c in ds.xtra[ds.cat_names].items()}\n ds.col_names = list(ds.xtra[ds.cat_names].columns.values) + list(ds.xtra[ds.cont_names].columns.values)\n\nclass 
TabularDataBunch(DataBunch):\n \"Create a `DataBunch` suitable for tabular data.\"\n @classmethod\n def from_df(cls, path, df:DataFrame, dep_var:str, valid_idx:Collection[int], procs:OptTabTfms=None,\n cat_names:OptStrList=None, cont_names:OptStrList=None, classes:Collection=None, **kwargs)->DataBunch:\n \"Create a `DataBunch` from train/valid/test dataframes.\"\n cat_names = ifnone(cat_names, [])\n cont_names = ifnone(cont_names, list(set(df)-set(cat_names)-{dep_var}))\n procs = listify(procs)\n return (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)\n .split_by_idx(valid_idx)\n .label_from_df(cols=dep_var, classes=None)\n .databunch())\n\ndef get_tabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None,\n ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **kwargs):\n \"Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params.\"\n emb_szs = data.get_emb_szs(ifnone(emb_szs, {}))\n model = TabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop,\n y_range=y_range, use_bn=use_bn)\n return Learner(data, model, metrics=metrics, **kwargs)\n\n","repo_name":"ameyyadav09/fastai","sub_path":"fastai/tabular/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"22899417764","text":"import pandas as pd\nfrom datetime import datetime, timedelta\n\n\nclass CSVGenerator():\n @staticmethod\n def print_csv_sheet(writer, sheet_name, print_date, start_date, i, info, column_list):\n if print_date == start_date:\n pd.DataFrame({print_date.strftime('%Y-%m-%d'): [print_date.strftime('%Y-%m-%d')]}).to_excel(writer, sheet_name=sheet_name, float_format=\"%.2f\", startcol=2,startrow=1, index=False)\n info.sort_values(by=['digital_service_id']).loc[info['date'].astype(str) == print_date.strftime('%Y-%m-%d')][['digital_service_id'] + column_list].to_excel(writer, sheet_name=sheet_name, float_format=\"%.2f\", startcol=1,startrow=2, index=False)\n\n else:\n pd.DataFrame({print_date.strftime('%Y-%m-%d'): [print_date.strftime('%Y-%m-%d')]}).to_excel(writer, sheet_name=sheet_name, float_format=\"%.2f\", startcol=2 + len(column_list) * i,startrow=1, index=False)\n info.sort_values(by=['digital_service_id']).loc[info['date'].astype(str) == print_date.strftime('%Y-%m-%d')][column_list].to_excel(writer, sheet_name=sheet_name, float_format=\"%.2f\", startcol=2 + len(column_list) * i,startrow=2, index=False)\n\n @staticmethod\n def create_payout_csv(payout_info, payout_info_date, spender_info, holding_info, buying_info, prior_payout_info, start_date, end_date, write_directory):\n s = start_date\n e = end_date - timedelta(days=1)\n dates = pd.date_range(s, e).tolist()\n\n payout_info.to_csv(path_or_buf=write_directory + \"/payout.csv\", float_format=\"%.2f\")\n\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter(write_directory + \"/payout.xlsx\", engine='xlsxwriter')\n\n payout_info.loc[:, ~payout_info.columns.isin(['email', 'contact'])].to_excel(writer, sheet_name='Summary', float_format=\"%.2f\")\n prior_payout_info.to_excel(writer, sheet_name='Prior Lifetime KRE Payouts', float_format=\"%.2f\")\n\n for i, d in enumerate(dates):\n CSVGenerator.print_csv_sheet(writer, 'Payouts', d, s, i, payout_info_date, ['spend_payout', 'hold_payout', 'buy_payout', 
'total_payout'])\n            CSVGenerator.print_csv_sheet(writer, 'Spend', d, s, i, spender_info, ['das', 's1_9', 's10_99', 's100_999', 's1000'])\n            CSVGenerator.print_csv_sheet(writer, 'Hold', d, s, i, holding_info, ['holdings'])\n            CSVGenerator.print_csv_sheet(writer, 'Buy', d, s, i, buying_info, ['lifetime_earns_minus_spends', 'prior_buy_demand_payouts', 'min_balance', 'buy_demand'])\n\n        writer.save()\n","repo_name":"kinecosystem/kin-rewards-engine","sub_path":"airflow/dags/jobs/csvwriter.py","file_name":"csvwriter.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"22782445899","text":"import logging\nfrom typing import Dict, List\n\nimport streamlink\nfrom streamlink.stream import Stream\n\n\nclass LinksError(Exception):\n    def __init__(self, msg, thrown=None):\n        super().__init__(msg)\n        self.thrown = thrown\n\n\ndef first_desired_in_list(given: List[str], desired: List[str]):\n    if given is None or desired is None:\n        return None\n    for item in desired:\n        if item in given:\n            return item\n\n\nclass LinksService:\n\n    log = logging.getLogger()\n\n    def __init__(self):\n        pass\n\n    def is_url(self, url: str) -> bool:\n        return \"https://\" in url\n\n    def find_stream(\n        self, url: str, qualities: List[str] = [\"best\", \"good\", \"720p\", \"480p\"]\n    ) -> str:\n        try:\n            streams: Dict[str, Stream] = streamlink.streams(url)\n            if streams is not None:\n                available_qualities = streams.keys()\n\n                if available_qualities is None or len(available_qualities) == 0:\n                    raise LinksError(f\"No stream found for {url}\")\n\n                quality = first_desired_in_list(available_qualities, qualities)\n\n                if quality is None:\n                    self.log.warning(\n                        f\"Missing any value of {qualities} in {available_qualities}\"\n                    )\n                    quality = next(iter(available_qualities)) # select any quality (dict_keys is not subscriptable)\n\n                return streams[quality].url\n\n        except streamlink.NoPluginError:\n            raise LinksError(\"Provided URL is not supported\")\n","repo_name":"ron96G/discordbot","sub_path":"discordbot/audio/links/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71144897512","text":"# n points lie on a number line\n# process m queries\n# each query asks for the number of points in the interval x~y\nfrom bisect import bisect_left\nn = int(input())\nidx = [0]*n\nfor i in range(n):\n    idx[i]=int(input())\n\nm = int(input())\nfor i in range(m):\n    x,y = map(int,input().split())\n    idx.append(x)\n    idx.append(y)\n\nidx.sort()\n\ndedup = sorted(set(idx)) # sorted list with duplicates removed\n\ndef get_idx(x):\n    return bisect_left(dedup,x)\n","repo_name":"eunjee/algorithmStudy","sub_path":"prefixSum/좌표압축.py","file_name":"좌표압축.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"39381701529","text":"import folium\nimport shapely.geometry\nfrom matplotlib import pyplot as plt\nimport json\nimport numpy as np\n\ndef convert_bounds(bbox, invert_y=False):\n    x1, y1, x2, y2 = bbox\n    if invert_y:\n        y1, y2 = y2, y1\n    return ((y1, x1), (y2, x2))\n\ndef visualize(meta_file_name, show_query=True):\n    map = folium.Map()\n    with open(meta_file_name, encoding = 'utf-8') as f:\n        data = json.loads(f.read())\n    for i, im in enumerate(data):\n        bbox = im.get('bbox')\n        if show_query:\n            folium.GeoJson(\n                shapely.geometry.box(*bbox),\n                style_function=lambda x: dict(fill=True, weight=2, opacity=0.8, color=\"blue\"),\n                name=f\"Query_{i}\",\n            ).add_to(map)\n\n            
image_bounds = convert_bounds(bbox, invert_y=True)\n img = plt.imread(im.get('filename'), format='jpg')\n img_ovr = folium.raster_layers.ImageOverlay(\n name=f\"NDVI_{i}\",\n image=img,\n bounds=image_bounds,\n opacity=0.9,\n interactive=True,\n cross_origin=False,\n zindex=1,\n )\n img_ovr.add_to(map)\n map.fit_bounds(bounds=image_bounds)\n \n folium.LayerControl().add_to(map)\n return map\n\ndef show_legend(cmap_name):\n fig, ax = plt.subplots(figsize=(20, 0.5))\n cmap=plt.cm.get_cmap(cmap_name)\n values = np.linspace(-1, 1, 256).reshape(1, -1)\n img = ax.imshow(values, cmap=cmap, aspect='auto')\n ax.set_axis_off()\n cbar = plt.colorbar(img, orientation='horizontal', pad=0.05, aspect=300)\n plt.show();","repo_name":"ArmHPC/Scalable-EO-system","sub_path":"usage/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4557865280","text":"#!/usr/bin/env python3\n# Developed by Junyi Ma, Xieyuanli Chen, and Jun Zhang\n# This file is covered by the LICENSE file in the root of the project OverlapTransformer:\n# https://github.com/haomo-ai/OverlapTransformer/\n# Brief: train OverlapTransformer with Haomo dataset\n\n\nimport os\nimport sys\np = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))\nif p not in sys.path:\n sys.path.append(p)\nsys.path.append('../tools/')\nsys.path.append('../modules/')\n \nimport torch\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nfrom modules.overlap_transformer_haomo import featureExtracter\nfrom tools.read_samples_haomo import read_one_batch_pos_neg\nfrom tools.read_samples_haomo import read_one_need_from_seq\nnp.set_printoptions(threshold=sys.maxsize)\nimport modules.loss as PNV_loss\nfrom tools.utils.utils import *\nimport yaml\n\nclass trainHandler():\n def __init__(self, height=32, width=900, channels=1, norm_layer=None, batch_size=6, lr = 0.001,\n data_root_folder=None, train_set=None,use_transformer=True,training_seqs=None):\n super(trainHandler, self).__init__()\n\n self.height = height\n self.width = width\n self.channels = channels\n self.norm_layer = norm_layer\n self.use_transformer = use_transformer\n self.batch_size = batch_size\n self.learning_rate = lr\n self.train_set = train_set\n self.training_seqs = training_seqs\n self.data_root_folder = data_root_folder\n\n self.amodel = featureExtracter(channels=self.channels, use_transformer=self.use_transformer)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.amodel.to(self.device)\n print(self.amodel)\n self.parameters = self.amodel.parameters()\n self.optimizer = torch.optim.Adam(self.parameters, self.learning_rate)\n # self.optimizer = torch.optim.SGD(self.parameters, lr=self.learning_rate, momentum=0.9)\n # self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.98)\n self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=5, gamma=0.9)\n\n self.train_set_imgf1_imgf2_overlap = np.load(self.train_set)\n\n self.resume = True\n self.save_name = \"../weights/pretrained_overlap_transformer_haomo.pth.tar\"\n self.overlap_thresh = 0.3\n\n def train(self):\n epochs = 100\n\n if self.resume:\n print(\"Resuming from \", self.save_name)\n checkpoint = torch.load(self.save_name)\n starting_epoch = checkpoint['epoch']\n self.amodel.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n else:\n print(\"Training From Scratch ...\" )\n 
starting_epoch = 0\n\n writer1 = SummaryWriter(comment=\"LR_0.xxxx_haomo\")\n\n\n for i in range(starting_epoch+1, epochs):\n\n self.train_imgf1 = self.train_set_imgf1_imgf2_overlap[:, 0]\n self.train_imgf2 = self.train_set_imgf1_imgf2_overlap[:, 1]\n self.train_dir1 = np.zeros((len(self.train_imgf1),)) # to use the same form as KITTI\n self.train_dir2 = np.zeros((len(self.train_imgf2),))\n self.train_overlap = self.train_set_imgf1_imgf2_overlap[:, 2].astype(float)\n\n\n print(\"=======================================================================\\n\\n\\n\")\n print(\"total pairs: \", len(self.train_imgf1))\n print(\"\\n\\n\\n=======================================================================\")\n\n loss_each_epoch = 0\n used_num = 0\n loss_max= 0\n\n used_list_f1 = []\n used_list_dir1 = []\n\n for j in range(len(self.train_imgf1)):\n f1_index = self.train_imgf1[j]\n dir1_index = self.train_dir1[j]\n continue_flag = False\n for iddd in range(len(used_list_f1)):\n if f1_index==used_list_f1[iddd] and dir1_index==used_list_dir1[iddd]:\n continue_flag = True\n else:\n used_list_f1.append(f1_index)\n used_list_dir1.append(dir1_index)\n\n if continue_flag:\n continue\n\n current_batch = read_one_need_from_seq(self.data_root_folder, f1_index)\n\n sample_batch, sample_truth, pos_num, neg_num = read_one_batch_pos_neg \\\n (self.data_root_folder, f1_index, dir1_index, self.train_imgf1, self.train_imgf2, self.train_dir1, self.train_dir2,\n self.train_overlap, self.overlap_thresh)\n\n use_pos_num = 6\n use_neg_num = 6\n\n if pos_num >= use_pos_num and neg_num>=use_neg_num:\n sample_batch = torch.cat((sample_batch[0:use_pos_num, :, :, :], sample_batch[pos_num:pos_num+use_neg_num, :, :, :]), dim=0)\n sample_truth = torch.cat((sample_truth[0:use_pos_num, :], sample_truth[pos_num:pos_num+use_neg_num, :]), dim=0)\n pos_num = use_pos_num\n neg_num = use_neg_num\n elif pos_num >= use_pos_num:\n sample_batch = torch.cat((sample_batch[0:use_pos_num, :, :, :], sample_batch[pos_num:, :, :, :]), dim=0)\n sample_truth = torch.cat((sample_truth[0:use_pos_num, :], sample_truth[pos_num:, :]), dim=0)\n pos_num = use_pos_num\n elif neg_num >= use_neg_num:\n sample_batch = sample_batch[0:pos_num+use_neg_num,:,:,:]\n sample_truth = sample_truth[0:pos_num+use_neg_num, :]\n neg_num = use_neg_num\n\n\n if neg_num == 0 or pos_num == 0:\n continue\n\n input_batch = torch.cat((current_batch, sample_batch), dim=0)\n\n input_batch.requires_grad_(True)\n self.amodel.train()\n self.optimizer.zero_grad()\n\n global_des = self.amodel(input_batch)\n\n o1, o2, o3 = torch.split(\n global_des, [1, pos_num, neg_num], dim=0)\n\n MARGIN_1 = 0.5\n loss = PNV_loss.triplet_loss(o1, o2, o3, MARGIN_1, lazy=False)\n # loss = PNV_loss.triplet_loss_inv(o1, o2, o3, MARGIN_1, lazy=False, use_min=True)\n\n loss.backward()\n self.optimizer.step()\n print(str(used_num), loss)\n\n if torch.isnan(loss):\n print(\"Something error ...\")\n print(pos_num)\n print(neg_num)\n\n loss_each_epoch = loss_each_epoch + loss.item()\n used_num = used_num + 1\n\n print(\"epoch {} loss {}\".format(i, loss_each_epoch/used_num))\n print(\"saving weights ...\")\n self.scheduler.step()\n\n self.save_name = \"../weights/pretrained_overlap_transformer_haomo\"+str(i)+\".pth.tar\"\n\n torch.save({\n 'epoch': i,\n 'state_dict': self.amodel.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n },\n self.save_name)\n\n print(\"Model Saved As \" + 'pretrained_overlap_transformer_haomo' + str(i) + '.pth.tar')\n\n writer1.add_scalar(\"loss\", loss_each_epoch / used_num, 
global_step=i)\n\n\n\nif __name__ == '__main__':\n # load config ================================================================\n config_filename = '../config/config_haomo.yml'\n config = yaml.safe_load(open(config_filename))\n data_root_folder = config[\"file_root\"][\"data_root_folder\"]\n triplets_for_training = config[\"file_root\"][\"triplets_for_training\"]\n training_seqs = config[\"training_config\"][\"training_seqs\"]\n # ============================================================================\n\n train_handler = trainHandler(height=32, width=900, channels=1, norm_layer=None, batch_size=16, lr=0.000001,\n data_root_folder=data_root_folder, train_set=triplets_for_training,\n use_transformer=True)\n\n train_handler.train()\n","repo_name":"haomo-ai/OverlapTransformer","sub_path":"train/training_overlap_transformer_haomo.py","file_name":"training_overlap_transformer_haomo.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"72"} +{"seq_id":"72260237352","text":"import os\nfrom dag_loader import DagLoader, DagSampler\nfrom dct_policy import dct_policy\nfrom baseline_policies import random_policy, max_degree_policy, opt_single_policy, coloring_policy, greedy_minmax_policy, greedy_entropy_policy\nimport numpy as np\nfrom tqdm import tqdm\nfrom p_tqdm import p_map\nfrom time import time\nfrom multiprocessing import cpu_count\nimport random\n\n\nALG_DICT = {\n 'dct': dct_policy,\n 'random': random_policy,\n 'max_degree': max_degree_policy,\n 'opt_single': opt_single_policy,\n 'coloring': coloring_policy,\n 'greedy_minmax': greedy_minmax_policy,\n 'greedy_entropy': greedy_entropy_policy\n}\n\n\nclass AlgRunner:\n def __init__(self, alg: str, dag_loader: DagLoader):\n self.alg = alg\n self.dag_loader = dag_loader\n\n @property\n def alg_folder(self):\n return os.path.join(self.dag_loader.dag_folder, 'results', f'alg={self.alg}')\n\n def get_alg_results(self, overwrite=False, validate=True, multithread=True):\n random.seed(9859787)\n result_filename = os.path.join(self.alg_folder, f'num_nodes_list.npy')\n time_result_filename = os.path.join(self.alg_folder, f'times_list.npy')\n print(self.alg_folder)\n if overwrite or not os.path.exists(self.alg_folder):\n dags = self.dag_loader.get_dags(overwrite=overwrite)\n os.makedirs(self.alg_folder, exist_ok=True)\n\n def run_alg(tup):\n ix, dag = tup\n\n start = time()\n intervened_nodes = ALG_DICT[self.alg](dag)\n time_taken = time() - start\n\n if validate:\n cpdag = dag.interventional_cpdag([{node} for node in intervened_nodes], cpdag=dag.cpdag())\n if cpdag.num_edges > 0:\n print(f\"**************** BROKEN\")\n print(f\"ix={ix}, alg={self.alg}, num intervened = {len(intervened_nodes)}, num edges={cpdag.num_edges}\")\n raise RuntimeError\n # write_list(intervened_nodes, os.path.join(self.alg_folder, f'nodes{ix}.txt'))\n return len(intervened_nodes), time_taken\n\n if multithread:\n print(f'[AlgRunner.get_alg_results] Running {self.alg} on {cpu_count()-1} cores')\n num_nodes_list, times_list = zip(*p_map(run_alg, list(enumerate(dags))))\n else:\n print(f'[AlgRunner.get_alg_results] Running {self.alg} on 1 core')\n num_nodes_list, times_list = zip(*list(tqdm((run_alg(dag) for dag in dags), total=len(dags))))\n\n np.save(result_filename, np.array(num_nodes_list))\n np.save(time_result_filename, np.array(times_list))\n return np.array(num_nodes_list), np.array(times_list)\n else:\n return np.load(result_filename), np.load(time_result_filename)\n\n def 
specific_dag(self, ix, verbose=False):\n dag = self.dag_loader.get_dags()[ix]\n intervened_nodes = ALG_DICT[self.alg](dag, verbose=verbose)\n print(intervened_nodes)\n cpdag = dag.interventional_cpdag([{node} for node in intervened_nodes], cpdag=dag.cpdag())\n cpdag.to_complete_pdag()\n print(cpdag.edges)\n\n\nif __name__ == '__main__':\n import random\n\n # nnodes = 18\n nnodes = 100\n random.seed(8128)\n # dl = DagLoader(nnodes, 10, DagSampler.TREE_PLUS, dict(e_min=2, e_max=5), comparable_edges=True)\n dl = DagLoader(nnodes, 10, DagSampler.HAIRBALL_PLUS, dict(num_layers=5, degree=3, e_min=2, e_max=5), comparable_edges=True)\n dl.get_dags(overwrite=True)\n ar_random = AlgRunner('random', dl)\n ar_dct = AlgRunner('dct', dl)\n\n RUN_ALL = True\n if RUN_ALL:\n results_random = ar_random.get_alg_results(overwrite=True)\n results_dct = ar_dct.get_alg_results(overwrite=True)\n clique_sizes = dl.max_clique_sizes()\n num_cliques = dl.num_cliques()\n optimal_ivs = dl.get_verification_optimal_ivs()\n bound = np.ceil(np.log2(num_cliques)) * clique_sizes + 2*optimal_ivs\n\n print(\"Number of cliques\")\n print(num_cliques)\n\n print(\"Clique sizes\")\n print(clique_sizes)\n\n print(\"Verification optimal\")\n print(optimal_ivs)\n\n print(\"Bound\")\n print(bound)\n\n print(np.where(bound < nnodes))\n above_bound = results_dct > bound\n print(np.where(above_bound))\n print(np.mean(results_random))\n print(np.mean(results_dct))\n\n # ix = 27\n # d = dl.get_dags()[ix]\n # dct = d.directed_clique_tree()\n # dcg = get_directed_clique_graph(d)\n # dct_ = LabelledMixedGraph.from_nx(dct)\n # ar_dct.specific_dag(ix, verbose=True)\n # ar_random.specific_dag(ix)\n\n\n","repo_name":"csquires/dct-policy","sub_path":"alg_runner.py","file_name":"alg_runner.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"73655250153","text":"from pathlib import Path\r\nfrom helpers import util\r\nfrom helpers.model_saver import *\r\nfrom models.config_helper import *\r\nimport json\r\n\r\nimport os\r\n\r\nbase_data_path = \"../../../../data/training_pipeline_results/\"\r\n# # single model\r\n# path_jira_dt = base_data_path + r\"[7982] 2021-11-19 19_36_46_jiraset_ml_preprocess_single_balanced\" + \"/containers\" \\\r\n# + \"/summary_Decision Tree_model_container_454821.4465781571\"\r\n# # multi model\r\n# path_jira_rf = base_data_path + r\"[7982] 2021-11-16 23_01_05_jiraset_ml_preprocess_ensemble_balanced\" + \"/containers\" \\\r\n# + \"/description_RandomForrest_model_container_454755.3625842828\"\r\n\r\ncontainer_path_string = r\"output/2022-05-13 21_59_43_jiraset_ml_preprocess_single_balanced_debug/containers\"\r\n\r\ndef files_in_dir(path: str, with_folders = False, print_files=False):\r\n full_path = os.path.abspath(path)\r\n files_names = sorted([f for f in os.listdir(full_path) if os.path.isfile(os.path.join(full_path, f)) or with_folders])\r\n file_paths = [os.path.join(path, f) for f in files_names]\r\n if print_files:\r\n print(files_names)\r\n\r\n return file_paths\r\n\r\ndef main():\r\n containers = files_in_dir(container_path_string, with_folders=True)\r\n print(containers)\r\n first = containers[0]\r\n model = LoadedModel(first)\r\n print(model)\r\n # no_json = [n for n in containers if 'json' not in n]\r\n # for c in no_json:\r\n # print_stats(c)\r\n # first = containers[2]\r\n # print(first)\r\n # print_stats(first)\r\n\r\n\r\ndef print_stats(path: str):\r\n print(\"loading: \" + path)\r\n try:\r\n container = 
util.load_pickle(path)\r\n print(f\"\\nSUCCESS\\n---\\n{path}\\n---\\n\")\r\n print(container)\r\n except:\r\n print(f\"Failure\\n---\\n{path}\\n---\\n\")\r\n\r\n\r\n\r\n\r\nclass LoadedModel:\r\n def __init__(self, path):\r\n self.debug = True\r\n files = files_in_dir(path, print_files=True)\r\n # files are sorted alphabetically if the folder is the generated one it should work based on the order\r\n self.meta = load_json(files[0])\r\n self.model = util.load_pickle(files[1])\r\n self.ordinal_encoder = util.load_pickle(files[2])\r\n self.scaler = util.load_pickle(files[3])\r\n self.standardizer = util.load_pickle(files[4])\r\n self.topic_analyzer = util.load_pickle(files[5]) # used with ml preprocessing\r\n self.vectorizers = util.load_pickle(files[6]) # used with BOW\r\n\r\n def predict(self, input):\r\n print(f\"Predicting for input\\n{input}\")\r\n\r\n def __str__(self) -> str:\r\n info = json.dumps(self.meta, indent=4, sort_keys=True) if self.debug else self.meta['variation']\r\n return f\"{info}\\n{self.model}\" \\\r\n f\"\\n{self.ordinal_encoder}\\n{self.scaler}\\n{self.standardizer}\" \\\r\n f\"\\n{self.topic_analyzer}\\n{self.vectorizers.keys()}\"\r\n\r\n\r\n\r\ndef load_json(path):\r\n with open(path, 'r') as f:\r\n return json.load(f)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"DriesMeerman/Risk-Assessment","sub_path":"code/python/training_pipeline/src/data_extractor.py","file_name":"data_extractor.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18008007851","text":"import os\nimport subprocess\nimport uuid\n\nlibdir = os.path.join(os.getcwd(), 'lib')\n\nimport warnings\nfrom astropy.utils.data import CacheMissingWarning\nwarnings.simplefilter('ignore', CacheMissingWarning)\n\nfrom astropy.io import fits\nimport boto3\nfrom datetime import date\nfrom lppDataClasses import TCE\nfrom lppDataClasses import MapInfo\nimport lppTransform as lppt\nimport numpy as np\n\nmapfilename = 'combMapDR25AugustMapDV_6574.mat'\nmapfilepath = '/tmp/combMapDR25AugustMapDV_6574.mat'\ncolumns = ['id','planetNum','sector','period','tzero','depth','dur','mes','normTLpp','rawTLpp']\n\n# Download the map file unless we already have it.\n# Lambda functions are often cached and so it's\n# possible that the map file will be available between\n# Lambda executions.\ndef download_map():\n if os.path.exists(mapfilepath):\n pass\n else:\n s3 = boto3.resource('s3')\n s3_client = boto3.client('s3')\n bkt = s3.Bucket('mast-labs-s3')\n bkt.download_file(mapfilename, mapfilepath)\n\ndef download_dvt(event):\n dvt_file_key = event['fits_s3_key']\n dvt_file_bucket = event['fits_s3_bucket']\n\n root = dvt_file_key.split('/')[-1].split('_')[0]\n s3 = boto3.resource('s3')\n s3_client = boto3.client('s3')\n bkt = s3.Bucket(dvt_file_bucket)\n bkt.download_file(dvt_file_key, '/tmp/{0}'.format(root), ExtraArgs={\"RequestPayer\": \"requester\"})\n\ndef cleanup_dvt(event):\n dvt_file_key = event['fits_s3_key']\n root = dvt_file_key.split('/')[-1].split('_')[0]\n\n if os.path.exists('/tmp/{0}'.format(root)):\n os.remove('/tmp/{0}'.format(root))\n\ndef write_results(tceDict, s3_output_bucket):\n # Write the file locally then upload to S3\n tic_id = tceDict['id']\n sector = tceDict['sector']\n planet = tceDict['planetNum']\n\n outputfilename = f\"{tic_id}_{planet}_{sector}.txt\"\n outputfilepath = f\"/tmp/{outputfilename}\"\n\n output = open(outputfilepath,'w')\n\n output.write(\"# TCE Table with 
Normalized LPP Transit Metric.\\n\")\n output.write(\"# Date: %s\\n\" % str( date.today() ))\n\n # Write the header\n for c in columns:\n output.write(\"%s \" % c)\n output.write(\"\\n\")\n\n # Write the values\n for c in columns:\n output.write(\"%s \" % str(tceDict[c]))\n output.write(\"\\n\")\n\n output.close()\n\n # Write out to S3\n s3 = boto3.resource('s3')\n s3.meta.client.upload_file(outputfilepath, s3_output_bucket, '{0}/{1}'.format(tic_id, outputfilename))\n\n # Finally, clean up the output file\n os.remove(outputfilepath)\n\ndef compute_transit_metric(event):\n planet_number = event['planet_number']\n ticid = event['ticid']\n sector = event['sector']\n\n mapInfo = MapInfo(mapfilepath)\n\n dvt_file = event['fits_s3_key']\n root = dvt_file.split('/')[-1].split('_')[0]\n\n data, header = fits.getdata('/tmp/{0}'.format(root), ext=planet_number, header=True)\n tce=TCE(ticid, planet_number)\n tce.populateFromDvExt(data, header)\n tce.sector = sector\n\n normTLpp, rawTLpp, transformedTr=lppt.computeLPPTransitMetric(tce, mapInfo)\n tce.normTLpp=normTLpp\n tce.rawTLpp = rawTLpp\n tce.transformedLppTr = transformedTr\n tceDict=tce.__dict__\n\n write_results(tceDict, event['s3_output_bucket'])\n\ndef handler(event, context):\n download_map()\n download_dvt(event)\n compute_transit_metric(event)\n cleanup_dvt(event)\n\nif __name__ == \"__main__\":\n handler('', '')\n","repo_name":"spacetelescope/lpp-transit-lambda","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17792054651","text":"import copy\nimport io\n\nimport onnx\nimport torch\nimport torch.onnx.symbolic_helper as sym_help\nimport torch.onnx.utils\nfrom onnx import numpy_helper\nfrom torch.onnx import OperatorExportTypes\n\nfrom . import module\n\ntry:\n import tensorflow as tf # noqa\n import tf2onnx\n\n TF_AND_TF2ONNX = True\nexcept ImportError:\n TF_AND_TF2ONNX = False\n\ntry:\n import torch.onnx.symbolic_registry as sym_registry # noqa\n\n SYM_REGISTRY = True\nexcept ImportError:\n from torch.onnx._internal.registration import registry # noqa\n\n SYM_REGISTRY = False\n\n\n_OPSET_VERSION = 17\n\n\ndef from_onnx(onnx_string_or_file):\n \"\"\"\n Converts an ONNX model serialized in an `onnx_string_or_file` to a CrypTen model.\n \"\"\"\n onnx_model = _load_onnx_model(onnx_string_or_file)\n return _to_crypten(onnx_model)\n\n\ndef from_pytorch(pytorch_model, dummy_input):\n \"\"\"\n Converts a PyTorch model `pytorch_model` into a CrypTen model by tracing it\n using the input `dummy_input`.\n \"\"\"\n\n # construct CrypTen model:\n f = _from_pytorch_to_bytes(pytorch_model, dummy_input)\n crypten_model = from_onnx(f)\n f.close()\n\n # set model architecture to export model back to pytorch model\n crypten_model.pytorch_model = copy.deepcopy(pytorch_model)\n\n # make sure training / eval setting is copied:\n crypten_model.train(mode=pytorch_model.training)\n return crypten_model\n\n\ndef from_tensorflow(tensorflow_graph_def, inputs, outputs):\n \"\"\"\n Function that converts Tensorflow model into CrypTen model based on\n https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py\n The model is returned in evaluation mode.\n Args:\n `tensorflow_graph_def`: Input Tensorflow GraphDef to be converted\n `inputs`: input nodes\n `outputs`: output nodes\n \"\"\"\n raise DeprecationWarning(\n \"crypten.nn.from_tensorflow is deprecated. 
\",\n \"CrypTen will no longer support model conversion from TensorFlow.\",\n )\n # Exporting model to ONNX graph\n if not TF_AND_TF2ONNX:\n raise ImportError(\"Please install both tensorflow and tf2onnx packages\")\n\n with tf.Graph().as_default() as tf_graph:\n tf.import_graph_def(tensorflow_graph_def, name=\"\")\n with tf2onnx.tf_loader.tf_session(graph=tf_graph):\n g = tf2onnx.tfonnx.process_tf_graph(\n tf_graph,\n opset=10,\n continue_on_error=False,\n input_names=inputs,\n output_names=outputs,\n )\n onnx_graph = tf2onnx.optimizer.optimize_graph(g)\n model_proto = onnx_graph.make_model(\n \"converted from {}\".format(tensorflow_graph_def)\n )\n f = io.BytesIO()\n f.write(model_proto.SerializeToString())\n\n # construct CrypTen model\n # Note: We don't convert crypten model to training mode, as Tensorflow\n # models are used for both training and evaluation without the specific\n # conversion of one mode to another\n f.seek(0)\n crypten_model = from_onnx(f)\n return crypten_model\n\n\ndef _from_pytorch_to_bytes(pytorch_model, dummy_input):\n \"\"\"\n Returns I/O stream containing ONNX graph for `pytorch_model` traced with\n input `dummy_input`.\n \"\"\"\n\n # first export is only used to obtain the PyTorch-to-ONNX symbolic registry:\n with io.BytesIO() as f:\n _export_pytorch_model(f, pytorch_model, dummy_input)\n\n # update ONNX symbolic registry with CrypTen-specific functions:\n _update_onnx_symbolic_registry()\n\n # export again so the graph is created with CrypTen-specific registry:\n f = io.BytesIO()\n f = _export_pytorch_model(f, pytorch_model, dummy_input)\n f.seek(0)\n return f\n\n\ndef _export_pytorch_model(f, pytorch_model, dummy_input):\n \"\"\"\n Returns a binary I/O stream containing ONNX-exported pytorch_model that was\n traced with input `dummy_input`.\n \"\"\"\n kwargs = {\n \"do_constant_folding\": False,\n \"export_params\": True,\n \"input_names\": [\"input\"],\n \"operator_export_type\": OperatorExportTypes.ONNX,\n \"output_names\": [\"output\"],\n \"opset_version\": _OPSET_VERSION,\n }\n torch.onnx.export(pytorch_model, dummy_input, f, **kwargs)\n return f\n\n\n# mapping from ONNX to crypten.nn for modules with different names:\nONNX_TO_CRYPTEN = {\n \"adaptive_avg_pool2d\": module.AdaptiveAvgPool2d,\n \"adaptive_max_pool2d\": module.AdaptiveMaxPool2d,\n \"AveragePool\": module.AvgPool2d,\n \"Clip\": module.Hardtanh,\n \"MaxPool\": module.MaxPool2d,\n \"Pad\": module._ConstantPad,\n \"Relu\": module.ReLU,\n \"ReduceMean\": module.Mean,\n \"ReduceSum\": module.Sum,\n}\n\n\ndef _to_crypten(onnx_model):\n \"\"\"\n Function that converts an `onnx_model` to a CrypTen model.\n \"\"\"\n\n # create graph:\n input_names, output_names = _get_input_output_names(onnx_model)\n assert len(output_names) == 1, \"Only one output per model supported.\"\n crypten_model = module.Graph(input_names, output_names[0])\n\n # create nodes for the parameters:\n for node in onnx_model.graph.initializer:\n param = torch.from_numpy(numpy_helper.to_array(node))\n crypten_model.add_module(node.name, module.Parameter(param), [])\n\n # loop over all nodes:\n for node in onnx_model.graph.node:\n\n # get attributes and node type:\n attributes = {attr.name: _get_attribute_value(attr) for attr in node.attribute}\n crypten_class = _get_operator_class(node.op_type, attributes)\n\n # add CrypTen module to graph:\n crypten_module = crypten_class.from_onnx(attributes=attributes)\n input_names = list(node.input)\n output_names = list(node.output)\n if node.op_type == \"Dropout\":\n output_names = 
[output_names[0]] # do not output Dropout mask\n crypten_model.add_module(\n output_names[0], crypten_module, input_names, output_names=output_names\n )\n\n # return final model:\n crypten_model = _get_model_or_module(crypten_model)\n return crypten_model\n\n\ndef _load_onnx_model(onnx_string_or_file):\n \"\"\"\n Loads ONNX model from file or string.\n \"\"\"\n if hasattr(onnx_string_or_file, \"seek\"):\n onnx_string_or_file.seek(0)\n return onnx.load(onnx_string_or_file)\n return onnx.load_model_from_string(onnx_string_or_file)\n\n\ndef _get_input_output_names(onnx_model):\n \"\"\"\n Return input and output names of the ONNX graph.\n \"\"\"\n input_names = [input.name for input in onnx_model.graph.input]\n output_names = [output.name for output in onnx_model.graph.output]\n assert len(input_names) >= 1, \"number of inputs should be at least 1\"\n assert len(output_names) == 1, \"number of outputs should be 1\"\n return input_names, output_names\n\n\ndef _get_model_or_module(crypten_model):\n \"\"\"\n Returns `Module` if model contains only one module. Otherwise returns model.\n \"\"\"\n num_modules = len(list(crypten_model.modules()))\n if num_modules == 1:\n for crypten_module in crypten_model.modules():\n return crypten_module\n return crypten_model\n\n\ndef _get_attribute_value(attr):\n \"\"\"\n Retrieves value from an ONNX attribute.\n \"\"\"\n if attr.HasField(\"f\"): # floating-point attribute\n return attr.f\n elif attr.HasField(\"i\"): # integer attribute\n return attr.i\n elif attr.HasField(\"s\"): # string attribute\n return attr.s # TODO: Sanitize string.\n elif attr.HasField(\"t\"): # tensor attribute\n return torch.from_numpy(numpy_helper.to_array(attr.t))\n elif len(attr.ints) > 0:\n return list(attr.ints)\n elif len(attr.floats) > 0:\n return list(attr.floats)\n raise ValueError(\"Unknown attribute type for attribute %s.\" % attr.name)\n\n\ndef _get_operator_class(node_op_type, attributes):\n \"\"\"\n Returns the `crypten.nn.Module` type corresponding to an ONNX node.\n \"\"\"\n crypten_class = getattr(\n module, node_op_type, ONNX_TO_CRYPTEN.get(node_op_type, None)\n )\n if crypten_class is None:\n raise ValueError(f\"CrypTen does not support ONNX op {node_op_type}.\")\n return crypten_class\n\n\ndef _update_onnx_symbolic_registry():\n \"\"\"\n Updates the ONNX symbolic registry for operators that need a CrypTen-specific\n implementation and custom operators.\n \"\"\"\n if SYM_REGISTRY:\n # update PyTorch's symbolic ONNX registry to output different functions:\n for version_key, version_val in sym_registry._registry.items():\n for function_key in version_val.keys():\n if function_key == \"softmax\":\n sym_registry._registry[version_key][\n function_key\n ] = _onnx_crypten_softmax\n if function_key == \"log_softmax\":\n sym_registry._registry[version_key][\n function_key\n ] = _onnx_crypten_logsoftmax\n if function_key == \"dropout\":\n sym_registry._registry[version_key][\n function_key\n ] = _onnx_crypten_dropout\n if function_key == \"feature_dropout\":\n sym_registry._registry[version_key][\n function_key\n ] = _onnx_crypten_feature_dropout\n else:\n # Update ONNX symbolic registry using torch.onnx.register_custom_op_symbolic\n torch.onnx.register_custom_op_symbolic(\n \"aten::softmax\", _onnx_crypten_softmax, _OPSET_VERSION\n )\n torch.onnx.register_custom_op_symbolic(\n \"aten::log_softmax\", _onnx_crypten_logsoftmax, _OPSET_VERSION\n )\n torch.onnx.register_custom_op_symbolic(\n \"aten::dropout\", _onnx_crypten_dropout, _OPSET_VERSION\n )\n 
torch.onnx.register_custom_op_symbolic(\n \"aten::feature_dropout\", _onnx_crypten_feature_dropout, _OPSET_VERSION\n )\n\n\n@sym_help.parse_args(\"v\", \"i\", \"none\")\ndef _onnx_crypten_softmax(g, input, dim, dtype=None):\n \"\"\"\n This function converts PyTorch's Softmax module to a Softmax module in\n the ONNX model. It overrides PyTorch's default conversion of Softmax module\n to a sequence of Exp, ReduceSum and Div modules, since this default\n conversion can cause numerical overflow when applied to CrypTensors.\n \"\"\"\n result = g.op(\"Softmax\", input, axis_i=dim)\n if dtype and dtype.node().kind() != \"prim::Constant\":\n parsed_dtype = sym_help._get_const(dtype, \"i\", \"dtype\")\n result = g.op(\"Cast\", result, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])\n return result\n\n\n@sym_help.parse_args(\"v\", \"i\", \"none\")\ndef _onnx_crypten_logsoftmax(g, input, dim, dtype=None):\n \"\"\"\n This function converts PyTorch's LogSoftmax module to a LogSoftmax module in\n the ONNX model. It overrides PyTorch's default conversion of LogSoftmax module\n to avoid potentially creating Transpose operators.\n \"\"\"\n result = g.op(\"LogSoftmax\", input, axis_i=dim)\n if dtype and dtype.node().kind() != \"prim::Constant\":\n parsed_dtype = sym_help._get_const(dtype, \"i\", \"dtype\")\n result = g.op(\"Cast\", result, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])\n return result\n\n\n@sym_help.parse_args(\"v\", \"f\", \"i\")\ndef _onnx_crypten_dropout(g, input, p, train):\n \"\"\"\n This function converts PyTorch's Dropout module to a Dropout module in the ONNX\n model. It overrides PyTorch's default implementation to ignore the Dropout module\n during the conversion. PyTorch assumes that ONNX models are only used for\n inference and therefore Dropout modules are not required in the ONNX model.\n However, CrypTen needs to convert ONNX models to trainable\n CrypTen models, and so the Dropout module needs to be included in the\n CrypTen-specific conversion.\n \"\"\"\n r, _ = g.op(\"Dropout\", input, ratio_f=p, outputs=2)\n return r\n\n\n@sym_help.parse_args(\"v\", \"f\", \"i\")\ndef _onnx_crypten_feature_dropout(g, input, p, train):\n \"\"\"\n This function converts PyTorch's DropoutNd module to a DropoutNd module in the ONNX\n model. It overrides PyTorch's default implementation to ignore the DropoutNd module\n during the conversion. 
PyTorch assumes that ONNX models are only used for\n inference and therefore DropoutNd modules are not required in the ONNX model.\n However, CrypTen needs to convert ONNX models to trainable\n CrypTen models, and so the DropoutNd module needs to be included in the\n CrypTen-specific conversion.\n \"\"\"\n r, _ = g.op(\"DropoutNd\", input, ratio_f=p, outputs=2)\n return r\n","repo_name":"facebookresearch/CrypTen","sub_path":"crypten/nn/onnx_converter.py","file_name":"onnx_converter.py","file_ext":"py","file_size_in_byte":12463,"program_lang":"python","lang":"en","doc_type":"code","stars":1380,"dataset":"github-code","pt":"72"} +{"seq_id":"23134079829","text":"import torch\nimport os\nimport wget\nimport tarfile\nfrom torchtext.data.utils import get_tokenizer, ngrams_iterator\nfrom torch.utils.data import Dataset, DataLoader\ntry:\n from torchtext.legacy.datasets.text_classification import DATASETS, _create_data_from_iterator, TextClassificationDataset, LABELS\nexcept:\n from torchtext.datasets.text_classification import DATASETS, TextClassificationDataset, _create_data_from_iterator, LABELS\nfrom torchtext.datasets import IMDB as IMDB_iterator, AG_NEWS as AG_NEWS_iterator\nfrom collections import Counter, OrderedDict\nfrom torchtext.vocab import vocab as vocab_fn\nimport tqdm\n\n\ndef IMDB(root, ngrams=1, vocab=None, include_unk=False):\n \"\"\" Defines IMDB datasets.\n\n The labels include:\n\n - 0 : Negative\n - 1 : Positive\n\n Create supervised learning dataset: AG_NEWS\n\n Separately returns the training and test dataset\n\n Args:\n root: Directory where the datasets are saved. Default: \".data\"\n ngrams: a contiguous sequence of n items from s string text.\n Default: 1\n vocab: Vocabulary used for dataset. If None, it will generate a new\n vocabulary based on the train data set.\n include_unk: include unknown token in the data (Default: False)\n\n Examples:\n >>> train_dataset, test_dataset = IMDB(ngrams=3)\n\n \"\"\"\n test_iter = IMDB_iterator(root, split='test')\n\n tokenizer = get_tokenizer(\"basic_english\")\n\n # build vocabulary\n if vocab is None:\n train_iter = IMDB_iterator(root, split='train')\n counter = Counter()\n for (label, line) in tqdm.tqdm(train_iter):\n counter.update(tokenizer(line))\n vocab = vocab_fn(OrderedDict(counter), min_freq=1)\n vocab.set_default_index(PAD_TOKEN)\n\n train_iter = IMDB_iterator(root, split='train')\n\n label_dict = {'pos': 1, 'neg': 0}\n\n def wrap_iterator(iter):\n for cls, tokens in iter:\n yield label_dict[cls], ngrams_iterator(tokenizer(tokens), ngrams)\n\n train_data, train_labels = _create_data_from_iterator(\n vocab, wrap_iterator(train_iter), include_unk)\n test_data, test_labels = _create_data_from_iterator(\n vocab, wrap_iterator(test_iter), include_unk)\n\n if len(train_labels ^ test_labels) > 0:\n raise ValueError(\"Training and test labels don't match\")\n\n return (TextClassificationDataset(vocab, train_data, train_labels),\n TextClassificationDataset(vocab, test_data, test_labels))\n\n\ndef AG_NEWS(root, ngrams=1, vocab=None, include_unk=False):\n \"\"\" Defines IMDB datasets.\n\n The labels include:\n\n - 0 : Negative\n - 1 : Positive\n\n Create supervised learning dataset: AG_NEWS\n\n Separately returns the training and test dataset\n\n Args:\n root: Directory where the datasets are saved. Default: \".data\"\n ngrams: a contiguous sequence of n items from s string text.\n Default: 1\n vocab: Vocabulary used for dataset. 
If None, it will generate a new\n vocabulary based on the train data set.\n include_unk: include unknown token in the data (Default: False)\n\n Examples:\n >>> train_dataset, test_dataset = AG_NEWS(ngrams=3)\n\n \"\"\"\n test_iter = AG_NEWS_iterator(root, split='test')\n\n tokenizer = get_tokenizer(\"basic_english\")\n\n # build vocabulary\n if vocab is None:\n train_iter = AG_NEWS_iterator(root, split='train')\n counter = Counter()\n for (label, line) in tqdm.tqdm(train_iter):\n counter.update(tokenizer(line))\n vocab = Vocab(counter, min_freq=1)\n\n train_iter = AG_NEWS_iterator(root, split='train')\n\n def wrap_iterator(iter):\n for cls, tokens in iter:\n yield cls - 1, ngrams_iterator(tokenizer(tokens), ngrams)\n\n train_data, train_labels = _create_data_from_iterator(\n vocab, wrap_iterator(train_iter), include_unk)\n test_data, test_labels = _create_data_from_iterator(\n vocab, wrap_iterator(test_iter), include_unk)\n\n if len(train_labels ^ test_labels) > 0:\n raise ValueError(\"Training and test labels don't match\")\n\n return (TextClassificationDataset(vocab, train_data, train_labels),\n TextClassificationDataset(vocab, test_data, test_labels))\n\n\nDATASETS['IMDB'] = IMDB\nDATASETS['AG_NEWS'] = AG_NEWS\n\nMAX_SENTENCE_LEN = {\n 'AG_NEWS': 75,\n 'SogouNews': 800,\n 'DBpedia': 75,\n 'YelpReviewPolarity': 200,\n 'YelpReviewFull': 200,\n 'YahooAnswers': 150,\n 'AmazonReviewPolarity': 100,\n 'AmazonReviewFull': 100,\n 'IMDB': 300\n}\nPAD_TOKEN = ''\n\n\ndef get_text_dataset(dataset, root):\n train_dataset, test_dataset = DATASETS[dataset](\n root=root, ngrams=1, include_unk=True)\n\n return train_dataset, test_dataset\n\n\nclass CollateProcessor:\n \"\"\"\n Function to create batch for text datasets\n \"\"\"\n\n def __init__(self, dataset, vocab):\n self._max_length = MAX_SENTENCE_LEN[dataset]\n self._vocab = vocab\n\n def __call__(self, batch):\n labels, sentences = zip(*batch)\n labels = torch.LongTensor(labels)\n processed_sentences = \\\n torch.ones((len(sentences), self._max_length),\n dtype=torch.long)*self._vocab.stoi[PAD_TOKEN]\n for i, s in enumerate(sentences):\n l = min(self._max_length, len(s))\n processed_sentences[i, :l] = torch.LongTensor(s[:l])\n return processed_sentences, labels\n","repo_name":"phineasng/flan_bio","sub_path":"kme/data/text_classification.py","file_name":"text_classification.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"18208205805","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport time;\nimport random\n \n#pip install BeautifulSoup4 -i https://pypi.douban.com/simple\n#pip install requests -i https://pypi.douban.com/simple\n \n# http请求头\nHostreferer = {\n 'Referer': 'http://www.mzitu.com',\n \n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\n}\n \n# 此请求头Referer破解盗图链接\nPicreferer = {\n # 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',\n # 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3679.0 Safari/537.36',\n # 'Referer': 'http://i.meizitu.net',\n # https://www.mzitu.com/224497/3\n 'Referer': 'http://www.mzitu.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\n}\nall_url = 'https://www.mzitu.com'\n# 
对mzitu主页all_url发起请求,将返回的HTML数据保存,便于解析\nstart_html = requests.get(all_url, headers=Hostreferer)\n \nsoup = BeautifulSoup(start_html.text, \"html.parser\") # 缩进格式\npage = soup.find_all('a', class_='page-numbers')\n# 最大页数\nmax_page = page[-2].text\nfor n in range(1, int(max_page) + 1):\n path = 'mzitu/' #存储路径,不要带特殊符号\n all_url = 'https://www.mzitu.com' #重新赋值\n if n!=1:\n all_url= all_url+\"/page/\"+str(n)+\"/\"\n print('开始爬第 %s 页, 网址是 %s' % (n , all_url))\n start_html = requests.get(all_url, headers=Hostreferer)\n soup = BeautifulSoup(start_html.text, \"html.parser\")\n# alt = soup.find(id='pins').find_all('a', target='_blank').find_all('img',class_='lazy').get('alt')\n hrefs = soup.find(id='pins').find_all('a', target='_blank'); #根据ID找\n \n for href in hrefs:\n imgs = href.find('img',class_='lazy')\n if imgs == None:\n break\n alt = imgs.get('alt')\n url = href.get('href')\n start_html2 = requests.get(url, headers=Hostreferer)\n soup2 = BeautifulSoup(start_html2.text, \"html.parser\") # 缩进格式\n page2 = soup2.find('div', class_='pagenavi').find_all('a')\n # print (page2[0])\n max_page2 = page2[-2].text\n path = path + alt.strip().replace('?', '')\n path = path.replace(':', '')\n path = path.replace('|', '')\n path = path.replace('*', '')\n path = path.replace('<', '')\n path = path.replace('>', '')\n if (os.path.exists(path)):\n pass\n # print('目录已存在')\n else:\n os.makedirs(path)\n for m in range(1,int(max_page2)):\n \n time.sleep(random.randint(1,5))\n # alt = href.find('img', class_='lazy').get('alt');\n # url = href.get('href');\n url3 = url+'/'+str(m)+'/'\n print('开始爬→%s' % url3)\n start_html3 = requests.get(url3, headers=Hostreferer)\n soup3 = BeautifulSoup(start_html3.text, \"html.parser\") # 缩进格式\n picSrc = soup3.find('div', class_='main-image').find('a').find('img').get('src');#.get('src');#.get('src'); #div class=\"main-image\"\n # imglist = #获取当前页上所有的子连接, 不包含class=\"box\"\n html = requests.get(picSrc, headers=Picreferer)\n \n # 提取图片名字\n file_name = path+'/'+picSrc.split(r'/')[-1]\n # 保存图片\n f = open(file_name, 'wb')\n f.write(html.content)\n f.close()\n print('图片保存到%s' % file_name)","repo_name":"yangwohenmai/TEST","sub_path":"GetAPIData/UnderstandData/第二版.py","file_name":"第二版.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"21763804676","text":"import pandas as pd \nfrom sklearn.model_selection import train_test_split \nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score\n\n# I have trained a classifier that predicts the category of a given news title\n\nclass Classifier:\n def readCsv(self):\n lines = pd.read_csv(\"data/data.csv\");\n x = lines.titulli\n y = lines.kategoria\n return x, y\n \n def train_test_split(self):\n x, y = self.readCsv()\n return train_test_split(x, y, test_size=0.3, random_state=1) \n\n\n def main(self):\n X_train, X_test, y_train, y_test = self.train_test_split()\n cv = CountVectorizer()\n training_data = cv.fit_transform(X_train)\n naive_bayes = MultinomialNB()\n naive_bayes.fit(training_data, y_train)\n testing_data = cv.transform([\"Albin Kurti\"])\n\n predictions = naive_bayes.predict(testing_data)\n print(predictions)\n","repo_name":"tot98git/nlp-stuff","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
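The classifier record above imports accuracy_score but never calls it, and its main() only predicts on a single hard-coded title. A minimal evaluation sketch, assuming the same Classifier class and the same data/data.csv layout with 'titulli' and 'kategoria' columns; the evaluate() helper below is hypothetical and not part of the original file:

# Hypothetical sketch (not in the original record): score the held-out split.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score

def evaluate(clf):
    # clf is assumed to be an instance of the Classifier class above
    X_train, X_test, y_train, y_test = clf.train_test_split()
    cv = CountVectorizer()
    nb = MultinomialNB()
    nb.fit(cv.fit_transform(X_train), y_train)      # learn the vocabulary and fit on the training split
    predictions = nb.predict(cv.transform(X_test))  # reuse the fitted vocabulary on the test split
    return accuracy_score(y_test, predictions)      # fraction of titles classified correctly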
+{"seq_id":"20577049399","text":"from collections import defaultdict, deque\nfrom typing import DefaultDict, List\nfrom time import time\n\ndef tester_maker(solution):\n \"\"\"주어진 함수에 대해 테스트케이스 셋을 넣어 함수를 다시 만든다\n a = tester_maker(solution)\n a() => 작동\n \"\"\"\n\n n_list = [\n 4,\n 5,\n 6,\n 7,\n 16,\n 14,\n 14\n ]\n\n edges_list = [\n [[1, 2], [2, 3], [3, 4]],\n [[1, 5], [2, 5], [3, 5], [4, 5]],\n [[1, 6], [2, 3], [3, 5], [3, 4], [2, 6]],\n [[1, 2], [2, 3], [1, 4], [1, 5], [5, 7], [5, 6]],\n [[1, 2],[2, 3],[3, 4],[4, 5],[5, 6],[6, 7],[7, 8],[2, 11],[11, 13],\n [11, 12],[3, 14],[4, 15],[15, 16],[5, 9],[9, 10],],\n [[1, 2],[2, 3],[3, 4],[4, 5],[5, 6],[6, 7],[7, 8],[8, 9],[5, 11],[11, 12],[6, 13],[13, 14],],\n [[1, 2],[2, 3],[3, 4],[4, 5],[5, 6],[6, 7],[7, 8],[8, 9],[4, 11],[11, 12],[5, 13],[13, 14],],\n ]\n\n ans_list = [\n 2,\n 2,\n 4,\n 4,\n 8,\n 7,\n 7,\n ]\n\n def wrapper():\n \n for i, n in enumerate(n_list):\n print(f\"#{i + 1:03d}\", end=\"\")\n st = time()\n ret = solution(n, edges_list[i])\n run_time = time() - st\n print(f\"({int(run_time * 1000)}ms) ret: [{ret}] ans: {ans_list[i]}\")\n\n return wrapper\n\n\ndef get_farthest_point_from_point_with_path(start, adj, path=False):\n \"\"\"start point와 adj: List[set]를 통해 dfs로 가장 먼곳을 찾는 함수\n path가 true이면 start부터 찾은 점까지의 경로를 반환한다\n\n 리턴값은\n 1. where 해당 노드의 번호\n 2. 해당 노드까지의 길이\n 3. 경로\n \"\"\"\n global_depth = local_depth = 0\n where = here = start\n\n visited = defaultdict(int)\n visited[here] = here\n\n stack = deque()\n stack.append([here, local_depth])\n\n while len(stack):\n here, local_depth = stack.pop()\n\n for child in adj[here]:\n if visited[child] == 0:\n visited[child] = here\n stack.append([child, local_depth + 1])\n\n if global_depth < local_depth + 1:\n global_depth = local_depth + 1\n where = child\n\n if path == False:\n return [where, global_depth, None]\n\n else:\n ret = []\n here = where\n while here != start:\n ret.append(here)\n here = visited[here]\n else:\n ret.append(here)\n\n return [where, global_depth, ret]\n\n\ndef get_farthest_point_from_line(line, adj) -> int:\n \"\"\"주어진 직선과 adj: List[set] 으로 부터, 양 끝으로 부터 가장 먼점을 찾는다.\n 이때 양 끝중, 멀리 있는 점을 max 로 처리하여 return 된다.\n max(dist_from_a, dist_from_b)\n\n 주어진 문제가 중간값을 찾으므로, 양끝점을 제외하고 stack에 넣은뒤, (물론 양끝점은 탐색점에서 제외됨)\n 해당하는 거리로 찾는다.\n \"\"\"\n visited = defaultdict(int)\n stack = deque()\n global_depth = 0\n\n last = len(line) - 1\n for i, here in enumerate(line):\n visited[here] = here\n\n if i != 0 and i!= last:\n dist = max(i, last - i)\n stack.append([here, dist])\n\n if global_depth < dist:\n global_depth = dist\n where = here\n\n while len(stack):\n here, local_depth = stack.pop()\n\n for child in adj[here]:\n if visited[child] == 0:\n visited[child] = here\n stack.append([child, local_depth + 1])\n\n if global_depth < local_depth + 1:\n global_depth = local_depth + 1\n where = child\n \n if global_depth > last:\n return last\n else:\n return global_depth\n\n\n\ndef solution(n, edges):\n\n adj = [set() for _ in range(n + 1)]\n\n for edge in edges:\n x, y = edge\n a: set = adj[x]\n b: set = adj[y]\n a.add(y)\n b.add(x)\n\n first = get_farthest_point_from_point_with_path(1, adj, False)\n second = get_farthest_point_from_point_with_path(first[0], adj, True)\n k = get_farthest_point_from_line(second[2], adj)\n\n return k\n# \ntester = tester_maker(solution)\ntester()\n\n# 아래는 문제를 풀기전 시행했던 기타 연구\n\nclass Node(object):\n \n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, \"_Node__instance\"):\n cls.__instance = dict()\n instance = super().__new__(cls)\n return 
instance\n \n def __init__(self, idx) -> None:\n self.me = idx\n self.child = set()\n\n self.__instance[idx] = self\n\n @classmethod\n def instance(cls, idx):\n if not hasattr(cls, \"_Node__instance\"):\n cls.__instance = dict()\n \n return cls.__instance[idx]\n\n @classmethod\n def set_instance(cls, *args, **kwargs):\n pass\n\n\n def __str__(self):\n return f\"Node [{self.me}]: {{{self.child}}}\"\n\ndef dfs(adj: list, visited: DefaultDict, find: int, here: int):\n \"\"\"dfs로 find를 찾으면 그 거리를 return 하는 함수\n \"\"\"\n visited[here] = 1\n if find == here:\n return 0\n\n ret = None\n for child in adj[here]:\n if visited[child] == 0:\n ret = dfs(adj, visited, find, child)\n if isinstance(ret, int):\n return ret + 1\n\n return ret\n\ndef add_one(container):\n \"\"\"주어진 nested list 에 대해서 +1를 하는 함수\n \"\"\"\n for i, j in enumerate(container):\n if isinstance(j, list):\n add_one(j)\n else:\n container[i] += 1\n\ndef list_def(adj: list, visited: DefaultDict, here: int):\n \"\"\"주어진 트리에 대해서 Nested Number Tree 로 리턴하는 함수\n \"\"\"\n visited[here] = 1\n \n cond1 = len(adj[here])\n cond2 = visited[list(adj[here])[0]]\n if cond1 == 1 and cond2 == 1:\n return 0\n\n ret = []\n for i, child in enumerate(adj[here]):\n if visited[child] == 0:\n ret.insert(i, (list_def(adj, visited, child)))\n\n add_one(ret)\n return list(ret)","repo_name":"noname2048/programers","sub_path":"월간_코드_챌린지_시즌1/09_트리_트리오_중간값.py","file_name":"09_트리_트리오_중간값.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6423409958","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom padding_same_conv import Conv2d\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass EncoderNetwork(nn.Module):\n def __init__(self, ch=64, csize=128, gsize=128):\n super(EncoderNetwork, self).__init__()\n self.conv1 = Conv2d(3, ch, 3, stride=2)\n self.conv2 = Conv2d(ch, ch, 3, stride=1)\n self.bn2 = nn.BatchNorm2d(ch)\n self.conv3 = Conv2d(ch, ch, 3, stride=2)\n self.bn3 = nn.BatchNorm2d(ch)\n # Global Path\n self.conv4_g = Conv2d(ch+7, ch*2, 3, stride=1)\n self.bn4_g = nn.BatchNorm2d(ch*2)\n self.conv5_g = Conv2d(ch*2, ch*2, 3, stride=1)\n self.bn5_g = nn.BatchNorm2d(ch*2)\n self.conv6_g = Conv2d(ch*2, gsize, 1, stride=1)\n # Local Path\n self.conv4_l = Conv2d(ch, ch*2, 3, stride=1)\n self.bn4_l = nn.BatchNorm2d(ch*2)\n self.conv5_l = Conv2d(ch*2, ch*2, 3, stride=1)\n self.bn5_l = nn.BatchNorm2d(ch*2)\n self.conv6_l = Conv2d(ch*2, csize, 1, stride=1)\n\n def forward(self, x, v):\n out = F.leaky_relu(self.conv1(x), inplace=True)\n out = self.bn2(F.leaky_relu(self.conv2(out), inplace=True)) + out\n out = self.bn3(F.leaky_relu(self.conv3(out), inplace=True))\n # Global Path\n pose_code = v.view(v.shape[0], -1, 1, 1)\n pose_code = pose_code.repeat(1, 1, 16, 16)\n out_g = torch.cat((out, pose_code), dim=1)\n out_g = self.bn4_g(F.leaky_relu(self.conv4_g(out_g), inplace=True))\n out_g = self.bn5_g(F.leaky_relu(self.conv5_g(out_g), inplace=True)) + out_g\n out_g = self.conv6_g(out_g)\n out_g = torch.mean(out_g, (2,3), keepdim=True)\n # Local Path\n out_l = self.bn4_l(F.leaky_relu(self.conv4_l(out), inplace=True))\n out_l = self.bn5_l(F.leaky_relu(self.conv5_l(out_l), inplace=True)) + out_l\n out_l = self.conv6_l(out_l)\n return out_l, 
out_g","repo_name":"jerrywiston/Spatial-Transformation-Routing-Generative-Query-Network","sub_path":"backup/encoder_global.py","file_name":"encoder_global.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33693403224","text":"import math\r\nimport cv2\r\nimport numpy as np\r\n\r\nWIDTH, HEIGHT = 400, 300\r\n\r\nBLUE = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nRED = (0, 0, 255)\r\nYELLOW = (0, 255, 255)\r\n\r\n\r\ndef get_frame():\r\n _, frame = capture.read()\r\n frame = frame[:, ::-1] # flip video capture\r\n return cv2.resize(frame, (WIDTH, HEIGHT))\r\n\r\n\r\ndef get_hand_contour(image):\r\n image_copy = np.copy(image)\r\n contours, hierarchy = cv2.findContours(image_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n max_area_index, max_area = 0, 0\r\n for index, contour in enumerate(contours):\r\n area = cv2.contourArea(contour)\r\n if area > max_area:\r\n max_area = area\r\n max_area_index = index\r\n contour = contours[max_area_index]\r\n return contour\r\n\r\n\r\ndef get_convex_hull(contour):\r\n convex_hull = cv2.convexHull(contour)\r\n return convex_hull\r\n\r\n\r\ndef get_defect_points(contour):\r\n convexity_defects = cv2.convexityDefects(contour, cv2.convexHull(contour, returnPoints=False))\r\n defects_count = 0\r\n defects = []\r\n tips = []\r\n if convexity_defects is not None:\r\n for i in range(convexity_defects.shape[0]):\r\n s, e, f, d = convexity_defects[i, 0]\r\n start = tuple(contour[s][0])\r\n end = tuple(contour[e][0])\r\n far = tuple(contour[f][0])\r\n a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)\r\n b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)\r\n c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)\r\n angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 57\r\n if angle <= 130:\r\n defects_count += 1\r\n defects.append(far)\r\n tips.append(end)\r\n tips.append(start)\r\n\r\n return defects_count, defects, tips\r\n\r\n\r\ndef limit_finger_tips(potential_tips, center):\r\n result = []\r\n\r\n potential_tips = [item for item in potential_tips if is_above(item, center, 80)]\r\n\r\n if len(potential_tips) <= 0:\r\n return result\r\n\r\n potential_tips = sorted(potential_tips, key=lambda pt: pt[0])\r\n\r\n result.append(potential_tips[0])\r\n\r\n for i in xrange(len(potential_tips) - 1):\r\n dist = get_distance(potential_tips[i], potential_tips[i + 1])\r\n if dist > 15:\r\n result.append(potential_tips[i + 1])\r\n return result\r\n\r\n\r\ndef get_labeled_tips(last, present):\r\n result = {}\r\n\r\n for tip in present:\r\n for idx, position in last.iteritems():\r\n if get_distance(tip, position) < 15:\r\n result[idx] = tip\r\n\r\n return result\r\n\r\n\r\ndef get_middle(p1, p2):\r\n r = ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)\r\n return r\r\n\r\n\r\ndef is_above(p1, p2, margin=0):\r\n return p1[1] < p2[1] + margin\r\n\r\n\r\ndef get_hand_middle(contour):\r\n m = cv2.moments(contour)\r\n x = int(m['m10'] / m['m00'])\r\n y = int(m['m01'] / m['m00'])\r\n return x, y\r\n\r\n\r\ndef get_distance(p1, p2):\r\n return math.hypot(p2[0] - p1[0], p2[1] - p1[1])\r\n\r\n\r\ndef draw_contour(img, contour, color):\r\n cv2.drawContours(img, [contour], 0, color, 2)\r\n\r\n\r\ndef draw_point(img, middle, color):\r\n cv2.circle(img, middle, 4, color, -1)\r\n\r\n\r\ndef draw_elements_to_image(img, contour, convex_hull, defect_points, middle):\r\n draw_contour(img, contour, BLUE)\r\n draw_contour(img, convex_hull, GREEN)\r\n 
for point in defect_points:\r\n draw_point(img, point, RED)\r\n draw_point(img, middle, YELLOW)\r\n\r\n\r\ndef draw_labeled_tips(img, tips):\r\n for idx, position in tips.iteritems():\r\n cv2.putText(img, str(idx + 1), position, cv2.FONT_HERSHEY_COMPLEX, 1, 80)\r\n draw_point(img, position, YELLOW)\r\n\r\n\r\ndef main():\r\n history = {\r\n 0: (0, 0),\r\n 1: (0, 0),\r\n 2: (0, 0),\r\n 3: (0, 0),\r\n 4: (0, 0)\r\n }\r\n\r\n while True:\r\n # get image from camera\r\n frame = get_frame()\r\n\r\n # perform operations on image\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray, (3, 3), 0)\r\n ret, thresh1 = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n thresh1 = cv2.bitwise_not(thresh1)\r\n\r\n # finding contours\r\n hand_contour = get_hand_contour(thresh1)\r\n convex_hull = get_convex_hull(hand_contour)\r\n defects_count, defect_points, tips = get_defect_points(hand_contour)\r\n hand_middle = get_hand_middle(hand_contour)\r\n tips = limit_finger_tips(tips, hand_middle)\r\n\r\n # save recent positions\r\n if len(tips) == 5:\r\n for i in xrange(5):\r\n history[i] = tips[i]\r\n\r\n labeled_tips = get_labeled_tips(history, tips)\r\n\r\n # draw transformation on image\r\n thresh_image = cv2.cvtColor(thresh1, cv2.COLOR_GRAY2BGR)\r\n thresh_with_elements = np.copy(thresh_image)\r\n frame_with_elements = np.copy(frame)\r\n\r\n draw_elements_to_image(thresh_with_elements, hand_contour, convex_hull, defect_points, hand_middle)\r\n draw_elements_to_image(frame_with_elements, hand_contour, convex_hull, defect_points, hand_middle)\r\n\r\n draw_labeled_tips(frame_with_elements, labeled_tips)\r\n\r\n top = np.hstack((frame, frame_with_elements))\r\n bottom = np.hstack((thresh_image, thresh_with_elements))\r\n image = np.vstack((top, bottom))\r\n\r\n # show image\r\n cv2.imshow('frame', image)\r\n\r\n # quit application\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n capture = cv2.VideoCapture(0)\r\n\r\n main()\r\n\r\n # cleanup\r\n capture.release()\r\n cv2.destroyAllWindows()\r\n","repo_name":"mawilk/GesturesDetection","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18170744781","text":"# Interactive Franck-Condon model system for a model system\n# Show case the effect of Transition energy, relative displacement of the PES and\n# Temperature on the Franck Condon Stick Spectra.\n# The code is demonstrating the Franck-Condon principle, which describes the intensity\n# distribution of vibrational transitions in a molecule during an electronic transition.\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nprint(\"Franck-Condon Principle Demonstration\")\n\n# Input parameters\nE = float(input(\"Enter electronic transition energy (eV): \")) # Example 2.5\nw1 = float(input(\"Enter ground state vibrational frequency (eV): \")) # Example 0.3\nw2 = float(input(\"Enter excited state vibrational frequency (eV): \")) # Example 0.1\nD = float(input(\"Enter displacement between potentials (angstrom): \")) # Example 0.5\nT = float(input(\"Enter temperature (K): \")) # Example 300.0\n\n# Calculate vibrational energy levels\nvib_levels1 = w1 * np.arange(31) \nvib_levels2 = w2 * np.arange(31)\n\n# Calculate FC factors \n'''\nThe equation to calculate the Franck-Condon factors (fc) is:\n\nfc = (S * v') * exp(-S * δ2/2) * (δ^(v' - v\"))\n\nWhere:\nS = Frequency ratio 
(w2/w1) = df\nTaking the ratio w2/w1 gives the frequency scaling factor S that is used in the FC equation.\n\nv' = Vibrational quantum number for ground state (vib_levels1)\nδ = Displacement between ground and excited state potentials = D\nv\" = Vibrational quantum number for the excited state (vib_levels2)\n\nExpansion:\n(S * v') term: Frequency scaling of ground state vibrational wavefunctions\nexp(-S * δ2/2) term: Overlap between ground and excited state wavefunctions\n(δ^(v' - v\")) term: Shifting of excited state wavefunctions relative to ground state\n\n'''\n\ndf = w2/w1\nfc = (df**vib_levels1[:,None]) * np.exp(-df*D**2/2) * (D**(vib_levels1[:,None] - vib_levels2[None,:]))\n\n# Calculate intensities \nints = fc**2 * np.exp(-vib_levels1[:,None]/T)\n\n# Plot\nplt.figure()\nfor i in range(31):\n plt.vlines(E + vib_levels2[i], 0, ints[i].max(), colors='C0', lw=1.5)\nplt.xlabel('Energy (eV)')\nplt.ylabel('Intensity (a.u.)')\nplt.title('Franck-Condon Spectrum')\nplt.show()\n\nprint(\"Spectrum displayed!\")\n","repo_name":"Ajaykhanna/PythonProjects","sub_path":"franck_condon_model_system.py","file_name":"franck_condon_model_system.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25294459272","text":"import argparse\nfrom typing import List\n\n\ndef parse_args(args: List[str]):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-c', '--concurrency', metavar='N', default=1, type=int,\n help='Number of multiple requests to perform at a time'\n )\n parser.add_argument(\n '-H', '--header', metavar='custom-header', default=[],\n nargs='*', type=str,\n help='Append extra headers to the request.'\n )\n parser.add_argument(\n '-n', '--requests', metavar='N', default=1, type=int,\n help='Number of requests to perform for the benchmarking session'\n )\n parser.add_argument(\n '-P', '--proxy-auth',\n metavar='proxy-auth-username:password', type=str,\n help='Supply BASIC Authentication credentials to a proxy en-route.'\n )\n parser.add_argument(\n '-X', '--proxy', metavar='proxy:port', type=str,\n help='Use a proxy server for the requests.'\n )\n parser.add_argument(\n '-t', '--timeout', metavar='timeout', default=30, type=int,\n help='Maximum number of seconds to wait before the socket times out.'\n )\n parser.add_argument('url', metavar='URL', type=str)\n parser.add_argument(\n '-o', '--output', metavar='results.json', type=str,\n help='Write benchmark results to csv file.'\n )\n return parser.parse_args(args=args)\n","repo_name":"tesonet/pyhttp","sub_path":"pyhttp/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23776069941","text":"'''\n106. 
Construct Binary Tree from Inorder and Postorder Traversal\nMedium\nGiven two integer arrays inorder and postorder where inorder is the inorder traversal of a binary tree and postorder is the postorder traversal of the same tree, construct and return the binary tree.\n\n \n\nExample 1:\n\n\nInput: inorder = [9,3,15,20,7], postorder = [9,15,7,20,3]\nOutput: [3,9,20,null,null,15,7]\nExample 2:\n\nInput: inorder = [-1], postorder = [-1]\nOutput: [-1]\n \n\nConstraints:\n\n1 <= inorder.length <= 3000\npostorder.length == inorder.length\n-3000 <= inorder[i], postorder[i] <= 3000\ninorder and postorder consist of unique values.\nEach value of postorder also appears in inorder.\ninorder is guaranteed to be the inorder traversal of the tree.\npostorder is guaranteed to be the postorder traversal of the tree.\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n\n# O(h*n) where h is the height of the tree, unbalanced tree h = n, O(n*n)\nclass Solution:\n    def buildTree(self, inorder: List[int], postorder: List[int]) -> Optional[TreeNode]:\n        if not inorder: return None\n        root = TreeNode(postorder[-1])\n        for i in range(len(inorder)):\n            if inorder[i] == postorder[-1]:\n                pos = i\n                break\n        pos_left, pos_right = [], []\n        in_left, in_right = inorder[:i], inorder[i+1:]\n        set_in_left = set(in_left)\n        for i in range(len(postorder)-1):\n            v = postorder[i]\n            if v in set_in_left:\n                pos_left.append(v)\n            else:\n                pos_right.append(v)\n        root.left = self.buildTree(in_left, pos_left)\n        root.right = self.buildTree(in_right, pos_right)\n        return root\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def buildTree(self, inorder: List[int], postorder: List[int]) -> Optional[TreeNode]:\n        d = {v: i for i, v in enumerate(inorder)}\n        def helper(lo, hi):\n            if lo > hi: return None\n            root = TreeNode(postorder.pop())\n            mid = d[root.val]\n            root.right = helper(mid+1, hi)\n            root.left = helper(lo, mid-1)\n            return root\n        return helper(0, len(inorder)-1)","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2023/03. 
March/16.buildTree.py","file_name":"16.buildTree.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74848246631","text":"import pickle\nimport numpy as np\nfrom tqdm import tqdm\nimport os\n\nclass Embedding(object):\n def __init__(self, data_path = None):\n self.data = []\n self.embs = []\n self.paths = []\n self.img_ids = []\n if data_path and os.path.exists(data_path):\n self.data = pickle.load(open(data_path, \"rb\"))\n for d in self.data:\n self.img_ids.append(d['img_id'])\n self.paths.append(d['url'])\n self.embs.append(d['embed'])\n\n def embed_to_str(self, embed_raw):\n embed_str = \"\"\n for i in range(len(embed_raw)):\n prefix = \"p\"\n if embed_raw[i] < 0:\n prefix = \"n\"\n if int(embed_raw[i]) != 0:\n for j in range(0, np.abs(int(embed_raw[i]))):\n embed_str += \"{}{} \".format(prefix, i + 1)\n\n return embed_str\n\n def get_data(self):\n return self.data\n\n def get_embs(self):\n return self.embs\n\n def get_paths(self):\n return self.paths\n\n def get_img_ids(self):\n return self.img_ids\n\n def quantization_factor(self, Q=50, embed=None):\n words = []\n if embed:\n embed = list(np.floor(np.multiply(embed, Q)))\n\n for e in tqdm(embed):\n words.append(self.embed_to_str(e))\n return words\n\n","repo_name":"thandongtb/tabby_search","sub_path":"fashion/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7437954221","text":"import sys\nimport pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn import preprocessing\n\n\ndef find_similar_songs(file_name):\n # 讀資料集 .csv檔,先不設定 index(index_col = False)\n data = pd.read_csv('./finalcsv/finalcsv30s.csv',index_col=False)\n data = data.dropna()\n data = data.drop(columns=['Unnamed: 0','song_name','label']) # 刪除不要的欄位\n\n\n # 曲歌單欄位對照 df\n songlist = data.iloc[:,-3:]\n songlist.set_index(['songid'], inplace=True)\n songlist = songlist.drop_duplicates() # 刪除相同欄位\n\n data.set_index(['songid'], inplace=True) # 設定 \"song_name\" 為 index\n data = data.drop(columns=['videoname','url']) # 刪除不要的欄位\n # 讀預測歌曲特徵\n userdf_3s2 = pd.read_csv(f'./csv30s/{file_name}30s.csv', index_col=False)\n # userdf_3s2 = pd.read_csv(f'{file_name}30s.csv', index_col=False)\n userdf_3s2 = userdf_3s2.dropna()\n userdf_3s2 = userdf_3s2.drop(columns=['Unnamed: 0','song_name','label']) # 刪���不要的欄位\n userdf_3s2 .set_index(['songid'], inplace=True) # 設定 \"song_name\" 為 index\n\n data = pd.concat([data, userdf_3s2], axis=0)\n\n # 將相同 “風格+編號”群組,並將所有特徵取平均值\n data = data.groupby(as_index=True, level=0).mean() # 以index 分群\n # data.set_index(['songid'], inplace=True) # 設定 \"song_name\" 為 index\n labels = data.index # 取出 index物件備用\n\n # Scale the data\n data_scaled=preprocessing.scale(data) # 特徵標準化\n # print('Scaled data type:', type(data_scaled)) #反註解查看 Scaled data type: \n\n # Cosine similarity 餘弦相識度模型\n similarity = cosine_similarity(data_scaled)\n # print(\"Similarity shape:\", similarity.shape) #反註解查看\n\n # 轉為 df 並改 index、columns 歌名的混淆矩陣\n sim_df_labels = pd.DataFrame(similarity) # np.array 轉 df\n sim_df_names = sim_df_labels.set_index(labels) # index 換歌名取名稱\n sim_df_names.columns = labels # colums換歌曲名稱\n\n\n # 以輸入的歌名取欄欄位,此時格式為 series 再排序相關性,並且排除自己對應的係數=1\n series = sim_df_names[file_name].sort_values(ascending = False)\n series = series.drop(file_name)\n series = series.iloc[0:5,]\n tmpdf = 
pd.DataFrame(series, index=series.index)\n\n similar_top5 = tmpdf.join(songlist)\n\n # 印出標題輸入歌曲名稱和前面相識最高的前五首歌名\n print(\"\\n\",\"*\"*31,\"\\n Similar songs to [\",file_name,\"]\",\"\\n\",\"*\"*31,)\n print(similar_top5.iloc[:,1:])\n\n# call function\nfile_name = sys.argv[1]\nfind_similar_songs(file_name)","repo_name":"Leo840811/music","sub_path":"modeltest/recommend_songlist.py","file_name":"recommend_songlist.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16532424397","text":"# 효율성 fail\n\ndef solution(participant, completion):\n answer = []\n for person in participant:\n if person not in answer:\n completion_person = completion.count(person)\n participant_person = participant.count(person)\n count_person = participant_person - completion_person\n for _ in range(count_person):\n answer.append(person)\n return ','.join(answer)\n\n# -------------------------------------------------------------------\n\n# Success\nimport collections\n\ndef solution2(participant, completion):\n answer = collections.Counter(participant) - collections.Counter(completion)\n return list(answer.keys())[0]\n\n# -------------------------------------------------------------------\n\n# # My Solution\n# Success\n\ndef solution3(participant, completion):\n participant.sort()\n completion.sort()\n for i in range(len(completion)):\n if participant[i] != completion[i]:\n return participant[i]\n return participant[-1]","repo_name":"yuueuni/algorithm","sub_path":"programmers/hash/marathon.py","file_name":"marathon.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73715561192","text":"# coding:utf-8\nimport scrapy\nimport json\nimport jsonpath\nfrom fagaiwei.items import FagaiweiItem\nimport time\nfrom fagaiwei.settings import session, NewsItemInfo\n\n\nclass xiamenSipderSpider(scrapy.Spider):\n name = 'hexuncaijingxinwen_sipder'\n allowed_domains = ['hexun.com']\n start_urls = [\n 'http://open.tool.hexun.com/MongodbNewsService/data/getOriginalNewsList.jsp?id=187804274&s=30&cp=1&priority=0&callback=hx_json',\n\n ]\n\n def parse(self, response):\n json_text = response.text[8:]\n json_text = json_text.replace(' )', '')\n json_str = json.loads(json_text, encoding='utf8')\n urls = jsonpath.jsonpath(json_str, '$..entityurl')\n laiyuan = jsonpath.jsonpath(json_str, '$..channelname')\n titles = jsonpath.jsonpath(json_str, '$..title')\n dabao = zip(urls, titles, laiyuan)\n for url, title, laiyuan in dabao:\n result = session.query(NewsItemInfo).filter_by(url=url, web_id=48).count()\n if result:\n # print(\"URL文件地址: {} 存在\".format(url))\n pass\n else:\n yield scrapy.Request(url=url, callback=self.parse_page, meta={'title': title, 'laiyuan': laiyuan})\n\n def parse_page(self, response):\n item = FagaiweiItem()\n item['title'] = response.meta['title']\n item['webname'] = '和讯财经新闻网' + response.meta['laiyuan']\n item['web'] = 'http://news.hexun.com/original/'\n item['url'] = response.url\n item['pub_time'] = response.xpath(\"//span[@class='pr20']/text()\").get()\n item['content'] = ''.join(\n list(response.xpath(\"//div[@class='art_contextBox']//p[not(script)]//text()\").getall())).replace('\\u3000',\n '')\n item['add_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n item['keyword'] = ''\n item['web_id'] = 48\n # print(item)\n yield item\n\n\n\n\n\n\n\n\n\n # for url in urls:\n # print(url)\n # if url not in 
qc_url:\n # qc_url.append(url)\n # else:\n # pass\n # print(response.text)\n # with open('aaa.html','w',encoding='utf-8') as fp :\n # fp.write(response.text)\n # print(len(qc_url))\n # for url in urls:\n # print(url)\n # result = session.query(NewsItemInfo).filter_by(url=url, web_id=41).count()\n # if result:\n # print(\"{} 存在\".format(url))\n # pass\n # else:\n # item = FagaiweiItem()\n # item['title'] = ''.join(list(response.xpath(\"//h1/text()\").get())).replace('\\u3000', '')\n # pub_time = response.xpath(\"//div[@class = 'fl_dib']/text()\").get()\n # if pub_time is None:\n # item['pub_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n # else:\n # item['pub_time'] = pub_time\n # web = response.xpath(\"//div[@class = 'fl_dib']/a/@href\").get()\n # if web is None:\n # item['web'] = 'http://news.takungpao.com/'\n # else:\n # item['web'] = web\n # laiyuan = response.xpath(\"//div[@class = 'fl_dib']/a/text()\").get()\n # item['url'] = response.url\n # if laiyuan is None:\n # item['webname'] = '太公网'\n # else:\n # item['webname'] = laiyuan\n # content = ''.join(list(response.xpath(\"//div[contains(@class,'tpk_text')]//text()\").getall())) \\\n # .replace('\\u3000', '').replace('\\t', '').replace('\\r', '')\n # if content is None:\n # item['content'] = '请点击原文链接查看内容'\n # else:\n # item['content'] = content\n # item['add_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n # item['keyword'] = ' '\n # item['web_id'] = 41\n # yield item\n","repo_name":"KKtoNN/python_spider","sub_path":"fagaiwei/fagaiwei/spiders/48hexuncaijingxinwen_sipder.py","file_name":"48hexuncaijingxinwen_sipder.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"71433425193","text":"import pandas as pd\nimport numpy as np\nimport logging\n\nfrom .all_genes import all_genes, gap_character\nfrom .amino_acids import amino_acids\nfrom .blosum import blosum\nfrom .objects import DistanceParams\n\nlogger = logging.getLogger('tcr_distances.py')\n\ndef blosum_character_distance(a, b, gap_penalty, params):\n \"\"\"\n\n TODO: verb_noun function rename to calculate_blosum_character_distance\n\n calculates character to character score using blossum\n\n Parameters\n ----------\n a : string\n single uppercase character\n b : string\n single uppercase character\n gap_penalty : integer\n numeric integer for the gap penalty\n params : DistanceParams instance\n class containing default parameters for alignment scoring.\n\n Returns\n -------\n integer\n\n Example\n -------\n >>> blosum_character_distance(\"A\", \"A\", 4, objects.DistanceParams())\n 0\n\n \"\"\"\n if a == gap_character and b == gap_character:\n return 0\n elif a == '*' and b == '*':\n return 0\n elif a == gap_character or b == gap_character or a=='*' or b=='*':\n return gap_penalty\n else:\n # assert a in amino_acids\n # assert b in amino_acids\n # maxval = min( blosum[(a,a)], blosum[(b,b)] )\n # return maxval - blosum[(a,b)]\n return params.distance_matrix[(a, b)]\n\ndef blosum_sequence_distance( aseq, bseq, gap_penalty, params):\n \"\"\"\n calculates a sequence to sequence distance score\n\n TODO: verb_noun function rename to calculate_blosum_sequence_distance\n\n Parameters\n ----------\n aseq :\n string of amino acid sequence with possible gaps\n\n bseq : string\n string of amino acid sequences with possible gaps\n\n gap_penalty : integer\n\n params : objects.DistanceParams instance\n class containing default parameters for alignment 
scoring.\n\n Returns\n -------\n dist : integer\n\n Example\n -------\n >>> blosum_sequence_distance(\"ABC\", \"ABC\", 4, objects.DistanceParams())\n 0\n\n \"\"\"\n assert len(aseq) == len(bseq)\n dist = 0.0\n for a, b in zip(aseq, bseq):\n if a == ' ':\n assert b== ' '\n else:\n dist += blosum_character_distance( a, b, gap_penalty, params)\n return dist\n\n\ndef align_cdr3s( a, b, gap_character ):\n \"\"\"\n\n Insert gaps into the shorter sequence,\n one at a time at the \"best\" location.\n\n Parameters\n ----------\n a : string\n string of amino acids\n b : string\n string of amino acids\n gap_character : string\n character indicating a gap in the aligned sequences\n\n Returns\n -------\n dist : tuple\n tuple with two strings\n\n\n\n \"\"\"\n if len(a) == len(b):\n return (a[:], b[:])\n\n if len(a)best_score:\n best_score = score\n best_gappos = gappos\n ## insert the gap\n s0 = s0[:best_gappos+1] + gap_character*lendiff + s0[best_gappos+1:]\n\n assert len(s0) == len(s1)\n\n if len(a) 1##JCC testing\n assert lendiff>=0\n\n\n if params.trim_cdr3s:\n \"\"\"NOTE: Minimum length of cdr3 protein carried into clones file is currently set in the read_sanger_data.py script!\"\"\"\n assert lenshort > 3+2 ## something to align...\n\n if not params.align_cdr3s:\n ## if we are not aligning, use a fixed gap position relative to the start of the CDR3\n ## that reflects the typically longer and more variable-length contributions to\n ## the CDR3 from the J than from the V. For a normal-length\n ## CDR3 this would be after the Cys+5 position (ie, gappos = 6; align 6 rsds on N-terminal side of CDR3).\n ## Use an earlier gappos if lenshort is less than 11.\n ##\n gappos = min( 6, 3 + (lenshort-5)/2 )\n best_dist, count = sequence_distance_with_gappos( shortseq, longseq, gappos, params )\n\n else:\n ## the CYS and the first G of the GXG are 'aligned' in the beta sheet\n ## the alignment seems to continue through roughly CYS+4\n ## ie it's hard to see how we could have an 'insertion' within that region\n ## gappos=1 would be a insertion after CYS\n ## gappos=5 would be a insertion after CYS+4 (5 rsds before the gap)\n ## the full cdr3 ends at the position before the first G\n ## so gappos of len(shortseq)-1 would be gap right before the 'G'\n ## shifting this back by 4 would be analogous to what we do on the other strand, ie len(shortseq)-1-4\n min_gappos = 5\n max_gappos = len(shortseq)-1-4\n while min_gappos>max_gappos:\n min_gappos -= 1\n max_gappos += 1\n for gappos in range( min_gappos, max_gappos+1 ):\n dist, count = sequence_distance_with_gappos( shortseq, longseq, gappos, params )\n if gappos>min_gappos:\n assert count==best_count\n if gappos == min_gappos or dist < best_dist:\n best_dist = dist\n best_gappos = gappos\n best_count = count\n #print 'align1:',shortseq[:best_gappos] + '-'*lendiff + shortseq[best_gappos:], best_gappos, best_dist\n #print 'align2:',longseq, best_gappos, best_dist\n\n\n ## Note that weight_cdr3_region is not applied to the gap penalty\n ##\n return params.weight_cdr3_region * best_dist + lendiff * params.gap_penalty_cdr3_region\n\ndef compute_all_v_region_distances(organism, params):\n rep_dists = {}\n for chain in 'AB': # don't compute inter-chain distances\n repseqs = []\n for id, g in all_genes[organism].items():\n if g.chain == chain and g.region == 'V':\n merged_loopseq = ' '.join( g.cdrs[:-1])\n repseqs.append( ( id, merged_loopseq ) )\n rep_dists[ id ] = {}\n\n for r1, s1 in repseqs:\n for r2, s2 in repseqs:\n #if r1[2] != r2[2]: continue\n try:\n \"\"\"Assume 
symmetry and check first if it's already been calculated\"\"\"\n                    rep_dists[r1][r2] = rep_dists[r2][r1]\n                except KeyError:\n                    d = params.weight_v_region * blosum_sequence_distance(s1, s2,\n                                                                          params.gap_penalty_v_region,\n                                                                          params)\n                    rep_dists[r1][r2] = d\n                    rep_dists[r2][r1] = d\n\n    return rep_dists\n\n'''def compute_distance(t1, t2, chains, rep_dists, params): # t1/2 = [ va_reps, vb_reps, l['cdr3a'], l['cdr3b'] ]\n    dist = 0.0\n    if 'A' in chains:\n        dist += min( ( rep_dists[x][y] for x in t1[0] for y in t2[0] ) )\n        dist += weighted_cdr3_distance(t1[2], t2[2], params)\n    if 'B' in chains:\n        dist += min( ( rep_dists[x][y] for x in t1[1] for y in t2[1] ) )\n        dist += weighted_cdr3_distance(t1[3], t2[3], params)\n    return params.scale_factor * dist'''\n\ndef compute_auc( l0, l1, sign_factor=1 ):\n    ## l0 are the true positives, l1 are the false positives\n    ## if sign_factor==1 then lower scores are better, otherwise it's the opposite\n    ##\n    if not l0:\n        return 0.0, [0, 1], [0, 0]\n    elif not l1:\n        return 1.0, [0, 0, 1], [0, 1, 1]\n\n    l = sorted([ (sign_factor*x, 0) for x in l0 ] + [ (sign_factor*x, -1) for x in l1 ]) ## in ties, take the false positive first\n\n    xvals = []\n    yvals = []\n\n    counts = [0, 0]\n    totals = [len(l0), len(l1)]\n\n    area=0.0\n    width = 1.0/totals[1]\n    for ( score, neg_tcr_class ) in l:\n        tcr_class = -1*neg_tcr_class\n        counts[ tcr_class ] += 1\n        xval = float( counts[1] ) / totals[1]\n        yval = float( counts[0] ) / totals[0]\n        xvals.append( xval )\n        yvals.append( yval )\n        if tcr_class==1: area += yval * width\n\n    return area, xvals, yvals\n\ndef get_rank( val, l ): ## does not require that the list l is sorted\n    num_lower = 0\n    num_upper = 0\n\n    epsilon = 1e-6\n\n    lower_neighbor = val-10000\n    upper_neighbor = val+10000\n\n    for x in l:\n        if x<val-epsilon:\n            num_lower += 1\n            lower_neighbor = max( lower_neighbor, x )\n        elif x>val+epsilon:\n            num_upper += 1\n            upper_neighbor = min( upper_neighbor, x )\n\n    total = len(l)\n    num_equal = total - num_lower - num_upper\n    assert num_equal >=0\n\n    if num_upper == 0:\n        return 100.0\n\n    elif num_lower == 0:\n        return 0.0\n\n    else:\n        assert upper_neighbor>lower_neighbor\n        interp = (val-lower_neighbor)/(upper_neighbor-lower_neighbor)\n\n        #if num_equal>0:print 'num_equal:',num_equal\n\n        interp_num_lower = num_lower + interp * ( 1 + num_equal )\n\n        return (100.0*interp_num_lower)/total\n\n\n\n## negative nbrdist_percentile means take exactly -nbrdist_percentile topn\ndef sort_and_compute_nbrdist_from_distances( l, nbrdist_percentile, dont_sort=False ):\n    if not dont_sort: l.sort()\n    assert l[0]<=l[-1]\n    if nbrdist_percentile<0:\n        n = max( 1, min(len(l), -1*nbrdist_percentile ) )\n    else:\n        n = max(1, ( nbrdist_percentile * len(l) )//100 )\n    return sum( l[:n])/float(n)\n\n## negative nbrdist_percentile means take exactly -nbrdist_percentile topn\ndef sort_and_compute_weighted_nbrdist_from_distances( l, nbrdist_percentile, dont_sort=False ):\n    if not dont_sort: l.sort()\n    assert l[0]<=l[-1]\n    if nbrdist_percentile<0:\n        n = max( 1, min(len(l), -1*nbrdist_percentile ) )\n    else:\n        n = max(1, ( nbrdist_percentile * len(l) )//100 )\n\n    total_wt = 0.0\n    nbrdist=0.0\n    for i, val in enumerate( l[:n] ):\n        wt = 1.0 - float(i)/n\n        total_wt += wt\n        nbrdist += wt * val\n\n    return nbrdist / total_wt\n","repo_name":"kmayerb/tcrdist2","sub_path":"tcrdist/tcr_distances.py","file_name":"tcr_distances.py","file_ext":"py","file_size_in_byte":12597,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"}
+{"seq_id":"31938807595","text":"import math\nimport matplotlib.pyplot as plt\n\nm = float(input(\"Massa objek (kg): 
\"))\nk = float(input(\"Konstanta pegas (kg/s): \"))\nc = float(input(\"Koefisien peredaman (kg/s): \"))\ny0 = float(input(\"Simpangan awal (m): \"))\nt = float(input(\"Waktu pengamatan (s): \"))\nn = int(input(\"Jumlah pengamatan: \"))\n\nalpha = c/(2*m)\nomega = math.sqrt((k/m) - (c**2/(4*m**2)))\nincrement = t/n\ntn = 0.0\n\nlistT = []\nlistY = []\n\nfor x in range(n+1):\n listT.append(tn)\n listY.append(((math.e**(-alpha*tn))) * (y0*math.cos(omega*tn) + y0*(alpha/omega)*math.sin(omega*tn)))\n tn = tn + increment\n\nrow = \"| {no:2d} | {time:.2f} | {y: .8f} |\".format\nprint(\"| No | t | Simpangan |\")\nfor i in range(len(listY)):\n # print(nformat.format(i) + \". t = \" + tnformat.format(listT[i]) + \", y = \" + ynformat.format(listY[i]))\n print(row(no=i, time=listT[i], y=listY[i]))\n\nplt.plot(listT, listY)\nplt.title(\"Plot Simulasi Model Massa-Pegas-Peredam\")\nplt.xlabel(\"waktu (s)\")\nplt.ylabel(\"simpangan (m)\")\nplt.show()","repo_name":"RaviEdho/MassSpringDamper","sub_path":"MassSpringDamper.py","file_name":"MassSpringDamper.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32003781952","text":"from __future__ import absolute_import\r\n\r\nimport functools\r\nfrom builtins import zip\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nfrom PyQt5.QtCore import QPointF, QThread\r\nfrom qgis.PyQt import QtGui\r\n\r\nfrom ... import PyQtGraph as pg\r\nfrom ..model import constants\r\nfrom ..model.helper.calculos import fmax, vmedia\r\nfrom ..model.utils import *\r\nfrom ..view.estacas import (ApplyTransDialog, QgsMessageLog, SetCtAtiDialog,\r\n VolumeDialog, brucknerRampaDialog, closeDialog,\r\n cvEdit, rampaDialog, setEscalaDialog,\r\n ssRampaDialog)\r\nfrom .Geometria import Figure, Prismoide\r\n\r\n\r\n##############################################################################################################\r\n# TODO:\r\n\r\n# Features to ADD\r\n# ctrl+Z utility\r\n# add menu interface and grid and printing scale export\r\n\r\n##############################################################################################################\r\n\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n\r\n def _fromUtf8(s):\r\n return s\r\n\r\n\r\ntry:\r\n _encoding = QtWidgets.QApplication.UnicodeUTF8\r\n\r\n def _translate(context, text, disambig):\r\n return QtWidgets.QApplication.translate(context, text, disambig, _encoding)\r\n\r\nexcept AttributeError:\r\n\r\n def _translate(context, text, disambig):\r\n return QtWidgets.QApplication.translate(context, text, disambig)\r\n\r\n\r\nclass CustomViewBox(pg.ViewBox):\r\n def __init__(self, *args, **kwds):\r\n pg.ViewBox.__init__(self, *args, **kwds)\r\n self.setMouseMode(self.PanMode)\r\n\r\n # reimplement mid-click to zoom out\r\n def mouseClickEvent(self, ev):\r\n if ev.button() == QtCore.Qt.MidButton:\r\n self.autoRange()\r\n\r\n def mouseDragEvent(self, ev):\r\n if ev.button() == QtCore.Qt.LeftButton:\r\n ev.ignore()\r\n else:\r\n pg.ViewBox.mouseDragEvent(self, ev)\r\n\r\n\r\nPREC = 5 # draw precision in meters\r\n\r\n\r\nclass cv:\r\n def __init__(self, i1, i2, L, handlePos, lastHandlePos):\r\n\r\n self.i1 = i1\r\n self.i2 = i2\r\n self.G = (i1 - i2) / 100\r\n self.L = L\r\n self.lastHandlePos = lastHandlePos\r\n self.handlePos = handlePos\r\n self.curve = pg.PlotCurveItem()\r\n self.update(i1, i2, L, handlePos, lastHandlePos)\r\n\r\n def update(self, i1, i2, L, handlePos, 
def update(self, i1, i2, L, handlePos, lastHandlePos):\r\n if L != 0:\r\n self.i1 = i1\r\n self.i2 = i2\r\n self.G = (i1 - i2) / 100\r\n self.L = L\r\n\r\n G = self.G\r\n i1 = self.i1 / 100\r\n L = self.L\r\n xpcv = handlePos.x() - L / 2\r\n ypcv = lastHandlePos.y() + i1 * (xpcv - lastHandlePos.x())\r\n\r\n x = []\r\n\r\n for n in range(0, int(L / PREC) + 1):\r\n x.append(n * PREC)\r\n\r\n x = np.array(x)\r\n y = (-G / (2 * L)) * x * x + i1 * x + ypcv\r\n\r\n x = []\r\n for n in range(0, int(L / PREC) + 1):\r\n x.append(n * PREC + xpcv)\r\n\r\n self.xpcv = xpcv\r\n self.ypcv = ypcv\r\n self.xptv = xpcv + L\r\n self.yptv = self.getCota(self.xptv)\r\n self.x = x\r\n self.y = y\r\n\r\n self.curve.clear()\r\n\r\n self.curve.setData(\r\n x, y, pen=pg.mkPen(\"r\", width=3, style=QtCore.Qt.DashLine)\r\n )\r\n\r\n else:\r\n self.L = 0\r\n\r\n def getCota(self, x):\r\n return (\r\n False\r\n if x < self.xpcv or x > self.xptv\r\n else (-self.G / (2 * self.L)) * (x - self.xpcv) ** 2\r\n + self.i1 * (x - self.xpcv) / 100\r\n + self.ypcv\r\n )\r\n\r\n\r\nclass cvEditDialog(cvEdit):\r\n def __init__(self, roi, i):\r\n super(cvEditDialog, self).__init__(None)\r\n # self.addCurveBtn.clicked.connect(self.raiseCurveGroupeBox)\r\n # self.setupUi(self)\r\n self.isBeingModified = False\r\n self.i = i\r\n self.initialHandlesPos = []\r\n self.i1 = 0\r\n self.i2 = 0\r\n self.G = 0\r\n self.Lutilizado = 0\r\n\r\n self.roi = roi\r\n estacas = self.roi.perfil.iface.view.get_estacas()\r\n est = []\r\n for linha in estacas:\r\n est.append(linha[0])\r\n self.estacas = est\r\n\r\n self.cota = self.getHandlePos(i).y()\r\n self.horizontal = self.getHandlePos(i).x()\r\n\r\n self.updateCota()\r\n self.uicota.returnPressed.connect(self.updateCota)\r\n self.uihorizontal1.returnPressed.connect(self.updateAbscissa1)\r\n self.uihorizontal2.returnPressed.connect(self.updateAbscissa2)\r\n\r\n self.okBtn.clicked.connect(self.save)\r\n self.cancelBtn.clicked.connect(self.reset)\r\n\r\n # set readonly textEdits\r\n\r\n for j in range(0, roi.countHandles()):\r\n self.initialHandlesPos.append(self.getHandlePos(j))\r\n\r\n self.handle = self.roi.handles[i][\"item\"]\r\n\r\n # checking curve existence\r\n\r\n try:\r\n self.Lutilizado = self.handle.curve.L\r\n self.groupBox_2.setEnabled(True)\r\n except AttributeError:\r\n self.handle.curve = cv(\r\n self.i1, self.i2, 0, self.handle.pos(), self.getHandlePos(i - 1)\r\n )\r\n\r\n self.initialCurve = cv(\r\n self.handle.curve.i1,\r\n self.handle.curve.i2,\r\n self.handle.curve.L,\r\n self.handle.curve.handlePos,\r\n self.handle.curve.lastHandlePos,\r\n )\r\n self.uiLutilizado.valueChanged.connect(self.updateL)\r\n self.addCurveBtn.clicked.connect(self.updateL)\r\n\r\n self.uicota1.setText(shortFloat2String(self.getHandlePos(i - 1).y()))\r\n self.uicota2.setText(shortFloat2String(self.getHandlePos(i + 1).y()))\r\n\r\n self.setupValidators()\r\n self.redefineUI(-10)\r\n self.updateVerticesCb()\r\n\r\n # self.helpBtn.clicked.connect(self.displayHelp)\r\n self.viewCurveBtn.clicked.connect(self.viewCurva)\r\n\r\n self.shortcut1 = QtWidgets.QShortcut(QtGui.QKeySequence.MoveToNextChar, self)\r\n self.shortcut2 = QtWidgets.QShortcut(\r\n QtGui.QKeySequence.MoveToPreviousChar, self\r\n )\r\n self.shortcut1.activated.connect(self.nextVertex)\r\n self.shortcut2.activated.connect(self.previousVertex)\r\n self.velproj.valueChanged.connect(lambda: self.redefineUI(self.elm))\r\n self.generateAll.clicked.connect(self.generateAllC)\r\n\r\n self.generateAll.hide()\r\n\r\n def generateAllC(self):\r\n self.verticeCb.setCurrentIndex(1)\r\n while 
self.nextBtn.isEnabled():\r\n self.updateL()\r\n self.nextVertex()\r\n self.updateL()\r\n\r\n def nextVertex(self):\r\n if self.nextBtn.isEnabled():\r\n self.next()\r\n\r\n def previousVertex(self):\r\n if self.previousBtn.isEnabled():\r\n self.previous()\r\n\r\n def viewCurva(self):\r\n center = self.getHandlePos(self.i)\r\n self.roi.perfil.vb.scaleBy((0.5, 0.5), center)\r\n\r\n def displayHelp(self):\r\n dialog = imgDialog(\r\n imagepath=\"../view/ui/helpCV.png\", title=\"Ajuda: Curvas Verticais\"\r\n )\r\n dialog.show()\r\n dialog.exec_()\r\n\r\n def setupValidators(self):\r\n self.uicota.setValidator(QtGui.QDoubleValidator())\r\n self.uihorizontal1.setValidator(QtGui.QDoubleValidator())\r\n self.uihorizontal2.setValidator(QtGui.QDoubleValidator())\r\n\r\n self.uiL.setValidator(QtGui.QDoubleValidator())\r\n\r\n def raiseCurveGroupeBox(self):\r\n self.groupBox_2.setEnabled(True)\r\n\r\n def save(self):\r\n self.redefineUI(-1)\r\n self.close()\r\n\r\n def reset(self):\r\n j = 0\r\n for pos in self.initialHandlesPos:\r\n self.roi.handles[j][\"item\"].setPos(pos)\r\n j += 1\r\n self.handle.curve.curve.clear()\r\n self.handle.curve = self.initialCurve\r\n self.uiL.setText(str(self.handle.curve.L))\r\n\r\n self.roi.plotWidget.addItem(self.handle.curve.curve)\r\n self.close()\r\n\r\n def getHandlePos(self, i):\r\n return self.roi.handles[i][\"item\"].pos()\r\n\r\n def getSegIncl(self, i, j):\r\n try:\r\n return round(\r\n 100\r\n * (self.getHandlePos(j).y() - self.getHandlePos(i).y())\r\n / (self.getHandlePos(j).x() - self.getHandlePos(i).x()),\r\n 4,\r\n )\r\n except IndexError:\r\n return None\r\n\r\n def updateCota(self):\r\n try:\r\n if not self.isBeingModified:\r\n self.cota = float(self.uicota.text())\r\n self.update()\r\n self.redefineUI(3)\r\n\r\n except ValueError:\r\n pass\r\n\r\n def updateAbscissa1(self):\r\n try:\r\n if not self.isBeingModified:\r\n self.horizontal = (\r\n float(self.uihorizontal1.text()) + self.getHandlePos(self.i - 1).x()\r\n )\r\n self.update()\r\n self.redefineUI(4)\r\n self.updateVerticesCb()\r\n\r\n except ValueError:\r\n pass\r\n\r\n def updateAbscissa2(self):\r\n try:\r\n if not self.isBeingModified:\r\n self.horizontal = (\r\n -float(self.uihorizontal2.text())\r\n + self.getHandlePos(self.i + 1).x()\r\n )\r\n self.update()\r\n self.redefineUI(4)\r\n\r\n except ValueError:\r\n pass\r\n\r\n def updateL(self):\r\n try:\r\n if not self.isBeingModified:\r\n self.Lutilizado = float(self.uiLutilizado.value())\r\n self.uif.setText(\r\n (\"{:0.6e}\".format(self.G / (100 * 2 * float(self.Lutilizado))))\r\n )\r\n self.handle.curve.update(\r\n self.i1,\r\n self.i2,\r\n self.Lutilizado,\r\n self.getHandlePos(self.i),\r\n self.getHandlePos(self.i - 1),\r\n )\r\n self.roi.plotWidget.addItem(self.handle.curve.curve)\r\n self.updateAlert()\r\n except ValueError:\r\n pass\r\n\r\n def update(self):\r\n self.roi.handles[self.i][\"item\"].setPos(self.horizontal, self.cota)\r\n self.updateAlert()\r\n\r\n def redefineUI(self, elm):\r\n self.elm = elm\r\n self.isBeingModified = True\r\n i = self.i\r\n roi = self.roi\r\n self.groupBox_2.setTitle(\"Curva Vertical: \" + str(i))\r\n\r\n if i >= roi.countHandles() - 1 or i == 0:\r\n self.removeCv()\r\n else:\r\n self.i1 = self.getSegIncl(i - 1, i)\r\n self.i2 = self.getSegIncl(i, i + 1)\r\n self.G = self.i1 - self.i2\r\n\r\n self.horizontal1 = self.horizontal - self.getHandlePos(i - 1).x()\r\n self.horizontal2 = self.getHandlePos(i + 1).x() - self.horizontal\r\n self.uihorizontal1.setText(shortFloat2String(self.horizontal1))\r\n 
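## The block below applies standard vertical-curve sight-distance relations:\r\n ## dp is the stopping sight distance (roughly 2.5 s of reaction time, 0.7*V,\r\n ## plus braking distance V**2/(255*f)); the minimum lengths use the crest\r\n ## criterion A*S**2/412 (and 2*S - 412/A) and the sag/headlight criterion\r\n ## A*S**2/(122 + 3.5*S).\r\n 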
self.uihorizontal2.setText(shortFloat2String(self.horizontal2))\r\n self.uii1.setText(shortFloat2String(self.i1))\r\n self.uii2.setText(shortFloat2String(self.i2))\r\n self.uiG.setText(shortFloat2String(self.G))\r\n self.uicota.setText(shortFloat2String(self.cota))\r\n\r\n concave = False\r\n if self.G < 0:\r\n self.uiCurveType.setText(\"Côncava\")\r\n concave = True\r\n else:\r\n self.uiCurveType.setText(\"Convexa\")\r\n\r\n g = self.G\r\n\r\n if self.velproj.value() == 0:\r\n v = velproj = Config.instance().velproj\r\n else:\r\n v = velproj = self.velproj.value()\r\n\r\n self.roi.perfil.calcularGreide()\r\n vv = self.roi.perfil.velProj\r\n\r\n self.velproj.setValue(v)\r\n # constants.Kmin[min(max(30,(round(velproj/10)*10)),120)][self.G>0]\r\n Kmin = velproj**2 / (1296 * 9.8 * 1.5 / 100)\r\n Kdes = constants.Kdes[min(max(30, (round(velproj / 10) * 10)), 120)][self.G > 0]\r\n fmax = constants.f[min(max(30, (round(velproj / 10) * 10)), 120)]\r\n # dp=0.7*vmedia(v)+vmedia(v)**2/(255*(fmax))\r\n dp = 0.7 * v + v**2 / (255 * (fmax))\r\n self.uiDp.setText(shortFloat2String(dp))\r\n\r\n self.uiLutilizado: QtWidgets.QDoubleSpinBox\r\n self.uiLutilizado.setSingleStep(Config.instance().DIST)\r\n l1 = 0\r\n l2 = 0\r\n msgLog(\"Curva: \" + str(self.i))\r\n msgLog(\"v: \" + str(velproj))\r\n msgLog(\"kmin: \" + str(Kmin))\r\n if not concave:\r\n l1 = max(0, abs(self.G) * dp**2 / 412)\r\n msgLog(\"G: \" + str(self.G))\r\n msgLog(\"dp: \" + str(dp))\r\n l2 = max(0, 2 * dp - 412 / abs(self.G))\r\n else:\r\n l1 = max(0, abs(self.G) * dp**2 / (122 + 3.5 * dp))\r\n l2 = max(0, 2 * dp - (122 + 3.5 * dp) / abs(self.G))\r\n\r\n self.lmin1: QtWidgets.QLabel\r\n self.lmin2: QtWidgets.QLabel\r\n\r\n self.lmin1.setText(shortFloat2String(l1))\r\n self.lmin2.setText(shortFloat2String(l2))\r\n lsmin = l1 if l1 >= dp else l2\r\n\r\n if self.Lutilizado == 0:\r\n dist = Config.instance().DIST\r\n self.Lutilizado = max(0.6 * v, Kmin * abs(g), lsmin)\r\n self.Lutilizado = self.Lutilizado + dist - self.Lutilizado % dist\r\n\r\n self.uif.setText(\r\n (\"{:0.6e}\".format(self.G / (100 * 2 * float(self.Lutilizado))))\r\n )\r\n self.uiLmin.setText(shortFloat2String(Kmin * abs(g)))\r\n\r\n self.uiLutilizado.setValue(self.Lutilizado)\r\n self.uiL.setText(shortFloat2String(velproj * 0.6))\r\n self.uiff.setText(shortFloat2String(fmax))\r\n\r\n self.isBeingModified = False\r\n\r\n self.roi.update()\r\n\r\n txt = \"Cota\"\r\n self.cotaLabel_6.setText(txt + \" V\" + str(i - 1) + \":\")\r\n self.cotaLabel.setText(txt + \" V\" + str(i) + \":\")\r\n self.cotaLabel_8.setText(txt + \" V\" + str(i + 1) + \":\")\r\n\r\n txt = \"Horizontal\"\r\n self.label_9.setText(txt + \" V\" + str(i - 1) + \"-\" + \"V\" + str(i) + \":\")\r\n self.label_12.setText(txt + \" V\" + str(i) + \"-\" + \"V\" + str(i + 1) + \":\")\r\n\r\n self.updateAlert()\r\n\r\n def updateAlert(self):\r\n vv = self.roi.perfil.velProj\r\n if self.velproj.value() == 0:\r\n v = Config.instance().velproj\r\n else:\r\n v = self.velproj.value()\r\n try:\r\n self.uiAlertaLb: QtWidgets.QLabel\r\n if v > vv:\r\n self.uiAlertaLb2.setText(\r\n \"Alerta: \" + roundFloat2str(v) + \">\" + roundFloat2str(vv) + \"!\"\r\n )\r\n self.uiAlertaLb2.setToolTip(\r\n \"A velocidade de projeto configurada é maior que a recomendada para esse perfil\"\r\n )\r\n else:\r\n self.uiAlertaLb2.setText(\"\")\r\n self.uiAlertaLb2.setToolTip(\"\")\r\n\r\n if (\r\n self.getHandlePos(self.i).x() + self.Lutilizado / 2\r\n > self.getHandlePos(self.i + 1).x()\r\n - self.roi.handles[self.i + 1][\"item\"].curve.L / 
2\r\n or self.getHandlePos(self.i).x() - self.Lutilizado / 2\r\n < self.getHandlePos(self.i - 1).x()\r\n + self.roi.handles[self.i - 1][\"item\"].curve.L / 2\r\n ):\r\n self.uiAlertaLb.setText(\"Alerta: Sobreposição de curvas!\")\r\n self.uiAlertaLb.setToolTip(\"\")\r\n else:\r\n self.uiAlertaLb.setText(\"\")\r\n self.uiAlertaLb.setToolTip(\"\")\r\n\r\n except:\r\n msgLog(\"Falha ao mostrar alertas\")\r\n\r\n def updateVerticesCb(self):\r\n\r\n self.verticeCb: QtWidgets.QComboBox\r\n self.verticeCb.clear()\r\n for i in range(1, len(self.roi.handles)):\r\n self.verticeCb.addItem(str(i - 1))\r\n\r\n try:\r\n self.verticeCb.currentIndexChanged.disconnect()\r\n self.nextBtn.clicked.disconnect()\r\n self.previousBtn.clicked.disconnect()\r\n except:\r\n pass\r\n\r\n self.verticeCb.setCurrentIndex(self.i)\r\n self.verticeCb.currentIndexChanged.connect(self.changeVertice)\r\n self.nextBtn.clicked.connect(self.next)\r\n self.previousBtn.clicked.connect(self.previous)\r\n\r\n if self.i == 1:\r\n self.previousBtn.setEnabled(False)\r\n else:\r\n self.previousBtn.setEnabled(True)\r\n\r\n if self.i == i - 1:\r\n self.nextBtn.setEnabled(False)\r\n else:\r\n self.nextBtn.setEnabled(True)\r\n\r\n def changeVertice(self, i):\r\n if i <= 0:\r\n i = self.i\r\n elif i >= len(self.roi.handles) - 1:\r\n i = self.i\r\n\r\n self.save()\r\n c = cvEditDialog(self.roi, i)\r\n c.move(self.x(), self.y())\r\n c.show()\r\n c.exec_()\r\n\r\n def next(self):\r\n self.verticeCb.setCurrentIndex(self.i + 1)\r\n\r\n def previous(self):\r\n self.verticeCb.setCurrentIndex(self.i - 1)\r\n\r\n\r\nclass CustomPolyLineROI(pg.PolyLineROI):\r\n wasModified = QtCore.pyqtSignal()\r\n modified = QtCore.pyqtSignal(object)\r\n\r\n def __init__(self, *args, **kwds):\r\n self.wasInitialized = False\r\n self.labels = []\r\n self.setPlotWidget(kwds.get(\"plot\", False))\r\n pg.PolyLineROI.__init__(self, *args, **kwds)\r\n self.ismodifying = False\r\n\r\n def setPlotWidget(self, plotWidget):\r\n if plotWidget:\r\n self.plotWidget = plotWidget\r\n\r\n def HandleEditDialog(self, i):\r\n dialog = cvEditDialog(self, i)\r\n dialog.show()\r\n dialog.exec_()\r\n self.sigRegionChangeFinished.emit(self)\r\n self.modified.emit(self)\r\n\r\n def mouseClickEvent(self, ev):\r\n\r\n if ev.button() == QtCore.Qt.RightButton and self.isMoving:\r\n self.modified.emit(self)\r\n ev.accept()\r\n self.cancelMove()\r\n\r\n self.wasModified.emit()\r\n ev.accept()\r\n self.modified.emit(self)\r\n self.sigClicked.emit(self, ev)\r\n\r\n def setPoints(self, points, closed=None):\r\n self.wasInitialized = False\r\n QgsMessageLog.logMessage(\"Iniciando pontos do perfil\", \"GeoRoad\", level=0)\r\n if closed is not None:\r\n self.closed = closed\r\n self.clearPoints()\r\n\r\n for i, p in enumerate(points):\r\n self.addRotateFreeHandle(p, p)\r\n\r\n start = -1 if self.closed else 0\r\n\r\n self.handles[0][\"item\"].sigEditRequest.connect(lambda: self.HandleEditDialog(0))\r\n self.handles[0][\"item\"].fixedOnX = True\r\n self.handles[0][\"item\"].edge = True\r\n\r\n for i in range(start, len(self.handles) - 1):\r\n self.addSegment(self.handles[i][\"item\"], self.handles[i + 1][\"item\"])\r\n j = i + 1\r\n self.handles[j][\"item\"].sigEditRequest.connect(\r\n functools.partial(self.HandleEditDialog, j)\r\n )\r\n\r\n self.handles[j][\"item\"].fixedOnX = True\r\n self.handles[j][\"item\"].edge = True\r\n\r\n self.wasInitialized = True\r\n self.updateHandles()\r\n\r\n def removeLabel(self, i):\r\n for text in self.labels[i + 1 :]:\r\n try:\r\n self.plotWidget.removeItem(text)\r\n 
except:\r\n pass\r\n\r\n def updateLabel(self, handle, i):\r\n msg = \"V\" + str(i - 1)\r\n i = i - 1\r\n text: pg.TextItem\r\n if i >= len(self.labels):\r\n text = pg.TextItem(msg)\r\n else:\r\n text = self.labels[i]\r\n text.setText(msg)\r\n text.setPos(handle.pos().x(), handle.pos().y())\r\n text.setAnchor((0.5, 1))\r\n if i >= len(self.labels):\r\n self.plotWidget.addItem(text)\r\n self.labels.append(text)\r\n\r\n def updateHandles(self):\r\n if self.wasInitialized:\r\n for i in range(0, len(self.handles) - 1):\r\n\r\n try:\r\n self.handles[i][\"item\"].sigEditRequest.disconnect()\r\n except:\r\n pass\r\n\r\n handle = self.handles[0][\"item\"]\r\n handle.sigEditRequest.connect(lambda: self.HandleEditDialog(0))\r\n start = -1 if self.closed else 0\r\n self.updateLabel(handle, 1)\r\n\r\n for i in range(start, len(self.handles) - 1):\r\n j = i + 1\r\n handle = self.handles[j][\"item\"]\r\n handle.sigEditRequest.connect(\r\n functools.partial(self.HandleEditDialog, j)\r\n )\r\n self.updateLabel(handle, j + 1)\r\n\r\n try:\r\n diag = cvEditDialog(self, j)\r\n diag.reset()\r\n except:\r\n pass\r\n self.removeLabel(len(self.handles) - 1)\r\n pg.QtGui.QGuiApplication.processEvents()\r\n self.wasInitialized = True\r\n\r\n def removeCurva(self, handle):\r\n if hasattr(handle, \"curve\"):\r\n self.plotWidget.removeItem(handle.curve.curve)\r\n handle.curve.curve.clear()\r\n\r\n self.removeHandle(handle)\r\n self.update()\r\n\r\n def addHandle(self, info, index=None):\r\n h = pg.ROI.addHandle(self, info, index=index)\r\n h.sigRemoveRequested.connect(self.removeHandle)\r\n self.stateChanged(finish=True)\r\n self.wasInitialized = True\r\n self.updateHandles()\r\n return h\r\n\r\n def countHandles(self):\r\n return len(self.handles)\r\n\r\n # pos should be in this item's coordinate system\r\n def segmentClicked(self, segment, ev=None, pos=None):\r\n msgLog(\"Segment click!!\")\r\n if ev is not None:\r\n pos = segment.mapToParent(ev.pos())\r\n elif pos is not None:\r\n pos = pos\r\n else:\r\n raise Exception(\"Either an event or a position must be given.\")\r\n msgLog(\"button:\", ev.button())\r\n if ev.button() == QtCore.Qt.RightButton:\r\n msgLog(\"right click detected\")\r\n i = self.segments.index(segment)\r\n d = rampaDialog(self, segment, pos, i + 2)\r\n self.ismodifying = True\r\n d.exec_()\r\n self.ismodifying = False\r\n self.wasModified.emit()\r\n self.sigRegionChangeFinished.emit(self)\r\n\r\n elif ev.button() == QtCore.Qt.LeftButton:\r\n h1 = segment.handles[0][\"item\"]\r\n h2 = segment.handles[1][\"item\"]\r\n i = self.segments.index(segment)\r\n h3 = self.addFreeHandle(pos, index=self.indexOfHandle(h2))\r\n self.addSegment(h3, h2, index=i + 1)\r\n segment.replaceHandle(h2, h3)\r\n self.wasModified.emit()\r\n self.sigRegionChangeFinished.emit(self)\r\n\r\n def getHandlePos(self, i):\r\n return self.handles[i][\"item\"].pos()\r\n\r\n def getVerticesList(self):\r\n l = []\r\n for i in range(0, len(self.handles)):\r\n p = self.getHandlePos(i)\r\n l.append([p.x(), p.y()])\r\n\r\n return l\r\n\r\n def getYfromX(self, x):\r\n for i in range(0, self.countHandles() - 2):\r\n h = self.handles[i][\"item\"].pos()\r\n nh = self.handles[1 + i][\"item\"].pos()\r\n if h.x() <= x and nh.x() >= x:\r\n return (nh.y() - h.y()) / (nh.x() - h.x()) * (x - h.x()) + h.y()\r\n raise ValueError\r\n\r\n def getSegIncl(self, i, j):\r\n try:\r\n return round(\r\n 100\r\n * (self.getHandlePos(j).y() - self.getHandlePos(i).y())\r\n / (self.getHandlePos(j).x() - self.getHandlePos(i).x()),\r\n 4,\r\n )\r\n except 
IndexError:\r\n return None\r\n\r\n def addCvs(self, cvList):\r\n\r\n if cvList == False or len(cvList) <= 2:\r\n\r\n for i in range(0, len(self.handles) - 1):\r\n i1 = self.getSegIncl(i - 1, i)\r\n i2 = self.getSegIncl(i, i + 1)\r\n L = 0\r\n self.handles[i][\"item\"].curve = cv(\r\n i1, i2, L, self.getHandlePos(i), self.getHandlePos(i - 1)\r\n )\r\n self.handles[i][\"item\"].sigRemoveRequested.disconnect()\r\n self.handles[i][\"item\"].sigRemoveRequested.connect(self.removeCurva)\r\n self.plotWidget.addItem(self.handles[i][\"item\"].curve.curve)\r\n\r\n return\r\n\r\n for i in range(0, len(self.handles) - 1):\r\n if cvList[i][1] != \"None\":\r\n i1 = self.getSegIncl(i - 1, i)\r\n i2 = self.getSegIncl(i, i + 1)\r\n L = float(cvList[i][1])\r\n self.handles[i][\"item\"].curve = cv(\r\n i1, i2, L, self.getHandlePos(i), self.getHandlePos(i - 1)\r\n )\r\n self.handles[i][\"item\"].sigRemoveRequested.disconnect()\r\n self.handles[i][\"item\"].sigRemoveRequested.connect(self.removeCurva)\r\n self.plotWidget.addItem(self.handles[i][\"item\"].curve.curve)\r\n else:\r\n\r\n i1 = self.getSegIncl(i - 1, i)\r\n i2 = self.getSegIncl(i, i + 1)\r\n L = 0\r\n self.handles[i][\"item\"].curve = cv(\r\n i1, i2, L, self.getHandlePos(i), self.getHandlePos(i - 1)\r\n )\r\n self.handles[i][\"item\"].sigRemoveRequested.disconnect()\r\n self.handles[i][\"item\"].sigRemoveRequested.connect(self.removeCurva)\r\n self.plotWidget.addItem(self.handles[i][\"item\"].curve.curve)\r\n\r\n def update(self):\r\n try:\r\n for i in range(0, len(self.handles) - 1):\r\n i1 = self.getSegIncl(i - 1, i)\r\n i2 = self.getSegIncl(i, i + 1)\r\n L = self.handles[i][\"item\"].curve.L\r\n # self.handles[i]['item'].curve.curve.clear()\r\n # self.handles[i]['item'].curve=cv(i1, i2, L,self.getHandlePos(i), self.getHandlePos(i-1))\r\n self.handles[i][\"item\"].curve.update(\r\n i1, i2, L, self.getHandlePos(i), self.getHandlePos(i - 1)\r\n )\r\n # self.plotWidget.addItem(self.handles[i]['item'].curve.curve)\r\n except:\r\n pass\r\n\r\n def removeRect(self, handle):\r\n try:\r\n self.plotWidget.removeItem(handle.leg1)\r\n self.plotWidget.removeItem(handle.leg2)\r\n self.plotWidget.removeItem(handle.rect1)\r\n self.plotWidget.removeItem(handle.rect2)\r\n except:\r\n pass\r\n pg.QtGui.QGuiApplication.processEvents()\r\n\r\n\r\nclass ssRoi(CustomPolyLineROI):\r\n wasModified = QtCore.pyqtSignal()\r\n modified = QtCore.pyqtSignal(object)\r\n\r\n def __init__(self, *args, **kwds):\r\n super(ssRoi, self).__init__(*args, **kwds)\r\n self.cota = kwds[\"cota\"]\r\n\r\n # pos should be in this item's coordinate system\r\n def segmentClicked(self, segment, ev=None, pos=None):\r\n if ev != None:\r\n pos = segment.mapToParent(ev.pos())\r\n elif pos != None:\r\n pos = pos\r\n else:\r\n raise Exception(\"Either an event or a position must be given.\")\r\n if ev.button() == QtCore.Qt.RightButton:\r\n d = ssRampaDialog(self, segment, pos, self.cota)\r\n d.exec_()\r\n self.wasModified.emit()\r\n self.sigRegionChangeFinished.emit(self)\r\n\r\n elif ev.button() == QtCore.Qt.LeftButton:\r\n h1 = segment.handles[0][\"item\"]\r\n h2 = segment.handles[1][\"item\"]\r\n i = self.segments.index(segment)\r\n h3 = self.addFreeHandle(pos, index=self.indexOfHandle(h2))\r\n self.addSegment(h3, h2, index=i + 1)\r\n segment.replaceHandle(h2, h3)\r\n self.wasModified.emit()\r\n self.sigRegionChangeFinished.emit(self)\r\n\r\n def setPoints(self, points, closed=None):\r\n self.wasInitialized = False\r\n QgsMessageLog.logMessage(\r\n \"Iniciando pontos do perfil transversal\", 
\"GeoRoad\", level=0\r\n )\r\n if closed is not None:\r\n self.closed = closed\r\n self.clearPoints()\r\n\r\n for p in points:\r\n self.addRotateHandle(p, p)\r\n # self.addTranslateHandle(p)\r\n\r\n start = -1 if self.closed else 0\r\n\r\n self.handles[0][\"item\"].sigEditRequest.connect(lambda: self.HandleEditDialog(0))\r\n\r\n for i in range(start, len(self.handles) - 1):\r\n self.addSegment(self.handles[i][\"item\"], self.handles[i + 1][\"item\"])\r\n j = i + 1\r\n self.handles[j][\"item\"].sigEditRequest.connect(\r\n functools.partial(self.HandleEditDialog, j)\r\n )\r\n\r\n self.wasInitialized = True\r\n self.updateHandles()\r\n\r\n def updateHandles(self):\r\n\r\n if self.wasInitialized:\r\n for i in range(0, len(self.handles) - 1):\r\n\r\n try:\r\n self.handles[i][\"item\"].sigEditRequest.disconnect()\r\n except:\r\n pass\r\n\r\n self.handles[0][\"item\"].sigEditRequest.connect(\r\n lambda: self.HandleEditDialog(0)\r\n )\r\n start = -1 if self.closed else 0\r\n\r\n for i in range(start, len(self.handles) - 1):\r\n j = i + 1\r\n self.handles[j][\"item\"].sigEditRequest.connect(\r\n functools.partial(self.HandleEditDialog, j)\r\n )\r\n try:\r\n diag = cvEditDialog(self, j)\r\n diag.reset()\r\n except:\r\n pass\r\n\r\n self.wasInitialized = True\r\n self.wasModified.emit()\r\n\r\n\r\nclass brucknerRoi(CustomPolyLineROI):\r\n wasModified = QtCore.pyqtSignal()\r\n modified = QtCore.pyqtSignal(object)\r\n\r\n def __init__(self, *args, **kwds):\r\n super(brucknerRoi, self).__init__(*args, **kwds)\r\n self.ismodifying = False\r\n self.ypos = self.pos().y()\r\n\r\n def removeRect(self, handle):\r\n try:\r\n self.plotWidget.removeItem(handle.leg)\r\n self.plotWidget.removeItem(handle.rect1)\r\n self.plotWidget.removeItem(handle.rect2)\r\n except:\r\n pass\r\n pg.QtGui.QGuiApplication.processEvents()\r\n\r\n def moveCota(self, y):\r\n self.setPos(QPointF(self.pos().x(), y - self.ymed))\r\n self.ypos = y - self.ymed\r\n for handle in self.getHandles()[1:-1]:\r\n handle.sigRemoveRequested.emit(handle)\r\n self.removeRect(self.getHandles()[0])\r\n self.removeRect(self.getHandles()[-1])\r\n\r\n # pos should be in this item's coordinate system\r\n def segmentClicked(self, segment, ev=None, pos=None):\r\n if ev != None:\r\n pos = segment.mapToParent(ev.pos())\r\n elif pos != None:\r\n pos = pos\r\n else:\r\n raise Exception(\"Either an event or a position must be given.\")\r\n if ev.button() == QtCore.Qt.RightButton:\r\n d = brucknerRampaDialog(self, segment, pos)\r\n d.cotasb.setValue((self.pos().y() + self.ymed) / 1000000)\r\n d.cotasb.valueChanged.connect(\r\n lambda: self.moveCota(d.cotasb.value() * 1000000)\r\n )\r\n self.ismodifying = True\r\n d.exec_()\r\n self.ismodifying = False\r\n self.wasModified.emit()\r\n self.sigRegionChangeFinished.emit(self)\r\n\r\n elif ev.button() == QtCore.Qt.LeftButton:\r\n h1 = segment.handles[0][\"item\"]\r\n h2 = segment.handles[1][\"item\"]\r\n i = self.segments.index(segment)\r\n h3 = self.addFreeHandle(pos, index=self.indexOfHandle(h2))\r\n self.addSegment(h3, h2, index=i + 1)\r\n segment.replaceHandle(h2, h3)\r\n y0 = self.getHandles()[0].pos().y()\r\n h1.setPos(QPointF(h1.pos().x(), y0))\r\n h2.setPos(QPointF(h2.pos().x(), y0))\r\n h3.setPos(QPointF(h3.pos().x(), y0))\r\n self.wasModified.emit()\r\n self.sigRegionChangeFinished.emit(self)\r\n\r\n def setPoints(self, points, closed=None):\r\n self.wasInitialized = False\r\n QgsMessageLog.logMessage(\r\n \"Iniciando pontos do diagrama de bruckner\", \"GeoRoad\", level=0\r\n )\r\n if closed is not None:\r\n 
self.closed = closed\r\n self.clearPoints()\r\n\r\n first = True\r\n for p in points:\r\n self.addRotateHandle(p, p)\r\n # self.addTranslateHandle(p)\r\n\r\n start = -1 if self.closed else 0\r\n\r\n self.handles[0][\"item\"].sigEditRequest.connect(lambda: self.HandleEditDialog(0))\r\n self.handles[0][\"item\"].fixedOnX = True\r\n self.handles[0][\"item\"].fixedOnY = True\r\n self.handles[0][\"item\"].edge = True\r\n\r\n for i in range(start, len(self.handles) - 1):\r\n self.addSegment(self.handles[i][\"item\"], self.handles[i + 1][\"item\"])\r\n j = i + 1\r\n self.handles[j][\"item\"].fixedOnY = True\r\n\r\n self.handles[j][\"item\"].fixedOnX = True\r\n self.handles[j][\"item\"].fixedOnY = True\r\n self.handles[j][\"item\"].edge = True\r\n\r\n self.wasInitialized = True\r\n self.updateHandles()\r\n\r\n def updateHandles(self):\r\n\r\n if self.wasInitialized:\r\n for i in range(0, len(self.handles) - 1):\r\n try:\r\n self.handles[i][\"item\"].sigEditRequest.disconnect()\r\n except:\r\n pass\r\n\r\n start = -1 if self.closed else 0\r\n for i in range(start, len(self.handles) - 1):\r\n j = i + 1\r\n self.handles[j][\"item\"].fixedOnY = True\r\n try:\r\n diag = cvEditDialog(self, j)\r\n diag.reset()\r\n except:\r\n pass\r\n self.wasInitialized = True\r\n\r\n\r\nclass Ui_Perfil(QtWidgets.QDialog):\r\n\r\n save = QtCore.pyqtSignal(bool)\r\n reset = QtCore.pyqtSignal()\r\n\r\n def __init__(\r\n self,\r\n ref_estaca,\r\n tipo,\r\n classeProjeto,\r\n greide,\r\n cvList,\r\n wintitle=\"Perfil Longitudinal\",\r\n iface=None,\r\n ):\r\n super(Ui_Perfil, self).__init__(None)\r\n self.iface = iface\r\n self.initVars(ref_estaca, tipo, classeProjeto, greide, cvList, wintitle)\r\n self.setupUi(self)\r\n\r\n def initVars(self, ref_estaca, tipo, classeProjeto, greide, cvList, wintitle):\r\n self.everPloted = False\r\n self.showDistances = False\r\n self.ref_estaca = ref_estaca\r\n self.tipo = tipo\r\n self.classeProjeto = classeProjeto\r\n self.estaca1txt = -1\r\n self.estaca2txt = -1\r\n self.greide = greide\r\n self.cvList = cvList\r\n self.vb = CustomViewBox()\r\n self.perfilPlot = pg.PlotWidget(\r\n viewBox=self.vb, enableMenu=False, title=wintitle\r\n )\r\n self.perfilPlot.curves = []\r\n self.saved = True\r\n self.isValid = False\r\n\r\n def showMaximized(self):\r\n if self.isValid:\r\n return super().showMaximized()\r\n else:\r\n return False\r\n\r\n def exec_(self):\r\n if self.isValid:\r\n return super().exec_()\r\n else:\r\n return messageDialog(\r\n message=\"Defina as cotas na tabela de horizontais primeiro!\"\r\n )\r\n\r\n def __setAsNotSaved(self):\r\n self.lblTipo.setText(\"Modificado\")\r\n self.saved = False\r\n\r\n def perfil_grafico(self):\r\n pontos = []\r\n k = 0\r\n while True:\r\n try:\r\n ponto = []\r\n # e = self.ref_estaca[k][0]\r\n # if e in [None,\"\"]:\r\n # break\r\n\r\n ponto.append(float(self.ref_estaca[k][1]))\r\n ponto.append(float(self.ref_estaca[k][2]))\r\n pontos.append(ponto)\r\n if ponto[1] != 0:\r\n self.isValid = True\r\n # self.comboEstaca1.addItem(_fromUtf8(e))\r\n except:\r\n break\r\n k += 1\r\n\r\n x, y = list(zip(*pontos))\r\n x = list(x)\r\n y = list(y)\r\n self.V = y\r\n self.X = x\r\n self.ymed = np.average(y)\r\n self.perfilPlot.plot(x=x, y=y, symbol=\"o\")\r\n self.perfilPlot.setWindowTitle(\"Perfil Vertical\")\r\n # A = np.array([x,np.ones(len(x))])\r\n # w = np.linalg.lstsq(A.T,y)[0]\r\n if self.greide:\r\n # i=0\r\n lastHandleIndex = len(self.greide) - 1\r\n L = []\r\n for pt in self.greide:\r\n xh = pt[0]\r\n cota = pt[1]\r\n pos = (xh, cota)\r\n 
L.append(pos)\r\n if xh != x[-1]:\r\n final = (x[-1], cota)\r\n self.greide[-1] = final\r\n messageDialog(message=\"O último vértice foi ajustado ao novo traçado!\")\r\n L[-1] = final\r\n self.roi = CustomPolyLineROI(L, plot=self.perfilPlot)\r\n else:\r\n # self.roi = CustomPolyLineROI([(x[0], w[0]*x[0]+w[1]), (x[len(x)-1],w[0]*x[len(x)-1]+w[1])], plot=self.perfilPlot)\r\n self.roi = CustomPolyLineROI(\r\n [(x[0], y[0]), (x[-1], y[-1])], plot=self.perfilPlot\r\n )\r\n self.roi.perfil = self\r\n self.roi.wasModified.connect(self.__setAsNotSaved)\r\n self.roi.setAcceptedMouseButtons(QtCore.Qt.RightButton)\r\n self.perfilPlot.addItem(self.roi)\r\n\r\n self.lastGreide = self.getVertices()\r\n self.lastCurvas = self.getCurvas()\r\n self.roi.setPlotWidget(self.perfilPlot)\r\n self.roi.addCvs(self.cvList)\r\n self.roi.sigRegionChangeFinished.connect(self.modifiedRoi)\r\n self.roi.sigRegionChangeFinished.connect(self.updater)\r\n\r\n # self.perfilPlot.plot(y,x)\r\n\r\n def updater(self):\r\n self.computingLabel.setText(\"Processando....\")\r\n if not self.showDistances:\r\n for h in self.roi.getHandles():\r\n self.roi.removeRect(h)\r\n pg.QtGui.QGuiApplication.processEvents()\r\n self.computingLabel.clear()\r\n return\r\n if not self.roi.ismodifying:\r\n handles = [self.roi.getHandlePos(i) for i in range(self.roi.countHandles())]\r\n for j, handlePos in enumerate(handles[1:]): # para cada segmento\r\n I = (handles[j + 1].y() - handles[j].y()) / (\r\n handles[j + 1].x() - handles[j].x()\r\n )\r\n greideStart = [handles[j].x(), handles[j].y()]\r\n distsY = []\r\n Xs = []\r\n terreno = []\r\n greides = []\r\n endX = handlePos.x()\r\n lx = greideStart[0]\r\n i1 = max([i for i, ix in enumerate(self.X) if ix <= lx])\r\n i2 = min([i for i, ix in enumerate(self.X) if ix >= endX])\r\n # para cada estaca\r\n for i, pt in enumerate(zip(self.X[i1:i2], self.V[i1:i2])):\r\n x = pt[0]\r\n y = pt[1]\r\n greideY = greideStart[1] + I * (x - greideStart[0])\r\n distsY.append(y - greideY)\r\n Xs.append(x)\r\n terreno.append([x, y])\r\n greides.append([x, greideY])\r\n ymax = max(distsY)\r\n ymin = min(distsY)\r\n if ymax > 0 and ymin > 0:\r\n ymin = 0\r\n else:\r\n indexMin = distsY.index(ymin)\r\n xmin = Xs[indexMin]\r\n tmin = terreno[indexMin][1]\r\n gmin = greides[indexMin][1]\r\n if ymin < 0 and ymax < 0:\r\n ymax = 0\r\n else:\r\n indexMax = distsY.index(ymax)\r\n xmax = Xs[indexMax]\r\n tmax = terreno[indexMax][1]\r\n gmax = greides[indexMax][1]\r\n\r\n # Plotar cota\r\n handle = self.roi.handles[j + 1][\"item\"]\r\n self.roi.removeRect(handle)\r\n if ymin != 0:\r\n handle.rect1 = pg.PlotCurveItem()\r\n handle.rect1.setData(\r\n [xmin, xmin],\r\n [gmin, tmin],\r\n pen=pg.mkPen(\"b\", width=2, style=QtCore.Qt.SolidLine),\r\n )\r\n handle.leg1 = pg.TextItem(color=(200, 200, 200))\r\n handle.leg1.setHtml(\"%s m\" % (str(roundFloatShort(ymin))))\r\n handle.leg1.setAnchor((0, 0))\r\n handle.leg1.setPos(xmin, gmin + (ymin) / 2)\r\n self.roi.plotWidget.addItem(handle.rect1)\r\n self.roi.plotWidget.addItem(handle.leg1)\r\n\r\n if ymax != 0:\r\n handle.rect2 = pg.PlotCurveItem()\r\n handle.rect2.setData(\r\n [xmax, xmax],\r\n [gmax, tmax],\r\n pen=pg.mkPen(\"b\", width=2, style=QtCore.Qt.SolidLine),\r\n )\r\n handle.leg2 = pg.TextItem(color=(200, 200, 200))\r\n handle.leg2.setHtml(\"%s m\" % (str(roundFloatShort(ymax))))\r\n handle.leg2.setAnchor((0, 0))\r\n handle.leg2.setPos(xmax, gmax + (ymax) / 2)\r\n self.roi.plotWidget.addItem(handle.rect2)\r\n self.roi.plotWidget.addItem(handle.leg2)\r\n\r\n 
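## removing a handle must also clear its annotation items from the plot\r\n ## (rect1/rect2 and leg1/leg2), which is exactly what removeRect() does\r\n 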
handle.sigRemoveRequested.connect(self.roi.removeRect)\r\n\r\n pg.QtGui.QGuiApplication.processEvents()\r\n self.computingLabel.clear()\r\n\r\n def calcularGreide(self):\r\n self.roi.getMenu()\r\n I = []\r\n handles = self.roi.getHandles()\r\n for i in range(0, len(self.roi.getHandles()) - 1):\r\n g1 = []\r\n g2 = []\r\n handle = handles[i]\r\n nextHandle = handles[i + 1]\r\n g1.append(handle.pos().x())\r\n g1.append(handle.pos().y())\r\n g2.append(nextHandle.pos().x())\r\n g2.append(nextHandle.pos().y())\r\n p1 = abs((g1[1] - g2[1]) / (g1[0] - g2[0])) * 100\r\n I.append(p1)\r\n A = []\r\n for x in I:\r\n A.append(x)\r\n A.sort()\r\n p1 = A[len(I) - 1]\r\n maxIndex = I.index(p1) + 1\r\n classeProjeto = Config.instance().CLASSE_INDEX - 1\r\n cfg = Config.instance()\r\n\r\n if p1 >= cfg.planoMin and p1 < cfg.planoMax:\r\n if classeProjeto <= 0:\r\n s = \"120\"\r\n elif classeProjeto < 4:\r\n s = \"100\"\r\n elif classeProjeto < 6:\r\n s = \"80\"\r\n else:\r\n s = \"60\"\r\n\r\n self.lblTipo.setText(\r\n \"Plano %s KM/h, Rampa n° %d, Inclinação %s%%\"\r\n % (s, maxIndex, roundFloat2str(I[maxIndex - 1])[:-2])\r\n )\r\n\r\n elif p1 >= cfg.onduladoMin and p1 < cfg.onduladoMax:\r\n if classeProjeto <= 0:\r\n s = \"100\"\r\n elif classeProjeto < 3:\r\n s = \"80\"\r\n elif classeProjeto < 4:\r\n s = \"70\"\r\n elif classeProjeto < 6:\r\n s = \"60\"\r\n else:\r\n s = \"40\"\r\n\r\n self.lblTipo.setText(\r\n \"Ondulado %s KM/h, Rampa n° %d, Inclinação %s%%\"\r\n % (s, maxIndex, roundFloat2str(I[maxIndex - 1])[:-2])\r\n )\r\n\r\n elif p1 <= cfg.montanhosoMax:\r\n if classeProjeto <= 0:\r\n s = \"80\"\r\n elif classeProjeto < 3:\r\n s = \"60\"\r\n elif classeProjeto < 4:\r\n s = \"50\"\r\n elif classeProjeto < 6:\r\n s = \"40\"\r\n else:\r\n s = \"30\"\r\n self.lblTipo.setText(\r\n \"Montanhoso %s KM/h, Rampa n° %d, Inclinação %s%%\"\r\n % (s, maxIndex, roundFloat2str(I[maxIndex - 1])[:-2])\r\n )\r\n\r\n else:\r\n s = \"0\"\r\n self.lblTipo.setText(\r\n \"Indefinido %s KM/h, Rampa n° %d, Inclinação %s%%\"\r\n % (s, maxIndex, roundFloat2str(I[maxIndex - 1])[:-2])\r\n )\r\n\r\n self.velProj = int(s)\r\n\r\n for a in A:\r\n maxIndex = I.index(a) + 1\r\n if a == p1:\r\n if hasattr(self, \"maxInclinationIndicatorLine\"):\r\n self.perfilPlot.removeItem(self.maxInclinationIndicatorLine)\r\n self.maxInclinationIndicatorLine.clear()\r\n self.perfilPlot.update()\r\n self.maxInclinationIndicatorLine = pg.PlotCurveItem()\r\n self.maxInclinationIndicatorLine.setData(\r\n [\r\n self.roi.getHandlePos(maxIndex - 1).x(),\r\n self.roi.getHandlePos(maxIndex).x(),\r\n ],\r\n [\r\n self.roi.getHandlePos(maxIndex - 1).y(),\r\n self.roi.getHandlePos(maxIndex).y(),\r\n ],\r\n pen=pg.mkPen(\"r\", width=4),\r\n )\r\n self.perfilPlot.addItem(self.maxInclinationIndicatorLine)\r\n\r\n def modifiedRoiStarted(self):\r\n self.roi.updateHandles()\r\n try:\r\n self.perfilPlot.removeItem(self.maxInclinationIndicatorLine)\r\n self.maxInclinationIndicatorLine.clear()\r\n self.perfilPlot.update()\r\n self.vb.update()\r\n except Exception as e:\r\n import traceback\r\n\r\n msgLog(\"Falha ao remover linha!\")\r\n msgLog(str(traceback.format_exception(None, e, e.__traceback__)))\r\n\r\n def modifiedRoi(self):\r\n self.modifiedRoiStarted()\r\n pass\r\n\r\n def estaca1(self, ind):\r\n self.estaca1txt = ind - 1\r\n\r\n def estaca2(self, ind):\r\n self.estaca2txt = ind - 1\r\n\r\n def toggleDistances(self):\r\n self.showDistances = not self.showDistances\r\n self.updater()\r\n\r\n def setupUi(self, PerfilTrecho):\r\n 
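## assemble the longitudinal-profile window: status label, action buttons and the plot widget\r\n 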
PerfilTrecho.setObjectName(_fromUtf8(\"PerfilTrecho\"))\r\n PerfilTrecho.resize(590, 169)\r\n\r\n self.perfil_grafico()\r\n\r\n self.lblTipo = QtWidgets.QLabel(PerfilTrecho)\r\n self.lblTipo.setGeometry(QtCore.QRect(220, 140, 181, 21))\r\n self.lblTipo.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lblTipo.setObjectName(_fromUtf8(\"lblTipo\"))\r\n\r\n self.btnCalcular = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnCalcular.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnCalcular.setObjectName(_fromUtf8(\"btnCalcular\"))\r\n self.btnCalcular.clicked.connect(self.calcularGreide)\r\n\r\n self.btnAutoRange = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnAutoRange.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnAutoRange.setText(\"Escala\")\r\n self.btnAutoRange.clicked.connect(self.showEscalaDialog)\r\n\r\n self.btnDistances = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnDistances.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnDistances.setText(\"Distâncias\")\r\n self.btnDistances.clicked.connect(self.toggleDistances)\r\n\r\n self.btnSave = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnSave.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnSave.setText(\"Aplicar\")\r\n self.btnSave.clicked.connect(self.salvarPerfil)\r\n\r\n self.btnCancel = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnCancel.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnCancel.setText(\"Fechar\")\r\n self.btnCancel.clicked.connect(self.close)\r\n\r\n self.btnReset = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnReset.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnReset.setText(\"Apagar\")\r\n self.btnReset.clicked.connect(self.resetGeometry)\r\n\r\n self.computingLabel = QtWidgets.QLabel(\"Processando...\")\r\n self.computingLabel.setAlignment(QtCore.Qt.AlignRight)\r\n self.computingLabel.clear()\r\n\r\n Hlayout = QtWidgets.QHBoxLayout()\r\n Hlayout2 = QtWidgets.QHBoxLayout()\r\n Vlayout = QtWidgets.QVBoxLayout()\r\n\r\n QtCore.QMetaObject.connectSlotsByName(PerfilTrecho)\r\n\r\n Hlayout.addWidget(self.btnCalcular)\r\n Hlayout.addWidget(self.btnAutoRange)\r\n Hlayout.addWidget(self.btnDistances)\r\n Hlayout.addWidget(self.btnSave)\r\n Hlayout.addWidget(self.btnReset)\r\n Hlayout.addWidget(self.btnCancel)\r\n Hlayout2.addWidget(self.computingLabel)\r\n Vlayout.addLayout(Hlayout)\r\n Vlayout.addLayout(Hlayout2)\r\n Vlayout.addWidget(self.lblTipo)\r\n Vlayout.addWidget(self.perfilPlot)\r\n\r\n self.setLayout(Vlayout)\r\n\r\n PerfilTrecho.setWindowTitle(\r\n _translate(\"PerfilTrecho\", \"Perfil do trecho\", None)\r\n )\r\n self.calcularGreide()\r\n self.btnCalcular.setText(\"Calcular rampa máxima\")\r\n\r\n def resetGeometry(self):\r\n reset = yesNoDialog(self, message=\"Realmente deseja excluir esse perfil?\")\r\n if reset:\r\n self.reset.emit()\r\n\r\n def showEscalaDialog(self):\r\n dialog = setEscalaDialog(self)\r\n dialog.show()\r\n if dialog.exec_() == QtWidgets.QDialog.Accepted:\r\n pass\r\n else:\r\n pass\r\n\r\n def salvarPerfil(self, noreset=False):\r\n \"\"\"exportar greide para o banco de dados\r\n sinal emitido para salvar\"\"\"\r\n\r\n self.save.emit(noreset)\r\n self.saved = True\r\n self.lastGreide = self.getVertices()\r\n self.lastCurvas = self.getCurvas()\r\n self.lblTipo.setText(\"Salvo!\")\r\n\r\n def getCurvas(self):\r\n r = []\r\n for i in range(0, self.roi.countHandles()):\r\n x = []\r\n x.append(i)\r\n try:\r\n x.append(str(self.roi.handles[i][\"item\"].curve.L))\r\n\r\n except:\r\n x.append(str(None))\r\n r.append(x)\r\n\r\n return r\r\n\r\n def 
getVertices(self):\r\n r = []\r\n for handle in self.roi.getHandles():\r\n x = []\r\n x.append(str(handle.pos().x()))\r\n x.append(str(handle.pos().y()))\r\n r.append(x)\r\n\r\n return r\r\n\r\n def wasSaved(self):\r\n if (\r\n self.lastGreide == self.getVertices()\r\n and self.lastCurvas == self.getCurvas()\r\n ):\r\n return True\r\n else:\r\n return False\r\n\r\n def reject(self):\r\n if not self.wasSaved():\r\n closedialog = closeDialog(self)\r\n closedialog.setModal(True)\r\n closedialog.save.connect(self.__exitSave)\r\n # closedialog.cancel.connect(closedialog.close)\r\n closedialog.dischart.connect(self.justClose)\r\n closedialog.exec_()\r\n return\r\n return super(Ui_Perfil, self).reject()\r\n\r\n def closeEvent(self, event):\r\n if not self.wasSaved():\r\n closedialog = closeDialog(self)\r\n closedialog.setModal(True)\r\n closedialog.save.connect(self.__exitSave)\r\n closedialog.cancel.connect(event.ignore)\r\n closedialog.dischart.connect(self.close)\r\n closedialog.exec_()\r\n else:\r\n event.accept()\r\n super(Ui_Perfil, self).closeEvent(event)\r\n\r\n def __exitSave(self):\r\n self.salvarPerfil(noreset=True)\r\n self.close()\r\n\r\n def justClose(self):\r\n self.wasSaved = lambda: True\r\n self.close()\r\n\r\n\r\nclass Ui_sessaoTipo(Ui_Perfil):\r\n\r\n save = QtCore.pyqtSignal(bool)\r\n plotar = QtCore.pyqtSignal(int)\r\n\r\n def __init__(\r\n self,\r\n iface,\r\n terreno,\r\n hor,\r\n ver=[],\r\n st=False,\r\n prism=None,\r\n estacanum=0,\r\n greide=[],\r\n title=\"Perfil Transversal\",\r\n ):\r\n self.progressiva = []\r\n self.terreno = []\r\n self.iface = iface\r\n self.editMode = True\r\n self.intersecTable = hor\r\n\r\n if not st:\r\n msgLog(\"Creating new terrain\")\r\n st = []\r\n self.terreno = terreno\r\n self.verticais = self.vGreideCurve(hor)\r\n defaultST = [\r\n [-9.549337802669857, -5.325013960480259],\r\n [-8.049337802669857, -5.370013960480259],\r\n [-4.299337802669858, -0.3700139604802599],\r\n [-4.099337802669858, -0.8700139604802597],\r\n [-3.699337802669858, -0.8700139604802597],\r\n [-3.499337802669858, -0.0700139604802598],\r\n [0.0006621973301417, -1.39604802598e-05],\r\n [3.500662197330141, -0.0700139604802598],\r\n [3.700662197330141, -0.8700139604802597],\r\n [4.100662197330142, -0.8700139604802597],\r\n [4.300662197330142, -0.3700139604802599],\r\n [6.800662197330142, 4.62998603951974],\r\n [8.300662197330142, 4.58498603951974],\r\n ]\r\n\r\n for item in hor:\r\n self.progressiva.append(float(item[2]))\r\n newST = []\r\n for pt in defaultST:\r\n newST.append([pt[0], pt[1] + self.verticais.getY(float(item[2]))])\r\n st.append(newST)\r\n self.st = st\r\n\r\n else:\r\n msgLog(\"Using existing terrain\")\r\n self.terreno = terreno\r\n self.progressiva = ver\r\n self.st = st\r\n self.verticais = self.vGreideCurve(hor)\r\n\r\n self.current = estacanum\r\n\r\n super(Ui_sessaoTipo, self).__init__(iface, 0, 0, st[self.current], [], title)\r\n\r\n if prism is None:\r\n try:\r\n self.prismoide\r\n except AttributeError:\r\n self.prismoide = Prismoide.QPrismoid(\r\n self.terreno, self.st, self.progressiva\r\n )\r\n self.prismoide.generate(self.current)\r\n else:\r\n self.prismoide = Prismoide.QPrismoid(prism=prism, cti=7)\r\n\r\n if (\r\n not len(self.terreno)\r\n == len(self.st)\r\n == len(self.progressiva)\r\n == len(self.verticais.getPoints())\r\n == len(self.prismoide.progressiva)\r\n ):\r\n msgLog(\"Terreno: \" + str(len(self.terreno)))\r\n msgLog(\"Progressiva: \" + str(len(self.progressiva)))\r\n msgLog(\"Verticais: \" + 
str(len(self.verticais.getPoints())))\r\n msgLog(\"Prismoide: \" + str(len(self.prismoide.progressiva)))\r\n messageDialog(\r\n message=\"O traçado foi modificado! As edições não vão funcionar corretamente!\"\r\n )\r\n\r\n self.roi.sigRegionChangeFinished.connect(self.updateData)\r\n self.updateAreaLabels()\r\n self.isValid = True\r\n\r\n def updater(self):\r\n pass\r\n\r\n def vGreideCurve(self, greide):\r\n ptList = []\r\n for item in greide:\r\n ptList.append(Figure.point(item[2], item[5]))\r\n greide = Figure.curve()\r\n greide.setPoints(ptList)\r\n return greide\r\n\r\n def createPrismoid(self, j):\r\n if not self.prismoide.generate(j) is None:\r\n messageDialog(\r\n title=\"Erro\",\r\n message=\"Falha ao criar o talude na estaca: \"\r\n + str(self.progressiva[j])\r\n + \" estaca: \"\r\n + prog2estacaStr(self.progressiva[j]),\r\n )\r\n self.updateAreaLabels()\r\n\r\n def createLabels(self):\r\n self.banquetaLbC = pg.TextItem(\r\n text=\"Banqueta em aterro\", color=(200, 200, 200), anchor=(0.5, 0)\r\n )\r\n self.taludeLbC = pg.TextItem(text=\"Talude de aterro\", anchor=(0.5, 0))\r\n self.pistaLb = pg.TextItem(text=\"Pista\", anchor=(0.5, 0))\r\n self.banquetaLbA = pg.TextItem(\r\n text=\"Banqueta em corte\", color=(200, 200, 200), anchor=(0.5, 0)\r\n )\r\n self.taludeLbA = pg.TextItem(text=\"Talude de corte\", anchor=(0.5, 0))\r\n\r\n def perfil_grafico(self, reseting=False):\r\n if not reseting:\r\n self.perfilPlot.setWindowTitle(\"Seção Tipo\")\r\n self.createLabels()\r\n\r\n if self.greide:\r\n lastHandleIndex = len(self.greide) - 1\r\n L = []\r\n for pt in self.greide:\r\n x = pt[0]\r\n cota = pt[1]\r\n pos = (x, cota)\r\n L.append(pos)\r\n\r\n self.roi = ssRoi(\r\n L, cota=self.verticais.getY(self.progressiva[self.current])\r\n )\r\n self.roi.wasModified.connect(self.setAsNotSaved)\r\n self.roi.setAcceptedMouseButtons(QtCore.Qt.RightButton)\r\n self.perfilPlot.addItem(self.roi)\r\n\r\n if reseting:\r\n self.st[self.current] = self.greide\r\n self.updateData()\r\n\r\n self.lastGreide = self.getVertices()\r\n self.lastCurvas = self.getCurvas()\r\n self.roi.setPlotWidget(self.perfilPlot)\r\n\r\n X = []\r\n Y = []\r\n for x, y in self.terreno[self.current]:\r\n X.append(x)\r\n Y.append(y)\r\n\r\n self.perfilPlot.plot(X, Y)\r\n self.updateLabels()\r\n\r\n def updateLabels(self):\r\n\r\n self.banquetaLbC.setPos(\r\n self.roi.getHandlePos(0).x(), self.roi.getHandlePos(0).y()\r\n )\r\n self.taludeLbC.setPos(\r\n (self.roi.getHandlePos(2).x() + self.roi.getHandlePos(1).x()) / 2,\r\n (self.roi.getHandlePos(1).y() + self.roi.getHandlePos(2).y()) / 2,\r\n )\r\n middle = float(self.st[self.current][int(len(self.st[self.current]) / 2)][1])\r\n self.pistaLb.setPos(0, middle)\r\n self.banquetaLbA.setPos(\r\n self.roi.getHandlePos(len(self.roi.handles) - 1).x(),\r\n self.roi.getHandlePos(len(self.roi.handles) - 1).y(),\r\n )\r\n self.taludeLbA.setPos(\r\n (\r\n self.roi.getHandlePos(len(self.roi.handles) - 3).x()\r\n + self.roi.getHandlePos(len(self.roi.handles) - 2).x()\r\n )\r\n / 2,\r\n (\r\n self.roi.getHandlePos(len(self.roi.handles) - 3).y()\r\n + self.roi.getHandlePos(len(self.roi.handles) - 2).y()\r\n )\r\n / 2,\r\n )\r\n\r\n self.perfilPlot.removeItem(self.banquetaLbA)\r\n self.perfilPlot.removeItem(self.taludeLbA)\r\n self.perfilPlot.removeItem(self.banquetaLbC)\r\n self.perfilPlot.removeItem(self.taludeLbC)\r\n self.perfilPlot.removeItem(self.pistaLb)\r\n\r\n self.perfilPlot.addItem(self.banquetaLbA)\r\n self.perfilPlot.addItem(self.taludeLbA)\r\n 
self.perfilPlot.addItem(self.banquetaLbC)\r\n self.perfilPlot.addItem(self.taludeLbC)\r\n self.perfilPlot.addItem(self.pistaLb)\r\n\r\n def updateData(self):\r\n self.updateLabels()\r\n if not self.editMode:\r\n self.plotTransCurve()\r\n\r\n # if self.editMode:\r\n vt = self.getVertices()\r\n self.st[self.current] = vt\r\n self.prismoide.st = self.st\r\n self.createPrismoid(self.current)\r\n\r\n self.perfilPlot.scale(1, 1)\r\n\r\n def getMatrixVertices(self):\r\n\r\n r = []\r\n r.append([])\r\n r.append([])\r\n\r\n for i, _ in enumerate(self.progressiva):\r\n r[1].append(self.terreno[i])\r\n r[0].append(self.st[i])\r\n\r\n return r\r\n\r\n def getTerrenoVertices(self):\r\n return self.terreno\r\n\r\n def getxList(self):\r\n return self.progressiva\r\n\r\n def setAsNotSaved(self):\r\n self.lblTipo.setText(\"Modificado\")\r\n self.saved = False\r\n\r\n def plotTrans(self):\r\n\r\n items = [\"Plotar transversal atual\", \"Plotar todas transversais\"]\r\n item, ok = QtWidgets.QInputDialog.getItem(\r\n None,\r\n \"Plotar transversais\",\r\n \"Escolha uma opção para gerar as linhas:\",\r\n items,\r\n 0,\r\n False,\r\n )\r\n\r\n if ok:\r\n if item == items[0]:\r\n self.plotar.emit(self.current)\r\n elif item == items[1]:\r\n self.plotar.emit(-1)\r\n\r\n def exec_(self):\r\n self.salvarPerfil()\r\n return super().exec_()\r\n\r\n def calcularGreide(self):\r\n pass\r\n\r\n def setupUi(self, PerfilTrecho):\r\n\r\n PerfilTrecho.setObjectName(_fromUtf8(\"PerfilTrecho\"))\r\n PerfilTrecho.resize(680, 320)\r\n\r\n self.perfil_grafico()\r\n\r\n self.lblTipo = QtWidgets.QLabel(PerfilTrecho)\r\n self.lblTipo.setGeometry(QtCore.QRect(220, 140, 181, 21))\r\n self.lblTipo.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lblTipo.setObjectName(_fromUtf8(\"lblTipo\"))\r\n\r\n self.btnCalcular = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnCalcular.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnCalcular.setObjectName(_fromUtf8(\"btnCalcular\"))\r\n self.btnCalcular.clicked.connect(self.plotTrans)\r\n self.btnCalcular.setText(\"Criar Layer\")\r\n self.btnCalcular.setToolTip(\r\n \"Plotar Layers com as tranversais sobre o traçado horizontal (Perpendiculares ao traçado)\"\r\n )\r\n\r\n self.btnExportDxf = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnExportDxf.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnExportDxf.setObjectName(_fromUtf8(\"btnExportDxf\"))\r\n self.btnExportDxf.setText(\"Exportar\")\r\n self.btnExportDxf.setToolTip(\"Exporta DXF que pode ser modificado e importado\")\r\n\r\n self.btnImportDxf = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnImportDxf.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnImportDxf.setObjectName(_fromUtf8(\"btnImportDxf\"))\r\n self.btnImportDxf.setText(\"Importar\")\r\n self.btnImportDxf.setToolTip(\"Importar DXF com a seção tipo\")\r\n\r\n self.btnAutoRange = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnAutoRange.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnAutoRange.setText(\"Extender visão\")\r\n self.btnAutoRange.clicked.connect(lambda: self.vb.autoRange())\r\n self.btnAutoRange.setToolTip(\"Ajusta o zoom e retorna para visualização padrão\")\r\n\r\n self.btnSave = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnSave.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnSave.setText(\"Salvar\")\r\n self.btnSave.clicked.connect(self.salvarPerfil)\r\n self.btnSave.setToolTip(\"Armazena edições\")\r\n\r\n self.btnClean = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnClean.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n 
self.btnClean.setText(\"Recalcular\")\r\n self.btnClean.setToolTip(\r\n \"Apaga todas as modificações e retorna com o perfil padrão para todo o traçado\"\r\n )\r\n\r\n self.btnReset = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnReset.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnReset.setText(\"Restaurar seção\")\r\n self.btnReset.clicked.connect(self.resetSS)\r\n self.btnReset.setToolTip(\"Descarta as modificações na seção atual\")\r\n\r\n self.btnVolume = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnVolume.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnVolume.setText(\"Volume\")\r\n self.btnVolume.clicked.connect(self.volumeCalc)\r\n self.btnVolume.setToolTip(\"Calcular volumes de aterro e de corte\")\r\n\r\n self.btnPrevious = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnPrevious.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnPrevious.setText(\"Anterior\")\r\n self.btnPrevious.clicked.connect(self.previousEstaca)\r\n self.btnPrevious.setToolTip(\"Estaca anterior (seta esquerda)\")\r\n\r\n self.btnNext = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnNext.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnNext.setText(\"Próxima\")\r\n self.btnNext.clicked.connect(self.nextEstaca)\r\n self.btnNext.setToolTip(\"Proxima estaca (seta direita)\")\r\n\r\n self.shortcut1 = QtWidgets.QShortcut(QtGui.QKeySequence.MoveToNextChar, self)\r\n self.shortcut2 = QtWidgets.QShortcut(\r\n QtGui.QKeySequence.MoveToPreviousChar, self\r\n )\r\n self.shortcut1.activated.connect(self.nextEstaca)\r\n self.shortcut2.activated.connect(self.previousEstaca)\r\n\r\n self.btnEditToggle = QtWidgets.QPushButton(PerfilTrecho)\r\n self.btnEditToggle.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.btnEditToggle.setText(\"Visualizar\")\r\n self.btnEditToggle.clicked.connect(self.editToggle)\r\n\r\n self.selectEstacaComboBox = QtWidgets.QComboBox(PerfilTrecho)\r\n dist = Config.instance().DIST\r\n self.selectEstacaComboBox.addItems(\r\n list(map(lambda i: fastProg2EstacaStr(i, dist), self.progressiva))\r\n )\r\n self.selectEstacaComboBox.currentIndexChanged.connect(self.changeEstaca)\r\n self.selectEstacaComboBox.view().setVerticalScrollBarPolicy(\r\n QtCore.Qt.ScrollBarAsNeeded\r\n )\r\n\r\n self.applyBtn = QtWidgets.QPushButton(PerfilTrecho)\r\n self.applyBtn.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.applyBtn.setText(\"Aplicar\")\r\n self.applyBtn.clicked.connect(self.applyTrans)\r\n self.applyBtn.setToolTip(\"Aplica o desenho atual para um intervalo de estacas\")\r\n\r\n self.ctatiBtn = QtWidgets.QPushButton(PerfilTrecho)\r\n self.ctatiBtn.setGeometry(QtCore.QRect(260, 80, 99, 27))\r\n self.ctatiBtn.setText(\"Laterais\")\r\n self.ctatiBtn.clicked.connect(self.setAtCti)\r\n self.ctatiBtn.setToolTip(\r\n \"Define os segmentos de reta que devem se repetir a partir das laterais de corte e aterro até a interseção com o terreno\"\r\n )\r\n\r\n self.areaCtLb = QtWidgets.QLabel(PerfilTrecho)\r\n self.areaAtLb = QtWidgets.QLabel(PerfilTrecho)\r\n self.areaLb = QtWidgets.QLabel(PerfilTrecho)\r\n self.progressivaLb = QtWidgets.QLabel(PerfilTrecho)\r\n\r\n self.btnNext.setDisabled(self.current >= len(self.progressiva) - 1)\r\n self.btnPrevious.setDisabled(self.current == 0)\r\n QtCore.QMetaObject.connectSlotsByName(PerfilTrecho)\r\n\r\n self.layAllOut()\r\n\r\n PerfilTrecho.setWindowTitle(\r\n _translate(\"PerfilTrecho\", \"Perfil do trecho\", None)\r\n )\r\n self.calcularGreide()\r\n\r\n self.changingEstaca = False\r\n\r\n def resetSS(self):\r\n 
self.perfilPlot.removeItem(self.roi)\r\n self.perfil_grafico(reseting=True)\r\n\r\n # self.reset()\r\n\r\n def volumeCalc(self):\r\n try:\r\n diag = VolumeDialog(self)\r\n ct, at = self.prismoide.getVolumes(0)\r\n diag.set(ct, at)\r\n diag.exec_()\r\n except Exception as e:\r\n import traceback\r\n\r\n msgLog(str(traceback.format_exception(None, e, e.__traceback__)))\r\n messageDialog(message=\"Erro! Os taludes definidos não encontram o terreno!\")\r\n\r\n def layAllOut(self):\r\n\r\n layout = self.layout()\r\n\r\n if layout is not None:\r\n index = layout.count() - 1\r\n while index >= 0:\r\n element = layout.itemAt(index).widget()\r\n if element is None:\r\n element = layout.itemAt(index).layout()\r\n if element is not None:\r\n element.setParent(None)\r\n\r\n index -= 1\r\n\r\n Hlayout = QtWidgets.QHBoxLayout()\r\n Hlayout2 = QtWidgets.QHBoxLayout()\r\n Hlayout3 = QtWidgets.QHBoxLayout()\r\n Vlayout = QtWidgets.QVBoxLayout()\r\n\r\n Hlayout.addWidget(self.btnCalcular)\r\n Hlayout.addWidget(self.btnExportDxf)\r\n Hlayout.addWidget(self.btnImportDxf)\r\n Hlayout.addWidget(self.applyBtn)\r\n Hlayout.addWidget(self.btnAutoRange)\r\n Hlayout.addWidget(self.btnReset)\r\n Hlayout.addWidget(self.btnClean)\r\n Hlayout.addWidget(self.btnSave)\r\n Hlayout.addWidget(self.btnVolume)\r\n\r\n Hlayout3.addWidget(self.areaLb)\r\n Hlayout3.addWidget(self.areaCtLb)\r\n Hlayout3.addWidget(self.areaAtLb)\r\n\r\n Hlayout3.addWidget(self.progressivaLb)\r\n\r\n Hlayout2.addWidget(self.btnEditToggle)\r\n Hlayout2.addWidget(self.ctatiBtn)\r\n Hlayout2.addWidget(self.selectEstacaComboBox)\r\n Hlayout2.addWidget(self.btnPrevious)\r\n Hlayout2.addWidget(self.btnNext)\r\n\r\n Vlayout.addLayout(Hlayout)\r\n Vlayout.addLayout(Hlayout3)\r\n Vlayout.addWidget(self.lblTipo)\r\n Vlayout.addWidget(self.perfilPlot)\r\n Vlayout.addLayout(Hlayout2)\r\n\r\n self.setLayout(Vlayout)\r\n self.Vlayout = Vlayout\r\n\r\n def disableArrowButtons(self, bool):\r\n self.btnEditToggle.setDisabled(bool)\r\n self.btnPrevious.setDisabled(bool)\r\n self.btnNext.setDisabled(bool)\r\n self.selectEstacaComboBox.setDisabled(bool)\r\n\r\n def nextEstaca(self):\r\n if self.current < len(self.progressiva) - 1 and not self.changingEstaca:\r\n self.current += 1\r\n self.changingEstaca = True\r\n self.reset()\r\n\r\n def previousEstaca(self):\r\n if self.current > 0 and not self.changingEstaca:\r\n self.current -= 1\r\n self.changingEstaca = True\r\n self.reset()\r\n\r\n def editToggle(self):\r\n if self.editMode:\r\n self.btnEditToggle.setText(\"Esconder\")\r\n self.plotTransCurve()\r\n\r\n else:\r\n self.btnEditToggle.setText(\"Visualizar\")\r\n self.curve.clear()\r\n self.perfilPlot.removeItem(self.curve)\r\n\r\n self.editMode = not self.editMode\r\n\r\n def changeEstaca(self):\r\n\r\n if not self.changingEstaca:\r\n self.current = int(\r\n self.progressiva.index(\r\n estaca2progFloat(self.selectEstacaComboBox.currentText())\r\n )\r\n )\r\n self.reset()\r\n\r\n def reset(self):\r\n perfilPlot = self.perfilPlot\r\n self.disableArrowButtons(True)\r\n\r\n self.initVars(self.iface, 0, 0, self.st[self.current], [], \"Perfil Transversal\")\r\n self.perfil_grafico()\r\n self.roi.sigRegionChangeFinished.connect(self.updateData)\r\n\r\n self.btnNext.setDisabled(self.current == len(self.progressiva))\r\n self.btnPrevious.setDisabled(self.current == 0)\r\n self.selectEstacaComboBox.setCurrentIndex(self.current)\r\n self.Vlayout.replaceWidget(perfilPlot, self.perfilPlot)\r\n self.disableArrowButtons(False)\r\n\r\n if not self.editMode:\r\n 
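## when in view mode, the computed platform curve must be redrawn as well\r\n 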
self.plotTransCurve()\r\n\r\n if self.prismoide.lastGeneratedIndex < self.current:\r\n self.createPrismoid(self.current)\r\n\r\n self.btnNext.setDisabled(self.current >= len(self.progressiva) - 1)\r\n self.btnPrevious.setDisabled(self.current == 0)\r\n self.updateAreaLabels()\r\n pg.QtGui.QGuiApplication.processEvents()\r\n self.changingEstaca = False\r\n\r\n def updateAreaLabels(self):\r\n try:\r\n act, aat = self.prismoide.getFace(self.current).getAreas()\r\n area = self.prismoide.getFace(self.current).getArea()\r\n except Exception as e:\r\n import traceback\r\n\r\n msgLog(str(traceback.format_exception(None, e, e.__traceback__)))\r\n messageDialog(message=\"Erro! Os taludes definidos não encontram o terreno!\")\r\n area = act = aat = 0\r\n self.areaLb.setText(\"Area: \" + str(round(area, 3)) + \"m²\")\r\n dist = Config.instance().DIST\r\n self.progressivaLb.setText(\r\n \"E: \"\r\n + str(int(self.progressiva[self.current] / dist))\r\n + \" + \"\r\n + str(\r\n round(\r\n (\r\n self.progressiva[self.current] / dist\r\n - int(self.progressiva[self.current] / dist)\r\n )\r\n * dist,\r\n 4,\r\n )\r\n )\r\n + \" \"\r\n + str(self.intersecTable[self.current][1])\r\n )\r\n # act,aat = self.prismoide.getAreasCtAt(self.current)\r\n self.areaCtLb.setText(\"Corte: \" + str(round(act, 3)) + \"m²\")\r\n self.areaAtLb.setText(\"Aterro: \" + str(round(aat, 3)) + \"m²\")\r\n\r\n def plotTransCurve(self):\r\n\r\n if self.everPloted:\r\n self.curve.clear()\r\n self.perfilPlot.removeItem(self.curve)\r\n\r\n self.everPloted = True\r\n\r\n self.curve = pg.PlotCurveItem()\r\n\r\n X, Y = Figure.plotCurve(self.prismoide.getCurve(self.current))\r\n\r\n self.curve.clear()\r\n self.curve.setData(X, Y, pen=pg.mkPen(\"b\", width=4, style=QtCore.Qt.SolidLine))\r\n self.perfilPlot.addItem(self.curve)\r\n\r\n def applyTrans(self):\r\n diag = ApplyTransDialog(self.iface, self.progressiva)\r\n diag.show()\r\n if diag.exec_() == QtWidgets.QDialog.Accepted:\r\n st = deepcopy(self.st[self.current])\r\n greide = self.verticais\r\n for estaca in range(diag.progressivas[0], diag.progressivas[1] + 1):\r\n progressiva = self.progressiva[estaca]\r\n newST = []\r\n for pt in st:\r\n newST.append(\r\n [\r\n float(pt[0]),\r\n float(pt[1])\r\n + float(greide.getY(progressiva))\r\n - float(greide.getY(self.progressiva[self.current])),\r\n ]\r\n )\r\n self.st[estaca] = newST\r\n\r\n self.prismoide.st = self.st\r\n erros = []\r\n for estaca in range(diag.progressivas[0], diag.progressivas[1] + 1):\r\n try:\r\n if not self.prismoide.generate(estaca) is None:\r\n erros.append(estaca)\r\n except Exception as e:\r\n import traceback\r\n\r\n msgLog(str(traceback.format_exception(None, e, e.__traceback__)))\r\n erros.append(estaca)\r\n if erros:\r\n messageDialog(\r\n message=\"Faha ao encontrar a interseção com o terreno nas estacas: \\n\"\r\n + \"; \".join([str(e) for e in erros])\r\n )\r\n\r\n def setAtCti(self):\r\n diag = SetCtAtiDialog(self.iface, self.roi.getVerticesList())\r\n diag.show()\r\n if diag.exec_() == QtWidgets.QDialog.Accepted:\r\n self.prismoide.ati = diag.ati\r\n self.prismoide.cti = diag.cti\r\n\r\n\r\nclass Ui_Bruckner(Ui_Perfil):\r\n\r\n save = QtCore.pyqtSignal()\r\n plotar = QtCore.pyqtSignal(int)\r\n\r\n def __init__(self, X, V, key=\"\", bruck=[], bruckData=[], interval=[]):\r\n self.editMode = True\r\n self.X = X\r\n # self.V=[v/1000000 for v in V]\r\n self.V = V\r\n self.bruck = bruck\r\n self.key = key\r\n self.interval = interval\r\n self.bruckData = bruckData\r\n super(Ui_Bruckner, self).__init__(\r\n 0, 0, 
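# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# The applyTrans method above re-bases one station's cross-section at other
# stations by shifting every y-coordinate by the grade-line difference.
# Minimal standalone sketch; rebase_section and its arguments are
# hypothetical names, not from the record:
def rebase_section(points, y_ref, y_target):
    dy = y_target - y_ref  # vertical offset between the two grade elevations
    return [[x, y + dy] for x, y in points]

# rebase_section([[0, 1.0], [2, 1.5]], y_ref=1.0, y_target=3.0)
# -> [[0, 3.0], [2, 3.5]]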
0, [], [], wintitle=\"Diagrama de Bruckner\"\r\n )\r\n # self.btnCalcular.setDisabled(True)\r\n self.btnDistances.hide()\r\n self.btnReset.clicked.disconnect()\r\n self.btnReset.clicked.connect(self.resetGeometry)\r\n self.btnReset.setText(\"Recalcular\")\r\n self.btnReset.setToolTip(\r\n \"Recalcular o diagrama de bruckner redefinindo o Fh com os novos dados de seção transversal\"\r\n )\r\n self.setWindowTitle(\"Diagrama de Bruckner\")\r\n self.btnSave.setText(\"Exportar\")\r\n self.btnSave.setToolTip(\"Exportar planilha em formato csv\")\r\n self.btnSave.clicked.connect(self.csvExport)\r\n self.btnCalcular.disconnect()\r\n self.btnCalcular.setText(\"Limpar Alterações\")\r\n self.btnCalcular.setToolTip(\r\n \"Apaga os dados relacionados à linha de terra para esse intervalo\"\r\n )\r\n self.btnCalcular.clicked.connect(self.resetView)\r\n if not bruckData:\r\n self.setBruckData()\r\n else:\r\n # +self.roi.ymed))\r\n self.roi.setPos(QPointF(self.roi.pos().x(), float(bruckData[0][1])))\r\n self.updater()\r\n\r\n def setBruckData(self):\r\n r = []\r\n for handle in self.roi.getHandles():\r\n x = []\r\n x.append(str(handle.pos().x()))\r\n x.append(str(self.roi.ypos))\r\n r.append(x)\r\n self.bruckData = r\r\n # self.bruckData = [[self.roi.getHandlePos(i).x(), self.roi.getHandlePos(i).y()] for i in range(self.roi.countHandles())]\r\n\r\n def resetView(self):\r\n for h in self.roi.getHandles()[1:-1]:\r\n h.sigRemoveRequested.emit(h)\r\n self.roi.removeRect(self.roi.getHandles()[-1])\r\n self.roi.removeRect(self.roi.getHandles()[0])\r\n self.setBruckData()\r\n\r\n def salvarPerfil(self):\r\n self.save.emit()\r\n\r\n def setAsNotSaved(self):\r\n pass\r\n\r\n def calcularGreide(self):\r\n pass\r\n\r\n def perfil_grafico(self):\r\n self.perfilPlot.setWindowTitle(\"Diagrama de Bruckner (m³)\")\r\n # self.createLabels()\r\n ymed = np.average(self.V)\r\n if self.bruckData:\r\n self.roi = brucknerRoi([[p[0], ymed] for p in self.bruckData])\r\n else:\r\n self.roi = brucknerRoi([[self.X[0], ymed], [self.X[-1], ymed]])\r\n self.roi.ymed = ymed\r\n self.roi.wasModified.connect(self.setAsNotSaved)\r\n self.roi.setAcceptedMouseButtons(QtCore.Qt.RightButton)\r\n self.roi.sigRegionChangeFinished.connect(self.updater)\r\n self.perfilPlot.addItem(self.roi)\r\n self.roi.setPlotWidget(self.perfilPlot)\r\n self.perfilPlot.plot(self.X, self.V)\r\n # self.updateLabels()\r\n self.isValid = True\r\n\r\n def updater(self):\r\n if not self.roi.ismodifying:\r\n handles = [\r\n self.roi.getHandlePos(i).x() for i in range(self.roi.countHandles())\r\n ]\r\n self.setBruckData()\r\n v0 = self.roi.pos().y() + self.roi.ymed\r\n dist = Config.instance().DIST\r\n for j, x in enumerate(handles[1:]): # para cada segmento\r\n lx = handles[j]\r\n A = 0\r\n vmax = 0\r\n xmax = (lx + x) / 2\r\n i1 = max([i for i, ix in enumerate(self.X) if ix <= lx])\r\n i2 = min([i for i, ix in enumerate(self.X) if ix >= x])\r\n\r\n for i, v in enumerate(self.V[i1:i2]): # para cada Volume\r\n dx = self.X[i + 1 + i1] - self.X[i + i1]\r\n A += dx * ((self.V[i + 1 + i1] + self.V[i + i1]) / 2 - v0)\r\n if abs(vmax) <= abs(v - v0):\r\n vmax = v - v0\r\n xmax = (self.X[i + 1 + i1] + self.X[i + i1]) / 2\r\n\r\n lx = x\r\n # Distância média de transporte vmax--> altura\r\n dm = abs(A / vmax)\r\n\r\n # Plotar retangulo, associar com handle\r\n handle = self.roi.handles[j + 1][\"item\"]\r\n self.roi.removeRect(handle)\r\n handle.rect1 = pg.PlotCurveItem()\r\n handle.rect2 = pg.PlotCurveItem()\r\n handle.rect1.setData(\r\n [xmax - dm / 2, xmax - dm / 2, xmax + dm / 
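# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# The updater above integrates the mass curve trapezoidally and derives the
# mean haul distance of a segment as dm = |A / vmax|, where A is the area
# enclosed between the curve and the balance line v0, and vmax is the largest
# ordinate above it. Tiny numeric check with hypothetical values:
A = 120.0     # enclosed area (volume x distance units)
vmax = 40.0   # peak ordinate of the mass curve above the balance line
dm = abs(A / vmax)
print(dm)     # -> 3.0 (average haul, in the diagram's distance units)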
2, xmax + dm / 2],\r\n [v0, v0 + vmax, v0 + vmax, v0],\r\n pen=pg.mkPen(\"b\", width=4, style=QtCore.Qt.SolidLine),\r\n )\r\n handle.rect2.setData(\r\n [xmax, xmax],\r\n [v0, v0 + vmax],\r\n pen=pg.mkPen(\"r\", width=3, style=QtCore.Qt.SolidLine),\r\n )\r\n\r\n handle.leg = pg.TextItem(color=(200, 200, 200))\r\n handle.leg.setHtml(\r\n \"A = %s \\u33A1
Vmax = %s m³
Dm = %s m\"\r\n % (\r\n str(roundFloatShort(A * dist)),\r\n str(roundFloatShort(vmax)),\r\n str(roundFloatShort(dm * dist)),\r\n )\r\n )\r\n handle.leg.setAnchor((0.5, abs(vmax) / vmax))\r\n handle.leg.setPos(xmax, v0 + vmax)\r\n handle.sigRemoveRequested.connect(self.roi.removeRect)\r\n self.roi.plotWidget.addItem(handle.rect1)\r\n self.roi.plotWidget.addItem(handle.rect2)\r\n self.roi.plotWidget.addItem(handle.leg)\r\n\r\n pg.QtGui.QGuiApplication.processEvents()\r\n\r\n def closeEvent(self, event):\r\n pass\r\n\r\n def wasSaved(self):\r\n pass\r\n\r\n def reject(self):\r\n pass\r\n\r\n def resetGeometry(self):\r\n reset = yesNoDialog(self, message=\"Realmente recalcular todo o Diagrama?\")\r\n self.reseted = reset\r\n if reset:\r\n self.reset.emit()\r\n\r\n def csvExport(self):\r\n import csv\r\n\r\n filter = \".csv\"\r\n filename = QtWidgets.QFileDialog.getSaveFileName(\r\n filter=\"Arquivo csv(*\" + filter + \" *\" + filter.upper() + \")\"\r\n )[0]\r\n if filename in [\"\", None]:\r\n return\r\n filename = (\r\n str(filename) if str(filename).endswith(filter) else str(filename) + filter\r\n )\r\n delimiter = str(Config.CSV_DELIMITER.strip()[0])\r\n table = self.bruck[\"table\"]\r\n\r\n ei, ef = self.key.split(\"-\")\r\n if not ei or not ef or not self.interval:\r\n msgLog(\r\n \"Algo de errado com o intervalo de estacas: \" + ei + \"-\" + ef + \" !!!\"\r\n )\r\n return\r\n ei, ef = self.interval\r\n table = table[ei : ef + 1]\r\n header = [\r\n \"estaca\",\r\n \"corte\",\r\n \"aterro\",\r\n \"at.cor.\",\r\n \"soma\",\r\n \"semi-distancia\",\r\n \"vol.corte\",\r\n \"vol.aterro\",\r\n \"volume\",\r\n \"vol.acum\",\r\n ]\r\n with open(filename, \"w\") as fo:\r\n writer = csv.writer(fo, delimiter=delimiter, dialect=\"excel\")\r\n if type(header) == list:\r\n writer.writerow(header)\r\n for d in table:\r\n r = [d[k] for k in header]\r\n for i, c in enumerate(r):\r\n try:\r\n c = round(float(c), 4)\r\n r[i] = str(c).replace(\".\", \",\")\r\n except:\r\n r[i] = str(c)\r\n writer.writerow(r)\r\n","repo_name":"matheusfillipe/Topografia","sub_path":"app/controller/perfil.py","file_name":"perfil.py","file_ext":"py","file_size_in_byte":82172,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"28306944447","text":"import adafruit_ahtx0\nfrom adafruit_datetime import datetime\nfrom adafruit_requests import Session\nimport board\nfrom busio import I2C\nfrom digitalio import DigitalInOut, Direction\nfrom socketpool import SocketPool\nfrom ssl import create_default_context\nfrom time import monotonic, sleep\nfrom wifi import radio\n\ntry:\n from secrets import secrets\nexcept ImportError:\n print('Be sure to setup secrets.py for wifi secrets!')\n raise\n\n# Setup an LED\ndef setup_led(led_pin):\n led = DigitalInOut(getattr(board, led_pin))\n led.direction = Direction.OUTPUT\n # Ensure the LED is off\n led.value = False\n return led\n\n# Blink an LED\ndef blink_led(led_pin = 'LED_WHITE'):\n led = leds[led_pin]\n led.value = not led.value\n sleep(0.1)\n led.value = not led.value\n\n# Setup I2C and AHT20 sensor\ni2c = I2C(board.IO7, board.IO8)\nsensor = adafruit_ahtx0.AHTx0(i2c)\n\n# Setup the LEDs available on this board\nled_pins = ['LED_RED', 'LED_GREEN', 'LED_BLUE', 'LED_WHITE', 'LED_YELLOW']\nleds = dict(zip(led_pins, map(lambda led_pin: setup_led(led_pin), led_pins)))\n\n# Connect to the WiFi network\nradio.connect(secrets['wireless_name'], secrets['wireless_password'])\n# Create socket pool\npool = SocketPool(radio)\n# Create requests 
Session object\nrequests = Session(pool, create_default_context())\n\nlast = monotonic()\nwhile True:\n # Only check once per second\n if monotonic() - last < 1:\n continue\n last = monotonic()\n now = datetime.now()\n # Every 15 minutes, sample the sensor\n if now.minute % 15 == 0 and now.second == 0:\n data = { 'temperature': sensor.temperature, 'humidity': sensor.relative_humidity }\n res = requests.post('https://jsonplaceholder.typicode.com/posts', data=data)\n print('Response from API:', res.json())\n # Every 30 seconds blink the LED\n if now.second % 30 == 0:\n blink_led()\n","repo_name":"makvoid/Blog-Articles","sub_path":"ESP32-ESP8266-Experiments/esp32/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"2337033470","text":"\"\"\"\n\n\n\"\"\"\n\nfrom pyjade import utils as jadeutils\nfrom pyramid.compat import escape\nfrom pyjade.compiler import Compiler\n\nprefix = 'pyjade.'\nlist_attributes = ['inlineTags', 'selfClosing', 'autocloseCode']\nbool_attributes = ['pretty', 'compileDebug']\n\ndef includeme(config):\n \"\"\"\n\n\n \"\"\"\n\n def process(src, filename = None, parser = jadeutils.Parser,\n compiler = jadeutils.Compiler):\n \"\"\"\n\n\n \"\"\"\n\n _parser = parser(src, filename = filename)\n block = _parser.parse()\n pyjade_settings = {key[len(prefix):] : value for key, value in\n config.registry.settings.iteritems() if key.startswith(prefix)}\n\n for attr in list_attributes:\n parse_list(pyjade_settings, attr)\n\n for attr in bool_attributes:\n parse_bool(pyjade_settings, attr)\n\n _compiler = compiler(block, **pyjade_settings)\n return _compiler.compile().strip()\n\n jadeutils.process = process\n\n Compiler.filters.update({\n 'plain': filter_plain,\n 'css': filter_css,\n 'javascript': filter_javascript,\n 'escaped': filter_escaped\n })\n\n # Load pyjade.\n config.include('pyjade.ext.pyramid')\n\ndef parse_list(settings, attribute):\n \"\"\"\n\n\n \"\"\"\n\n if attribute in settings:\n settings[attribute] = (\n settings[attribute].split(','))\n\ndef parse_bool(settings, attribute):\n \"\"\"\n\n\n \"\"\"\n\n if attribute in settings:\n settings[attribute] = not (settings[attribute].lower() == 'false'\n or settings[attribute].lower() == 'no'\n or settings[attribute].lower() == 'off')\n\ndef filter_plain(x, y):\n \"\"\"\n\n\n \"\"\"\n\n return x\n\ndef filter_css(x, y):\n \"\"\"\n\n\n \"\"\"\n\n return ''.format(x)\n\ndef filter_javascript(x, y):\n \"\"\"\n\n\n \"\"\"\n\n return ''.format(x)\n\ndef filter_escaped(x, y):\n \"\"\"\n\n \n \"\"\"\n\n return escape(x)\n","repo_name":"jaminh/duckly-server","sub_path":"duckly/pyjade_loader.py","file_name":"pyjade_loader.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"424424725","text":"# 프로그래머스 양과 늑대\n\n# 만약 늑대의 수 >= 양의 수:\n# 모든 양을 잡아먹어버림\n\n# 양이 최대한 늑대에게 안 잡하먹히도록 하면서 돌아오기\n\n# info 0 : 양, 1 : 늑대\n# edges [부모, 자식]\n\nfrom collections import defaultdict, deque\n\ndef dfs(queue, sheep, wolf):\n global answer\n answer = max(sheep, answer)\n \n for _ in range(len(queue)):\n popped = queue.popleft()\n # 양\n if infos[popped] == 0:\n for j in trees[popped]:\n queue.append(j)\n dfs(queue, sheep + 1, wolf)\n for j in trees[popped]:\n queue.pop()\n # 늑대\n elif infos[popped] == 1:\n # 양의 수가 늑대 + 1보다 더 많음\n if sheep > wolf + 1:\n for j in trees[popped]:\n queue.append(j)\n dfs(queue, sheep, wolf + 1)\n for 
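# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# The dfs above mutates a shared frontier, recurses, then undoes its own
# appends so sibling branches see the original state. The same
# mutate/recurse/undo pattern in a minimal standalone form (subsets is a
# hypothetical example, not from the record):
def subsets(items, chosen=None, out=None):
    chosen = [] if chosen is None else chosen
    out = [] if out is None else out
    out.append(list(chosen))
    for i, x in enumerate(items):
        chosen.append(x)                     # mutate shared state
        subsets(items[i + 1:], chosen, out)  # recurse
        chosen.pop()                         # undo before the next sibling
    return out

# subsets([1, 2]) -> [[], [1], [1, 2], [2]]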
j in trees[popped]:\n queue.pop()\n queue.append(popped)\n\n\ndef solution(info, edges):\n global answer, trees, infos\n infos = info\n answer = 0\n trees = defaultdict(list)\n for one, two in edges:\n trees[one].append(two)\n queue = deque([0])\n dfs(queue, 0, 0)\n return answer\n\nprint(solution([0,0,1,1,1,0,1,0,1,0,1,1], [[0,1],[1,2],[1,4],[0,8],[8,7],[9,10],[9,11],[4,3],[6,5],[4,6],[8,9]]))\n# 5","repo_name":"kimhyeongjun95/AlgoPullgo","sub_path":"037주차/양과 늑대/hyeongjun.py","file_name":"hyeongjun.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"9401850502","text":"# Ejercicio 1\n# Jose Daniel Gomez 21429\n# Gonzalo Santizo 21504\n\nfrom graphviz import Digraph\n\nclass Nodo:\n def __init__(self, valor):\n self.valor = valor\n self.izquierda = None\n self.derecha = None\n\ndef infix_to_postfix(expression):\n precedence = {'*': 3, '+': 2, '|': 1}\n \n def is_operator(char):\n return char in precedence or char in '()'\n\n output = []\n stack = []\n \n for char in expression:\n if char.isalpha():\n output.append(char)\n elif char == '(':\n stack.append(char)\n elif char == ')':\n while stack and stack[-1] != '(':\n output.append(stack.pop())\n stack.pop()\n elif char in precedence:\n while stack and stack[-1] != '(' and precedence[char] <= precedence.get(stack[-1], 0):\n output.append(stack.pop())\n stack.append(char)\n \n while stack:\n output.append(stack.pop())\n\n result = []\n\n for item in output:\n if item.isalpha():\n result.insert(0, item)\n else:\n result.append(item)\n \n return ''.join(result)\n\ndef construir_arbol(expresion_postfix):\n pila = []\n\n for caracter in expresion_postfix:\n if caracter.isalpha():\n nodo = Nodo(caracter)\n pila.append(nodo)\n else:\n nodo = Nodo(caracter)\n if len(pila)>0:\n nodo.derecha = pila.pop()\n if len(pila)>0:\n nodo.izquierda = pila.pop()\n pila.append(nodo)\n\n return pila[-1]\n\ndef imprimir_arbol(nodo, espaciado=0):\n if nodo is None:\n return\n espaciado += 4\n imprimir_arbol(nodo.derecha, espaciado)\n if nodo.derecha is not None:\n print(\" \" * (espaciado - 4) + \"│\", \" │\")\n print(\" \" * (espaciado - 4) + \"──\", nodo.valor)\n if nodo.izquierda is not None:\n print(\" \" * (espaciado-1) + \"│\")\n imprimir_arbol(nodo.izquierda, espaciado)\n\n#--------------------------------------------------------------------------------\n\nclass Estado:\n contador = 0\n \n def __init__(self, aceptacion=False, simbolo=None):\n self.id = Estado.contador\n Estado.contador += 1\n self.aceptacion = aceptacion\n self.simbolo = simbolo\n self.transiciones = {}\n \n def agregar_transicion(self, simbolo, estado):\n if simbolo in self.transiciones:\n self.transiciones[simbolo].add(estado)\n else:\n self.transiciones[simbolo] = {estado}\n \n\ndef crear_afn(nodo):\n if nodo is None:\n return []\n \n if nodo.valor.isalpha():\n estado_inicial = Estado()\n estado_final = Estado(aceptacion=True, simbolo=nodo.valor)\n estado_inicial.agregar_transicion(nodo.valor, estado_final)\n return [estado_inicial, estado_final]\n \n if nodo.valor == \"*\":\n nodo_izq = nodo.izquierda\n \n afn_izq = crear_afn(nodo_izq)\n \n estado_inicial = Estado()\n estado_final = Estado(aceptacion=True)\n if len(afn_izq)>0:\n estado_inicial.agregar_transicion('ε', afn_izq[0])\n estado_inicial.agregar_transicion('ε', estado_final)\n afn_izq[-1].agregar_transicion('ε', afn_izq[0])\n afn_izq[-1].agregar_transicion('ε', estado_final)\n return [estado_inicial, estado_final]\n\n if 
nodo.valor == \"+\":\n nodo_izq = nodo.izquierda\n \n afn_izq = crear_afn(nodo_izq)\n \n if len(afn_izq)>0:\n estado_inicial = afn_izq[0]\n estado_final = Estado(aceptacion=True)\n estado_final.agregar_transicion('ε', estado_inicial)\n afn_izq[-1].agregar_transicion('ε', estado_final)\n return [estado_inicial, estado_final]\n\n if nodo.valor in \"|\":\n nodo_izq, nodo_der = nodo.izquierda, nodo.derecha\n \n afn_izq = crear_afn(nodo_izq)\n afn_der = crear_afn(nodo_der)\n \n estado_inicial = Estado()\n estado_final = Estado(aceptacion=True)\n estado_inicial.agregar_transicion('ε', afn_izq[0])\n estado_inicial.agregar_transicion('ε', afn_der[0])\n afn_izq[-1].agregar_transicion('ε', estado_final)\n afn_der[-1].agregar_transicion('ε', estado_final)\n return [estado_inicial, estado_final]\n\n# Función para obtener los estados alcanzables desde un estado dado\ndef estados_alcanzables(estado, visitados=None):\n if visitados is None:\n visitados = set()\n visitados.add(estado)\n if 'ε' in estado.transiciones:\n for siguiente_estado in estado.transiciones['ε']:\n if siguiente_estado not in visitados:\n estados_alcanzables(siguiente_estado, visitados)\n return visitados\n\n# Crear una lista de nodos y una lista de estados para mostrarlos en pantalla\nnodos = []\nestados = []\n\n# if type(afn)!='NoneType':\n# for estado in afn:\n# estados_alcanzables_desde_estado = estados_alcanzables(estado)\n# nodos.append(estado)\n# estados.append(estados_alcanzables_desde_estado)\n\n# Imprimir nodos y estados\nfor i, estado in enumerate(nodos):\n print(f\"Nodo {i}: {estado.simbolo if estado.aceptacion else 'No aceptación'}\")\n\n# for i, estados_alcanzables_desde_estado in enumerate(estados):\n# print(f\"Estados alcanzables desde Nodo {i}: {[estado.id for estado in estados_alcanzables_desde_estado]}\")\n\n# def crear_grafico_afn(afn):\n# dot = Digraph(comment='AFN')\n \n# for estado in afn:\n# dot.node(str(estado.id), label=f\"{estado.id}\\n{estado.simbolo if estado.simbolo else ''}\", shape='doublecircle' if estado.aceptacion else 'circle')\n \n# if estado.transiciones:\n# for simbolo, destinos in estado.transiciones.items():\n# for destino in destinos:\n# dot.edge(str(estado.id), str(destino.id), label=simbolo)\n \n# return dot\n\ndef crear_grafico_afn(afn):\n if afn is None:\n return None\n\n dot = Digraph(comment='AFN')\n\n for estado in afn:\n dot.node(str(estado.id), label=f\"{estado.id}\\n{estado.simbolo if estado.simbolo else ''}\", shape='doublecircle' if estado.aceptacion else 'circle')\n\n if estado.transiciones:\n for simbolo, destinos in estado.transiciones.items():\n for destino in destinos:\n dot.edge(str(estado.id), str(destino.id), label=simbolo)\n\n return dot\n\n\n#Infix to postfix\nexpression = \"(a*|b*)+\"\npostfix = infix_to_postfix(expression)\nprint(\"1)\",postfix) # Salida: ab|c*d+\n\n# arbol desde postfix\narbol = construir_arbol(postfix)\nimprimir_arbol(arbol)\n\n# Ejemplo de uso con el árbol generado anteriormente\n# arbol = construir_arbol(\"(a*|b*)+\")\nafn1 = crear_afn(arbol)\n\n# Crear el gráfico del AFN\nafn_grafico = crear_grafico_afn(afn1)\nif afn_grafico is not None:\n afn_grafico.format = 'jpg'\n afn_grafico.render('afn1', view=True)\n\nprint(\"\\n\\n\")\n\n#Infix to postfix\nexpression = \"((e|a)|b*)*\"\npostfix = infix_to_postfix(expression)\nprint(\"2)\",postfix) # Salida: ab|c*d+\n\n# arbol desde postfix\narbol = construir_arbol(postfix)\nimprimir_arbol(arbol)\n\n# Ejemplo de uso con el árbol generado anteriormente\n# arbol = construir_arbol(\"(a*|b*)+\")\nafn2 = 
crear_afn(arbol)\n\n# Crear el gráfico del AFN\nafn_grafico = crear_grafico_afn(afn2)\nif afn_grafico is not None:\n afn_grafico.format = 'jpg'\n afn_grafico.render('afn2', view=True)\n\nprint(\"\\n\\n\")\n\n#Infix to postfix\nexpression = \"(a|b)*abb(a|b)*\"\npostfix = infix_to_postfix(expression)\nprint(\"3)\",postfix) # Salida: ab|c*d+\n\n# arbol desde postfix\narbol = construir_arbol(postfix)\nimprimir_arbol(arbol)\n\n# Ejemplo de uso con el árbol generado anteriormente\n# arbol = construir_arbol(\"(a*|b*)+\")\nafn3 = crear_afn(arbol)\n\n# Crear el gráfico del AFN\nafn_grafico = crear_grafico_afn(afn3)\nif afn_grafico is not None:\n afn_grafico.format = 'jpg'\n afn_grafico.render('afn3', view=True)\n\nprint(\"\\n\\n\")\n\n#Infix to postfix\nexpression = \"0?(1?)?0*\"\npostfix = infix_to_postfix(expression)\nprint(\"4)\",postfix) # Salida: ab|c*d+\n\n# arbol desde postfix\narbol = construir_arbol(postfix)\nimprimir_arbol(arbol)\n\n# Ejemplo de uso con el árbol generado anteriormente\n# arbol = construir_arbol(\"(a*|b*)+\")\nafn4 = crear_afn(arbol)\n\n# Crear el gráfico del AFN\nafn_grafico = crear_grafico_afn(afn4)\nif afn_grafico is not None:\n afn_grafico.format = 'jpg'\n afn_grafico.render('afn4', view=True)\n\ndef verificar_cadena(cadena, afn):\n if type(afn)!=\"NoneType\":\n estados_actuales = set([afn.estado_inicial]) # Conjunto de estados actuales\n for simbolo in cadena:\n estados_siguientes = set() # Conjunto de estados siguientes\n for estado in estados_actuales:\n transiciones = afn.obtener_transiciones(estado, simbolo)\n estados_siguientes.update(transiciones)\n estados_actuales = estados_siguientes\n return any(estado in afn.estados_finales for estado in estados_actuales)\n\nverificar_cadena(\"abbab\", afn1)\n","repo_name":"JDgomez2002/lab04_CC2019","sub_path":"Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":9031,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22060293877","text":"\"\"\"\nWritten by Danfei Xu\n\nRecollect data by using a joint position controller to try and track the\nreference trajectories.\n\nThis is useful for collecting datasets with lower control frequencies.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport h5py\nimport json\nimport argparse\nimport random\nimport datetime\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom tqdm import tqdm\n\nimport mujoco_py\nimport robosuite\nimport robosuite.utils.transform_utils as T\nfrom robosuite.utils.mjcf_utils import postprocess_model_xml\n\nimport RobotTeleop\nfrom RobotTeleop.configs.base_config import BaseServerConfig\n\n#import batchRL\n\n# environment\n# ENV_NAME = \"SawyerCircusEasyTeleop\"\nENV_NAME = \"SawyerCircusTeleop\"\n\n# number of demos to collect\nNUM_DEMOS = 27\n\n# parameters for path\nCORNER_DISTANCE = 0.2 # path is two sides of a triangle - this determines the corner\nNUM_STEPS_LIFT = (40, 41) # task path length ranges\nNUM_STEPS_REACH = (50, 51)\nNUM_STEPS_INSERT = (50, 51)\nNUM_STEPS_FIT = 50\nNUM_STEPS_WAIT = 10\nLIFT_X_RANGE = 0.01 # randomization for grasp\nLIFT_Z_RANGE = 0.015\nPERTURB = False\n\n# CORNER_DISTANCE = 0.2 # path is two sides of a triangle - this determines the corner\n# NUM_STEPS_LIFT = (40, 41) # task path length ranges\n# NUM_STEPS_REACH = (50, 101)\n# NUM_STEPS_INSERT = (50, 76)\n# NUM_STEPS_FIT = 50\n# NUM_STEPS_WAIT = 10\n# LIFT_X_RANGE = 0.01 # randomization for grasp\n# LIFT_Z_RANGE = 0.015\n# PERTURB = True\n\ndef interpolate_poses(pos1, rot1, pos2, rot2, steps, 
perturb=False):\n \"\"\"\n Extremely simple linear interpolation between two poses.\n\n If @perturb, randomly move all the interpolated position points in a uniform, non-overlapping grid.\n \"\"\"\n\n delta_pos = pos2 - pos1\n pos_step_size = delta_pos / steps\n grid = np.arange(steps).astype(np.float64)\n if perturb:\n # move the interpolation grid points by up to a half-size forward or backward\n perturbations = np.random.uniform(\n low=-0.5,\n high=0.5,\n size=(steps - 2,),\n )\n grid[1:-1] += perturbations\n pos_steps = np.array([pos1 + grid[i] * pos_step_size for i in range(steps)])\n\n # break up axis angle delta rotation into smaller angles along same axis\n delta_rot_mat = rot1.dot(rot2.T)\n delta_quat = T.mat2quat(delta_rot_mat)\n delta_axis, delta_angle = T.quat2axisangle(delta_quat)\n rot_step_size = delta_angle / steps\n\n # convert into delta rotation matrices, and then convert to absolute rotations\n delta_rot_steps = [T.quat2mat(T.axisangle2quat(delta_axis, i * rot_step_size)) for i in range(steps)]\n rot_steps = np.array([delta_rot_steps[i].T.dot(rot1) for i in range(steps)])\n\n return pos_steps, rot_steps\n\ndef get_lift_waypoints(env, perturb=False):\n \"\"\"\n Generate waypoints to grab the rod.\n \"\"\"\n\n assert ENV_NAME == \"SawyerCircusTeleop\"\n\n # initial ee pose\n pos_0 = np.array(env.sim.data.body_xpos[env.sim.model.body_name2id(\"right_hand\")])\n rot_0 = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])\n \n # pose of handle\n pos_handle = np.array(env.sim.data.geom_xpos[env.sim.model.geom_name2id(\"block_handle\")])\n rot_handle = np.array(rot_0) \n\n # z-tolerance to hover above handle first\n pos_handle_hover = np.array(pos_handle)\n pos_handle_hover[2] += 0.04 #0.02\n\n # offset to correct for ee pos to center of ee gripper AND a little extra to center the grasp\n # since the site of the gripper is at the bottom, in between the finger edges\n ee_site = np.array(env.sim.data.site_xpos[env.sim.model.site_name2id(\"grip_site\")])\n offset = pos_0 - ee_site\n offset[2] -= 0.02\n pos_handle += offset\n pos_handle_hover += offset\n\n if perturb:\n # sample a small offset to move the grasp location a little bit\n x_perturb = np.random.uniform(low=-LIFT_X_RANGE, high=LIFT_X_RANGE)\n z_perturb = np.random.uniform(low=-LIFT_Z_RANGE, high=LIFT_Z_RANGE)\n perturb = np.array([x_perturb, 0., z_perturb])\n pos_handle += perturb\n pos_handle_hover += perturb\n\n # sample random amount of time it should take to reach the handle\n num_steps_lift = np.random.randint(NUM_STEPS_LIFT[0], NUM_STEPS_LIFT[1])\n\n # hover above handle\n pos_steps0, rot_steps0 = interpolate_poses(\n pos1=pos_0, \n rot1=rot_0, \n pos2=pos_handle_hover, \n rot2=rot_handle, \n steps=num_steps_lift,\n perturb=PERTURB,\n )\n gripper_steps0 = [[-1.] for _ in range(num_steps_lift)] # open\n\n # reach handle\n pos_steps1, rot_steps1 = interpolate_poses(\n pos1=pos_handle_hover, \n rot1=rot_handle, \n pos2=pos_handle, \n rot2=rot_handle, \n steps=10,\n perturb=PERTURB,\n )\n gripper_steps1 = [[-1.] 
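# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# interpolate_poses above splits the relative rotation into equal axis-angle
# increments. A standalone numpy-only version of that step, using the
# Rodrigues formula (rotation_steps and its arguments are hypothetical
# names, not robosuite API):
import numpy as np

def rotation_steps(axis, angle, steps):
    axis = np.asarray(axis, dtype=float)
    axis /= np.linalg.norm(axis)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    mats = []
    for i in range(steps + 1):
        a = angle * i / steps  # i-th fraction of the total angle
        # Rodrigues: R = I + sin(a) K + (1 - cos(a)) K @ K
        mats.append(np.eye(3) + np.sin(a) * K + (1.0 - np.cos(a)) * K @ K)
    return mats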
for _ in range(10)] # open\n\n all_pos_steps = np.concatenate([pos_steps0, pos_steps1])\n all_rot_steps = np.concatenate([rot_steps0, rot_steps1])\n all_grip_steps = np.concatenate([gripper_steps0, gripper_steps1])\n\n return all_pos_steps, all_rot_steps, all_grip_steps\n\n\ndef get_pose_waypoints(env):\n \"\"\"\n Generate triangle path with interpolation.\n \"\"\"\n\n # get the waypoints from current position of arm to goal\n pos_0 = np.array(env.sim.data.body_xpos[env.sim.model.body_name2id(\"right_hand\")])\n rot_0 = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])\n pos_goal, rot_goal, pos_goal2 = get_goal_pose(env)\n if ENV_NAME == \"SawyerCircusEasyTeleop\":\n # try for direct insertion instead of aligning tip in easy env\n pos_goal = pos_goal2\n\n # get vector in x-y plane parallel to the ring to find the x-y line that passes through the ring\n ring_geoms = []\n for i in range(env.num_ring_geoms):\n ring_geom_pos = np.array(env.sim.data.geom_xpos[env.sim.model.geom_name2id(\"hole_ring_{}\".format(i))])\n ring_geoms.append(ring_geom_pos)\n ring_dir = ring_geoms[1][:2] - ring_geoms[0][:2]\n ring_perp = np.array([ring_dir[1], -ring_dir[0]])\n ring_perp /= np.linalg.norm(ring_perp)\n\n # place a corner waypoint a distance away from the hole\n pos_corner = np.array(pos_goal)\n pos_corner[0] += CORNER_DISTANCE * ring_perp[0]\n pos_corner[1] += CORNER_DISTANCE * ring_perp[1]\n rot_corner = np.array(rot_goal)\n\n # sample random amount of time it should take to traverse the triangle path\n num_steps = np.random.randint(NUM_STEPS_REACH[0], NUM_STEPS_REACH[1])\n\n # interpolated poses along the first leg of the triangle\n pos_steps, rot_steps = interpolate_poses(\n pos1=pos_0, \n rot1=rot_0, \n pos2=pos_corner, \n rot2=rot_corner, \n steps=num_steps,\n perturb=PERTURB,\n )\n gripper_steps = [[0.] for _ in range(num_steps)] # closed\n\n return pos_steps, rot_steps, gripper_steps\n\n\ndef get_insertion_waypoints(env):\n \"\"\"\n Get waypoints for insertion. Assumes the arm is already sufficiently\n close to the hole.\n \"\"\"\n\n # get the waypoints from current position of arm to goal\n pos_0 = np.array(env.sim.data.body_xpos[env.sim.model.body_name2id(\"right_hand\")])\n rot_0 = np.array(env.sim.data.body_xmat[env.sim.model.body_name2id(\"right_hand\")].reshape([3, 3]))\n pos_goal, rot_goal, pos_goal2 = get_goal_pose(env)\n\n # sample random amount of time it should take to traverse the triangle path\n num_steps = np.random.randint(NUM_STEPS_INSERT[0], NUM_STEPS_INSERT[1])\n\n # interpolated poses\n pos_concat = []\n rot_concat = []\n gripper_concat = []\n\n pos_steps, rot_steps = interpolate_poses(\n pos1=pos_0, \n rot1=rot_0, \n pos2=pos_goal, \n rot2=rot_goal, \n steps=num_steps,\n perturb=PERTURB,\n )\n gripper_steps = [[0.] for _ in range(num_steps)] # closed\n pos_concat.append(pos_steps)\n rot_concat.append(rot_steps)\n gripper_concat.append(gripper_steps)\n\n if ENV_NAME == \"SawyerCircusTeleop\":\n pos_steps, rot_steps = interpolate_poses(\n pos1=pos_goal, \n rot1=rot_goal, \n pos2=pos_goal2, \n rot2=rot_goal, \n steps=NUM_STEPS_FIT,\n perturb=PERTURB,\n )\n gripper_steps = [[0.] for _ in range(NUM_STEPS_FIT)] # closed\n pos_concat.append(pos_steps)\n rot_concat.append(rot_steps)\n gripper_concat.append(gripper_steps)\n\n # add in fixed final poses as a buffer\n pos_steps = np.array([pos_goal2 for _ in range(NUM_STEPS_WAIT)])\n rot_steps = np.array([rot_goal for _ in range(NUM_STEPS_WAIT)])\n gripper_steps = [[0.] 
for _ in range(NUM_STEPS_WAIT)] # closed\n pos_concat.append(pos_steps)\n rot_concat.append(rot_steps)\n gripper_concat.append(gripper_steps)\n\n all_pos_steps = np.concatenate(pos_concat)\n all_rot_steps = np.concatenate(rot_concat)\n all_grip_steps = np.concatenate(gripper_concat)\n return all_pos_steps, all_rot_steps, all_grip_steps\n\ndef get_goal_pose(env):\n \"\"\"\n Helper function to get goal location for arm based on the current state of the env.\n\n The function finds the relative transform between the tip of the rod and \n the gripper, and uses that to find the correct gripper pose that will\n result in successful ring insertion.\n \"\"\"\n\n # ring position is average of all the surrounding ring geom positions\n ring_pos = np.zeros(3)\n ring_geoms = []\n for i in range(env.num_ring_geoms):\n ring_geom_pos = np.array(env.sim.data.geom_xpos[env.sim.model.geom_name2id(\"hole_ring_{}\".format(i))])\n ring_pos += ring_geom_pos\n ring_geoms.append(ring_geom_pos)\n ring_pos /= env.num_ring_geoms\n\n # get vector in x-y plane parallel to the ring to find the x-y line that passes through the ring\n ring_dir = ring_geoms[1][:2] - ring_geoms[0][:2]\n ring_perp = np.array([ring_dir[1], -ring_dir[0]])\n ring_perp /= np.linalg.norm(ring_perp)\n ring_angle = np.arctan2(ring_perp[1], ring_perp[0])\n\n # rotate the initial tip rotation by ring angle to get the target pose\n if ENV_NAME == \"SawyerCircusTeleop\":\n initial_tip_mat = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])\n else:\n initial_tip_mat = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])\n target_tip_mat = T.rotation_matrix(angle=-ring_angle, direction=[0., 0., 1.])[:3, :3].T.dot(initial_tip_mat)\n target_tip_pose = T.make_pose(ring_pos, target_tip_mat)\n\n # gripper pose\n gripper_pose = T.make_pose(\n np.array(env.sim.data.body_xpos[env.sim.model.body_name2id(\"right_hand\")]),\n np.array(env.sim.data.body_xmat[env.sim.model.body_name2id(\"right_hand\")].reshape([3, 3])),\n )\n\n # tip pose\n if ENV_NAME == \"SawyerCircusTeleop\":\n block_pos = np.array(env.sim.data.geom_xpos[env.sim.model.geom_name2id(\"block_thread\")])\n block_pose = np.array(env.sim.data.geom_xmat[env.sim.model.geom_name2id(\"block_handle\")].reshape(3, 3))\n handle_pos = np.array(env.sim.data.geom_xpos[env.sim.model.geom_name2id(\"block_handle\")])\n else:\n # the handle is basically the robot gripper\n block_pos = np.array(env.sim.data.body_xpos[env.object_body_ids[\"r_gripper_rod\"]])\n block_pose = np.array(env.sim.data.body_xmat[env.object_body_ids[\"r_gripper_rod\"]].reshape(3, 3))\n handle_pos = np.array(gripper_pose[3, :3])\n handle_pos[2] = block_pos[2]\n block_dir = block_pos - handle_pos\n block_dir /= np.linalg.norm(block_dir)\n tip_pos = block_pos + 0.06 * block_dir\n tip_pose = T.make_pose(tip_pos, block_pose)\n world_pose_in_tip = T.pose_inv(tip_pose)\n block_pose = T.make_pose(block_pos, block_pose)\n world_pose_in_block = T.pose_inv(block_pose)\n\n # arm pose in tip frame and in block frame (center of rod)\n arm_in_tip = T.pose_in_A_to_pose_in_B(gripper_pose, world_pose_in_tip)\n arm_in_block = T.pose_in_A_to_pose_in_B(gripper_pose, world_pose_in_block)\n\n # use the relative transform between tip / block and arm to get target gripper poses\n target_gripper_pose1 = T.pose_in_A_to_pose_in_B(arm_in_tip, target_tip_pose)\n target_gripper_pose2 = T.pose_in_A_to_pose_in_B(arm_in_block, target_tip_pose)\n\n return target_gripper_pose1[:3, 3], target_gripper_pose1[:3, :3], target_gripper_pose2[:3, 3]\n\n\ndef 
get_robosuite_env(render):\n \"\"\"\n Constructs robosuite env\n \"\"\"\n\n # we use specific OSC controller args in RobotTeleop\n controller_json_path = os.path.join(RobotTeleop.__path__[0], \n \"assets/osc/robosuite/osc.json\")\n with open(controller_json_path, \"r\") as f:\n controller_args = json.load(f)\n\n osc_args = dict(\n use_camera_obs=False,\n reward_shaping=False,\n gripper_visualization=False,\n has_renderer=render,\n has_offscreen_renderer=False,\n control_freq=20,\n ignore_done=True,\n eval_mode=True,\n perturb_evals=False,\n controller_config=controller_args,\n # use_indicator_object=True,\n # indicator_num=2,\n )\n env = robosuite.make(ENV_NAME, **osc_args)\n\n return env\n\n\ndef get_fake_teleop_config():\n \"\"\"\n Helper function to fake a teleoperation config. This is used for parsing\n env arguments, among other things, during postprocessing and training.\n \"\"\"\n config = BaseServerConfig()\n config.robot.type = \"MujocoSawyerRobot\"\n config.robot.task.name = ENV_NAME\n config.controller.mode = \"osc\"\n config.data_collection.dagger_mode = False\n config.infer_settings()\n return config\n\ndef follow_waypoints(env, ref_ee_pos, ref_ee_mat, ref_ee_grip, render=False):\n \"\"\"\n Helper function to follow a sequence of waypoints with OSC control, and\n return the seqeunce of states and actions visited.\n \"\"\"\n\n # store controller bounds for re-scaling actions\n MAX_DPOS = env.controller.output_max[0]\n MAX_DROT = env.controller.output_max[3]\n\n states = []\n actions = []\n success = False\n for j in range(len(ref_ee_pos)):\n\n # current mjstate\n state = np.array(env.sim.get_state().flatten())\n\n desired_position = ref_ee_pos[j]\n current_position = np.array(env.sim.data.body_xpos[env.sim.model.body_name2id(\"right_hand\")])\n delta_position = desired_position - current_position\n delta_position = np.clip(delta_position / MAX_DPOS, -1., 1.)\n\n # use the OSC controller's convention for delta rotation\n desired_rotation = ref_ee_mat[j]\n current_rotation = np.array(env.sim.data.body_xmat[env.sim.model.body_name2id(\"right_hand\")].reshape([3, 3]))\n delta_rot_mat = current_rotation.dot(desired_rotation.T)\n delta_quat = T.mat2quat(delta_rot_mat)\n delta_axis, delta_angle = T.quat2axisangle(delta_quat)\n delta_rotation = -T.axisangle2vec(delta_axis, delta_angle)\n delta_rotation = np.clip(delta_rotation / MAX_DROT, -1., 1.)\n\n # play the action\n play_action = np.concatenate([delta_position, delta_rotation, ref_ee_grip[j]])\n env.step(play_action)\n if render:\n env.render()\n\n # collect data\n states.append(state)\n actions.append(play_action)\n\n # termination condition\n done = int(env._check_success())\n success = success or done\n # if success:\n # break\n\n return states, actions, success\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--batch\",\n type=str,\n )\n parser.add_argument(\n \"--render\",\n action='store_true', # to visualize collection\n )\n args = parser.parse_args()\n\n env = get_robosuite_env(render=args.render)\n fake_teleop_config = get_fake_teleop_config()\n\n # file to write\n f = h5py.File(args.batch, \"w\")\n grp = f.create_group(\"data\")\n\n avg_success = 0.\n num_failures = 0\n ep_lengths = []\n demo_count = 0\n num_demos_attempted = 0\n for i in range(NUM_DEMOS):\n\n # store trajectory under new group\n ep_data_grp = grp.create_group(\"demo_{}\".format(demo_count))\n\n # start new ep, using the hack for deterministic playback\n env.reset()\n initial_mjstate = 
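# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# follow_waypoints above turns a positional error into a normalized OSC
# action by dividing by the controller's max per-step delta and clipping to
# [-1, 1]. Standalone sketch (normalized_delta is a hypothetical name):
import numpy as np

def normalized_delta(current, desired, max_delta):
    error = np.asarray(desired, dtype=float) - np.asarray(current, dtype=float)
    return np.clip(error / max_delta, -1.0, 1.0)

# normalized_delta([0, 0, 0], [0.1, -0.5, 0.0], max_delta=0.05)
# -> array([ 1., -1.,  0.])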
env.sim.get_state().flatten()\n xml = env.model.get_xml()\n env.reset_from_xml_string(xml)\n env.sim.reset()\n env.sim.set_state_from_flattened(initial_mjstate)\n env.sim.forward()\n\n if args.render:\n env.viewer.set_camera(2)\n\n if ENV_NAME == \"SawyerCircusTeleop\":\n # first, grab the rod, so that we can obtain the relative pose of the rod to the arm\n # and use that to set the goal\n ref_ee_pos, ref_ee_mat, ref_ee_grip = get_lift_waypoints(\n env=env,\n perturb=PERTURB,\n )\n lift_states, lift_actions, _ = follow_waypoints(\n env=env, \n ref_ee_pos=ref_ee_pos, \n ref_ee_mat=ref_ee_mat, \n ref_ee_grip=ref_ee_grip, \n render=args.render,\n )\n\n\n # first leg of triangle \n ref_ee_pos, ref_ee_mat, ref_ee_grip = get_pose_waypoints(env)\n states, actions, _ = follow_waypoints(\n env=env, \n ref_ee_pos=ref_ee_pos, \n ref_ee_mat=ref_ee_mat, \n ref_ee_grip=ref_ee_grip, \n render=args.render,\n )\n\n # get insertion trajectory based on where we end up after the first part of task\n ref_ee_pos, ref_ee_mat, ref_ee_grip = get_insertion_waypoints(env)\n insertion_states, insertion_actions, success = follow_waypoints(\n env=env, \n ref_ee_pos=ref_ee_pos, \n ref_ee_mat=ref_ee_mat, \n ref_ee_grip=ref_ee_grip, \n render=args.render,\n )\n\n avg_success += float(success)\n num_demos_attempted += 1\n\n # skip failures\n if not success:\n del grp[\"demo_{}\".format(demo_count)]\n continue\n\n if ENV_NAME == \"SawyerCircusTeleop\":\n # prepend the lifting states and actions\n states = lift_states + states\n actions = lift_actions + actions\n states += insertion_states\n actions += insertion_actions\n\n # write datasets for states and actions\n ep_data_grp.create_dataset(\"states\", data=np.array(states))\n ep_data_grp.create_dataset(\"actions\", data=np.array(actions))\n\n # write model file\n ep_data_grp.attrs[\"model_file\"] = xml\n\n demo_count += 1\n print(\"{} successes out of {}\".format(avg_success, num_demos_attempted))\n\n print(\"Done!\\n\")\n print(\"{} successes out of {}\".format(avg_success, num_demos_attempted))\n print(\"{} exceptions out of {}\\n\".format(num_failures, num_demos_attempted))\n\n avg_success /= num_demos_attempted\n\n print(\"Average Success: {}\".format(avg_success))\n\n # write dataset attributes (metadata)\n now = datetime.datetime.now()\n grp.attrs[\"date\"] = \"{}-{}-{}\".format(now.month, now.day, now.year)\n grp.attrs[\"time\"] = \"{}:{}:{}\".format(now.hour, now.minute, now.second)\n grp.attrs[\"repository_version\"] = robosuite.__version__\n grp.attrs[\"env\"] = ENV_NAME\n grp.attrs[\"teleop_config\"] = fake_teleop_config.dump()\n\n f.close()","repo_name":"Bronars/RL2_robomimic","sub_path":"simulation/scripts/hardcoded_demonstrator.py","file_name":"hardcoded_demonstrator.py","file_ext":"py","file_size_in_byte":19130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37953663830","text":"import cv2\nimport numpy as np\nimport imutils\nimport easyocr\n\n# import sys\n# import codecs\n# sys.stdout = codecs.getwriter('utf8')(sys.stdout)\n# sys.stderr = codecs.getwriter('utf8')(sys.stderr)\n\nimport importlib\nimport sys\n# sys.setdefaultencoding() does not exist, here!\n# reload(sys) # Reload does the trick!\nimportlib.reload(sys)\n# export PYTHONIOENCODING=utf-8\n\nimg = cv2.imread('images/1.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nbfilter = cv2.bilateralFilter(gray, 11, 17, 17)\nedged = cv2.Canny(bfilter, 30, 200)\n\nkeypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, 
cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(keypoints)\ncontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n\nlocation = None\nfor contour in contours:\n approx = cv2.approxPolyDP(contour, 10, True)\n if len(approx) == 4:\n location = approx\n break\n\nmask = np.zeros(gray.shape, np.uint8)\nnew_image = cv2.drawContours(mask, [location], 0,255, -1)\nnew_image = cv2.bitwise_and(img, img, mask=mask)\n\n(x,y) = np.where(mask==255)\n(x1, y1) = (np.min(x), np.min(y))\n(x2, y2) = (np.max(x), np.max(y))\ncropped_image = gray[x1:x2+1, y1:y2+1]\n\nreader = easyocr.Reader(['en'], gpu=False)\nresult = reader.readtext(cropped_image)\ntext = result[0][-2]\nres = cv2.putText(img, text, (approx[0][0][0],\napprox[1][0][1]+60),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)\nres = cv2.rectangle(img, tuple(approx[0][0]),\ntuple(approx[2][0]), (0,255,0),3)\n\n# cv2.imshow(\"Image\", cv2.cvtColor(res, cv2.COLOR_BGR2RGB))\ncv2.imshow(\"Image\", cv2.cvtColor(res, cv2.COLOR_BGR2RGB))\ncv2.waitKey(0)","repo_name":"wendikardian/aidev2","sub_path":"Meeting 1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29529987917","text":"import os\r\nimport sys\r\nimport numpy as np\r\nfrom voice_util import (audio_bic_feat,\r\n div_gauss,\r\n vad_audio_segment,\r\n trim_audio_with_sox,\r\n trim_audio_ffmpeg,\r\n audio_merge_with_sox)\r\n\r\n\r\ndef split_audio_file(audio_file, time_list):\r\n file_list = []\r\n split_dir = \"split\"\r\n if os.path.exists(split_dir):\r\n if 'win32' in sys.platform:\r\n os.system(\"rmdir {} /s /q\".format(split_dir))\r\n else:\r\n os.system('rm -rf \\\"{}\\\"'.format(split_dir))\r\n os.mkdir(split_dir)\r\n for idx, [seg_st, seg_end] in enumerate(time_list):\r\n split_file = \"split_{}.wav\".format(idx)\r\n split_file = os.path.join(split_dir, split_file)\r\n # split_file_name = trim_audio_with_sox(audio_file, 16000, seg_st, seg_end, split_file)\r\n split_file_name = trim_audio_ffmpeg(audio_file, seg_st, seg_end, split_file)\r\n file_list.append(\"\\\"{}\\\"\".format(split_file_name))\r\n return file_list\r\n\r\n\r\ndef get_enroll_audio(audio_file, enroll_file):\r\n speech_segment = vad_audio_segment(audio_file)\r\n split_list = split_audio_file(audio_file, speech_segment)\r\n # target_file = \"merge.wav\"\r\n return audio_merge_with_sox(enroll_file, split_list)\r\n\r\n\r\ndef get_verify_audio(audio_file, enroll_file):\r\n speech_segment = vad_audio_segment(audio_file, concate_gap=0.45)\r\n split_list = split_audio_file(audio_file, speech_segment)\r\n # target_file = \"merge.wav\"\r\n return audio_merge_with_sox(enroll_file, split_list)\r\n\r\n\r\ndef audio_segmentation(audio_file):\r\n energy, mfb, mfcc = audio_bic_feat(audio_file)\r\n win_size = 200\r\n frame_step = 0.01 # (s)\r\n segment = div_gauss(mfcc, win=win_size)\r\n borders = np.array(segment)*frame_step\r\n segmentation_result = []\r\n for i in range(len(borders) - 1):\r\n segmentation_result.append([borders[i], borders[i+1]])\r\n\r\n return segmentation_result\r\n\r\n\r\ndef multiple_verify(audio_file):\r\n from speaker_verification import SpeakerVerification\r\n import emo_recognizer as emr\r\n verifier = SpeakerVerification()\r\n em_verifier = emr.load_model()\r\n # audio_segments = audio_segmentation(audio_file)\r\n audio_segments = vad_audio_segment(audio_file, concate_gap=0.6)\r\n caller_list = []\r\n unknown_speaker_num = 0\r\n for seg_st, seg_end in audio_segments:\r\n 
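# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# audio_segmentation above turns BIC change-point frame indices into second
# offsets by scaling with the 10 ms hop (frame_step = 0.01) and pairing
# consecutive borders. Standalone sketch with hypothetical change points:
import numpy as np

frame_step = 0.01                        # hop length in seconds
segment = np.array([0, 420, 910])        # assumed change-point frame indices
borders = segment * frame_step           # -> [0.0, 4.2, 9.1] seconds
intervals = list(zip(borders[:-1], borders[1:]))
# intervals -> [(0.0, 4.2), (4.2, 9.1)]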
if seg_end - seg_st > 50:\r\n seg_end -= 1\r\n seg_st += 1\r\n print(\"interval: {}\".format([seg_st, seg_end]))\r\n # seg_file = trim_audio_with_sox(audio_file, 16000, seg_st, seg_end, out_file=\"merge.wav\")\r\n seg_file = trim_audio_ffmpeg(audio_file, seg_st, seg_end, \"merge.wav\")\r\n # res_file = get_enroll_audio(seg_file, \"enroll.wav\")\r\n called_name, sc = verifier.verify(seg_file)\r\n emotion = emr.emotion_recognizer(em_verifier, seg_file)\r\n if called_name != \"Unknown\":\r\n if len(caller_list) > 0 and caller_list[-1][0] == called_name:\r\n caller_list[-1][1] = (caller_list[-1][1] + sc) / 2\r\n caller_list[-1][3] = seg_end\r\n else:\r\n\r\n caller_list.append([called_name, sc, seg_st, seg_end, emotion[0]])\r\n else:\r\n unknown_speaker_num += 1\r\n caller_list.append([\"{}-{}\".format(called_name, unknown_speaker_num), sc, seg_st, seg_end, emotion[0]])\r\n\r\n # verify_speaker(verifier, seg_file)\r\n return caller_list\r\n\r\n\r\nif __name__ == \"__main__\":\r\n wav_dir = \"wav\"\r\n audio_file = \"Voice 164.wav\"\r\n # audio_file = \"Joe_training16.wav\"\r\n # audio_file = \"Ashline_training_16.wav\"\r\n audio_file = os.path.join(wav_dir, audio_file)\r\n enroll_file = \"merge.wav\"\r\n # audio_segmentation(audio_file)\r\n multiple_verify(audio_file)\r\n # vad_audio_segment(audio_file)\r\n # get_enroll_audio(audio_file, enroll_file)\r\n","repo_name":"JaneBittner/Voice-Command","sub_path":"wav_spliter.py","file_name":"wav_spliter.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28069452596","text":"import sys\nimport cPickle as pkl\n\ndef predict(args):\n\n days = ['Sunday',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday']\n\n exm = []\n exm.append(int(args[0]))\n\n if args[1] == 'M':\n exm.append(1)\n else:\n exm.append(0)\n\n dayRes = [0 for i in range(len(days))]\n dayRes[int(args[2])] = 1\n exm.extend(dayRes)\n\n exm.append(int(args[3]))\n exm.append(int(args[4]))\n\n reg = pkl.load(open('model.pkl'))\n prob = reg.predict_proba([exm])\n\n print(prob[0][1])\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n args[0] = int(args[0])\n args[-2] = int(args[-2])\n args[-1] = int(args[-1])\n\n predict(args)\n","repo_name":"tummus/cse112_oldproj","sub_path":"ml/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73665961832","text":"import re\nimport util\n\n\nclass Linenamer(object):\n\n def __init__(self, lines):\n self.lines = lines\n self.changed = []\n\n def replace_all(self, ss):\n out = []\n for ll in self.lines:\n if ll[0] == ';':\n out.append(ll)\n continue\n out.append(ll.replace(ss, '@' + ss + '@'))\n self.lines = out\n\n def linesnames_changed(self):\n out = []\n for ll in self.lines:\n if ll[0:8] == 'LINENAME':\n ll = util.real_strip(ll)\n mm = re.match(r'^LINENAME (.*)', ll)\n self.changed.append(mm.group(1))\n out.append(mm.group(1)) # this gets replaced by @ + name + @\n else:\n out.append(ll)\n self.lines = out\n\n self.changed.sort(key=len, reverse=True)\n for nn in self.changed:\n self.replace_all(nn)\n nn2 = nn.replace('(', '___').replace(')', '---')\n if nn2 != nn:\n for ll in self.lines:\n ll.replace(nn, nn2)\n\n return 
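# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# linesnames_changed above sorts the collected names longest-first before
# substituting, so a short name that is a prefix of a longer one is not
# rewritten first (which would leave e.g. '@MAIN@LINE' and hide 'MAINLINE').
# Minimal demo; wrap_names is a hypothetical name, and note that a name
# nested inside another can still be double-wrapped, as in the record:
def wrap_names(text, names):
    for name in sorted(names, key=len, reverse=True):
        text = text.replace(name, '@' + name + '@')
    return text

# wrap_names('STOP GO', ['STOP', 'GO']) -> '@STOP@ @GO@'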
self.lines\n","repo_name":"jeidsath/lm-compiler","sub_path":"linenamer.py","file_name":"linenamer.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"35865927926","text":"\"\"\"Name: Owen Schlagenhaft\nlab3.py\n\nProblem: creating a calculator for DOT to calculate road averages\n\nCertification of Authenticity:\nI certify that this assignment is entirely my own work.\n\"\"\"\ndef traffic_average():\n    cars = 0.0\n    average = 0.0\n    totalcars = 0.0\n    overallsum = 0.0\n    totalroads = int(input(\"How many roads were surveyed?:\"))\n    for i in range(1, totalroads+1):\n        days = int(input(\"How many days was road \" + str(i) + \" surveyed?:\"))\n        for c in range(1, days + 1):\n            cars = int(input(\"How many cars passed on day \" + str(c) + \"?\"))\n            totalcars = cars + totalcars\n        average = totalcars / days\n        print(\"Road\", i, \"average vehicles per day:\", average)\n        overallsum = overallsum + totalcars\n        totalcars = 0.0\n    print(\"Total number of vehicles traveled on all roads:\", overallsum)\n    roadaverage = overallsum / totalroads\n    roadaverage = round(roadaverage, 2)\n    print(roadaverage)\n\n\n\n\ntraffic_average()","repo_name":"ItsTheGreenG/220","sub_path":"labs/lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"17302447837","text":"from decimal import Decimal\nfrom flaskr.model import QuoteHistoryItem\nfrom datetime import datetime\nfrom typing import List\n\n\n# Based on the quote info from 'data', performs linear interpolation to fill all\n# timepoints in 'timeScale'. If there is no quote available for some of the leftmost\n# points in the 'timeScale', uses 'leftFill'. If 'leftFill' is None then uses the next available\n# quote from the future. 
The same applies for 'rightFill' on the right side\ndef interp(data:List[QuoteHistoryItem], timeScale:List[datetime], leftFill = None, rightFill = None):\n if len(timeScale) == 0:\n return []\n\n assert len(data) > 0\n\n if leftFill is not None and not isinstance(leftFill, Decimal):\n leftFill = Decimal(leftFill)\n\n if rightFill is not None and not isinstance(rightFill, Decimal):\n rightFill = Decimal(rightFill)\n\n result = []\n quoteIdx = 0\n\n for dateIdx in timeScale:\n thisQuote = data[quoteIdx]\n while thisQuote.timestamp < dateIdx and quoteIdx < len(data) - 1:\n quoteIdx += 1\n thisQuote = data[quoteIdx]\n\n if quoteIdx == 0:\n result.append(QuoteHistoryItem(timestamp=dateIdx, quote = data[0].quote if leftFill is None else leftFill))\n elif thisQuote.timestamp < dateIdx:\n result.append(QuoteHistoryItem(timestamp=dateIdx, quote = data[-1].quote if rightFill is None else rightFill))\n else:\n prevQuote = data[quoteIdx-1]\n timeTillNow = Decimal(dateIdx.timestamp() - prevQuote.timestamp.timestamp())\n timeBetweenPoints = Decimal(thisQuote.timestamp.timestamp() - prevQuote.timestamp.timestamp())\n linearCoef = timeTillNow / timeBetweenPoints\n linearQuote = linearCoef * (thisQuote.quote - prevQuote.quote) + prevQuote.quote\n result.append(QuoteHistoryItem(timestamp=dateIdx, quote=linearQuote))\n\n return result\n","repo_name":"kpk-pl/wallet","sub_path":"web-gui/flaskr/pricing/interp.py","file_name":"interp.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18068056805","text":"import os\nimport sys\nfrom dotenv import load_dotenv\nimport ndn.security as sec\nimport gitsync.db.proto as proto\nimport gitsync.db.json_encoder as json_encoder\n\n\ndef main():\n load_dotenv()\n if len(sys.argv) <= 1:\n print(f'Usage: {sys.argv[0]} ', file=sys.stderr)\n exit(-1)\n\n filename = os.path.abspath(sys.argv[1])\n tpm = sec.TpmFile(os.path.abspath(os.getenv('GIT_NDN_TPM')))\n signer = tpm.get_signer(os.getenv('GIT_NDN_KEY'))\n\n with open(filename, 'r') as f:\n content = f.read()\n git_obj = json_encoder.json_decode(content)\n wire = proto.encode(git_obj, signer)\n\n file_prefix, _ = os.path.splitext(filename)\n tlv_filename = file_prefix + '.tlv'\n with open(tlv_filename, 'wb') as f:\n f.write(wire)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zjkmxy/git_ndn","sub_path":"bin/encode_json.py","file_name":"encode_json.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"73422331114","text":"import numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\n#preprocessing\ndataset=pd.read_csv('data.csv')\nx=dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 21].values\n#splitting dataest\nfrom sklearn.cross_validation import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)\n#fitting model\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train,y_train)\n#predicting results\ny_pred=regressor.predict(x_test)\n#converting float to int\ny_final=np.round(y_pred)\n#confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_final)\n#accuracy\ncount=0\nfor i in range(len(y_final)):\n if(y_final[i]==y_test[i]):\n count=count+1\nprint(count) \n 
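# --- Illustrative aside (editor's sketch, not part of the dataset records) ---
# The interp routine above blends two known quotes linearly for each target
# timestamp: q = q0 + (t - t0) / (t1 - t0) * (q1 - q0). Plain-float sketch
# of that core step (lerp_quote is a hypothetical name; the record uses
# Decimal and QuoteHistoryItem):
def lerp_quote(t, t0, q0, t1, q1):
    coef = (t - t0) / (t1 - t0)  # fraction of the interval elapsed
    return q0 + coef * (q1 - q0)

# lerp_quote(15, 10, 100.0, 20, 110.0) -> 105.0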
","repo_name":"VamsiKrishnaNori/Comparison-Of-ML-Algorithms-in-Disease-Prediction","sub_path":"multiple linear regression.py","file_name":"multiple linear regression.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32069151641","text":"from datetime import datetime\nfrom django.test import TestCase\n\nfrom escolar.escolas.models import Escola\nfrom municipios.models import Municipio, UF\nfrom escolar.core.models import Pais\n\n'''\npython manage.py test escolar.escolas.testes.test_model_escola\n'''\n\nclass EscolaModelTest(TestCase):\n def setUp(self):\n self.pais = Pais(nome='Brasil',\n sigla='BRA',\n idioma='português',)\n self.pais.save()\n self.uf = UF(id_ibge=1234,\n uf='SP',\n nome='São Paulo',\n regiao='Sudeste',)\n self.uf.save()\n self.municipio = Municipio(\n id_ibge=12233,\n nome_abreviado='SJC',\n nome=\"São José dos Campos\",\n uf=self.uf,\n uf_sigla='SP',)\n self.municipio.save()\n self.escola = Escola(pais=self.pais,\n nome='Padre Julio Maria',\n razao_social='Pde julio S/A',\n endereco='rua xyz',\n numero='123A',\n telefone='12-31247895',\n municipio=self.municipio,\n bairro='Santana',\n celular='12982239764',\n slug='padre_julio',\n )\n self.escola.save()\n\n def test_create(self):\n \"\"\"TESTE 01 test_model_escola\"\"\"\n self.assertTrue(Escola.objects.exists())\n\n def test_created_at(self):\n \"\"\"Escola must have an auto created_at attr\"\"\"\n self.assertIsInstance(self.escola.created_at, datetime)\n\n def test_str(self):\n self.assertEqual('Padre Julio Maria', str(self.escola))\n\n def test_publica(self):\n self.assertEqual(False, self.escola.publica)\n","repo_name":"FabianoAlmeidaMelo/escolar","sub_path":"escolar/escolas/tests/test_model_escola.py","file_name":"test_model_escola.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7474847569","text":"import torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef calculate_loss(recon_hmaps, hmaps, loss_function):\n # (bs,l,w,h)\n loss = 0\n if loss_function == 'RMSE':\n loss = torch.sqrt(torch.mean(F.mse_loss(recon_hmaps, hmaps, reduction='none'), dim=(1, 2, 3)))\n if loss_function == 'MSE':\n loss = torch.mean(F.mse_loss(recon_hmaps, hmaps, reduction='none'), dim=(1, 2, 3))\n if loss_function == 'MAE':\n loss = torch.mean(F.l1_loss(recon_hmaps, hmaps, reduction='none'), dim=(1, 2, 3))\n if len(loss.shape) > 1:\n loss = torch.mean(loss, dim=(1, 2))\n return np.array(loss.data.cpu().detach())\n\n\ndef calculate_seq_loss(y_pred, y, loss_function):\n bs, l, c, w, h = y_pred.shape\n losses = []\n # category\n for c in range(c):\n loss = calculate_loss(y_pred[:, :, c], y[:, :, c], loss_function)\n losses.append(loss)\n losses = np.stack(losses, axis=1)\n return losses\n","repo_name":"echoyi/MAPSED","sub_path":"src/utils/calculate_seq_loss.py","file_name":"calculate_seq_loss.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"37502550920","text":"from collections import defaultdict\nimport itertools\n\n\ndef get_lines(filename='09/input.txt'):\n file = open(filename, 'r')\n return [line.rstrip('\\n') for line in file.readlines()]\n\n\ndef get_edges():\n edges = defaultdict(lambda: {})\n\n for line in get_lines():\n splited = line.split()\n source = splited[0]\n destination = splited[2]\n distance = 
int(splited[-1])\n edges[source][destination] = distance\n edges[destination][source] = distance\n\n return edges\n\n\ndef longest_path(edges):\n max_path_length = 0\n for path in itertools.permutations(edges):\n dist = find_distance(edges, path)\n max_path_length = max(max_path_length, dist)\n\n return max_path_length\n\n\ndef find_distance(distances, path):\n dist = 0\n\n for idx in range(len(path) - 1):\n source = path[idx]\n destination = path[idx+1]\n\n dist += distances[source][destination]\n\n return dist\n\n\nedges = get_edges()\nmin_dist = longest_path(edges)\n\nprint(min_dist)\n","repo_name":"wpedrak/advent_of_code","sub_path":"2015/09/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17978417847","text":"\"\"\"\n给你一个包含 n 个整数的数组 nums,判断 nums 中是否存在三个元素 a,b,c ,使得 a + b + c = 0 ?请你找出所有和为 0 且不重复的三元组。\n\n注意:答案中不可以包含重复的三元组。\n\n\n\n示例 1:\n\n输入:nums = [-1,0,1,2,-1,-4]\n输出:[[-1,-1,2],[-1,0,1]]\n\n示例 2:\n\n输入:nums = []\n输出:[]\n\n示例 3:\n\n输入:nums = [0]\n输出:[]\n\n\n\n提示:\n\n 0 <= nums.length <= 3000\n -105 <= nums[i] <= 105\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/3sum\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n\"\"\"\n\n\nclass Solution:\n def threeSum(self, nums):\n nums = sorted(nums)\n result = list()\n # 类似于快排的解法.\n nums_len = len(nums)\n if len(nums) < 3:\n return result\n # for i in range(nums_len - 1):\n i = 0\n while i < nums_len - 1:\n j = i + 1\n k = nums_len - 1\n while j < k:\n sums = nums[i] + nums[j] + nums[k]\n if sums == 0:\n result_list = list()\n result_list.append(nums[i])\n result_list.append(nums[j])\n result_list.append(nums[k])\n \"\"\"\n if result_list in result:\n pass\n else:\n result.append(result_list)\n \"\"\"\n result.append(result_list)\n j += 1\n k -= 1\n # 需要去掉重复元素\n while j < k and nums[j] == nums[j - 1]:\n j += 1\n while k > j and nums[k] == nums[k + 1]:\n k -= 1\n elif sums < 0:\n j += 1\n else:\n k -= 1\n\n while i < nums_len - 1:\n if nums[i] != nums[i + 1]:\n break\n else:\n i += 1\n i += 1\n return result\n\n\nA = Solution()\nprint(A.threeSum([-1, 0, 1, 2, -1, -4]))\n","repo_name":"SunsetBrightGorgeousAgain/LeetCode","sub_path":"LeetCode/Pycharm/15.三数之和.py","file_name":"15.三数之和.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42998532959","text":"class Queue:\r\n def __init__(self):\r\n self.array = []\r\n\r\n def push(self, n):\r\n self.array.append(n)\r\n print(\"ok\")\r\n\r\n def pop(self):\r\n if self.array != []:\r\n print(self.array.pop(0))\r\n else:\r\n print(\"error\")\r\n \r\n def front(self):\r\n if self.array != []:\r\n print(self.array[0])\r\n else:\r\n print(\"error\")\r\n\r\n def size(self):\r\n print(len(self.array))\r\n\r\n def clear(self):\r\n self.array = []\r\n print(\"ok\")\r\n\r\nqueue = Queue()\r\n\r\nwhile True:\r\n command = input()\r\n\r\n if \"push\" in command.split(' '):\r\n queue.push(command.split(' ')[1])\r\n elif \"pop\" == command:\r\n queue.pop()\r\n elif \"front\" == command:\r\n queue.front()\r\n elif \"size\" == command:\r\n queue.size()\r\n elif \"clear\" == command:\r\n queue.clear()\r\n elif \"exit\" == command:\r\n print(\"bye\")\r\n 
break","repo_name":"AkiiraBino/yandex-training3","sub_path":"Alg3_1.py","file_name":"Alg3_1.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25904707289","text":"import ruamel_yaml\n\n\ndef load_yaml(filename: str) -> dict:\n \"\"\"\n load a file given by `filename` and return its data\n\n inputs:\n - filename: a string filename that has already been checked\n\n outputs:\n - data: a dictionary/list structure containing the data from the yaml\n \"\"\"\n\n yaml = ruamel_yaml.YAML(typ=\"safe\", pure=True)\n with open(filename, \"r\") as infile:\n data = yaml.load(infile)\n\n # # pyyaml version\n # # try to safe read\n # with open(filename, \"r\") as infile:\n # data = yaml.safe_load(infile)\n\n return data\n\n\ndef save_yaml(data: dict, filename_out: str) -> dict:\n \"\"\"\n save a yaml data dictionary to a yaml file\n\n inputs:\n - data: a yaml data dictionary\n - filename_out: the yaml file destination to be written\n \"\"\"\n\n yaml = ruamel_yaml.YAML(typ=\"safe\", pure=True)\n with open(filename_out, \"w\") as outfile:\n yaml.dump(data, outfile)\n","repo_name":"cfrontin/tools_cvf","sub_path":"tools_cvf/nrel_tools/utils/yaml_io.py","file_name":"yaml_io.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15859230554","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Author: 思文伟\n@Date: 2021/09/16\n'''\nimport copy\nimport json\nfrom ..const import Const\nfrom ..utils.sutils import strclass\nfrom .test_wrapper import Test\nfrom .abstract_testcase import AbstractTestCase\nfrom ..utils.attrs_manager import AttributeManager\nfrom ..utils.attrs_marker import ConstAttributeMarker\n\n\nclass TestCaseWrapper(AttributeManager):\n\n SUCCESS = ConstAttributeMarker(0, \"通过\", alias=\"通过\")\n FAILURE = ConstAttributeMarker(1, \"失败\", alias=\"失败\")\n ERROR = ConstAttributeMarker(2, \"异常\", alias=\"异常\")\n BLOCKED = ConstAttributeMarker(3, \"阻塞\", alias=\"阻塞\")\n SKIPED = ConstAttributeMarker(4, \"跳过\", alias=\"skiped\")\n XFAILURE = ConstAttributeMarker(5, \"预期失败\", alias=\"预期失败\")\n XSUCCESS = ConstAttributeMarker(6, \"预期失败却成功\", alias=\"unexpected successes\")\n\n CSS_CLASS_MAPS = {\n SUCCESS.value: 'test-pass',\n FAILURE.value: 'test-fail',\n ERROR.value: 'test-error',\n BLOCKED.value: 'test-blocked',\n SKIPED.value: 'test-skip',\n XFAILURE.value: 'test-xfail',\n XSUCCESS.value: 'test-unexpected-pass',\n }\n\n def __init__(self, test, id, result_code, message=\"\", **kwargs):\n\n self.test = test\n self.id = id\n self._kwargs = kwargs\n self.set_result(result_code, message)\n self.is_abstract_testcase = isinstance(self.test, AbstractTestCase)\n # self.is_abstract_testcase = hasattr(self.test, \"collect_testcases\")\n\n @property\n def name(self):\n\n return self.description or self.method_name\n\n @property\n def method_name(self):\n\n if self.is_abstract_testcase:\n name = self.test.real_test_method_name\n else:\n name = self.test.id()\n return name\n\n @property\n def number(self):\n return self.id\n\n @property\n def duration(self):\n \"\"\" 返回执行用例耗时,单位秒\"\"\"\n\n start_time = getattr(self.test, Const.STEST_START_PERF_COUNTER, None)\n finish_time = getattr(self.test, Const.STEST_FINISH_PERF_COUNTER, None)\n\n if start_time is not None and finish_time is not None:\n total_time = finish_time - start_time\n return '{:f}'.format(total_time)\n else:\n return ''\n\n @property\n def 
start_time(self):\n\n        return getattr(self.test, Const.STEST_TESTCASE_START_TIME, None)\n\n    @property\n    def exec_number(self):\n        return getattr(self.test, Const.STEST_TESTCASE_EXEC_NUMBER, 0)\n\n    @property\n    def screenshot_info(self):\n\n        k = 'screenshot_info'\n        return self._kwargs.get(k, {})\n\n    @property\n    def testpoint(self):\n        if self.is_abstract_testcase:\n            classname = self.test.strclass()\n        else:\n            try:\n                classname = strclass(self.test.__class__)\n            except Exception:\n                classname = self.test.id()\n\n        try:\n            chinese_name = self.test.__doc__.split(\"\\n\")[0].strip()\n        except Exception:\n            chinese_name = \"\"\n        return (classname, chinese_name)\n\n    @property\n    def result_name(self):\n\n        markers = self.__class__.const_attrs.values()\n        for marker in markers:\n            code = marker.value\n            if self.__result_code == code:\n                return marker.expend_kwargs[\"alias\"]\n\n    @property\n    def result_code(self):\n        return self.__result_code\n\n    @classmethod\n    def result_codes(cls):\n        markers = cls.const_attrs.values()\n        codes = [marker.value for marker in markers]\n        return codes\n\n    @property\n    def css_class(self):\n\n        return self.CSS_CLASS_MAPS[self.result_code]\n\n    @property\n    def extra_info(self):\n        \"\"\"用例额外信息,如编写者、修改者、最后修改者等\"\"\"\n\n        tms = getattr(self.test, 'test_method_settings', {})\n        info = {\n            Test.AUTHOR: tms.get(Test.AUTHOR, \"\"),\n            Test.EDITORS: tms.get(Test.EDITORS, []),\n            Test.LAST_MODIFYIED_BY: tms.get(Test.LAST_MODIFYIED_BY, \"\"),\n            Test.LAST_MODIFYIED_TIME: tms.get(Test.LAST_MODIFYIED_TIME, \"\"),\n            \"运行编号\": self.exec_number,\n            Test.PRIORITY: tms.get(Test.PRIORITY, \"\"),\n            Test.DNAME: tms.get(Test.DNAME, \"\"),\n            Test.DEPENDS: tms.get(Test.DEPENDS, \"\"),\n            Test.GROUPS: tms.get(Test.GROUPS, \"\"),\n        }\n        return info\n\n    @property\n    def description(self):\n\n        return self.test.shortDescription() or ''\n\n    @property\n    def output_message(self):\n\n        return ''\n\n    @property\n    def error_message(self):\n\n        return self.message\n\n    @property\n    def testdatas(self):\n        args_mapinfo = {}\n        kwargs_mapinfo = {}\n        if self.is_abstract_testcase:\n            spec = getattr(self.test.test_method_obj, Test.ORIGINAL_TEST_METHOD_ARGSPEC)\n            runtimedatas = self.test.get_testcase_runtime_datas()\n            args = runtimedatas['args']\n            kwargs = runtimedatas['kwargs']\n            for index, arg_value in enumerate(args):\n                try:\n                    arg_name = spec.args[index]\n                except IndexError:\n                    arg_name = ''  # 代表传入了多余的参数\n                if arg_value == self.test:\n                    continue\n                else:\n                    args_mapinfo[index] = (arg_name, arg_value)\n\n            kwargs_mapinfo = copy.deepcopy(kwargs)\n\n        return (args_mapinfo, kwargs_mapinfo)\n\n    def __to_str(self, value):\n\n        try:\n            fstr = json.dumps(value, ensure_ascii=False, indent=4)\n        except Exception:\n            fstr = str(value)\n        return fstr\n\n    @property\n    def printable_testdatas(self):\n\n        args_mapinfo, kwargs_mapinfo = self.testdatas\n        printable_args = {}\n        printable_kwargs = {}\n        for k, v in args_mapinfo.items():\n            arg_name, arg_value = v\n            printable_args[k] = (arg_name, self.__to_str(arg_value))\n        for k, v in kwargs_mapinfo.items():\n            printable_kwargs[k] = self.__to_str(v)\n        return (printable_args, printable_kwargs)\n\n    def set_result(self, result_code, message=\"\"):\n\n        if result_code not in self.result_codes():\n            raise ValueError('The value range of result code is: {}'.format(' | '.join(self.result_codes())))\n        self.__result_code = result_code\n        self.message = message\n\n\nclass TestResultFormatter(object):\n    def __init__(self, test_result):\n\n        self.__test_result = test_result\n\n    @property\n    def result(self):\n        return self.__test_result\n\n    def _get_belong_project(self):\n\n        return \"\"\n\n    def to_py_json(self):\n\n        testpoints = []\n        for pointname, group_testcases in self._group_by_belong_testpoint():\n            results = []\n            for tc in group_testcases:\n                result = dict(\n                    result=dict(code=tc.result_code, name=tc.result_name, css_class=tc.css_class),\n                    id=tc.id,\n                    name=tc.name,\n                    duration=tc.duration,\n                    method_name=tc.method_name,\n                    testdatas=tc.printable_testdatas,\n                    number=tc.number,\n                    description=tc.description,\n                    output_message=tc.output_message,\n                    error_message=tc.error_message,\n                    screenshot_info=tc.screenshot_info,\n                    extra_info=tc.extra_info,\n                )\n                results.append(result)\n            testpoint = dict(name=pointname,\n                             count=len(group_testcases),\n                             pass_count=self.counts(group_testcases, TestCaseWrapper.SUCCESS),\n                             fail_count=self.counts(group_testcases, TestCaseWrapper.FAILURE),\n                             error_count=self.counts(group_testcases, TestCaseWrapper.ERROR),\n                             block_count=self.counts(group_testcases, TestCaseWrapper.BLOCKED),\n                             skip_count=self.counts(group_testcases, TestCaseWrapper.SKIPED),\n                             xfail_count=self.counts(group_testcases, TestCaseWrapper.XFAILURE),\n                             xpass_count=self.counts(group_testcases, TestCaseWrapper.XSUCCESS),\n                             testcases=results)\n            testpoints.append(testpoint)\n        root = dict(project=self._get_belong_project(), testpoints=testpoints)\n        return root\n\n    def to_json_string(self):\n\n        return json.dumps(self.to_py_json(), ensure_ascii=False)\n\n    @property\n    def testcases(self):\n\n        testcaselist = []\n        id = 1\n        for test, message in self.result.errors:\n            testcaselist.append(TestCaseWrapper(test, id, TestCaseWrapper.ERROR, message=message, screenshot_info=self.result.screenshots.get(test, {})))\n            id = id + 1\n        for test, message in self.result.skipped:\n            testcaselist.append(TestCaseWrapper(test, id, TestCaseWrapper.SKIPED, message=message, screenshot_info=self.result.screenshots.get(test, {})))\n            id = id + 1\n\n        for test, message in self.result.failures:\n            testcaselist.append(TestCaseWrapper(test, id, TestCaseWrapper.FAILURE, message=message, screenshot_info=self.result.screenshots.get(test, {})))\n            id = id + 1\n\n        for test in self.result.successes:\n            testcaselist.append(TestCaseWrapper(test, id, TestCaseWrapper.SUCCESS, screenshot_info=self.result.screenshots.get(test, {})))\n            id = id + 1\n\n        for test, message in self.result.expectedFailures:\n            testcaselist.append(TestCaseWrapper(test, id, TestCaseWrapper.XFAILURE, message=message, screenshot_info=self.result.screenshots.get(test, {})))\n            id = id + 1\n\n        for test, message in self.result.unexpectedSuccesses:\n            testcaselist.append(TestCaseWrapper(test, id, TestCaseWrapper.XSUCCESS, screenshot_info=self.result.screenshots.get(test, {})))\n            id = id + 1\n        testcaselist.sort(key=lambda one: one.exec_number)\n        return testcaselist\n\n    def _get_testpoints(self):\n\n        testpoints = []\n        for testcase in self.testcases:\n            testpoint = testcase.testpoint\n            clsname = testpoint[0]\n            if clsname not in [tp[0] for tp in testpoints]:\n                testpoints.append(testpoint)\n        return testpoints\n\n    def _group_by_belong_testpoint(self):\n\n        groups = []\n        for testpoint in self._get_testpoints():\n            group_testcases = []\n            for test in self.testcases:\n                if testpoint[0] == test.testpoint[0]:\n                    group_testcases.append(test)\n            groups.append((testpoint, group_testcases))\n        return groups\n\n    @classmethod\n    def counts(cls, testcases, result_code):\n\n        return len([tc for tc in testcases if tc.result_code == 
result_code])\n","repo_name":"hotswwkyo/stest","sub_path":"stest/core/result_formatter.py","file_name":"result_formatter.py","file_ext":"py","file_size_in_byte":11006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72631157992","text":"import pandas as pd\nimport numpy as np\nimport typer\nfrom collections import Counter\nimport os.path\n\nIN_DIR = 'data/s2orc/chunk_meme_mappings'\nOUT_DIR = 'data/s2orc/clusterings/clusters_names/'\n\ndef ctidf_names(memes_mappings: pd.DataFrame, chunks: pd.Series) -> pd.DataFrame:\n chunks_counter = Counter(chunks.explode())\n\n def split_function(s: str):\n return s.split()\n\n def max_dict(d: dict):\n if len(d) != 0:\n return max(d, key=d.get)\n else:\n return ''\n\n b = memes_mappings['chunk'].str.split(' ').explode() # flatten to all words\n all_words_counter = Counter(b)\n\n clusters = (memes_mappings.groupby(['meme_id']).agg({\n 'chunk':\n lambda x: x.tolist()\n }).rename({'chunk': 'cluster'}, axis=1)) # creates mapping of clusters to unique noun chunks list\n\n #calculate mean number of words per cluster:\n A = b.size / clusters.size\n\n clusters['cluster'] = clusters['cluster'].apply(\n ' '.join).apply(split_function)\n\n #length of each cluster:\n clusters['length'] = [len(x) for x in clusters['cluster']]\n\n #counter of words in cluster\n clusters['cluster_counter'] = clusters['cluster'].apply(Counter)\n\n #calculate ctdf for each word in cluster:\n list_of_dicts = []\n for _, row in clusters.iterrows():\n words_to_score = {}\n for key in row['cluster_counter']:\n words_to_score[key] = row['cluster_counter'][key] * np.log(\n 1 + A / all_words_counter[key])\n list_of_dicts.append(words_to_score)\n clusters['ctdf'] = list_of_dicts\n #return series of names of clusters\n clusters['cluster_name'] = clusters['ctdf'].apply(max_dict)\n print(clusters.head())\n\n\ndef main(file_name: str):\n in_path = os.path.join(IN_DIR, file_name)\n out_path = os.path.join(OUT_DIR, file_name)\n df = pd.read_parquet(in_path)\n file_name = file_name.split(\"/\")[-1]\n df_out = ctidf_names(df)\n\n df_out.to_parquet(out_path) \n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","repo_name":"ModelOriented/AI-strategies-papers-regulations-monitoring","sub_path":"scripts/s2orc/clusters_names_ctidf.py","file_name":"clusters_names_ctidf.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"70129515114","text":"\n\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom collections import OrderedDict\n\nimport ffmpy\n\nfrom config import ConfigMedia\nfrom utils.logger import InfoLogger\n\n\nclass BasePipe(object):\n def __init__(self):\n self.pipe_r, self.pipe_w = os.pipe()\n self.closed = False\n\n def write(self, data):\n os.write(self.pipe_w, data)\n\n def read(self):\n return os.read(self.pipe_r)\n\n def get_read_pipe(self):\n return self.pipe_r\n\n def get_write_pipe(self):\n return self.pipe_w\n\n def close(self):\n os.close(self.pipe_r)\n os.close(self.pipe_w)\n self.closed = True\n\n def is_closed(self):\n return self.closed\n\n\nclass ImagePipe(BasePipe):\n def __init__(self):\n super(ImagePipe, self).__init__()\n\n\nclass AudioPipe(BasePipe):\n def __init__(self):\n super(AudioPipe, self).__init__()\n\n\n# -----------------------------------------------------------------------------\n\ndef run_video_feed(func, handle, image_pipe):\n logger = InfoLogger(__name__).get_logger()\n 
logger.info('[Media] run_video_feed start')\n\n try:\n while not image_pipe.is_closed():\n image_container = func(handle)\n image_pipe.write(image_container[6])\n\n except Exception as e:\n logger.info('[Media] run_video_feed error : %s' % e)\n\n logger.info('[Media] run_video_feed exit done')\n\n\ndef run_audio_feed(proc, audio_pipe):\n logger = InfoLogger(__name__).get_logger()\n logger.info('[Media] run_audio_feed start')\n\n try:\n while not audio_pipe.is_closed():\n # audio_pipe.write(proc.stdout.read(80))\n audio_pipe.write(proc.stdout.readline())\n\n except Exception as e:\n logger.info('[Media] run_audio_feed error : %s' % e)\n\n logger.info('[Media] run_audio_feed exit done')\n\n\ndef run_media_stream(robot_uid, ff_obj):\n logger = InfoLogger(__name__).get_logger()\n logger.info('[Media] run_media_stream start')\n\n try:\n ff_obj.run()\n\n except Exception as e:\n logger.info('[Media] run_media_stream error : %s' % e)\n\n logger.info('[Media] run_media_stream exit done')\n\n\n# -----------------------------------------------------------------------------\n\nclass MediaStreamProxy(object):\n def __init__(self, robot_uid):\n self.logger = InfoLogger(__name__).get_logger()\n\n self.robot_uid = robot_uid\n\n self.host = ConfigMedia.host\n self.port = ConfigMedia.port\n self.path = ConfigMedia.path\n\n self.a_format = ConfigMedia.a_format\n self.analyze_duration = ConfigMedia.analyze_duration\n\n self.v_format = ConfigMedia.v_format\n self.pix_fmt = ConfigMedia.pix_fmt\n self.v_resolution = ConfigMedia.v_resolution\n\n self.audio_codec = ConfigMedia.audio_codec\n self.audio_bitrate = ConfigMedia.audio_bitrate\n self.strict_lvl = ConfigMedia.strict_lvl\n\n self.video_bitrate = ConfigMedia.video_bitrate\n self.output_format = ConfigMedia.output_format\n\n self.a_proc = None\n self.v_thread = None\n self.a_thread = None\n\n self.ff = None\n self.m_thread = None\n\n self.image_pipe = None\n self.audio_pipe = None\n\n def start(self, func, handle, bit_dict=None):\n try:\n self.image_pipe = ImagePipe()\n self.audio_pipe = AudioPipe()\n\n # Video thread\n self.v_thread = threading.Thread( \\\n target = run_video_feed, \\\n kwargs = {'func' : func, 'handle' : handle, \n 'image_pipe' : self.image_pipe})\n\n # Pipe for media stream\n audio_pipe_r = self.audio_pipe.get_read_pipe()\n image_pipe_r = self.image_pipe.get_read_pipe()\n\n audio_input = 'pipe:%s' % str(audio_pipe_r)\n image_input = 'pipe:%s' % str(image_pipe_r)\n\n # Cover bitrate config from args\n if bit_dict:\n self.audio_bitrate = bit_dict.get('audio_bitrate')\n self.video_bitrate = bit_dict.get('video_bitrate')\n\n # -re (input) : Read input at native frame rate. 
\n # Set analyzeduration to 0 for audio to disable analyze delay\n audio_args = '-f {f} -analyzeduration {ad} -re'.format(\n f = self.a_format, ad = self.analyze_duration)\n\n image_args = '-f {f} -pix_fmt {pf} -s:v {sv} -re'.format(\n f = self.v_format, pf = self.pix_fmt, sv = self.v_resolution)\n\n # For dev\n # receiver = '/tmp/test.flv'\n # if os.path.exists(receiver):\n # os.unlink(receiver)\n receiver = 'rtmp://' + self.host + ':' + str(self.port) + \\\n self.path + str(self.robot_uid)\n\n # -c:a : audio codec, must be set in output\n # -b:a : audio bitrate\n # -b:v : video bitrate\n # output total bitrate = (video bitrate + audio bitrate)kbits/s\n # -muxdelay seconds : Set the maximum demux-decode delay.\n # -muxpreload seconds : Set the initial demux-decode delay.\n receiver_args = \"\"\"-map 0 -c:a {ca} -strict -{s} -b:a {ba}\n -map 1 -b:v {bv} \n -f {f} -muxdelay 0.1 -muxpreload 0\"\"\".\\\n format(ca = self.audio_codec, s = self.strict_lvl, \n ba = self.audio_bitrate, bv = self.video_bitrate,\n f = self.output_format)\n\n ff_input = OrderedDict(\n [ (audio_input, audio_args), (image_input, image_args), ]\n )\n ff_output = {receiver : receiver_args,}\n\n self.ff = ffmpy.FFmpeg(inputs = ff_input, outputs = ff_output)\n self.logger.info('[MediaStreamProxy] ff.cmd : %s' % self.ff.cmd)\n\n self.m_thread = threading.Thread( \\\n target = run_media_stream, \\\n kwargs = {'robot_uid' : self.robot_uid, 'ff_obj' : self.ff})\n\n # Audio related\n # Do not use stdout=PIPE or stderr=PIPE with this function as \n # that can deadlock based on the child process output volume.\n # Use Popen with the communicate() method when you need pipes.\n a_command = 'arecord -f cd -t {a_format}'.format(a_format = self.a_format)\n self.a_proc = subprocess.Popen(shlex.split(a_command), \\\n stdout = self.audio_pipe.get_write_pipe())\n\n self.m_thread.start()\n self.v_thread.start()\n\n self.logger.info('[MediaStreamProxy] start done')\n\n except Exception as e:\n self.logger.info('[MediaStreamProxy] start error : %s' % e)\n\n def stop(self):\n try:\n # Close working pipes\n if self.image_pipe:\n self.image_pipe.close()\n self.logger.info('[MediaStreamProxy] close image_pipe done')\n\n if self.audio_pipe:\n self.audio_pipe.close()\n self.logger.info('[MediaStreamProxy] close audio_pipe done')\n\n # Stop audio process\n if self.a_proc:\n self.a_proc.terminate()\n # self.a_proc.kill()\n\n # Use wait() to terminate the defunct process\n self.a_proc.wait()\n self.logger.info('[MediaStreamProxy] stop audio proc done')\n\n # Stop ffmpeg process\n if self.ff:\n self.ff.process.terminate()\n # self.ff.process.kill()\n self.logger.info('[MediaStreamProxy] stop ffmpeg proc done')\n\n except Exception as e:\n self.logger.info('[MediaStreamProxy] stop error : %s' % e)\n\n self.logger.info('[MediaStreamProxy] stop done')\n","repo_name":"xianglei0610/ve_puppet","sub_path":"client/libs/mediastream.py","file_name":"mediastream.py","file_ext":"py","file_size_in_byte":7775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2334125672","text":"import numpy as np\nimport os\n\nfrom astropy.tests.helper import pytest\nfrom stingray import Lightcurve, Crossspectrum, sampledata\nfrom stingray.simulator import simulator, models\n\n_H5PY_INSTALLED = True\n\ntry:\n import h5py\nexcept ImportError:\n _H5PY_INSTALLED = False\n\n\nclass TestSimulator(object):\n\n @classmethod\n def setup_class(self):\n self.simulator = simulator.Simulator(N=1024, mean=0.5, dt=0.125)\n\n def 
calculate_lag(self, lc, h, delay):\n        \"\"\"\n        Class method to calculate lag between two light curves.\n        \"\"\"\n        s = lc.counts\n        output = self.simulator.simulate(s, h, 'same')[delay:]\n        s = s[delay:]\n        time = lc.time[delay:]\n\n        lc1 = Lightcurve(time, s)\n        lc2 = Lightcurve(time, output)\n        cross = Crossspectrum(lc1, lc2)\n        cross = cross.rebin(0.0075)\n\n        return np.angle(cross.power) / (2 * np.pi * cross.freq)\n\n    def test_simulate_with_seed(self):\n        \"\"\"\n        Simulate with a random seed value.\n        \"\"\"\n        self.simulator = simulator.Simulator(N=1024, seed=12)\n        assert len(self.simulator.simulate(2).counts), 1024\n\n    def test_simulate_with_incorrect_arguments(self):\n        with pytest.raises(ValueError):\n            self.simulator.simulate(1, 2, 3, 4)\n\n    def test_simulate_channel(self):\n        \"\"\"\n        Simulate an energy channel.\n        \"\"\"\n        self.simulator.simulate_channel('3.5-4.5', 'lorenzian', [1, 2, 3, 4])\n        self.simulator.delete_channel('3.5-4.5')\n\n    def test_incorrect_simulate_channel(self):\n        \"\"\"Test simulating a channel that already exists.\"\"\"\n        self.simulator.simulate_channel('3.5-4.5', 2)\n        with pytest.raises(KeyError):\n            self.simulator.simulate_channel('3.5-4.5', 2)\n        self.simulator.delete_channel('3.5-4.5')\n\n    def test_get_channel(self):\n        \"\"\"\n        Retrieve an energy channel after it has been simulated.\n        \"\"\"\n        self.simulator.simulate_channel('3.5-4.5', 2)\n        lc = self.simulator.get_channel('3.5-4.5')\n        self.simulator.delete_channel('3.5-4.5')\n\n    def test_get_channels(self):\n        \"\"\"\n        Retrieve multiple energy channel after it has been simulated.\n        \"\"\"\n        self.simulator.simulate_channel('3.5-4.5', 2)\n        self.simulator.simulate_channel('4.5-5.5', 'smoothbknpo', [1, 2, 3, 4])\n        lc = self.simulator.get_channels(['3.5-4.5', '4.5-5.5'])\n\n        self.simulator.delete_channels(['3.5-4.5', '4.5-5.5'])\n\n    def test_get_all_channels(self):\n        \"\"\" Retrieve all energy channels. \"\"\"\n        self.simulator.simulate_channel('3.5-4.5', 2)\n        self.simulator.simulate_channel('4.5-5.5', 1)\n        lc = self.simulator.get_all_channels()\n\n        self.simulator.delete_channels(['3.5-4.5', '4.5-5.5'])\n\n    def test_count_channels(self):\n        \"\"\"\n        Count energy channels after they have been simulated.\n        \"\"\"\n        self.simulator.simulate_channel('3.5-4.5', 2)\n        self.simulator.simulate_channel('4.5-5.5', 1)\n\n        assert self.simulator.count_channels() == 2\n        self.simulator.delete_channels(['3.5-4.5', '4.5-5.5'])\n\n    def test_delete_incorrect_channel(self):\n        \"\"\"\n        Test if deleting incorrect channel raises a\n        keyerror exception.\n        \"\"\"\n        with pytest.raises(KeyError):\n            self.simulator.delete_channel('3.5-4.5')\n\n    def test_delete_incorrect_channels(self):\n        \"\"\"\n        Test if deleting incorrect channels raises a\n        keyerror exception.\n        \"\"\"\n        with pytest.raises(KeyError):\n            self.simulator.delete_channels(['3.5-4.5', '4.5-5.5'])\n\n    def test_simulate_powerlaw(self):\n        \"\"\"\n        Simulate light curve from power law spectrum.\n        \"\"\"\n        assert len(self.simulator.simulate(2).counts), 1024\n\n    def test_compare_powerlaw(self):\n        \"\"\"\n        Compare simulated power spectrum with actual one.\n        \"\"\"\n        B, N, red_noise, dt = 2, 1024, 10, 1\n\n        self.simulator = simulator.Simulator(N=N, dt=dt, mean=5, rms=1,\n                                             red_noise=red_noise)\n        lc = [self.simulator.simulate(B) for i in range(1, 30)]\n        simulated = self.simulator.powerspectrum(lc, lc[0].tseg)\n\n        w = np.fft.rfftfreq(N, d=dt)[1:]\n        actual = np.power((1/w), B/2)[:-1]\n\n        actual_prob = actual/float(sum(actual))\n        simulated_prob = simulated/float(sum(simulated))\n\n        assert np.all(\n            np.abs(actual_prob - simulated_prob) < 3*np.sqrt(actual_prob)\n        )\n\n    def test_simulate_powerspectrum(self):\n        \"\"\"\n        Simulate light curve from any power spectrum.\n        \"\"\"\n        s = np.random.rand(1024)\n        assert len(self.simulator.simulate(s)), 1024\n\n    def test_simulate_lorenzian(self):\n        \"\"\"\n        Simulate light curve using lorenzian model.\n        \"\"\"\n        assert len(self.simulator.simulate('lorenzian', [1, 2, 3, 4])), 1024\n\n    def test_compare_lorenzian(self):\n        \"\"\"\n        Compare simulated lorenzian spectrum with original spectrum.\n        \"\"\"\n        N, red_noise, dt = 1024, 10, 1\n\n        self.simulator = simulator.Simulator(N=N, dt=dt, mean=0.1,\n                                             rms=0.4, red_noise=red_noise)\n        lc = [self.simulator.simulate('lorenzian', [0.3, 0.9, 0.6, 0.5])\n              for i in range(1, 30)]\n        simulated = self.simulator.powerspectrum(lc, lc[0].tseg)\n\n        w = np.fft.rfftfreq(N, d=dt)[1:]\n        actual = models.lorenzian(w, [0.3, 0.9, 0.6, 0.5])[:-1]\n\n        actual_prob = actual/float(sum(actual))\n        simulated_prob = simulated/float(sum(simulated))\n\n        assert np.all(\n            np.abs(actual_prob - simulated_prob) < 3*np.sqrt(actual_prob))\n\n    def test_simulate_smoothbknpo(self):\n        \"\"\"\n        Simulate light curve using smooth broken power law model.\n        \"\"\"\n        assert len(self.simulator.simulate('smoothbknpo', [1, 2, 3, 4])), 1024\n\n    def test_compare_smoothbknpo(self):\n        \"\"\"\n        Compare simulated smooth broken power law spectrum with original\n        spectrum.\n        \"\"\"\n        N, red_noise, dt = 1024, 10, 1\n\n        self.simulator = simulator.Simulator(N=N, dt=dt, mean=0.1, rms=0.7,\n                                             red_noise=red_noise)\n        lc = [self.simulator.simulate('smoothbknpo', [0.6, 0.2, 0.6, 0.5])\n              for i in range(1, 30)]\n\n        simulated = self.simulator.powerspectrum(lc, lc[0].tseg)\n\n        w = np.fft.rfftfreq(N, d=dt)[1:]\n        actual = models.smoothbknpo(w, [0.6, 0.2, 0.6, 0.5])[:-1]\n\n        actual_prob = actual/float(sum(actual))\n        simulated_prob = simulated/float(sum(simulated))\n\n        assert 
np.all(\n np.abs(actual_prob - simulated_prob) < 3*np.sqrt(actual_prob))\n\n def test_simulate_wrong_model(self):\n \"\"\"\n Simulate with a model that does not exist.\n \"\"\"\n with pytest.raises(ValueError):\n self.simulator.simulate('unsupported', [0.6, 0.2, 0.6, 0.5])\n\n def test_construct_simple_ir(self):\n \"\"\"\n Construct simple impulse response.\n \"\"\"\n t0, w = 100, 500\n assert len(self.simulator.simple_ir(t0, w)), (t0+w)/self.simulator.dt\n\n def test_construct_relativistic_ir(self):\n \"\"\"\n Construct relativistic impulse response.\n \"\"\"\n t1, t3 = 3, 10\n assert len(self.simulator.relativistic_ir(t1=t1, t3=t3)),\\\n (t1+t3)/self.simulator.dt\n\n def test_simulate_simple_impulse(self):\n \"\"\"\n Simulate light curve from simple impulse response.\n \"\"\"\n lc = sampledata.sample_data()\n s = lc.counts\n h = self.simulator.simple_ir(10, 1, 1)\n output = self.simulator.simulate(s, h)\n\n def test_powerspectrum(self):\n \"\"\"\n Create a power spectrum from light curve.\n \"\"\"\n self.simulator.simulate(2)\n self.simulator.powerspectrum(self.simulator.lc)\n\n def test_simulate_relativistic_impulse(self):\n \"\"\"\n Simulate light curve from relativistic impulse response.\n \"\"\"\n lc = sampledata.sample_data()\n s = lc.counts\n\n h = self.simulator.relativistic_ir()\n output = self.simulator.simulate(s, h)\n\n def test_filtered_simulate(self):\n \"\"\"\n Simulate light curve using 'filtered' mode.\n \"\"\"\n lc = sampledata.sample_data()\n s = lc.counts\n\n h = self.simulator.simple_ir()\n output = self.simulator.simulate(s, h, 'filtered')\n\n def test_simple_lag_spectrum(self):\n \"\"\"\n Simulate light curve from simple impulse response and\n compute lag spectrum.\n \"\"\"\n lc = sampledata.sample_data()\n h = self.simulator.simple_ir(start=14, width=1)\n delay = int(15/lc.dt)\n\n lag = self.calculate_lag(lc, h, delay)\n v_cutoff = 1.0/(2*15.0)\n h_cutoff = lag[int((v_cutoff-0.0075)*1/0.0075)]\n\n assert np.abs(15-h_cutoff) < np.sqrt(15)\n\n def test_relativistic_lag_spectrum(self):\n \"\"\"\n Simulate light curve from relativistic impulse response and\n compute lag spectrum.\n \"\"\"\n lc = sampledata.sample_data()\n h = self.simulator.relativistic_ir(t1=3, t2=4, t3=10)\n delay = int(4/lc.dt)\n\n lag = self.calculate_lag(lc, h, delay)\n v_cutoff = 1.0/(2*4)\n h_cutoff = lag[int((v_cutoff-0.0075)*1/0.0075)]\n\n assert np.abs(4-h_cutoff) < np.sqrt(4)\n\n def test_position_varying_channels(self):\n \"\"\"\n Tests lags for multiple energy channels with each channel\n having same intensity and varying position.\n \"\"\"\n lc = sampledata.sample_data()\n s = lc.counts\n h = []\n h.append(self.simulator.simple_ir(start=4, width=1))\n h.append(self.simulator.simple_ir(start=9, width=1))\n\n delays = [int(5/lc.dt), int(10/lc.dt)]\n\n outputs = []\n for i in h:\n lc2 = self.simulator.simulate(s, i)\n lc2 = lc2.shift(-lc2.time[0] + lc.time[0])\n outputs.append(lc2)\n\n cross = [Crossspectrum(lc, lc2).rebin(0.0075) for lc2 in outputs]\n lags = [np.angle(c.power) / (2 * np.pi * c.freq) for c in cross]\n\n v_cutoffs = [1.0/(2.0*5), 1.0/(2.0*10)]\n h_cutoffs = [lag[int((v-0.0075)*1/0.0075)]\n for lag, v in zip(lags, v_cutoffs)]\n\n assert np.abs(5-h_cutoffs[0]) < np.sqrt(5)\n assert np.abs(10-h_cutoffs[1]) < np.sqrt(10)\n\n def test_intensity_varying_channels(self):\n \"\"\"\n Tests lags for multiple energy channels with each channel\n having same position and varying intensity.\n \"\"\"\n lc = sampledata.sample_data()\n s = lc.counts\n h = []\n 
h.append(self.simulator.simple_ir(start=4, width=1, intensity=10))\n h.append(self.simulator.simple_ir(start=4, width=1, intensity=20))\n\n delay = int(5/lc.dt)\n\n outputs = []\n for i in h:\n lc2 = self.simulator.simulate(s, i)\n lc2 = lc2.shift(-lc2.time[0] + lc.time[0])\n outputs.append(lc2)\n\n cross = [Crossspectrum(lc, lc2).rebin(0.0075) for lc2 in outputs]\n lags = [np.angle(c.power) / (2 * np.pi * c.freq) for c in cross]\n\n v_cutoff = 1.0/(2.0*5)\n h_cutoffs = [lag[int((v_cutoff-0.0075)*1/0.0075)] for lag in lags]\n\n assert np.abs(5-h_cutoffs[0]) < np.sqrt(5)\n assert np.abs(5-h_cutoffs[1]) < np.sqrt(5)\n\n def test_powerspectrum(self):\n \"\"\"\n Create a powerspectrum from light curve.\n \"\"\"\n lc = self.simulator.simulate(2)\n self.simulator.powerspectrum(lc)\n\n def test_io(self):\n sim = simulator.Simulator(N=1024)\n sim.write('sim.pickle')\n sim = sim.read('sim.pickle')\n assert sim.N == 1024\n os.remove('sim.pickle')\n\n def test_io_with_unsupported_format(self):\n sim = simulator.Simulator(N=1024)\n with pytest.raises(KeyError):\n sim.write('sim.hdf5', format_='hdf5')\n with pytest.raises(KeyError):\n sim.write('sim.pickle', format_='pickle')\n sim.read('sim.pickle', format_='hdf5')\n os.remove('sim.pickle')\n","repo_name":"svenbuder/SummerSchool","sub_path":"stingray/simulator/tests/test_simulator.py","file_name":"test_simulator.py","file_ext":"py","file_size_in_byte":12210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28197179181","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : pmoma\nDate : 2019-04-16\nPurpose: Hamm\n\"\"\"\nimport logging\nimport argparse\nimport sys\nimport re\nimport os\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Hamming Python script',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'positionals', metavar='FILE', nargs=2, help='File inputs')\n\n parser.add_argument(\n '-d',\n\t'--debug',\n help='Debug',\n default=False,\n action='store_true')\n\n return parser.parse_args()\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n\n# --------------------------------------------------\ndef dist(s1,s2):\n \"\"\"Hamming code\"\"\"\n num= abs(len(s1)-len(s2))\n for l1, l2 in list(zip(s1,s2)):\n if l1!= l2:\n num+= 1\n\n logging.debug('s1 = {}, s2 = {}, d = {}'.format(s1,s2,num))\n return num\n\n# --------------------------------------------------\ndef main():\n args = get_args()\n fil = args.positionals\n\n if len(fil) !=2:\n die('Only two files')\n if not os.path.isfile(fil[0]):\n die('\"{}\" is not a file'.format(fil[0]))\n if not os.path.isfile(fil[1]):\n die('\"{}\" is not a file'.format(fil[1]))\n logging.basicConfig(\n filename='.log',\n filemode='w',\n level=logging.DEBUG if args.debug else logging.CRITICAL\n)\n logging.debug('file1 = {}, file2 = {}'.format(fil[0], fil[1]))\n\n dis = 0\n fil1= open(fil[0]).read().split()\n fil2= open(fil[1]).read().split()\n ftot= list(zip(fil1, fil2))\n #print(ftot)\n for s1, s2 in ftot:\n dis += dist(s1, s2)\n\n print(dis)\n# --------------------------------------------------\nif __name__ == '__main__':\n 
main()\n\n","repo_name":"pmoma/biosys-analytics","sub_path":"assignments/13-hamm/hamm.py","file_name":"hamm.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"24370108017","text":"from itertools import permutations\nfrom collections import OrderedDict\n\n#use (9*8-7=65)\n#then use (0+12/3=4)\n\n# a bit slow\n# i also made it so that it only uses a max of 2 of the same number or operator\n# because cartesian product was even slower\n# so if there are three of the same number in one nerdle\n# then you're out of luck\n\nG = [\"NA\",\"NA\",\"NA\",\"NA\",\"NA\",\"NA\",\"NA\",\"NA\"]\nunknown = 8\nR = [[],[],[],[],[],[],[],[]]\nH = []\nB = []\nused_words = []\n\nfor p in range(6):\n U = []\n list_of_lists = []\n letters = input(\"Input equation as string: \")\n colors = input(\"Input colors as string Red Black Green (eg. RRBGB): \")\n if(colors == \"GGGGGGGG\"):\n print(\"Congrats!\")\n exit()\n used_words.append(letters)\n def Convert(string):\n list1=[]\n list1[:0]=string\n return list1\n letterlist = Convert(letters)\n colorlist = Convert(colors)\n for i in range(len(letterlist)):\n if(letterlist[i] == \"=\"):\n letterlist[i] = \"==\"\n for i in range(len(letterlist)):\n if(colorlist[i] == \"G\"):\n if G[i] != letterlist[i]:\n G[i] = letterlist[i]\n unknown -= 1\n if(colorlist[i] == \"R\"):\n R[i].append(letterlist[i])\n H.append(letterlist[i])\n if(colorlist[i] == \"B\"):\n if(letterlist[i] not in H):\n B.append(letterlist[i])\n \n equation_list1 = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"+\",\"-\",\"*\",\"/\",\"==\"]\n equation_list2 = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"+\",\"-\",\"*\",\"/\"]\n equation_list = equation_list1 + equation_list2\n\n for i in range(len(equation_list)):\n if equation_list[i] not in B:\n U.append(equation_list[i])\n\n def read(list):\n return eval(''.join(list))\n\n def newthing(list):\n new_list = G.copy()\n k = 0\n for i in range(len(new_list)):\n if(new_list[i] == \"NA\"):\n new_list[i] = list[k]\n k += 1\n if (new_list[0].isdigit() and new_list[7].isdigit()):\n flag2 = 0\n for t in range(len(new_list) - 1):\n if(new_list[t].isdigit()==False and new_list[t+1].isdigit()==False):\n flag2 = 1\n if(new_list[t] == \"/\" and new_list[t+1] == \"0\"):\n flag2 = 1\n if(new_list[t] == \"0\" and new_list[t+1].isdigit()):\n flag2 = 1\n if flag2 == 0:\n result = read(new_list)\n if result == True and new_list[0] not in R[0] and new_list[1] not in R[1] and new_list[2] not in R[2] and new_list[3] not in R[3] and new_list[4] not in R[4] and new_list[5] not in R[5] and new_list[6] not in R[6] and new_list[7] not in R[7]:\n flag = 0\n for j in range(len(H)):\n if H[j] not in new_list:\n flag = 1\n if flag == 0:\n list_of_lists.append(''.join(new_list))\n\n combos = permutations(U, unknown)\n hello = list(combos)\n print(hello)\n for i in range(len(hello)):\n newthing(list(hello[i]))\n\n semi_final_list = list(OrderedDict.fromkeys(list_of_lists))\n final_list = []\n for i in range(len(semi_final_list)):\n if(semi_final_list[i] not in used_words):\n final_list.append(semi_final_list[i])\n print(final_list)","repo_name":"wilsebbis/nerdle-solver","sub_path":"nerdle_solver.py","file_name":"nerdle_solver.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22266447225","text":"siswa = {\n 'name': 'Wijaya Kusuma',\n 'age': 20,\n 'awesome': 
True,\n 'language': ['C', 'Python', 'SQL'],\n 'address' : 'Pontianak - Kalimantan Barat',\n 'favorites': {\n 'food': 'Indomie',\n 'sport': 'Balap Karung'\n }\n}\n\nname = siswa.get('name')\nprint(name)\n\nhobby = siswa.get('favorites')['food']\nprint(hobby)\n\naddress = siswa.get('address')\nprint(address)\n\nfor key, val in siswa.items():\n print(key, '=', siswa[key])\n","repo_name":"ciptomx/StrukturData","sub_path":"dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118608873","text":"import heapq\nfrom typing import List\n\n\nclass Solution:\n def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> List[int]:\n worker_to_bike_group = []\n queue = []\n\n for worker, worker_loc in enumerate(workers):\n curr = []\n for bike, bike_loc in enumerate(bikes):\n distance = self.get_distance(worker_loc, bike_loc)\n curr.append((distance, worker, bike))\n curr.sort(reverse=True)\n heapq.heappush(queue, curr.pop())\n worker_to_bike_group.append(curr)\n\n bike_status = [False] * len(bikes)\n worker_status = [-1] * len(workers)\n\n while queue:\n distance, worker, bike = heapq.heappop(queue)\n if not bike_status[bike]:\n bike_status[bike] = True\n worker_status[worker] = bike\n else:\n next_closest_bike = worker_to_bike_group[worker].pop()\n heapq.heappush(queue, next_closest_bike)\n\n return worker_status\n\n def get_distance(self, worker_loc, bike_loc):\n return abs(worker_loc[0] - bike_loc[0]) + abs(worker_loc[1] - bike_loc[1])\n","repo_name":"cabulous/leetcode","sub_path":"python/1057.py","file_name":"1057.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32030022807","text":"\"\"\"This module contains utility functions used by the SDK.\"\"\"\n\n# standard imports\nimport logging\nimport os\nimport re\nfrom datetime import datetime\nfrom typing import Optional\n\n# external imports\nimport requests\nfrom pytz import timezone\n\n# local imports\nfrom mpesa_sdk.exceptions import UnsupportedMethodError\n\nlogg = logging.getLogger(__file__)\n\n\ndef camel_to_snake(value: str):\n \"\"\"This function converts a camel case string to snake case.\n :param value: string to be converted\n :type value: str\n :return: snake case string\n :rtype: str\n \"\"\"\n value = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", value)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", value).lower()\n\n\ndef make_request(\n method: str,\n url: str,\n data: Optional[dict] = None,\n headers: Optional[dict] = None,\n **kwargs,\n):\n \"\"\"This function makes the actual HTTP request to the API.\n :param method: The HTTP method to use.\n :type method: str\n :param url: The URL to make the request to.\n :type url: str\n :param data: The data to send with the request.\n :type data: dict\n :param headers: The headers to send with the request.\n :type headers: dict\n :return: The response object.\n :rtype: requests.Response\n \"\"\"\n if method == \"GET\":\n logg.debug(\"Retrieving data from: %s.\", url)\n result = requests.get(timeout=2, url=url, **kwargs)\n elif method == \"POST\":\n logg.debug(\"Posting to: %s with: %s.\", url, data)\n result = requests.post(data=data, headers=headers, timeout=2, url=url, **kwargs)\n elif method == \"PUT\":\n logg.debug(\"Putting to: %s with: %s.\", url, data)\n result = requests.put(data=data, headers=headers, timeout=2, url=url, **kwargs)\n 
else:\n raise UnsupportedMethodError(f\"Unsupported method: {method}.\")\n return result\n\n\ndef preprocess_http_response(response: requests.Response) -> Optional[dict]:\n \"\"\"This function preprocesses the HTTP response and returns the response as a JSON object.\n :param response: The HTTP response object.\n :type response: requests.Response\n :return: The response as a JSON object.\n :rtype: dict\n \"\"\"\n status_code = response.status_code\n reason = response.reason\n\n if 100 <= status_code < 200:\n logg.error(\"Informational errors: %s, reason: %s.\", status_code, reason)\n\n elif 300 <= status_code < 400:\n logg.error(\"Redirect Issues: %s, reason: %s.\", status_code, reason)\n\n elif 400 <= status_code < 500:\n logg.error(\"Client Error: %s, reason: %s.\", status_code, reason)\n\n elif 500 <= status_code < 600:\n logg.error(\"Server Error: %s, reason: %s.\", status_code, reason)\n\n elif status_code == 200:\n logg.info(\"Request was successful, returning response.\")\n\n return response.json() or None\n\n\ndef timestamp():\n \"\"\"This function returns the current timestamp in the format required by the API.\n :return: timestamp in the format %Y%m%d%H%M%S. e.g. 20191010120000\n :rtype: str\n \"\"\"\n timestamp_format = \"%Y%m%d%H%M%S\"\n zone = os.getenv(\"TIMEZONE\")\n sys_timestamp = datetime.now(timezone(zone))\n return sys_timestamp.strftime(timestamp_format)\n","repo_name":"PhilipWafula/python-mpesa-sdk","sub_path":"mpesa_sdk/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16688137733","text":"X = int(input())\n\nhighest = 1\n\nfor i in range(1, 1001, 1):\n if i >= X:\n break\n\n for j in range(2, 1001, 1):\n n = i**j\n\n if n > X:\n break\n\n highest = max(n, highest)\n\nprint(highest)","repo_name":"caio-felipee/exercicios","sub_path":"tep-2/lista-4/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34299772356","text":"#!/usr/bin/env python3\n\nimport imaplib\nfrom email.utils import formatdate, make_msgid, parsedate_to_datetime\n\n\nprotocols = globals()\nimap = protocols[\"imap\"]\ninstagram = protocols[\"instagram\"]\n\ndef filter(elem) -> bool:\n #print(elem.to_email().as_string())\n return True\n\ndef action(elem):\n global imap\n if imap and imap.connection_inited:\n email = elem.to_email()\n imap.append(\"INBOX.Social.Instagram\", \"(Auto)\", email)\n\n#imap.get_objects(\"INBOX\")\ninstagram.run_filters(filter, action)\n","repo_name":"aurelienpierreeng/VirtualSecretary","sub_path":"examples/common/90-instagram-import-to-email.py","file_name":"90-instagram-import-to-email.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"28762081775","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport re\nfrom ast import literal_eval\nimport seaborn as sns\nimport os\nimport yaml\nimport argparse\n\nsns.set_style('ticks', {'axes.edgecolor': '0', \n 'xtick.color': '0',\n 'ytick.color': '0'})\n\n\ndef get_argparser():\n \"\"\"Return an argparse argument parser.\"\"\"\n parser = argparse.ArgumentParser(prog = 'Plot staircase',\n description = 'Generates staircase plots from all filters.')\n add_arguments(parser)\n return 
parser\n\ndef add_arguments(parser):\n parser.add_argument('--data', required=True, help='Filepath for data')\n parser.add_argument('--labels', required=True, help='Filepath for output filter labels')\n parser.add_argument('--filters', required=True, help='Filepath for filters')\n parser.add_argument('--out-dir', required=True, help='Directory to place output plots')\n\ndef HLA_cd8_converter(x):\n return x.replace(\"[\",\"\").replace(\"]\",\"\").replace(\",\", \"\").replace(\"'\",\"\").split(\" \")\n\ndef cdr3_lst_converter(x):\n return x.replace(\"[\",\"\").replace(\"]\",\"\").replace(\"'\",\"\").split(\" \")\n\ndef epitope_converter(x):\n return [y for y in x.replace(\"[\",\"\").replace(\"]\",\"\").replace(\"\\n\",\"\").split(\"'\") if (y != '') & (y != ' ')]\n\ndef peptide_hla_converter(x):\n return re.findall(\"\\w+\\s{1}\\w{1}\\d+\", x.replace(\"[\",\"\").replace(\"]\",\"\").replace(\"\\n\",\"\").replace(\"'\",\"\"))\n\ndef literal_converter(val):\n # replace NaN with '' and perform literal eval on the rest\n return [] if val == '' else literal_eval(val)\n\nconverters = {'peptide_HLA_lst': peptide_hla_converter,\n 'umi_count_lst_mhc': literal_eval,\n 'umi_count_lst_TRA': literal_converter,'umi_count_lst_TRB': literal_converter,\n 'cdr3_lst_TRA': cdr3_lst_converter,\n 'cdr3_lst_TRB': cdr3_lst_converter,\n 'HLA_lst_mhc': cdr3_lst_converter,'HLA_cd8': HLA_cd8_converter} #\n\ndef notnan(x):\n return x == x\n\ndef get_multiplets(df):\n dct = df.groupby(['ct','peptide_HLA']).gem.count() > 1\n idx = df.set_index(['ct','peptide_HLA']).index.map(dct)\n return idx.fillna(False)\n\ndef calc_binding_concordance(df, clonotype_fmt):\n gems_per_specificity = df.groupby([clonotype_fmt,'peptide_HLA']).gem.count().to_dict()\n df['gems_per_specificity'] = df.set_index([clonotype_fmt,'peptide_HLA']).index.map(gems_per_specificity)\n gems_per_spec_hla_match = df[df.HLA_match == True].groupby([clonotype_fmt, 'peptide_HLA']).gem.count().to_dict()\n df['gems_per_spec_hla_match'] = df.set_index([clonotype_fmt,'peptide_HLA']).index.map(gems_per_spec_hla_match)\n gems_per_clonotype = df.groupby([clonotype_fmt]).gem.count().to_dict()\n df['gems_per_clonotype'] = df[clonotype_fmt].map(gems_per_clonotype)\n df['binding_concordance'] = df.gems_per_specificity / df.gems_per_clonotype\n df['hla_concordance'] = df.gems_per_spec_hla_match / df.gems_per_specificity\n df['hla_concordance'] = df.hla_concordance.fillna(0)\n return df\n\n\ndef plot_specificity(title, df, max_gems, save=True):\n # Sort\n df.ct = df.ct.astype(int).astype(str)\n \n try:\n df.sort_values(by=['epitope_rank','gems_per_specificity','binding_concordance'],\n ascending=[True, False, False], inplace=True)\n except KeyError:\n df.sort_values(by=['gems_per_specificity','binding_concordance'],\n ascending=[True, False, False], inplace=True)\n\n # devide GEMs by max concordance and outliers\n dct = df.groupby('ct').binding_concordance.max()\n df['max_conc'] = df.ct.map(dct)\n idx = df.binding_concordance == df.max_conc\n\n def modify_legend(h,l):\n flag = False\n labels = []\n handles = []\n for e, le in enumerate(l):\n if flag:\n labels.append(le)\n handles.append(h[e])\n if le == 'gems_per_specificity':\n flag = True\n \n idxs = np.linspace(0,len(labels)-1,5)\n l = []\n h = []\n for i in idxs:\n l.append(labels[int(i)])\n h.append(handles[int(i)])\n return h,l\n \n # Style\n # https://seaborn.pydata.org/generated/seaborn.axes_style.html\n sns.set_style('ticks', {'axes.edgecolor': '0', #'axes.facecolor':'lightgrey',\n 'xtick.color': '0',\n 
'ytick.color': '0'})\n sns.set_context(\"paper\",font_scale=2)\n \n fig_height = int(df.peptide_HLA.nunique()/2) # 6\n fig = plt.figure(figsize=(20,fig_height)) # 6\n sns.scatterplot(data=df[idx], x='ct', y='peptide_HLA',\n size='gems_per_specificity', sizes=(10,1000), size_norm=(1,max_gems),\n hue='binding_concordance', palette='viridis_r', hue_norm=(0,1),\n legend='full', linewidth=0)\n sns.scatterplot(data=df[~idx], x='ct', y='peptide_HLA',\n size='gems_per_specificity', sizes=(10,1000), size_norm=(1,max_gems),\n hue='binding_concordance', palette='viridis_r', hue_norm=(0,1),\n legend=False, linewidth=0)\n ax = plt.gca()\n sm = plt.cm.ScalarMappable(norm=matplotlib.colors.Normalize(vmin=0,vmax=1), cmap='viridis_r')\n sm.set_array([]) # hack for cbar\n fig.colorbar(sm, ax=ax, orientation='horizontal', label='Binding Concordance', fraction=0.06*6/fig_height, pad=0.15*6/fig_height)\n\n h,l = ax.get_legend_handles_labels()\n h,l = modify_legend(h,l)\n ax.legend(h, l, bbox_to_anchor=(0.5, -0.5*6/fig_height), loc=9, frameon=False, title='GEMs', ncol=len(l))\n\n plt.xlabel('%d clonotypes (across %d GEMs)' %(df.ct.nunique(), df.gem.nunique()))\n plt.ylabel('')\n\n sns.despine(bottom=False, trim=True, offset={'left':-30})\n ax.set_xticks([])\n ax.set_xticklabels([])\n if save:\n plt.savefig(title, bbox_inches='tight', dpi=100)\n plt.show()\n\n\n##########################################################\n# Main #\n##########################################################\ntry:\n VALID = snakemake.input.df\n FLT = snakemake.input.lbl\n IDX = snakemake.input.flt\n OUT_DIR = os.path.dirname(snakemake.output[0])\nexcept:\n parser = get_argparser()\n args = parser.parse_args()\n \n VALID = args.data\n FLT = args.labels\n IDX = args.filters\n OUT_DIR = args.out_dir\n\n\ndf = pd.read_csv(VALID, converters=converters)\n\ndf.fillna({'umi_count_mhc':0, 'delta_umi_mhc':0, 'umi_count_mhc_rel':0,\n 'umi_count_cd8':0, 'delta_umi_cd8':0,\n 'umi_count_TRA':0, 'delta_umi_TRA':0,\n 'umi_count_TRB':0, 'delta_umi_TRB':0,\n 'cdr3_TRA':'','cdr3_TRB':''}, inplace=True)\ndf = calc_binding_concordance(df.copy(), 'ct')\n\nidx_df = pd.read_csv(IDX)\n\n\n##########################################################\n# Filters #\n##########################################################\n\nwith open(FLT, 'r') as f:\n flt = yaml.load(f, Loader=yaml.FullLoader)\nglobals().update(flt)\n\n\n##########################################################\n# Compute statistics #\n##########################################################\nfor label in labels:\n idx = idx_df[label]\n plt_df = calc_binding_concordance(df[idx].copy(), 'ct')\n \n filename = os.path.join(OUT_DIR, '%s.png' %( '_'.join(label.split()) ) )\n max_gems = df.gems_per_specificity.max() if df.gems_per_specificity.max() < 1000 else 1000\n \n plot_specificity(filename, plt_df, max_gems, save=True)\n plt.cla()\n plt.clf()\n plt.close()\n","repo_name":"mnielLab/itrap","sub_path":"scripts/I_filter_impact.staircase.py","file_name":"I_filter_impact.staircase.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10997485873","text":"def main():\n plate = input(\"Plate: \")\n if is_valid(plate):\n print(\"Valid\")\n else:\n print(\"Invalid\")\n\n\ndef is_valid(s):\n # vanity plates may contain min 2 - max 6 chars (letters or numbers)\n if len(s) < 2 or len(s) > 6:\n return False\n\n # Vanity plates must start at least with 2 letters\n if s[0:2].isalpha() == False:\n return 
False\n\n # numbers can not be used in the middle of the plate. must come at the end.\n for i in range(len(s)):\n if s[i].isdigit():\n if not s[i:].isdigit():\n return False\n\n # first number cannot be \"0\"\n indx = 0\n while indx < len(s): # while there are still letters or numbers left in our plate\n if s[indx].isalpha() == False:\n if s[indx] == \"0\":\n return False\n else:\n break\n indx += 1 # checking the next char of our plate till the end of our string\n\n # Periods, spaces or punction marks arenot allowed in the plate\n for c in s:\n marks = {'.', ' ', '!', '?'}\n if c in marks:\n return False\n\n # if each checking conditions goes well:\n return True\n\n\nmain()","repo_name":"berkod-ai/python-basics","sub_path":"w2/pset2/plates/plates.py","file_name":"plates.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"27516523011","text":"import time\nimport random\n\n\ndef FuNnY(not_funny):\n random.seed(time.time())\n FuNnyxd = \"\"\n for i, char in enumerate(not_funny, 0):\n if i % 3 == 0:\n FuNnyxd += char.upper()\n elif i % 3 == 1:\n FuNnyxd += char.lower()\n else:\n if random.choice([0, 1]):\n FuNnyxd += char.upper()\n else:\n FuNnyxd += char.lower()\n return FuNnyxd\n\n\nif __name__ == \"__main__\":\n print(FuNnY(\"TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\"))\n","repo_name":"Frostforus/InstaBot_0","sub_path":"FunNiER.py","file_name":"FunNiER.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"513218936","text":"nums1 = [4, 9, 5]\nnums2 = [9, 4, 9, 8, 4]\nnums1.sort()\nnums2.sort()\nr, l = 0, 0\nres = []\nwhile r < len(nums1) and l < len(nums2):\n # print(r, l)\n if nums1[r] > nums2[l]:\n l = l + 1\n elif nums1[r] < nums2[l]:\n r = r + 1\n else:\n res.append(nums1[r])\n l = l + 1\n r = r + 1\n # print(res)\nprint(res)\n","repo_name":"YourZhou/LeetCode2Learn","sub_path":"华为笔试题/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6567120333","text":"#! /usr/bin/env python\n\nimport clip, logging, logtool, raven, sys\nfrom path import Path\nfrom .config import Config\nfrom . 
import __version__\n\nlogging.basicConfig (level = logging.WARN)\nLOG = logging.getLogger (__name__)\nAPP = clip.App (name = \"xxpaper\")\nConfig.set (\"xxpaper\", {})\n\n@logtool.log_call\ndef option_logging (flag): # pylint: disable=unused-argument\n logging.root.setLevel (logging.DEBUG)\n\n@logtool.log_call\ndef option_nosentry (value):\n Config._nosentry = value # pylint: disable=protected-access\n\n@logtool.log_call\ndef option_verbose (value):\n Config._verbose = value # pylint: disable=protected-access\n\n@logtool.log_call\ndef option_version (opt): # pylint: disable=unused-argument\n clip.echo (\"Version: %s\" % __version__)\n sys.exit (0)\n\n@APP.main (name = Path (sys.argv[0]).basename (),\n description = \"18xx asset generation tool\",\n tree_view = \"-H\")\n@clip.flag (\"-H\", \"--HELP\", help = \"Help for all sub-commands\")\n@clip.flag (\"-D\", \"--debug\", name = \"debug\", help = \"Enable debug logging\",\n callback = option_logging)\n@clip.flag (\"-N\", \"--nosentry\", name = \"nosentry\",\n help = \"Do not send exception reports to the developers\",\n callback = option_logging)\n@clip.flag (\"-v\", \"--verbose\", help = \"Show keys as they are resolved\",\n callback = option_verbose)\n@clip.flag (\"-V\", \"--version\", help = \"Report installed version\",\n callback = option_version)\n@logtool.log_call\ndef app_main (*args, **kwargs): # pylint: disable=unused-argument\n if kwargs[\"debug\"]:\n logging.basicConfig (level = logging.DEBUG)\n\n@logtool.log_call\ndef main ():\n try:\n APP.run ()\n except KeyboardInterrupt:\n pass\n except clip.ClipExit:\n sys.exit (0)\n except Exception as e:\n logtool.log_fault (e)\n sys.stderr.write (\"Something broke!\\n\")\n if not (getattr (Config, \"_nosentry\", False)\n or Config.get (\"user/nosentry\", params = {\"default\": True})):\n client = raven.Client (\n \"https://250e838eaff24eee9461682bc7160904\"\n \":b455442d9dcb4773a82786844f430386@sentry.io/127918\")\n h = client.captureException ()\n sys.stderr.write (\"\\t Sentry filed: %s\\n\" % h)\n sys.exit (1)\n\nif __name__ == \"__main__\":\n main ()\n","repo_name":"clearclaw/xxpaper","sub_path":"xxpaper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"72"} +{"seq_id":"37128640832","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nfrom math import ceil, floor\n\nimport numpy\nimport h5py\nfrom pyx import *\nfrom scipy.stats import linregress\nfrom scipy.optimize import curve_fit\n\nunit.set(defaultunit=\"cm\")\ntext.set(mode=\"latex\")\ntext.preamble(r\"\\usepackage{uarial}\")\ntext.preamble(r\"\\usepackage{amsmath}\")\ntext.preamble(r\"\\renewcommand*\\familydefault{\\sfdefault}\")\ntext.preamble(r\"\\renewcommand*\\encodingdefault{T1}\")\ntext.preamble(r\"\\newcommand{\\textoverline}[1]{$\\overline{\\mbox{#1}}$}\")\n\npage_width = 16.9\npage_height = 22.4\ntext_size = -3\nhoffset = 3.2\nvoffset = 0.75\nvoffset2 = 0.25\npwidth = (page_width - hoffset) / 4.0\npheight = page_height - voffset - voffset2 * 3\n\nbase_dir = \".\"\n\ndef main():\n out_fname = \"%s/Figures/Figure_S3.pdf\" % base_dir\n c = canvas.canvas()\n data_fname = \"%s/Results/quasar_replicate_results.txt\" % base_dir\n data = load_rdata(data_fname)\n resolutions = {}\n sizes = numpy.zeros(4, dtype=numpy.float64)\n all_samples = {}\n genomes = ['hg38', 'mm10', 'dm6']\n total = 0\n for i, gen in enumerate(genomes):\n all_samples[gen] = []\n sizes[i + 1] = sizes[i]\n where = 
numpy.where(data['genome'] == gen)\n resolutions[gen] = numpy.unique(data['resolution'][numpy.where(data['genome'] == gen)])\n samples = numpy.unique(numpy.r_[data['sample1'][where], data['sample2'][where]])\n for samp in samples:\n if samp.count('Pseudo') == 0:\n sizes[i + 1] += 1\n all_samples[gen].append(samp)\n bounds = numpy.zeros((4, 2), dtype=numpy.float64)\n bounds[:, 0] = 0\n bounds[:, 1] = 1.0\n spans = bounds[:, 1] - bounds[:, 0]\n bounds[:, 0] -= spans * 0.05\n bounds[:, 1] += spans * 0.05\n total = sizes[-1]\n heights = (sizes[1:] - sizes[:-1]) / sizes[-1] * pheight + voffset2\n span = pheight / total\n pos = page_height - span * 0.2 - voffset2\n pos2 = page_height\n gen_label = {'hg38': 'Human', 'mm10': 'Mouse', 'dm6': 'Fruit Fly'}\n for i, gen in enumerate(genomes):\n for j in range(len(all_samples[gen])):\n temp = all_samples[gen][j].split('_')\n c.text(hoffset - 0.01, pos, ' '.join([temp[0], temp[2], temp[4]]),\n [text.halign.right, text.valign.top, text.size(text_size - 1)])\n pos -= span\n pos -= voffset2\n pos2 -= heights[i]\n c.text(0, pos2 + (heights[i] - voffset2) * 0.5, gen_label[gen],\n [text.halign.center, text.valign.top, text.size(text_size), trafo.rotate(90)])\n for j in range(resolutions[gen].shape[0]):\n where = numpy.where((data['genome'] == gen) & (data['resolution'] == resolutions[gen][j]))[0]\n c.insert(plot_samples(data[where], heights[i], resolutions[gen][j], all_samples[gen], bounds[j, :],\n ((i == 2) | ((i == 1) & (j == 3)))), [trafo.translate(hoffset + pwidth * j, pos2)])\n c.insert(plot_key(heights[-1] * 0.6), [trafo.translate(hoffset + pwidth * 3 + 0.5, 0.5)])\n c.writePDFfile(out_fname)\n\ndef plot_samples(data, height, res, samples, bounds, Xlab):\n pheight = height - voffset2\n step = pheight / float(len(samples))\n indices = {}\n for i, sample in enumerate(samples):\n indices[sample] = pheight - (i + 0.5) * step\n c = canvas.canvas()\n maxX = bounds[1]\n minX = bounds[0]\n span = maxX - minX\n for i in range(int(ceil(minX * 10)), int(floor(maxX * 10) + 1), 2):\n val = i / 10.0\n X = (val - minX) / span * pwidth\n if Xlab:\n c.stroke(path.line(X, 0, X, -0.05))\n c.text(X, -0.1, \"%0.1f\" % val,\n [text.halign.left, text.valign.middle, text.size(text_size), trafo.rotate(-90)])\n c.stroke(path.line(X, 0, X, pheight), [color.gray(0.9)])\n if Xlab:\n c.text(pwidth * 0.5, -voffset, \"Replicate Score\",\n [text.halign.center, text.valign.bottom, text.size(text_size)])\n ramp = Ramp(0.0, 4.0)\n strokes = [\n path.circle(0, 0, 0.08),\n path.path(path.moveto(-0.1, -0.0867), path.lineto(0, 0.0867), path.lineto(0.1, -0.0867), path.closepath()),\n path.rect(-0.075, -0.075, 0.15, 0.15),\n path.path(path.moveto(-0.1, 0), path.lineto(0, 0.1), path.lineto(0.1, 0),\n path.lineto(0, -0.1), path.closepath()),\n ]\n points = [[], [], [], []]\n\n for i in range(data.shape[0]):\n name1 = data['sample1'][i]\n name2 = data['sample2'][i]\n t1 = name1.split('_')[2].split('H1-')[-1].split('-Dil')[0].split('-IS')[0]\n t2 = name2.split('_')[2].split('H1-')[-1].split('-Dil')[0].split('-IS')[0]\n val = (data['score'][i] - minX) / span * pwidth\n if name1.count('Pseudo') + name2.count('Pseudo') > 0:\n jitter = numpy.random.normal(0, step * 0.1)\n index = 1\n elif name1.split('Rep')[0] == name2.split('Rep')[0]:\n jitter = 0\n index = 3\n elif t1 == t2:\n jitter = numpy.random.normal(0, step * 0.1)\n index = 2\n else:\n jitter = numpy.random.normal(0, step * 0.1)\n index = 0\n if name1 in indices:\n Y = indices[name1]\n points[index].append([val, Y + jitter])\n if name2 in 
indices:\n Y = indices[name2]\n points[index].append([val, Y + jitter])\n for i in range(len(points)):\n for j in range(len(points[i])):\n if i == 0:\n c.stroke(strokes[i],\n [trafo.scale(0.6), trafo.translate(points[i][j][0], points[i][j][1]), ramp.get_rgb_color(i)])\n else:\n c.stroke(strokes[i], [trafo.scale(0.6), style.linewidth.thin, deco.filled([ramp.get_rgb_color(i)]),\n trafo.translate(points[i][j][0], points[i][j][1])])\n c.stroke(path.rect(0, 0, pwidth, pheight))\n if res >= 1000:\n label = \"%iMb\" % (res / 1000)\n else:\n label = \"%iKb\" % (res)\n c.text(pwidth * 0.5, height - 0.02, label, [text.halign.center, text.valign.top, text.size(text_size)])\n for i in range(1, len(samples)):\n c.stroke(path.line(0, step * i, pwidth, step * i), [style.linestyle.dotted])\n return c\n\ndef plot_key(height):\n ramp = Ramp(0.0, 4.0)\n strokes = [\n path.circle(0, 0, 0.08),\n path.path(path.moveto(-0.1, -0.0867), path.lineto(0, 0.0867), path.lineto(0.1, -0.0867), path.closepath()),\n path.rect(-0.075, -0.075, 0.15, 0.15),\n path.path(path.moveto(-0.1, 0), path.lineto(0, 0.1), path.lineto(0.1, 0),\n path.lineto(0, -0.1), path.closepath()),\n ]\n c = canvas.canvas()\n step = height / 5.0\n for i, label in enumerate(['Unrelated', 'Pseudo Replicate', 'Same Tissue Type', 'Biological Replicate']):\n if i == 0:\n c.stroke(strokes[i], [ramp.get_rgb_color(i), trafo.translate(0, step * (i + 0.5))])\n else:\n c.stroke(strokes[i], [style.linewidth.thin, deco.filled([ramp.get_rgb_color(i)]),\n trafo.translate(0, step * (i + 0.5))])\n c.text(0.2, step * (i + 0.5) + 0.08, label, [text.halign.left, text.valign.top, text.size(text_size)])\n return c\n\ndef load_rdata(fname):\n data = []\n infile = open(fname)\n line = infile.readline()\n line = infile.readline()\n while line:\n temp = line.rstrip('\\n').split('\\t')\n data.append((temp[0], temp[1], temp[2], int(temp[3]), int(temp[4]) / 1000, float(temp[5])))\n line = infile.readline()\n data = numpy.array(data, dtype=numpy.dtype([('sample1', 'S50'), ('sample2', 'S50'), ('genome', 'S4'),\n ('coverage', numpy.int32), ('resolution', numpy.int32), ('score', numpy.float64)]))\n for i in range(data.shape[0]):\n if data['sample1'][i] > data['sample2'][i]:\n data['sample1'][i], data['sample2'][i] = data['sample2'][i], data['sample1'][i]\n return data\n\n\nclass Ramp():\n def __init__(self, minval, maxval):\n self.min = float(minval)\n self.max = float(maxval)\n self.breaks = numpy.linspace(minval, maxval, 8)\n self.span = self.breaks[1] - self.breaks[0]\n self.colors = numpy.array([\n [0, 0, 0],\n [26, 26, 89],\n [77, 38, 166],\n [153, 51, 128],\n [255, 64, 38],\n [230, 140, 0],\n [230, 191, 26],\n [230, 230, 128]], dtype=numpy.uint32)\n self.base = 256 ** 3 * 255\n self.scale = numpy.array([1, 256, 256 ** 2], dtype=numpy.uint32)\n\n def get_color(self, val):\n index = numpy.searchsorted(self.breaks[1:-1], val)\n frac = numpy.maximum(0.0, numpy.minimum(1.0, (val - self.breaks[index]) / self.span))\n cvals = (self.colors[index, :] * (1.0 - frac) +\n self.colors[index + 1, :] * frac) / 255.0\n return color.rgb(r=cvals[0], g=cvals[1], b=cvals[2])\n\n def get_rgb_color(self, val):\n index = numpy.searchsorted(self.breaks[1:-1], val)\n frac = max(0.0, min(1.0, (val - self.breaks[index]) / self.span))\n cvals = (self.colors[index] * (1.0 - frac) + self.colors[index + 1] * frac) / 255.0\n return color.rgb(r=cvals[0], g=cvals[1], b=cvals[2])\n\nif __name__ == \"__main__\":\n 
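# A self-contained restatement (ours) of the piecewise-linear color ramp that
# the Ramp class above implements: color stops on an even grid, then a linear
# blend of the two stops bracketing the value. The two stops below are
# placeholders, not the paper's palette.
import numpy

def ramp_rgb(val, vmin, vmax, stops):
    stops = numpy.asarray(stops, dtype=float)        # (N, 3) values in 0..255
    breaks = numpy.linspace(vmin, vmax, len(stops))  # evenly spaced stops
    span = breaks[1] - breaks[0]
    i = numpy.searchsorted(breaks[1:-1], val)        # index of the left stop
    frac = max(0.0, min(1.0, (val - breaks[i]) / span))
    return (stops[i] * (1.0 - frac) + stops[i + 1] * frac) / 255.0

print(ramp_rgb(0.5, 0.0, 1.0, [[0, 0, 0], [255, 255, 255]]))  # -> [0.5 0.5 0.5]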
main()\n","repo_name":"bxlab/Quasar_PaperAnalysis","sub_path":"Scripts/Figure_S3.py","file_name":"Figure_S3.py","file_ext":"py","file_size_in_byte":9155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31463285715","text":"from copy import deepcopy\n\n\nclass Grid:\n \"\"\"\n 2D grid with (x, y) int indexed internal storage\n Has .width .height size properties\n \"\"\"\n\n def __init__(self, width, height):\n self.height = height\n self.width = width\n self.array = [[None for i in range(width)] for j in range(height)]\n\n def __str__(self):\n \"\"\"\n selfstr = ''\n for y in range(self.height):\n selfstr += '\\n | '\n for x in range(self.width):\n selfstr += f'{self.grid[y][x]} | '\n return selfstr\n \"\"\"\n return f\"Grid({self.height}, {self.width}, first = {self.array[0][0]})\"\n\n def __repr__(self):\n return f\"Grid.build({self.array})\"\n\n def __eq__(self, other):\n if isinstance(other, Grid):\n if self.array == other.array:\n return True\n elif type(other) == list:\n if self.array == other:\n return True\n return False\n\n def __len__(self):\n return len(self.array)\n\n def in_bounds(self, x, y):\n if x >= 0 and x < len(self.array[0]) and y >= 0 and y < len(self.array):\n return True\n else:\n return False\n\n def get(self, x, y):\n if self.in_bounds(x, y):\n return self.array[y][x]\n else:\n raise IndexError(f\"Invalid grid index: {y}, {x}\")\n\n def set(self, x, y, val):\n if self.in_bounds(x, y):\n self.array[y][x] = val\n else:\n raise IndexError(f\"Invalid grid index: {y}, {x}\")\n\n def copy(self):\n return deepcopy(self)\n\n @staticmethod\n def build(lst):\n Grid.check_list_malformed(lst)\n width = len(lst)\n height = len(lst[0])\n newgrid = Grid(height, width)\n newgrid.array = deepcopy(lst)\n return newgrid\n\n @staticmethod\n def check_list_malformed(lst):\n if type(lst) == list and len(lst) > 0:\n length = len(lst)\n height = len(lst[0])\n for x in range(length):\n if type(lst) != list or len(lst[x]) != height:\n raise ValueError(\n \"Invalid input list. Ensure list is rectangular and all primary array values are lists.\"\n )\n","repo_name":"TimmyTheTaterTot/Pong","sub_path":"Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32083475792","text":"# Helen O'Shea uploaded on the 6/2/18\n# Adapted from Ian McLoughlin's code https://github.com/ianmcloughlin/python-fib/blob/master/fibname.py\n# Ian McLoughlin\n# A program that displays Fibonacci numbers.\n\ndef fib(n): # define function\n \"\"\"This function returns the nth Fibonacci number.\"\"\"\n i = 0 # initalize variables\n j = 1 # initalize variables\n n = n - 1\n\n while n >= 0: # while n is a non negative integer\n i, j = j, i + j # i = j and j = i+j\n n = n - 1 # update n\n \n return i \n\n# Test the function with the following value.\nx = 22 # H = 8th letter of alphabet N= 14th letter of alphabet 8+14 = 22\nans = fib(x)\nprint(\"Fibonacci number\", x, \"is\", ans)\n\n\n# comments on the forum \n# week 1 forum comments My name is Helen, so the first and last letter of my name (H + N = 8 + 14) give the number 22. 
The 22nd Fibonacci number is 17,711.\n# https://learnonline.gmit.ie/mod/forum/discuss.php?d=10326\n\n","repo_name":"Osheah/programming-scripting-assignments","sub_path":"week1/fibname.py","file_name":"fibname.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17197478568","text":"from flask import Flask, render_template, flash, session, redirect, url_for\nfrom form import HouseForm\nfrom postcodes import df\nimport pickle\nfrom flask_bootstrap import Bootstrap\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'blablalala - hard to guess string'\nBootstrap(app)\n\nwith open(\"linear_regression.pkl\", \"rb\") as pfile:\n model = pickle.load(pfile)\n\ndef get_lat_lon(postcode):\n lat_lon = df[df[\"postcode\"] == postcode.upper()]\n return [lat_lon.latitude.values[0], lat_lon.longitude.values[0]]\n\n\ndef pt_back(pt):\n pts = ['D', 'F', 'S', 'T']\n return [0] * pts.index(pt) + [1] + (3 - pts.index(pt)) * [0]\n\n\ndef to_predict_list(form):\n return [[form.bedroomnumber.data, form.duration.data, form.old_new.data] + \n get_lat_lon(form.postcode.data) + pt_back(form.propertytype.data)]\n\n\ndef ValuePredictor(to_predict_list):\n result = model.predict(to_predict_list)\n return result[0]\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n form = HouseForm()\n predicted_v = \"\"\n if form.validate_on_submit():\n predicted_v = '£' + '{:20,.2f}'.format(round(ValuePredictor(to_predict_list(form)), 2))\n session[\"value\"] = predicted_v\n return redirect(url_for(\"predicted\"))\n elif form.errors:\n for errors in form.errors.values():\n for error in errors:\n flash(f\"{error}\", category='error')\n return render_template(\"index.html\", form=form)\n\n@app.route(\"/predicted\")\ndef predicted():\n return render_template(\"predicted.html\", predicted=session[\"value\"])\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"green-fox-academy/SantiDu","sub_path":"week-07/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17935359020","text":"from typing import Any\n\nwaiting_time_for_updates: float\nmyPath: str\nmyFn: str\ncomputation: str\nmyHost: Any\nmyUser: Any\nmyPassword: Any\nmyPort: Any\nres: str\nsave_figs: bool\n\ndef find_numbers_in_string(a_string_containing_numbers: Any): ...\n\ntransport: Any\nsftp: Any\nmyFile: Any\niterations: Any\nBx_res: Any\nBy_res: Any\nBz_res: Any\nEx_res: Any\nEy_res: Any\nEz_res: Any\nrho_res: Any\nvx_res: Any\nvy_res: Any\nvz_res: Any\nT_res: Any\nCFL: Any\nPhysTime: Any\nWallTime: Any\nclient: Any\nfileObject: Any\nlenfileObject: Any\nnums: Any\nfig0: Any\nax0: Any\nlh0: Any\nax1: Any\nlh1: Any\nfig1: Any\nax2: Any\nlh2: Any\nlh3: Any\nlh4: Any\nlh5: Any\nax3: Any\nlh6: Any\nfig2: Any\nax4: Any\nlh7: Any\nax5: Any\nlh8: Any\n","repo_name":"proganalysis/python3_types","sub_path":"Result/4079files/uninferred/3384-Uninferred.pyi","file_name":"3384-Uninferred.pyi","file_ext":"pyi","file_size_in_byte":684,"program_lang":"python","lang":"ca","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"74268957034","text":"import json\nfrom db.model import db, User\nfrom flask import Blueprint, request, jsonify\n\nbp_users = Blueprint('bp_users', __name__)\n\n\n@bp_users.get('/')\ndef pg_all_users():\n all_users = User.query.all()\n users_response = []\n\n for user in all_users:\n 
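# The pt_back helper in the Flask app above hand-rolls a one-hot encoding of
# the four property types. The same idea written generically (the function
# name and sample categories are ours, not from the app):
def one_hot(value, categories):
    # a single 1 at the category's position; the order must match the column
    # order the regression model was trained with
    return [1 if c == value else 0 for c in categories]

print(one_hot("S", ["D", "F", "S", "T"]))  # -> [0, 0, 1, 0]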
users_response.append({'id': user.id,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'age': user.age,\n 'email': user.email,\n 'role': user.role,\n 'phone': user.phone})\n # return json.dumps(users_response, indent=2)\n return jsonify(users_response)\n\n\n@bp_users.get('/<int:id>')\ndef pg_user_by_id(id):\n user_by_id = User.query.get(id)\n if user_by_id is None:\n return 'user not found'\n\n user = {'id': user_by_id.id,\n 'first_name': user_by_id.first_name,\n 'last_name': user_by_id.last_name,\n 'age': user_by_id.age,\n 'email': user_by_id.email,\n 'role': user_by_id.role,\n 'phone': user_by_id.phone}\n return json.dumps(user)\n\n\n@bp_users.post('/')\ndef add_new_user():\n data_for_new_user = request.json\n new_user = User(\n first_name=data_for_new_user.get('first_name'),\n last_name=data_for_new_user.get('last_name'),\n age=data_for_new_user.get('age'),\n email=data_for_new_user.get('email'),\n role=data_for_new_user.get('role'),\n phone=data_for_new_user.get('phone')\n )\n db.session.add(new_user)\n db.session.commit()\n return 'user added'\n\n\n@bp_users.put('/<int:pk>')\ndef update_user(pk):\n data_for_update_user = request.json\n user_for_update = User.query.get(pk)\n\n user_for_update.first_name = data_for_update_user.get('first_name')\n user_for_update.last_name = data_for_update_user.get('last_name')\n user_for_update.age = data_for_update_user.get('age')\n user_for_update.email = data_for_update_user.get('email')\n user_for_update.role = data_for_update_user.get('role')\n user_for_update.phone = data_for_update_user.get('phone')\n db.session.add(user_for_update)\n db.session.commit()\n return 'user updated'\n\n\n@bp_users.delete('/<int:pk>')\ndef delete_user(pk):\n user_for_delete = User.query.get(pk)\n db.session.delete(user_for_delete)\n db.session.commit()\n return 'user deleted'\n","repo_name":"maksNd/lesson_16_home_work","sub_path":"bp_users/bp_users.py","file_name":"bp_users.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23893633603","text":"#encoding:utf-8\nfrom scipy.linalg import *\nimport numpy as np\n# initial settings\nalpha = 2.0\nbeta = 25\nmu = np.array([0,0])\nsigma = np.matrix([[alpha, 0], [0, alpha]])\na0 = -0.3\na1 = 0.5\nxv = [0.9, -0.6]\ntv = [0.05, -0.8]\nfor xn in range(18):\n xn = 2*np.random.random()-1\n xv += [xn]\n tv += [a0+a1*xn+np.random.normal(0, 0.2, )]\n","repo_name":"secureAImonster/MachineLearning","sub_path":"BasicML/fig3_7.py","file_name":"fig3_7.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20868626481","text":"#########################################################################################################\n# Design a Python application which creates two threads named even and odd. The even\n# thread will display the first 10 even numbers and the odd thread will display the first 10 odd numbers. 
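# A minimal sketch of mounting the bp_users blueprint above on an app. The
# module layout follows the record's sub_path (bp_users/bp_users.py); the
# url_prefix is our choice, and the SQLAlchemy setup in db.model is omitted.
from flask import Flask
from bp_users.bp_users import bp_users

app = Flask(__name__)
app.register_blueprint(bp_users, url_prefix="/users")
# GET  /users/          -> pg_all_users
# GET  /users/<int:id>  -> pg_user_by_id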
\n# Author : Annaso Chavan\n#########################################################################################################\n\nimport threading\n\ndef Even():\n\tprint(\"Even numbers are \")\n\tcount = 1\n\ti = 1\n\twhile count <= 10:\n\t\tif i % 2 == 0:\n\t\t\tprint(i, end=\" \")\n\t\t\tcount += 1\n\t\ti += 1\n\ndef Odd():\n\tprint(\"\\nOdd numbers are \")\n\tcount = 1\n\ti = 1\n\twhile count <= 10:\n\t\tif i % 2 == 1:\n\t\t\tprint(i, end=\" \")\n\t\t\tcount += 1\n\t\ti += 1\n\t\ndef main():\n\tt1 = threading.Thread(target = Even)\n\tt2 = threading.Thread(target = Odd)\n\t\n\tt1.start()\n\tt2.start()\n\t\n\tt1.join()\n\tt2.join()\n\t\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"annasochavan777/Python-Assignment","sub_path":"Assignment-8/Assignment8_1.py","file_name":"Assignment8_1.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36217470574","text":"import random\n#print(dir(random)) # call the built-in dir on random\n# dir can be used to see which methods are available\n\n# try out choice\nnumber = random.choice(range(10))\n#print(number)\n\n# pick at random from a list\narr = [100,40,30,50]\npick = random.choice(arr)\n#print(pick)\n\n# try it with a dictionary too\nmask = {\n '100' : '삼성',\n '40' : '역삼',\n '30' : '선릉',\n '50' : '영등포'\n}\n#print(mask[str(pick)])\n\n\n#sample\nlotto = random.sample(range(1,46),6)\nprint(sorted(lotto))\n","repo_name":"myomi2767/python","sub_path":"intro/lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10367283429","text":"nums = [0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4]\n\ndef remove_duplicatesII(nums):\n i = 0\n j = 1\n k = 2\n spaces = 0\n\n while k < len(nums):\n if nums[i] == nums[j] == nums[k]:\n nums.pop(k)\n spaces += 1\n else:\n i += 1\n j += 1\n k += 1\n nums.extend([\"_\" for i in range(spaces)])\n return len(nums) - spaces, nums\n\nprint(remove_duplicatesII(nums))\n\n","repo_name":"devRaphael13/Datastructures_and_algorithms","sub_path":"remove_duplicatesII.py","file_name":"remove_duplicatesII.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8811384830","text":"import binascii\nfrom hex_operations import xor_hex\nfrom hex_operations import xor\nfrom hex_operations import int2hexbyte\nimport matplotlib.pyplot as plt\n\nDEBUG = 0\n\nletters = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p',\n 'q','r','s','t','u','v','w','x','y','z']\nletters += [x.upper() for x in letters]\n\nspecial_letters = [' ', '.', ',']\nletters += special_letters\n\nvalues = ['8.167','1.492','2.782','4.253','12.702','2.228','2.015','6.094',\n '6.966','0.153','0.772','4.025','2.406','6.749', '7.507','1.929',\n '0.095','5.987','6.327','9.056','2.758','0.978','2.360','0.150',\n '1.974','0.074']\n\nspecial_values = ['13', '2', '2']\n\nvalues += [x.upper() for x in values]\nvalues += special_values\n\ndef find_char(sequence, _plot=False):\n h1 = binascii.a2b_hex(sequence)\n return find_char_hex(h1, _plot)[0][0]\n\n\ndef find_char_hex(h1, _plot=False):\n ranking = {}\n amount_of_bytes = 1 # for finding more bytes -- not functional atm\n\n\n for char in range(amount_of_bytes * 256):\n h2 = int2hexbyte(char)\n\n temp_h3 = xor_hex(h1,h2)\n\n ranking[char] = build_char_dictionary(binascii.a2b_hex(temp_h3))\n\n temp_i = 0\n temp_rank = 0\n\n for i, rank in 
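# The two threads above both write to stdout, so their output can interleave
# mid-line. A hedged sketch of the usual remedy: share a Lock and hold it
# around each burst of printing (all names here are ours).
import threading

lock = threading.Lock()

def show(label, numbers):
    with lock:                     # only one thread prints at a time
        print(label, *numbers)

t1 = threading.Thread(target=show, args=("even:", list(range(2, 21, 2))))
t2 = threading.Thread(target=show, args=("odd: ", list(range(1, 20, 2))))
t1.start(); t2.start()
t1.join(); t2.join()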
enumerate(rank_list(ranking, _plot)):\n if rank > temp_rank:\n temp_rank = rank\n temp_i = i\n\n rank = rank_list(ranking, _plot)\n scores = [[int2hexbyte(char), score] for char, score in enumerate(rank)]\n scores_sorted = sorted(scores, key=lambda x: x[1], reverse=True)\n if DEBUG: print(\"scores\", scores_sorted[0:5])\n\n return scores_sorted\n\n\ndef build_char_dictionary(string1):\n ranking = {}\n for char_in_seq in string1:\n if chr(char_in_seq) in letters:\n if not ranking.get(char_in_seq):\n ranking[char_in_seq] = 0\n\n ranking[char_in_seq] += 1\n\n else: # TODO Add -1 to everything\n if not ranking.get(-1):\n ranking[-1] = 0\n\n ranking[-1] += 1\n\n return ranking\n\n\n\ndef rank_list(ranking_dict, _plot=False):\n \"\"\"apply some ranking\"\"\"\n\n # TODO: temporary fix, please make more general\n\n rank = []\n normalize = len(ranking_dict.items())\n\n for key, value in ranking_dict.items():\n score = 0\n\n for i in range(len(letters)):\n temp = value.get(ord(letters[i])) or 0\n score += temp * float(values[i])\n\n temp = value.get(-1) or 0\n score -= temp\n\n rank.append(score)\n\n if _plot:\n plot(rank)\n\n\n return rank\n\n\ndef plot(result):\n plt.plot(range(1,len(result) + 1), result, 'ro')\n plt.axis([0, len(result), 0, max(result)])\n plt.ylabel('Score')\n plt.show()\n\n\n\ndef find_xor_sequence(sequence):\n return xor_hex(binascii.a2b_hex(sequence), find_char(sequence))\n\n\ndef niceify_find_xor_sequence(seq):\n return binascii.a2b_hex(find_xor_sequence(seq))\n\n\nif __name__ == '__main__':\n print(find_char('1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'))\n print(niceify_find_xor_sequence('1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'))\n","repo_name":"ProgHaj/crypto-scripts","sub_path":"toolbox/find_sequence.py","file_name":"find_sequence.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74040344872","text":"import time\nfrom conf import regions as data\n\nclass itemlist:\n items = None\n ids = None\n names = None\n\n def __init__(self, itemdict):\n self.items = itemdict\n x = dict()\n for a in itemdict:\n x[a[\"id\"]] = a[\"name\"]\n self.ids = x.copy()\n x = dict()\n for a in itemdict:\n x[a[\"name\"]] = a[\"id\"]\n self.names = x.copy()\n\n def get_by_id(self, id):\n return self.ids[id]\n\n def get_by_name(self, name):\n return self.names[name]\n\n def get_ids(self):\n return self.ids.keys()\n\n def get_names(self):\n return self.ids.values()\n\nclass list:\n items = None\n keys = None\n store = None\n\n def __init__(self, itemdict):\n self.keys = itemdict[0].keys()\n self.store = {}\n for k in self.keys:\n self.store[k.lower()] = {}\n for item in itemdict:\n j = str(item[k]).lower()\n if j not in self.store[k.lower()]:\n self.store[k.lower()][j] = []\n self.store[k.lower()][j].append(item)\n \n def get_by(self, key, value):\n key = key.lower()\n value = str(value).lower()\n if (key not in [x.lower() for x in self.keys] \n or value not in [x.lower() for x in self.store[key.lower()]]):\n return None\n return self.store[key][value][0]\n\n def get_by_full(self, key, value):\n key = key.lower()\n value = value.lower()\n if (key not in [x.lower() for x in self.keys] \n or value not in [x.lower() for x in self.store[key.lower()]]):\n return None\n return self.store[key][value]\n\n def get_by_any(self, value):\n it = None\n value = value.lower()\n for key in self.keys:\n if value 
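# A dependency-free restatement (ours) of what find_char_hex above computes:
# try all 256 single-byte keys, score each XOR-decryption by how English-like
# it looks, and keep the best. The scoring below is a crude stand-in for the
# letter-frequency table used in the original.
def best_single_byte_xor(data):
    def score(bs):
        # reward letters and spaces, penalize everything else
        return sum(1 if chr(b).isalpha() or b == 0x20 else -1 for b in bs)
    return max(
        ((key, bytes(b ^ key for b in data)) for key in range(256)),
        key=lambda kv: score(kv[1]),
    )

ct = bytes.fromhex("1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736")
key, pt = best_single_byte_xor(ct)
print(hex(key), pt)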
in [x.lower() for x in self.get_possible_values(key)]:\n it = self.get_by(key, value)\n break\n return it\n\n def get_all(self, key):\n key = key.lower()\n if key not in [x.lower() for x in self.keys]:\n return None\n return self.store[key].values()\n\n def get_possible_values(self, key):\n key = key.lower()\n if key not in [x.lower() for x in self.keys]:\n return None\n return self.store[key].keys()\n\n def get_keys(self):\n return self.keys\n \n def dump(self):\n return self.store\n\nclass _cache:\n store = {}\n\n def set(self, name, region, data):\n now = time.time()\n if name not in self.store:\n self.store[name] = {}\n self.store[name][region] = {\"ts\": now, \"data\": data}\n return self.store[name][region]\n\n def get(self, name, region):\n now = time.time()\n if (\n name in self.store\n and region in self.store[name]\n and (now - self.store[name][region][\"ts\"]) < 3600\n ):\n return self.store[name][region]\n return None\n\n\ndef find_max(market, station=0):\n order = {\"price\": 0.00}\n for m in market:\n if (float(m[\"price\"]) > float(order[\"price\"])) and (\n station == 0 or m[\"location_id\"] == station\n ):\n order = m\n return order\n\n\ndef find_min(market, station=0):\n order = {\"price\": 0.00}\n for m in market:\n if (\n float(order[\"price\"]) == 0.00 or float(m[\"price\"]) < float(order[\"price\"])\n ) and (station == 0 or m[\"location_id\"] == station):\n order = m\n return order\n\n\ncache = _cache()\nregions = list(data.regions)\n","repo_name":"speisekatze/EVEMarket-for-Discord","sub_path":"src/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5523696420","text":"import pickle\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom . 
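# The _cache class above keys entries by (name, region) and treats them as
# stale after 3600 s. The same time-to-live idea in a generic sketch (the
# class name and 60 s default are ours):
import time

class TTLCache:
    def __init__(self, ttl=60):
        self.ttl = ttl
        self.store = {}

    def set(self, key, value):
        self.store[key] = (time.time(), value)

    def get(self, key):
        hit = self.store.get(key)
        if hit and time.time() - hit[0] < self.ttl:
            return hit[1]
        return None  # missing or expired

c = TTLCache(ttl=1)
c.set(("tritanium", "the_forge"), 42)
print(c.get(("tritanium", "the_forge")))  # -> 42 while fresh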
import __version__, util\n\n\n@click.command()\n@click.version_option(version=__version__)\n@click.argument(\"grid_file\", type=click.Path(exists=True))\ndef main(grid_file):\n with open(grid_file, \"rb\") as f:\n grid_results = pickle.load(f)\n\n for i, rank in enumerate(grid_results[\"rank_test_score\"]):\n if rank <= 10:\n click.echo(f\"Rank {rank}:\")\n click.echo(f\"{grid_results['params'][i]}\")\n\n best_params_per_classifier = {\n \"KNeighborsClassifier\": (None, 0),\n \"mlp\": (None, 0),\n \"RandomForestClassifier\": (None, 0),\n \"LGBMClassifier\": (None, 0),\n \"SVC\": (None, 0),\n }\n for i in range(len(grid_results[\"mean_test_score\"])):\n classifier_name = get_classifier_name(grid_results[\"params\"][i])\n score = grid_results[\"mean_test_score\"][i]\n for key in best_params_per_classifier.keys():\n if key in classifier_name:\n if score > best_params_per_classifier[key][1]:\n best_params_per_classifier[key] = (grid_results[\"params\"][i], score)\n break\n click.echo(\"Best params per classifier:\")\n for key, value in best_params_per_classifier.items():\n click.echo(f\"Best {key}:\")\n click.echo(value)\n\n _, ax = plt.subplots()\n y_pos = np.arange(len(grid_results[\"mean_test_score\"]))\n ax.barh(\n y_pos,\n grid_results[\"mean_test_score\"],\n tick_label=grid_results[\"rank_test_score\"],\n xerr=grid_results[\"std_test_score\"],\n align=\"center\",\n )\n ax.set_yticks(y_pos)\n ax.set_xlabel(\"file-based Balanced accuracy\")\n ax.set_xlim([0, 1])\n util.finish_plot(\"mean-grid-score\", None, True)\n\n scores = combine_split_scores(grid_results)\n scores = drop_unneeded_classifiers(scores)\n sns.swarmplot(data=scores, x=\"score\", y=\"combination\", hue=\"run\")\n sns.pointplot(data=scores, x=\"score\", y=\"combination\", join=False, ci=\"sd\")\n util.finish_plot(\"swarmplot-score\", None, True)\n\n skip = \"classifier__min_child_samples\"\n scores = combine_split_scores_relplot(grid_results, skip)\n sns.relplot(\n data=scores,\n x=skip,\n y=\"score\",\n col=\"combination\",\n col_wrap=4,\n kind=\"line\",\n )\n util.finish_plot(\"relplot-score\", None, True)\n\n\ndef drop_unneeded_classifiers(scores):\n # scores = scores.loc[scores[\"classifier\"] != \"mlp\", :]\n # scores = scores.loc[scores[\"classifier\"] != \"RandomForestClassifier()\", :]\n # scores = scores.loc[scores[\"classifier\"] != \"KNeighborsClassifier()\", :]\n # scores = scores.loc[scores[\"classifier\"] != \"SVC()\", :]\n return scores\n\n\ndef get_classifier_name(params):\n if \"classifier\" not in params:\n return \"mlp\"\n return str(params[\"classifier\"])\n\n\ndef get_params_description(params, skip):\n return \",\".join(str(v) for k, v in params.items() if k != skip)\n\n\ndef build_row(grid_results, i, split_idx):\n row = {\n \"score\": grid_results[f\"split{split_idx}_test_score\"][i],\n \"combination\": get_params_description(grid_results[\"params\"][i], \"\"),\n \"run\": split_idx,\n \"i\": i,\n }\n return row\n\n\ndef combine_split_scores(grid_results):\n return pd.DataFrame(\n [\n build_row(grid_results, i, split_idx)\n for i in range(len(grid_results[\"mean_test_score\"]))\n for split_idx in range(4)\n if grid_results[\"rank_test_score\"][i] <= 23\n ]\n )\n\n\ndef build_row_relplot(grid_results, i, skip):\n row = {\n \"score\": grid_results[\"mean_test_score\"][i],\n \"combination\": get_params_description(grid_results[\"params\"][i], skip),\n \"i\": i,\n }\n row.update(grid_results[\"params\"][i])\n return row\n\n\ndef combine_split_scores_relplot(grid_results, skip):\n return pd.DataFrame(\n [\n 
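# visualize_grid above walks a scikit-learn cv_results_ dict. A small sketch
# (ours) of extracting the top-k parameter sets from such a dict, assuming
# only the standard keys the script itself reads:
def top_k(cv_results, k=5):
    ranked = sorted(
        zip(cv_results["rank_test_score"],
            cv_results["mean_test_score"],
            cv_results["params"]),
        key=lambda t: t[0],
    )
    return ranked[:k]

# for rank, mean, params in top_k(grid_results):
#     print(rank, round(mean, 3), params)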
build_row_relplot(grid_results, i, skip)\n for i in range(len(grid_results[\"mean_test_score\"]))\n ]\n )\n","repo_name":"joclement/master_thesis","sub_path":"src/jc_thesis_code/visualize_grid.py","file_name":"visualize_grid.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15607104744","text":"import os\nimport face_model\nimport argparse\nimport cv2\n\nparser = argparse.ArgumentParser(description='face model test')\nparser.add_argument('--image-size', default='112,112', help='')\nparser.add_argument('--image_folder', default='./samples/', help='')\nparser.add_argument('--model', default='model/gender-age,0', help='path to load model.')\nparser.add_argument('--gpu', default=0, type=int, help='gpu id')\nargs = parser.parse_args()\n\nmodel = face_model.FaceModel(args)\nimage_names = os.listdir(args.image_folder)\nimage_names = sorted([os.path.join(args.image_folder, image_name) for image_name in image_names])\nfor image_name in image_names:\n img = cv2.imread(image_name)\n img = model.get_input(img)\n gender, age = model.get_ga(img)\n print(f'image name {image_name.split(\"/\")[-1]}, gender is {gender}, age is {age}!')\n","repo_name":"linghu8812/tensorrt_inference","sub_path":"project/gender-age/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":663,"dataset":"github-code","pt":"72"} +{"seq_id":"562456609","text":"import pandas as pd\nimport numpy as np\nfrom hindexpy.HIndex import h_index_from_dict\nfrom hindexpy.CleanData import convert_setOfString_to_list, convert_df_to_dict, convert_column_datetime, convert_pipeSeperatedString_to_list\nfrom hindexpy.ProcessData import filter_sort_and_drop_duplicates, filter_useless_column, filter_year_range, categorise_citation_by_map\n\n### read the data\ndf = pd.read_csv('./hkul-publications/dataset/nursing/nursing.csv') # major dataset\n# df_uni = pd.read_csv('./hkul-publications/dataset/university/HKU2016-2021.csv') # university dataset\n\nxlsx = pd.ExcelFile('./hkul-publications/dataset/ASJC to QS_Sept2020.xlsx')\nsubj_map = pd.read_excel(xlsx, 'ASJC - QS (Subj Map)', dtype={'ASJC code': str})\nfacu_map = pd.read_excel(xlsx, 'ASJC - QS faculty areas (Top le', dtype={'ASJC code': str})\n\nasjc_map = pd.read_excel('./hkul-publications/dataset/ASJC_CODE_NAME.xlsx', dtype={'Code': str})\n\n### prepare map objects\nsubj_map = filter_sort_and_drop_duplicates(subj_map, 'ASJC code', drop_dup=True)\nfacu_map = filter_sort_and_drop_duplicates(facu_map, 'ASJC code', drop_dup=True)\nasjc_map = filter_sort_and_drop_duplicates(asjc_map, 'Code', drop_dup=True)\n\nsubj_map_dict = convert_df_to_dict(subj_map, 'ASJC code', 'QS subject name')\nfacu_map_dict = convert_df_to_dict(facu_map, 'ASJC code', 'QS faculty area name')\nasjc_map_dict = convert_df_to_dict(asjc_map, 'Code', 'Field')\n\n### filter if the articles from school-level are in university-level, so as to ensure all are from HKU\n## copy a tempory dataframe to avoid polluting the original dataframe\ntemp = df.copy()\n\n## set values in cover_date as datetime\ntemp = convert_column_datetime(temp, 'cover_date')\ntemp = filter_useless_column(temp, ['full_name', 'scopus_author_id', 'title', 'cover_date', 'citation_count', 'asjcs'], dropna='any')\ntemp = convert_setOfString_to_list(temp, 'asjcs')\n\ntemp, _ = filter_year_range(temp, 'cover_date', 2016, 2021)\n\n## university-level data\n# temp_uni = 
df_uni.copy()\n\n# temp_uni = filter_useless_column(temp_uni, ['Title', 'Scopus Author Ids', 'Year', 'Citations', 'All Science Journal Classification (ASJC) code'], dropna=True)\n# temp_uni = convert_pipeSeperatedString_to_list(temp_uni, 'All Science Journal Classification (ASJC) code')\n# temp_uni = convert_pipeSeperatedString_to_list(temp_uni, 'Scopus Author Ids')\n# temp_uni.columns = ['title', 'scopus', 'year', 'citations', 'asjc']\n# temp_uni = convert_column_datetime(temp_uni, 'year', '%Y')\n\n# temp_uni, _ = filter_year_range(temp_uni, 'year', 2016, 2021)\n\n## ~~trim the dataframe to only include the articles from HKU~~\n## No Longer Needed: to be fair to the newly comed scholars\n# list_university_title = temp_uni['title'].unique()\n\n# for i in range(len(temp)):\n# if temp.loc[i, 'title'] not in list_university_title:\n# # delete the row\n# temp.drop(i, inplace=True)\n\n### clean the data\ntemp = filter_useless_column(temp, ['full_name', 'title', 'cover_date', 'citation_count', 'asjcs'], dropna='any')\ntemp.reset_index(drop=True, inplace=True)\n\n## only keep the ones from specific year\ntemp2016, year2016 = filter_year_range(temp, 'cover_date', 2016, 2020) # from 2016 to 2020\ntemp2017, year2017 = filter_year_range(temp, 'cover_date', 2017, 2021) # from 2017 to 2021\n\n# temp_uni_2016, _ = filter_year_range(temp_uni, 'year', 2016, 2020)\n# temp_uni_2017, _ = filter_year_range(temp_uni, 'year', 2017, 2021)\n\n### processing data\n## average of nursing-school-level data\n## average is useless in h-index computation\n# categorise by map\n# dict_average_asjc_2016 = categorise_citation_by_map(temp2016, 'asjcs', 'citation_count', option='map', map_object=asjc_map_dict)\n# dict_average_subject_2016 = categorise_citation_by_map(temp2016, 'asjcs', 'citation_count', option='map', map_object=subj_map_dict)\n# dict_average_faculty_2016 = categorise_citation_by_map(temp2016, 'asjcs', 'citation_count', option='map', map_object=facu_map_dict)\n\n# dict_average_asjc_2017 = categorise_citation_by_map(temp2017, 'asjcs', 'citation_count', option='map', map_object=asjc_map_dict)\n# dict_average_subject_2017 = categorise_citation_by_map(temp2017, 'asjcs', 'citation_count', option='map', map_object=subj_map_dict)\n# dict_average_faculty_2017 = categorise_citation_by_map(temp2017, 'asjcs', 'citation_count', option='map', map_object=facu_map_dict)\n\n# compute h-index\n# h_index_average_asjc_2016 = h_index_from_dict(dict_average_asjc_2016)\n# h_index_average_subject_2016 = h_index_from_dict(dict_average_subject_2016)\n# h_index_average_faculty_2016 = h_index_from_dict(dict_average_faculty_2016)\n\n# h_index_average_asjc_2017 = h_index_from_dict(dict_average_asjc_2017)\n# h_index_average_subject_2017 = h_index_from_dict(dict_average_subject_2017)\n# h_index_average_faculty_2017 = h_index_from_dict(dict_average_faculty_2017)\n\n## university-level data\n# categorise by map\n# dict_uni_average_asjc_2016 = categorise_citation_by_map(temp_uni_2016, 'asjc', 'citations', option='map', map_object=asjc_map_dict)\n# dict_uni_average_subject_2016 = categorise_citation_by_map(temp_uni_2016, 'asjc', 'citations', option='map', map_object=subj_map_dict)\n# dict_uni_average_faculty_2016 = categorise_citation_by_map(temp_uni_2016, 'asjc', 'citations', option='map', map_object=facu_map_dict)\n\n# dict_uni_average_asjc_2017 = categorise_citation_by_map(temp_uni_2017, 'asjc', 'citations', option='map', map_object=asjc_map_dict)\n# dict_uni_average_subject_2017 = categorise_citation_by_map(temp_uni_2017, 'asjc', 
'citations', option='map', map_object=subj_map_dict)\n# dict_uni_average_faculty_2017 = categorise_citation_by_map(temp_uni_2017, 'asjc', 'citations', option='map', map_object=facu_map_dict)\n\n# compute h-index\n# h_index_uni_average_asjc_2016 = h_index_from_dict(dict_uni_average_asjc_2016)\n# h_index_uni_average_subject_2016 = h_index_from_dict(dict_uni_average_subject_2016)\n# h_index_uni_average_faculty_2016 = h_index_from_dict(dict_uni_average_faculty_2016)\n\n# h_index_uni_average_asjc_2017 = h_index_from_dict(dict_uni_average_asjc_2017)\n# h_index_uni_average_subject_2017 = h_index_from_dict(dict_uni_average_subject_2017)\n# h_index_uni_average_faculty_2017 = h_index_from_dict(dict_uni_average_faculty_2017)\n\n### ensure h-index of is the same in mapping subjects as the one of the school-level\n## a function to remove keys of a dictionary which is not in the second dictionary\n# def trim_keys(dict_1, dict_2):\n# target_keylist = list(dict_1.keys())\n# sample_keylist = list(dict_2.keys())\n \n# for key in target_keylist:\n# if key not in sample_keylist:\n# del dict_1[key]\n \n# return dict_1\n\n## trim the keys from the university-level data which is not in the asjc map data\n# h_index_uni_average_faculty_2016 = trim_keys(h_index_uni_average_faculty_2016, h_index_average_faculty_2016)\n# h_index_uni_average_faculty_2017 = trim_keys(h_index_uni_average_faculty_2017, h_index_average_faculty_2017)\n# h_index_uni_average_subject_2016 = trim_keys(h_index_uni_average_subject_2016, h_index_average_subject_2016)\n# h_index_uni_average_subject_2017 = trim_keys(h_index_uni_average_subject_2017, h_index_average_subject_2017)\n# h_index_uni_average_asjc_2016 = trim_keys(h_index_uni_average_asjc_2016, h_index_average_asjc_2016)\n# h_index_uni_average_asjc_2017 = trim_keys(h_index_uni_average_asjc_2017, h_index_average_asjc_2017)\n\n### compute the h-index of every scholars in the school\n## asjc\ndict_asjc_2016 = categorise_citation_by_map(temp2016, 'asjcs', 'citation_count',\n option='both', author_col='full_name', map_object=asjc_map_dict)\ndict_asjc_2017 = categorise_citation_by_map(temp2017, 'asjcs', 'citation_count',\n option='both', author_col='full_name', map_object=asjc_map_dict)\n\nfor author in dict_asjc_2016: dict_asjc_2016[author] = h_index_from_dict(dict_asjc_2016[author])\nfor author in dict_asjc_2017: dict_asjc_2017[author] = h_index_from_dict(dict_asjc_2017[author])\n\n## qs-faculty\ndict_faculty_2016 = categorise_citation_by_map(temp2016, 'asjcs', 'citation_count',\n option='both', author_col='full_name', map_object=facu_map_dict)\ndict_faculty_2017 = categorise_citation_by_map(temp2017, 'asjcs', 'citation_count',\n option='both', author_col='full_name', map_object=facu_map_dict)\n\nfor author in dict_faculty_2016: dict_faculty_2016[author] = h_index_from_dict(dict_faculty_2016[author])\nfor author in dict_faculty_2017: dict_faculty_2017[author] = h_index_from_dict(dict_faculty_2017[author])\n\n## qs-subject\ndict_subject_2016 = categorise_citation_by_map(temp2016, 'asjcs', 'citation_count',\n option='both', author_col='full_name', map_object=subj_map_dict)\ndict_subject_2017 = categorise_citation_by_map(temp2017, 'asjcs', 'citation_count',\n option='both', author_col='full_name', map_object=subj_map_dict)\n\nfor author in dict_subject_2016: dict_subject_2016[author] = h_index_from_dict(dict_subject_2016[author])\nfor author in dict_subject_2017: dict_subject_2017[author] = h_index_from_dict(dict_subject_2017[author])\n\n### data postprocessing\n## merge the data: add department 
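# The script above delegates to hindexpy's h_index_from_dict. For reference,
# the h-index itself reduces to a few lines; this is our own sketch, not
# hindexpy's implementation: the largest h such that at least h papers have
# >= h citations each.
def h_index(citations):
    cs = sorted(citations, reverse=True)
    h = 0
    while h < len(cs) and cs[h] >= h + 1:
        h += 1
    return h

print(h_index([10, 8, 5, 4, 3]))  # -> 4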
average into each dict\n## average is useless in h-index\n# dict_asjc_2016['AVERAGE (SCHOOL)'] = dict_average_asjc_2016\n# dict_asjc_2017['AVERAGE (SCHOOL)'] = dict_average_asjc_2017\n# dict_subject_2016['AVERAGE (SCHOOL)'] = dict_average_subject_2016\n# dict_subject_2017['AVERAGE (SCHOOL)'] = dict_average_subject_2017\n# dict_faculty_2016['AVERAGE (SCHOOL)'] = dict_average_faculty_2016\n# dict_faculty_2017['AVERAGE (SCHOOL)'] = dict_average_faculty_2017\n\n# dict_asjc_2016['AVERAGE (UNIVERSITY)'] = dict_uni_average_asjc_2016\n# dict_asjc_2017['AVERAGE (UNIVERSITY)'] = dict_uni_average_asjc_2017\n# dict_subject_2016['AVERAGE (UNIVERSITY)'] = dict_uni_average_subject_2016\n# dict_subject_2017['AVERAGE (UNIVERSITY)'] = dict_uni_average_subject_2017\n# dict_faculty_2016['AVERAGE (UNIVERSITY)'] = dict_uni_average_faculty_2016\n# dict_faculty_2017['AVERAGE (UNIVERSITY)'] = dict_uni_average_faculty_2017\n\n## convert to a dataframe\nh_index_general_2016 = pd.DataFrame.from_dict(dict_asjc_2016).T\nh_index_general_2017 = pd.DataFrame.from_dict(dict_asjc_2017).T\n\nh_index_faculty_2016 = pd.DataFrame.from_dict(dict_faculty_2016).T\nh_index_faculty_2017 = pd.DataFrame.from_dict(dict_faculty_2017).T\n\nh_index_subject_2016 = pd.DataFrame.from_dict(dict_subject_2016).T\nh_index_subject_2017 = pd.DataFrame.from_dict(dict_subject_2017).T\n\n## save the data\nh_index_general_2016.to_csv('./hkul-publications/processed/nursing_h-index_by_asjc_2016-2020.tsv', sep='\\t')\nh_index_general_2017.to_csv('./hkul-publications/processed/nursing_h-index_by_asjc_2017-2021.tsv', sep='\\t')\n\nh_index_faculty_2016.to_csv('./hkul-publications/processed/nursing_h-index_by_qs_faculty_2016-2020.tsv', sep='\\t')\nh_index_faculty_2017.to_csv('./hkul-publications/processed/nursing_h-index_by_qs_faculty_2017-2021.tsv', sep='\\t')\n\nh_index_subject_2016.to_csv('./hkul-publications/processed/nursing_h-index_by_qs_subject_2016-2020.tsv', sep='\\t')\nh_index_subject_2017.to_csv('./hkul-publications/processed/nursing_h-index_by_qs_subject_2017-2021.tsv', sep='\\t')\n","repo_name":"Artorivs/hkul-publications","sub_path":"nursing_h_index.py","file_name":"nursing_h_index.py","file_ext":"py","file_size_in_byte":11312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71387414633","text":"\n\nimport time\n\nimport sql, app\n\nimport colorama\nfrom colorama import Fore, Back, Style\n\n\ndef varifyOfficer(username,password):\n conn, cursor=sql.get_db_connection()\n rows=[]\n try:\n cursor.execute(\"SELECT * from officers where username=?\", (username,))\n rows=cursor.fetchall()\n except Exception as e:\n print(\"Error fetching officer\", e)\n conn.rollback()\n finally:\n conn.close()\n\n if not rows:\n print(\"Your details do not exist...\")\n print()\n else:\n for item in rows:\n uname = item[0]\n passw = item[1]\n if(uname==username and passw==password):\n return True\n else:\n return False\n\ndef showOfficerMenu():\n menu={\n \"1\": \"View Complaints\",\n \"2\": \"Upload Found Vehicle details\",\n \"3\": \"Logout\",\n }\n\n for key, value in menu.items():\n print(Fore.GREEN+ f\"{key} . 
{value}\"+Style.RESET_ALL)\n\ndef viewAllComplaint():\n conn, cursor=sql.get_db_connection()\n rows=[]\n try:\n cursor.execute(f\"SELECT * from complaints\")\n rows=cursor.fetchall()\n except Exception as e:\n print(\"Error in Complaints fetching\", e)\n conn.rollback()\n finally:\n conn.close()\n\n if not rows:\n print(\"\\nNo Complainsts found\")\n print()\n OfficerDashboard()\n else:\n print(Fore.YELLOW + \"+------------------------------------------------------------------------------------------------------------+\")\n print( f\" {'Owner':10} | {'Name':10} | {'Model':8} | {'Chassis Numberame':10} | {'Enginer Number':10} | {'Color':10} | {'Place':10} | {'Description':10} | {'Status':10} |\")\n print( \"+-------------------------------------------------------------------------------------------------------------+\")\n for item in rows:\n uname=item[0]\n name = item[1]\n model = item[2]\n chass=item[3]\n engine=item[4]\n color=item[5]\n place=item[6]\n desc=item[7]\n status=item[8]\n print( f\"{uname:10} | {name:10} | {model:10} | {chass:10} | {engine:10} | {color:10} | {place:10} | {desc:10} | {status:10} |\") \n print( \"+-----------------------------------------------------------------------------------------------------------+\\n\"+Style.RESET_ALL)\n OfficerDashboard()\n\ndef OfficerUpload():\n print(\"Officer upload Page\")\n username=input(\"Onwer: \")\n name=input(\"Name: \")\n chassis=input(\"Chassis Number: \")\n engine=input(\"Engine Number: \")\n model=input(\"Model: \")\n color=input(\"Color: \")\n place=input(\"Place: \")\n description=input(\"Description: \")\n status=\"Found\"\n conn, cursor = sql.get_db_connection()\n try:\n cursor.execute(\"INSERT INTO complaints (username,name,model,chassisNumber,engineNumber,color,place,description,status) VALUES (?,?,?,?,?,?,?,?,?)\",\n (username,name,model,chassis,engine,color,place,description,status))\n conn.commit()\n print(\"Successfully Vehicle data uploaded\")\n print()\n OfficerDashboard()\n except Exception as e:\n print(\"Vehicle exists in Database, Updating it\\n\")\n cursor.execute(f\"UPDATE complaints SET status='FOUND' where chassisNumber='{chassis}'\")\n cursor.execute(f\"UPDATE complaints SET place='{place}' where chassisNumber='{chassis}'\")\n cursor.execute(f\"UPDATE complaints SET description='{description}' where chassisNumber='{chassis}'\")\n conn.commit()\n time.sleep(2)\n print(\"Status Updated\")\n print()\n OfficerDashboard()\n finally:\n conn.close()\n\n\ndef OfficerDashboard():\n print(Fore.GREEN+\"Welcome to Officer Dashboard\"+Style.RESET_ALL)\n try:\n showOfficerMenu()\n while True:\n choice=app.getChoice()\n choice_mapping={\n \"1\": viewAllComplaint,\n \"2\": OfficerUpload,\n \"3\": app.logout\n }\n mapped_choice=choice_mapping.get(choice)\n if not mapped_choice:\n app.invalidChoice()\n else:\n con=choice_mapping[choice]()\n if not con:\n break\n except Exception as e:\n print(\"Error Occured\", e)\n\n","repo_name":"imAneeesh/PWS","sub_path":"task/task_python/officer.py","file_name":"officer.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23123155830","text":"import shutil\nimport sys\nimport os\nimport stat\nimport struct\nimport binascii\nimport string\nimport optparse\nimport subprocess\nimport tempfile\n\n# T124 uses a static monitor library\nmonitor_lib = \"libmonitor\"\n\nparser = optparse.OptionParser(description=\"Constructing Trusted OS partition image\",\n usage=\"usage: %prog [options] 
\")\nparser.add_option('-e', action=\"store_true\",\n help='partition data is encrypted (default: data is not encrypted)')\nparser.add_option('--cipher', action=\"store_true\",\n help='Encrypt partition data')\nparser.add_option('--key', metavar='HEX_KEY',\n help='32 hex nibbles for cipher key')\nparser.add_option('--keyfile',\n help='file with 32 hex nibbles for cipher key')\nparser.add_option('--iv', metavar='HEX_IV',\n help='32 hex nibbles for IV')\nparser.add_option('--eks_master', metavar='HEX_EKS_MASTER',\n help='32 hex nibbles for EKS master value')\nparser.add_option('--monitor', help=\"monitor file\")\nparser.add_option('--os', help=\"os file\")\nparser.add_option('--dtb', help=\"TrustedOS dtb file\")\nparser.add_option('--tostype', help='Trusted OS type: tlk, trusty, tos2, optee')\n\n(args, leftover)=parser.parse_args()\nif len(leftover) < 1:\n parser.print_help()\n sys.exit(1)\n\nmonitor_name = args.monitor\ntos_name = args.os\ntos_dtb_name = args.dtb\noutput_name = os.path.abspath(leftover[0])\nimage_type = 0\n\nTOS_TYPE_NONE = 0\nTOS_TYPE_TLK = 1\nTOS_TYPE_TRUSTY = 2\nTOS_TYPE_TOS2 = 3\nTOS_TYPE_OPTEE = 4\ntos_type = TOS_TYPE_NONE\nif tos_name:\n tos_type = TOS_TYPE_TRUSTY\nif args.tostype:\n if not tos_name:\n print(\"--tostype specified without --os\")\n parser.print_help()\n sys.exit(1)\n tos_map = {\n 'tlk': TOS_TYPE_TLK,\n 'trusty': TOS_TYPE_TRUSTY,\n 'tos2': TOS_TYPE_TOS2,\n 'optee': TOS_TYPE_OPTEE,\n }\n tos_type = tos_map.get(args.tostype, None)\n if tos_type is None:\n print(\"Invalid --tostype \" + args.tostype)\n parser.print_help()\n sys.exit(1)\n\n# If we want/need to pad tos.img to block size, please use 512 here\npad_size = 16\ntos_size_nopad = 0\n\nif os.path.exists(output_name):\n os.remove(output_name)\n\n# Calculate individual binary sizes for the monitor and trusted OS\nif monitor_name:\n monitor_size = os.path.getsize(monitor_name)\nelse:\n monitor_size = 0\n\nif args.os:\n os_size = os.path.getsize(tos_name)\nelse:\n os_size = 0\n\nif args.dtb:\n tos_dtb_size = os.path.getsize(tos_dtb_name)\nelse:\n tos_dtb_size = 0\n\n# calculate the total image size\nif monitor_name:\n if monitor_lib in monitor_name:\n # libmonitor is not a separate binary\n monitor_size = 0\n\ntos_size = monitor_size + os_size + tos_dtb_size\n\n# Trusted OS has to be 16-byte aligned for faster relocation, so append the\n# monitor binary with zeros\ntos_align = 0\nif monitor_size % 16:\n tos_align = (16 - (monitor_size % 16))\ntos_offset = monitor_size + tos_align\nmonitor_size += tos_align\ntos_size += tos_align\n\ntos_dtb_offset = 0\ntos_dtb_align = 0\nif args.dtb:\n if os_size % 16:\n tos_dtb_align = (16 - (os_size % 16))\n tos_dtb_offset = monitor_size + os_size + tos_dtb_align\n os_size += tos_dtb_align\n tos_size += tos_dtb_align\n\n# By default the image IV is zero vector.\n# If --iv option is used the IV value is saved in TOS TOC for the bootloader\n#\niv_hex=\"00000000000000000000000000000000\"\niv = bytearray.fromhex(iv_hex).decode('utf-8')\n\nif args.e or args.cipher:\n image_type = 0x45\n\n if args.iv:\n iv=args.iv.decode(\"hex\")\n image_type |= 0x20\n\n if args.cipher:\n print(\"Generating Trusted OS Partition Image File (encrypting input)\")\n tos_size_nopad = tos_size\n if tos_size % pad_size:\n tos_size += (pad_size - tos_size % pad_size)\n else:\n print(\"Generating Trusted OS Partition Image File (pre-encrypted input)\")\nelse:\n print(\"Generating Trusted OS Partition Image File\")\n image_type = 0x50\n\n#\n# Table of Contents (TOC) for the Bootloader\n#\n# typedef 
struct tos_image_toc {\n# char name[7];\n# char total_img_size[9];\n# // word aligned at this point\n# uint32_t monitor_offset;\n# uint32_t monitor_size;\n# uint32_t tos_offset;\n# uint32_t tos_size;\n# uint32_t image_info;\n# uint8_t iv[16];\n# uint32_t tos_type;\n# uint32_t tos_dtb_offset;\n# uint32_t tos_dtb_size;\n# } tos_image_toc_t;\n#\n# Could also use the carveout size but maybe not good to fix that here.\n# total_img_size contains NUL terminated string.\nif len(str(tos_size)) >= 9:\n sys.exit(\"Combined image size does not fit field\")\n\nimg_size = tos_size\n\n# calculate size of padding bytes to make final image 16-byte aligned\nimg_align = 0\nif (img_size % 16):\n img_align = (16 - (img_size % 16))\n\nvalues = (\n str(\"NVTOSP\").encode('utf-8'), # partition name\n str(img_size + img_align).encode('utf-8'), # total image size\n 0, # monitor_offset\n monitor_size, # monitor_size\n tos_offset, # tos_offset\n os_size, # os_size\n image_type, # image type info (ciphered, iv valid, etc)\n iv.encode('utf-8'), # AES init vector if specific\n tos_type, # trusted OS type\n tos_dtb_offset, # trusted OS dtb offset\n tos_dtb_size # trusted OS dtb size\n)\ns = struct.Struct('< 7s 9s I I I I I 16s I I I')\npacked_data = s.pack(*values)\nheader = '\\0' * (512-s.size) # align TOC to 512 bytes\n\n# Open new image file\n\ndest = open(output_name, 'wb')\nos.chmod(output_name, (stat.S_IWUSR | stat.S_IRUSR) | stat.S_IRGRP | stat.S_IROTH)\n\n# Write out TOS partition header\n\ndest.write(packed_data) # write TOC\ndest.write(header.encode('utf-8')) # write padding bytes\n\ndest.flush()\n\nif args.cipher:\n\n if monitor_lib in monitor_name:\n os.remove(output_name)\n sys.exit(\"old systems do not support ciphered images\")\n\n # Encrypting with this script is used only for R&D\n # for obvious security reasons.\n #\n # By default the TOS image cipher key is:\n #\n key=\"58e2fccefa7e3061367f1d57a4e7455a\"\n\n # Read the cipher key from a file\n # or derive the cipher key from EKS master value\n # for the R&D case.\n #\n # Otherwise the TOS image cipher key default value above\n # matches the EKS image master value zero.\n #\n if args.keyfile:\n key = args.keyfile.read()\n args.keyfile.close()\n key = key.rstrip('\\r\\n')\n else:\n if args.key:\n key=args.key\n else:\n if args.eks_master:\n ziv=\"00000000000000000000000000000000\"\n\n # XXX Could use pipes\n s = tempfile.NamedTemporaryFile(delete=True)\n s.write(\"00000000000000000000000000000001\".decode(\"hex\"))\n s.flush()\n s.seek(0)\n k = tempfile.NamedTemporaryFile(delete=True)\n\n subprocess.call([\"openssl\", \"enc\", \"-e\", \"-nopad\", \"-aes-128-cbc\", \"-K\", args.eks_master, \"-iv\", ziv],\n stdin=s, stdout=k)\n\n s.close()\n k.flush()\n k.seek(0)\n key=k.read().encode(\"hex\")\n k.close()\n\n image_file = tempfile.NamedTemporaryFile(delete=True)\n shutil.copyfileobj(open(monitor_name, 'rb'), image_file) # write monitor.bin to temp image_file\n if args.os:\n shutil.copyfileobj(open(tos_name, 'rb'), image_file) # write lk.bin to temp image_file\n if args.dtb:\n shutil.copyfileobj(open(tos_dtb_name, 'rb'), image_file) # write trusted OS dtb to temp image_file\n\n # Pad the image file before encrypting it\n if tos_size - tos_size_nopad > 0:\n image_file.write('\\0' * (tos_size - tos_size_nopad))\n\n image_file.flush()\n image_file.seek(0)\n\n subprocess.call([\"openssl\", \"enc\", \"-e\", \"-nopad\", \"-aes-128-cbc\", \"-K\", key, \"-iv\", iv.encode(\"hex\")],\n stdin=image_file, stdout=dest)\n image_file.close()\n\nelse:\n if monitor_name:\n 
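# The TOC above is serialized with struct.Struct('< 7s 9s I I I I I 16s I I I').
# A tiny round-trip sketch showing how the same kind of format string recovers
# what pack produced; the toy subset of fields and their values are
# placeholders, not the real TOC layout.
import struct

fmt = struct.Struct("< 7s I I")            # name, offset, size
blob = fmt.pack(b"NVTOSP", 0, 4096)        # 7s zero-pads the 6-byte name
name, offset, size = fmt.unpack(blob)
print(name.rstrip(b"\x00"), offset, size)  # -> b'NVTOSP' 0 4096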
if monitor_lib in monitor_name:\n shutil.copyfileobj(open(tos_name, 'rb'), dest) # write lk.bin\n else:\n shutil.copyfileobj(open(monitor_name, 'rb'), dest) # write monitor.bin\n dest.write(('\\0' * tos_align).encode('utf-8'))\n if args.os:\n shutil.copyfileobj(open(tos_name, 'rb'), dest) # write lk.bin\n if args.dtb:\n dest.write(('\\0' * tos_dtb_align).encode('utf-8'))\n shutil.copyfileobj(open(tos_dtb_name, 'rb'), dest) # write trusted OS dtb\n\ndest.flush()\n# make TOS image as 16-byte aligned for encryption\ndest.write(('\\0' * img_align).encode('utf-8'))\n\ndest.flush()\ndest.close()\n\nprint(\"Generate TOS Image File for boot-wrapper.\")\n\nimg_out = str(os.path.dirname(output_name)+\"/\"+\"img.bin\")\n\ndest = open(img_out, 'wb')\nos.chmod(img_out, (stat.S_IWUSR | stat.S_IRUSR) | stat.S_IRGRP | stat.S_IROTH)\n\nif monitor_name:\n if monitor_lib in monitor_name:\n shutil.copyfileobj(open(tos_name, 'rb'), dest) # write lk.bin\n else:\n shutil.copyfileobj(open(monitor_name, 'rb'), dest) # write monitor.bin\nif args.os:\n shutil.copyfileobj(open(tos_name, 'rb'), dest) # write lk.bin\nif args.dtb:\n shutil.copyfileobj(open(tos_dtb_name, 'rb'), dest) # write trusted OS dtb\n\ndest.flush()\ndest.close()\n","repo_name":"zbwu/athena_l4t_sdk","sub_path":"nv_tegra/tos-scripts/gen_tos_part_img.py","file_name":"gen_tos_part_img.py","file_ext":"py","file_size_in_byte":9628,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"69627089","text":"import pathlib\nimport time\nfrom typing import List\nfrom unittest.mock import patch, MagicMock\n\nimport botocore.exceptions\n\nimport settings\nfrom agent.config import StaticAgentConfigLoader\nfrom agent.config.models import (\n ClusterMetadata, Cluster, ProberWithConfig, Prober, BashProberRunner, ProberFile,\n ProberConfig, AgentConfig,\n)\nfrom agent.main import update_probers_map_from_config, load_config_with_retry\nfrom common.monitoring.solomon import SolomonClient\nfrom agent.telemetry import AgentTelemetrySender, SolomonMetric, MetricKind\nfrom database.models import UploadProberLogPolicy\n\n\nclass DummySolomonClient(SolomonClient):\n def send_metrics(self, metrics: List[SolomonMetric]):\n pass\n\n\ndef get_test_config() -> AgentConfig:\n \"\"\"\n For development & testing purposes only\n \"\"\"\n cluster = ClusterMetadata(\n Cluster(\n id=1,\n name=\"Meeseeks\",\n slug=\"meeseeks\",\n variables={},\n prober_configs=[],\n )\n )\n probers_config = [\n ProberWithConfig(\n unique_key=(1,),\n prober=Prober(\n id=1,\n name=\"Sleep\",\n slug=\"sleep\",\n runner=BashProberRunner(\n command=\"/bin/bash -c 'echo Timeout; sleep 4'\",\n ),\n files=[\n ProberFile(\n id=1,\n prober_id=1,\n is_executable=True,\n relative_file_path=\"metadata.sh\",\n md5_hexdigest=\"d41d8cd98f00b204e9800998ecf8427e\",\n )\n ],\n files_location=pathlib.Path(\"/tmp\"),\n ),\n config=ProberConfig(\n prober_id=1,\n is_prober_enabled=True,\n interval_seconds=5,\n timeout_seconds=3,\n s3_logs_policy=UploadProberLogPolicy.FAIL,\n ),\n ),\n ProberWithConfig(\n unique_key=(2,),\n prober=Prober(\n id=2,\n name=\"Echo\",\n slug=\"echo\",\n runner=BashProberRunner(\n command=\"echo Success; echo $HOME\",\n ),\n files=[\n ProberFile(\n id=2,\n prober_id=2,\n is_executable=False,\n relative_file_path=\"dns.sh\",\n md5_hexdigest=\"d41d8cd98f00b204e9800998ecf8427f\",\n )\n ],\n files_location=pathlib.Path(\"/tmp\"),\n ),\n config=ProberConfig(\n prober_id=2,\n is_prober_enabled=True,\n interval_seconds=7,\n timeout_seconds=5,\n 
s3_logs_policy=UploadProberLogPolicy.ALL,\n ),\n )]\n\n return AgentConfig(cluster=cluster, probers=probers_config)\n\n\ndef test_update_probers_map_from_config_from_empty():\n config = get_test_config()\n probers_map = {}\n solomon_client = DummySolomonClient()\n with patch(\"agent.main.ProberProcess\") as ProberProcess:\n update_probers_map_from_config(probers_map, StaticAgentConfigLoader(config), solomon_client)\n\n assert ProberProcess.call_count == 2\n ProberProcess.assert_any_call(config.probers[0].prober, config.probers[0].config, config.cluster, solomon_client, 0)\n ProberProcess.assert_any_call(config.probers[1].prober, config.probers[1].config, config.cluster, solomon_client, 1)\n\n assert probers_map.keys() == {(1,), (2,)}\n assert isinstance(probers_map[(1,)], type(ProberProcess.return_value))\n assert isinstance(probers_map[(2,)], type(ProberProcess.return_value))\n\n\ndef test_update_probers_map_from_config_with_existing_prober():\n config = get_test_config()\n solomon_client = DummySolomonClient()\n with patch(\"agent.main.ProberProcess\") as ProberProcess:\n probers_map = {\n (1,): ProberProcess.return_value\n }\n with patch.object(ProberProcess.return_value, \"update_prober_if_changed\") as update_prober_if_changed, \\\n patch.object(ProberProcess.return_value, \"update_config_if_changed\") as update_config_if_changed:\n update_probers_map_from_config(probers_map, StaticAgentConfigLoader(config), solomon_client)\n\n assert ProberProcess.call_count == 1\n ProberProcess.assert_any_call(config.probers[1].prober, config.probers[1].config, config.cluster, solomon_client, 1)\n\n update_prober_if_changed.assert_called_once_with(config.probers[0].prober)\n update_config_if_changed.assert_called_once_with(config.probers[0].config)\n\n assert probers_map.keys() == {(1,), (2,)}\n\n\ndef test_prober_has_been_deleted_from_config():\n # 1. Create a config with 2 probers and empty probers_map\n config = get_test_config()\n probers_map = {}\n solomon_client = DummySolomonClient()\n with patch(\"agent.main.ProberProcess\"):\n update_probers_map_from_config(probers_map, StaticAgentConfigLoader(config), solomon_client)\n\n # 2. Delete one prober in config and update probers_map\n config.probers.pop()\n update_probers_map_from_config(probers_map, StaticAgentConfigLoader(config), solomon_client)\n\n # 3. 
Check that only one prober left in probers_map\n assert len(probers_map) == 1\n\n\ndef test_agent_telemetry_metrics_send():\n solomon_client_mock = MagicMock()\n solomon_client_mock.send_metrics = MagicMock()\n agent_start_timer_mock = MagicMock()\n agent_start_timer_mock.get_total_seconds = MagicMock(return_value=345.345635345)\n\n cluster_slug = \"test-cluster\"\n sender = AgentTelemetrySender(DummySolomonClient(), cluster_slug)\n sender._solomon_client = solomon_client_mock\n sender._agent_start_time = agent_start_timer_mock\n time_now = 1634124163.5359619\n probers_count = 26\n expected_metrics = [\n SolomonMetric(int(time_now), 1, metric=\"keep_alive\", kind=MetricKind.GAUGE, cluster_slug=cluster_slug),\n SolomonMetric(\n int(time_now), probers_count, metric=\"probers_count\", kind=MetricKind.GAUGE, cluster_slug=cluster_slug\n ),\n SolomonMetric(int(time_now), 345, metric=\"uptime\", kind=MetricKind.COUNTER, cluster_slug=cluster_slug),\n ]\n\n with patch.object(time, \"time\", return_value=time_now):\n sender.send_agent_telemetry(probers_count)\n solomon_client_mock.send_metrics.assert_called_once_with(expected_metrics)\n\n\ndef test_load_config_with_retry():\n expected_config = get_test_config()\n config_loader = StaticAgentConfigLoader(expected_config)\n cluster_config = load_config_with_retry(config_loader)\n assert cluster_config == expected_config\n\n config_loader = MagicMock()\n config_loader.load = MagicMock()\n config_loader.load.side_effect = botocore.exceptions.ClientError(\n error_response={\"Error\": {\"Code\": \"404\"}}, operation_name=\"head_object\"\n )\n settings.AGENT_GET_CLUSTER_CONFIG_RETRY_DELAY = 0.01\n settings.AGENT_GET_CLUSTER_CONFIG_RETRY_ATTEMPTS = 3\n cluster_config = load_config_with_retry(config_loader)\n assert cluster_config is None\n assert config_loader.load.call_count == 3\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"cloud/tests/test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39441440962","text":"#!/usr/bin/python3\n\n# Given an array of positive integers, find 2 elements such that their xor:\n# a ^ b is maximum\n\n\nclass TrieNode:\n def __init__(self):\n self.c = [None] * 2\n\n\ndef find_maximum_bits(x, N):\n b = 0\n while x:\n b += 1\n x >>= 1\n return b\n\n\ndef checkBit(x, i):\n return (x >> i) & 1\n\n\ndef insert(root, x, b):\n cur = root\n for i in range(b-1, -1, -1):\n idx = 1 if checkBit(x, i) == 1 else 0\n if cur.c[idx] is None:\n cur.c[idx] = TrieNode()\n cur = cur.c[idx]\n return root\n\n\ndef query(root, x, b):\n cur = root\n ans = 0\n for i in range(b-1, -1, -1):\n idx = 1 if checkBit(x, i) == 1 else 0\n if cur.c[1-idx]:\n ans += (1 << i)\n cur = cur.c[1-idx]\n else:\n cur = cur.c[idx]\n return ans\n\n\ndef maximum_xor(arr, N):\n root = TrieNode()\n # find max no of bits in array\n bits = 0\n for i in range(0, N):\n bits = max(bits, find_maximum_bits(arr[i], N))\n # print(\"maximum no of bits: \", bits)\n for i in range(N):\n root = insert(root, arr[i], bits)\n\n ans = 0\n for i in range(N):\n ans = max(ans, query(root, arr[i], bits))\n return ans\n\n\nif __name__ == '__main__':\n for _ in range(int(input())):\n N = int(input())\n arr = list(map(int, input().split()))\n print(maximum_xor(arr, 
N))\n","repo_name":"hrishikeshtak/Coding_Practises_Solutions","sub_path":"hackerrank/si/trie/si-maximum-xor.py","file_name":"si-maximum-xor.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11448120052","text":"import dlib\nimport cv2\nimport numpy as np\nfrom imutils import face_utils\n\nDEFAULT_KPREGRESSOR_PATH = \"/home/igor/github/my/GAN_faces/configs/shape_predictor_68_face_landmarks.dat\"\n\n\nclass FaceAligner:\n\n def __init__(self, target_size=100, eye_level=0.3, eye_dist=0.4, kpregressor=DEFAULT_KPREGRESSOR_PATH, ):\n self.kepoint_regressor_path = kpregressor\n self.regressor = dlib.shape_predictor(self.kepoint_regressor_path)\n self.detector = dlib.get_frontal_face_detector()\n self.out_target_size = target_size\n self.eye_level = eye_level\n self.eye_dist = eye_dist\n\n self.left_eye_ids = [36, 37, 38, 39, 40, 41]\n self.right_eye_ids = [42, 43, 44, 45, 46, 47]\n self.regressor_in_size = 500\n return\n\n @staticmethod\n def __distance_between_points(p1, p2):\n return np.sqrt(np.power(p1[0] - p2[0], 2) + np.power(p1[1] - p1[1], 2))\n\n @staticmethod\n def __make_center(current, target=0.5):\n if current < target:\n output_offset = (target - current) / (1.0 - target)\n output_size = 1.0 + output_offset\n else:\n output_offset = 0\n output_size = current / target # - 1.0\n return output_size, output_offset\n\n def process(self, image):\n image = cv2.resize(image, (self.regressor_in_size, self.regressor_in_size))\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rects = self.detector(gray, 1)\n if len(rects) < 1:\n return None\n\n key_points = self.regressor(gray, rects[0])\n key_points = face_utils.shape_to_np(key_points)\n (x, y, w, h) = face_utils.rect_to_bb(rects[0])\n\n eye_l, eye_r = (0, 0), (0, 0)\n for idx, (x, y) in enumerate(key_points):\n if idx in self.left_eye_ids:\n eye_l = eye_l[0] + x, eye_l[1] + y\n if idx in self.right_eye_ids:\n eye_r = eye_r[0] + x, eye_r[1] + y\n eye_l = int(eye_l[0] / len(self.left_eye_ids)), int(eye_l[1] / len(self.left_eye_ids))\n eye_r = int(eye_r[0] / len(self.right_eye_ids)), int(eye_r[1] / len(self.right_eye_ids))\n center = int((eye_l[0] + eye_r[0]) / 2), int((eye_l[1] + eye_r[1]) / 2)\n eye_distance = self.__distance_between_points(eye_l, eye_r)\n\n buf_sz_x, buf_offset_x = self.__make_center(current=center[0] / image.shape[1], target=0.5)\n buf_sz_y, buf_offset_y = self.__make_center(current=center[1] / image.shape[0], target=self.eye_level)\n\n buf_sz_x = int(buf_sz_x * image.shape[1])\n buf_sz_y = int(buf_sz_y * image.shape[0])\n buf_offset_x = int(buf_offset_x * image.shape[1])\n buf_offset_y = int(buf_offset_y * image.shape[0])\n\n extended_size = max(buf_sz_x, buf_sz_y)\n buf_offset_x += int((extended_size - buf_sz_x) * 0.5)\n buf_offset_y += int((extended_size - buf_sz_y) * 0.5)\n\n eye_target_dist = self.eye_dist * extended_size\n\n buf = np.zeros(shape=(extended_size, extended_size, 3), dtype=np.uint8)\n buf[buf_offset_y:buf_offset_y + self.regressor_in_size, buf_offset_x:buf_offset_x + self.regressor_in_size, :] \\\n = image.copy()\n\n angle = -1 * np.arctan((center[1] - eye_r[1]) / (eye_r[0] - center[0])) * 180. 
/ np.pi\n        scale = eye_target_dist / eye_distance\n        center = int(0.5 * extended_size), int(self.eye_level * extended_size)\n        transform_matrix = cv2.getRotationMatrix2D(center, angle, scale)\n\n        rotated = cv2.warpAffine(buf, transform_matrix, (extended_size, extended_size))\n\n        return cv2.resize(rotated, (self.out_target_size, self.out_target_size))\n\n    def __call__(self, *args, **kwargs):\n        return self.process(args[0])\n\n","repo_name":"iolkhovsky/gan_torch","sub_path":"dataset/face_aligner.py","file_name":"face_aligner.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12844687435","text":"import time \nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),'Utils'))\nfrom arithm import is_prime\n\n\nproblem_number = 46\n \n\n#Solution\n\n\ndef solution():\n    n = 3\n    double_square = [2]\n    next_square = 2\n    b = True\n    while b :\n        n += 2\n        if n > 2 * next_square**2:\n            double_square.append(2 * next_square**2)\n            next_square += 1\n        \n        if is_prime(n) != n:\n            b = False\n            for square in double_square:\n                if is_prime(n - square) == n - square:\n                    b = True\n                    break\n    return n\n\n\n#Test & Result\n\n\nfichier = open(\"Solution \"+str(problem_number)+\".txt\", \"w\")\nstring = \"\"\n\n\nbegin_problem = time.time()\nproblem_value = solution()\nend_problem = time.time()\nproblem_time = end_problem - begin_problem\n\nstring += \"RESULT PROBLEM #\"+str(problem_number)+\"\\n\\n\"\nstring += \"Output: \"+str(problem_value)+\"\\n\"\nstring += \"Computation time: \"+str(problem_time)+\" sec\\n\"\n\nstring += \"\\n\\n\\nCurrent date & time: \" + time.strftime(\"%c\")\n\nfichier.write(string)\nfichier.close()\n ","repo_name":"FrancoisdeFouchecour/Projet-Euler","sub_path":"Problems/46-Problem/Problem 46.py","file_name":"Problem 46.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36415777378","text":"#!/usr/bin/python3\n\n# Prints all the names defined by the given compiled module\ndef displayNames():\n    hidden_4 = __import__(\"hidden_4\")\n\n    defNames = dir(hidden_4)\n    for name in defNames:\n        if not name.startswith(\"__\"):\n            print(name)\n\n\nif __name__ == \"__main__\":\n    displayNames()\n","repo_name":"chee-zaram/alx-higher_level_programming","sub_path":"0x02-python-import_modules/4-hidden_discovery.py","file_name":"4-hidden_discovery.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15197509209","text":"import sys\r\nN = int(input())\r\na = [(int(v),i) for i, v in enumerate(input().split())]\r\nao = sum([v for v, i in a if i%2 and v>0])\r\nae = sum([v for v, i in a if not i%2 and v>0])\r\nif max(ao, ae)== 0:\r\n    ai = a.index(max(a))\r\n    Ans = [1]*(ai)+list(range(N-ai,1,-1))\r\n    print(max(a)[0])\r\n    print(len(Ans))\r\n    for i in Ans:\r\n        print(i)\r\n    sys.exit()\r\nif ao >= ae:\r\n    print(ao)\r\n    yn = [i for v, i in a if i%2 and v>0]\r\nelse:\r\n    print(ae)\r\n    yn = [i for v, i in a if not i%2 and v>0]\r\nlistyn = [i in yn for i in range(N)]\r\nAns = []\r\nwhile not listyn[0]:\r\n    Ans.append(1)\r\n    listyn = listyn[1:]\r\nwhile not listyn[-1]:\r\n    Ans.append(len(listyn))\r\n    listyn = listyn[:-1]\r\nwhile True:\r\n    if len(listyn) == 1:\r\n        break\r\n    if len(listyn) in [2,3]:\r\n        Ans.append(2)\r\n        break\r\n    if listyn[2]:\r\n        Ans.append(2)\r\n        listyn = [True] + 
listyn[3:]\r\n    else:\r\n        Ans.append(3)\r\n        listyn = [True, False] + listyn[4:]\r\nprint(len(Ans))\r\nfor i in Ans:\r\n    print(i)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc092/C/3729351.py","file_name":"3729351.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"22796552074","text":"from django.shortcuts import render, redirect\nfrom .forms import RegisterForm\n\n# Create your views here.\n\ndef register(response): \n    if response.method == \"POST\": #saving new account\n        form = RegisterForm(response.POST)\n        if form.is_valid():\n            form.save()\n\n        return redirect('/home')\n    else: #creating account creation form\n        form = RegisterForm()\n\n    \n    return render(response, \"register/reg_page.html\", {\"form\": form})","repo_name":"pablogoat/DjangoTest","sub_path":"learnTest/register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11971841441","text":"import os\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom LAB.common.path import file_manager\nfrom LAB.task08.view.main_view import View\nfrom LAB.task08.view.tool import Tool\nfrom LAB.task08.view.menu import Menu\nfrom LAB.task08.view.mounting import Mounting\nfrom LAB.task08.control.provider import Provider\nfrom LAB.task08.control.auto import Auto\n\n\nTITLE_WINDOW = 'ROI TOOL'\nWINDOW_SIZE_WIDTH = 1024\nWINDOW_SIZE_HEIGHT = 768\nIMAGE_SAVE = 'LAB/image/'\nIMAGE_START = 'LAB'\nDEFAULT_THRESHOLD = 15\n\n\nclass Window(QWidget):\n\n    auto = None  # automatic ROI extraction\n    view = None  # image editing view\n    menu = None  # top-left File Menu button group\n    tool = None  # top toolbar button group\n    scroll = None  # scroll area\n    mounting = None  # content mounted inside the scroll area\n    provider = None  # data provider / manager\n    active_path = None  # path of the image active in the view\n    active_image_index = None  # index of the image active in the view\n\n    def __init__(self, parent=None):\n        super(Window, self).__init__(parent)\n\n        # window settings\n        self.setWindowTitle(TITLE_WINDOW)\n        self.setGeometry(0, 0, WINDOW_SIZE_WIDTH, WINDOW_SIZE_HEIGHT)\n\n        # create the View, Menu and Tool\n        self.view = View()\n        self.view.setup(WINDOW_SIZE_WIDTH, WINDOW_SIZE_HEIGHT)\n        self.view.threshold = DEFAULT_THRESHOLD\n        self.menu = Menu()\n        self.tool = Tool()\n\n        # create the scroll area and its button mounting\n        self.scroll = QScrollArea()\n        self.mounting = Mounting()\n\n        # create the automatic ROI handler\n        self.auto = Auto()\n\n        # create the data provider\n        self.provider = Provider()\n\n        # register Window methods on the callback objects.\n        self.menu.call_notice = self.notice\n        self.menu.call_scroll = self.scroll_data\n        self.menu.call_open_path = self.file_open\n        self.menu.call_save = self.mask_save\n        self.menu.call_activation = self.get_activation\n        self.menu.call_threshold = self.threshold_input\n\n        self.tool.call_data_set = self.data_set\n        self.tool.call_algorithm = self.algorithm\n\n        self.mounting.call_back = self.re_setting\n\n        # image \"file\" save paths\n        self.menu.image_save_path = IMAGE_SAVE\n        self.menu.image_start_path = IMAGE_START\n\n        # UI setup\n        self.ui_setup()\n\n    def ui_setup(self):\n        print('ui_setup')\n\n        # main form box\n        form_box = QHBoxLayout()\n        _left = QVBoxLayout()\n        _right = QVBoxLayout()\n\n        # left layout\n        _left.addLayout(self.menu)\n        _left.addWidget(self.scroll)\n\n        # right layout\n        _right.addLayout(self.tool)\n\n        # pin the image view to the top-left\n        self.view.setAlignment(Qt.AlignTop)\n        _right.addWidget(self.view, alignment=Qt.AlignLeft)\n\n        # place everything into the main form box\n        form_box.addLayout(_left)\n        
form_box.addLayout(_right)\n        form_box.setStretchFactor(_left, 0)\n        form_box.setStretchFactor(_right, 1)\n\n        # register the form box on the layout\n        self.setLayout(form_box)\n        self.show()\n\n    # mark - Call back method: menu\n    def get_activation(self):\n        return self.active_path\n\n    # mark - Call back method: menu\n    def file_open(self):\n        full_path = QFileDialog.getOpenFileName(self)\n        file_path = f'{full_path[0]}'\n        return file_path\n\n    # mark - Call back method: menu\n    def threshold_input(self, update):\n        int_value = int(update)\n        self.view.threshold = int_value\n\n    # save the mask image and register its data\n    # mark - Call back method: menu\n    def mask_save(self, mask_path):\n        self.view.save_image(mask_path)\n        last_name = self.active_path[self.active_path.rfind('/') + 1:]\n        self.provider.data_update_roi(last_name, mask_path)\n\n        # refresh the scroll area\n        self.mounting.create(self.provider.info_list)\n        self.scroll.setWidget(self.mounting.top_widget)\n\n        # move the scroll focus\n        v: QScrollBar = self.scroll.verticalScrollBar()\n        v.setSliderPosition(500)\n\n    # create the provider class data\n    # mark - Call back method: menu\n    def scroll_data(self, img_folder, roi_list):\n        # reset the provider.data_list data\n        self.provider.info_list = []\n        self.provider.create(IMAGE_START, img_folder, roi_list)\n        self.provider.data_read()\n        self.mounting.create(self.provider.info_list)\n        self.scroll.setWidget(self.mounting.top_widget)\n        print('Window: 이미지 속성 데이터 생성완료')\n\n    # mark - Call back method: tool\n    def data_set(self):\n        dicom_folder = self.file_open()\n\n        if dicom_folder == '':\n            return\n\n        self.provider.create_dicom(dicom_folder)\n\n    # mark - Call back method: tool\n    def algorithm(self):\n        print('window: algorithm')\n\n        i: int = 0\n        for mask in self.view.get_mask_list():\n            best_list = self.auto.roi_designation(self.provider.image_container,\n                                                  self.active_image_index,\n                                                  mask)\n            i = i + 1\n            print(best_list)\n\n    # mark - Call back method: mounting\n    def re_setting(self, path, index):\n        print('window: re_setting', path)\n        # path of the image to activate\n        self.active_path = path\n        self.active_image_index = index\n        self.tool.set_select_image(path)\n\n        # show the currently selected image in the menu.\n        current_image_text = f'Current :{index}'\n        self.menu.changeLabel(current_image_text)\n\n        # image that will go into the visible view.\n        img = QPixmap(path)\n\n        # build a relative path for OpenCV.\n        image_path = file_manager.relative_path(path, IMAGE_START)\n\n        # put the image into the visible view\n        self.view.q_graphic.setPixmap(img)\n        self.view.repaint()\n        self.view.re_setting(image_path)\n\n        # get the \"absolute path\" of the mask image in the roi folder.\n        roi_path = file_manager.get_roi_path(self.active_path)\n\n        # get the \"relative path\" of the mask image in the roi folder.\n        mask_path = file_manager.relative_path(roi_path, IMAGE_START)\n\n        # use the \"absolute path\" to check whether a mask image file exists in the roi folder\n        # and, if it does, display it on screen using the mask \"relative path\".\n        if os.path.isfile(roi_path):\n            self.view.set_mask(mask_path)\n\n    # mark - Call back method: menu, tool\n    def notice(self, title, msg):\n        print('Window: notice')\n        buttonReply = QMessageBox.question(self, title, msg, QMessageBox.Yes)\n        if buttonReply == QMessageBox.Yes:\n            print('Yes clicked.')\n\n","repo_name":"sungminYoon/NCC","sub_path":"WORK_ROI/LAB/task08/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"74852431912","text":"import pygame\nimport sys\nfrom windows import LoadingWindow, StartPage, LoginPage, CongratulationPage\nfrom TabbedInterface import TabbedInterface\n\n# Initialize Pygame\npygame.init()\n\nfont_path = \"Ressources/Sevastopol-Interface.ttf\" 
# Replace with the path to your font\n\n# Define the colors\nBLACK = (0, 0, 0)\nGREEN = (0,190,99)\nWHITE = (255, 255, 255)\n\n# Create the application's main class\nclass Game:\n    def __init__(self):\n\n        info = pygame.display.Info()\n        self.SCREEN_WIDTH = info.current_w\n        self.SCREEN_HEIGHT = info.current_h\n\n        # Set full screen\n        self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT), pygame.RESIZABLE)\n\n\n        # Initialize the various windows and pages\n        self.start_page = StartPage(self.screen)\n        self.loading_window = LoadingWindow(self.screen)\n        self.login_page = LoginPage(self.screen)\n        self.tabbed_interface = TabbedInterface(self.screen)\n        self.congratulation_page = CongratulationPage(self.screen)\n\n        # Set the initial state\n        self.current_window = self.start_page\n\n        # Initialize the timer\n        self.minuter_seconds = 1800\n        self.start_ticks = 0\n        self.minuter_affichage = False\n        self.minuter_value = 0\n        self.minuter_status = \"Play\" # Play, Stop\n        self.minuteurtext = \"\"\n        self.temps_ecoule = 0\n\n        pygame.display.set_caption(\"Interface Lamindanlsak\")\n        self.clock = pygame.time.Clock()\n        self.is_running = True\n\n    def handle_events(self):\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                self.is_running = False\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE and self.current_window == self.congratulation_page:\n                self.is_running = False # Exit full screen if the ESC key is pressed\n\n    def switch_window(self, new_window):\n        # Switch the current window\n        self.current_window = new_window\n\n    def handle_main_events(self, events):\n        for event in events:\n            if event.type == pygame.QUIT:\n                self.is_running = False\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE and self.current_window == self.congratulation_page:\n                self.is_running = False\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:\n                if self.current_window == self.start_page:\n                    print('ok')\n                    self.start_page.is_start_pressed = True\n\n\n    def timesectommss(self, t):\n        if t < 0:\n            return \"00:00\"\n        m, s = divmod(t, 60)\n        return \"%02d:%02d\" % (m, s)\n\n    def run(self):\n        while self.is_running:\n            events = pygame.event.get()\n            self.handle_main_events(events) # New function to handle the main events\n\n            if isinstance(self.current_window, StartPage) and self.current_window.is_start_pressed:\n                self.switch_window(self.loading_window)\n                self.start_ticks = pygame.time.get_ticks() # start tick\n\n            if self.loading_window.is_loading_complete:\n                print(\"Loading complete\")\n                self.switch_window(self.login_page)\n                self.loading_window.is_loading_complete = False\n\n            if self.login_page.is_login_complete:\n                print(\"Login complete\")\n                self.switch_window(self.tabbed_interface)\n                self.minuter_affichage = True\n                self.login_page.is_login_complete = False\n\n            if self.current_window == self.login_page:\n                self.login_page.handle_events(events)\n\n            if self.current_window == self.tabbed_interface:\n                self.tabbed_interface.handle_events(events)\n\n                if self.tabbed_interface.onglet[4].is_finished:\n                    self.minuter_status = \"Stop\"\n                    self.minuter_affichage = False\n                    self.switch_window(self.congratulation_page)\n            self.current_window.update()\n            self.current_window.draw()\n\n\n            # Timer\n            if self.minuter_status == \"Play\":\n                self.temps_ecoule = (pygame.time.get_ticks() - self.start_ticks) // 1000\n                self.minuteurtext = self.timesectommss(self.minuter_seconds - self.temps_ecoule)\n\n            if self.minuter_affichage:\n                # 
Display the timer at the top right of the screen, in a black box with a green border\n                pygame.draw.rect(self.screen, BLACK, [self.SCREEN_WIDTH - 250, 50, 200, 100], 0)\n                pygame.draw.rect(self.screen, GREEN, [self.SCREEN_WIDTH - 250, 50, 200, 100], 2)\n                font = pygame.font.Font(font_path, 60)\n                text = font.render(self.minuteurtext, True, GREEN)\n                self.screen.blit(text, [self.SCREEN_WIDTH - 200, 70])\n\n            if self.current_window == self.congratulation_page:\n                self.congratulation_page.drawtime(self.timesectommss(self.temps_ecoule))\n\n\n\n            pygame.display.flip() # If you want to use a different FPS value, adjust it here\n            self.clock.tick(60)\n\n# Main code\nif __name__ == \"__main__\":\n    game = Game()\n    game.run()\n    pygame.quit()\n    sys.exit()\n","repo_name":"PaladinSkyland/InterfaceLamindanl-sak","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20855824764","text":"from tkinter import *\nfrom MemorySetter import MemorySetter\nfrom CodeEditor import CodeEditor\n\nclass BFG9000:\n\n    def __init__(self, loader, editor, editorHandler, minus):\n        self.__loader = loader\n        self.__editor = editor\n        self.__editorHandler = editorHandler\n        self.__loader.BFG9000 = self\n        self.actual = None\n        self.__w = self.__editorHandler.getWindowSize()[0]*0.66\n        self.__h = self.__editorHandler.getWindowSize()[1]-minus-25\n\n        self.__bankBox = self.__loader.listBoxes[\"bankBox\"].getListBoxAndScrollBar()[0]\n        self.__sectionBox = self.__loader.listBoxes[\"sectionBox\"].getListBoxAndScrollBar()[0]\n\n        self.__lastScaleX = self.__editorHandler.getScales()[0]\n        self.__lastScaleY = self.__editorHandler.getScales()[1]\n\n\n        self.frame = Frame(self.__editor, width=self.__editorHandler.getWindowSize()[0],\n                           height=self.__h,\n                           bg = self.__loader.colorPalettes.getColor(\"window\"),\n                           #fg=self.__loader.colorPalettes.getColor(\"font\"),\n                           )\n\n        self.frame.pack_propagate(False)\n        self.frame.pack(side=BOTTOM, anchor=S, fill=BOTH)\n        self.__classicEditorTypes = [\"enter\", \"leave\", \"overscan\", \"vblank\"]\n\n        self.__selectedBank = self.__loader.listBoxes[\"bankBox\"].getSelectedName()\n        self.__selectedSection = self.__loader.listBoxes[\"sectionBox\"].getSelectedName()\n\n        self.__mainFrame = Frame(self.frame, width=self.__w, height=self.__h,\n                                 bg = self.__loader.colorPalettes.getColor(\"window\"),\n                                 #fg=self.__loader.colorPalettes.getColor(\"font\"),\n                                 )\n\n        self.__mainFrame.pack_propagate(False)\n        self.first = True\n\n        self.__w2 = self.__editorHandler.getWindowSize()[0]*0.18\n        self.__leftFrame = Frame(self.frame, width=self.__w2, height=self.__h,\n                                 bg=self.__loader.colorPalettes.getColor(\"window\"))\n        self.__rightFrame = Frame(self.frame, width=self.__w2, height=self.__h,\n                                  bg=self.__loader.colorPalettes.getColor(\"window\"))\n\n        # self.__mainFrame.config(bg=\"red\")\n        # self.__leftFrame.config(bg=\"blue\")\n        # self.__rightFrame.config(bg=\"blue\")\n\n        self.__leftFrame.pack_propagate(False)\n        self.__rightFrame.pack_propagate(False)\n\n        self.__leftFrame.pack(side=LEFT, anchor=SW, fill=Y)\n        self.__mainFrame.pack(side=LEFT, anchor=S, fill=BOTH)\n        self.__rightFrame.pack(side=RIGHT, anchor=SE, fill=Y)\n        self.__lastPath = None\n\n        from threading import Thread\n        t = Thread(target=self.checker)\n        t.daemon = True\n        t.start()\n\n        r = Thread(target=self.resizer)\n        r.daemon = True\n        r.start()\n\n\n    def saveFrameToMemory(self, bank, section):\n        if bank == \"bank1\" or section == 
\"local_variables\":\n self.__loader.virtualMemory.writeVariablesToMemory(bank)\n\n\n def loadFromMemoryToFrame(self, bank, section):\n\n if bank == \"bank1\" or section == \"local_variables\":\n self.__loader.virtualMemory.setVariablesFromMemory(bank)\n self.actual = MemorySetter(self.__loader, self.__mainFrame, self.__leftFrame, self.__rightFrame, bank, \"MemorySetter\")\n elif section in self.__classicEditorTypes:\n self.actual = CodeEditor(self.__loader, self.__mainFrame, self.__leftFrame, self.__rightFrame, bank, \"CodeEditor\")\n\n\n def saveAllCode(self):\n if self.__selectedBank!=\"bank1\" and self.__loader.frames[\"CodeEditor\"].changed == True:\n self.__loader.virtualMemory.codes[self.__selectedBank][self.__selectedSection].code = self.__loader.currentEditor.get(0.0, END)\n self.__loader.virtualMemory.codes[self.__selectedBank][self.__selectedSection].changed = True\n self.__loader.virtualMemory.archieve()\n\n def clearFrames(self, color):\n #if self.__selectedSection in self.__classicEditorTypes:\n # self.saveAllCode()\n\n for item in self.__loader.destroyable:\n item.destroy()\n\n self.__loader.mainWindow.stopThreads()\n #del self.actual\n self.__loader.currentEditor = None\n\n self.destroyAll(self.__mainFrame.place_slaves())\n self.destroyAll(self.__mainFrame.pack_slaves())\n self.destroyAll(self.__leftFrame.pack_slaves())\n self.destroyAll(self.__leftFrame.place_slaves())\n self.destroyAll(self.__rightFrame.pack_slaves())\n self.destroyAll(self.__rightFrame.place_slaves())\n\n\n self.__mainFrame.config(bg=color)\n self.__leftFrame.config(bg=color)\n self.__rightFrame.config(bg=color)\n self.__loader.BFG9000.frame.config(bg=color)\n\n def destroyAll(self, list):\n for item in list:\n try:\n item.destroy()\n except:\n pass\n\n\n\n def getSelected(self):\n return(self.__selectedBank, self.__selectedSection)\n\n def checker(self):\n from time import sleep\n while self.__editorHandler.dead==False:\n if self.first == True or (\n self.__selectedBank != self.__loader.listBoxes[\"bankBox\"].getSelectedName() or\n (self.__selectedSection != self.__loader.listBoxes[\"sectionBox\"].getSelectedName() and\n self.__selectedBank != \"bank1\"\n )) or self.__lastPath !=self.__loader.mainWindow.projectPath:\n\n self.__lastPath = self.__loader.mainWindow.projectPath\n self.first = False\n self.actual = None\n\n\n if self.__lastPath ==None:\n from AtariLogo import AtariLogo\n self.clearFrames(\"black\")\n self.actual = AtariLogo(self.__loader, self.__mainFrame, self.__leftFrame, self.__rightFrame)\n\n else:\n self.clearFrames(self.__loader.colorPalettes.getColor(\"window\"))\n self.saveFrameToMemory(self.__selectedBank, self.__selectedSection)\n\n self.__selectedBank = self.__loader.listBoxes[\"bankBox\"].getSelectedName()\n if self.__loader.virtualMemory.locks[self.__selectedBank] != None:\n self.__selectedSection = self.__loader.listBoxes[\"sectionBox\"].getSelectedName()\n\n from LockNChase import LockNChase\n self.clearFrames(\"black\")\n self.actual = LockNChase(self.__loader, self.__mainFrame, self.__leftFrame, self.__rightFrame)\n\n else:\n if self.__selectedBank == \"bank1\":\n self.__selectedSection = \"global_variables\"\n\n else:\n self.__selectedSection = self.__loader.listBoxes[\"sectionBox\"].getSelectedName()\n #print(self.__selectedBank, self.__selectedSection)\n\n self.loadFromMemoryToFrame(self.__selectedBank, self.__selectedSection)\n sleep(0.4)\n sleep(0.4)\n\n def resizer(self):\n from time import sleep\n while self.__editorHandler.dead == False:\n if (self.__lastScaleX != 
self.__editorHandler.getScales()[0] or\n                    self.__lastScaleY != self.__editorHandler.getScales()[1]):\n                self.__lastScaleX = self.__editorHandler.getScales()[0]\n                self.__lastScaleY = self.__editorHandler.getScales()[1]\n\n                self.frame.config(\n                    width=self.__editorHandler.getWindowSize()[0] * self.__lastScaleX,\n                    height=self.__h * self.__lastScaleY\n                )\n\n                self.__leftFrame.config(\n                    width=self.__w2 * self.__lastScaleX,\n                    height=self.__h * self.__lastScaleY\n                )\n\n                self.__mainFrame.config(\n                    width=self.__w * self.__lastScaleX,\n                    height=self.__h * self.__lastScaleY\n                )\n\n                self.__rightFrame.config(\n                    width=self.__w2 * self.__lastScaleX,\n                    height=self.__h * self.__lastScaleY\n                )\n\n                sleep(0.02)\n                continue\n\n            sleep(0.05)\n\n    def selectOnBoxes(self, num):\n        self.__bankBox.select_clear(0,END)\n        self.__bankBox.select_set(num - 1)\n        if num > 1:\n            from threading import Thread\n            t = Thread(target=self.selectSecond, args=[num])\n            t.daemon = True\n            t.start()\n\n    def selectSecond(self, num):\n        from time import sleep\n        while self.__sectionBox.curselection()[0]!=2:\n            self.__sectionBox.select_clear(0, END)\n            self.__sectionBox.select_set(2)","repo_name":"MemberA2600/PublicForLinkedin","sub_path":"Fortari2600/src/Handlers/BFG9000.py","file_name":"BFG9000.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36776283198","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n\nclass Teacher(models.Model):\n    name = models.CharField(\"Имя\", max_length=200)\n    description = models.TextField(\"Описание\")\n    study_place = models.CharField(\"Место учёбы\", max_length=200)\n    job_place = models.CharField(\"Место работы\", max_length=200)\n    city = models.CharField(\"Город\", max_length=200)\n    photo = models.URLField(\"Фото\")\n    grade_mean = models.FloatField(\n        \"Средняя оценка\",\n        validators=[MaxValueValidator(5), MinValueValidator(0)],\n        default=5\n    )\n    grade_count = models.PositiveBigIntegerField(\"Количество оценок\", default=0)\n\n    def __str__(self):\n        return f\"\"\n\n    class Meta:\n        verbose_name = \"Учитель\"\n        verbose_name_plural = \"Учителя\"\n\n\nclass Review(models.Model):\n    author = models.ForeignKey(\n        User,\n        models.CASCADE,\n        related_name=\"reviews_owner\",\n        verbose_name=\"Рецензент\"\n    )\n    title = models.CharField(\"Заголовок\", max_length=200)\n    main_text = models.TextField(\"Основной текст\")\n    grade = models.PositiveSmallIntegerField(\"Оценка\", validators=[MaxValueValidator(5)])\n    teacher = models.ForeignKey(\n        Teacher,\n        models.CASCADE,\n        related_name=\"reviews\",\n        verbose_name=\"Учитель\"\n    )\n\n    def __str__(self):\n        return f\"\"\n    \n    class Meta:\n        verbose_name = \"Отзыв\"\n        verbose_name_plural = \"Отзывы\"\n","repo_name":"IgnatInyutsin/teacher-rate","sub_path":"backend/api/restapi/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39441440962","text":"from itertools import count\nfrom eth_utils import to_tuple\nfrom typing import Tuple, Iterator, Sequence\nfrom hashlib import blake2b\nfrom .fields import Fr\n\n\n@to_tuple\ndef get_pseudo_random(seed: bytes, n: int) -> Iterator[Fr]:\n    h = seed\n    for _ in range(n):\n        h = blake2b(h, digest_size=32).digest()\n        yield Fr(int.from_bytes(h, \"little\"))\n\n\ndef all_different(_tuple: Sequence[Fr]) -> bool:\n    _list = 
list(_tuple)\n for _ in range(len(_tuple)):\n a = _list.pop()\n if a == Fr(0):\n return False\n for b in _list:\n if a == b:\n return False\n return True\n\n\ndef _get_new_seed(seed: bytes, nonce: int):\n nonce_bytes = f\"{nonce:04d}\".encode() if nonce < 10000 else bytes(nonce)\n return seed + b\"_matrix_\" + nonce_bytes\n\n\n@to_tuple\ndef get_matrix(t: int, seed: bytes) -> Iterator[Tuple[Fr, ...]]:\n # iterate to get a cmatrix whose elements are all different\n for nonce in count(0):\n _seed = _get_new_seed(seed, nonce)\n cmatrix = get_pseudo_random(_seed, t * t)\n if all_different(cmatrix):\n break\n\n for i in range(t):\n yield tuple(Fr(1) / (cmatrix[i] - cmatrix[t + j]) for j in range(t))\n\n\ndef get_constants(t: int, seed: bytes, rounds: int):\n return get_pseudo_random(seed + b\"_constants\", rounds)\n\n\ndef recommend_parameter(elements_length: int) -> Tuple[int, int, int]:\n \"\"\"\n A faster way to get recommended parameters from the result of running parameter_finder.py.\n \"\"\"\n if elements_length < 2:\n raise ValueError(\"Number of elements should be at least 2\")\n\n # Poseidon requires 1 width to be reserved for security.\n t = elements_length + 1\n roundsF = 8\n if t == 3:\n roundsP = 49\n elif t <= 7:\n roundsP = 50\n elif t <= 14:\n roundsP = 51\n elif t <= 29:\n roundsP = 52\n else:\n raise ValueError(\n \"Unsupported elements_length, please run parameter_finder.py to get the parameter\"\n )\n return t, roundsF, roundsP\n","repo_name":"ChihChengLiang/misc_crypto","sub_path":"misc_crypto/poseidon/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6043974517","text":"import sys\n\n# Dictionnaries for cookbook recipes\ncookbook = {\n\t\"sandwich\":{\n\t\t\"details\":['salad', 'tomatoe', 'cheese', 'steak', 'mushroom', 'pizza', 'shoes', 'coke'],\n\t\t\"type\":\"snack\",\n\t\t\"time\":\"5\"\n\t},\n\t\"cake\":{\n\t\t\"details\":['flour', 'sugar', 'eggs'],\n\t\t\"type\":\"dessert\",\n\t\t\"time\":\"60\"\n\t},\n\t\"salad\":{\n\t\t\"details\":['salad', 'tomatoe', 'mozzarella', 'vinaigrette'],\n\t\t\"type\":\"starter\",\n\t\t\"time\":\"10\"\n\t},\n}\n\n# Types of recipes\ntypes = ['snack', 'breakfast', 'meal', 'dessert']\n\n# List for select arg in hud\nselect_tab = []\n\n# Func for exit program\ndef exit():\n\tprint(\"\\nCookbook closed.\")\n\tsys.exit()\n\n# Func for print error message\ndef print_error(error_select, select, nb):\n\thud = create_dic()\n\tprint(hud[\"error\"][error_select])\n\tindex = input(\">> \")\n\treturn index;\n\n# Func for print cookbook\ndef print_cookbook():\n\tstr = \"\"\n\tfor x in cookbook:\n\t\tstr += x\n\t\tstr += \", \"\n\tprint(\"\\nCookbook :\\n\" + str + \"\\n\")\n\tprint_menu(\"main\", 0)\n\n# Func for check and print recipe\ndef print_recipe_check(recipe):\n\tfor x in cookbook:\n\t\tif x == recipe:\n\t\t\treturn recipe;\n\trecipe = print_error(\"print_recipe_no_exist\", \"print_recipe\", 1)\n\trecipe = print_recipe_check(recipe)\n\treturn recipe\n\ndef print_recipe():\n\trecipe = print_menu(\"print_recipe\", 1)\n\trecipe = print_recipe_check(recipe)\n\tprint(\"\\nIngredients list: \" + str(cookbook[recipe]['details']))\n\tprint(\"To be eaten for \" + str(cookbook[recipe]['type']))\n\tprint(\"Takes \" + str(cookbook[recipe]['time']) + \" minutes of cooking.\\n\")\n\tprint_menu(\"main\", 0)\n\n# Func for check en delete recipe\ndef del_recipe_check(recipe):\n\tfor x in cookbook:\n\t\tif recipe == x:\n\t\t\tdel 
cookbook[recipe]\n\t\t\treturn recipe;\n\trecipe = print_error(\"del_recipe_no_exist\", \"print_recipe\", 1)\n\trecipe = del_recipe_check(recipe)\n\treturn recipe;\n\ndef del_recipe():\n\trecipe = print_menu(\"del_recipe\", 1)\n\trecipe = del_recipe_check(recipe)\n\tprint(\"\\n\" + recipe + \" has been deleted\\n\")\n\tprint_menu(\"main\", 0)\n\n# Func for check recipe input\ndef add_recipe_check(recipe):\n\tfor x in cookbook:\n\t\tif recipe == x:\n\t\t\trecipe = print_error(\"add_recipe_exist\", \"add_recipe\", 1)\n\t\t\trecipe = add_recipe_check(recipe)\n\t\t\treturn recipe;\n\tif recipe == \"exit\":\n\t\texit()\n\treturn recipe\n\ndef add_recipe_details(nb, list):\n\tnew = input(\"Add new ingredients, write stop for next step:\\n>> \")\n\tif new == \"stop\":\n\t\treturn list;\n\telif new == \"exit\":\n\t\tprint(\"You can't leave now!\")\n\telif len(new) <= 0:\n\t\tprint(\"Enter an ingredient\")\n\t\tadd_recipe_details(nb, list)\n\telse:\n\t\tlist.insert(nb, new)\n\t\tnb += 1\n\t\tadd_recipe_details(nb, list)\n\treturn list;\n\ndef add_recipe_type():\n\ttype = input(\"Enter a type (snack, breakfast, meal, dessert)\\n>> \")\n\tif type in types:\n\t\treturn type;\n\telse:\n\t\treturn add_recipe_type()\n\treturn type;\n\ndef add_recipe_time():\n\ttime = input(\"Enter cooking time for recipe\\n>> \")\n\tif len(time) <= 0 or not time.isnumeric() or int(time) <= 0:\n\t\tprint(\"Not a valid time\")\n\t\treturn add_recipe_time()\n\treturn time;\n\n\n# Func for add new recipes\ndef add_recipe():\n\trecipe = print_menu(\"add_recipe\", 1)\n\trecipe = add_recipe_check(recipe)\n\tlist = []\n\tlist = add_recipe_details(0, list)\n\ttype = add_recipe_type()\n\ttime = add_recipe_time()\n\tnew_dic = {recipe:{\"details\":list, \"type\":type, \"time\":time}}\n\tcookbook.update(new_dic)\n\tprint(\"\\nRecipe \" + recipe + \" has been added to the cookbook!\\n\")\n\tprint_menu(\"main\", 0)\n\n# Function to check and call the specific function\n# in relation to the selected elements\ndef selections(select, i, nb):\n\terr = 0\n\tindex = input(\">> \")\n\tif index == \"exit\":\n\t\texit()\n\tif nb == 0:\n\t\tfor char in index:\n\t\t\tif not char.isnumeric():\n\t\t\t\terr = 1\n\t\t\t\tbreak ;\n\t\tif err == 1 or len(index) <= 0 or int(index) >= i:\n\t\t\tprint(\"\\nThis option does not exist, please type the corresponding number.\\nTo exit, enter \" + str(i-1))\n\t\t\tselections(select, i, 0)\n\t\t\tsys.exit()\n\t\tselect_tab[int(index)-1]()\n\telif nb == 1:\n\t\tif i == 1:\n\t\t\tif len(index) <= 0:\n\t\t\t\tprint_menu(select, nb)\n\t\t\t\tsys.exit()\n\t\t\treturn index;\n\treturn ;\n\n\n# Func for print_menu\ndef print_menu(select, nb):\n\tselect_tab.clear()\n\thud = create_dic()\n\ti = 0\n\tfor x in hud[select]:\n\t\tif x == \"title\":\n\t\t\tif i == 0:\n\t\t\t\tprint(hud[select][x])\n\t\telif x == \"func\":\n\t\t\tif i == 0:\n\t\t\t\tselect_tab.insert(i, hud[select][x])\n\t\telse :\n\t\t\tselect_tab.insert(i, hud[select][x])\n\t\t\tprint(str(i+1) +\": \"+ x)\n\t\t\ti += 1\n\tindex = selections(select, i+1, nb)\n\treturn index;\n\n# Dictionaries for hud menu and error messages\ndef create_dic():\n\thud = {\n\t\t\"main\":{\n\t\t\t\"title\":\"Please select an option by typing the corresponding number:\",\n\t\t\t\"Add recipe\":add_recipe,\n\t\t\t\"Delete a recipe\":del_recipe,\n\t\t\t\"Print a recipe\":print_recipe,\n\t\t\t\"Print the cookbook\":print_cookbook,\n\t\t\t\"Quit\":exit\n\t\t},\n\t\t\"add_recipe\":{\n\t\t\t\"title\":\"\\nPlease enter the recipe's name to add 
it:\",\n\t\t},\n\t\t\"del_recipe\":{\n\t\t\t\"title\":\"\\nPlease enter the recipe's name delete it:\"\n\t\t},\n\t\t\"print_recipe\":{\n\t\t\t\"title\":\"\\nPlease enter the recipe's name to get its details:\"\n\t\t},\n\t\t\"error\":{\n\t\t\t\"add_recipe_exist\":\"\\nRecipe already exist, please retry\",\n\t\t\t\"print_recipe_no_exist\":\"\\nYour recipe doesnt exist, please retry\",\n\t\t\t\"del_recipe_no_exist\":\"\\nYour recipe doesnt exist, please retry\"\n\t\t}\n\t}\n\treturn hud;\n\nprint_menu(\"main\", 0)\n","repo_name":"TheoZerbibi/BootCamp42AI","sub_path":"Python/day00/ex06/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15468091014","text":"import xmltodict\nimport xml.etree.ElementTree as xml\n\n\ndef get_dict(file):\n with open(file, \"r\") as hot:\n text = hot.read()\n d = xmltodict.parse(text)\n return d\n\ndef get_data(d):\n new = {}\n lst = []\n for i in d['Hotels']['Hotel']:\n if 'Hilton' in i['Name'] and 'new york' in i['Address']['State'].lower() or 'ny' in i['Address'][\n 'State'].lower():\n new['Hotel'] = {}\n new['Hotel']['@Price'] = i['@Price']\n new['Hotel']['Name'] = i['Name']\n new['Hotel']['Address'] = i['Address'],\n d = dict(names=i['Name'], price=i['@Price'], adress=i['Address']['AddressLine'])\n lst.append(d)\n return lst\n\ndef createXML(filename, lst):\n root = xml.Element(\"Lists\")\n\n appt = xml.Element(\"Names\")\n root.append(appt)\n for i in lst:\n begin = xml.SubElement(appt, \"Name\")\n begin.text = i['names']\n\n appt = xml.Element(\"Prices\")\n root.append(appt)\n for i in lst:\n begin = xml.SubElement(appt, \"Price\")\n begin.text = i['price']\n\n appt = xml.Element(\"Addresses\")\n root.append(appt)\n for i in lst:\n begin = xml.SubElement(appt, \"Address\")\n begin.text = i['adress']\n\n tree = xml.ElementTree(root)\n\n with open(filename, \"wb\") as rez:\n tree.write(rez)\n\n\nif __name__ == \"__main__\":\n input_file = 'Hotels.xml'\n output_file = 'Hilton.xml'\n dict_from_xml = get_dict(input_file)\n data_as_list = get_data(dict_from_xml)\n createXML(output_file, data_as_list)\n","repo_name":"deleroi/xml_parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71621762473","text":"class Solution:\n def findMin(self, nums: List[int]) -> int:\n \n # stupid O(n) or sort O(nlogn)\n \n# res = float('inf')\n# for i in nums:\n# res = min(res, i)\n# return res\n \n # divide and conquer\n \n# return self.findMini(nums, 0, len(nums)-1)\n \n# def findMini(self, nums, start, end):\n# if start == end: return nums[start]\n# if nums[start] < nums[end]: return nums[start]\n# mid = start + (end-start)//2\n# return min(self.findMini(nums, start, mid), self.findMini(nums, mid+1, end))\n \n # binary search\n \n left, right = 0, len(nums)-1\n while left < right:\n mid = left + (right-left)//2\n if nums[mid] < nums[right]:\n right = mid\n elif nums[mid] > nums[right]:\n left = mid + 1\n else:\n right -= 1\n return nums[left]\n ","repo_name":"thomasyu929/Leetcode","sub_path":"BinarySearch/findMinimuminRotatedSortedList_II.py","file_name":"findMinimuminRotatedSortedList_II.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23947272384","text":"'''\nThis script contains code for a 
Streamlit app to recommend Refinery29 money\nDiaries that are similar to the user. The user inputs their age, salary, and a\nmini diary of the previous day. The app uses a pickled clustering model to cluster\nthe user to the original money diaries and then performs cosine similarity to\nrecommend the 3 diaries most similar to the user's mini diary.\n'''\n\nimport streamlit as st\nimport streamlit.components.v1 as components\nfrom joblib import load\n\nimport numpy as np\n\nfrom sklearn.metrics import pairwise_distances\n\n\nst.title('Refinery29 Money Diary Recommender')\nst.subheader(\n 'Input some information about yourself and I\\'ll recommend a'\n ' few money diaries of people similar to you!'\n)\n\nst.text('')\n\nst.markdown(\n 'This recommender was created using Natural Language Processing (NLP),'\n ' Non-Negative Matrix Factorization (NMF), K-means clustering, and cosine'\n ' similarity.'\n)\n\nst.text('')\n\nage = st.number_input('Age', min_value=20, max_value=100, value=25)\nsalary = st.number_input('Salary', min_value=20000, max_value=600000, value=50000)\n\nuser_diary = [st.text_area('Write a mini diary (paragraph) about what you did yesterday.')]\n\n# predict cluster\nscaler = load('scaler.pkl')\nkfit = load('kfit.pkl')\nnew_observation = np.array([age, salary])\nnew_observation_scaled = scaler.transform(new_observation.reshape(1, -1))\n\n# save predicted cluster\nnew_observation_cluster = int(kfit.predict(new_observation_scaled))\n\n# topic model with clusters\nif new_observation_cluster == 0:\n cvn0 = load('cvn0.pkl')\n doc_topic0 = load('doc_topic0.pkl')\n nmf_model_cluster_0 = load('nmf_model_cluster_0.pkl')\n cluster_0 = load('cluster_0.pkl')\n vector_diary_0 = cvn0.transform(user_diary)\n tranformed_text_0 = nmf_model_cluster_0.transform(vector_diary_0)\n diary_0_rec_array = pairwise_distances(tranformed_text_0, doc_topic0, metric='cosine').argsort()\n diary_0_rec_1 = diary_0_rec_array[0, 0]\n diary_0_rec_2 = diary_0_rec_array[0, 1]\n diary_0_rec_3 = diary_0_rec_array[0, 2]\n diary_0_link_1 = cluster_0.iloc[diary_0_rec_1, 0]\n diary_0_link_2 = cluster_0.iloc[diary_0_rec_2, 0]\n diary_0_link_3 = cluster_0.iloc[diary_0_rec_3, 0]\n diary_0_url_rec1 = 'https://www.refinery29.com' + diary_0_link_1\n diary_0_url_rec2 = 'https://www.refinery29.com' + diary_0_link_2\n diary_0_url_rec3 = 'https://www.refinery29.com' + diary_0_link_3\n if st.button('Recommendation #1'):\n components.iframe(diary_0_url_rec1, height=600, scrolling=True)\n if st.button('Recommendation #2'):\n components.iframe(diary_0_url_rec2, height=600, scrolling=True)\n if st.button('Recommendation #3'):\n components.iframe(diary_0_url_rec3, height=600, scrolling=True)\n\nif new_observation_cluster == 1:\n cvn1 = load('cvn1.pkl')\n doc_topic1 = load('doc_topic1.pkl')\n nmf_model_cluster_1 = load('nmf_model_cluster_1.pkl')\n cluster_1 = load('cluster_1.pkl')\n vector_diary_1 = cvn1.transform(user_diary)\n tranformed_text_1 = nmf_model_cluster_1.transform(vector_diary_1)\n diary_1_rec_array = pairwise_distances(tranformed_text_1, doc_topic1, metric='cosine').argsort()\n diary_1_rec_1 = diary_1_rec_array[0, 0]\n diary_1_rec_2 = diary_1_rec_array[0, 1]\n diary_1_rec_3 = diary_1_rec_array[0, 2]\n diary_1_link_1 = cluster_1.iloc[diary_1_rec_1, 0]\n diary_1_link_2 = cluster_1.iloc[diary_1_rec_2, 0]\n diary_1_link_3 = cluster_1.iloc[diary_1_rec_3, 0]\n diary_1_url_rec1 = 'https://www.refinery29.com' + diary_1_link_1\n diary_1_url_rec2 = 'https://www.refinery29.com' + diary_1_link_2\n diary_1_url_rec3 = 
'https://www.refinery29.com' + diary_1_link_3\n if st.button('Recommendation #1'):\n components.iframe(diary_1_url_rec1, height=600, scrolling=True)\n if st.button('Recommendation #2'):\n components.iframe(diary_1_url_rec2, height=600, scrolling=True)\n if st.button('Recommendation #3'):\n components.iframe(diary_1_url_rec3, height=600, scrolling=True)\n\nif new_observation_cluster == 2:\n cvn2 = load('cvn2.pkl')\n doc_topic2 = load('doc_topic2.pkl')\n nmf_model_cluster_2 = load('nmf_model_cluster_2.pkl')\n cluster_2 = load('cluster_2.pkl')\n vector_diary_2 = cvn2.transform(user_diary)\n tranformed_text_2 = nmf_model_cluster_2.transform(vector_diary_2)\n diary_2_rec_array = pairwise_distances(tranformed_text_2, doc_topic2, metric='cosine').argsort()\n diary_2_rec_1 = diary_2_rec_array[0, 0]\n diary_2_rec_2 = diary_2_rec_array[0, 1]\n diary_2_rec_3 = diary_2_rec_array[0, 2]\n diary_2_link_1 = cluster_2.iloc[diary_2_rec_1, 0]\n diary_2_link_2 = cluster_2.iloc[diary_2_rec_2, 0]\n diary_2_link_3 = cluster_2.iloc[diary_2_rec_3, 0]\n diary_2_url_rec1 = 'https://www.refinery29.com' + diary_2_link_1\n diary_2_url_rec2 = 'https://www.refinery29.com' + diary_2_link_2\n diary_2_url_rec3 = 'https://www.refinery29.com' + diary_2_link_3\n if st.button('Recommendation #1'):\n components.iframe(diary_2_url_rec1, height=600, scrolling=True)\n if st.button('Recommendation #2'):\n components.iframe(diary_2_url_rec2, height=600, scrolling=True)\n if st.button('Recommendation #3'):\n components.iframe(diary_2_url_rec3, height=600, scrolling=True)\n\nif new_observation_cluster == 3:\n cvn3 = load('cvn3.pkl')\n doc_topic3 = load('doc_topic3.pkl')\n nmf_model_cluster_3 = load('nmf_model_cluster_3.pkl')\n cluster_3 = load('cluster_3.pkl')\n vector_diary_3 = cvn3.transform(user_diary)\n tranformed_text_3 = nmf_model_cluster_3.transform(vector_diary_3)\n diary_3_rec_array = pairwise_distances(tranformed_text_3, doc_topic3, metric='cosine').argsort()\n diary_3_rec_1 = diary_3_rec_array[0, 0]\n diary_3_rec_2 = diary_3_rec_array[0, 1]\n diary_3_rec_3 = diary_3_rec_array[0, 2]\n diary_3_link_1 = cluster_3.iloc[diary_3_rec_1, 0]\n diary_3_link_2 = cluster_3.iloc[diary_3_rec_2, 0]\n diary_3_link_3 = cluster_3.iloc[diary_3_rec_3, 0]\n diary_3_url_rec1 = 'https://www.refinery29.com' + diary_3_link_1\n diary_3_url_rec2 = 'https://www.refinery29.com' + diary_3_link_2\n diary_3_url_rec3 = 'https://www.refinery29.com' + diary_3_link_3\n if st.button('Recommendation #1'):\n components.iframe(diary_3_url_rec1, height=600, scrolling=True)\n if st.button('Recommendation #2'):\n components.iframe(diary_3_url_rec2, height=600, scrolling=True)\n if st.button('Recommendation #3'):\n components.iframe(diary_3_url_rec3, height=600, scrolling=True)\n\nif new_observation_cluster == 4:\n cvn4 = load('cvn4.pkl')\n doc_topic4 = load('doc_topic4.pkl')\n nmf_model_cluster_4 = load('nmf_model_cluster_4.pkl')\n cluster_4 = load('cluster_4.pkl')\n vector_diary_4 = cvn4.transform(user_diary)\n tranformed_text_4 = nmf_model_cluster_4.transform(vector_diary_4)\n diary_4_rec_array = pairwise_distances(tranformed_text_4, doc_topic4, metric='cosine').argsort()\n diary_4_rec_1 = diary_4_rec_array[0, 0]\n diary_4_rec_2 = diary_4_rec_array[0, 1]\n diary_4_rec_3 = diary_4_rec_array[0, 2]\n diary_4_link_1 = cluster_4.iloc[diary_4_rec_1, 0]\n diary_4_link_2 = cluster_4.iloc[diary_4_rec_2, 0]\n diary_4_link_3 = cluster_4.iloc[diary_4_rec_3, 0]\n diary_4_url_rec1 = 'https://www.refinery29.com' + diary_4_link_1\n diary_4_url_rec2 = 'https://www.refinery29.com' + 
diary_4_link_2\n diary_4_url_rec3 = 'https://www.refinery29.com' + diary_4_link_3\n if st.button('Recommendation #1'):\n components.iframe(diary_4_url_rec1, height=600, scrolling=True)\n if st.button('Recommendation #2'):\n components.iframe(diary_4_url_rec2, height=600, scrolling=True)\n if st.button('Recommendation #3'):\n components.iframe(diary_4_url_rec3, height=600, scrolling=True)\n\nst.text('')\nst.text('')\nst.markdown(\n 'My name is Lisa VanderVoort, I love reading Refinery29 Money Diaries, and'\n ' working on data science projects. You can read more about my projects'\n ' [here](https://lvandervoort89.github.io) and find my project code'\n ' [here.](https://github.com/lvandervoort89/analyzing_how_millennial_women_spend_time_and_money)'\n)\n","repo_name":"lvandervoort89/analyzing_how_millennial_women_spend_time_and_money","sub_path":"Streamlit/r29_recommender.py","file_name":"r29_recommender.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10377166579","text":"import psycopg2\r\n\r\nquery_1 = '''\r\nSELECT ins_charge,hum_name \r\nFROM insuranse INNER JOIN humans ON insuranse.hum_id = humans.hum_id\r\n'''\r\nquery_2 = '''\r\nSELECT isSmoker,COUNT(*)\r\nFROM smokers \r\nGROUP BY isSmoker\r\n'''\r\n\r\nquery_3 = '''\r\nSELECT ins_charge,hum_age \r\nFROM insuranse INNER JOIN humans ON insuranse.hum_id = humans.hum_id\r\n'''\r\n\r\ndef connect():\r\n \"\"\" Connect to the PostgreSQL database server \"\"\"\r\n conn = None\r\n try:\r\n # connect to the PostgreSQL server\r\n print('Connecting to the PostgreSQL database...')\r\n conn = psycopg2.connect(\r\n host=\"localhost\",\r\n database=\"studentTorba\",\r\n user=\"postgres\",\r\n password=\"sobaka123\")\r\n\t\t\r\n with conn:\r\n cur = conn.cursor()\r\n\r\n print('Query 1:')\r\n cur.execute(query_1)\r\n for row in cur:\r\n print(row)\r\n\r\n print('\\nQuery 2:')\r\n cur.execute(query_2)\r\n for row in cur:\r\n print(row)\r\n \r\n print('\\nQuery 3:')\r\n cur.execute(query_3)\r\n for row in cur:\r\n print(row)\r\n \r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()\r\n print('Database connection closed.')\r\n\r\nconnect()\r\n","repo_name":"sTorba24/db_lab2_torba","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27410445456","text":"import csv\r\nimport re\r\nimport pdb\r\nimport requests\r\nfrom lxml import etree\r\nimport json\r\n\r\nbase_url = 'http://kuehne-nagel.com'\r\n\r\ndef validate(item): \r\n if type(item) == list:\r\n item = ' '.join(item)\r\n return item.encode('ascii', 'ignore').encode(\"utf8\").strip()\r\n\r\ndef get_value(item):\r\n if item == None :\r\n item = ''\r\n item = validate(item)\r\n if item == '':\r\n item = '' \r\n return item\r\n\r\ndef eliminate_space(items):\r\n rets = []\r\n for item in items:\r\n item = validate(item)\r\n if item != '':\r\n rets.append(item)\r\n return rets\r\n\r\ndef write_output(data):\r\n with open('data.csv', mode='w') as output_file:\r\n writer = csv.writer(output_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n writer.writerow([\"locator_domain\", \"location_name\", \"street_address\", \"city\", \"state\", \"zip\", \"country_code\", \"store_number\", \"phone\", \"location_type\", \"latitude\", \"longitude\", 
\"hours_of_operation\"])\r\n for row in data:\r\n writer.writerow(row)\r\n\r\ndef fetch_data():\r\n output_list = []\r\n url = \"https://www.kn-portal.com/webservice/locations\"\r\n request = requests.get(url)\r\n regions = json.loads(request.text)\r\n store_lists = []\r\n\r\n for region in regions:\r\n if region['region'] == 'North America':\r\n\t for idx, country in region['regionList'][0]['countryList'].items():\r\n\t country_code = country['country_code']\r\n\t if country_code == 'US' or country_code == 'CA':\r\n\t\t for office in country['officeList']:\r\n\t\t output = []\r\n\t\t output.append(base_url)\r\n\t\t output.append(validate(office['locationName']))\r\n\t\t output.append(validate(office['buildingNo'] + ' ' + office['street']))\r\n\t\t output.append(validate(office['city']))\r\n\t\t output.append(validate(office['stateRegion']))\r\n\t\t output.append(validate(office['postalCode']))\r\n\t\t output.append(validate(country_code))\r\n\t\t output.append(validate(office['cid']))\r\n\t\t output.append(get_value(office['phoneNumber'].replace('+','').replace('-','')))\r\n\t\t output.append(validate(office['locationType']))\r\n\t\t output.append(validate(office['latitude']))\r\n\t\t output.append(validate(office['longitude']))\r\n\t\t output.append(get_value(office['openingHours']).replace('\\r\\n', ' '))\r\n\t\t output_list.append(output)\r\n return output_list\r\n \r\ndef scrape():\r\n data = fetch_data()\r\n write_output(data)\r\n\r\nscrape()\r\n","repo_name":"midasdev711/MyCrawlers","sub_path":"kuehne-nagel_com/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18667472194","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import Http404\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom checkout.models import Delivery\nfrom .models import (\n Product,\n ProductInventory,\n Category,\n Media,\n BestSellingProducts\n)\n\n\n# FOR GIFTS IMPORT\nfrom django.conf import settings\nfrom .floweraura_category import GiftCategory\nfrom .floweraura_product import GiftProduct\n# END FOR GIFTS IMPORT\n\n# FOR PRACTISE BUT IMAGES DOESNT WORK\ndef get_fake(request):\n products = Media.objects.select_related(\"product_inventory__product\").filter(is_feature = True).values(\n \"image\", \"alt_text\",\"product_inventory__sale_price\", \"product_inventory__product__id\", \"product_inventory__product__name\", \"product_inventory__product__slug\"\n )\n context = {\n \"products\": products\n }\n return render(request, 'inventory/fake.html', context)\n# \n\n\n# Create your views here.\n\n# 404 PAGE\ndef handler404(request, exception):\n return render(request, '404.html', status=404)\n# END 404 PAGE\n\n# 500 SERVER ERROR\ndef server_error(request):\n return render(request, '500.html', status=500)\n# END 500 SERVER ERROR\n\n\n\n# SEARCH FUNCTION\ndef search(q = None):\n query = (\n Q(product__name__icontains = q) |\n Q(brand__name__icontains = q) |\n Q(sale_price__icontains = q)\n )\n datas = ProductInventory.objects.filter(query).select_related(\"product\")\n\n if len(datas) == 0:\n return 0\n return datas\n# END SEARCH FUNCTION\n\n# ALL PRODUCTS\ndef products(request):\n q = request.GET.get('q') or None\n if q == None:\n products = ProductInventory.objects.select_related(\"product\")\n else:\n products = search(q)\n if products == 0:\n messages.warning(request, 'Product does not exists!')\n return 
redirect('inventory:list-products')\n context = {\n 'products': products\n }\n return render(request, 'inventory/products.html', context)\n# END ALL PRODUCTS\n\n\n# BEST SELLING PRODUCTS\ndef get_best_selling_products(request):\n products = BestSellingProducts.objects.select_related(\"product\").filter(quantity__gt = 1)\n context = {\n \"products\": products\n }\n return render(request, 'inventory/best_selling_products.html', context)\n# END BEST SELLING PRODUCTS\n\n\n# PRODUCTS BY CATEGORY\ndef products_by_category(request, slug):\n #slug = \"biznes-kitaplar\"\n category = get_object_or_404(Category, slug = slug) \n products = ProductInventory.objects.filter(\n product__category__in = Category.objects.get(slug=slug).get_descendants(include_self = True)\n )\n context = {\n 'category': category,\n 'products': products\n }\n return render(request, 'inventory/products_by_category.html', context)\n# END PRODUCTS BY CATEGORY\n\n\n# PRODUCT BY CATEGORY CHILDREN\ndef products_by_category_children(request, slug):\n category = get_object_or_404(Category, slug = slug)\n products = ProductInventory.objects.filter(\n product__category__in = Category.objects.get(slug=slug).get_descendants(include_self = True)\n )\n context = {\n 'category': category,\n 'products': products\n }\n return render(request, 'inventory/filtered_category_for_products.html', context)\n# END PRODUCT BY CATEGORY CHILDREN\n\n\n# PRODUCT DETAIL\ndef product_detail(request, slug):\n product = get_object_or_404(ProductInventory.objects.select_related(\"product\", \"brand\"), product__slug = slug)\n context = {\n 'product': product\n }\n return render(request, 'inventory/product_detail.html', context)\n# END PRODUCT DETAIL\n\n\n\n\n\n# GIFT IDEAS\n\ndef get_data_from_floweraura(url):\n data = GiftCategory(url=url)\n return data\n\n\ndef get_gifts(num: int):\n url = settings.FLOWERAURA_URL\n data = get_data_from_floweraura(url)\n return data.filter_gift(num)\n\n\n\ndef get_gifts_list(request):\n context = {\n \"gifts_hampers\": get_gifts(0),\n \"for_hers\": get_gifts(1),\n \"for_hims\": get_gifts(2),\n \"home_kitchens\": get_gifts(3),\n \"fashions\": get_gifts(4),\n \"more_gifts\": get_gifts(5),\n }\n return render(request, 'inventory/gifts.html', context)\n\n\ndef check_name(name: str):\n arr = ['Girlfriend', 'Wife', 'Mother', 'Sister', 'Daughter', 'Boyfriend', 'Husband', 'Father', 'Brother', 'Son', 'Boys', \"Kids\", \"Parents\"]\n if name not in arr:\n return name\n return f\"For {name}\"\n\n\ndef get_products_by_gift(request, name):\n url = settings.FLOWERAURA_URL\n products = GiftProduct(url)\n checked_name = check_name(name)\n context = {\n 'gift': name,\n 'products': products.get_products(checked_name),\n }\n return render(request, 'inventory/products_by_gift.html', context)\n\n\n# END GIFT IDEAS\n\n\n# DELIVERY SERVICE\ndef get_delivery_services(request):\n deliveries = Delivery.objects.all()\n page = \"delivery\"\n context = {\n \"deliveries\": deliveries,\n \"page\":page\n }\n return render(request, 'inventory/delivery_services.html', context)\n\n\n\ndef get_delivery_method(request, method):\n deliveries = Delivery.objects.filter(delivery_method = method)\n page = \"method\"\n context = {\n \"deliveries\": deliveries,\n \"page\":page\n }\n return render(request, 'inventory/delivery_services.html', context)\n\n\n# END DELIVERY 
SERVICE","repo_name":"Jetkerpy/obazar-ecommerce","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31243206061","text":"import os\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nfrom torchvision import datasets\nfrom torchvision import transforms\nimport random\nimport glob\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\n\n# %%\ndef findfiles(path):\n return glob.glob(path)\n\n\ndef convert_to_tensor(index: list, lst_of_char: list):\n image_set = []\n for i in index:\n image = Image.open(lst_of_char[i])\n image_ = transforms.ToTensor()(image)\n image_set.append(image_)\n\n return torch.stack(image_set, dim=0)\n\n\n# mission of this data loader is to load images from different classes\ndef chinese_character_loader(opts):\n # return a dataloader and the number of classes\n working_directory = os.getcwd() + \"\\\\data\\\\augmentation\\\\trainset_ch\\\\\"\n font_list = findfiles(working_directory + '*')\n # below is to get me the class names\n labels = []\n for i in range(len(font_list)):\n category = os.path.splitext(os.path.basename(font_list[i]))[0]\n labels.append(category)\n\n # convert labels to one-hot encoding\n label_df = pd.DataFrame(labels, columns=[\"font\"])\n dum_df = pd.get_dummies(label_df, columns=[\"font\"], prefix='', prefix_sep='')\n\n # to randomly choose batch_size of chars and its labels\n image_set = []\n image_set_labels = []\n\n # Suppose the numbers of images in each folder are equal!!!\n number_chars_per_font = len(os.listdir(working_directory + '%s' % labels[0] + '\\\\'))\n for font_index in range(len(font_list)):\n # print(font_index)\n picking_directory = working_directory + '%s' % labels[font_index] + '\\\\'\n charNames_list = os.listdir(picking_directory)\n image_indices = random.sample(range(number_chars_per_font), min(number_chars_per_font, 300))\n # image_indices = range(number_chars_per_font)\n for i in image_indices:\n # print(i, len(image_indices))\n r = Image.open(picking_directory + charNames_list[i]).convert('L')\n t = transforms.ToTensor()(r)\n image_set.append(t)\n\n label_num = dum_df.columns.get_loc(labels[font_index])\n image_set_labels.append(label_num)\n '''\n\n for i in range(opts['batch_size']*3):\n # to randomly pick a font and then randomly pick a char\n font = labels[random.randint(0, len(labels) -1)]\n picking_directory = working_directory + '%s' %font + '\\\\'\n # char_list = findfiles(picking_directory + '*.png')\n charPaths_list = os.listdir(picking_directory)\n\n\n picked_char = charPaths_list[random.randint(0, len(charPaths_list)-1)]\n # convert the image to list\n image = Image.open(picked_char).convert('L')\n image_ = transforms.ToTensor()(image)\n image_set.append(image_)\n # convert its label to list\n\n # font_tensor = torch.tensor(dum_df[font], dtype=torch.long)\n # image_set_labels.append(font_tensor)\n\n label_num = dum_df.columns.get_loc(font)\n image_set_labels.append(label_num)\n '''\n train_data = []\n for i in range(len(image_set)):\n train_data.append([image_set[i], image_set_labels[i]])\n # the first return is the training set, the second return is the validation set\n return DataLoader(train_data, batch_size=opts['batch_size']), len(labels)\n\n\ndef test_data_loader(opts):\n working_directory = os.getcwd() + \"\\\\data\\\\augmentation\\\\testset_ch\\\\\"\n font_list = 
findfiles(working_directory + '*')\n # below is to get me the class names\n labels = []\n for i in range(len(font_list)):\n category = os.path.splitext(os.path.basename(font_list[i]))[0]\n labels.append(category)\n\n label_df = pd.DataFrame(labels, columns=[\"font\"])\n dum_df = pd.get_dummies(label_df, columns=[\"font\"], prefix='', prefix_sep='')\n image_set = []\n image_set_labels = []\n for i in labels:\n current_directory = working_directory + '%s\\\\' % i\n # load all characters in that directory\n char_list = findfiles(current_directory + '*')\n for j in range(len(char_list)):\n image = Image.open(char_list[j]).convert('L')\n image_ = transforms.ToTensor()(image)\n image_set.append(image_)\n # now we convert the label to tensor\n # font_tensor = dum_df[font].to_numpy()\n # font_tensor = np.long()\n # font_tensor = torch.tensor(dum_df[i], dtype=torch.long)\n font_tensor = torch.from_numpy(dum_df[i].to_numpy())\n\n # image_set_labels.append(font_tensor)\n label_num = dum_df.columns.get_loc(i)\n image_set_labels.append(label_num)\n\n test_data = []\n for i in range(len(image_set)):\n test_data.append([image_set[i], image_set_labels[i]])\n\n return DataLoader(test_data, batch_size=opts['validation_batch_size'])\n\n\n# %%\ndef chinese_advanced_loader(opts, test = False):\n working_directory = os.getcwd() + '\\\\data\\\\augmentation\\\\trainset_ch\\\\'\n font_list = findfiles(working_directory + '*')\n labels = []\n for i in range(len(font_list)):\n category = os.path.splitext(os.path.basename(font_list[i]))[0]\n labels.append(category)\n\n if not test:\n image_set = []\n target_set = []\n y_set = []\n # below is balanced\n\n for i in range(len(font_list)):\n current_path = font_list[i]\n char_list = os.listdir(working_directory + labels[i])\n image_indices = random.sample(range(len(char_list)), 2 * opts['data_size'])\n count = 0\n for j in image_indices:\n if count % 2 == 0:\n image = Image.open(working_directory + labels[i] + \"\\\\\" + char_list[j]).convert('L')\n image = transforms.ToTensor()(image)\n image_set.append(image)\n\n random_dataset = random.randint(0, len(font_list)-1)\n # might be resource-consuming, optimize?\n random_indice = random.randint(0, len(char_list) - 1)\n image_ = Image.open(working_directory + labels[random_dataset] + '\\\\' + char_list[random_indice]).convert('L')\n image_ = transforms.ToTensor()(image_)\n target_set.append(image_)\n if random_dataset == i:\n y_set.append(1)\n else:\n y_set.append(0)\n else:\n image = Image.open(working_directory + labels[i] + \"\\\\\" + char_list[j]).convert('L')\n image = transforms.ToTensor()(image)\n image_set.append(image)\n random_source = (random.sample(image_indices, 1))\n random_source = random_source[0]\n image_ = Image.open(working_directory + labels[i] + \"\\\\\" + char_list[random_source]).convert('L')\n image_ = transforms.ToTensor()(image_)\n target_set.append(image_)\n y_set.append(1)\n count += 1\n\n shuff = list(zip(image_set, target_set, y_set))\n\n random.shuffle(shuff)\n image_set, target_set, y_set = zip(*shuff)\n y_set = torch.tensor(y_set, dtype=torch.float)\n image_loader = DataLoader(image_set, batch_size=opts['image_size'])\n target_loader = DataLoader(target_set, batch_size = opts['image_size'])\n y_loader = DataLoader(y_set, batch_size=opts['image_size'])\n\n return image_loader, target_loader, y_loader\n\n else:\n # below is the test part\n # working_directory_test = os.getcwd() + '\\\\data\\\\augmentation\\\\testset_ch\\\\'\n working_directory_test = os.getcwd() + 
'\\\\data\\\\augmentation\\\\testset_ch\\\\'\n random_test_indice = random.randint(0, len(labels) - 1)\n char_list_image = os.listdir(working_directory_test + labels[random_test_indice])\n test_target_set = []\n test_image_set = []\n test_y_set = []\n for i in range(60):\n random_ = int(random.randint(0, len(labels)-1))\n i_ = labels[random_]\n # target\n char_list = os.listdir(working_directory_test + i_)\n random_target_indice = int(random.randint(0, len(char_list) - 1))\n image = Image.open(working_directory_test + i_ + '\\\\' + char_list[random_target_indice]).convert('L')\n image = transforms.ToTensor()(image)\n test_target_set.append(image)\n # image\n random_image_indice = int(random.randint(0, len(char_list_image) - 1))\n image = Image.open(working_directory_test + labels[random_test_indice] + '\\\\' + char_list_image[random_image_indice]).convert('L')\n image = transforms.ToTensor()(image)\n test_image_set.append(image)\n if i_ == labels[random_test_indice]:\n test_y_set.append(1)\n else:\n test_y_set.append(0)\n\n # below is balanced test\n # target\n random_target_indice = int(random.randint(0, len(char_list_image) - 1))\n image = Image.open(working_directory_test + labels[random_test_indice] + '\\\\' + char_list_image[random_target_indice]).convert('L')\n image = transforms.ToTensor()(image)\n test_target_set.append(image)\n # image\n random_image_indice = int(random.randint(0, len(char_list_image) - 1))\n image = Image.open(working_directory_test + labels[random_test_indice] + '\\\\' + char_list_image[random_image_indice]).convert('L')\n image = transforms.ToTensor()(image)\n test_image_set.append(image)\n test_y_set.append(1)\n\n shuff = list(zip(test_image_set, test_target_set, test_y_set))\n random.shuffle((shuff))\n test_image_set, test_target_set, test_y_set = zip(*shuff)\n test_y_set = torch.tensor(test_y_set, dtype=torch.float)\n test_imageloader = DataLoader(test_image_set, batch_size=400)\n test_targetloader = DataLoader(test_target_set, batch_size=400)\n test_yloader = DataLoader(test_y_set, batch_size=400)\n\n return test_imageloader, test_targetloader, test_yloader\n\n# %%\ndef chinese_advanced_char_loader(test_flag, opts, font_A_index, font_B_index):\n # A refers to the ref_imgs, B refers to the target_imgs\n # return ref_imgs, target_imgs, y\n working_directory = os.getcwd() + '\\\\data\\\\augmentation\\\\trainset_ch\\\\'\n font_list = findfiles(working_directory + '*')\n labels = []\n for i in range(len(font_list)):\n category = os.path.splitext(os.path.basename(font_list[i]))[0]\n labels.append(category)\n # below we are doing A\n image_font = labels[font_A_index]\n current_directory = working_directory + '%s' % image_font + '\\\\'\n char_list = findfiles(current_directory + '*.png')\n image_indices = random.sample(range(len(char_list)), 300)\n image_set = []\n image_set_labels = []\n # convert to onehot_encoding\n label_df = pd.DataFrame(labels, columns=[\"font\"])\n dum_df = pd.get_dummies(label_df, columns=[\"font\"], prefix='', prefix_sep='')\n\n for i in image_indices:\n image = Image.open(char_list[i]).convert('L')\n image_ = transforms.ToTensor()(image)\n image_set.append(image_)\n # label\n label_num = dum_df.columns.get_loc(image_font)\n image_set_labels.append(label_num)\n train_image_data = []\n for i in range(300):\n train_image_data.append(image_set[i])\n imageloader = DataLoader(train_image_data, batch_size=opts['image_size'])\n\n # below we are doing B\n target_font = labels[font_B_index]\n current_directory = working_directory + '%s' % 
target_font + '\\\\'\n char_list = findfiles(current_directory + '*.png')\n target_indices = random.sample(range(len(char_list)), 300)\n target_set = []\n target_set_labels = []\n # convert to onehot_encoding\n label_df = pd.DataFrame(labels, columns=[\"font\"])\n dum_df = pd.get_dummies(label_df, columns=[\"font\"], prefix='', prefix_sep='')\n for i in target_indices:\n image = Image.open(char_list[i]).convert('L')\n image_ = transforms.ToTensor()(image)\n target_set.append(image_)\n # label\n label_num = dum_df.columns.get_loc(target_font)\n target_set_labels.append(label_num)\n target_image_data = []\n for i in range(300):\n target_image_data.append(target_set[i])\n targetloader = DataLoader(target_image_data, batch_size=opts['ref_size'])\n\n # below we are doing y\n if target_font == image_font:\n y = 300 * [1]\n y = torch.tensor(y, dtype=torch.float)\n else:\n y = 300 * [0]\n y = torch.tensor(y, dtype=torch.float)\n yloader = DataLoader(y, batch_size=opts['image_size'])\n if not test_flag:\n\n return imageloader, targetloader, yloader\n else:\n # caution! Here y is a dataloader, so it needs extra steps in the test procedure\n image_loader, target_loader, y = advanced_test_dataloader(opts, font_A_index)\n return image_loader, target_loader, y\n\n\n# Trialed as the train dataloader\n# %%\ndef advanced_test_dataloader(opts, font_A_index):\n # the idea is to make sure that the image set and target set have the same batch number\n working_directory = os.getcwd() + \"\\\\data\\\\augmentation\\\\testset_ch\\\\\"\n font_list = findfiles(working_directory + '*')\n labels = []\n for i in range(len(font_list)):\n category = os.path.splitext(os.path.basename(font_list[i]))[0]\n labels.append(category)\n\n image_set = []\n working_directory_A = os.getcwd() + '\\\\data\\\\augmentation\\\\trainset_ch\\\\'\n char_list = os.listdir(working_directory_A + labels[font_A_index])\n # char_list = findfiles(working_directory_A + labels[font_A_index] + '\\\\*.png')\n # for now we are keeping the size of target images to be 200\n\n #below is the image\n for i in range(2 * len(font_list)):\n image_indices = random.sample(range(len(char_list)), 2 * len(font_list) * opts['image_size'])\n for k in image_indices:\n image = Image.open(working_directory_A + labels[font_A_index] + \"\\\\\" + char_list[k]).convert('L')\n image = transforms.ToTensor()(image)\n image_set.append(image)\n image_set *= (200//opts['ref_size'])\n image_loader = DataLoader(image_set, batch_size=opts['image_size'])\n\n # below is the reference\n # for now we load 200 pictures for each target class\n target_set = []\n target_set_ = []\n # for every target, we append 200 pictures to the target_set\n for j in range(len(labels)):\n # this return a list of images\n char_list_tar = os.listdir(working_directory + labels[j])\n image_indices = random.sample(range(len(char_list_tar)), 200)\n for k in image_indices:\n image = Image.open(working_directory + labels[j] + '\\\\' + char_list_tar[k]).convert('L')\n image = transforms.ToTensor()(image)\n target_set.append(image)\n for i in range(200//opts['ref_size']):\n # for every target, we append 200 pictures to the target_set\n # regroup the data so that they can be fed into the dataloader in a correct sequence\n for n in range(len(labels)):\n # add B\n starting_pos = i*(len(target_set)/(200//opts['ref_size'])) + n * opts['ref_size']\n all_pos = [int(starting_pos) + size for size in range(opts['ref_size'])]\n for pos in all_pos:\n pos = int(pos)\n target_set_.append(target_set[pos])\n # add A\n 
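# (added note) mirror the 'add B' step with font A: append a same-size block of font A right after\n            # each class block, so the reference loader alternates candidate-class and font-A batches\n            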
starting_pos_a = i * (len(target_set) / (200 // opts['ref_size'])) + font_A_index * opts['ref_size']\n all_pos_a = [starting_pos_a + size for size in range(opts['ref_size'])]\n for pos in all_pos_a:\n pos = int(pos)\n target_set_.append(target_set[pos])\n target_loader = DataLoader(target_set_, batch_size = opts['ref_size'])\n #below is y\n y = []\n for i in range(2 * (200//opts['ref_size']) * len(font_list)):\n if i % len(font_list) == font_A_index:\n for j in range(2 * opts['image_size']):\n y.append(1)\n else:\n for k in range(2 * opts['image_size']):\n if k <= opts['image_size'] - 1:\n y.append(0)\n else:\n y.append(1)\n\n y = torch.tensor(y, dtype=torch.float)\n y_loader = DataLoader(y, batch_size = opts['image_size'])\n\n return image_loader, target_loader, y_loader\n\n# %%\n\n\nif __name__ == \"__main__\":\n opts = {'image_size': 30, 'ref_size': 50, 'validation_batch_size': 50, 'train': False, 'batch_size': 50, 'data_size': 400}\n a, b, c = chinese_advanced_loader(opts, True)\n\n for data1, data2, data3 in zip(a,b,c):\n c1 = data1\n c2 = data2\n c3 = data3\n print(c1.shape, c2.shape, c3.shape)\n break\n","repo_name":"oqian/Project_449","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":16926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73267189353","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Script to simulate load for osctrl\n#\n# optional arguments:\n# -h, --help show this help message and exit\n# --url URL, -u URL URL for osctrl-tls used to enroll nodes (default: http://localhost:9000/) # noqa: E501\n# --nodes NODES, -n NODES\n# Number of random nodes to simulate (default: 5)\n# --status STATUS, -S STATUS\n# Interval in seconds for status requests to osctrl (default: 60)\n# --result RESULT, -R RESULT\n# Interval in seconds for result requests to osctrl (default: 60)\n# --config CONFIG, -c CONFIG\n# Interval in seconds for config requests to osctrl (default: 45)\n# --query QUERY, -q QUERY\n# Interval in seconds for query requests to osctrl (default: 30)\n# --read [READ], -r [READ]\n# JSON file to read nodes from\n# --write [WRITE], -w [WRITE]\n# JSON file to write nodes to\n# --verbose, -v Enable verbose output (default: False)\n\n# required arguments:\n# --secret SECRET, -s SECRET\n# Secret to enroll nodes for osctrl-tls (default: None)\n\n# Install the Python Requests library:\n# `pip install requests`\n\n\nimport json\nimport random\nimport requests # pylint: disable=import-error\nimport sys\nimport subprocess\nimport time\nimport threading\nimport uuid\nimport argparse\n\nTLS_URL = \"http://localhost:9000/\"\n\nTLS_ENROLL = \"/enroll\"\nTLS_LOG = \"/log\"\nTLS_CONFIG = \"/config\"\nTLS_QUERY_READ = \"/read\"\nTLS_QUERY_WRITE = \"/write\"\n\nLOG_INTERVAL = 60\nCONFIG_INTERVAL = 45\nQUERY_READ_INTERVAL = 30\n\nUBUNTU14 = \"ubuntu14\"\nUBUNTU16 = \"ubuntu16\"\nUBUNTU18 = \"ubuntu18\"\nCENTOS6 = \"centos6\"\nCENTOS7 = \"centos7\"\nDEBIAN8 = \"debian8\"\nDEBIAN9 = \"debian9\"\nFREEBSD = \"freebsd\"\nDARWIN = \"darwin\"\nWINDOWS = \"windows\"\n\nPLATFORMS = [\n UBUNTU14,\n UBUNTU16,\n UBUNTU18,\n CENTOS6,\n CENTOS7,\n DEBIAN8,\n DEBIAN9,\n FREEBSD,\n DARWIN,\n WINDOWS,\n]\n\nOSQUERY_VERSIONS = [\n \"5.0.1\",\n \"4.9.0\",\n \"3.3.1\",\n \"3.3.2\",\n \"5.1.0\",\n \"5.3.0\",\n \"4.8.2\",\n]\n\nNODES_JSON = \"nodes.json\"\n\nOSQUERYI = \"osqueryi\"\n\n\n# returns tuple(status_code, parsed_json)\ndef _post(_url, _data, _headers, _debug):\n try:\n if _debug:\n print(\"POST to \", _url)\n 
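# with debug enabled, dump the outgoing payload; the parsed response is dumped just below\n            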
print(\"DATA: \", json.dumps(_data, indent=2, sort_keys=True))\n response = requests.post(\n url=_url, data=json.dumps(_data), headers=_headers, verify=False\n )\n\n if _debug:\n print(f\"HTTP {response.status_code}\")\n\n if response.status_code == 200:\n parsed_json = json.loads(response.content)\n if _debug:\n print(json.dumps(parsed_json, indent=2, sort_keys=True))\n return response.status_code, parsed_json\n\n except requests.exceptions.RequestException as e:\n print(\"HTTP Request failed\")\n if _debug:\n print(e)\n return 0, {}\n\n\n# returns string\ndef _gen_random_ip():\n return \".\".join(\n map( # pylint: disable=bad-builtin\n str, (random.randint(0, 255) for _ in range(4))\n )\n )\n\n\n# returns string\ndef _gen_hostname(_platform):\n return (\n _platform.capitalize()\n + \"-\"\n + random.choice([\"Prod\", \"Legacy\", \"Test\", \"Dev\", \"PC\"])\n )\n\n\n# returns Object\ndef _gen_random_node():\n platform_target = random.choice(PLATFORMS)\n return {\n \"target\": platform_target,\n \"ip\": _gen_random_ip(),\n \"name\": _gen_hostname(platform_target),\n \"version\": random.choice(OSQUERY_VERSIONS),\n \"identifier\": str(uuid.uuid4()),\n \"key\": \"\",\n }\n\n\n# return list(Object)\ndef _gen_random_nodes(_n):\n nodes = []\n for i in range(_n): # pylint: disable=unused-variable\n nodes.append(_gen_random_node())\n return nodes\n\n\n# return Object\ndef _gen_systeminfo(_name, _uuid):\n return {\n \"computer_name\": _name,\n \"cpu_brand\": \"Intel(R) Core(TM) i7-7920HQ CPU @ 3.10GHz\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\", # noqa: E501\n \"cpu_logical_cores\": \"4\",\n \"cpu_physical_cores\": \"4\",\n \"cpu_subtype\": \"158\",\n \"cpu_type\": \"x86_64\",\n \"hardware_model\": \"\",\n \"hostname\": _name,\n \"local_hostname\": _name,\n \"physical_memory\": \"2095869952\",\n \"uuid\": _uuid,\n }\n\n\n# return Object\ndef _gen_osqueryinfo(_uuid, _version):\n return {\n \"build_distro\": \"build_distro\",\n \"build_platform\": \"build_platform\",\n \"config_hash\": \"\",\n \"config_valid\": \"0\",\n \"extensions\": \"active\",\n \"instance_id\": str(uuid.uuid1()),\n \"pid\": \"11\",\n \"start_time\": \"1564800635\",\n \"uuid\": _uuid,\n \"version\": _version,\n \"watcher\": \"9\",\n }\n\n\n# return Object\ndef _osversion_ubuntu14():\n return {\n \"_id\": \"14.04\",\n \"major\": \"14\",\n \"minor\": \"04\",\n \"name\": \"Ubuntu\",\n \"patch\": \"0\",\n \"platform\": \"ubuntu\",\n \"platform_like\": \"debian\",\n \"version\": \"14.04.5 LTS, Trusty Tahr\",\n }\n\n\n# return Object\ndef _osversion_ubuntu16():\n return {\n \"_id\": \"16.04\",\n \"codename\": \"xenial\",\n \"major\": \"16\",\n \"minor\": \"04\",\n \"name\": \"Ubuntu\",\n \"patch\": \"0\",\n \"platform\": \"ubuntu\",\n \"platform_like\": \"debian\",\n \"version\": \"16.04.6 LTS (Xenial Xerus)\",\n }\n\n\n# return Object\ndef _osversion_ubuntu18():\n return {\n \"_id\": \"18.04\",\n \"codename\": \"bionic\",\n \"major\": \"18\",\n \"minor\": \"04\",\n \"name\": \"Ubuntu\",\n \"patch\": \"0\",\n \"platform\": \"ubuntu\",\n \"platform_like\": \"debian\",\n \"version\": \"18.04.2 LTS (Bionic Beaver)\",\n }\n\n\n# return Object\ndef _osversion_centos6():\n return {\n \"build\": \"\",\n \"major\": \"6\",\n \"minor\": \"10\",\n \"name\": \"CentOS\",\n \"patch\": \"0\",\n \"platform\": \"rhel\",\n \"platform_like\": \"rhel\",\n \"version\": \"CentOS release 6.10 (Final)\",\n }\n\n\n# return Object\ndef _osversion_centos7():\n return {\n \"_id\": \"7\",\n \"build\": \"\",\n \"major\": \"7\",\n \"minor\": \"6\",\n \"name\": 
\"CentOS Linux\",\n \"patch\": \"1810\",\n \"platform\": \"rhel\",\n \"platform_like\": \"rhel\",\n \"version\": \"CentOS Linux release 7.6.1810 (Core)\",\n }\n\n\ndef _osversion_debian8():\n return {\n \"_id\": \"8\",\n \"major\": \"8\",\n \"minor\": \"0\",\n \"name\": \"Debian GNU/Linux\",\n \"patch\": \"0\",\n \"platform\": \"debian\",\n \"version\": \"8 (jessie)\",\n }\n\n\n# return Object\ndef _osversion_debian9():\n return {\n \"_id\": \"9\",\n \"major\": \"9\",\n \"minor\": \"0\",\n \"name\": \"Debian GNU/Linux\",\n \"patch\": \"0\",\n \"platform\": \"debian\",\n \"version\": \"9 (stretch)\",\n }\n\n\n# return Object\ndef _osversion_freebsd():\n return {\n \"build\": \"STABLE\",\n \"major\": \"11\",\n \"minor\": \"3\",\n \"name\": \"FreeBSD\",\n \"patch\": \"\",\n \"platform\": \"freebsd\",\n \"version\": \"11.3-STABLE\",\n }\n\n\ndef _osversion_darwin():\n return {\n \"build\": \"16A323\",\n \"major\": \"10\",\n \"minor\": \"14\",\n \"name\": \"Mac OS X\",\n \"patch\": \"0\",\n \"platform\": \"darwin\",\n \"platform_like\": \"darwin\",\n \"version\": \"10.14\",\n }\n\n\ndef _osversion_windows():\n return {\n \"build\": \"17763\",\n \"codename\": \"Windows 10 Pro\",\n \"install_date\": \"20190119193615.000000-420\",\n \"major\": \"10\",\n \"minor\": \"0\",\n \"name\": \"Microsoft Windows 10 Pro\",\n \"platform\": \"windows\",\n \"platform_like\": \"windows\",\n \"version\": \"10.0.17763\",\n }\n\n\ndef _gen_config(_key):\n return {\"node_key\": _key}\n\n\ndef _gen_queryread(_key):\n return {\"node_key\": _key}\n\n\ndef _gen_querywrite(_node, _query_name, _result):\n queries = {_query_name: _result}\n statuses = {_query_name: 0}\n messages = {_query_name: \"\"}\n return {\n \"node_key\": _node[\"key\"],\n \"queries\": queries,\n \"statuses\": statuses,\n \"messages\": messages,\n }\n\n\ndef _gen_log_status(_node):\n status = {\n \"hostIdentifier\": _node[\"identifier\"],\n \"calendarTime\": time.ctime(),\n \"unixTime\": str(int(time.time())),\n \"severity\": \"0\",\n \"filename\": \"fake_news.py\",\n \"line\": \"255\",\n \"message\": \"Sent fake log message to TLS\",\n \"version\": _node[\"version\"],\n }\n return {\"node_key\": _node[\"key\"], \"log_type\": \"status\", \"data\": [status]}\n\n\ndef _gen_log_result(_node):\n result = {\n \"name\": \"uptime\",\n \"hostIdentifier\": _node[\"identifier\"],\n \"calendarTime\": time.ctime(),\n \"unixTime\": str(int(time.time())),\n \"epoch\": 0,\n \"counter\": 0,\n \"numerics\": False,\n \"decorations\": {\n \"config_hash\": \"7155bb2b98162fa5641d340e03a38d0502df34f0\",\n \"hostname\": _node[\"name\"],\n \"local_hostname\": _node[\"name\"],\n \"osquery_md5\": \"8e2490cb34e32cb33d6326ca30763167\",\n \"osquery_user\": \"root\",\n \"osquery_version\": _node[\"version\"],\n \"username\": \"user (console)\",\n },\n \"columns\": {\n \"days\": \"0\",\n \"hours\": \"1\",\n \"minutes\": \"2\",\n \"seconds\": \"3\",\n \"total_seconds\": \"123456\",\n },\n \"action\": \"added\",\n }\n return {\"node_key\": _node[\"key\"], \"log_type\": \"result\", \"data\": [result]}\n\n\ndef _gen_enroll(_node, _secret):\n selector_osversion = {\n UBUNTU14: _osversion_ubuntu14,\n UBUNTU16: _osversion_ubuntu16,\n UBUNTU18: _osversion_ubuntu18,\n CENTOS6: _osversion_centos6,\n CENTOS7: _osversion_centos7,\n DEBIAN8: _osversion_debian8,\n DEBIAN9: _osversion_debian9,\n FREEBSD: _osversion_freebsd,\n DARWIN: _osversion_darwin,\n WINDOWS: _osversion_windows,\n }\n selector_platform = {\n UBUNTU14: \"9\",\n UBUNTU16: \"9\",\n UBUNTU18: \"9\",\n CENTOS6: \"9\",\n CENTOS7: 
\"9\",\n        DEBIAN8: \"9\",\n        DEBIAN9: \"9\",\n        FREEBSD: \"37\",\n        DARWIN: \"21\",\n        WINDOWS: \"2\",\n    }\n    # unknown targets fall back to Ubuntu 18 metadata and a generic platform id;\n    # the defaults are a callable and a plain string because _osversion is called below\n    _osversion = selector_osversion.get(_node[\"target\"], _osversion_ubuntu18)\n    _platform = selector_platform.get(_node[\"target\"], \"9\")\n    return {\n        \"enroll_secret\": _secret,\n        \"host_identifier\": _node[\"identifier\"],\n        \"platform_type\": _platform,\n        \"host_details\": {\n            \"os_version\": _osversion(),\n            \"osquery_info\": _gen_osqueryinfo(_node[\"identifier\"], _node[\"version\"]),\n            \"system_info\": _gen_systeminfo(_node[\"name\"], _node[\"identifier\"]),\n        },\n    }\n\n\ndef _osctrl_log_status(_sleep, _node, _urls, _secret, _verbose):\n    while True:\n        start = time.perf_counter()\n        headers = {\"X-Real-IP\": _node[\"ip\"]}\n        data = _gen_log_status(_node)\n        code, resp = _post(_urls[\"log\"], data, headers, _verbose)\n        request_time = time.perf_counter() - start\n        print(f\"⏰ {1000 * request_time:.0f} ms status from\", _node[\"name\"])\n        if code != 200:\n            print(\"HTTP\", str(code), \"with\", _urls[\"log\"])\n            if _verbose:\n                print(resp)\n        if \"node_invalid\" in resp and resp[\"node_invalid\"]:\n            with lock:\n                _node[\"key\"] = _osctrl_enroll(_node, _secret, _urls[\"enroll\"], _verbose)\n        time.sleep(_sleep)\n\n\ndef _osctrl_log_result(_sleep, _node, _urls, _secret, _verbose):\n    while True:\n        start = time.perf_counter()\n        headers = {\"X-Real-IP\": _node[\"ip\"]}\n        data = _gen_log_result(_node)\n        code, resp = _post(_urls[\"log\"], data, headers, _verbose)\n        request_time = time.perf_counter() - start\n        print(f\"⏰ {1000 * request_time:.0f} ms result from\", _node[\"name\"])\n        if code != 200:\n            print(\"HTTP\", str(code), \"with\", _urls[\"log\"])\n            if _verbose:\n                print(resp)\n        if \"node_invalid\" in resp and resp[\"node_invalid\"]:\n            with lock:\n                _node[\"key\"] = _osctrl_enroll(_node, _secret, _urls[\"enroll\"], _verbose)\n        time.sleep(_sleep)\n\n\ndef _osctrl_config(_sleep, _node, _urls, _secret, _verbose):\n    while True:\n        start = time.perf_counter()\n        headers = {\"X-Real-IP\": _node[\"ip\"]}\n        data = _gen_config(_node[\"key\"])  # the config endpoint only needs the node_key\n        code, resp = _post(_urls[\"config\"], data, headers, _verbose)\n        request_time = time.perf_counter() - start\n        print(f\"⏰ {1000 * request_time:.0f} ms config from\", _node[\"name\"])\n        if code != 200:\n            print(\"HTTP\", str(code), \"with\", _urls[\"config\"])\n            if _verbose:\n                print(resp)\n        if \"node_invalid\" in resp and resp[\"node_invalid\"]:\n            with lock:\n                _node[\"key\"] = _osctrl_enroll(_node, _secret, _urls[\"enroll\"], _verbose)\n        time.sleep(_sleep)\n\n\ndef _osctrl_query_read(_sleep, _node, _urls, _secret, _verbose):\n    while True:\n        start = time.perf_counter()\n        headers = {\"X-Real-IP\": _node[\"ip\"]}\n        data = _gen_queryread(_node[\"key\"])  # the query-read endpoint only needs the node_key\n        code, resp = _post(_urls[\"query\"], data, headers, _verbose)\n        request_time = time.perf_counter() - start\n        print(f\"⏰ {1000 * request_time:.0f} ms query from\", _node[\"name\"])\n        if code != 200:\n            print(\"HTTP\", str(code), \"with\", _urls[\"query\"])\n            if _verbose:\n                print(resp)\n        if \"node_invalid\" in resp and resp[\"node_invalid\"]:\n            with lock:\n                _node[\"key\"] = _osctrl_enroll(_node, _secret, _urls[\"enroll\"], _verbose)\n        if \"queries\" in resp and resp[\"queries\"]:\n            for qname, q in resp[\"queries\"].items():\n                _osctrl_query_write(_node, qname, q, _urls[\"write\"], _verbose)\n        time.sleep(_sleep)\n\n\ndef _osquery_query(_query):\n    result = subprocess.run(  # pylint: disable=subprocess-run-check\n        [OSQUERYI, \"--json\", _query],\n        stdout=subprocess.PIPE,\n    )\n    return json.loads(result.stdout)\n\n\n
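# run the distributed query locally through osqueryi and report its rows back to osctrl\ndef _osctrl_query_write(_node, _query_name, 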
_query, _url, _verbose):\n    start = time.perf_counter()\n    headers = {\"X-Real-IP\": _node[\"ip\"]}\n    query_result = _osquery_query(_query)\n    data = _gen_querywrite(_node, _query_name, query_result)\n    code, resp = _post(_url, data, headers, _verbose)\n    request_time = time.perf_counter() - start\n    print(f\"⏰ {1000 * request_time:.0f} ms query result from\", _node[\"name\"])\n    if code != 200:\n        print(\"HTTP\", str(code), \"with\", _url)\n        if _verbose:\n            print(resp)\n\n\ndef _osctrl_enroll(_node, _secret, _url, _verbose):\n    start = time.perf_counter()\n    headers = {\"X-Real-IP\": _node[\"ip\"]}\n    data = _gen_enroll(_node, _secret)\n    code, resp = _post(_url, data, headers, _verbose)\n    request_time = time.perf_counter() - start\n    print(f\"⏰ {1000 * request_time:.0f} ms enroll from\", _node[\"name\"])\n    if code != 200:\n        print(\"HTTP\", str(code), \"with\", _url)\n        return _node[\"key\"]\n    if _verbose:\n        print(resp)\n    return resp[\"node_key\"]\n\n\n# Parser for command line parameters\nparser = argparse.ArgumentParser(\n    description=\"Script to simulate load for osctrl\",\n    formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n)\n\nrequired = parser.add_argument_group(\"required arguments\")\n\nrequired.add_argument(\"--secret\", \"-s\", help=\"Secret to enroll nodes for osctrl-tls\")\nparser.add_argument(\n    \"--url\",\n    \"-u\",\n    default=TLS_URL,\n    help=\"URL for osctrl-tls used to enroll nodes\",\n)\nparser.add_argument(\n    \"--nodes\",\n    \"-n\",\n    type=int,\n    default=5,\n    help=\"Number of random nodes to simulate\",\n)\nparser.add_argument(\n    \"--status\",\n    \"-S\",\n    type=int,\n    default=LOG_INTERVAL,\n    help=\"Interval in seconds for status requests to osctrl\",\n)\nparser.add_argument(\n    \"--result\",\n    \"-R\",\n    type=int,\n    default=LOG_INTERVAL,\n    help=\"Interval in seconds for result requests to osctrl\",\n)\nparser.add_argument(\n    \"--config\",\n    \"-c\",\n    type=int,\n    default=CONFIG_INTERVAL,\n    help=\"Interval in seconds for config requests to osctrl\",\n)\nparser.add_argument(\n    \"--query\",\n    \"-q\",\n    type=int,\n    default=QUERY_READ_INTERVAL,\n    help=\"Interval in seconds for query requests to osctrl\",\n)\nparser.add_argument(\n    \"--read\",\n    \"-r\",\n    nargs=\"?\",\n    default=argparse.SUPPRESS,\n    help=\"JSON file to read nodes from\",\n)\nparser.add_argument(\n    \"--write\",\n    \"-w\",\n    nargs=\"?\",\n    default=argparse.SUPPRESS,\n    help=\"JSON file to write nodes to\",\n)\nparser.add_argument(\n    \"--verbose\",\n    \"-v\",\n    default=False,\n    action=\"store_true\",\n    help=\"Enable verbose output\",\n)\n\nlock = threading.Lock()\n\nif __name__ == \"__main__\":\n    # Hide SSL warnings\n    requests.packages.urllib3.disable_warnings()\n\n    # Check if required parameters are provided\n    if len(sys.argv) == 1:\n        parser.print_help(sys.stderr)\n        sys.exit(1)\n    args = parser.parse_args()\n\n    # Here we go\n    _url = args.url\n    print(\"Using URL\", _url)\n\n    _urls = {}\n    _urls[\"enroll\"] = _url + TLS_ENROLL\n    _urls[\"log\"] = _url + TLS_LOG\n    _urls[\"config\"] = _url + TLS_CONFIG\n    _urls[\"query\"] = _url + TLS_QUERY_READ\n    _urls[\"write\"] = _url + TLS_QUERY_WRITE\n\n    _secret = args.secret\n    print(\"Using secret\", _secret)\n\n    # If we are reading nodes from JSON file\n    _nodes = []\n    if \"read\" in args:\n        file_read = NODES_JSON\n        if args.read is not None:\n            file_read = args.read\n        print(\"Reading from JSON\", file_read)\n        with open(file_read, \"r\") as f:  # pylint: disable=unspecified-encoding\n            _nodes = json.load(f)\n    else:\n        _num_nodes = args.nodes\n        print(\"Generating\", _num_nodes, \"nodes\")\n        
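# no nodes file was given, so fabricate the requested number of random hosts\n        _nodes = 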
_gen_random_nodes(_num_nodes)\n\n if args.verbose:\n print(json.dumps(_nodes, indent=2, sort_keys=True))\n\n # Enroll nodes and extract node_key and save host_identifier\n for n in _nodes:\n print(\"Enrolling \" + n[\"target\"] + \" as \" + n[\"name\"])\n n[\"key\"] = _osctrl_enroll(n, _secret, _urls[\"enroll\"], args.verbose)\n\n # Save nodes to file\n if \"write\" in args:\n file_write = NODES_JSON\n if args.write is not None:\n file_write = args.write\n print(\"Writing to JSON\", file_write)\n with open(file_write, \"w\") as f: # pylint: disable=unspecified-encoding\n json.dump(_nodes, f)\n\n # Begin concurrent traffic\n threads = []\n\n for n in _nodes:\n # Status log\n t = threading.Thread(\n target=_osctrl_log_status,\n args=(args.status, n, _urls, _secret, args.verbose),\n )\n t.start()\n threads.append(t)\n # Result log\n t = threading.Thread(\n target=_osctrl_log_result,\n args=(args.result, n, _urls, _secret, args.verbose),\n )\n t.start()\n threads.append(t)\n # Config\n t = threading.Thread(\n target=_osctrl_config,\n args=(args.config, n, _urls, _secret, args.verbose),\n )\n t.start()\n threads.append(t)\n # Query read\n t = threading.Thread(\n target=_osctrl_query_read,\n args=(args.query, n, _urls, _secret, args.verbose),\n )\n t.start()\n threads.append(t)\n","repo_name":"jmpsec/osctrl","sub_path":"tools/fake_news.py","file_name":"fake_news.py","file_ext":"py","file_size_in_byte":19230,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"72"} +{"seq_id":"21958864646","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('characters', '0004_auto_20150410_1335'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='magecharacter',\n name='gnosis',\n field=models.IntegerField(default=1, verbose_name='Gnosis', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7'), (8, b'8'), (9, b'9'), (10, b'10')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='magecharacter',\n name='size',\n field=models.IntegerField(default=5, verbose_name='Size'),\n preserve_default=True,\n ),\n ]\n","repo_name":"wlansu/wod","sub_path":"wod/characters/migrations/0005_auto_20150410_1448.py","file_name":"0005_auto_20150410_1448.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1998645004","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 4 13:58:59 2018\n\n@author: 612383461\n\"\"\"\n\n######################\n\"\"\"Conditionals\"\"\"\n######################\n\n#--------------------------------------#\n\"\"\"Task Two: Testing Boolean Example\"\"\"\n#--------------------------------------#\n\nage = 15\nisaTeen = age >= 13 and age <=19\nprint(isaTeen)\n\nage = 22\nisaTeen = age >= 13 and age <= 19\nprint(isaTeen)\n\n#------------------------------#\n\"\"\"Task Three: IF statements\"\"\"\n#------------------------------#\n\nnumber = input(\"Enter a number between 1 and 10: \")\nnumber = int(number) #converts the input string into an integer\nif number > 10:\n print (\"Too high!\") \nif number <= 0:\n print (\"Too low!\")\n\n#-------------------------------#\n\"\"\"Task Four: ELSE statements\"\"\"\n#-------------------------------#\n\n#Adding an else statement\n\nnumber = input(\"Enter a number between 1 and 10: \")\nnumber = int(number) #converts the input 
string into an integer\nif number > 10:\n print (\"Too high!\") \nelif number <= 0:\n print (\"Too low!\") \nelse:\n print (\"Just right!\")\n\n#---------------------------------#\n\"\"\"Task Five: ELIF statements\"\"\"\n#---------------------------------#\n\nage = input(\"Enter an age: \")\nage = int(age) #converts the input string into an integer\nif age < 13:\n print(\"child\")\nelif age <18:\n print(\"teen\")\nelif age < 65:\n print(\"adult\")\nelse:\n print(\"pensioner\")\n\n#NB: Order is important, stops at the first if/elif statement the value satisfies","repo_name":"RachelEW/module2","sub_path":"ch04_conditionals/ch04_rachel_conditionals.py","file_name":"ch04_rachel_conditionals.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28162004558","text":"import logging\n\nfrom odoo import _\nfrom odoo.addons.component.core import Component\nfrom odoo.addons.connector.components.mapper import mapping\n\n_logger = logging.getLogger(__name__)\n\n\nclass HrEmployeeMapper(Component):\n _name = 'okticket.employee.mapper'\n _inherit = 'okticket.import.mapper'\n _usage = 'mapper'\n\n _fields_mapping = {'work_email': 'email'}\n\n def filters_fields_conversion(self, filters):\n for filter_key in filters.keys():\n if filter_key in self._fields_mapping:\n filters[self._fields_mapping[filter_key]] = filters.pop(filter_key)\n return filters\n\n @mapping\n def external_id(self, record):\n return {'external_id': record['id']}\n\n @mapping\n def backend_id(self, record):\n return {'backend_id': self.backend_record.id}\n\n @mapping\n def odoo_id(self, record):\n \"\"\" Will bind the category on a existing one with the same name.\"\"\"\n existing = self.env['hr.employee'].search(\n [('work_email', '=', record['email']),\n ('active', 'in', [True, False])], # Allows modify an archived employee preventing\n # new one creation (duplicaded)\n limit=1,\n )\n if existing:\n return {'odoo_id': existing.id}\n\n\nclass HrEmployeeBatchImporter(Component):\n _name = 'okticket.employee.batch.importer'\n _apply_on = 'okticket.hr.employee'\n _usage = 'importer'\n\n def run(self, filters=None, options=None):\n employee_obj = self.env['hr.employee']\n # Adapter\n backend_adapter = self.component(usage='backend.adapter')\n # Read users from OkTicket\n okticket_hr_employee_ids = []\n # Mapper\n mapper = self.component(usage='mapper')\n filters = mapper.filters_fields_conversion(filters)\n # Binder\n binder = self.component(usage='binder')\n for employee_ext_vals in backend_adapter.search(filters):\n # Map to odoo data\n internal_data = mapper.map_record(employee_ext_vals).values()\n # find if the OkTicket id already exists in odoo\n binding = binder.to_internal(employee_ext_vals['id'])\n\n user_to_bind_vals = {\n 'okticket_user_id': employee_ext_vals.get('id'),\n 'work_email': employee_ext_vals.get('email'),\n 'name': employee_ext_vals.get('name'),\n }\n odoo_user = False\n if binding: # User exists and is bound (UPDATE)\n binding.write(internal_data)\n odoo_user = employee_obj.browse(internal_data['odoo_id'])\n else: # User doesn't bound\n if not internal_data.get('odoo_id'):\n # User doesn't exist\n odoo_user = employee_obj. 
\\\n with_context(ignore_okticket_synch=True).create(user_to_bind_vals)\n else:\n # User exists but is not bound\n odoo_user = employee_obj.browse(internal_data['odoo_id'])\n odoo_user.write(user_to_bind_vals)\n # Binding between Odoo employee and Okticket user\n internal_data['odoo_id'] = odoo_user.id\n binding = self.model.create(internal_data)\n if binding:\n okticket_hr_employee_ids.append(binding.id)\n if 'id' in employee_ext_vals:\n external_id = str(employee_ext_vals['id'])\n binder.bind(external_id, binding)\n _logger.info(_('Imported'))\n\n _logger.info(_('Import from Okticket DONE'))\n return okticket_hr_employee_ids\n\n\nclass HrEmployeeRecordImporter(Component):\n _name = 'okticket.employee.record.exporter'\n _apply_on = 'okticket.hr.employee'\n _usage = 'record.importer'\n\n def run(self, filters=None, options=None):\n # Adapter\n backend_adapter = self.component(usage='backend.adapter')\n # Read users from OkTicket\n okticket_hr_employee_ids = []\n # Mapper\n mapper = self.component(usage='mapper')\n # Binder\n binder = self.component(usage='binder')\n\n for employee_ext_vals in backend_adapter.search(filters):\n # Map to odoo data\n internal_data = mapper.map_record(employee_ext_vals).values()\n # find if the OkTicket id already exists in odoo\n binding = binder.to_internal(employee_ext_vals.get('id'))\n\n if binding:\n # if yes, we update it\n binding.write(internal_data)\n else:\n if internal_data.get('odoo_id'):\n binding = self.model.create(internal_data)\n if internal_data.get('odoo_id'):\n user = self.env['hr.employee'].browse(internal_data['odoo_id'])\n user.write({'okticket_user_id': employee_ext_vals.get('id')})\n if binding:\n okticket_hr_employee_ids.append(binding.id)\n # finally, we bind both, so the next time we import\n # the record, we'll update the same record instead of\n # creating a new on\n binder.bind(employee_ext_vals.get('id'), binding)\n _logger.info('Imported ')\n _logger.info(_('Import from Okticket DONE'))\n return okticket_hr_employee_ids\n","repo_name":"Alialabs/connector-okticket","sub_path":"okticket_connector_user_synchronization/models/okticket_hr_employee_importer.py","file_name":"okticket_hr_employee_importer.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11291338683","text":"import envtb.ldos.hamiltonian\nimport numpy as np\nimport envtb.time_propagator.lanczos\nimport envtb.time_propagator.wave_function\nimport envtb.time_propagator.vector_potential\nimport envtb.wannier90.w90hamiltonian as w90hamiltonian\n\nclass Propagator(object):\n \"\"\"docstring for Propagator\"\"\"\n def __init__(self, num_error=10**(-18), regime='SIL'):\n self.num_error=num_error\n self.regime=regime\n\n @staticmethod\n def propagate_wave_function(wf_init, hamilt, NK=10, dt=1., maxel=None,\n num_error=10**(-18), regime='SIL',\n file_out=None, **kwrds):\n #print 'Start!'\n prop = envtb.time_propagator.lanczos.LanczosPropagator(\n \t\twf=wf_init, ham=hamilt, NK=NK, dt=dt)\n\n wf_final, dt_new, NK_new = prop.propagate(\n \tnum_error=num_error, regime=regime)\n\n #print 'dt_old = %(dt)g; dt_new = %(dt_new)g; NK_old = %(NK)g; NK_new = %(NK_new)g'\\\n #\t\t% vars()\n #print 'norm', wf_final.check_norm()\n\n if file_out is None:\n return wf_final, dt_new, NK_new\n else:\n wf_final.save_wave_function_pic(file_out, maxel, **kwrds)\n return wf_final, dt_new, 
NK_new\n\n","repo_name":"zonksoft/envTB","sub_path":"envtb/time_propagator/propagator.py","file_name":"propagator.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"23658422874","text":"#!/usr/bin/env python\n# Python Network Programming Cookbook, Second Edition -- Chapter - 8\n# This program is optimized for Python 2.7.12.\n# It may run on any other version with/without modifications.\n\nimport argparse\nimport pcap\nfrom construct.protocols.ipstack import ip_stack\n\n\ndef print_packet(pktlen, data, timestamp):\n \"\"\" Callback for priniting the packet payload\"\"\"\n if not data:\n return\n \n stack = ip_stack.parse(data)\n payload = stack.next.next.next\n print (payload)\n\ndef main():\n # setup commandline arguments\n parser = argparse.ArgumentParser(description='Packet Sniffer')\n parser.add_argument('--iface', action=\"store\", dest=\"iface\", default='eth0')\n parser.add_argument('--port', action=\"store\", dest=\"port\", default=80, type=int)\n # parse arguments\n given_args = parser.parse_args()\n iface, port = given_args.iface, given_args.port\n # start sniffing\n pc = pcap.pcapObject()\n pc.open_live(iface, 1600, 0, 100)\n pc.setfilter('dst port %d' %port, 0, 0)\n \n print ('Press CTRL+C to end capture')\n try:\n while True:\n pc.dispatch(1, print_packet)\n except KeyboardInterrupt:\n print ('Packet statistics: %d packets received, %d packets dropped, %d packets dropped by the interface' % pc.stats())\n\nif __name__ == '__main__':\n main()\n \n","repo_name":"PacktPublishing/Python-Network-Programming-Cookbook-Second-Edition","sub_path":"Chapter08/8_1_packet_sniffer.py","file_name":"8_1_packet_sniffer.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"72"} +{"seq_id":"32591509285","text":"import math\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef plot_results(values, title='Title', xlabel='xlabel', ylabel='ylabel'):\n\n if \"loss\" in title.lower():\n throw_away_amount = len(values) // 10\n\n # Create a new list without the first 1/10th elements\n values = values[throw_away_amount:]\n\n # Calculate moving averages\n window_size = len(values) // 10 + 1\n moving_averages = np.convolve(values, np.ones(window_size) / window_size, mode='valid')\n\n # Plot original rewards per episode\n plt.plot(values, label='Original')\n\n # Plot smoothed rewards per episode\n plt.plot(range(window_size - 1, len(values)), moving_averages, label='Smoothed')\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend()\n plt.show()","repo_name":"giulianorasper/ml-workshop-pong","sub_path":"visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70336459752","text":"from flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nfrom flask import url_for\n\ndb = SQLAlchemy()\n\n\nclass ModelMixin(object):\n def __repr__(self):\n class_name = self.__class__.__name__\n properties = ('{0} = {1}'.format(k, v) for k, v in self.__dict__.items())\n return '<{0}: \\n {1}\\n>'.format(class_name, '\\n '.join(properties))\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n # self.deleted = True\n # self.save()\n\n\nbbs_tags = 
db.Table('bbs_tags_contents',\n                    db.Column('tag_id', db.Integer, db.ForeignKey('bbs_tag.id', ondelete='CASCADE', onupdate='CASCADE')),\n                    db.Column('content_id', db.Integer, db.ForeignKey('bbs_contents.id', ondelete='CASCADE', onupdate='CASCADE')))\n\nblog_tags = db.Table('blog_tags_contents',\n                     db.Column('tag_id', db.Integer, db.ForeignKey('blog_tags.id', ondelete='CASCADE', onupdate='CASCADE')),\n                     db.Column('content_id', db.Integer, db.ForeignKey('blog_contents.id', ondelete='CASCADE', onupdate='CASCADE')))\n\n\n\"\"\"\nExample of a script:\n\napt-get install python3 python3-dev ....\npip3 install flask flask-script flask-migrate ....\n\nimport os\n\nos.system('pip3 install flask')\n\n\n\n\"\"\"","repo_name":"YXMforfun/bbs_and_blog","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29529950257","text":"import os\nimport numpy as np\nfrom numpy.linalg import norm\nimport librosa\n#from dtw import *  # NOTE: the get_audio_time_map* functions below call dtw() and raise NameError while this import stays commented out\nfrom voice_util import detect_audio_segment_with_medi_tr\n\n\ndef save_feat_data(feat_file, mfcc, energy, rms):\n    # the array must be saved under the 'energy' key so load_feat below can read it back\n    np.savez(feat_file, mfcc=mfcc, energy=energy, rms=rms)\n\n\ndef load_feat(feat_file):\n    load_data = np.load(feat_file)\n    return load_data['mfcc'], load_data['energy'], load_data['rms']\n\n\ndef mfcc_from_file(audio_file, sample_rate):\n    frame_len = 256\n    frame_step = 128\n    y, sr = librosa.load(audio_file, sr=None)\n    assert sr == sample_rate\n    y = normalize_sig(y)\n    energy = librosa.feature.rms(y, frame_length=frame_len, hop_length=frame_step)\n    S, phase = librosa.magphase(librosa.stft(y, win_length=frame_len, hop_length=frame_step))\n    rms = librosa.feature.rms(S=S)\n    mfcc = librosa.feature.mfcc(y, sr, n_fft=frame_len, hop_length=frame_step, n_mels=40, n_mfcc=19)\n    mfcc = mfcc[1:, :]\n\n    return mfcc, energy, rms\n\n\ndef normalize_sig(wav_data):\n    signal = np.double(wav_data)\n    signal = signal / (2.0 ** 15)\n    assert isinstance(signal, np.ndarray)\n    sig_mean = signal.mean()\n    #sig_std = signal.std()\n    sig_max = (np.abs(signal)).max()\n    #signal = (signal - sig_mean) / (sig_std + 0.0000000001)\n    #signal = (signal - sig_mean)\n    signal = (signal - sig_mean) / (sig_max + 0.0000000001)\n    return signal\n\n\ndef dtw_dist(audio_file1, audio_file2):\n    mfcc1, energy1, rms1 = mfcc_from_file(audio_file1, sample_rate=8000)\n    mfcc2, energy2, rms2 = mfcc_from_file(audio_file2, sample_rate=8000)\n\n    dist, cost, acc_cost, path = dtw(mfcc1.T, mfcc2.T, dist=lambda x, y: norm(x - y, ord=1))\n    # print ('Normalized distance between the two sounds: {}'.format(dist))\n    return dist, path\n\n\ndef get_match_point_list(speech_seg1, speech_seg2):\n    match_points = []\n    for seg_st1, seg_end1 in speech_seg1:\n        for seg_st2, seg_end2 in speech_seg2:\n            match_points.append([[seg_st1, seg_st2], [seg_end1, seg_end2]])\n\n    return match_points\n\n\n# Returns index of x in arr if present, else -1\ndef binarySearch(arr, l, r, x):\n    # Check base case\n    if r >= l:\n        mid = l + (r - l) // 2\n\n        # If element is present at the middle itself\n        if arr[mid] == x:\n            return mid\n\n        # If element is smaller than mid, then it can only\n        # be present in left subarray\n        elif arr[mid] > x:\n            return binarySearch(arr, l, mid - 1, x)\n\n        # Else the element can only be present in right subarray\n        else:\n            return binarySearch(arr, mid + 1, r, x)\n\n    else:\n        # Element is not present in the array\n        return -1\n\ndef get_seg_info(dtw_path):\n    first_path = dtw_path[0]\n    sec_path = dtw_path[1]\n    
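# the loop below consumes the warping path two points at a time, so trim an odd-length path to even first\n    if len(first_path) 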
% 2 == 1:\n first_path = first_path[:-1]\n sec_path = sec_path[:-1]\n assert len(first_path) == len(sec_path)\n assert len(first_path) %2 == 0\n cur_idx = 0\n path_len = len(first_path)\n\n # Initialize segment information\n\n audio_seg_list = [[],[]]\n prev_st1 = first_path[cur_idx]\n prev_st2 = sec_path[cur_idx]\n prev_end1 = first_path[cur_idx + 1]\n prev_end2 = sec_path[cur_idx + 1]\n\n # check overlap segment\n prev_overlap = 0 # 0- no-overlap, 1 : first audio overlap, 2: second audio overlap\n while cur_idx < path_len:\n cur_overlap = 0 # 0- no-overlap, 1 : first audio overlap, 2: second audio overlap\n if first_path[cur_idx] == first_path[cur_idx+1]:\n cur_overlap = 1\n elif sec_path[cur_idx] == sec_path[cur_idx+1]:\n cur_overlap = 2\n\n # update next segment information\n if cur_overlap+prev_overlap == 0:\n audio_seg_list[0].append([first_path[cur_idx], first_path[cur_idx+1]])\n audio_seg_list[1].append([sec_path[cur_idx], sec_path[cur_idx+1]])\n elif cur_overlap != prev_overlap and prev_overlap != 0:\n audio_seg_list[0].append([prev_st1, prev_end1])\n audio_seg_list[1].append([prev_st2, prev_end2])\n\n # update previous position\n if cur_overlap != prev_overlap:\n prev_st1 = first_path[cur_idx]\n prev_st2 = sec_path[cur_idx]\n\n prev_end1 = first_path[cur_idx + 1]\n prev_end2 = sec_path[cur_idx + 1]\n\n # update prev overlap\n prev_overlap = cur_overlap\n\n # move current index into next\n cur_idx += 2\n if cur_idx == path_len and cur_overlap+prev_overlap != 0:\n audio_seg_list[0].append([prev_st1, prev_end1])\n audio_seg_list[1].append([prev_st2, prev_end2])\n\n return audio_seg_list\n\n\ndef get_audio_time_map_org(audio_file1, audio_file2):\n frame_step = 0.016\n # calculate mfcc\n mfcc1, energy1, rms1 = mfcc_from_file(audio_file1, sample_rate=8000)\n mfcc2, energy2, rms2 = mfcc_from_file(audio_file2, sample_rate=8000)\n wav1_len = len(rms1[0])*frame_step\n wav2_len = len(rms2[0])*frame_step\n\n # get dtw cost\n dist, cost, acc_cost, path = dtw(mfcc1.T, mfcc2.T, dist=lambda x, y: norm(x - y, ord=1))\n print ('Normalized distance between the two sounds: {}'.format(dist))\n\n # get vad segment\n speech_seg1, _ = detect_audio_segment_with_medi_tr(energy1[0], frame_step=frame_step, tr=0.09, medi_tr=0.007)\n speech_seg2, _ = detect_audio_segment_with_medi_tr(energy2[0], frame_step=frame_step, tr=0.09, medi_tr=0.007)\n\n # list of point on audio 1\n idx_list_on_audio1 = []\n # inverse search of dtw result because dtw-master results contain other's index array.\n for seg_st, seg_end in speech_seg1:\n seg_st_idx = int(np.floor(seg_st/frame_step))\n st_idx_on_audio1 = binarySearch(path[0], 0, len(path[0])-1, seg_st_idx)\n seg_end_idx = int(np.floor(seg_end/frame_step))\n end_idx_on_audio1 = binarySearch(path[0], 0, len(path[0])-1, seg_end_idx)\n idx_list_on_audio1 += [st_idx_on_audio1, end_idx_on_audio1]\n\n idx_list_on_audio2 = []\n for seg_st, seg_end in speech_seg2:\n seg_st_idx = int(np.floor(seg_st/frame_step))\n st_idx_on_audio2 = binarySearch(path[1], 0, len(path[1])-1, seg_st_idx)\n seg_end_idx = int(np.floor(seg_end/frame_step))\n end_idx_on_audio2 = binarySearch(path[1], 0, len(path[1])-1, seg_end_idx)\n idx_list_on_audio2 += [st_idx_on_audio2, end_idx_on_audio2]\n\n # match paths between\n dist_, cost_, acc_cost_, path_ = dtw(np.array(idx_list_on_audio1).reshape((len(idx_list_on_audio1), 1)),\n np.array(idx_list_on_audio2).reshape((len(idx_list_on_audio2), 1)),\n dist=lambda x, y: norm(x - y, ord=1))\n\n audio_seg_path = get_seg_info(path_)\n audio_seg_match = [[], []]\n for 
first_seg, sec_seg in zip(audio_seg_path[0], audio_seg_path[1]):\n # some filtering dtw data\n if first_seg[0] == first_seg[1] or sec_seg[0] == sec_seg[1]:\n continue\n if abs(first_seg[1]-first_seg[0]) < 0.3 or abs(sec_seg[1]-sec_seg[0]) < 0.3:\n continue\n\n seg_st1 = path[1][idx_list_on_audio1[first_seg[0]]]*frame_step\n seg_end1 = path[1][idx_list_on_audio1[first_seg[1]]]*frame_step\n seg_st2 = path[0][idx_list_on_audio2[sec_seg[0]]]*frame_step\n seg_end2 = path[0][idx_list_on_audio2[sec_seg[1]]]*frame_step\n audio_seg_match[0].append([seg_st2, seg_end2])\n audio_seg_match[1].append([seg_st1, seg_end1])\n\n # print(audio_seg_match)\n return audio_seg_match, dist, wav1_len, wav2_len\n\n\ndef get_audio_time_map(audio_file1, audio_file2):\n frame_step = 0.016\n # calculate mfcc\n mfcc1, energy1, rms1 = mfcc_from_file(audio_file1, sample_rate=8000)\n mfcc2, energy2, rms2 = mfcc_from_file(audio_file2, sample_rate=8000)\n wav1_len = len(rms1[0])*frame_step\n wav2_len = len(rms2[0])*frame_step\n\n # get vad segment\n speech_seg1, _ = detect_audio_segment_with_medi_tr(energy1[0], frame_step=frame_step, tr=0.04, medi_tr=0.007) #0.09\n # speech_seg2, _ = detect_audio_segment_with_medi_tr(energy2[0], frame_step=frame_step, tr=0.02, medi_tr=0.007) #0.09\n\n # get dtw cost\n dist, cost, acc_cost, path = dtw(mfcc1.T, mfcc2.T, dist=lambda x, y: norm(x - y, ord=1))\n print ('Normalized distance between the two sounds: {}'.format(dist))\n\n # list of point on audio 1\n audio_seg_match = [[], []]\n idx_list_on_audio1 = []\n # inverse search of dtw result because dtw-master results contain other's index array.\n for seg_st, seg_end in speech_seg1:\n seg_st_idx = int(np.floor(seg_st/frame_step))\n st_idx_on_audio1 = binarySearch(path[0], 0, len(path[0])-1, seg_st_idx)\n seg_end_idx = int(np.floor(seg_end/frame_step))\n end_idx_on_audio1 = binarySearch(path[0], 0, len(path[0])-1, seg_end_idx)\n idx_list_on_audio1 += [st_idx_on_audio1, end_idx_on_audio1]\n seg_st1 = path[1][st_idx_on_audio1]*frame_step\n seg_end1 = path[1][end_idx_on_audio1]*frame_step\n audio_seg_match[0].append([seg_st, seg_end])\n audio_seg_match[1].append([seg_st1, seg_end1])\n\n # print(audio_seg_match)\n return audio_seg_match, dist, wav1_len, wav2_len\n\n\n\ndef get_audio_time_map_dtw_python(audio_file1, audio_file2):\n frame_step = 0.016\n # calculate mfcc\n mfcc1, energy1, rms1 = mfcc_from_file(audio_file1, sample_rate=8000)\n mfcc2, energy2, rms2 = mfcc_from_file(audio_file2, sample_rate=8000)\n wav1_len = len(rms1[0])*frame_step\n wav2_len = len(rms2[0])*frame_step\n\n # get vad segment\n speech_seg1, _ = detect_audio_segment_with_medi_tr(energy1[0], frame_step=frame_step, tr=0.04, medi_tr=0.007) #0.09\n # speech_seg2, _ = detect_audio_segment_with_medi_tr(energy2[0], frame_step=frame_step, tr=0.02, medi_tr=0.007) #0.09\n\n # get dtw cost\n # dist, cost, acc_cost, path = dtw(mfcc1.T, mfcc2.T, dist=lambda x, y: norm(x - y, ord=1))\n alignment = dtw(mfcc1.T, mfcc2.T, keep_internals=True)\n #alignment.plot(type=\"threeway\")\n #print ('Normalized distance between the two sounds: {}'.format(dist))\n path = [alignment.index1, alignment.index2]\n dist = alignment.distance\n nor_dist = alignment.normalizedDistance\n\n # list of point on audio 1\n audio_seg_match = [[], []]\n idx_list_on_audio1 = []\n # inverse search of dtw result because dtw-master results contain other's index array.\n for seg_st, seg_end in speech_seg1:\n seg_st_idx = int(np.floor(seg_st/frame_step))\n st_idx_on_audio1 = binarySearch(path[0], 0, len(path[0])-1, 
seg_st_idx)\n seg_end_idx = int(np.floor(seg_end/frame_step))\n end_idx_on_audio1 = binarySearch(path[0], 0, len(path[0])-1, seg_end_idx)\n idx_list_on_audio1 += [st_idx_on_audio1, end_idx_on_audio1]\n seg_st1 = path[1][st_idx_on_audio1]*frame_step\n seg_end1 = path[1][end_idx_on_audio1]*frame_step\n audio_seg_match[0].append([seg_st, seg_end])\n audio_seg_match[1].append([seg_st1, seg_end1])\n\n # print(audio_seg_match)\n return audio_seg_match, dist, wav1_len, wav2_len\n\n\ndef dtw_test_audio_dir(audio_dir):\n spk_names = []\n for spk_dir in os.listdir(audio_dir):\n spk_path = os.path.join(audio_dir, spk_dir)\n if not os.path.isdir(spk_path) or spk_dir.find(\"Files_\") == -1:\n continue\n spk_name = spk_dir[len(\"Files_\"):]\n #if spk_name != \"7289943383\":\n # continue\n\n if spk_name not in spk_names:\n print(spk_name)\n spk_names.append(spk_name)\n wav_dir = os.path.join(spk_path, \"good\")\n if not os.path.exists(wav_dir):\n print(\"there is not such folder: {}\".format(wav_dir))\n continue\n wav_files = []\n for wav_file in os.listdir(wav_dir):\n if wav_file.find(\".wav\") == -1:\n continue\n # print(wav_file)\n wav_files.append(wav_file)\n\n if len(wav_files) < 2:\n continue\n\n for idx in range(len(wav_files)//2):\n cur_idx = idx * 2\n audio_file1 = os.path.join(wav_dir, wav_files[cur_idx])\n audio_file2 = os.path.join(wav_dir, wav_files[cur_idx+1])\n print(\"comparing dtw between {} and {}\".format(audio_file1, audio_file2))\n get_audio_time_map(audio_file1, audio_file2)\n\n\nif __name__ == \"__main__\":\n test_audio= \"voice_recordings/2020-01-19/noisy/enroll_8447290101_15791774634480.wav\"\n # aa = os.path.basename(test_audio)\n\n wav_dir1 = \"merge/Files_+14082309381/good\"\n audio_file1 = \"enroll_+14082309381_15766209254.wav\"\n audio_file2 = \"verify_+14082309381_157664224379.wav\"\n audio_file1 = os.path.join(wav_dir1, audio_file1)\n audio_file2 = os.path.join(wav_dir1, audio_file2)\n\n audio_file1 = \"merge/Files_5106764206/good/enroll_5106764206_15795077762598.wav\"\n audio_file2 = \"merge/Files_5106764206/good/verify_5106764206_15786121117.wav\"\n # dist_map_with_dtw(audio_file1, audio_file2)\n get_audio_time_map(audio_file1, audio_file2)\n audio_dir = \"merge\"\n # dtw_test_audio_dir(audio_dir)\n","repo_name":"JaneBittner/Voice-Command","sub_path":"dtw_audio_match.py","file_name":"dtw_audio_match.py","file_ext":"py","file_size_in_byte":12968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26233675455","text":"# --- import \nfrom typing_extensions import Self\nfrom PyQt6.QtWidgets import QWidget, QFrame\nfrom PyQt6.QtWidgets import QVBoxLayout, QHBoxLayout\nfrom PyQt6.QtWidgets import QLabel, QLineEdit, QDateEdit, QTextEdit, QPushButton\nfrom PyQt6.QtCore import Qt, QDate, pyqtSignal\nimport film\nfrom LabelledWidget import LabelledWidget\nfrom MultiLineEdit import MultiLineEdit\nfrom WImage import WImage\nfrom FigureWidget import FigureWidget\nimport annuaire\n\n# ----------------------------------------------------------------------\n# --- class FilmVue\n# ----------------------------------------------------------------------\nclass FilmView (QWidget):\n # +---------------------------------------------------------------------+\n # | title | \n # +-------------------+--------------------------+----------------------+\n # | Distributor | Release Date | license |\n # +-------------------+--------------------------+----------------------+\n # | genre | run time | |\n # 
+-------------------+--------------------------+----------------------+\n    #  | revenu Domestic   | revenu international     | revenu worlds sales  |\n    #  +-------------------+--------------------------+----------------------+\n    #  | descriptif        |        image                                     |\n    #  +-------------------+--------------------------+----------------------+\n    #  | previous          | ------------------ message ------------------ | next |\n    #  +-------------------+--------------------------+----------------------+\n\n    # signals\n    next : pyqtSignal = pyqtSignal()\n    previous : pyqtSignal = pyqtSignal()\n\n    # constructor\n    def __init__(self : Self, record : dict) -> None:\n        super().__init__()\n        self.__record : dict = record\n\n        # drawGUI\n        self.topLayout : QVBoxLayout = QVBoxLayout()\n        self.setLayout(self.topLayout)\n\n        # info1 Widget\n        self.info1Widget : QFrame = QFrame()  # create a box\n        self.info1Layout : QHBoxLayout = QHBoxLayout()  # create a layout (what goes inside)\n        self.info1Widget.setLayout(self.info1Layout)  # put the layout into the box\n        self.info1Widget.setFrameStyle(QFrame.Shape.StyledPanel)  # QFrame inside the widget\n\n        # info2 Widget\n        self.info2Widget : QFrame = QFrame()\n        self.info2Layout : QHBoxLayout = QHBoxLayout()\n        self.info2Widget.setLayout(self.info2Layout)\n        self.info2Widget.setFrameStyle(QFrame.Shape.StyledPanel)\n\n        # revenu Widget\n        self.revenuWidget : QFrame = QFrame()\n        self.revenuLayout : QHBoxLayout = QHBoxLayout()\n        self.revenuWidget.setLayout(self.revenuLayout)\n        self.revenuWidget.setFrameStyle(QFrame.Shape.StyledPanel)\n\n        # dataWidget\n        self.dataWidget : QFrame = QFrame()\n        self.dataLayout : QHBoxLayout = QHBoxLayout()\n        self.dataWidget.setLayout(self.dataLayout)\n        self.dataWidget.setFrameStyle(QFrame.Shape.StyledPanel)\n\n        # navMsgWidget\n        self.navMsgWidget : QFrame = QFrame()\n        self.navMsgLayout : QHBoxLayout = QHBoxLayout()\n        self.navMsgWidget.setLayout(self.navMsgLayout)\n        self.navMsgWidget.setFrameStyle(QFrame.Shape.StyledPanel)\n\n        # row 1: title\n        self.title = QLabel(\"Title\")\n        self.topLayout.addWidget(self.title)\n\n        # row 2: Distributor, Date, license\n        self.distributorLE = QLineEdit()\n        self.distributor = LabelledWidget(\"Distributor:\", self.distributorLE, \"distributor name\", Qt.Orientation.Vertical, self.distributorLE.setText, self.distributorLE.text, None)\n        self.info1Layout.addWidget(self.distributor)\n        self.releaseDateLE = QDateEdit()\n        self.releaseDate = LabelledWidget(\"Release date:\", self.releaseDateLE, QDate(2000,1,2), Qt.Orientation.Vertical, self.releaseDateLE.setDate, self.releaseDateLE.date, None)\n        self.info1Layout.addWidget(self.releaseDate)\n        self.licenseLE = QLineEdit()\n        self.license = LabelledWidget(\"license:\", self.licenseLE, \"license\", Qt.Orientation.Vertical, self.licenseLE.setText, self.licenseLE.text, None)\n        self.info1Layout.addWidget(self.license)\n\n        # row 3: genre, run-time\n        self.genre = MultiLineEdit(\"Genre(s):\")\n        self.genre.addItem(\"genre 0\")\n        self.genre.addItem(\"genre 1\")\n        self.info2Layout.addWidget(self.genre)\n        self.runtimeLE = QLineEdit()\n        self.runtime = LabelledWidget(\"Run-time (min):\", self.runtimeLE, \"120:00\", Qt.Orientation.Horizontal, self.runtimeLE.setText, self.runtimeLE.text, None)\n        self.info2Layout.addWidget(self.runtime)\n\n        # row 4: revenue\n        self.revenueDomesticLE = QLineEdit()\n        self.revenuDomestic = LabelledWidget(\"Domestic Sales (in $):\", self.revenueDomesticLE, \"1 000 000\", Qt.Orientation.Horizontal, self.revenueDomesticLE.setText, self.revenueDomesticLE.text, None)\n        
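# Illustrative aside -- the three revenue rows here repeat one widget pattern;\n        # a small helper (the _money_field name is hypothetical, not in the original) could factor it out:\n        #   def _money_field(self, label: str):\n        #       le = QLineEdit()\n        #       lw = LabelledWidget(label, le, \"1 000 000\", Qt.Orientation.Horizontal, le.setText, le.text, None)\n        #       return le, lw\n        #   self.revenuInterLE, self.revenuInter = self._money_field(\"International Sales (in $):\")\n        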
self.revenuInterLE = QLineEdit()\n        self.revenuInter = LabelledWidget(\"International Sales (in $):\", self.revenuInterLE, \"1 000 000\", Qt.Orientation.Horizontal, self.revenuInterLE.setText, self.revenuInterLE.text, None)\n        self.revenuWorldLE = QLineEdit()\n        self.revenuWorld = LabelledWidget(\"World Sales (in $):\", self.revenuWorldLE, \"1 000 000\", Qt.Orientation.Horizontal, self.revenuWorldLE.setText, self.revenuWorldLE.text, None)\n\n        self.revenuLayout.addWidget(self.revenuDomestic)\n        self.revenuLayout.addWidget(self.revenuInter)\n        self.revenuLayout.addWidget(self.revenuWorld)\n\n        # row 5: descriptif, (image)\n        self.descriptif = QTextEdit(\"descriptif.\")\n        self.image = WImage()\n        self.dataLayout.addWidget(self.descriptif,50)\n        self.dataLayout.addWidget(self.image,50)\n\n        # row 6: navigation + message\n        self.previousButton : QPushButton = QPushButton('<<')\n        self.msg : QLabel = QLabel('self.compteFilm')\n        self.nextButton : QPushButton = QPushButton('>>')\n        self.navMsgLayout.addWidget(self.previousButton)\n        self.navMsgLayout.addStretch()\n        self.navMsgLayout.addWidget(self.msg)\n        self.navMsgLayout.addStretch()\n        self.navMsgLayout.addWidget(self.nextButton)\n\n        # add all widgets to the top layout\n        self.topLayout.addWidget(self.info1Widget)  # add everything\n        self.topLayout.addWidget(self.info2Widget)\n        self.topLayout.addWidget(self.revenuWidget)\n        self.topLayout.addWidget(self.dataWidget)\n        self.topLayout.addWidget(self.navMsgWidget)\n\n        # connections\n        self.nextButton.clicked.connect(self.cbNext)\n        self.previousButton.clicked.connect(self.cbPrevious)\n\n        # show() !\n        self.show()\n\n        self.modele = annuaire.Annuaire('./Highest_Holywood_Grossing_Movies.json')\n\n    # methods\n    def update(self : Self, f : film.Film, genres : list[str], distributors : list[str], licenses : list[str]) -> None:\n        self.title.setText(f.title)\n        print(distributors[0])\n        self.distributor.set(distributors[f.distributor])\n        self.releaseDate.set(f.releaseDate)\n        self.license.set(licenses[f.license])\n        self.runtime.set(str(f.runtime))\n        self.descriptif.setText(f.info)\n        self.revenuDomestic.set(str(f.domesticSales))\n        self.revenuInter.set(str(f.internationalSales))\n        self.revenuWorld.set(str(f.worldSales))\n        self.image.query(f.title)\n\n        self.genre.clear()\n        for g in f.genre:\n            self.genre.addItem(genres[g])\n\n        # for the film counter in the view => pass update a films parameter\n        # together with the number of films\n\n    def message(self : Self, msg : str) -> None: self.msg.setText(msg)\n\n    # callbacks\n    def cbNext(self: Self) -> None:\n        self.next.emit()\n    def cbPrevious(self: Self) -> None:\n        self.previous.emit()\n\n# --- main: kind of unit test\nif __name__ == \"__main__\":\n    import sys\n    from PyQt6.QtWidgets import QApplication\n    import hhgm\n\n    app = QApplication(sys.argv)\n    db = hhgm.HHGM()\n    db.loadCSV()\n    fv = FilmView({})\n    fw : FigureWidget = FigureWidget()\n    sys.exit(app.exec())\n\n","repo_name":"BuathierTom/SAE_INTERFACE","sub_path":"Interface/FilmView.py","file_name":"FilmView.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8662514761","text":"\"\"\"\nGiven a string, compute recursively a new string where all the 'x' chars have been removed.\nnoX(\"xaxb\") → \"ab\"\nnoX(\"abc\") → \"abc\"\nnoX(\"xx\") → \"\"\n\"\"\"\n\ndef noX(word: str) -> str:\n    if len(word) < 1:\n        return word\n    \n    if word[0] == \"x\":\n        return noX(word[1:])\n    else:\n        return word[0] + noX(word[1:])\n\n\nprint(\n\n    noX(\"xaxb\"),\n    
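# Illustrative aside: the same filter can be written without recursion,\n    # e.g. \"\".join(ch for ch in word if ch != \"x\"); the recursive head/tail\n    # form above is what the exercise asks for.\n    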
noX(\"abc\"),\n noX(\"xx\"),\n\n sep=\"\\n\"\n)","repo_name":"srisaipog/ICS4U-Classwork","sub_path":"Assessment Practice/Final Test Prep/noX.py","file_name":"noX.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16539359217","text":"\r\n\r\nmillis = eval(input(\"Enter millis:\"))\r\ndef convertmillis(millis):\r\n totalsecond = millis // 1000\r\n currentsecond = totalsecond % 60\r\n\r\n currentminutes = totalsecond // 60\r\n\r\n totalminutes = currentsecond % 60\r\n currenthours = totalminutes // 60\r\n\r\n print(\"{}:{}:{}\".format(currenthours,currentminutes,currentsecond))\r\n\r\n\r\nconvertmillis(millis)\r\n","repo_name":"yuuuhui/Basic-python-answers","sub_path":"梁勇版_6.23.py","file_name":"梁勇版_6.23.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"183828349","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.types import Model, Offer, RegionalModel, Shop\nfrom core.testcase import TestCase, main\n\nfrom unittest import skip\n\n\nclass T(TestCase):\n @classmethod\n def prepare_cpa_only_in_parallel(cls):\n cls.index.regional_models += [\n RegionalModel(hyperid=101, rids=[54], has_good_cpa=True, has_cpa=True),\n RegionalModel(hyperid=102, rids=[54], has_good_cpa=True, has_cpa=True),\n RegionalModel(hyperid=103, rids=[54], has_good_cpa=True, has_cpa=True),\n ]\n\n cls.index.models += [\n Model(hyperid=101, hid=91122, title=\"model 101\"),\n Model(hyperid=102, hid=91011, title=\"model 102\"),\n Model(hyperid=103, hid=91012, title=\"model 103\"),\n ]\n\n cls.index.shops += [\n Shop(fesh=1, regions=[54], cpa=Shop.CPA_REAL, name='CPA Магазин 1'),\n Shop(fesh=2, regions=[54], cpa=Shop.CPA_NO, cpc=Shop.CPC_REAL, name='CPC Магазин 2'),\n Shop(fesh=3, regions=[54], cpa=Shop.CPA_REAL, cpc=Shop.CPC_REAL, name='CPA CPC Магазин 3'),\n ]\n\n cls.index.offers += [\n Offer(fesh=1, title=\"CPA офер 1-1\", cpa=Offer.CPA_REAL, hyperid=101, hid=91122),\n Offer(fesh=1, title=\"CPC офер 1-2\", cpa=Offer.CPA_NO, hyperid=101, hid=91122),\n Offer(fesh=2, title=\"CPA офер 3-1\", cpa=Offer.CPA_REAL, hyperid=102, hid=91011),\n Offer(fesh=2, title=\"CPC офер 3-2\", cpa=Offer.CPA_NO, is_cpc=True, hyperid=102, hid=91011),\n Offer(fesh=3, title=\"CPA офер 4-1\", cpa=Offer.CPA_REAL, hyperid=103, hid=91012),\n Offer(fesh=3, title=\"CPC офер 4-2\", cpa=Offer.CPA_NO, is_cpc=True, hyperid=103, hid=91012),\n ]\n\n @skip('MARKETOUT-41660, из CPA-only убраны HIDs, возможно стоит переделать тест на HardHIDs')\n def test_cpa_only_in_parallel(self):\n response = self.report.request_json(\n 'place=parallel&show-urls=external%2Ccpa&text=офер&rids=54&rearr-factors=market_parallel_cpa_only_enabled=1'\n )\n self.assertFragmentIn(\n response,\n {\n \"market_offers_wizard\": [\n {\n \"showcase\": {\n \"items\": [\n {\"title\": {\"text\": {\"__hl\": {\"text\": \"CPA офер 1-1\"}}}},\n {\"title\": {\"text\": {\"__hl\": {\"text\": \"CPA офер 4-1\"}}}},\n ]\n }\n }\n ]\n },\n allow_different_len=False,\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_cpa_only_parallel.py","file_name":"test_cpa_only_parallel.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19041597218","text":"from core.views import *\nfrom 
django.urls import path\n\napp_name = 'core'\n\nurlpatterns = [\n    path('', HomePageView.as_view(), name='home'),\n    path('get_point/', get_point, name='points'),\n    path('get_data/', DatosGeneralesList.as_view(), name='get_data_list_page'),\n    path('get_data/<int:pk>/', DatosGeneralesDetail.as_view(), name='get_data_detail_page'),\n]\n","repo_name":"Giaeulate/uob","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"75370239","text":"import pytest\nfrom decimal import Decimal\n\nfrom crm.agency_cabinet.ord.common.structs import Act\nfrom crm.agency_cabinet.ord.proto import request_pb2, acts_pb2\n\npytestmark = [pytest.mark.asyncio]\n\n\nasync def test_add_act(client, rmq_rpc_client):\n    rmq_rpc_client.send_proto_message.return_value = acts_pb2.AddActOutput(\n        result=acts_pb2.Act(\n            act_id=1,\n            act_eid='eid',\n            amount='10',\n            is_vat=True,\n        )\n    )\n\n    got = await client.add_act(\n        agency_id=1,\n        report_id=1,\n        act_eid='eid',\n        amount=Decimal('10'),\n        is_vat=True,\n    )\n\n    rmq_rpc_client.send_proto_message.assert_awaited_with(\n        queue_name='ord',\n        message=request_pb2.RpcRequest(\n            add_act=acts_pb2.AddActInput(\n                agency_id=1,\n                report_id=1,\n                act_eid='eid',\n                amount='10',\n                is_vat=True,\n            )\n        ),\n        response_message_type=acts_pb2.AddActOutput,\n    )\n\n    assert got == Act(\n        act_id=1,\n        act_eid='eid',\n        amount=Decimal('10.0'),\n        is_vat=True,\n    )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crm/tests/test_add_act.py","file_name":"test_add_act.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2800275680","text":"import gym\nfrom typing import Dict, Any\nimport logging\nimport optuna\nimport os\nimport warnings\nfrom abc import abstractmethod\n\nfrom mpscenes.goals.goal_composition import GoalComposition\nfrom mpscenes.obstacles.sphere_obstacle import SphereObstacle\n\nimport numpy as np\nfrom optuna_fabrics.planner.symbolic_planner import SymbolicFabricPlanner\nfrom fabrics.planner.serialized_planner import SerializedFabricPlanner\n\n\nfrom forwardkinematics.urdfFks.generic_urdf_fk import GenericURDFFk\n\nfrom optuna_fabrics.tune.fabrics_trial import FabricsTrial\nimport quaternionic\n\n\nclass IiwaTrial(FabricsTrial):\n\n    def __init__(self, weights=None):\n        super().__init__(weights=weights)\n        self._degrees_of_freedom = 7\n        self._q0 = np.array([0, -0.6, 0.0, -1.1, 0.00, 0, 0.0])\n        self._qdot0 = np.zeros(self._degrees_of_freedom)\n        self._collision_links = [\"iiwa_link_3\", \"iiwa_link_5\", \"iiwa_link_7\", \"iiwa_link_ee\"]\n        self._link_sizes = {\n            'iiwa_link_3': 0.1,\n            'iiwa_link_5': 0.1,\n            \"iiwa_link_7\": 0.08,\n            \"iiwa_link_ee\": 0.08,\n        }\n        self._self_collision_pairs = {\n            \"iiwa_link_7\": ['iiwa_link_3', 'iiwa_link_5']\n        }\n        self._ee_link = \"iiwa_link_ee\"\n        self._root_link = \"iiwa_link_0\"\n        self._absolute_path = os.path.dirname(os.path.abspath(__file__))\n        self._urdf_file = self._absolute_path + \"/iiwa7.urdf\"\n        with open(self._urdf_file, \"r\") as file:\n            self._urdf = file.read()\n        self._generic_fk = GenericURDFFk(self._urdf, self._root_link, self._ee_link)\n\n    def set_planner(self):\n        \"\"\"\n        Initializes the fabric planner for the point robot.\n\n        This function defines the forward kinematics for collision avoidance,\n        and goal reaching. 
These components are fed into the fabrics planner.\n\n In the top section of this function, an example for optional reconfiguration\n can be found. Commented by default.\n\n \"\"\"\n serialize_file = self._absolute_path + \"/../planner/serialized_planners/iiwa_planner.pkl\"\n if os.path.exists(serialize_file):\n planner = SerializedFabricPlanner(serialize_file)\n return planner\n robot_type = \"panda\"\n\n ## Optional reconfiguration of the planner\n # base_inertia = 0.03\n # attractor_potential = \"20 * ca.norm_2(x)**4\"\n # damper = {\n # \"alpha_b\": 0.5,\n # \"alpha_eta\": 0.5,\n # \"alpha_shift\": 0.5,\n # \"beta_distant\": 0.01,\n # \"beta_close\": 6.5,\n # \"radius_shift\": 0.1,\n # }\n # planner = ParameterizedFabricPlanner(\n # degrees_of_freedom,\n # robot_type,\n # base_inertia=base_inertia,\n # attractor_potential=attractor_potential,\n # damper=damper,\n # )\n planner = SymbolicFabricPlanner(\n self._degrees_of_freedom,\n robot_type,\n urdf=self._urdf,\n root_link='iiwa_link_0',\n end_link=['iiwa_link_ee'],\n )\n iiwa_limits= [\n [-2.96705973, 2.96705973],\n [-2.0943951, 2.0943951],\n [-2.96705973, 2.96705973],\n [-2.0943951, 2.0943951],\n [-2.96705973, 2.96705973],\n [-2.0943951, 2.0943951],\n [-3.05432619, 3.05432619],\n ]\n\n # The planner hides all the logic behind the function set_components.\n goal = self.dummy_goal()\n planner.set_components(\n self._collision_links,\n self._self_collision_pairs,\n goal,\n number_obstacles=self._number_obstacles,\n limits=iiwa_limits,\n )\n planner.concretize()\n planner.serialize(serialize_file)\n return planner\n\n","repo_name":"maxspahn/optuna_fabrics","sub_path":"optuna_fabrics/tune/iiwa_trial.py","file_name":"iiwa_trial.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"27703975750","text":"from modules import util\n\n\ndef paire_simple(valeurs: list):\n \"\"\"\n Fonction détectant une paire dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a paire ou non\n \"\"\"\n for valeur in valeurs:\n #\n # Rappelons-nous que la fonction occurrences permet\n # de détecter le nombre de doublons depuis une liste\n # d'entier (soit la liste des valeurs des cartes)\n #\n # On examine chaque chaque carte, et on regarde si ici\n # on a 2 occurrences\n #\n if util.occurrences(valeurs,valeur,2):\n return True\n return False\n\ndef brelan(valeurs: list):\n \"\"\"\n Fonction détectant un brelan (trois cartes de même valeur) dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un brelan ou non\n \"\"\"\n for valeur in valeurs:\n if util.occurrences(valeurs,valeur,3):\n return True\n return False\n\ndef paire_double(valeurs: list):\n \"\"\"\n Fonction détectant une paire double dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un brelan ou non\n \"\"\"\n\n # À partir d'une comprehension list, on regarde le nombre d'occurrences ayant des paires\n resultat = [ util.occurrences(valeurs,valeur,2) for valeur in valeurs ]\n # Puisque la fonction occurrences renvoie un booléen, si le tableau en contient deux\n # donc on a deux paires différentes\n return resultat == [True,True]\n\ndef 
quinte(valeurs: list):\n \"\"\"\n Fonction détectant une quinte (suite de cartes consécutives selon \n leur valeur) dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a une quinte ou non\n \"\"\"\n\n # Si le jeu du joueur contient des valeurs non numériques (J,Q,K,A)\n # on doit les convertir en valeurs numériques\n valeurs = util.convert_cartes(valeurs)\n # si le tableau trié des valeurs, correspond à une suite de valeurs\n # (conversion d'une suite de chiffres en liste pour effectuer la comparaison)\n # alors on a affaire à une quinte\n return sorted(valeurs) == list(range(min(valeurs),max(valeurs)+1))\n\ndef flush(couleurs: list):\n \"\"\"\n Fonction détectant un flush (cartes de même couleur / symbole) \n dans le jeu du joueur\n\n Paramètres\n ----------\n\n couleur (list) : une liste contenant la couleur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un flush ou non\n \"\"\"\n\n #Si la première couleur du jeu de carte est contenu 5 fois\n #soit la taille du jeu du joueur, alors on a un flush\n return couleurs.count(couleurs[0]) == util.TAILLE_JEU\n\ndef full(valeurs: list):\n \"\"\"\n Fonction détectant un full (paire + brelan) dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un full ou non\n \"\"\"\n #on détecte en premier si on a un brelan\n check_brelan = brelan(valeurs)\n if check_brelan: \n #si cela est vrai, le résultat de la fonction paire_simple déterminera\n #s'il y a bien un full\n return paire_simple(valeurs)\n else:\n return False\n\ndef carre(valeurs:list):\n \"\"\"\n Fonction détectant un carré (4 cartes de même valeur) dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un brelan ou non\n \"\"\"\n for valeur in valeurs:\n if util.occurrences(valeurs,valeur,4):\n return True\n return False\n\ndef quinte_flush(valeurs: list, couleurs: list):\n \"\"\"\n Fonction détectant un quinte flush (quinte + flush) dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n couleurs (list) : une liste contenant la couleur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un quinte flush ou non\n \"\"\"\n # on réutilise simplement la fonction quinte et flush pour retourner le booléen\n return quinte(valeurs) and flush(couleurs)\n\ndef quinte_flush_royale(valeurs: list, couleurs: list):\n \"\"\"\n Fonction détectant un quinte flush royal \n (quinte + flush + les cartes As (Ace), Roi (King), Reine (Queen), 10) \n dans le jeu du joueur\n\n Paramètres\n ----------\n\n valeurs (list) : une liste contenant la valeur de chaque carte\n couleurs (list) : une liste contenant la couleur de chaque carte\n\n Sortie\n ------\n bool : un booléen confirmant s'il y a un brelan ou non\n \"\"\"\n if flush(couleurs): #si on a un flush, il est possible d'avoir un qunite flush royal\n # Il faut que la liste triée des valeurs soit une suite numérique\n # c'est pour cela qu'on convertit les valeurs des cartes + cette suite doit comporter\n # des éléments précis\n return sorted(util.convert_cartes(['A','K','Q','J','10'])) == sorted(util.convert_cartes(valeurs))\n return False\n\n\ndef partie(mise: int, bankroll: int, 
ensemble_couleur_valeur: list ):\n \"\"\"\n Effectue une partie de poker\n\n Paramètres\n ----------\n mise (int) : la mise du joueur (nécessaire pour calculer les gains)\n bankroll (int) : le montant de la banque\n ensemble_couleur_valeur (list) : liste contenant les couleurs et les valeurs des cartes séparémment (list of dict)\n\n Sortie\n ------\n list: Une liste contenant le nouveau montant de la banque et le résultat de la partie\n \"\"\"\n tirage_final = ensemble_couleur_valeur\n couleurs = [ cartes[\"couleur\"] for cartes in tirage_final ]\n valeurs = [ cartes[\"valeur\"] for cartes in tirage_final ]\n resultat = \"Vous avez perdu : retentez votre chance !\"\n gain = 0\n #on test du cas le plus rare au plus régulier\n if quinte_flush_royale(valeurs,couleurs):\n gain = mise*250\n resultat = (f\"Quinte flush royale ! Vous remportez {gain} €\") \n elif quinte_flush(valeurs,couleurs):\n gain = mise*50\n resultat = (f\"Quinte flush ! Vous remportez {gain} €\")\n elif carre(valeurs):\n gain = mise*25\n resultat = (f\"Carré ! Vous remportez {gain} €\")\n elif full(valeurs):\n gain = mise*9\n resultat = (f\"Full ! Vous remportez {gain} €\")\n elif flush(couleurs):\n gain = mise*6\n resultat = (f\"Flush ! Vous remportez {gain} €\")\n elif quinte(valeurs):\n gain = mise*4\n resultat = (f\"Quinte ! Vous remportez {gain} €\")\n elif brelan(valeurs):\n gain = mise*3\n resultat = (f\"Brelan ! Vous remportez {gain} €\")\n elif paire_double(valeurs):\n gain = mise*2\n resultat = (f\"Double paire ! Vous remportez {gain} €\")\n elif paire_simple(valeurs):\n gain = mise\n resultat =(f\"Paire ! Vous remportez {mise} €\")\n \n bankroll += gain \n return bankroll, resultat ","repo_name":"ii02735/ipssi_projet_pocker","sub_path":"modules/gain.py","file_name":"gain.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73559269353","text":"from typing import List\nimport logging\nfrom prompt_toolkit.shortcuts import checkboxlist_dialog\nfrom prompt_toolkit import prompt\n\nfrom ez_query.config import Config\nfrom ez_query.chatgpt_communicator import ChatGPTCommunicator\nfrom ez_query.schema_req import SchemaRequester\n\n\nclass Processor:\n \"\"\"Main loop which takes in user input and outputs results.\"\"\"\n\n def __init__(self, config: Config):\n self.sr = SchemaRequester(config)\n self.chatgpt_comm = ChatGPTCommunicator(config)\n\n def start(self):\n tables = self._select_tables()\n if not tables:\n print(\"No tables were selected, exiting.\")\n exit(0)\n\n q = self._get_question()\n if not q or len(q) == 0:\n print(\"No question asked, exiting.\")\n exit(0)\n \n tables_info = [self.sr.schema_for_table(table) for table in tables]\n resp = self.chatgpt_comm.ask(tables_info, q)\n\n print()\n print(f\"Total Tokens Used: {resp.total_tokens_used}\")\n print(\"ChatGPT Response:\")\n print(resp.message)\n\n def _get_question(self) -> str:\n return prompt(\"Please ask ChatGPT what SQL query to generate:\\n\")\n\n def _select_tables(self) -> List[str]:\n \"\"\"Select which tables to feed ChatGPT prior to asking to generate a query.\"\"\"\n tables = self.sr.tables\n options = [(table, table) for table in tables]\n\n selected_tables = checkboxlist_dialog(\n title=\"Select tables\",\n text=\"Select which tables are relevant for your query. 
\"\n \"For ChatGPT to work best, I reccomend providing solely the tables necessary for the query to work.\",\n values=options,\n ).run()\n\n return selected_tables\n\n","repo_name":"AbhishekVel/ez-query","sub_path":"src/ez_query/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"39440870682","text":"#!/usr/bin/env python3\n\n# You are asked to ensure that the first and last names of people begin\n# with a capital letter in their passports. For example, alison heck should\n# be capitalised correctly as Alison Heck.\n\n# Sample Input\n# chris alan\n# hello world lol\n\n# Sample Output\n# Chris Alan\n# Hello World Lol\n\n\ndef capitalize(string):\n result = []\n for i in range(len(string)):\n if i == 0:\n result.append(string[i].upper())\n elif string[i-1] == ' ':\n result.append(string[i].upper())\n else:\n result.append(string[i])\n return(\"\".join(result))\n\n\nif __name__ == '__main__':\n string = input()\n capitalized_string = capitalize(string)\n print(capitalized_string)\n","repo_name":"hrishikeshtak/Coding_Practises_Solutions","sub_path":"hackerrank/Python/Strings/Capitalize.py","file_name":"Capitalize.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2744810568","text":"import pandas as pd\nimport glob\n\nnames = sorted(glob.glob(\"*.csv\"))\n\nprint(\"load,lambda,E[T]\")\n\nfor name in names:\n df = pd.read_csv(name, comment='#')\n A = df.copy()\n A = A.sort_values(by=\"n\")\n lmb = 1.0 / A[\"A\"].diff().mean()\n df = df[2000:]\n print(name.replace(\".csv\", \"\"), lmb, df['T'].mean(), sep=',')\n","repo_name":"jcpwfloi/benchmark-clickhouse","sub_path":"tpch-evaluation/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41373896518","text":"from flask import Blueprint, jsonify, request\nfrom app import socketio\nfrom flask_socketio import SocketIO, send, emit\nfrom sqlalchemy import or_, and_\nfrom app import app, db\n\nfrom .models import ChatThread, ChatMessage\n\nconsultation = Blueprint('consultation', __name__)\n\n@consultation.route('/')\ndef index():\n return \"make\"\n\n\n@consultation.route('/chatmessages', methods=[\"GET\"])\ndef get_chat_messages():\n sender = request.args.get('sender')\n receiver = request.args.get('receiver')\n chat_thread = get_chat_thread(sender, receiver)\n\n chat_messages = get_chat_messages(chat_thread)\n\n return jsonify(chat_messages), 200\n\n\nusers = {}\n\n@socketio.on('username', namespace=\"/private\")\ndef recieve_username(username):\n users[str(username)] = request.sid\n\n\n@socketio.on('private_message', namespace=\"/private\")\ndef recieve_username(payload):\n try:\n reciever_id = users[str(payload['reciever'])]\n except KeyError:\n return\n reciever = int(payload['reciever'])\n message = payload['message']\n sender = int(payload['sender'])\n\n chat_thread = get_chat_thread(sender, reciever)\n\n has_saved_message = save_chat_message(chat_thread, sender, reciever, message)\n\n message_data = {\n \"message\": message,\n \"reciever\": reciever,\n \"sender\": sender\n }\n emit('new_private_message', message_data, room=reciever_id)\n\n\n@socketio.on('message')\ndef handle_message(msg):\n send(msg, broadcast=True)\n return None\n\n\n\n\ndef get_chat_thread(sender, 
reciever):\n chat_thread = ChatThread.query.filter(or_(and_(ChatThread.sender_phone == sender, ChatThread.receiver_phone == reciever), and_(ChatThread.sender_phone == reciever, ChatThread.receiver_phone == sender))).first()\n if chat_thread is None:\n new_chat_thread = create_chat_thread(sender, reciever)\n return new_chat_thread\n else:\n return chat_thread\n\n\ndef create_chat_thread(sender_phone, reciever_phone):\n chat_thread = ChatThread(sender_phone=sender_phone, receiver_phone=reciever_phone)\n db.session.add(chat_thread)\n db.session.commit()\n\n return chat_thread\n\n\ndef save_chat_message(thread, sender_phone, receiver_phone, message):\n chat_message = ChatMessage(thread=thread, sender_phone=sender_phone, receiver_phone=receiver_phone, message=message)\n db.session.add(chat_message)\n db.session.commit()\n\n return True\n\ndef get_chat_messages(thread):\n chat_message = ChatMessage.query.filter(ChatMessage.thread_id==thread.id).order_by(ChatMessage.date_created.asc()).paginate(1, 20, False)\n print(\"---chat_message\",chat_message.items)\n print(\"---chat_message\",thread.id)\n return [{\"id\": chat.id, \"message\":chat.message , \"reciever\":chat.receiver_phone , \"sender\": chat.sender_phone, \"read\": chat.read} for chat in chat_message.items]\n","repo_name":"okumujustine/AgriMrk","sub_path":"app/consultation/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20477820680","text":"\"\"\"\nThis gets only the current incarnation of the Doctor's dialogue and stores each word spoken as lowercase and separated\nby commas. This downloads the dialogue whenever there is a DOCTOR: in front of the lines that are being spoken. Each\nindividual episode has its own dialogue, and then all of the dialogue spoken when the current doctor is DOCTOR: in the\nepisode is added to a single doctor_x_dialogue.txt. This means that in episodes where three doctors are present, only\nthe current incarncation of the doctor's dialogue is taken and accounted for.\n\n\"\"\"\nimport os, re\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\n# Label the different dialogue lines by who is speaking\ndef get_names(df):\n text_name = []\n\n # The numbers in the names are to account for the appearances of other doctors. However, the DOCTOR: will always be\n # the current incarnation\n for line in df['text']:\n name = re.findall('[A-Z0-9 ]+', line)\n extra = re.findall('[(]+|[)]+.|[<]+.|[*]+.|[-]+.', line)\n\n # This filters for scene changes, comments made at end of the transcript, etc. Anything that isn't dialogue.\n if len(extra) >= 1:\n name = \"EXTRANEOUS\"\n elif len(name) > 0 and len(name[0]) > 1:\n name = name[0].strip()\n\n # Used to check if missed a line, ie listen in DOCTOR: I\\n listen. 
Assign such lines as nan.\n if len(re.findall('[A-Z0-9]', name)) <= 1:\n name = np.NaN\n else:\n name = np.NaN\n\n text_name += [name]\n\n return text_name\n\n\n# Turns dialogue into format needed for making word probablity maps.\ndef clean_dialogue(df):\n text_lines = []\n\n for line in df['text']:\n if 'DOCTOR:' in line:\n line = line.replace('DOCTOR: ', '')\n\n # Strip the leading and ending whitespaces.\n line = line.strip()\n line = re.sub(\"([.]+)|([?]+)|([,]+)|([!]+)\", \"\", line.lower())\n line = re.sub(\"(\\s)+\", \",\", line)\n line = re.sub(\"\\n\", \",\", line)\n\n # Ensure that there is a comma at the end of each line\n if line[len(line) - 1] != \",\":\n line += \",\"\n\n text_lines += [line]\n\n return text_lines\n\n\ndef download_dialogue(df, transcript_file_name, all_dialogue_file):\n dialogue_file = open(transcript_file_name, 'w', encoding='utf-8')\n\n for line in df['text']:\n dialogue_file.write(line)\n all_dialogue_file.write(line)\n\n dialogue_file.close()\n\n\ndef get_transcript_dialogue(transcript, abs_dialogue_path, all_dialogue_file):\n df = pd.read_table(transcript, header=None, sep=\"\\n\")\n df.columns = ['text']\n df['name'] = get_names(df)\n df['name'] = df['name'].ffill()\n df = df.dropna()\n df = df[df['name'] == \"DOCTOR\"]\n\n # Removes the \"DOCTOR: \" in front of the relevant lines.\n df['text'] = clean_dialogue(df)\n\n download_dialogue(df, abs_dialogue_path, all_dialogue_file)\n\n print(\"Done\")\n\ndef main():\n\n transcript_root = 'C:\\\\Users\\\\angel\\\\GitHub\\\\doctor-who-dialogue-tracker\\\\data-raw\\\\doctor_who_transcripts'\n dialogue_root = 'C:\\\\Users\\\\angel\\\\GitHub\\\\doctor-who-dialogue-tracker\\\\data\\\\doctor_who_dialogue'\n\n for folder in os.listdir(transcript_root):\n abs_dialogue_folder = dialogue_root + \"\\\\\" + folder\n os.makedirs(abs_dialogue_folder, exist_ok=True)\n\n abs_transcript_folder = transcript_root + \"\\\\\" + folder\n\n all_dialogue_path = abs_dialogue_folder + \"\\\\\" + folder + \"_dialogue.txt\"\n\n all_dialogue_file = open(all_dialogue_path, 'w', encoding='utf-8')\n\n for transcript in os.listdir(abs_transcript_folder):\n abs_transcript_path = abs_transcript_folder + \"\\\\\" + transcript\n\n abs_dialogue_path = abs_dialogue_folder + \"\\\\\" + Path(transcript).stem + \"_dialogue.txt\"\n print(abs_dialogue_path)\n\n get_transcript_dialogue(abs_transcript_path, abs_dialogue_path, all_dialogue_file)\n\n all_dialogue_file.close()\n\n\n# When executed directly, then condition is true. 
If executed indirectly, like it's imported, then the if statement\n# evaluates to false.\nif __name__ == \"__main__\":\n main()\n","repo_name":"angewzhao/doctor-who-dialogue-tracker","sub_path":"scripts/get_transcripts_dialogue.py","file_name":"get_transcripts_dialogue.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21200620895","text":"import os\nimport requests\nfrom pyquery import PyQuery as pq\nimport secret\n\n\ndef get(url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/70.0.3538.110 '\n 'Safari/537.36',\n 'Cookie': secret.cookie,\n\n }\n r = requests.get(url, headers=headers)\n print('status_code', r.status_code)\n page = r.content\n return page\n\n\ndef main():\n url = 'https://www.zhihu.com/follow'\n page = get(url)\n print(page.decode())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SQ2019060017/py_spider","sub_path":"zhihu_bycookie.py","file_name":"zhihu_bycookie.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36780006156","text":"import core\nimport asyncio\nfrom components import controller\n\nclass Speed(Component):\n async def start(self):\n controller = core.core.get_component(controller.Controller)\n while True:\n await asyncio.sleep(0.1)\n x = controller.get_axis(\"LEFT-X\")\n controller.get_button('RIGHT_ARROW')\n \n","repo_name":"Yooooomi/my-car","sub_path":"components/speed.py","file_name":"speed.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3208956704","text":"'''Tests for the :mod:`~.VectUtils` module.'''\n# pylint: disable=no-value-for-parameter\n\nfrom math import isclose, fabs\n\nfrom hypothesis import given, assume, note\nfrom hypothesis.strategies import floats, tuples, composite\n\nfrom t4_geom_convert.Kernel.VectUtils import (scal, vdiff, renorm, mag2,\n pointInPlaneIntersection)\n\n\ndef is_in_plane(point, plane):\n '''Returns `True` if the point lies in the given plane.'''\n pl_pt, norm = plane\n dist = scal(vdiff(point, pl_pt), norm)\n note('dist: {}'.format(dist))\n return isclose(dist, 0., abs_tol=1e-8)\n\n\n@composite\ndef vectors(draw, *, norm=None):\n '''Generate random vectors, possibly with a given norm.'''\n a_vec = draw(tuples(floats(min_value=-1e3, max_value=1e3),\n floats(min_value=-1e3, max_value=1e3),\n floats(min_value=-1e3, max_value=1e3)))\n if norm is None:\n return a_vec\n assume(mag2(a_vec) > 0.)\n return renorm(a_vec, norm)\n\n\n@given(point1=vectors(), point2=vectors(),\n normal1=vectors(norm=1.), normal2=vectors(norm=1.))\ndef test_intersection(point1, point2, normal1, normal2):\n '''Test that the point generated by :func:`pointInPlaneIntersection` always\n lies in both planes.'''\n assume(not isclose(fabs(scal(normal1, normal2)), 1.))\n plane1 = (point1, normal1)\n plane2 = (point2, normal2)\n int_p, line_vec = pointInPlaneIntersection(plane1, plane2)\n assert is_in_plane(int_p, plane1)\n assert is_in_plane(int_p, plane2)\n assert isclose(scal(line_vec, normal1), 0., abs_tol=1e-10)\n assert isclose(scal(line_vec, normal2), 0., 
abs_tol=1e-10)\n","repo_name":"makeclean/t4_geom_convert","sub_path":"t4_geom_convert/UnitTests/test_VectUtils.py","file_name":"test_VectUtils.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"26077324492","text":"from psychopy.iohub import EventConstants\nfrom psychopy import core\n\nimport game_init as gi\nimport game_run as gr\nfrom target import Target\n\nclass Game(object):\n\n def __init__(self):\n gi.generate_constants(self, 'game')\n gi.generate_variables(self, 'game')\n self.target = Target(self.FRAME_RATE, self.SUCCESS_SCORE)\n self.set_random_orientation()\n\n def check_input(self):\n for event in self.keyboard.getEvents():\n if event.type == EventConstants.KEYBOARD_PRESS:\n if event.key == u'1':\n self.direction = 'ccw'\n elif event.key == u'2':\n self.direction = 'cw'\n # elif event.key == u'3':\n # game.run_trials = not(game.run_trials)\n # elif event.key == u'4':\n # game.target_visible_trial = not(game.target_visible_trial)\n elif event.key == u'5':\n if not(self.run_trials):\n gr.start_run(game)\n # elif event.key == u'6':\n # self.set_random_orientation()\n elif event.key == u' ':\n if not(self.run_trials):\n gr.start_run(game)\n elif event.key == u'escape':\n self.quit()\n if event.type == EventConstants.KEYBOARD_RELEASE:\n # fixthis could add keydown state\n # (holding 1, press then release 2)\n if event.key == u'1':\n if self.direction == 'ccw':\n self.direction = 'none'\n elif event.key == u'2':\n if self.direction == 'cw':\n self.direction = 'none'\n\n def set_random_orientation(self):\n self.target_grating.ori = self.target.set_new_random_target()\n\n def set_next_orientation(self):\n self.target_grating.ori = self.target.set_next_target(self.trial_count)\n\n def run(self):\n while True:\n self.check_input()\n if self.run_trials == True:\n if self.begin_rest_clock.getTime() < 0:\n gr.run_rest(self)\n elif not(self.trial_started):\n # self.set_random_orientation()\n self.set_next_orientation()\n gr.start_trial(self)\n elif not(self.target_reached):\n if self.ENABLE_BLINK:\n if self.blink_clock.getTime() > 0:\n gr.blink_based_updates(self)\n self.blink_clock.add(self.BLINK_TIME)\n if self.signal_clock.getTime() > 0:\n gr.signal_based_updates(self)\n self.signal_clock.add(game.SAMPLE_PERIOD)\n if self.tr_clock.getTime() > 0:\n gr.tr_based_updates_game(self)\n self.tr_clock.add(self.tr)\n gr.run_trial(self)\n elif self.success_show_clock.getTime() < 0:\n gr.run_score(self) \n else:\n gr.reset_for_next_trial(self)\n\n self.draw()\n else:\n self.draw_splash()\n self.screen.flip()\n\n def draw(self):\n if self.grating_blink_visible:\n if self.show_grating:\n self.grating.draw()\n self.grating_mask.draw()\n if self.show_target:\n self.target_grating.draw()\n self.success_score_circ.draw()\n if self.show_score:\n self.score_circ.draw()\n self.fixation.draw()\n\n def draw_splash(self):\n game.splash_msg.draw()\n\n def quit(self):\n core.quit()\n\nif __name__ == \"__main__\":\n game = Game()\n game.run()","repo_name":"efunn/sim-nfb","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72881737513","text":"#!/usr/bin/python3\n\"\"\"AirBnb more like HBNB\"\"\"\n\nimport models\nimport uuid\nfrom datetime import datetime\n\n\nclass BaseModel:\n \"\"\"Base model class\"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"__init__\"\"\"\n self.id = 
str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n date_format = \"%Y-%m-%dT%H:%M:%S.%f\"\n if kwargs:\n \"\"\"key word arguements check & init attributes\"\"\"\n\n for key, val in kwargs.items():\n if key == \"id\":\n self.id = str(val)\n elif key == \"created_at\":\n self.created_at = datetime.strptime(val, date_format)\n elif key == \"update_at\":\n self.updated_at = datetime.strptime(val, date_format)\n else:\n models.storage.new(self)\n\n def __str__(self):\n \"\"\"print [] () \"\"\"\n return ('[{}] ({}) {}'.\n format(self.__class__.__name__, self.id, self.__dict__))\n\n def save(self):\n \"\"\"updates_at with the current datetime\"\"\"\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"returns dict countaining all keys/values of __dict__\"\"\"\n Newdict = self.__dict__.copy()\n Newdict[\"created_at\"] = self.created_at.isoformat()\n Newdict[\"updated_at\"] = self.updated_at.isoformat()\n Newdict[\"__class__\"] = self.__class__.__name__\n return Newdict\n","repo_name":"malekje/holbertonschool-AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16044219164","text":"\"\"\"\nGPG/rsync\n(c) 2008-2016, Iñaki Silanes\n\nLICENSE\n\nThis program is free software; you can redistribute it and/or modify it\nunder the terms of the GNU General Public License (version 2), as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful, but\nWITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\nor FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\nfor more details (http://www.gnu.org/licenses/gpl.txt).\n\nDESCRIPTION\n\nUses a remote destination to store checksums (MD5s) and GPG-encoded versions of local files, \nto sync with other computers.\n\nUSAGE\n\nTo upload local files (defined by ~/.gipsync/blah.conf) to remote (\"pivot\") site:\n\n% gipsync.py blah -u\n\nThen, in some other computer:\n\n% gipsync.py blah\n\"\"\"\n\n# Standard libs:\nimport os\nimport sys\n\n# Our libs:\nfrom libgipsync import core\n\n# Functions:\ndef main():\n \"\"\"Main loop.\"\"\"\n\n # --- Initialization --- #\n o = core.parse_args()\n\n times = core.Timing()\n cfg = core.Configuration()\n cfg.read_prefs()\n\n # --- Execution --- #\n if o.delete:\n delete(cfg, o)\n\n else:\n update(cfg, o, times)\n\n # Lastly, print out timing summary:\n if o.timing:\n times.summary()\n\ndef delete(cfg, o):\n \"\"\"Perform deletion.\"\"\"\n\n # Check that repo dir is mounted:\n dir = cfg.prefs['PIVOTDIR']\n if not os.path.isdir(dir):\n msg = 'Can not find dir \"{0}\". 
Is it mounted?'.format(dir)\n print(msg)\n return\n\n # Get info:\n sizes = core.collect_sizes(dir)\n\n # Sort info by date:\n sizes.sort()\n\n # Delete up to freeing requested size,\n # starting from oldest files:\n todelete = o.delete*1024*1024\n\n while True:\n returned = core.delete_asked(sizes, todelete)\n if returned:\n string = 'How many MBs do you want to delete?: '\n todelete = input(string)\n try:\n todelete = float(todelete)*1024*1024\n except:\n break\n else:\n break\n\ndef update(cfg, o, times):\n \"\"\"Perform update.\"\"\"\n\n args = o.positional\n\n # Check arguments:\n if args and args[0] == 'all':\n args = cfg.prefs['ALL']\n\n # Perform actions for each repo named in args:\n for what in args:\n # Read and check configs:\n cfg.read_conf(what)\n cfg.check()\n\n # Check that localdir is present:\n ldir = cfg.conf['LOCALDIR']\n if not os.path.isdir(ldir):\n print(\"[ERROR] Required local dir '{0}' not present\".format(ldir))\n sys.exit()\n\n # Initialize repo (read from pickle, if present and not o.fresh):\n repos = core.Repositories(opts=o, cfg=cfg, what=what)\n if not o.fresh:\n repos = repos.pickle(read=True)\n repos.options = o # use currently user-given options, not pickled ones\n\n times.milestone('Read confs')\n \n # Print info:\n core.message('repo', what=what, cfg=cfg)\n \n # --- Read remote data --- #\n\n # Check if remote data already downloaded:\n string = 'Downloading index.dat...'\n if not o.fresh and 'dl_index' in repos.done:\n core.say('[AVOIDED] {0}'.format(string))\n else:\n # Sync local proxy repo with remote repo:\n core.say(string)\n repos.get_index() # first download only index.dat.gpg\n\n # Create flag to say \"we already downloaded index.dat\":\n repos.done['dl_index'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Download remote index')\n\n # Get remote md5tree:\n string = 'Reading remote md5tree...'\n if not o.fresh and 'read_index' in repos.done:\n core.say('[AVOIDED] {0}'.format(string))\n else:\n core.say(string)\n repos.read_remote()\n\n # Create flag to say \"we already read remote index.dat\":\n repos.done['read_index'] = True\n\n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Read remote index')\n\n # --- Read local data --- #\n\n hash_file = os.path.join(cfg.dir, '{0}.md5'.format(what))\n string = 'Reading local md5tree...'\n if not o.fresh and 'read_local_md5s' in repos.done:\n core.say('[AVOIDED] {0}'.format(string))\n else:\n # Read local file hashes from conf (for those files that didn't change):\n core.say(string)\n repos.read(hash_file)\n\n # Create flag to say \"we already read local md5 file\":\n repos.done['read_local_md5s'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Initialize')\n\n # Traverse source and get list of file hashes:\n string = 'Finding new/different local files...'\n if not o.fresh and 'check_local_files' in repos.done:\n core.say('[AVOIDED] {0}'.format(string))\n else:\n core.say(string)\n repos.walk()\n\n # Create flag to say \"we already checked local files\":\n repos.done['check_local_files'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Dir walk')\n \n # --- Write back local data --- #\n \n # Save local hashes, be it dry or real run:\n string = 'Saving local data...'\n if not o.fresh and 'save_local_md5s' in repos.done:\n core.say('[AVOIDED] {0}'.format(string))\n else:\n core.say(string)\n repos.save(hash_file)\n\n # Create flag to say \"we already saved 
local MD5s\":\n repos.done['save_local_md5s'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Save local hash')\n \n # --- Actually do stuff --- #\n \n # Compare remote and local md5 trees:\n string = 'Comparing remote/local...'\n if not o.fresh and 'compare_md5_trees' in repos.done:\n core.say('[AVOIDED] {0}'.format(string))\n else:\n core.say(string)\n repos.compare()\n\n # Create flag to say \"we already checked local files\":\n repos.done['compare_md5_trees'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Compare')\n \n # Sort lists, for easy reading:\n repos.diff.sort()\n\n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Sort diff')\n \n # Act according to differences in repos:\n success = False\n\n ##########\n # Upload #\n ##########\n if o.up:\n repos.really_do = False\n \n # Print summary/info:\n repos.enumerate()\n \n # Ask for permission to proceed, if there are changes:\n any_diff = repos.ask()\n \n if repos.really_do:\n if not o.safe:\n if o.safe or not o.fresh and 'delete_remote' in repos.done:\n core.say('[AVOIDED] Deleting remote files...')\n else:\n string = 'Deleting remote files...'\n core.say(string)\n repos.nuke_remote()\n\n # Create flag to say \"we already deleted remote files\":\n repos.done['delete_remote'] = True\n\n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Nuke up')\n \n # Safe or not safe, upload:\n if not o.fresh and 'upload' in repos.done:\n core.say('[AVOIDED] Uploading...')\n else:\n string = 'Uploading...'\n core.say(string)\n success = repos.upload()\n\n # Create flag to say \"we already uploaded files\":\n if success:\n repos.done['upload'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Upload')\n\n if not success:\n sys.exit()\n \n # Write index file to remote repo:\n if not o.fresh and 'write_remote_index' in repos.done:\n core.say('[AVOIDED] Saving index.dat remotely...')\n else:\n string = 'Saving index.dat remotely...'\n core.say(string)\n repos.save('index.dat', local=False)\n\n # Create flag to say \"we already wrote remote index\":\n repos.done['write_remote_index'] = True\n\n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Write remote index')\n\n ############\n # Download #\n ############\n else:\n repos.really_do = False\n \n # Print summary/info:\n repos.enumerate()\n \n # Ask for permission to proceed:\n any_diff = repos.ask(up=False)\n \n if repos.really_do:\n if not o.safe:\n if not o.fresh and 'delete_local' in repos.done:\n core.say('[AVOIDED] Deleting local files...')\n else:\n # Delete files only in local:\n repos.nuke_local()\n\n # Create flag to say \"we already deleted local files\":\n repos.done['delete_local'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Nuke local')\n\n # Safe or not, download:\n if not o.fresh and 'download' in repos.done:\n core.say('[AVOIDED] Downloading...')\n else:\n string = 'Downloading...'\n core.say(string)\n success = repos.download()\n\n # Create flag to say \"we already downloaded remote files\":\n repos.done['download'] = True\n \n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Download')\n\n if not success:\n sys.exit()\n\n # Save logs:\n repos.save(hash_file)\n\n # Write index file to remote repo:\n if not o.fresh and 'write_remote_index' in repos.done:\n core.say('[AVOIDED] Saving index.dat remotely...')\n 
else:\n string = 'Saving index.dat remotely...'\n core.say(string)\n repos.save('index.dat', local=False)\n\n # Create flag to say \"we already wrote remote index\":\n repos.done['write_remote_index'] = True\n\n # For each step, we pickle and log time:\n repos.pickle()\n times.milestone('Save remote index')\n\n # Cleanup, either because all went well, or because \n # there was nothing to do:\n if success or not any_diff:\n string = 'Cleaning up...'\n core.say(string)\n repos.clean()\n\n times.milestone('Finalize')\n\n\n# Main:\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"isilanes/gipsync","sub_path":"gipsync.py","file_name":"gipsync.py","file_ext":"py","file_size_in_byte":11276,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"23275116616","text":"import pandas as pd\nimport numpy as np\n\ndef data_cleaning_tool(data):\n # Handling missing values\n data = handle_missing_values(data)\n\n # Removing duplicates\n data = remove_duplicates(data)\n\n # Standardizing data formats\n data = standardize_data_formats(data)\n\n # Perform additional data cleaning tasks\n \n\n return data\n\ndef handle_missing_values(data):\n # Drop rows with missing values\n data = data.dropna()\n\n # Fill missing values with appropriate techniques\n # data = data.fillna(value) or data = data.interpolate()\n\n return data\n\ndef remove_duplicates(data):\n # Drop duplicate rows\n data = data.drop_duplicates()\n\n return data\n\ndef standardize_data_formats(data):\n # Convert 'Date' column to datetime format\n data['Date'] = pd.to_datetime(data['Date'])\n\n # Remove unwanted symbols or characters from specific columns\n data['Amount'] = data['Amount'].str.replace('$', '')\n\n # Perform additional data format standardization\n \n\n return data\n\n# Example usage\n# Load the data from a CSV file\ndata = pd.read_csv('data.csv')\n\n# Clean the data using the Data Cleaning Tool\ncleaned_data = data_cleaning_tool(data)\n\n# Display the cleaned data\nprint(cleaned_data.head())\n","repo_name":"amineux/DataCleaning","sub_path":"cleandata.py","file_name":"cleandata.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33339907731","text":"class Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:\n wordDict = set(wordDict)\n n = len(s)\n res = []\n def rec(ind,sentence):\n if ind == n:\n res.append(\" \".join(sentence))\n return \n word = \"\"\n for i in range(ind,n):\n word += s[i]\n if word in wordDict:\n sentence.append(word)\n rec(i+1,sentence)\n sentence.pop()\n rec(0,[])\n return res","repo_name":"natitedros/Competitive-Programming","sub_path":"Backtracking/WordBreak2.py","file_name":"WordBreak2.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38938811861","text":"import config\nimport requests\nimport json\n\n\nclass Api:\n\n def __init__(self, api_key):\n self.api_key = api_key\n\n def current_weather(self, *args):\n if len(args) == 1:\n response = requests.request('GET', f'https://api.openweathermap.org/data/2.5/weather?q={args[0]}&appid={self.api_key}')\n output = response.json()\n out = json.dumps(output)\n print(out)\n else:\n response = requests.request('GET', f'https://api.openweathermap.org/data/2.5/weather?lat={args[0]}&lon={args[1]}&appid={self.api_key}')\n output = response.json()\n out = json.dumps(output)\n 
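# Illustrative aside: requests also offers a GET shorthand equivalent to\n            # requests.request('GET', ...), and it can build the query string itself:\n            #   r = requests.get('https://api.openweathermap.org/data/2.5/weather',\n            #                    params={'lat': args[0], 'lon': args[1], 'appid': self.api_key})\n            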
print(out)\n\n\napi = Api(config.api_key)\napi.current_weather('rajkot')\napi.current_weather('22.3039', '70.8022')\n","repo_name":"KishanPipariya/Weather","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14822238249","text":"# Owner(s): [\"oncall: jit\"]\n\nfrom torch.testing._internal.jit_utils import JitTestCase\nfrom torch._C import parse_ir\nimport torch\n\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_jit.py TESTNAME\\n\\n\"\n \"instead.\")\n\nclass TestAliasAnalysis(JitTestCase):\n def test_becomes_wildcard_annotations(self):\n graph_str = \"\"\"\n graph(%a.1 : Tensor, %b.1 : Tensor):\n %11 : NoneType = prim::Constant()\n %8 : int = prim::Constant[value=0]()\n %7 : int = prim::Constant[value=1]()\n %x.1 : Tensor = aten::add(%a.1, %b.1, %7)\n %y.1 : Tensor[] = aten::split(%x.1, %7, %8)\n return ()\n \"\"\"\n graph = parse_ir(graph_str)\n alias_db = graph.alias_db()\n split_node = graph.findNode(\"aten::split\")\n # split input enters wildcard set, list initalized as containing wildcard set\n self.assertTrue(alias_db.may_contain_alias(next(split_node.inputs()), split_node.output()))\n # because %x.1 enters wildcard set, it now aliases other members of wildcard set (graph inputs)\n self.assertTrue(alias_db.may_contain_alias(next(split_node.inputs()), next(graph.inputs())))\n\n def test_nested_list_construct_not_wildcard(self):\n @torch.jit.script\n def foo(x):\n y = torch.rand([2, 2])\n return [y]\n\n graph = foo.graph\n graph.alias_db()\n alias_db = graph.alias_db()\n ten_construct = graph.findNode(\"aten::rand\").output()\n output = next(graph.outputs())\n self.assertTrue(alias_db.may_contain_alias(ten_construct, output))\n self.assertFalse(alias_db.may_contain_alias(next(graph.inputs()), ten_construct))\n\n def test_recursive_calls(self):\n @torch.jit.script\n def foo(x, y):\n x.add_(1)\n return x + y\n\n @torch.jit.script\n def caller():\n a = torch.rand([2, 2])\n b = torch.ones([2, 2])\n out1 = foo(a, b)\n c = torch.rand([1])\n d = torch.ones([2])\n out2 = foo(d, c)\n return out1, out2\n\n isFrozen = False\n descend_function_calls = True\n alias_db = caller.graph.alias_db(isFrozen, descend_function_calls)\n func_calls = caller.graph.findAllNodes(\"prim::CallFunction\")\n self.assertEqual(len(func_calls), 2)\n for node in func_calls:\n inps = list(node.inputs())\n self.assertTrue(alias_db.has_writers(inps[1]))\n self.assertFalse(alias_db.has_writers(inps[2]))\n\n class Mod(torch.nn.Module):\n def forward(self):\n a = torch.rand([2, 2])\n b = torch.ones([2, 2])\n out1 = self.foo2(a, b)\n c = torch.rand([1])\n d = torch.ones([2])\n out2 = self.foo2(d, c)\n return out1, out2\n\n def foo2(self, x, y):\n x.add_(1)\n return x + y\n\n mod = torch.jit.script(Mod())\n alias_db = mod.graph.alias_db(isFrozen, descend_function_calls)\n func_calls = mod.graph.findAllNodes(\"prim::CallMethod\")\n self.assertEqual(len(func_calls), 2)\n for node in func_calls:\n inps = list(node.inputs())\n self.assertTrue(alias_db.has_writers(inps[1]))\n self.assertFalse(alias_db.has_writers(inps[2]))\n","repo_name":"pytorch/pytorch","sub_path":"test/jit/test_alias_analysis.py","file_name":"test_alias_analysis.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} 
+{"seq_id":"19767423955","text":"t = (1, 2, 3)\n#t[0] = 3 \t\t\t\tошибка\n\ntable = [\n(\"Alice\", 25, 30),\n(\"Bob\", 27, 80),\n(\"Charlie\", 30, 90)\n]\n#print(table)\nperson = (\"Alice\", 25, 50)\nname, age, weight = person\nprint(name)\nname, age, weight = name, weight, age\nprint(age)\n\nl = [1, 2, 3]\nprint(l)\nll = tuple(l)\nprint(ll)\n#ll[0] = 5 \t\t\t\tошибка\nlll = list(t)\nprint(lll)\n\ntable[0] = 1\n#table[1][1] = 45 \t\tошибка\nprint(table[1][1])\n\na = ('2',)\nb = 'z'\nnew = a + (b,)\nprint(new)\n\nc = (1,2,3,4,5)\nd = c[:2] + c[3:]\nprint(d)","repo_name":"AlexanderBeli/repo","sub_path":"python/BrM3p2_cort.py","file_name":"BrM3p2_cort.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35232128960","text":"import os\nimport shutil\nimport sys\nimport datetime\n\nfrom invoke import task\nfrom pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer\nfrom pelican.settings import DEFAULT_CONFIG, get_settings_from_file\n\nSETTINGS_FILE_BASE = 'pelicanconf.py'\nSETTINGS = {}\nSETTINGS.update(DEFAULT_CONFIG)\nLOCAL_SETTINGS = get_settings_from_file(SETTINGS_FILE_BASE)\nSETTINGS.update(LOCAL_SETTINGS)\n\nCONFIG = {\n 'settings_base': SETTINGS_FILE_BASE,\n 'settings_publish': 'publishconf.py',\n # Output path. Can be absolute or relative to tasks.py. Default: 'output'\n 'deploy_path': SETTINGS['OUTPUT_PATH'],\n # Port for `serve`\n 'port': 8000,\n}\n\n@task\ndef clean(c):\n \"\"\"Remove generated files\"\"\"\n if os.path.isdir(CONFIG['deploy_path']):\n shutil.rmtree(CONFIG['deploy_path'])\n os.makedirs(CONFIG['deploy_path'])\n\n@task\ndef build(c, prod=False):\n \"\"\"Build the site in dev mode (default) or prod mode (with `--prod` argument)\"\"\"\n if prod:\n c.run('pelican -s {settings_publish}'.format(**CONFIG))\n else:\n c.run('pelican -s {settings_base}'.format(**CONFIG))\n\n@task\ndef rebuild(c):\n \"\"\"`build` with the delete switch\"\"\"\n c.run('pelican -d -s {settings_base}'.format(**CONFIG))\n\n@task\ndef regenerate(c):\n \"\"\"Automatically regenerate site upon file modification\"\"\"\n c.run('pelican -r -s {settings_base}'.format(**CONFIG))\n\n@task\ndef serve(c):\n \"\"\"Serve site at http://localhost:$PORT/ (default port is 8000)\"\"\"\n c.run('pelican -l -r -s {settings_base}'.format(**CONFIG))\n","repo_name":"unnecessary-abstraction/pbarker.dev","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"621391818","text":"import os\nfrom pathlib import Path\n\ndef get_forest_from_input(input):\n lines = input.split('\\n')\n return [ [ char for char in line ] for line in lines ]\n\ndef is_tree(forest, x, y):\n return forest[y][x] == '#'\n\ndef find_trees_in_path(input, slope_x, slope_y):\n forest = get_forest_from_input(input)\n height = len(forest)\n width = len(forest[0])\n\n trees_encountered = 0\n x_position = 0\n\n for y_position in range(0, height, slope_y):\n trees_encountered += 1 if is_tree(forest, x_position, y_position) else 0\n x_position = (x_position + slope_x) % width\n\n return trees_encountered\n\ndef solve_puzzle(input, slopes):\n result = 1\n \n for (x, y) in slopes:\n trees = find_trees_in_path(input, x, y)\n result *= trees\n\n return result\n\ndef main():\n input_file_name = f'{Path(__file__).stem}.txt'\n input_file_path = os.path.join('input', input_file_name)\n\n input = ''\n with 
open(input_file_path) as f:\n input = f.read()\n\n print(solve_puzzle(input, [(3, 1)])) \n\n slopes_2 = [\n (1, 1),\n (3, 1),\n (5, 1),\n (7, 1),\n (1, 2)\n ]\n\n print(solve_puzzle(input, slopes_2))\n\nif __name__ == '__main__':\n main()\n","repo_name":"madisonscott/advent-of-code","sub_path":"2020/2020-12-03.py","file_name":"2020-12-03.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25332181226","text":"\"\"\"\nModule which controls the active status of depositing accounts\n\"\"\"\nfrom service import models\n\n\ndef activate_deposit(acc_id):\n \"\"\"\n Activate the repository's deposit process.\n\n This sets the repository status to \"succeeding\", which in turn means the processor will\n pick up this account and run deposits against the repo.\n\n :param acc_id: account to activate\n :return:\n \"\"\"\n # see if there's a repository status for this account\n rs = models.RepositoryStatus.pull(acc_id)\n\n # if not make one\n if rs is None:\n rs = models.RepositoryStatus()\n rs.id = acc_id\n rs.status = \"succeeding\"\n rs.save()\n return\n\n # if there is, reset it ready to go again\n rs.activate()\n rs.save()\n\ndef deactivate_deposit(acc_id):\n \"\"\"\n Deactivate the repository's deposit process\n\n This sets the repository status to \"failing\", which in turn means the processor will ignore this\n account in its normal run\n\n :param acc_id: account to deactivate\n :return:\n \"\"\"\n # see if there's a repository status for this account\n rs = models.RepositoryStatus.pull(acc_id)\n\n # if not, that's as good as being deactivated\n if rs is None:\n return\n\n # if there is, deactivate it\n rs.deactivate()\n rs.save()","repo_name":"JiscPER/jper-sword-out","sub_path":"service/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4554177290","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Cluster analysis\n\n# K-means method\n\n# base: k(the number of clusters), nearest mean values(Euclidian distance)\n# usecase: market price and cost modeling, custom segmentation,\n# insurance claim fruad detection, hedge fund classification\n# caution: scale the variables, set k based on scatterplot/data table\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport seaborn as sb\n\nimport urllib\n\nimport sklearn\nfrom sklearn.cluster import KMeans\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.preprocessing import scale\nimport sklearn.metrics as sm\nfrom sklearn import datasets\nfrom sklearn.metrics import confusion_matrix, classification_report\n\nimport scipy\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.cluster.hierarchy import fcluster\nfrom scipy.cluster.hierarchy import cophenet\nfrom scipy.spatial.distance import pdist\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import neighbors\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import train_test_split\n\n#%matplot inline\nplt.figure(figsize=(7,4))\n\niris = datasets.load_iris()\nx = scale(iris.data)\ny = pd.DataFrame(iris.target)\nvariable_names = iris.feature_names\n\nclustering = KMeans(n_clusters=3,random_state=5)\nclustering.fit(x)\n\niris_df = pd.DataFrame(iris.data)\niris_df.columns = 
['Sepal_Length','Sepal_Width','Petal_Length','Petal_Width']\ny.columns = ['Targets']\n\ncolor_theme = np.array(['darkgray','lightsalmon','powderblue'])\n\nplt.subplot(1,2,1)\nplt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width,c=color_theme[iris.target],s=50)\nplt.title('Ground Truth Classification')\n\nplt.subplot(1,2,2)\nplt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width,c=color_theme[clustering.labels_],s=50)\nplt.title('K-Means Classification')\n\nrelabel = np.choose(clustering.labels_,[2,0,1]).astype(np.int64)\nplt.subplot(1,2,1)\nplt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width,c=color_theme[iris.target],s=50)\nplt.title('Ground Truth Classification')\n\nplt.subplot(1,2,2)\nplt.scatter(x=iris_df.Petal_Length,y=iris_df.Petal_Width,c=color_theme[relabel],s=50)\nplt.title('K-Means Classification')\n\nprint(classification_report(y,relabel))\n# precision(model's relevancy)[+], recall(model's completeness)[+]\n\n\n#=============================================================================\n\n\n# Hierarchical clustering\n\n# distance between each & nearest neighbors -> link\n# by dendrogram\n# use cases: hospital resource management, business process management,\n# customer segmentation, social network analysis\n\nnp.set_printoptions(precision=4, suppress=True)\nplt.figure(figsize=(10,3))\n#%matplotlib inline\nplt.style.use('seaborn-whitegrid')\n\naddress = '/Users/clu128/Desktop/Exercise Files/Ch06/06_02/mtcars.csv'\ncars = pd.read_csv(address)\ncars.columns = ['car_names','mpg','cyl','disp','hp','drat','wt','qsec','vs','am','gear','carb']\n\nx = cars.ix[:,(1,3,4,6)].values\ny = cars.ix[:,(9)].values\n\nz = linkage(x, 'ward')\ndendrogram(z, truncate_mode='lastp',p=12,leaf_rotation=45.,leaf_font_size=15,show_contracted=True)\n\nplt.title('Truncated Hierarchical Clustering Dendrogram')\nplt.xlabel('Cluster Size')\nplt.ylabel('Distance')\n\nplt.axhline(y=500)\nplt.axhline(y=150)\nplt.show()\n\nk=2\nHclustering = AgglomerativeClustering(n_clusters=k,affinity='euclidean',linkage='ward')\nHclustering.fit(x)\nprint(sm.accuracy_score(y,Hclustering.labels_))\n\nHclustering = AgglomerativeClustering(n_clusters=k,affinity='euclidean',linkage='complete')\nHclustering.fit(x)\nprint(sm.accuracy_score(y,Hclustering.labels_))\n\nHclustering = AgglomerativeClustering(n_clusters=k,affinity='euclidean',linkage='average')\nHclustering.fit(x)\nprint(sm.accuracy_score(y,Hclustering.labels_))\n\nHclustering = AgglomerativeClustering(n_clusters=k,affinity='manhattan',linkage='average')\nHclustering.fit(x)\nprint(sm.accuracy_score(y,Hclustering.labels_))\n\n\n#=============================================================================\n\n\n# k-Nearest Neighbor Classification\n\n# use case: stock price prediction, recommendation systems,\n# credit risk analysis, predictive trip planning\n\n# assumption: little noise, labeled, only contain relevant features, has distinguishable subgroups\n# caution: avoid large dataset(long time)\n\nx_prime = cars.ix[:,(1,3,4,6)].values\ny = cars.ix[:,9].values\nx = preprocessing.scale(x_prime)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.33, random_state=17)\n\nclf = neighbors.KNeighborsClassifier()\n\nclf.fit(x_train, y_train)\n\ny_expect = y_test\ny_pred = 
clf.predict(x_test)\n\nprint(sm.classification_report(y_expect,y_pred))\n\n","repo_name":"carajumpshigh/Python-essential-training-for-data-science","sub_path":"Cluster_Analysis.py","file_name":"Cluster_Analysis.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14231874530","text":"import time\r\nimport os\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\ncomments = []\r\ndef get_links(topic):\r\n execute_path = r\"{}\".format(os.path.dirname(os.path.abspath(__file__)) + '\\chromedriver.exe')\r\n service = Service(executable_path=execute_path)\r\n options = Options()\r\n options.add_argument('--headless')\r\n options.add_argument('--mute-audio')\r\n driver = webdriver.Chrome(service=service, options=options)\r\n topic_words = topic.split()\r\n search_url = \"https://www.youtube.com/results?search_query=\"\r\n\r\n if(len(topic_words) == 1):\r\n search_url = search_url + topic\r\n else:\r\n for words in topic_words:\r\n search_url = search_url + \"+\" + words\r\n \r\n driver.get(search_url)\r\n time.sleep(3)\r\n links = driver.find_elements(By.XPATH, \"//a[@id='video-title']\")\r\n\r\n cleaned_links = []\r\n\r\n for link in links:\r\n cleaned_links.append(link.get_attribute('href'))\r\n \r\n cleaned_links = [element for element in cleaned_links if element != \"https://www.youtube.com/\"]\r\n cleaned_links = [element for element in cleaned_links if element != None] \r\n cleaned_links = [element for element in cleaned_links if \"@\" not in element]\r\n cleaned_links = [element for element in cleaned_links if \"shorts\" not in element]\r\n cleaned_links = list(set(cleaned_links))\r\n \r\n return(cleaned_links)\r\n\r\ndef get_comments(links):\r\n execute_path = r\"{}\".format(os.path.dirname(os.path.abspath(__file__)) + '\\chromedriver.exe')\r\n service = Service(executable_path=execute_path)\r\n options = Options()\r\n options.add_argument('--headless')\r\n options.add_argument('--mute-audio')\r\n \r\n driver = webdriver.Chrome(service=service, options=options)\r\n\r\n wait = WebDriverWait(driver, 1)\r\n\r\n data = []\r\n for link in links:\r\n driver.get(link)\r\n if(links.index(link) == 6):\r\n break\r\n for item in range(10): \r\n wait.until(EC.visibility_of_element_located((By.TAG_NAME, \"body\"))).send_keys(Keys.END)\r\n time.sleep(0.5)\r\n\r\n for comment in wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, \"#content\"))):\r\n comments.append(comment.text)\r\n\r\n driver.close()\r\n driver.quit()\r\n\r\ndef return_comments():\r\n return comments\r\n","repo_name":"knguyen271/Opinion-Consolidator","sub_path":"youtube_comments.py","file_name":"youtube_comments.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16381534222","text":"import asyncio\nimport json\n\nfrom fastapi import FastAPI, Depends, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom app.database import SessionLocal\nfrom app.models import BoardState, BoardStateModel, RobotMove\nfrom app.services.board import initialize_board\nfrom app.services.player 
import perform_action\n\napp = FastAPI()\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n# endpoint definitions...\n\n@app.post(\"/initialize-board\", response_model=BoardState, summary=\"Initialize Game Board\")\nasync def initialize_game_board():\n board_state = initialize_board()\n # Create a BoardStateModel instance and save it to the database\n db = SessionLocal()\n db_board_state = BoardStateModel(board_state=board_state.dict())\n db.add(db_board_state)\n db.commit()\n db.refresh(db_board_state)\n return board_state\n\n\n@app.get(\"/board-state/{board_id}\", response_model=BoardState, summary=\"Get Board State by ID\")\nasync def get_board_state_by_id(board_id: int, db: Session = Depends(get_db)):\n \"\"\"\n Get the state of the game simulation for a specific board ID.\n\n Args:\n board_id (int): The ID of the board for which to retrieve the state.\n\n Returns:\n BoardState: The state of the game simulation for the specified board ID.\n \"\"\"\n db_board_state = db.query(BoardStateModel).filter(BoardStateModel.id == board_id).first()\n if db_board_state:\n return db_board_state.board_state\n else:\n raise HTTPException(status_code=404, detail=\"Board state not found\")\n\n\n# Define board_state_lock to handle concurrency and race conditions\nboard_state_lock = asyncio.Lock()\n\n\n@app.post(\"/move-robot\", response_model=BoardState, summary=\"Move Robot\")\nasync def move_robot(board_id: int, player_id: int, move: RobotMove, db: Session = Depends(get_db)):\n \"\"\"\n Move a robot or perform an attack.\n\n This endpoint allows players to give instructions to a robot using its robot_id.\n A robot can move up, move down, move left, move right, or perform an attack.\n If an attack is performed, dinosaurs in adjacent cells are destroyed, and the player gains points.\n\n Args:\n board_id (int): The ID of the board on which the move is being made.\n player_id (int): The ID of the player making the move.\n move (RobotMove): The action to be performed by the robot.\n\n Returns:\n BoardState: The updated game state after the action is performed.\n \"\"\"\n async with board_state_lock:\n # Retrieve the latest board state from the database\n db_board_state = db.query(BoardStateModel).filter_by(id=board_id).order_by(BoardStateModel.id.desc()).first()\n if db_board_state:\n current_board_state_data = db_board_state.board_state\n current_board_state = BoardState(**current_board_state_data)\n else:\n raise HTTPException(status_code=404, detail=\"Board state not found\")\n\n # Perform the action with the robot using the player's move\n updated_board_state = perform_action(player_id, move, current_board_state)\n\n # Update the board state in the database\n new_db_board_state = BoardStateModel(board_state=updated_board_state.dict())\n db.add(new_db_board_state)\n db.commit()\n db.refresh(new_db_board_state)\n\n return updated_board_state\n","repo_name":"hamedasgari20/game-simulation-with-FastAPI","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15191550859","text":"from collections import deque\r\n\r\nK = int(input())\r\ndp = [float(\"inf\")]*(K+1)\r\ndq = deque([(1, 1)])\r\npopleft, append, append_left = dq.popleft, dq.append, dq.appendleft\r\n\r\nwhile dq:\r\n v, cost = popleft()\r\n if v == 0:\r\n print(cost)\r\n exit()\r\n if dp[v] < cost:\r\n continue\r\n plus_10, plus_1 = (v*10)%K, (v+1)%K\r\n if dp[plus_10] > 
cost:\r\n dp[plus_10] = cost\r\n append_left((plus_10, cost))\r\n if dp[plus_1] > cost+1:\r\n dp[plus_1] = cost+1\r\n append((plus_1, cost+1))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc084/B/4173989.py","file_name":"4173989.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"17302324097","text":"from flask import render_template, request, Response, json\nfrom flaskr import db, header\nfrom bson.objectid import ObjectId\n\n\ndef _getPipeline(quoteId, trimmedDown):\n pipeline = []\n pipeline.append({'$match' : { '_id' : ObjectId(quoteId) }})\n\n projection = {\n '_id': 0,\n 'name': 1,\n 'url': 1,\n 'unit': 1,\n 'lastQuote': { '$ifNull': [{ '$last': '$quoteHistory.quote' }, 0] },\n }\n if not trimmedDown:\n projection.update({\n '_id': 1,\n 'quoteHistory': 1,\n 'updateFrequency': 1,\n 'currencyPair': 1\n })\n\n pipeline.append({'$project' : projection})\n return pipeline\n\n\ndef item(type):\n if request.method == 'GET':\n quoteId = request.args.get('quoteId')\n if not quoteId:\n return ('', 400)\n\n quotes = list(db.get_db().quotes.aggregate(_getPipeline(quoteId, type == \"json\")))\n if not quotes:\n return ('', 404)\n\n if type == \"json\":\n return Response(json.dumps(quotes[0]), mimetype=\"application/json\")\n else:\n return render_template(\"pricing/item.html\", item=quotes[0], header=header.data())\n else:\n return '', 405\n","repo_name":"kpk-pl/wallet","sub_path":"web-gui/flaskr/apps/pricing/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30849158719","text":"class Solution:\n def maxAreaOfIsland(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n res=0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n cur=0\n if grid[i][j]==1:\n cur+=1\n grid[i][j]=2\n cur+=self.goTop(grid,i,j)\n cur+=self.goBottom(grid,i,j)\n cur+=self.goLeft(grid,i,j)\n cur+=self.gotRight(grid,i,j)\n res=max(cur,res)\n return res\n\n \n \n\n def goTop(self, grid, x,y):\n res=0\n while True:\n if y-1>=0 and grid[x][y-1]==1:\n y-=1\n res+=1\n grid[x][y]=2\n res+=self.goLeft(grid,x,y)\n res+=self.gotRight(grid,x,y)\n res+=self.goBottom(grid,x,y)\n else:\n break\n return res\n\n def goBottom(self, grid, x,y):\n res=0\n while True:\n if y+1<len(grid[x]) and grid[x][y+1]==1:\n y+=1\n res+=1\n grid[x][y]=2\n res+=self.goLeft(grid,x,y)\n res+=self.gotRight(grid,x,y)\n res+=self.goTop(grid,x,y)\n else:\n break\n return res\n\n def goLeft(self, grid, x,y):\n res=0\n while True:\n if x-1>=0 and grid[x-1][y]==1:\n x-=1\n res+=1\n grid[x][y]=2\n res+=self.goTop(grid, x,y)\n res+=self.goBottom(grid, x,y)\n res+=self.gotRight(grid, x,y)\n else:\n break\n return res\n def gotRight(self, grid, x,y):\n res=0\n while True:\n if x+1> d & 1 for d in range(num_digits)])\n\ndef fizz_buzz_encode(i):\n if i % 15 == 0:\n return 3\n elif i % 5 == 0:\n return 2\n elif i % 3 == 0:\n return 1\n else:\n return 0\n# training data -> note: no bit-order reversal is going on here\nNUM_DIGITS = 10 # represent each number with ten binary digits\n\n# note: there are 923 samples below; a different batch size only changes how many samples are left in the final batch, whose accuracy is then computed; the epoch accuracy is updated once per epoch\n# the dataloader already handles batching; after each pass, compute the metrics\ntrX = np.array([binary_encode(i, NUM_DIGITS) for i in range(101, 2**NUM_DIGITS)])\ntrY = np.array([fizz_buzz_encode(i) for i in range(101, 2**NUM_DIGITS)])\nprint(\"trX[:5] = \\n\",trX[:5])\nprint('--------------')\nprint(\"trY[:5] = \\n\",trY[:5])\n\n\n# Prepare the model\nclass FizzBuzzModel(nn.Module):\n def __init__(self, in_features, out_classes, hidden_size, n_hidden_layers):\n super(FizzBuzzModel, self).__init__()\n\n # note: there are multiple hidden layers\n layers = []\n for i in range(n_hidden_layers):\n 
layers.append(nn.Linear(hidden_size, hidden_size))\n layers.append(nn.BatchNorm1d(hidden_size))\n layers.append(nn.ReLU())\n\n self.inputLayer = nn.Linear(in_features, hidden_size)\n self.relu = nn.ReLU()\n\n self.layers = nn.Sequential(*layers)\n\n self.outputLayer = nn.Linear(hidden_size, out_classes)\n\n def forward(self, x):\n x = self.inputLayer(x)\n x = self.relu(x)\n x = self.layers(x)\n out = self.outputLayer(x)\n return out\n\n\n# Train the model\n# Initialization\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = FizzBuzzModel(10, 4, 150, 3).to(device)\nprint(\"model = \", model)\nlearning_rate = 0.02\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# DataLoader - a bit confusing\nFizzBuzzDataset = tud.TensorDataset(torch.from_numpy(trX).float().to(device),\n torch.from_numpy(trY).long().to(device))\ndataloader = tud.DataLoader(dataset=FizzBuzzDataset, batch_size=300, shuffle=True)\n\n# Training\nmodel.train()\nfor epoch in range(1, 300):\n for i, (batch_x, batch_y) in enumerate(dataloader):\n # print('batch_x= ', batch_x, 'shape = ', batch_x.shape)\n # print('batch_y= ', batch_y, 'shape = ', batch_y.shape)\n\n out = model(batch_x)\n loss = loss_fn(out, batch_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n correct = 0\n total = 0\n # print('out.data', out.data, out.data.shape)\n _, predicted = torch.max(out.data, 1)\n # print('predicted = ', predicted, predicted.shape)\n total += batch_y.size(0)\n correct += (predicted == batch_y).sum().item()\n acc = 100*correct/total\n print('Epoch : {:0>4d} | Loss : {:<6.4f} | Train Accuracy : {:<6.2f}%'.format(epoch,loss,acc)) # digits after the decimal point\n\n\n# Test the model\ndef fizz_buzz_decode(i, prediction):\n return [str(i), \"fizz\", \"buzz\", \"fizzbuzz\"][prediction]\n\n\n# Model prediction: need to switch to eval mode\nmodel.eval()\n\ntestX = np.array([binary_encode(i, NUM_DIGITS) for i in range(1, 101)])\npredicts = model(torch.from_numpy(testX).float().to(device))\n\n_, res = torch.max(predicts, 1)\n\npredictions = [fizz_buzz_decode(i, prediction) for (i, prediction) in zip(range(1, 101), res)]\n\nprint('------------------------------')\nprint('prediction = \\n', predictions )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yjfiejd/2019xuexi","sub_path":"Pytorch_new/FizzBuzz_new.py","file_name":"FizzBuzz_new.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"41004443844","text":"import sys\nsys.setrecursionlimit(10**6)\ninput = sys.stdin.readline\npreorder = []\nwhile True:\n try:\n node = int(input())\n preorder.append(node)\n except:\n break\n\n# values smaller than the root belong to the left subtree\n# values larger than the root belong to the right subtree (a lower or an upper right subtree)\ndef postorder(left,right):\n if left > right:\n return\n\n root = preorder[left]\n mid = right + 1 # mark one past the end, since there may be no value larger than the root\n\n # stop once a value larger than the root is found\n # -> everything before mid is smaller than the root, i.e. the left subtree\n for i in range(left+1,right+1):\n if root < preorder[i]:\n mid = i\n break\n \n # preorder[left] is the root, so exclude it\n postorder(left+1,mid-1) # check the left subtree\n postorder(mid,right) # check the right subtree\n print(preorder[left])\n\npostorder(0,len(preorder)-1)","repo_name":"angiekim05/study","sub_path":"Baekjoon/gold4/5639.py","file_name":"5639.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"70336347752","text":"#tuples\n\"\"\"\nA tuple is an immutable collection of ordered, comma-separated values enclosed in round 
parentheses (). Tuples are similar to lists in Python, but unlike lists, tuples cannot be modified once they are created, making them immutable.\n\nTuples are used to store and represent a fixed sequence of values, and they can contain values of different data types, such as numbers, strings, or even other tuples. Tuples are commonly used to represent a collection of related values that should remain unchanged, such as coordinates, dates, or color codes.\n\"\"\"\n\n# Creating a tuple\nmy_tuple = (1,2,2,2,3,4,5)\n\n# Accessing elements in the tuple\nfirst_el = my_tuple[0]\nlast_el = my_tuple[-1]\n\n# Tuple packing and unpacking\na = 1\nb = 2\na_b = a, b\nprint(a_b) # (1, 2)\nc, d = a_b\nprint(c, d) # 1 2\n\n# Tuple operations\ntuple_len = len(my_tuple)\nprint(tuple_len) # 7\ntuple_count = my_tuple.count(2)\nprint(tuple_count) # 3\n\nis_element_present = 3 in my_tuple\nprint(is_element_present) # True\n\n# Immutable\ntry:\n my_tuple[0] = 10\nexcept:\n print('ya done goofed. this is immutable')\n","repo_name":"zakmayfield/atlantis","sub_path":"gpt/course/1-adv-data-types/3-tuples.py","file_name":"3-tuples.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13726609136","text":"from pymongo import MongoClient, UpdateOne\nimport requests\nfrom bs4 import BeautifulSoup as BS\nfrom time import sleep\nfrom datetime import datetime,timedelta\nimport random\nimport json\n\n\ncluster = MongoClient(\"\") # secret key\ndb = cluster[\"crypto\"]\n\ncollection = db[\"cryptos\"]\n\n\n\nroot_link = \"https://coinmarketcap.com/\"\n\nfir_pg = requests.get(root_link)\nfir_bs = BS(fir_pg.text, \"lxml\")\n\nmax_page = fir_bs.find( class_ = \"pagination\" ).find_all( \"li\", class_ = \"page\" )[-1].text\nmax_page = int(max_page)\nsleep(2)\n\n\nfor page in range(1, max_page+1 ):\n \n \n req = requests.get( root_link + \"?page=\" + str(page) )\n soup = BS( req.text, \"lxml\" )\n\n t = soup.find( \"script\", {\"id\":\"__NEXT_DATA__\"} )\n data = json.loads( t.string )[\"props\"][\"initialState\"][\"cryptocurrency\"][\"listingLatest\"][\"data\"]\n tbody = soup.find(\"table\", class_ = \"cmc-table cmc-table___11lFC cmc-table-homepage___2_guh\").find(\"tbody\").find_all(\"tr\")\n\n data_list = []\n for item,tr in zip(data,tbody):\n \n name = item[\"name\"]\n \n if (\"-\" in name) or (len(name)>18) or ( name[0].isdigit() ) or ( name[-1].isdigit() ):\n continue\n \n tds = tr.find_all(\"td\")\n link = root_link + tds[2].find( \"a\", class_=\"cmc-link\" )[\"href\"][1:]\n \n info_dict = { \"Name\" : name , \"Name Symbol\":item[\"symbol\"], \"link\": link, \"lastUpdated\": item[\"lastUpdated\"],\"dateAdded\": item[\"dateAdded\"] }\n \n\n data_list.append( UpdateOne( {\"_id\": item[\"id\"] }, { \"$set\" : info_dict }, upsert = True ) )\n \n \n collection.bulk_write( data_list )\n sleep(random.randint(2,4))\n \n\nprint(\"succes\")\n\n\n","repo_name":"Ali-Huseynov/Python","sub_path":"Web Scraping/CoinMarketScraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35355002778","text":"from PyQt4 import QtGui\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n\nclass HistogramTab(QtGui.QWidget):\n \"\"\" Tab displaying the histogram of values.\"\"\"\n def __init__(self, parent):\n 
QtGui.QWidget.__init__(self, parent)\n layout = QtGui.QVBoxLayout(self)\n\n self.figure = plt.Figure()\n self.canvas = FigureCanvas(self.figure)\n\n layout.addWidget(self.canvas)\n\n self.update_histogram()\n\n @property\n def matrix(self):\n if not hasattr(self, \"_matrix\"):\n return None\n return self._matrix\n\n @matrix.setter\n def matrix(self, value):\n self._matrix = value\n self.update_histogram()\n\n def update_histogram(self):\n self.figure.clear()\n if self.matrix:\n steps = 100\n axis = self.figure.add_subplot(111)\n data = np.histogram(self.matrix.data_array, steps)\n # print data\n y = data[0]\n x = data[1][:-1] # data[1][1:]\n\n bar_width = float(data[1][-1] - data[1][0]) / steps\n\n axis.bar(x, y, align=\"center\", width=bar_width)\n self.canvas.draw()\n\n\n #def setText(self, text):\n # self.textEdit.setText(text)\n","repo_name":"janpipek/scoring_browser","sub_path":"scoring_browser/histogram_tab.py","file_name":"histogram_tab.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"185913099","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.types import HyperCategory, Offer, Region, ReturnPolicy, Shop\nfrom core.testcase import TestCase, main\nfrom core.matcher import NoKey\n\n\nclass T(TestCase):\n @classmethod\n def prepare_regional_return_policy(cls):\n cls.index.regiontree += [\n Region(\n rid=50,\n name='Белоруссия',\n region_type=Region.COUNTRY,\n children=[\n Region(rid=101, name='Минск'),\n ],\n ),\n Region(\n rid=200,\n name='Россия',\n region_type=Region.COUNTRY,\n children=[\n Region(rid=213, name='Москва'),\n ],\n ),\n ]\n\n cls.index.hypertree += [\n HyperCategory(\n hid=101,\n name='Электроника',\n return_policies_by_region=[\n ReturnPolicy(200, '14d'),\n ReturnPolicy(50, '7d'),\n ],\n children=[HyperCategory(hid=201, name='Телефоны')],\n ),\n HyperCategory(\n hid=301,\n name='Одежда',\n children=[\n HyperCategory(\n hid=401,\n name='Платья',\n goods_return_policy='10d',\n return_policies_by_region=[\n ReturnPolicy(200, '14d'),\n ReturnPolicy(50, None),\n ],\n )\n ],\n ),\n ]\n\n cls.index.shops += [\n Shop(fesh=1001, regions=[213], name='Moscow'),\n Shop(fesh=1002, regions=[101], name='Minsk'),\n ]\n\n cls.index.offers += [\n Offer(title='Phone Moscow', fesh=1001, hid=201),\n Offer(title='Phone Minsk', fesh=1002, hid=201),\n Offer(title='Dress Moscow', fesh=1001, hid=401),\n Offer(title='Dress Minsk', fesh=1002, hid=401),\n ]\n\n def test_regional_return_policy(self):\n \"\"\"\n Check that the policy for different cities is propagated from the category itself and,\n if the category has none, from its parent.\n \"\"\"\n\n # taken from the parent category\n response = self.report.request_json(\"place=prime&hid=201&rids=213\")\n self.assertFragmentIn(\n response,\n {\n \"titles\": {\"raw\": \"Phone Moscow\"},\n \"returnPolicy\": \"14d\",\n },\n )\n\n response = self.report.request_json(\"place=prime&hid=201&rids=101\")\n self.assertFragmentIn(\n response,\n {\n \"titles\": {\"raw\": \"Phone Minsk\"},\n \"returnPolicy\": \"7d\",\n },\n )\n\n # taken from the category itself, ignoring the default return_policy\n response = self.report.request_json(\"place=prime&hid=401&rids=213\")\n self.assertFragmentIn(\n response,\n {\n \"titles\": {\"raw\": \"Dress Moscow\"},\n \"returnPolicy\": \"14d\",\n },\n )\n\n # empty returnPolicy - no return policy in the response either\n response = self.report.request_json(\"place=prime&hid=401&rids=101\")\n 
self.assertFragmentIn(\n response,\n {\n \"titles\": {\"raw\": \"Dress Minsk\"},\n \"returnPolicy\": NoKey(\"returnPolicy\"),\n },\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_return_policy.py","file_name":"test_return_policy.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34088071959","text":"import numpy as np\nfrom parameters import param as pa\n\ndef receptive_field(img):\n\tw = np.zeros([5,5])\n\tpot = np.zeros([pa.pix, pa.pix])\n\tran = [-2,-1,0,1,2]\n\tox = 2\n\toy = 2\n\tw[ox][oy] = 1\n\n\tfor i in range(5):\n\t\tfor j in range(5):\n\t\t\td = abs(ox-i) + abs(oy-j)\n\t\t\tw[i][j] = (-0.375)*d + 1\n\n\tfor i in range(pa.pix):\n\t\tfor j in range(pa.pix):\n\t\t\tsumm = 0\n\t\t\tfor m in ran:\n\t\t\t\tfor n in ran:\n\t\t\t\t\tif (i+m)>=0 and (i+m)<=27 and (j+n)>=0 and (j+n)<=27:\n\t\t\t\t\t\tsumm = summ + w[ox+m][oy+n]*img[i+m][j+n]\n\t\t\tpot[i][j] = summ\n\n\treturn pot\n\n\n# if __name__ == '__main__':\n# \timg = np.random.rand(28, 28)\n# \tpot = receptive_field(img)\n# \tprint(np.shape(pot))\n# \tprint(np.where(pot<0))\n# \tprint(np.min(pot))\n# \tprint(np.max(pot))\n# \tprint(pot)","repo_name":"Vinohith/SNN_from_scratch","sub_path":"rec_field.py","file_name":"rec_field.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"37921911158","text":"class Solution:\n def mostFrequentEven(self, nums: List[int]) -> int:\n even = []\n nums.sort()\n for i in range(len(nums)):\n if nums[i] % 2 == 0:\n even.append(nums[i])\n counts = collections.Counter(even)\n if len(even) > 0:\n return max(counts, key = counts.get)\n else:\n return -1","repo_name":"scoder5/LeetCode-Solutions","sub_path":"2404-most-frequent-even-element/2404-most-frequent-even-element.py","file_name":"2404-most-frequent-even-element.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23798700911","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 10 22:55:17 2021\n\n@author: APARNA\n\"\"\"\nimport pandas as pd\n\nclass Health:\n def __init__(self,data):\n self.data = data\n self.analysis()\n \n \n def custom_algo(self,x):\n if x.BMI<=18.4:\n return \"Underweight\",'Malnutrition risk'\n elif 18.5<=x.BMI<= 24.9:\n return \"Normal weight\",\"Low risk\"\n elif 25<=x.BMI<= 29.9:\n return \"Overweight\",\"Enhanced risk\"\n elif 30<=x.BMI<= 34.9:\n return \"Moderately obese\",\"Medium risk\"\n elif 35<=x.BMI<= 39.9:\n return \"Severely obese\",\"High risk\"\n elif 40<=x.BMI:\n return \"Very severely obese\",\"Very high risk\"\n else:\n pass\n \n def analysis(self):\n self.data_df = pd.DataFrame(self.data)\n self.data_df['BMI'] = self.data_df.apply(lambda x :round((x.WeightKg/(x.HeightCm/100)**2),2),axis =1)\n self.data_df[['BMI_Category','Health_risk']] = self.data_df.apply(self.custom_algo,axis = 1,result_type='expand')\n self.overweight_people = list(self.data_df[self.data_df[\"BMI_Category\"] == \"Overweight\"].count())[0]\n print(self.data_df)\n \n def __repr__(self):\n return f\"Total number of overweight people:{self.overweight_people}\"\n\ndef main():\n data = [{\"Gender\": \"Male\", \"HeightCm\": 171, \"WeightKg\": 96 },\n { \"Gender\": \"Male\", \"HeightCm\": 161, \"WeightKg\": 85 },\n { \"Gender\": \"Male\", \"HeightCm\": 
180, \"WeightKg\": 77 },\n { \"Gender\": \"Female\", \"HeightCm\": 166, \"WeightKg\": 62},\n {\"Gender\": \"Female\", \"HeightCm\": 150, \"WeightKg\": 70},\n {\"Gender\": \"Female\", \"HeightCm\": 167, \"WeightKg\": 82}]\n obj = Health(data)\n print(obj)\n \nif __name__ == \"__main__\":\n main()","repo_name":"aparna2198/code10102021-APARNARAVINDRAMANE","sub_path":"healthreportscript.py","file_name":"healthreportscript.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25608374288","text":"import random\nfrom core.module import MasterModule\nfrom spi.encoder import Encoder\nimport pprint\nimport sys\nimport time\nfrom core.utils import do_every\n\nINTERVAL = 0.2\n\nclass GameManager():\n\n def __init__(self):\n self.encoder = Encoder()\n self.encoder.register_master_module({'SEED':random.randint(0,0xffff)})\n self._modules = []\n self._master_module = None\n self._running = False\n self._strikes = 0\n\n def get_strikes(self):\n return self._strikes\n\n def set_strikes(self,strikes):\n self._strikes = strikes\n\n def inc_strikes(self,inc):\n self.set_strikes(self.get_strikes() + inc)\n\n def get_modules(self):\n return self._modules\n\n def set_modules(self,modules):\n self._modules = modules\n\n def search_modules(self):\n self.encoder.initial_reset()\n time.sleep(2)\n all_modules = self.encoder.discover()\n modules = []\n for module in all_modules:\n if isinstance(module,MasterModule):\n self._master_module = module\n else:\n modules.append(module)\n return modules\n\n def send_neighbours(self):\n return self.encoder.send_neighbours(self._modules+[self._master_module])\n\n def check_module_states(self):\n states = []\n for module in self.get_modules():\n states.append(module.get_status())\n print(module.get_slot(),module.get_status())\n return not any(states)\n\n def run_check(self):\n timeout = not self._running if hasattr(self,'game_time') else False\n strike_out = self.max_strikes < self.get_strikes()\n defused = self.check_module_states()\n print(\"Strikes:\",self.get_strikes())\n print(\"Is defused?\",defused)\n print(strike_out,timeout)\n if defused:\n print(\"SUCCESS\")\n elif any((timeout,strike_out)):\n print(\"BOOOOOOOM\")\n return not any((timeout,strike_out))\n\n def _update(self):\n self.game_time = self.exploding_time-int(time.time()*1000)\n if not self._running:\n self.encoder.send_gamestatus(self._modules,self.game_time,strikes=0,status=0)\n self._running = True\n return\n if (self.check_module_states()):\n self.encoder.send_gamestatus(self._modules,\n time=0 if self.game_time <=0 else self.game_time,\n strikes=self.get_strikes(),\n status=0\n )\n print(\"sendsend\")\n self._running = False\n elif (self.game_time > 0) and (self.get_strikes() < self.max_strikes):\n trigger_new = self.encoder.send_gamestatus(self._modules,\n self.game_time,\n strikes=self.get_strikes(),\n status=1\n )\n self.inc_strikes(trigger_new)\n \n else:\n self.encoder.send_gamestatus(self._modules,\n time=0 if self.game_time <=0 else self.game_time,\n strikes=self.get_strikes(),\n status=2\n )\n self._running = False\n\n def run(self,limit,max_strikes):\n self.max_strikes = max_strikes\n self.exploding_time = int(time.time()+int(limit))*1000\n do_every(INTERVAL,self._update,self.run_check)\n\nif __name__=='__main__':\n \n if len(sys.argv) != 3:\n print(\"Usage: defuseme.py \")\n sys.exit()\n print(\"Starting up\")\n gm = GameManager()\n print(\"Collecting module information\")\n 
gm.set_modules(gm.search_modules())\n if not gm.get_modules():\n print(\"There are no modules attached to the Frame!\")\n sys.exit()\n print(\"Modules found:\",gm.get_modules())\n\n [pprint.pprint(module.__dict__) for module in gm.get_modules() if module]\n [pprint.pprint(module.get_byte_repr()) for module in gm.get_modules() if module]\n #pprint.pprint(gm.get_modules()[0].get_byte_repr())\n gm.send_neighbours()\n\n strikes = int(sys.argv[2])\n\n if strikes > 5:\n strikes = 5\n elif strikes < 1:\n strikes = 1\n\n gm.run(sys.argv[1],strikes)\n gm.encoder.cleanup()\n","repo_name":"defuseme/DefuseMe","sub_path":"src/defuseme.py","file_name":"defuseme.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"2681945447","text":"# launch the robot and tracking simulators\n\nimport os\nfrom launch import LaunchDescription\nfrom launch_ros.actions import Node\nfrom launch.actions import DeclareLaunchArgument, IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\nfrom ament_index_python.packages import get_package_share_directory\n\n\ndef generate_launch_description():\n arg_name = DeclareLaunchArgument('name')\n arg_eye_in_hand = DeclareLaunchArgument('eye_in_hand', default_value='false')\n arg_tracking_base_frame = DeclareLaunchArgument('tracking_base_frame', default_value='tracking_origin')\n arg_tracking_marker_frame = DeclareLaunchArgument('tracking_marker_frame', default_value='tracking_marker')\n arg_robot_base_frame = DeclareLaunchArgument('robot_base_frame', default_value='base_link')\n arg_robot_effector_frame = DeclareLaunchArgument('robot_effector_frame', default_value='wrist_3_link')\n\n incl_move_group_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(get_package_share_directory('easy_handeye2_demo'), 'launch/run_move_group.launch.py')))\n\n node_tracking = Node(\n package='easy_handeye2_demo',\n executable='tracking_simulator',\n name='tracking_simulator',\n parameters=[{\n # simulator arguments\n 'frequency': 10.0,\n 'translation_noise_stdev': 0.001,\n 'rotation_noise_stdev': 0.0001,\n 'hand_to_tracking': '0.12 0.21 0.137 0 0 0 1',\n 'base_to_tracking': '1 0 0.5 0 0 0 1',\n # calibration arguments\n 'name': LaunchConfiguration('name'),\n 'eye_in_hand': LaunchConfiguration('eye_in_hand'),\n 'tracking_base_frame': LaunchConfiguration('tracking_base_frame'),\n 'tracking_marker_frame': LaunchConfiguration('tracking_marker_frame'),\n 'robot_base_frame': LaunchConfiguration('robot_base_frame'),\n 'robot_effector_frame': LaunchConfiguration('robot_effector_frame'),\n }]\n )\n\n return LaunchDescription([\n arg_name,\n arg_eye_in_hand,\n arg_tracking_base_frame,\n arg_tracking_marker_frame,\n arg_robot_base_frame,\n arg_robot_effector_frame,\n incl_move_group_launch,\n node_tracking\n ])\n","repo_name":"jhu-bigss/bigss_handeye","sub_path":"easy_handeye2_demo/launch/robot_and_sim.launch.py","file_name":"robot_and_sim.launch.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31379769198","text":"import matplotlib.pyplot as plt\nimport fastf1.plotting\nimport fastf1\nimport os\nimport yaml\nfrom unidecode import unidecode\n\nfastf1.Cache.enable_cache('/fastf1_cache/api') # optional but recommended\n\n\nsched = fastf1.get_event_schedule(2021, 
include_testing=False)\n\nfor index, row in sched.iterrows():\n \n\n folder = unidecode(row['Location'].lower().replace(' ', '_'))\n print(folder)\n\n data = {\n 'name': row['Location'],\n 'grand_prix_name': row['EventName'],\n 'length': 5000,\n 'laps': 50,\n 'latest_session': {\n 'year': 2021,\n 'type': 'R'\n }\n }\n\n os.makedirs(\"tracks/\"+folder, exist_ok=True)\n with open(\"tracks/\"+folder+\"/track.yaml\", 'w') as outfile:\n yaml.dump(data, outfile, sort_keys=False)\n\n\n","repo_name":"joernnilsson/f1-race-strategy","sub_path":"scripts/list_tracks.py","file_name":"list_tracks.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4340185185","text":"n = int(input())\na = [*map(int, input().split())]\n\nxor = 0\nfor i in a:\n xor ^= i\n\nif xor in a:\n ans = \"Win\"\nelif n & 1:\n ans = \"Win\"\nelse:\n ans = \"Lose\"\n\nprint(ans)\n","repo_name":"thanaism/online-judge","sub_path":"contests/arc131/arc131_c.py","file_name":"arc131_c.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7472009619","text":"import numpy as np\nimport re\nimport pdb\n\n\nclass DrainePAH:\n def __init__(self,size_list,size,lam,lum,cabs):\n self.size_list=size_list\n self.size=size\n self.lam=lam\n self.lum=lum\n self.cabs=cabs\n\n\ndef get_sizes(data,start_rows):\n sizes = []\n\n for start_row in start_rows:\n sizes.append(float(data[start_row-9].split()[0]))\n \n return sizes\n\n\n#--------------------------------\n\n#Compute the location of the starting\n#rows for each table of spectra, as well as the number of rows\n#in each table\n\n#--------------------------------\n\ndef read_draine_file(filename):\n PAH_list = []\n\n with open(filename, 'r') as fh:\n data = fh.readlines()\n #start_rows = [ii for ii,row in enumerate(data) if 'lambda nu*P_nu C_abs' in row]\n start_rows = [ii for ii,row in enumerate(data) if '0.0912' in row]\n\n #find the table length\n table_length = start_rows[1]-start_rows[0]-11 #the number of lines in between each row\n\n \n sizes = get_sizes(data,start_rows)\n\n #--------------------------------\n \n #Loop through the table and generate the values for the objects\n \n #--------------------------------\n\n \n for counter,start_row in enumerate(start_rows):\n lam = []\n lum = []\n cabs = []\n \n table_for_this_grain_size = data[start_row:table_length+start_row]\n \n for i in range(table_length):\n lam.append(float(table_for_this_grain_size[i].split()[0]))\n \n lum_str = table_for_this_grain_size[i].split()[1]\n lum_str = lum_str.replace('D','e')\n lum.append(float(lum_str))\n \n cabs_str = table_for_this_grain_size[i].split()[2]\n cabs_str = cabs_str.replace('D','e')\n cabs.append(float(cabs_str))\n \n\n PAH_list.append(DrainePAH(sizes,sizes[counter],lam,lum,cabs))\n \n\n return PAH_list\n","repo_name":"dnarayanan/powderday","sub_path":"powderday/pah/pah_file_read.py","file_name":"pah_file_read.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"13419397867","text":"import importlib\nimport logging\nimport os\nimport traceback\n\nfrom flask import session\nfrom flask_security import current_user\nfrom flask_babelex import _\n\nfrom werkzeug.utils import secure_filename\n\nfrom models import util #, syslog\nfrom bp.gedcom import GEDCOM_DATA, GEDCOM_APP #, APP_ROOT, 
ALLOWED_EXTENSIONS\nfrom .. import transformer\n\n# Default document server\nDOC_SERVER = 'http://mwikitammi.paas.datacenter.fi/index.php'\n\n# --------------------- GEDCOM functions ------------------------\n\ndef init_log(logfile): \n ''' Define log file and save one previous log. '''\n try:\n if os.open(logfile, os.O_RDONLY):\n os.rename(logfile, logfile + '~')\n except:\n pass\n logging.basicConfig(filename=logfile, level=logging.INFO, format='%(levelname)s:%(message)s')\n\ndef history_init(gedcom_fname):\n ''' Initialize history file. '''\n history_file_name = gedcom_fname + \"-history\"\n open(history_file_name,\"w\").write(\"{}: Uploaded {}\\n\".format(util.format_timestamp(),gedcom_fname))\n \ndef history_append(gedcom_fname,line):\n ''' Add a line to history file. '''\n history_file_name = gedcom_fname + \"-history\"\n open(history_file_name,\"a\").write(\"{}\\n\".format(line))\n\ndef history_append_args(args):\n ''' Add given arguments to history file. '''\n history_file_name = args.input_gedcom + \"-history\"\n with open(history_file_name,\"a\") as f:\n for name,value in sorted(vars(args).items()):\n f.write(\"- {}={}\\n\".format(name,value))\n f.write(\"- User={}\\n\".format(current_user.username))\n\n\ndef get_info(input_gedcom, enc):\n ''' \n Read gedcom HEAD info and count level 0 items.\n Uses the transformation framework.\n '''\n class Options:\n display_changes = False\n encoding = enc\n\n class Nullinfo:\n pass\n \n from .. import gedcom_info_parser\n try:\n t = transformer.Transformer(transform_module=gedcom_info_parser,\n display_callback=display_changes,\n options=Options())\n t.transform_file(input_gedcom)\n return t.transformation.info \n except: # pragma: no cover\n traceback.print_exc()\n return Nullinfo()\n \ndef analyze(input_gedcom, enc):\n ''' Get statistics of given gedcom file. '''\n class Options:\n display_changes = False\n encoding = enc\n\n class Nullinfo:\n pass\n \n from .. import gedcom_analyze\n try:\n t = transformer.Transformer(transform_module=gedcom_analyze,\n display_callback=display_changes,\n options=Options())\n t.transform_file(input_gedcom)\n return t.transformation.info \n except:\n traceback.print_exc()\n return \"error\"\n\ndef read_gedcom(filename):\n ''' Return all gedcom file rows using default or ISO8859-1 encoding. '''\n try:\n return open(filename).readlines()\n except UnicodeDecodeError:\n return open(filename,encoding=\"ISO8859-1\").readlines()\n\ndef get_gedcom_user():\n ''' Return current user name. '''\n return session.get(\"gedcom_user\",current_user.username)\n\ndef get_gedcom_folder(username=None):\n ''' Return user's gedcom data directory. '''\n if username is None:\n username = get_gedcom_user()\n return os.path.join(GEDCOM_DATA, username)\n\ndef gedcom_fullname(gedcom):\n ''' Return gedcom filename. '''\n return os.path.join(get_gedcom_folder(),secure_filename(gedcom))\n\ndef get_metadata(gedcom):\n ''' Return given gedcom metadata from *-meta file. '''\n gedcom_folder = get_gedcom_folder()\n gedcom_fullname = os.path.join(gedcom_folder, secure_filename(gedcom))\n return get_metadata2(gedcom_fullname)\n\ndef get_metadata2(gedcom_fullname):\n ''' Return given gedcom file metadata from corresponding meta file. '''\n try:\n metaname = gedcom_fullname + \"-meta\"\n return eval(open(metaname).read())\n except FileNotFoundError:\n return {}\n\ndef save_metadata(gedcom,metadata):\n ''' Save updated or new gedcom metadata. 
'''\n gedcom_folder = get_gedcom_folder()\n metaname = os.path.join(gedcom_folder, secure_filename(gedcom) + \"-meta\")\n open(metaname,\"w\").write(repr(metadata))\n \ndef get_transforms():\n ''' Search available transformations and return list of their properties. '''\n class Transform: pass\n\n trans_dir = os.path.join(GEDCOM_APP, \"transforms\")\n names = sorted([name for name in os.listdir(trans_dir) \\\n if name.endswith(\".py\") and not name.startswith(\"_\")])\n \n transforms = []\n for name in names:\n t = Transform()\n t.name = name\n t.modname = name[0:-3]\n transformer = importlib.import_module(\"bp.gedcom.transforms.\"+t.modname)\n\n # have to reload because the user may have changed language -> name and docline may change\n importlib.reload(transformer) \n\n doc = transformer.__doc__\n if doc:\n t.doc = doc\n t.docline = doc.strip().splitlines()[0]\n t.docline = _(t.docline)\n else:\n t.doc = \"\"\n t.docline = \"\"\n if hasattr(transformer,\"docline\"):\n t.docline = transformer.docline\n \n doclink = \"\"\n if hasattr(transformer,\"doclinks\"):\n lang = session.get('lang',\"\")\n doclink = transformer.doclinks.get(lang,\"\")\n if not doclink and hasattr(transformer,\"doclink\"):\n doclink = transformer.doclink\n if doclink.startswith('/'):\n doclink = DOC_SERVER + doclink\n t.doclink = doclink\n\n if hasattr(transformer,\"name\"):\n t.displayname = transformer.name\n else:\n t.displayname = t.modname\n \n t.version = getattr(transformer,\"version\",\"\")\n transforms.append(t)\n #yield t\n return sorted(transforms,key=lambda t: t.displayname)\n\n\ndef list_gedcoms(username):\n ''' Search transformations and return list of their names and metadata. '''\n gedcom_folder = get_gedcom_folder(username)\n try:\n names = sorted([name for name in os.listdir(gedcom_folder) \n if name.lower().endswith(\".ged\")],\n key=lambda s: s.lower()\n )\n except:\n names = []\n files = []\n class File: pass\n for name in names:\n f = File()\n f.name = name\n gedcom_fullname = os.path.join(gedcom_folder,name)\n f.metadata = get_metadata2(gedcom_fullname)\n if username == current_user.username or f.metadata.get(\"admin_permission\"):\n files.append(f)\n return files\n\ndef removefile(fname): \n ''' Remove file. '''\n try:\n os.remove(fname)\n except FileNotFoundError:\n pass\n\ndef display_changed_lines(old_lines, new_lines, linenum=None):\n ''' Print diff list of two line sets as html in user language strating from linenum.\n '''\n if old_lines is None: \n print(\"
\"+_(\"Added:\")+\"
\", end=\"\")\n for line in new_lines:\n print(line)\n print(\"\")\n print(\"
\") \n return\n if not new_lines: \n print(\"
\"+_(\"Deleted:\")+\"
\", end=\"\")\n if linenum: \n print(f\"{_('starting from line ')}{linenum}\")\n for line in old_lines:\n print(line)\n print(\"\")\n print(\"
\") \n return\n print(\"
\"+_(\"Replaced:\")+\"\")\n if linenum: \n print(f\"{_('starting from line ')}{linenum}\")\n print(\"
\", end=\"\")\n for line in old_lines:\n print(line)\n print(\"\")\n print(\"
\"+_(\"With:\")+\"
\", end=\"\")\n for line in new_lines:\n print(line)\n print(\"\")\n print()\n print(\"
\") \n\ndef print_differences(lines1, lines2, linenum):\n # remove duplicates from the beginning and from the end\n while lines1 and lines2:\n if lines1[0] != lines2[0]: break\n lines1 = lines1[1:]\n lines2 = lines2[1:]\n if linenum is not None: linenum += 1\n \n while lines1 and lines2:\n if lines1[-1] != lines2[-1]: break\n lines1 = lines1[:-1]\n lines2 = lines2[:-1]\n display_changed_lines(lines1, lines2, linenum)\n\ndef display_changes(lines, item, linenum=None):\n ''' Print diff list of two line sets as html in user language? \n '''\n class Out:\n def __init__(self):\n self.lines = []\n def emit(self,line):\n self.lines.append(line)\n\n if not item:\n new_lines = []\n else:\n out = Out()\n if isinstance(item, list):\n for it in item:\n it.print_items(out)\n else:\n item.print_items(out)\n new_lines = out.lines\n print_differences(lines, new_lines, linenum)\n\n","repo_name":"Taapeli/stk-upload","sub_path":"app/bp/gedcom/models/gedcom_utils.py","file_name":"gedcom_utils.py","file_ext":"py","file_size_in_byte":9007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"39079719350","text":"from sklearn.preprocessing import MultiLabelBinarizer\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport numpy as np\nimport copy\n\nevent_list = ['Rain', 'Fog', 'NoEvent', 'Snow', 'Thunderstorm', 'Hail']\nfields = [\"StoreType\", \"WeekDay\", \"Region\", \"Month\", \"AssortmentType\"]\nfeatures = [\n\t'StoreID', 'Date', 'IsOpen', 'IsHoliday', 'HasPromotions', 'NumberOfSales',\n\t'StoreType_Hyper Market','StoreType_Shopping Center', 'StoreType_Standard Market',\n\t'StoreType_Super Market','NearestCompetitor',\n\t'WeekDay_0', 'WeekDay_1', 'WeekDay_2','WeekDay_3',\n\t'WeekDay_4', 'WeekDay_5', 'WeekDay_6', 'Region_0',\n\t'Region_1', 'Region_2', 'Region_3', 'Region_4', 'Region_5',\n\t'Region_6','Region_7', 'Region_8', 'Region_9', 'Region_10',\n\t'Month_1', 'Month_2','Month_3', 'Month_4', 'Month_5', 'Month_6',\n\t'Month_7', 'Month_8','Month_9', 'Month_10', 'Month_11', 'Month_12',\n\t'AssortmentType_General','AssortmentType_With Fish Department', 'AssortmentType_With Non-Food Department']\n\nclass TrainPreprocessingAutoreg():\n\tdef __init__(self, data, to_date):\n\t\tself.data = copy.deepcopy(data)\n\t\tself.win_size = 60\n\t\tself.data[\"Date\"] = pd.to_datetime(self.data['Date'], format='%d/%m/%Y')\n\t\tself.add_date_columns(date_column_name=\"Date\", month=True, day_of_week=True)\n\t\tself.split_attribute_list(column=\"Events\", attributes=event_list, fillna=\"NoEvent\")\n\t\tself.one_hot_encode(fields=fields)\n\t\tself.data = self.data[features]\n\t\tself.data = self.data[self.data[\"Region_2\"] != 1]\n\t\tself.data = self.data[self.data[\"Date\"] < to_date]\n\t\tself.add_history_features(lags=[7, 14], mean=True)\n\t\tself.data = self.data[self.data[\"IsOpen\"] == 1]\n\t\tself.X = np.array(self.data.drop([\"StoreID\", \"IsOpen\", \"Date\", \"NumberOfSales\"], inplace=False, axis = 1))\n\t\tself.y = np.array(self.data[\"NumberOfSales\"])\n\n\tdef get_X_y(self):\n\t\treturn self.X, self.y\n\n\tdef one_hot_encode(self, fields):\n\t\tself.data = pd.get_dummies(self.data, columns=fields)\n\n\tdef interpolate_closed_days(self):\n\t\tself.data['NumberOfSales'] = self.data['NumberOfSales'].replace({0: np.nan})\n\t\tself.data['NumberOfSales'] = self.data.groupby('StoreID')[\"NumberOfSales\"].apply(lambda x: x.interpolate())\n\t\tself.data['NumberOfSales'] = self.data['NumberOfSales'].replace({np.nan: 0})\n\n\tdef 
split_attribute_list(self, column, attributes, fillna):\n\t\tmlb = MultiLabelBinarizer(classes=attributes)\n\t\tif fillna is not None:\n\t\t\tself.data[column] = self.data[column].fillna(fillna, inplace=False)\n\t\tself.data[column] = self.data[column].apply(lambda x: x.split('-'))\n\t\tnew_columns_values = mlb.fit_transform(self.data[column].values.tolist())\n\t\tself.data[attributes] = pd.DataFrame(new_columns_values, index=self.data.index)\n\n\tdef add_date_columns(self, date_column_name, year=False, month=False, day_n=False, day_of_week=False):\n\t\tif year:\n\t\t\tself.data[\"Year\"] = self.data[date_column_name].dt.year\n\t\tif month:\n\t\t\tself.data[\"Month\"] = self.data[date_column_name].dt.month\n\t\tif day_n:\n\t\t\tself.data[\"Day\"] = self.data[date_column_name].dt.day\n\t\tif day_of_week :\n\t\t\tself.data[\"WeekDay\"] = self.data[date_column_name].dt.dayofweek\n\n\tdef add_history_features(self, lags, mean=True):\n\t\ttmp = copy.deepcopy(self.data)\n\t\tif mean:\n\t\t\tself.data['mean'] = tmp.sort_values([\"Date\"], axis=0, ascending=True, inplace=False) \\\n\t\t\t\t\t\t\t\t.groupby('StoreID')[['StoreID','NumberOfSales']] \\\n\t\t\t\t\t\t\t\t.rolling(self.win_size).mean().reset_index(0,drop=True).groupby(\"StoreID\")['NumberOfSales'].shift(1)\n\n\t\tfor l in lags:\n\t\t\tself.data['lag_'+str(l)] = tmp.sort_values([\"Date\", \"StoreID\"], axis=0, ascending=True, inplace=False) \\\n\t\t\t\t\t\t\t\t\t .groupby('StoreID')['NumberOfSales'].shift(l)\n\t\tself.data = self.data.dropna()\n\n\nclass TestPreprocessingAutoreg():\n\tdef __init__(self, data, from_date, region):\n\t\tself.data = copy.deepcopy(data)\n\t\tself.win_size = 60\n\t\tself.data[\"Date\"] = pd.to_datetime(self.data['Date'], format='%d/%m/%Y')\n\t\tn_days_ago = datetime.strptime(from_date, \"%Y-%m-%d\") - timedelta(days=self.win_size)\n\t\tself.add_date_columns(date_column_name=\"Date\", month=True, day_of_week=True)\n\t\tself.split_attribute_list(column=\"Events\", attributes=event_list, fillna=\"NoEvent\")\n\t\tself.one_hot_encode(fields=fields)\n\t\tself.data = self.data[self.data[\"Region_\"+str(region)] == 1]\n\t\tself.history = self.data[(self.data[\"Date\"] < from_date) & (self.data[\"Date\"] >= n_days_ago)]\n\t\tself.history[\"NumberOfSales\"] = self.history[\"NumberOfSales\"]\n\t\tself.history = self.history.sort_values([\"Date\", \"StoreID\"], axis=0, ascending=True, inplace=False)\n\t\tself.history = np.vstack(self.history.groupby(\"Date\")[\"NumberOfSales\"].apply(lambda x : np.array(x)))\n\t\tself.data = self.data[features]\n\t\tself.store_count = len(self.data[self.data[\"Date\"] == from_date][\"StoreID\"].value_counts())\n\t\tself.data = self.data[self.data[\"Date\"] >= from_date]\n\t\tself.dates = sorted(list(self.data[\"Date\"].value_counts().index))\n\t\tself.X = self.data.drop([\"IsOpen\", \"NumberOfSales\"], inplace=False, axis = 1)\n\t\tself.y = self.data[[\"StoreID\", \"Date\", \"NumberOfSales\"]]\n\n\tdef one_hot_encode(self, fields):\n\t\tself.data = pd.get_dummies(self.data, columns=fields)\n\n\tdef split_attribute_list(self, column, attributes, fillna):\n\t\tmlb = MultiLabelBinarizer(classes=attributes)\n\t\tif fillna is not None:\n\t\t\tself.data[column] = self.data[column].fillna(fillna, inplace=False)\n\t\tself.data[column] = self.data[column].apply(lambda x: x.split('-'))\n\t\tnew_columns_values = mlb.fit_transform(self.data[column].values.tolist())\n\t\tself.data[attributes] = pd.DataFrame(new_columns_values, index=self.data.index)\n\n\tdef add_date_columns(self, date_column_name, 
year=False, month=False, day_n=False, day_of_week=False):\n\t\tif year:\n\t\t\tself.data[\"Year\"] = self.data[date_column_name].dt.year\n\t\tif month:\n\t\t\tself.data[\"Month\"] = self.data[date_column_name].dt.month\n\t\tif day_n:\n\t\t\tself.data[\"Day\"] = self.data[date_column_name].dt.day\n\t\tif day_of_week :\n\t\t\tself.data[\"WeekDay\"] = self.data[date_column_name].dt.dayofweek\n\n\tdef get_day_X_y(self, limit):\n\t\tfor d in self.dates[:limit]:\n\t\t\tX = self.X[self.X[\"Date\"] == d].sort_values([\"Date\", \"StoreID\"], axis=0, ascending=True, inplace=False)\n\t\t\ty = self.y[self.y[\"Date\"] == d].sort_values([\"Date\", \"StoreID\"], axis=0, ascending=True, inplace=False)\n\t\t\tX[\"mean\"], X[\"lag_7\"], X[\"lag_14\"] = self.add_history_features()\n\t\t\tyield X.drop([\"Date\",\"StoreID\"], inplace=False, axis = 1), y.drop([\"Date\",\"StoreID\"], inplace=False, axis = 1) , d\n\n\tdef add_history_features(self):\n\t\tmean = np.mean(self.history, axis=0)\n\t\tlag_7 = self.history[-7]\n\t\tlag_14 = self.history[-14]\n\t\treturn mean, lag_7, lag_14\n\n\tdef add_to_history(self, new):\n\t\tself.history[:self.win_size-1, :] = self.history[1:self.win_size, :]\n\t\tself.history[self.win_size-1] = new\n\n\n","repo_name":"gmenchetti/Sales-Prediction","sub_path":"autoregression_preprocessing.py","file_name":"autoregression_preprocessing.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"15439157465","text":"# coding: utf-8\nhost = '0.0.0.0'\nrootDir = 'D:\\www\\dcrawler'\nport = 5000\n# debug = True\ndebug = False\nname = 'dcrawler'\nversion = '1.0.0'\nselfApiUrl = 'https://sandbox-listing-service.eccang.com'\nproductCenterUrl = 'https://sandbox-product-service.eccang.com/'\nszListingDynamicProxyUrl = 'http://sz-listing-dynamic-proxy.eccang.com/'\nrancherUrl = 'https://k8s-rancher.ecgtool.com'\nyixiaobaoCrwalerUrl = 'http://112.74.107.95:6614'\nrancherProjectsBasicAuth = ''\nrancherProjectName = 'listing'\nrancherProjects = {\n rancherProjectName: {\n 'test': 'c-9rw74:p-6xfxf',\n 'preview': 'c-9rw74:p-j29ts',\n 'production': 'c-9rw74:p-rl2lp'\n }\n}\nrunningServerLocation = 'inside'\n# 是否是异步任务\nisUrlsChannelsAsynchrTask = False\nisTask = False\nrunTaskLog = [\n #类型,平台,站点,第一方法需要执行多少个,第二方法需要执行多少个,第三方法需要执行多少个\n '1,shopee,ph,consumerGetCrawlerDataPushEs:1,consumerSalesDataCrawlerRedis:10,getEsSalesDataPushRedis:1',\n '1,shopee,id,consumerGetCrawlerDataPushEs:1,consumerSalesDataCrawlerRedis:10,getEsSalesDataPushRedis:1',\n '1,shopee,my,consumerGetCrawlerDataPushEs:1,consumerSalesDataCrawlerRedis:10,getEsSalesDataPushRedis:1',\n '1,shopee,th,consumerGetCrawlerDataPushEs:1,consumerSalesDataCrawlerRedis:10,getEsSalesDataPushRedis:1',\n # 'o,urlsChannels,0,1,10',\n # 'o,urlsChannels,0,1,10',\n # 'o,urlsChannels,0,1,10',\n # 'o,urlsChannels,0,1,10',\n # 'o,urlsChannels,0,1,10',\n ]\n\nisGevent = True\nprocessesNum = 1\nthreadedStatus = False\nspecialSegmentationSymbol = '8-8=,=8-8'\nfreeProxyPoolNum = 1\ntrueProxyPoolNum = 3\nallowProxyDict = {\n 'inside': [\n #'pp16ipyun',\n # 'pp66ip',\n # 'pp89ip',\n # 'ppData5u',\n # 'ppGoubanjia',\n # 'ppIp3366',\n # 'ppIphai',\n # 'ppJiangxianli',\n # 'ppKuaidaili',\n # 'ppProxyCoderBusy',\n # 'ppXicidali',\n # 'ppXiLadali',\n 'ppZhiMadali',\n # 'ppIpIdea',\n ],\n 'outside': [\n 'ppCnProxy',\n 'ppProxyList',\n 'ppProxyListPlus',\n ],\n}\namz_domain_mapping = {\n # 中国站\n 'cn': 'https://www.amazon.cn',\n # 加拿大站\n 'ca': 'https://www.amazon.ca',\n # 印度站\n 
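# ---------------------------------------------------------------------------
# Aside -- a minimal, self-contained sketch (assumed code, not from any record
# above) of the walk-forward loop that TestPreprocessingAutoreg supports:
# get_day_X_y() yields one day of features at a time, the model predicts it,
# and add_to_history() rolls the prediction into a fixed-size buffer so the
# next day's mean/lag features come from predictions rather than ground truth.
# `model` is a hypothetical object with a scikit-learn style .predict();
# `day_frames` and `history` are assumed inputs.
import numpy as np

def walk_forward(model, day_frames, history):
    # history: (win_size, n_stores) array of the most recent daily sales
    preds = []
    for X in day_frames:
        feats = np.column_stack([X, history.mean(axis=0), history[-7], history[-14]])
        y_hat = model.predict(feats)
        history[:-1] = history[1:]  # drop the oldest day ...
        history[-1] = y_hat         # ... and roll the newest prediction in
        preds.append(y_hat)
    return preds
# ---------------------------------------------------------------------------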
'in': 'https://www.amazon.in',\n    # 墨西哥站\n    'mx': 'https://www.amazon.com.mx',\n    # 德国站\n    'de': 'https://www.amazon.de',\n    # 意大利站\n    'it': 'https://www.amazon.it',\n    # 新加坡站\n    'sg': 'https://www.amazon.sg',\n    # 日本站\n    'jp': 'https://www.amazon.co.jp',\n    # 法国站\n    'fr': 'https://www.amazon.fr',\n    # 澳大利亚站\n    'au': 'https://www.amazon.com.au',\n    # 美国站\n    'us': 'https://www.amazon.com',\n    # 英国站\n    'uk': 'https://www.amazon.co.uk',\n    # 荷兰站\n    'nl': 'https://www.amazon.nl',\n    # 西班牙站\n    'es': 'https://www.amazon.es',\n    # 阿联酋站\n    'ae': 'https://www.amazon.ae',\n    # 波兰\n    'pl': 'https://www.amazon.pl',\n}\n\n\n# 各接口缓存存活时间\ntaobao_life_time = {\n    'info': 86400 * 10,\n    'keyword': 86400 * 0,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5,\n    'library':86400 * 5\n}\npdd_life_time = {\n    'info': 86400 * 7,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5,\n    'library': 86400 * 5\n}\nshopee_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5,\n    'library':86400 * 5\n}\nlazada_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5\n}\nsoukuan_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5\n}\nali1688_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5\n}\nebay_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5\n}\naliexpress_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5\n}\namazon_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5,\n    \"info_v\": 86400 * 5,\n}\nbao66_life_time = {\n    'info': 86400 * 5,\n    'keyword': 86400 * 3,\n    'creg': 86400 * 1.5,\n    'shop': 86400 * 5\n}\n\n\n","repo_name":"kyqv/wow-dcrawler","sub_path":"Configs/defaultApp.py","file_name":"defaultApp.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27459952400","text":"'''\nWrite a function that takes a head node and an integer value n,\nand then returns the nth to last node in the linked list.\n\nFor example, given:\n\nclass Node:\n\n    def __init__(self, value):\n        self.value = value\n        self.next_node = None\n\nExample Input and Output:\n\na = Node(1)\nb = Node(2)\nc = Node(3)\nd = Node(4)\ne = Node(5)\n\na.next_node = b\nb.next_node = c\nc.next_node = d\nd.next_node = e\n\ntarget_node = nth_to_last_node(2, a) = d\n'''\n\nclass Node:\n\n    def __init__(self, value):\n        self.value = value\n        self.next_node = None\n\ndef nth_to_last_node(n, head):\n\n    left = right = head\n\n    for i in range(n - 1):\n        \n        if not right.next_node:\n            raise LookupError('n is larger than the linked list!')\n\n        right = right.next_node\n\n    while right.next_node:\n\n        left = left.next_node\n        right = right.next_node\n\n    return left","repo_name":"nirmalnishant645/Python-Programming","sub_path":"Practice-Problems/LinkedListNthToLastNode.py","file_name":"LinkedListNthToLastNode.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"73285321193","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport asyncio\nimport logging\nimport shutil\n\nfrom PySide6.QtCore import QObject, Signal\n\nfrom libbaram import utils\nfrom libbaram.run import runUtility\nfrom libbaram.openfoam.dictionary.decomposePar_dict import 
DecomposeParDict\n\nfrom baramMesh.app import app\n\nlogger = logging.getLogger(__name__)\n\n\nclass RedistributionTask(QObject):\n progress = Signal(str)\n\n def __init__(self, fileSystem):\n super().__init__()\n\n self._fileSystem = fileSystem\n\n async def redistribute(self, numCores):\n processorFolders = self._fileSystem.processorFolders()\n nProcessorFolders = len(processorFolders)\n\n if numCores == nProcessorFolders:\n return\n\n if numCores == 1 and nProcessorFolders == 0:\n return\n\n try:\n await self.reconstruct()\n await self.decompose(numCores)\n await app.window.meshManager.load()\n\n except Exception as ex:\n logger.info(ex, exc_info=True)\n raise\n\n async def reconstruct(self):\n caseRoot = self._fileSystem.caseRoot()\n\n processorFolders = self._fileSystem.processorFolders()\n nProcessorFolders = len(processorFolders)\n\n if nProcessorFolders > 0:\n self.progress.emit(self.tr('Reconstructing the case.'))\n\n latestTime = self._fileSystem.latestTime(self._fileSystem.processorPath(0))\n proc = await runUtility('reconstructParMesh', '-allRegions', '-constant', '-case', caseRoot,\n cwd=caseRoot, stdout=asyncio.subprocess.PIPE)\n\n # This loop will end if the PIPE is closed (i.e. the process terminates)\n async for line in proc.stdout:\n log = line.decode('utf-8')\n if log.startswith('Time = constant'):\n self.progress.emit(self.tr(f'Reconstructing the case. (constant)'))\n elif log.startswith('Time = '):\n self.progress.emit(self.tr(f'Reconstructing the case. ({log.strip()}/{latestTime})'))\n\n result = await proc.wait()\n if result != 0:\n raise RuntimeError(self.tr('Reconstruction failed.'))\n\n for folder in processorFolders:\n utils.rmtree(folder)\n\n async def decompose(self, numCores):\n if numCores > 1 and self._fileSystem.boundaryFilePath().exists():\n caseRoot = self._fileSystem.caseRoot()\n\n self.progress.emit(self.tr('Decomposing the case.'))\n\n tempPath = self._fileSystem.caseRoot() / 'decomposing'\n if tempPath.exists():\n utils.rmtree(tempPath)\n\n tempPath.mkdir()\n\n DecomposeParDict(self._fileSystem.caseRoot(), numCores).build().write()\n proc = await runUtility('decomposePar', '-time', 'none', '-case', caseRoot, cwd=caseRoot)\n\n result = await proc.wait()\n if result != 0:\n raise RuntimeError(self.tr('Decomposition failed.'))\n\n for i in range(numCores):\n p = self._fileSystem.processorPath(i)\n p.rename(tempPath / p.name)\n\n t = 1\n while self._fileSystem.timePath(t).is_dir():\n time = str(t)\n proc = await runUtility('decomposePar', '-time', time, '-case', caseRoot, cwd=caseRoot)\n\n result = await proc.wait()\n if result != 0:\n raise RuntimeError(self.tr('Decomposition failed.'))\n\n for i in range(numCores):\n processorFolder = self._fileSystem.processorPath(i)\n timeFolder = processorFolder / time\n timeFolder.rename(tempPath / processorFolder.name / time)\n shutil.rmtree(processorFolder)\n\n t += 1\n\n for i in range(numCores):\n folderName = f'processor{i}'\n p = tempPath / folderName\n p.rename(caseRoot / folderName)\n\n shutil.rmtree(tempPath)\n\n # Delete time folders in case root\n #\n # Do NOT delete the polyMesh in case root\n # It was decided to be kept\n #\n for time in self._fileSystem.times(parent=caseRoot):\n utils.rmtree(caseRoot / time)\n\n self.progress.emit(self.tr(f'Decomposition 
done.'))\n","repo_name":"nextfoam/baram","sub_path":"baramMesh/openfoam/redistribution_task.py","file_name":"redistribution_task.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"9497598449","text":"import scrapy\nfrom scrapy.http.response.html import HtmlResponse\n\nfrom .Base import BaseSpider\n\n\nclass YY24(BaseSpider):\n name = \"YY24\"\n base_link = ''\n\n def start_requests(self):\n # 初始页\n urls = [\n 'https://www.gdmede.com.cn/announcement/?page=1',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse, meta={'page': 1})\n\n def parse(self, response: HtmlResponse):\n # 解析列表页\n datas = response.xpath('//div[@class=\"wrap\"]/a')\n if not datas:\n return\n # 解析列表页\n for data in datas:\n save = {}\n detail_url = f\"https://www.gdmede.com.cn\" + data.xpath('./@href').extract()[0]\n save['title'] = data.xpath('./div[1]/span/text()').extract()[0]\n save['release_date'] = data.xpath('./div[2]/div[@class=\"date\"]/text()').extract()[0]\n yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'save': save})\n\n next_page = response.meta['page'] + 1\n yield scrapy.Request(url=f'https://www.gdmede.com.cn/announcement/?page={next_page}',\n callback=self.parse, meta={'page': next_page})\n\n def parse_detail(self, response: HtmlResponse):\n # 解析详情页\n response.meta['save']['ori_url'] = response.url\n response.meta['save']['mainbody'] = '\\n'.join(\n [''.join([data.extract() for data in datas.xpath('.//text()')]) for datas in\n response.xpath('//div[@class=\"f-detail-content\"]/p')])\n response.meta['save']['mainbody_table'] = response.meta['save']['title']\n response.meta['save']['annex_link'] = ''\n response.meta['save']['annex_title'] = ''\n response.meta['save']['tag'] = int(self.name.replace('YY', ''))\n yield response.meta['save']\n","repo_name":"Medical-Knowledge-Atlas/SpiderMan","sub_path":"SpiderMan/medical/YY24.py","file_name":"YY24.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41439640297","text":"#importando o json para o projeto\nimport json \n\n#Abrindo o meu diretorio para buscar as informações do Json\nDadosJson = open('../Media de vendas/Dados.json')\n\n#Carregando os dados dentro do Json\nDados = json.load(DadosJson)\n\n#Variaveis \naux = 0\nmaior = 0\nmenor = 0\nsoma = 0\nmedia = 0\n\nfor dia in Dados:\n if(dia ['valor']) != 0:\n aux = dia['valor']\n\n#Calculo do maior faturamento \n if(aux > maior):\n maior = aux \n \n#Calculo do menor faturamento \n if(menor == 0):\n menor = aux\t\n elif(aux < menor):\n menor = aux\n \n soma += dia ['valor']\n print('O maior valor de faturamento do mês foi: R$ ' + str(maior) + '.') \n print('O menor valor de faturamento do mês foi: R$ ' + str(menor) + '.') \n \n #Dividindo a soma pelos dias para chegar na media \n media = soma / len(Dados) \n \n #Variavel para mostrar os dias em que o faturamento foi acima da media \n diasFaturamento = ''\n for i in Dados:\n if(i['valor']) != 0:\n if(i['valor']) > media: \n diasFaturamento += (str(i['dia']) + ' ')\n print('Os dias de maior faturamento mensal foram: ' + diasFaturamento)","repo_name":"igorrsilvaa/faturamento","sub_path":"Dados.py","file_name":"Dados.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41314391029","text":"import torch\nimport 
torch.nn as nn\nimport torch\nfrom torch.autograd import Variable\nimport copy\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom representjs.models.encoder import CodeEncoder\n\nclass Pooler(nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.dense = nn.Linear(hidden_size, hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n pooled_output = self.dense(hidden_states)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\nclass CloneDetectionEncoder(nn.Module):\n def __init__(\n self,\n n_tokens,\n d_model=512,\n d_rep=128,\n n_head=8,\n n_encoder_layers=6,\n d_ff=2048,\n dropout=0.1,\n activation=\"relu\",\n norm=True,\n pad_id=None,\n encoder_type=\"transformer\",\n ):\n super(CloneDetectionEncoder, self).__init__()\n assert norm\n assert pad_id is not None\n self.config = {k: v for k, v in locals().items() if k != \"self\"}\n self.d_model = d_model\n # Encoder and output for type prediction\n assert encoder_type in [\"transformer\", \"lstm\"]\n if encoder_type == \"transformer\":\n self.encoder = CodeEncoder(\n n_tokens, d_model, d_rep, n_head, n_encoder_layers, d_ff, dropout, activation, norm, pad_id, project=False\n )\n self.pooler = Pooler(d_model)\n\n def forward(self, src_tok_ids, lengths=None):\n r\"\"\"\n Arguments:\n src_tok_ids: [B, L] long tensor\n output_attention: [B, L, L] float tensor\n \"\"\"\n\n # Encode\n memory, _ = self.encoder(src_tok_ids, lengths) # LxBxD\n x = memory[0, :, :] # BxD\n x = self.pooler(x)\n\n return (memory, x)\n \nclass Model(nn.Module): \n def __init__(self, encoder,tokenizer,args):\n super(Model, self).__init__()\n self.encoder = encoder\n self.tokenizer=tokenizer\n self.args=args\n \n \n def forward(self, input_ids=None,p_input_ids=None,n_input_ids=None,labels=None): \n bs,_=input_ids.size()\n input_ids=torch.cat((input_ids,p_input_ids,n_input_ids),0)\n \n outputs=self.encoder(input_ids)[1] # 3B * D\n outputs=outputs.split(bs,0) # B * D , B * D , B * D \n \n prob_1=(outputs[0]*outputs[1]).sum(-1) # B\n prob_2=(outputs[0]*outputs[2]).sum(-1)\n temp=torch.cat((outputs[0],outputs[1]),0) # 2B * D\n temp_labels=torch.cat((labels,labels),0) # 2B\n prob_3= torch.mm(outputs[0],temp.t()) # B * 2B\n mask=labels[:,None]==temp_labels[None,:] # B * 2B\n prob_3=prob_3*(1-mask.float())-1e9*mask.float() # B * 2B\n \n prob=torch.softmax(torch.cat((prob_1[:,None],prob_2[:,None],prob_3),-1),-1) # # B * 2B+2\n loss=torch.log(prob[:,0]+1e-10)\n loss=-loss.mean()\n return loss,outputs[0]\n\n \n \n \n","repo_name":"ZZR0/ISSTA22-CodeStudy","sub_path":"Task/Clone-Detection-POJ-104/contracode/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"32027846781","text":"\"\"\" \nGiven an unsorted array Arr[] of N integers and an integer X, find floor and ceiling \nof X in Arr[0..N-1].\n\nFloor of X is the largest element which is smaller than or equal to X. Floor of X \ndoesn’t exist if X is smaller than smallest element of Arr[].\n\nCeil of X is the smallest element which is greater than or equal to X. 
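# ---------------------------------------------------------------------------
# Aside -- the clone-detection Model above optimises an in-batch contrastive
# (InfoNCE-style) objective. This is an illustrative, self-contained
# restatement of that loss, not the original training code; tensor names
# mirror forward() (anchor/positive/negative code embeddings, B x D each).
import torch

def contrastive_loss(anchor, pos, neg, labels):
    sim_pos = (anchor * pos).sum(-1)                # B: anchor vs. its clone
    sim_neg = (anchor * neg).sum(-1)                # B: anchor vs. a non-clone
    cands = torch.cat((anchor, pos), 0)             # 2B x D in-batch candidates
    cand_labels = torch.cat((labels, labels), 0)
    sims = torch.mm(anchor, cands.t())              # B x 2B similarities
    same = labels[:, None] == cand_labels[None, :]  # mask same-problem pairs
    sims = sims * (~same).float() - 1e9 * same.float()
    logits = torch.cat((sim_pos[:, None], sim_neg[:, None], sims), -1)
    prob = torch.softmax(logits, -1)
    return -torch.log(prob[:, 0] + 1e-10).mean()    # the true clone should win
# ---------------------------------------------------------------------------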
Ceil of X \ndoesn’t exist if X is greater than greatest element of Arr[].\n\nN = 8, X = 7\nArr[] = {5, 6, 8, 9, 6, 5, 5, 6}\nOutput: 6 8\nExplanation:\nFloor of 7 is 6 and ceil of 7 \nis 8.\n\"\"\"\n\n\ndef getFloorAndCeil(arr, n, x):\n # code here\n c = -1\n f = -1\n c1 = 0\n\n for i in arr:\n if i > x:\n if c1 == 0:\n c = i\n c1 += 1\n else:\n c = min(c, i)\n if i < x:\n f = max(f, i)\n\n if i == x:\n c = i\n f = i\n break\n\n return [f, c]\n\n\nN = 8\nX = 7\narr = [5, 6, 8, 9, 6, 5, 5, 6]\n","repo_name":"atharvaagrawal/dsa","sub_path":"Striver-A2Z/04_Binary_Search/4_1D_BS/3_FloorAndCeil.py","file_name":"3_FloorAndCeil.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15162743649","text":"import sys\r\nn, a, b = map(int, input().split())\r\ns = [int(input()) for i in range(n)]\r\n\r\ndif = max(s)-min(s)\r\nif dif == 0:\r\n print(-1)\r\n sys.exit()\r\n\r\np = b/dif\r\nq = a - sum(s)*p/n\r\nprint(p, q)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc043/A/4339762.py","file_name":"4339762.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"28605997127","text":"\"\"\"\nTic-Tac-Toe\nShutong Bao\n\"\"\"\n\n# create board function\n\n\ndef create_board():\n board = []\n for slot in range(9):\n board.append(slot + 1)\n return board\n\n# print board function\n\n\ndef print_board(board):\n print()\n print(board[0], '|', board[1], '|', board[2])\n print('- + - + -')\n print(board[3], '|', board[4], '|', board[5])\n print('- + - + -')\n print(board[6], '|', board[7], '|', board[8])\n print()\n\n\nturn_count = 0\n\n\ndef turn(turn_count):\n # while turn is odd, player is X\n # while turn is even, player is O\n if turn_count % 2 == 0:\n return \"O\"\n else:\n return \"X\"\n\n\ndef player_turn(player, board):\n if player == \"X\":\n spot = int(input(\"Choose a spot (1-9): \"))\n while spot not in range(1, 10):\n spot = int(\n input(\"You chose a number that is not between 1 and 9. Try again: \"))\n while board[spot - 1] == \"X\" or board[spot - 1] == \"O\":\n spot = int(\n input(\"You chose a spot that is already taken from another player. Try again: \"))\n board[spot - 1] = player\n print_board(board)\n else:\n spot = int(input(\"Choose a spot (1-9): \"))\n while spot not in range(1, 10):\n spot = int(\n input(\"You chose a number that is not between 1 and 9. Try again: \"))\n while board[spot - 1] == \"X\" or board[spot - 1] == \"O\":\n spot = int(\n input(\"You chose a spot that is already taken from another player. 
Try again: \"))\n board[spot - 1] = player\n print_board(board)\n\n# check if there is a winner\n\n\ndef has_winner(board):\n return (board[0] == board[1] == board[2] or\n board[3] == board[4] == board[5] or\n board[6] == board[7] == board[8] or\n board[0] == board[3] == board[6] or\n board[1] == board[4] == board[7] or\n board[2] == board[5] == board[8] or\n board[0] == board[4] == board[8] or\n board[2] == board[4] == board[6])\n\n# check if there is a draw\n\n\ndef has_draw(board):\n # if all piece are taken and there is no winner, it is a draw\n count = 0\n for square in board:\n if square == 'X' or square == 'O':\n count += 1\n else:\n return False\n if count == 9:\n return True\n\n\ndef main(turn_count):\n board = create_board()\n print_board(board)\n while not has_winner(board) or has_draw(board):\n turn_count += 1\n if has_draw(board) == True:\n print(\"It's a draw!\")\n break\n player_turn(turn(turn_count), board)\n if has_winner(board):\n print(turn(turn_count), \"wins!\")\n break\n\n\nif __name__ == \"__main__\":\n main(turn_count)\n","repo_name":"ShutongB/cse210-01","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40460937598","text":"####################################Numerical Study of Rockfall Impact on a Shelter#######################################\n# #\n# #\n# #\n# #\n# #\n# #\n# #\n# #\n# #\n##########################################################################################################################\n\n# 感谢玄承涛从小到大在学习上的帮助,在人生道路上的指引\n# 此致,敬礼 :)\n \n\nprint('Study the impact force onto the RC shelter')\n\n# Import the using modules\nfrom woo.core import *\nfrom woo.dem import *\nfrom woo.fem import *\nimport woo\nimport woo.gl\nimport math\nfrom math import pi\nfrom minieigen import *\nfrom past.builtins import execfile\nimport woo.pack, woo.utils, numpy\n\n# Set Api\nwoo.master.usesApi=10103\n\n# Set the graphic options\nS=woo.master.scene=Scene(fields=[DemField(gravity=(0,0,-10))],dtSafety=0.8)\nS.gl.membrane.node=True\nS.gl=woo.gl.GlSetup(\n woo.gl.Gl1_Membrane(uScale=0,relPhi=0,refConf=False),\n woo.gl.Gl1_DemField(shape=woo.gl.Gl1_DemField.shapeNonSpheres,colorBy=woo.gl.Gl1_DemField.colorDisplacement,vecAxis='norm',colorBy2=woo.gl.Gl1_DemField.colorDisplacement),\n)\n#S.gl.demField.colorRange2.mnmx=(0,2.)\n\n# Define the soil grains (num=20000,mechanical property as below)\nif 1:\n sp=woo.pack.SpherePack()\n sp.makeCloud((-1.4,-1.4,0.1),(1.4,1.4,0.65),rMean=.025,rRelFuzz=.3,periodic=False,num=20000,)\n # add to the simulation\n sp.toSimulation(S,mat=FrictMat(young=200e6,density=2650,ktDivKn=0.25,tanPhi=0.3))\n\n# Define the RC plate (Dimension=2.8m*2.8m,thickness=0.2m,mechanical property as below)\nplate_thickness=0.2\nht=plate_thickness/2.\nxmax,ymax=2.8,2.8\nxdiv,ydiv=11,11\nmat=FrictMat(young=30e9,tanPhi=.3,ktDivKn=.25,density=2500)\nff=woo.pack.gtsSurface2Facets(woo.pack.sweptPolylines2gtsSurface([[(x,y,0) for x in numpy.linspace(-1.4,xmax/2,num=xdiv)] for y in numpy.linspace(-1.4,ymax/2,num=ydiv)]),flex=True,halfThick=ht,mat=mat)\nS.dem.par.add(ff,nodes=True)\n \n# Define the box which hold the soil grains\nwall=woo.dem.Wall.makeBox(\n ((-1.4,-1.4,0.1),(1.4,1.4,1.1)),\n (1,1,0,1,1,0)\n)\nS.dem.par.add(wall)\n\n# Set the boundary condition of the RC plate (Fix the translational D.o.F at the four corner of it)\nfor n in S.dem.nodes:\n n.dem.blocked=''\n if (n.pos[0]==-1.4) and (n.pos[1]==-1.4): 
n.dem.blocked='xyz'\n if (n.pos[0]==1.4) and (n.pos[1]==-1.4): n.dem.blocked='xyz'\n if (n.pos[0]==1.4) and (n.pos[1]==1.4): n.dem.blocked='xyz'\n if (n.pos[0]==-1.4) and (n.pos[1]==1.4): n.dem.blocked='xyz'\n\n# Set the fake inertia and mass to the lumped node of the CST\nfor n in S.dem.nodes: DemData.setOriMassInertia(n)\n\n# block the rotational D.o.F of the spheres\nfor i in range(len(sp)):\n S.dem.par[i].blocked=''\n if i in range(len(sp)): S.dem.par[i].blocked='XYZ'\n\n# radius of the boulder\nr=0.3\n# weight of the boulder\nm=100\n# volume of the boudler\nv=4/3*pi*r**3\n# density of the boulder\nd=m/v\n# create material of boulder\nmat_boulder=woo.dem.FrictMat(\n young=200e6, \n density=d, \n tanPhi=0.3, \n ktDivKn=0.25 \n)\n# create boulder shape\nsphere=woo.dem.Sphere.make(\n (0,0,(0.53+r)),\n radius=r, \n wire=False, \n color=2, \n mat=mat_boulder, \n fixed=False \n)\n# according to the law of conservation of energy\n# MgH=1/2*M*V^2\ng=9.8\nH=50\nv=math.sqrt(2*g*H)\n\n# Find the supports nodes\ni=0\nfor i in range(len(S.dem.nodes)):\n # find the corner nodes (reaction)\n if (S.dem.nodes[i].pos[0]==-1.4) and (S.dem.nodes[i].pos[1]==-1.4): \n print('c_1',i)\n c_1=i\n elif (S.dem.nodes[i].pos[0]==1.4) and (S.dem.nodes[i].pos[1]==-1.4): \n print('c_2',i)\n c_2=i \n elif (S.dem.nodes[i].pos[0]==1.4) and (S.dem.nodes[i].pos[1]==1.4): \n print('c_3',i)\n c_3=i \n elif (S.dem.nodes[i].pos[0]==-1.4) and (S.dem.nodes[i].pos[1]==1.4): \n print('c_4',i)\n c_4=i \n else: i=i+1\n\n# find the diagonal nodes (check the displ)\ni=0\nfor i in range(len(S.dem.nodes)):\n if (S.dem.nodes[i].pos[0]>-1.4 and S.dem.nodes[i].pos[0]<-0.84) and (S.dem.nodes[i].pos[1]>-1.4 and S.dem.nodes[i].pos[1]<-0.84) and (S.dem.nodes[i].pos[2]==0): \n print('d_5',i)\n d_5=i\n elif (S.dem.nodes[i].pos[0]==-0.84) and (S.dem.nodes[i].pos[1]==-0.84) and (S.dem.nodes[i].pos[2]==0): \n print('d_4',i)\n d_4=i\n elif (S.dem.nodes[i].pos[0]>-0.84 and S.dem.nodes[i].pos[0]<-0.28) and (S.dem.nodes[i].pos[1]>-0.84 and S.dem.nodes[i].pos[1]<-0.28) and (S.dem.nodes[i].pos[2]==0): \n print('d_3',i)\n d_3=i\n elif (S.dem.nodes[i].pos[0]>-0.56 and S.dem.nodes[i].pos[0]<-0.) and (S.dem.nodes[i].pos[1]>-0.56 and S.dem.nodes[i].pos[1]<-0.) and (S.dem.nodes[i].pos[2]==0): \n print('d_2',i)\n d_2=i\n elif (S.dem.nodes[i].pos[0]==0) and (S.dem.nodes[i].pos[1]==0): \n print('d_1',i)\n d_1=i\n else: i=i+1\n \n# find the symmetry diagonal nodes (compare the displ)\ni=0\nfor i in range(len(S.dem.nodes)):\n if (S.dem.nodes[i].pos[0]>-1.4 and S.dem.nodes[i].pos[0]<-0.84) and (S.dem.nodes[i].pos[1]>0.84 and S.dem.nodes[i].pos[1]<1.4) and (S.dem.nodes[i].pos[2]==0): \n print('d_5_s',i)\n d_5_s=i\n elif (S.dem.nodes[i].pos[0]==-0.84) and (S.dem.nodes[i].pos[1]>0.56 and S.dem.nodes[i].pos[1]<0.84) and (S.dem.nodes[i].pos[2]==0): \n print('d_4_s',i)\n d_4_s=i\n elif (S.dem.nodes[i].pos[0]>-0.84 and S.dem.nodes[i].pos[0]<-0.28) and (S.dem.nodes[i].pos[1]>0.28 and S.dem.nodes[i].pos[1]<0.56) and (S.dem.nodes[i].pos[2]==0): \n print('d_3_s',i)\n d_3_s=i\n elif (S.dem.nodes[i].pos[0]>-0.56 and S.dem.nodes[i].pos[0]<-0.) and (S.dem.nodes[i].pos[1]>0. 
and S.dem.nodes[i].pos[1]<0.28) and (S.dem.nodes[i].pos[2]==0): \n print('d_2_s',i)\n d_2_s=i\n elif (S.dem.nodes[i].pos[0]==0) and (S.dem.nodes[i].pos[1]==0) and (S.dem.nodes[i].pos[2]==0): \n print('d_1_s',i)\n d_1_s=i\n else: i=i+1\n\n# Find the overall force and calculate the stress increment at these 8 areas (8 specific area)\n# radius_1\nr_1=0.15\n# radius_2\nr_2i=0.25\nr_2o=0.35\n# radius_3\nr_3i=0.55\nr_3o=0.65\n# radius_4\nr_4i=0.85\nr_4o=0.95\n# radius_5\nr_5i=1.15\nr_5o=1.25\n# raidus_new\nr_6=0.45\n# area_1\na_1=pi*r_1**2\n# area_2\na_2=pi*r_2o**2-pi*r_2i**2\n# area_3\na_3=pi*r_3o**2-pi*r_3i**2\n# area_4\na_4=pi*r_4o**2-pi*r_4i**2\n# area_5\na_5=pi*r_5o**2-pi*r_5i**2\n# area_new\na_6=pi*r_2i**2-pi*r_1**2\na_7=pi*r_6**2-pi*r_2o**2\na_8=pi*r_3i**2-pi*r_6**2\n \n# determine the top layer height\nh=[]\nfor i in range(len(sp)):\n a=S.dem.par[i].pos[2]\n h=h+[a]\n \n# find the exact particle\nfor i in range(len(sp)):\n if S.dem.par[i].pos[2]==max(h):\n print('the heightest particle is',i)\n h_n=i\n\nprint('the top layer height is ',max(h))\n\n# Create the engine\nS.engines=DemField.minimalEngines(damping=.25,verletDist=-0.01)+[IntraForce([In2_Membrane_ElastMat(bending=True)])]+[PyRunner(50,'check(S)')]\n\n# From the Check(S), plotting the results and adding the rockfall\ndef check(S):\n if S.step==100:\n S.dem.par.add(sphere)\n p1=S.dem.par[-1]\n p1.vel=(0,0,-v)\n elif S.step>100:\n # total force\n total_force=0\n # stress\n force_1=0\n force_2=0\n force_3=0\n force_4=0\n force_5=0\n force_6=0\n force_7=0\n force_8=0\n stress_1=0\n stress_2=0\n stress_3=0\n stress_4=0\n stress_5=0\n stress_6=0\n stress_7=0\n stress_8=0\n for c in S.dem.con:\n # filter out the contacts with (par+par)+(par+membrane)\n if not isinstance (c.pA.shape,woo.dem.Wall) and not isinstance (c.pB.shape,woo.dem.Wall):\n # filter out the contacts with (par+membrane)\n if isinstance (c.pA.shape,woo.fem.Membrane) or isinstance (c.pB.shape,woo.fem.Membrane):\n total_force=total_force+abs(c.phys.force[0])\n # filter the contacts in the specific area\n # area_1 \n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_2i: \n force_2=force_2+abs(c.phys.force[0])\n stress_2=force_2/a_2\n \n # area_3 \n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_3i: \n force_3=force_3+abs(c.phys.force[0])\n stress_3=force_3/a_3\n \n # area_4 \n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_4i: \n force_4=force_4+abs(c.phys.force[0])\n stress_4=force_4/a_4\n \n # area_5 \n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_5i:\n force_5=force_5+abs(c.phys.force[0])\n stress_5=force_5/a_5\n \n # area_6\n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_1:\n force_6=force_6+abs(c.phys.force[0])\n stress_6=force_6/a_6\n \n # area_7\n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_2o:\n force_7=force_7+abs(c.phys.force[0])\n stress_7=force_7/a_7\n \n # area_8\n if (c.pB.pos[0]**2+c.pB.pos[1]**2)**0.5r_6:\n force_8=force_8+abs(c.phys.force[0])\n stress_8=force_8/a_8\n \n S.plot.addData(t=S.time,i=S.step,\n # rock boulder impact force [kN]\n uf=S.dem.par[-1].f[2]/1000,\n # reaction force [kN]\n r1=S.dem.nodes[c_1].dem.force[2]/1000,r2=S.dem.nodes[c_2].dem.force[2]/1000,r3=S.dem.nodes[c_3].dem.force[2]/1000,r4=S.dem.nodes[c_4].dem.force[2]/1000,\n # displacement [mm]\n d1=S.dem.nodes[d_1].pos[2]*1000,d2=S.dem.nodes[d_2].pos[2]*1000,d3=S.dem.nodes[d_3].pos[2]*1000,d4=S.dem.nodes[d_4].pos[2]*1000,d5=S.dem.nodes[d_5].pos[2]*1000,\n # displacement symmetry [mm]\n 
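# ---------------------------------------------------------------------------
# Aside (illustrative only, not original code) -- the area_1..area_8 blocks
# above all repeat one pattern: sum the contact forces whose radius falls in
# a given annulus, then divide by that annulus area to get a stress. The
# generic form, with numpy assumed and hypothetical xy/forces/edges inputs:
import numpy as np

def radial_stress(xy, forces, edges):
    # xy: (N, 2) contact positions; forces: (N,); edges: sorted ring radii.
    # Contacts outside [edges[0], edges[-1]) are ignored.
    r = np.hypot(xy[:, 0], xy[:, 1])
    ring = np.digitize(r, edges) - 1  # annulus index per contact
    areas = np.pi * (edges[1:] ** 2 - edges[:-1] ** 2)
    return np.array([forces[ring == k].sum() / areas[k] for k in range(len(areas))])
# ---------------------------------------------------------------------------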
ds1=S.dem.nodes[d_1_s].pos[2]*1000,ds2=S.dem.nodes[d_2_s].pos[2]*1000,ds3=S.dem.nodes[d_3_s].pos[2]*1000,ds4=S.dem.nodes[d_4_s].pos[2]*1000,ds5=S.dem.nodes[d_5_s].pos[2]*1000, \n # the top soil layer [m]\n h=S.dem.par[h_n].pos[2],\n # overall force on the plate [kN]\n t_f=total_force/1000,\n # the stress [kPa]\n s_1=stress_1/1000,\n s_2=stress_6/1000,\n s_3=stress_2/1000,\n s_4=stress_7/1000,\n s_5=stress_8/1000,\n s_6=stress_3/1000,\n s_7=stress_4/1000,\n s_8=stress_5/1000)\n # Save the txt\n # reaction force\n S.plot.saveDataTxt('support reaction',vars=('t','r1','r2','r3','r4'))\n # displ\n S.plot.saveDataTxt('displ',vars=('t','d1','d2','d3','d4','d5'))\n # displ_symmetry\n S.plot.saveDataTxt('displ_s',vars=('t','ds1','ds2','ds3','ds4','ds5'))\n # rock boulder height\n S.plot.saveDataTxt('rock boulder height',vars=('t','h'))\n # rockboulder impact force\n S.plot.saveDataTxt('impact force',vars=('t','uf'))\n # overall force on the plate\n S.plot.saveDataTxt('overall force',vars=('t','t_f'))\n # the stress increment on the plate \n S.plot.saveDataTxt('stress increment',vars=('t','s_1','s_2','s_3','s_4'))\n \n\n\nS.saveTmp()\nimport woo.qt\nwoo.qt.Controller()\n#woo.qt.View() \n\n","repo_name":"FeixiangXuan/Rockfall-Impact-Shelter","sub_path":"Rockfall-Impact-Shelter.py","file_name":"Rockfall-Impact-Shelter.py","file_ext":"py","file_size_in_byte":12961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21841728691","text":"import json\nimport yaml\nfrom kubernetes import client, config, watch\nimport os\nimport base64\nimport platform\n\nDOMAIN = \"estaleiro.io\"\n\n\ndef play_music(crds, obj):\n metadata = obj.get(\"metadata\")\n spec = obj.get(\"spec\")\n if not metadata:\n print(\"No metadata in object, skipping: %s\" % json.dumps(obj, indent=1))\n return\n name = metadata.get(\"name\")\n namespace = metadata.get(\"namespace\") \n \n \n \n try:\n configMaps = client.CoreV1Api(api_client)\n musicConfig = spec.get('music')\n music = configMaps.read_namespaced_config_map(musicConfig, namespace)\n payload = music.binary_data['payload.mp3']\n f = open(\"temp.mp3\", \"w\")\n f.write(base64.b64decode(payload))\n f.close()\n print(\"Tocando o mp3 %s\" % name)\n obj[\"spec\"][\"status\"] = \"Sucesso\"\n arch = platform.machine()\n if arch == \"x86_64\":\n os.system(\"/usr/bin/mpg123 temp.mp3\")\n if arch == \"armv7\": \n os.system(\"/usr/bin/omxplayer temp.mp3\")\n os.remove(\"temp.mp3\")\n \n\n except:\n print(\"Falha ao decodificar o Payload do CRD %s\" % name)\n obj[\"spec\"][\"status\"] = \"Falha\"\n \n crds.replace_namespaced_custom_object(DOMAIN, \"v1\", namespace, \"musics\", name, obj)\n\n\nif __name__ == \"__main__\":\n if 'KUBERNETES_PORT' in os.environ:\n config.load_incluster_config()\n else:\n config.load_kube_config()\n configuration = client.Configuration()\n api_client = client.api_client.ApiClient(configuration=configuration)\n v1 = client.ApiextensionsV1beta1Api(api_client)\n crds = client.CustomObjectsApi(api_client)\n print(\"Esperando pra tocar musicas\")\n resource_version = ''\n while True:\n stream = watch.Watch().stream(crds.list_cluster_custom_object, DOMAIN, \"v1\", \"musics\", resource_version=resource_version)\n for event in stream:\n obj = event[\"object\"]\n operation = event['type']\n spec = obj.get(\"spec\")\n metadata = obj.get(\"metadata\")\n name = metadata.get(\"name\")\n if not spec:\n continue\n status = spec.get(\"status\")\n if operation == \"ADDED\" and status is None:\n print(\"Obtendo %s\" % 
name)\n play_music(crds, obj)\n \n","repo_name":"rikatz/palestra-qcon-2019","sub_path":"controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26839380155","text":"class Solution:\n def isPalindrome(self, x):\n \"\"\"\n 看是不是对称数字\n 逐个比较就行了\n 另一种算法:取一半反过来和后一半比\n :type x: int\n :rtype: bool\n \"\"\"\n sx = str(x)\n length = len(sx)\n middle = int(length / 2)\n for i in range(middle):\n if sx[i] != sx[length-1-i]:\n return False\n return True\n\n\ns = Solution()\nprint(s.isPalindrome(12212))","repo_name":"Ligenmt/leetcode","sub_path":"009回文数.py","file_name":"009回文数.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37076696932","text":"from flask import Flask, request, jsonify,render_template\nimport openai\nopenai.api_key = \"OPENAI_API_KEY\"\n\napp = Flask(__name__, static_folder='static')\n\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a kind helpful assistant.\"},\n]\n@app.route('/')\ndef index():\n return render_template('voiceassistant.html')\ndef getresponse(inp):\n while True:\n message = inp\n if message:\n messages.append(\n {\"role\": \"user\", \"content\": message},\n )\n chat = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\", messages=messages\n )\n\n reply = chat.choices[0].message.content\n #print(f\"ChatGPT: {reply}\")\n return reply #temporary\n\n # messages.append({\"role\": \"assistant\", \"content\": reply})\n\n@app.route('/process-input', methods=['POST'])\ndef process_input():\n data = request.json\n input_text = data['input']\n response = getresponse(input_text)\n print(data)\n print(response)\n return jsonify({\"response\": response})\n\n\nif __name__ == \"__main__\":\n app.run(port=8000, debug=True)","repo_name":"UzayPoyrza/ChatGPTVoiceAssistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25298362364","text":"\nimport jwt\nimport pymysql\nfrom config import mydb\nfrom flask import jsonify\nfrom flask import request\nfrom app import app\nfrom services.jwt import tocken_required\n\n#searching audios by title, category or album\n@app.route('/search', methods=['POST'])\n@tocken_required\ndef search():\n try:\n json = request.json\n search_value = json['search_value']\n print(search_value)\n conn = mydb.connect()\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n data = cursor.execute(\"SELECT * FROM audio WHERE title LIKE %s OR album LIKE %s\", ('%' + search_value + '%', '%' + search_value + '%'))\n print(data)\n if( data == 0):\n return jsonify(error='No matches found.. 
!!'),404\n row = cursor.fetchall()\n conn.commit()\n print(\"emprow\", row) \n response = jsonify(row)\n response.status_code = 200\n return response\n except Exception as e:\n print(e)\n\n\n\n\n\n\n\n\n\n","repo_name":"Anjitha-A/TSG--Audio-Tracker-15-02-2023-","sub_path":"backend/services/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33441199586","text":"from sqlalchemy import create_engine\nimport pandas as pd\nimport numpy as np\nimport json\n\n# Create the database engine\n# Replace 'username', 'password', and 'database_name' with your database credentials\nengine = create_engine('mysql+pymysql://root:@localhost:3306/datamadeth')\n\ndef convertir_heure_en_secondes(heure):\n h, m, s = map(int, heure.split(':'))\n return h * 3600 + m * 60 + s\n\ndef sqlToDataModel() :\n # Define your SQL queries\n queryTransition = \"SELECT Utilisateur, Titre, Date, Heure FROM transition\"\n queryFile = \"SELECT User,Filenameo, Dateupload, Timeupload FROM userfiles\"\n # Execute the SQL queries and load the results into Pandas DataFrames\n transitionData = pd.read_sql(queryTransition, engine)\n fileData = pd.read_sql(queryFile, engine)\n fileData[\"Filenameo\"] = fileData[\"Filenameo\"].apply(lambda filename : \"fileUpload: \" + filename)\n \n #rename the fileData by the transitionData columns\n fileData.columns = transitionData.columns\n\n # Concatenate the dataframes\n datas = pd.concat([transitionData, fileData])\n datas[\"Heure\"] = datas[\"Heure\"].apply(lambda x: str(x))\n datas.to_json('datas.json', orient='records')\n \n\n# Engine: The database engine\n# First criterion: identify active students by calculating the average time spent per person each day in the application in seconds\ndef filterData(type, df):\n dataSet = df[type].unique()\n data_dict = {data: df[df[type] == data] for data in dataSet}\n return data_dict\n\ndef timeStampMinutsPerUserPerDay(list_timestamps):\n for i in range(len(list_timestamps)):\n list_timestamps[i] = convertir_heure_en_secondes(list_timestamps[i].split()[2])\n list_timestamps.sort()\n\n counter = 0\n seuil = 20*60\n for i in range(len(list_timestamps)-1):\n activePeriod = list_timestamps[i+1] - list_timestamps[i]\n if(activePeriod< seuil) :\n counter+=activePeriod\n return counter\n\n\ndef averageSpentTimePerDayPerUser():\n df = pd.read_json('datas.json')\n # Filter by users and put them into a dictionary with the user as the key and their dataframe as the value\n filterDateTable = filterData(\"Utilisateur\",df)\n # Apply on each user's dataframe a filter to get the dataframe of each day of each user\n averageTimes = {}\n for key, value in filterDateTable.items():\n activePeriods = []\n nbJour = 0\n days = filterData(\"Date\", value)\n for key2, value2 in days.items():\n activePeriods.append(timeStampMinutsPerUserPerDay(value2[\"Heure\"].tolist()))\n nbJour+=1\n averageTimes[key] = np.array(activePeriods).mean()/nbJour\n return averageTimes\naverage_Spent_TimePerDayPerUser = averageSpentTimePerDayPerUser()\n\n\n# Second criterion: regular students in terms of logins by calculating the number of consecutive days they have logged in for each person #\n# Check if dates are consecutive\ndef consecutiveDays(timestamp1, timestamp2):\n return (timestamp2 - timestamp1).days == 1\n\ndef getMaxCounter(list):\n return max(list)+1 if len(list) !=0 else 1\n\ndef maxDayofSigningPerPersonn():\n\n df = pd.read_json('datas.json')\n d ={}\n 
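    # (descriptive aside) What follows computes, per user, the longest run of
    # consecutive sign-in dates: collect one date per day, sort them, grow a
    # counter while successive dates differ by exactly one day (see
    # consecutiveDays above), and reset it otherwise. getMaxCounter then
    # returns max(run) + 1 because each counter starts at 0 on the first
    # day of a streak.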
filterPerPerson = filterData(\"Utilisateur\",df)\n for key, value in filterPerPerson.items():\n listDatePerPerson = []\n counterPerPerson = 0\n filterPerPersonnPerDate = filterData(\"Date\", value)\n for dataFrame in filterPerPersonnPerDate.values():\n listDatePerPerson.append(dataFrame[\"Date\"].tolist()[0])\n\n listDatePerPerson.sort()\n listOptimalCounteur = []\n for i in range(len(listDatePerPerson)-1) :\n if(consecutiveDays(listDatePerPerson[i], listDatePerPerson[i+1])):\n counterPerPerson+=1\n listOptimalCounteur.append(counterPerPerson)\n else :\n counterPerPerson=0\n d[key] = getMaxCounter(listOptimalCounteur)\n return d\n\n\n\n#########Third criteria : student who responded the most about the messages #########\ndef countRepliedMsgPerPersonn():\n df = pd.read_json('datas.json')\n keyMessage = \"Répondre à un message\"\n count = 0\n results = {}\n filterPerPerson = filterData(\"Utilisateur\",df)\n for user, dataFrame in filterPerPerson.items():\n count =0\n for titre in dataFrame[\"Titre\"].tolist():\n if(titre == keyMessage):\n count+=1\n results[user] = count\n return results\nreplied_msg_results = countRepliedMsgPerPersonn()\n\n\n# Fourth criterion: the average number sent per person and per day #\ndef countFilePerPersonn():\n df = pd.read_json('datas.json')\n count = 0\n results = {}\n keyword = \"fileUpload\"\n filterPerPerson = filterData(\"Utilisateur\",df) \n for user, dataFrame in filterPerPerson.items():\n count =0\n for titre in dataFrame[\"Titre\"].tolist():\n if(keyword in titre) :\n count+=1\n results[user] = count\n return results\n# To count files per person\nfiles_results = countFilePerPersonn()\n\n\n#########score calculations #####\ndef scorePerPerson(typeDict, reversed):\n list =[]\n dictResult = {}\n for user, value in typeDict.items():\n list.append(value)\n list.sort(reverse = reversed)\n \n for user, value in typeDict.items():\n dictResult[user] = {\"classement\" : list.index(value) +1, \"score\" : value} \n return dictResult\n\ndef finalRanking():\n maxSignin = scorePerPerson(maxDayofSigningPerPersonn(), True)\n averageSpent = scorePerPerson(averageSpentTimePerDayPerUser(), True)\n countMessagesReplied = scorePerPerson(countRepliedMsgPerPersonn(), True)\n countFile = scorePerPerson(countFilePerPersonn(), True)\n scores = {}\n for user in maxSignin.keys():\n scores[user] = 0\n scores[user] += maxSignin[user][\"classement\"]\n scores[user] += averageSpent[user][\"classement\"]\n scores[user] += countMessagesReplied[user][\"classement\"]\n scores[user] += countFile[user][\"classement\"]\n print(scorePerPerson(scores, False))\n return scorePerPerson(scores, False)\nfinalRanking()","repo_name":"MariaFariss/analyses-traces-cmc","sub_path":"src/scripts/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23514222645","text":"#!/usr/bin/python\n\nimport argparse\nimport os\nimport sys\nimport os.path\nfrom colorama import Fore, Style, init\nfrom time import sleep\n\n# Confugre Argument Parse\n\nwg_parser = argparse.ArgumentParser()\n\nwg_parser.add_argument('-p', dest='port', action='store', help='Specify the port for the server')\nwg_parser.add_argument('-c', '--client', dest='client', action='store_true', help='Use this to create a client config')\nwg_parser.add_argument('-s', '--server', dest='server', action='store_true', help='Use this to create a client config')\nwg_parser.add_argument('-i', dest='ip', action='store', 
help='Place the ip you want the server to use Ex: 10.0.0.1/24')\nwg_parser.add_argument('-k', '--key', action='store', dest='key', help='Place the public key for the server here')\nwg_parser.add_argument('-e', '--endpoint', action='store', dest='endpoint', help='Place the server IP here')\nwg_parser.add_argument('-a', '--allow', action='store', dest='allow', help='Place the allowed IPs for communication here Ex: 10.0.0.0/24')\nwg_parser.add_argument('-r', '--peer', dest='peer', action='store_true', help='Use this to add a peer to the client config')\n\nif len(sys.argv) == 1:\n    wg_parser.print_help(sys.stderr)\n    sys.exit(1)\n\nwg = wg_parser.parse_args()\n\n# Set colorama to reset colors after each use\n\ninit(autoreset=True)\n\n# Delete configuration file if it already exists\n\nwg0 = \"/etc/wireguard/wg0.conf\"\n\n# Build the correct wireguard config file\n\nif wg.client and wg.server:\n    print(Fore.RED+Style.BRIGHT+\"You must select to build only a client or a server\")\nelif wg.client:\n    if wg.key and wg.endpoint and wg.allow and wg.ip and wg.port:\n        if os.path.isfile(wg0):\n            os.remove(wg0)\n        print(\"\")\n        print(Fore.YELLOW+Style.BRIGHT+\"Building Client config now\")\n        os.system(\"umask 077\")\n        os.system(\"wg genkey | tee privatekey | wg pubkey > publickey\")\n        os.system(\"echo [Interface] >> {wg0}\".format(wg0=wg0))\n        sleep(1)\n        with open(\"privatekey\") as p:\n            priv = p.readline()\n            priv = priv.strip(\"\\n\")\n            p.close\n        os.system(\"echo 'PrivateKey = {priv}' >> {wg0}\".format(priv=priv, wg0=wg0))\n        os.system(\"echo Address = {addr} >> {wg0}\".format(addr=wg.ip, wg0=wg0))\n        os.system(\"echo [Peer] >> {wg0}\".format(wg0=wg0))\n        os.system(\"echo PublicKey = {key} >> {wg0}\".format(key=wg.key, wg0=wg0))\n        os.system(\"echo Endpoint = {srv}:{port} >> {wg0}\".format(srv=wg.endpoint, port=wg.port, wg0=wg0))\n        os.system(\"echo AllowedIPs = {allow} >> {wg0}\".format(allow=wg.allow, wg0=wg0))\n        print(Fore.GREEN+\"Client config complete\")\n    else:\n        print(Fore.RED+Style.BRIGHT+\"Building a client config file requires a -i -p -k , -e , and -a \")\nelif wg.server:\n    if wg.port and wg.ip:\n        if os.path.isfile(wg0):\n            os.remove(wg0)\n        print(\"\")\n        print(Fore.YELLOW+Style.BRIGHT+\"Building Server config now\")\n        os.system(\"umask 077\")\n        os.system(\"wg genkey | tee privatekey | wg pubkey > publickey\")\n        os.system(\"echo [Interface] >> {wg0}\".format(wg0=wg0))\n        sleep(1)\n        with open(\"privatekey\") as p:\n            priv = p.readline()\n            priv = priv.strip(\"\\n\")\n            p.close\n        os.system(\"echo 'PrivateKey = {priv}' >> {wg0}\".format(priv=priv, wg0=wg0))\n        os.system(\"echo Address = {addr} >> {wg0}\".format(addr=wg.ip, wg0=wg0))\n        os.system(\"echo Listenport = {port} >> {wg0}\".format(port=wg.port, wg0=wg0))\n        os.system(\"echo 'PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE; iptables -I INPUT -p udp --dport {port} -j ACCEPT; sysctl -w net.ipv4.ip_forward=1' >> {wg0}\".format(wg0=wg0, port=wg.port))\n        os.system(\"echo 'PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE; iptables -D INPUT -p udp --dport {port} -j ACCEPT; sysctl -w net.ipv4.ip_forward=0' >> {wg0}\".format(wg0=wg0, port=wg.port))\n        os.system(\"echo SaveConfig = true >> {wg0}\".format(wg0=wg0))\n        print(Fore.GREEN+\"Server config complete\")\n    else:\n        print(Fore.RED+Style.BRIGHT+\"Building a server config requires a -i , -p \")\nelif wg.peer:\n    if wg.key and wg.allow:\n        print(\"\")\n        print(Fore.YELLOW+Style.BRIGHT+\"Adding Peer to configuration 
file\")\n os.system(\"echo '' >> {wg0}\".format(wg0=wg0))\n os.system(\"echo '[Peer]' >> {wg0}\".format(wg0=wg0))\n os.system(\"echo PublicKey = {key} >> {wg0}\".format(key=wg.key, wg0=wg0))\n os.system(\"echo AllowedIPs = {allow} >> {wg0}\".format(allow=wg.allow, wg0=wg0))\n print(Fore.GREEN+\"Peer added to configuration file\")\n else:\n print(Fore.RED+Style.BRIGHT+\"Adding a Peer to the configuration requires a -k and -a \")\nelse:\n print(\"You must select either a -c for client, -s for server or -r for peer\")\n","repo_name":"mojo0243/WireGuard_Builder","sub_path":"build_wg.py","file_name":"build_wg.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41710321551","text":"import openerp.netsvc\nfrom openerp.osv import osv, fields\nfrom openerp.tools.translate import _\n\n\nclass purchase_order(osv.osv):\n _inherit = \"purchase.order\"\n\n def _get_requistion(self, cr, uid, ids, context=None):\n orders = self.browse(cr, uid, ids, context=context)\n pr_ids = []\n for order in orders:\n for order_line in order.order_line:\n for pr_line in order_line.pr_line_ids:\n pr_ids.append(pr_line.requisition_id.id)\n return pr_ids\n\n def wkf_confirm_order(self, cr, uid, ids, context=None):\n res = super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context=context)\n\n pr_ids = self._get_requistion(cr, uid, ids, context)\n self.pool.get('purchase.requisition').update_done(cr, uid, pr_ids, context=context)\n return res\n\n def action_cancel_draft(self, cr, uid, ids, context=None):\n po_line = self.pool.get('purchase.order.line')\n super(purchase_order, self).action_cancel_draft(cr, uid, ids, context)\n line_ids = po_line.search(cr, uid, [('order_id', 'in', ids)])\n po_line.write(cr, uid, line_ids, {'state': 'draft'})\n return True\n\n def purchase_cancel(self, cr, uid, ids, context=None):\n po_line = self.pool.get('purchase.order.line')\n\n self.write(cr, uid, ids, {'state': 'cancel'})\n line_ids = po_line.search(cr, uid, [('order_id', 'in', ids)])\n po_line.write(cr, uid, line_ids, {'state': 'cancel'})\n\n pr_ids = self._get_requistion(cr, uid, ids, context)\n self.pool.get('purchase.requisition').update_done(cr, uid, pr_ids, context=context)\n\n return True\n\n def do_merge(self, cr, uid, ids, context=None):\n res = super(purchase_order, self).do_merge(cr, uid, ids, context)\n po_line = self.pool.get('purchase.order.line')\n\n for key, value in res.iteritems():\n line_ids = po_line.search(cr, uid, [('order_id', '=', key)])\n for line_id in po_line.browse(cr, uid, line_ids, context=context):\n old_line_ids = po_line.search(cr, uid, [('order_id', 'in', value), ('product_id', '=', line_id.product_id.id)])\n pr_line_ids = []\n for old_line in po_line.browse(cr, uid, old_line_ids, context=context):\n pr_line_ids.extend([pr_id.id for pr_id in old_line.pr_line_ids])\n po_line.write(cr, uid, [line_id.id], {'pr_line_ids': [(6, 0, pr_line_ids)]})\n\n return res\n\n\nclass purchase_order_line(osv.osv):\n _inherit = \"purchase.order.line\"\n\n _columns = {\n 'pr_line_ids': fields.many2many('purchase.requisition.line', 'pr_rel_po', 'po_id', 'pr_id', 'Purchase requisition Lines', ondelete='cascade' ),\n }","repo_name":"akhdaniel/addons","sub_path":"vit_purchase_requisition/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"42272913346","text":"import json\nimport 
random\nfrom typing import Any, Dict, List, Optional, Union\n\nimport networkx as nx\nfrom mywheel.array_like import RepeatArray\nfrom mywheel.lict import Lict\nfrom networkx.algorithms import bipartite\nfrom networkx.readwrite import json_graph\n\n\n# The `ThinGraph` class is a subclass of `nx.Graph` that defines default attributes for edges and\n# nodes.\nclass ThinGraph(nx.Graph):\n all_edge_dict = {\"weight\": 1}\n\n def single_edge_dict(self):\n return self.all_edge_dict\n\n edge_attr_dict_factory = single_edge_dict\n node_attr_dict_factory = single_edge_dict\n\n\n# The class SimpleGraph is a subclass of nx.Graph and defines default attributes for edges and nodes.\nclass SimpleGraph(nx.Graph):\n all_edge_dict = {\"weight\": 1}\n\n def single_edge_dict(self):\n return self.all_edge_dict\n\n edge_attr_dict_factory = single_edge_dict\n node_attr_dict_factory = single_edge_dict\n\n\n# The TinyGraph class is a subclass of nx.Graph that initializes a graph with a specified number of\n# nodes and provides methods for creating node dictionaries and adjacency list dictionaries.\nclass TinyGraph(nx.Graph):\n num_nodes = 0\n\n def cheat_node_dict(self):\n return Lict([dict() for _ in range(self.num_nodes)])\n\n def cheat_adjlist_outer_dict(self):\n return Lict([dict() for _ in range(self.num_nodes)])\n\n node_dict_factory = cheat_node_dict\n adjlist_outer_dict_factory = cheat_adjlist_outer_dict\n\n def init_nodes(self, n: int):\n self.num_nodes = n\n self._node = self.cheat_node_dict()\n self._adj = self.cheat_adjlist_outer_dict()\n # self._pred = self.cheat_adjlist_outer_dict()\n\n\n# The `Netlist` class represents a netlist, which is a collection of modules and nets in a graph\n# structure, and provides various properties and methods for working with the netlist.\nclass Netlist:\n num_pads = 0\n cost_model = 0\n\n def __init__(\n self,\n gra: nx.Graph,\n modules: Union[range, List[Any]],\n nets: Union[range, List[Any]],\n ):\n \"\"\"\n The function initializes an object with a graph, modules, and nets, and calculates some properties\n of the graph.\n\n :param gra: The parameter `gra` is a graph object of type `nx.Graph`. It represents the graph\n structure of the system\n :type gra: nx.Graph\n :param modules: The `modules` parameter is a list or range object that represents the modules in the\n graph. Each module is a node in the graph\n :type modules: Union[range, List[Any]]\n :param nets: The `nets` parameter is a list or range that represents the nets in the graph. 
A net is\n a connection between two or more modules\n :type nets: Union[range, List[Any]]\n \"\"\"\n self.gra = gra\n self.modules = modules\n self.nets = nets\n\n self.num_modules = len(modules)\n self.num_nets = len(nets)\n # self.net_weight: Optional[Union[Dict, List[int]]] = None\n self.module_weight: Optional[Union[Dict, List[int]]] = None\n self.module_fixed: set = set()\n\n # self.module_dict = {}\n # for v in enumerate(self.module_list):\n # self.module_dict[v] = v\n\n # self.net_dict = {}\n # for i_net, net in enumerate(self.net_list):\n # self.net_dict[net] = i_net\n\n # self.module_fixed = module_fixed\n # self.has_fixed_modules = (self.module_fixed != [])\n self.max_degree = max(self.gra.degree[cell] for cell in modules)\n # self.max_net_degree = max(self.gra.degree[net] for net in nets)\n\n def number_of_modules(self) -> int:\n \"\"\"\n The function \"number_of_modules\" returns the number of modules.\n :return: The method is returning the value of the attribute `num_modules`.\n \"\"\"\n return self.num_modules\n\n def number_of_nets(self) -> int:\n \"\"\"\n The function \"number_of_nets\" returns the number of nets.\n :return: The number of nets.\n \"\"\"\n return self.num_nets\n\n def number_of_nodes(self) -> int:\n \"\"\"\n The function \"number_of_nodes\" returns the number of nodes in a graph.\n :return: The number of nodes in the graph.\n \"\"\"\n return self.gra.number_of_nodes()\n\n def number_of_pins(self) -> int:\n \"\"\"\n The function `number_of_pins` returns the number of edges in a graph.\n :return: The number of edges in the graph.\n \"\"\"\n return self.gra.number_of_edges()\n\n def get_max_degree(self) -> int:\n \"\"\"\n The function `get_max_degree` returns the maximum degree of nodes in a graph.\n :return: the maximum degree of the nodes in the graph.\n \"\"\"\n return max(self.gra.degree[cell] for cell in self.modules)\n\n def get_module_weight(self, v) -> int:\n \"\"\"\n The function `get_module_weight` returns the weight of a module given its index.\n\n :param v: The parameter `v` in the `get_module_weight` function is of type `size_t`. It represents\n the index or key of the module weight that you want to retrieve\n :return: the value of `self.module_weight[v]`.\n \"\"\"\n return self.module_weight[v]\n\n # def get_module_weight_by_id(self, v):\n # \"\"\"[summary]\n\n # Arguments:\n # v (size_t): description\n\n # Returns:\n # [size_t]: description\n # \"\"\"\n # return 1 if self.module_weight is None \\\n # else self.module_weight[v]\n\n def get_net_weight(self, _) -> int:\n \"\"\"\n The function `get_net_weight` returns an integer value.\n\n :param _: The underscore (_) in the function signature is a convention in Python to indicate that\n the parameter is not used within the function. It is often used when a parameter is required by the\n function signature but not actually used within the function's implementation. 
In this case, the\n underscore (_) is used as a placeholder for\n :return: An integer value of 1 is being returned.\n \"\"\"\n return 1\n\n def __iter__(self):\n \"\"\"\n The function returns an iterator over all modules in the Netlist.\n :return: The `iter(self.modules)` is being returned.\n \"\"\"\n return iter(self.modules)\n\n\ndef read_json(filename):\n \"\"\"\n The function `read_json` reads a JSON file, converts it into a graph, and creates a netlist object\n with module and net weights.\n\n :param filename: The filename parameter is the name of the JSON file that contains the data you want\n to read\n :return: an object of type `Netlist`.\n \"\"\"\n with open(filename, \"r\") as fr:\n data = json.load(fr)\n gra = json_graph.node_link_graph(data)\n num_modules = gra.graph[\"num_modules\"]\n num_nets = gra.graph[\"num_nets\"]\n num_pads = gra.graph[\"num_pads\"]\n hyprgraph = Netlist(\n gra, range(num_modules), range(num_modules, num_modules + num_nets)\n )\n hyprgraph.num_pads = num_pads\n hyprgraph.module_weight = RepeatArray(1, num_modules)\n hyprgraph.net_weight = RepeatArray(1, num_nets)\n # hyprgraph.net_weight = ShiftArray(1 for _ in range(num_nets))\n # hyprgraph.net_weight.set_start(num_modules)\n return hyprgraph\n\n\ndef create_drawf():\n \"\"\"\n The function `create_drawf` creates a graph and netlist object with specified nodes, edges, and\n weights.\n :return: an instance of the Netlist class, which is created using the ThinGraph class and some\n predefined modules and nets.\n \"\"\"\n gra = ThinGraph()\n gra.add_nodes_from(\n [\n \"a0\",\n \"a1\",\n \"a2\",\n \"a3\",\n \"p1\",\n \"p2\",\n \"p3\",\n \"n0\",\n \"n1\",\n \"n2\",\n \"n3\",\n \"n4\",\n \"n5\",\n ]\n )\n nets = [\n \"n0\",\n \"n1\",\n \"n2\",\n \"n3\",\n \"n4\",\n \"n5\",\n ]\n # net_map = {net: i_net for i_net, net in enumerate(nets)}\n modules = [\"a0\", \"a1\", \"a2\", \"a3\", \"p1\", \"p2\", \"p3\"]\n # module_map = {v: i_v for i_v, v in enumerate(modules)}\n # module_weight = [1, 3, 4, 2, 0, 0, 0]\n module_weight = {\"a0\": 1, \"a1\": 3, \"a2\": 4, \"a3\": 2, \"p1\": 0, \"p2\": 0, \"p3\": 0}\n\n gra.add_edges_from(\n [\n (\"n0\", \"p1\", {\"dir\": \"I\"}),\n (\"n0\", \"a0\", {\"dir\": \"I\"}),\n (\"n0\", \"a1\", {\"dir\": \"O\"}),\n (\"n1\", \"a0\", {\"dir\": \"I\"}),\n (\"n1\", \"a2\", {\"dir\": \"I\"}),\n (\"n1\", \"a3\", {\"dir\": \"O\"}),\n (\"n2\", \"a1\", {\"dir\": \"I\"}),\n (\"n2\", \"a2\", {\"dir\": \"I\"}),\n (\"n2\", \"a3\", {\"dir\": \"O\"}),\n (\"n3\", \"a2\", {\"dir\": \"I\"}),\n (\"n3\", \"p2\", {\"dir\": \"O\"}),\n (\"n4\", \"a3\", {\"dir\": \"I\"}),\n (\"n4\", \"p3\", {\"dir\": \"O\"}),\n (\"n5\", \"p2\", {\"dir\": \"B\"}),\n ]\n )\n gra.graph[\"num_modules\"] = 7\n gra.graph[\"num_nets\"] = 6\n gra.graph[\"num_pads\"] = 3\n hyprgraph = Netlist(gra, modules, nets)\n hyprgraph.module_weight = module_weight\n hyprgraph.net_weight = RepeatArray(1, len(nets))\n hyprgraph.num_pads = 3\n return hyprgraph\n\n\ndef create_test_netlist():\n \"\"\"\n The function `create_test_netlist` creates a test netlist with nodes, edges, module weights, and net\n weights.\n :return: an instance of the `Netlist` class, which represents a netlist with modules and nets.\n \"\"\"\n gra = ThinGraph()\n gra.add_nodes_from([\"a0\", \"a1\", \"a2\", \"a3\", \"a4\", \"a5\"])\n # module_weight = [533, 543, 532]\n module_weight = {\"a0\": 533, \"a1\": 543, \"a2\": 532}\n gra.add_edges_from(\n [\n (\"a3\", \"a0\"),\n (\"a3\", \"a1\"),\n (\"a4\", \"a0\"),\n (\"a4\", \"a1\"),\n (\"a4\", \"a2\"),\n (\"a5\", \"a0\"), # 
self-loop\n ]\n )\n\n gra.graph[\"num_modules\"] = 3\n gra.graph[\"num_nets\"] = 3\n modules = [\"a0\", \"a1\", \"a2\"]\n # module_map = {v: i_v for i_v, v in enumerate(modules)}\n nets = [\"a3\", \"a4\", \"a5\"]\n # net_weight = {net: 1 for net in nets}\n net_weight = RepeatArray(1, len(nets))\n\n hyprgraph = Netlist(gra, modules, nets)\n hyprgraph.module_weight = module_weight\n hyprgraph.net_weight = net_weight\n return hyprgraph\n\n\ndef vdc(n, base=2):\n \"\"\"[summary]\n\n Arguments:\n n ([type]): [description]\n\n Keyword Arguments:\n base (int): [description] (default: {2})\n\n Returns:\n [type]: [description]\n \"\"\"\n vdc, denom = 0.0, 1.0\n while n:\n denom *= base\n n, remainder = divmod(n, base)\n vdc += remainder / denom\n return vdc\n\n\ndef vdcorput(n, base=2):\n \"\"\"[summary]\n\n Arguments:\n n (int): number of vectors\n\n Keyword Arguments:\n base (int): [description] (default: {2})\n\n Returns:\n [type]: [description]\n \"\"\"\n return [vdc(i, base) for i in range(n)]\n\n\ndef form_graph(N, M, _, eta, seed=None): # ignore pos\n \"\"\"Form N by N grid of nodes, connect nodes within eta.\n mu and eta are relative to 1/(N-1)\n\n Arguments:\n t (float): the best-so-far optimal value\n pos ([type]): [description]\n eta ([type]): [description]\n\n Keyword Arguments:\n seed ([type]): [description] (default: {None})\n\n Returns:\n [type]: [description]\n \"\"\"\n if seed:\n random.seed(seed)\n\n # connect nodes with edges\n gra = bipartite.random_graph(N, M, eta)\n # gra = nx.DiGraph(gra)\n return gra\n\n\ndef create_random_hgraph(N=30, M=26, eta=0.1):\n T = N + M\n xbase = 2\n ybase = 3\n x = [i for i in vdcorput(T, xbase)]\n y = [i for i in vdcorput(T, ybase)]\n pos = zip(x, y)\n gra = form_graph(N, M, pos, eta, seed=5)\n\n gra.graph[\"num_modules\"] = N\n gra.graph[\"num_nets\"] = M\n hyprgraph = Netlist(gra, range(N), range(N, N + M))\n hyprgraph.module_weight = RepeatArray(1, N)\n hyprgraph.net_weight = RepeatArray(1, M)\n # hyprgraph.net_weight = ShiftArray(1 for _ in range(M))\n # hyprgraph.net_weight.set_start(N)\n return hyprgraph\n","repo_name":"luk036/ckpttnpy","sub_path":"src/ckpttnpy/netlist.py","file_name":"netlist.py","file_ext":"py","file_size_in_byte":12008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31043174879","text":"import web\nfrom web.views import *\n\n\ndef add_view(url, function):\n web.app.add_url_rule(\n url,\n view_func=function,\n methods=[\"GET\", \"POST\"]\n )\n\n\ndef add_views(views):\n for i in views:\n add_view(i, views[i])\n\n\nadd_views({\n '/': core.index,\n '/favicon.ico': core.favicon,\n '/train//': training.train,\n '/train/': training.train,\n '/login/': auth.login,\n '/logout/': auth.logout,\n '/register/': auth.register,\n '/userinfo/': userinfo.uinfo,\n '/userstats/': userstats.ustats,\n '/verify/': verify.verify,\n '/achievements/': ach.ach,\n '/achievement_list/': ach_all.ach_all,\n '/getusers.json': getusers.getusers,\n '/leaderboard/': leaderboard.leaderboard,\n '/verifyemail': verifyemail.verifye,\n})\n","repo_name":"tjcsl/wedge","sub_path":"web/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"37998925764","text":"from typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..models.pull_request_merge_parameters_merge_strategy import PullRequestMergeParametersMergeStrategy\nfrom ..types import UNSET, 
Unset\n\nT = TypeVar(\"T\", bound=\"PullRequestMergeParameters\")\n\n\n@attr.s(auto_attribs=True)\nclass PullRequestMergeParameters:\n \"\"\"The metadata that describes a pull request merge.\n\n Attributes:\n type (str):\n message (Union[Unset, str]): The commit message that will be used on the resulting commit.\n close_source_branch (Union[Unset, bool]): Whether the source branch should be deleted. If this is not provided,\n we fallback to the value used when the pull request was created, which defaults to False\n merge_strategy (Union[Unset, PullRequestMergeParametersMergeStrategy]): The merge strategy that will be used to\n merge the pull request. Default: PullRequestMergeParametersMergeStrategy.MERGE_COMMIT.\n \"\"\"\n\n type: str\n message: Union[Unset, str] = UNSET\n close_source_branch: Union[Unset, bool] = UNSET\n merge_strategy: Union[\n Unset, PullRequestMergeParametersMergeStrategy\n ] = PullRequestMergeParametersMergeStrategy.MERGE_COMMIT\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n type = self.type\n message = self.message\n close_source_branch = self.close_source_branch\n merge_strategy: Union[Unset, str] = UNSET\n if not isinstance(self.merge_strategy, Unset):\n merge_strategy = self.merge_strategy.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"type\": type,\n }\n )\n if message is not UNSET:\n field_dict[\"message\"] = message\n if close_source_branch is not UNSET:\n field_dict[\"close_source_branch\"] = close_source_branch\n if merge_strategy is not UNSET:\n field_dict[\"merge_strategy\"] = merge_strategy\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n type = d.pop(\"type\")\n\n message = d.pop(\"message\", UNSET)\n\n close_source_branch = d.pop(\"close_source_branch\", UNSET)\n\n _merge_strategy = d.pop(\"merge_strategy\", UNSET)\n merge_strategy: Union[Unset, PullRequestMergeParametersMergeStrategy]\n if isinstance(_merge_strategy, Unset):\n merge_strategy = UNSET\n else:\n merge_strategy = PullRequestMergeParametersMergeStrategy(_merge_strategy)\n\n pull_request_merge_parameters = cls(\n type=type,\n message=message,\n close_source_branch=close_source_branch,\n merge_strategy=merge_strategy,\n )\n\n pull_request_merge_parameters.additional_properties = d\n return pull_request_merge_parameters\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"phitoduck/pulumi-bitbucket","sub_path":"bitbucket-api-client/bitbucket_api_client/models/pull_request_merge_parameters.py","file_name":"pull_request_merge_parameters.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72766950952","text":"import matplotlib.pyplot as plt\r\n\r\nfrom time import *\r\nfrom model.circles import *\r\nfrom model.ellipses import *\r\n\r\n\r\ndef measure_time():\r\n radiuses = [r for r in range(100, 10002, 1000)]\r\n\r\n canon_circle_times = []\r\n 
parametric_circle_times = []\r\n    bresenham_circle_times = []\r\n    mid_dot_circle_times = []\r\n\r\n    canon_ellips_times = []\r\n    parametric_ellips_times = []\r\n    bresenham_ellipse_times = []\r\n    mid_dot_ellipse_times = []\r\n\r\n    tasks1 = '''canon_circle\r\n    parametric_circle\r\n    bresenham_circle\r\n    mid_dot_circle'''\r\n\r\n    tasks2 = '''canon_ellips\r\n    parametric_ellips\r\n    bresenham_ellipse\r\n    mid_dot_ellipse'''\r\n\r\n\r\n    for radius in radiuses:\r\n        for task in tasks1.split('\\n'):\r\n            start = time()\r\n            eval(f'{task}(0, 0, {radius})')\r\n            stop = time()\r\n            eval(f'{task}_times.append(stop-start)')\r\n\r\n\r\n        for task in tasks2.split('\\n'):\r\n            start = time()\r\n            eval(f'{task}(0, 0, {radius}, {0.5*radius})')\r\n            stop = time()\r\n            eval(f'{task}_times.append(stop-start)')\r\n\r\n    plt.figure(figsize=(15, 6))\r\n\r\n    plt.subplot(1, 2, 1)\r\n    plt.title(\"Timings for circles: \")\r\n    plt.plot(radiuses, canon_circle_times, label=\"Canonical\\nequation\")\r\n    plt.plot(radiuses, parametric_circle_times, label=\"Parametric\\nequation\")\r\n    plt.plot(radiuses, bresenham_circle_times, label=\"Bresenham\")\r\n    plt.plot(radiuses, mid_dot_circle_times, label=\"Midpoint\\nalgorithm\")\r\n    plt.legend()\r\n    plt.ylabel(\"Time\")\r\n    plt.xlabel(\"Radius\")\r\n\r\n    plt.subplot(1, 2, 2)\r\n    plt.title(\"Timings for ellipses: \")\r\n    plt.plot(radiuses, canon_ellips_times, label=\"Canonical\\nequation\")\r\n    plt.plot(radiuses, parametric_ellips_times, label=\"Parametric\\nequation\")\r\n    plt.plot(radiuses, bresenham_ellipse_times, label=\"Bresenham\")\r\n    plt.plot(radiuses, mid_dot_ellipse_times, label=\"Midpoint\\nalgorithm\")\r\n    plt.legend()\r\n    plt.ylabel(\"Time\")\r\n    plt.xlabel(\"Radius\")\r\n\r\n    plt.show()","repo_name":"Tulenenok/BMSTU_CG","sub_path":"lab_04/view/SergFunc.py","file_name":"SergFunc.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"13716293165","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Paper reproduction: Aspect Level Sentiment Classification with Deep Memory Network\nimport string\nimport re\nimport pickle\nfrom os import listdir\nfrom nltk.corpus import stopwords\nfrom pickle import dump,load\nimport numpy as np\nfrom itertools import chain\nfrom sklearn.metrics import accuracy_score,f1_score,precision_score,recall_score,mean_absolute_error,confusion_matrix\nimport torch.nn as nn\nimport tokenizer\nimport pandas as pd\nimport os\nimport random\nimport torch\nimport json\nimport sys\nimport math\nfrom transformers.modeling_bert import BertPreTrainedModel\nfrom transformers.modeling_bert import BertEmbeddings, BertEncoder, BertPooler\nfrom sklearn.metrics import accuracy_score\nimport torch.nn.functional as F\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\nfrom pytorch_pretrained_bert.optimization import BertAdam\nimport math\nfrom torch.optim.optimizer import Optimizer, required\nfrom transformers import BertModel,get_linear_schedule_with_warmup,AutoTokenizer,AutoModel\nfrom torch.utils.data import Dataset,DataLoader\nfrom math import exp, log\n\n\n# In[2]:\n\n\nhidden_size=300\nbatch_size=32\nclass_num=3\nepoch=30\nlocation=4\nhops=2\naddLocation=False\n\n\n# In[3]:\n\n\nseed=1234\nnp.random.seed(seed)\nrandom.seed(seed)\nos.environ['PYTHONHASHSEED'] = str(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.backends.cudnn.deterministic = True\n\n\n# In[4]:\n\n\ngpu_list = [0,1,2,3]\ngpu_list_str = ','.join(map(str, gpu_list))\nos.environ.setdefault(\"CUDA_VISIBLE_DEVICES\", gpu_list_str)\n# Note: one of the GPUs must be designated as the main GPU.\n# Otherwise it raises: module must have its parameters and buffers on device cuda:1 (device_ids[0]) but found one of them on device: cuda:2\n# Reference: https://stackoverflow.com/questions/59249563/runtimeerror-module-must-have-its-parameters-and-buffers-on-device-cuda1-devi\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n#device=torch.device(\"cpu\")\nprint('use device: %s' %device,file=sys.stderr)\n#print(\"device_count :{}\".format(torch.cuda.device_count()))\n\n\n# In[5]:\n\n\ndef clean_str(string):\n    \"\"\"\n    Tokenization/string cleaning for all datasets except for SST.\n    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n    \"\"\"\n    string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n    string = re.sub(r\"\\'s\", \" \\'s\", string)\n    string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n    string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n    string = re.sub(r\"\\'re\", \" \\'re\", string)\n    string = re.sub(r\"\\'d\", \" \\'d\", string)\n    string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n    string = re.sub(r\",\", \" , \", string)\n    string = re.sub(r\"!\", \" ! \", string)\n    string = re.sub(r\"\\(\", \" \\( \", string)\n    string = re.sub(r\"\\)\", \" \\) \", string)\n    string = re.sub(r\"\\?\", \" \\? \", string)\n    string = re.sub(r\"\\s{2,}\", \" \", string)\n    return string.strip()\n\n\n# In[6]:\n\n\ndef words2indices(self,sents):\n    return [[self._word2id[w] for w in s] for s in sents]\n\n\n# In[7]:\n\n\nclass Vocab:\n    def __init__(self,trainName,testName,hasConflict):\n        # In this paper the context does not include the aspect, so the aspect has to be removed from the sentence here\n        self.trainSent,self.trainAspect,self.trainY=self.readFile(trainName,hasConflict)\n        self.testSent,self.testAspect,self.testY=self.readFile(testName,hasConflict)\n        self.words=['pad']\n        self.aspects=[]\n        self.pad=0\n        self.length=0\n        self.word_vecs=None\n        self._word2id=None\n        self._sentiment2id={\"positive\":0,\"neutral\":1,\"negative\":2}\n        self.buildVocab()\n        self.inGlove=[]\n    \n    def readFile(self,fileName,hasConflict):\n        sentence=[]\n        aspect=[]\n        y=[]\n        with open(fileName,'r',encoding='utf-8') as file:\n            data=json.load(file)\n            for i in data:\n                if hasConflict==True or (hasConflict==False and i['sentiment'] != 'conflict'):\n                    sentence.append(clean_str(i['sentence']).split())\n                    aspect.append(clean_str(i['aspect']).split())\n                    y.append(i['sentiment'])\n                else:\n                    continue\n        return sentence,aspect,y\n    \n    def buildVocab(self):\n        tempo=self.trainSent.copy()\n        tempo.extend(self.testSent)\n        for i in tempo:\n            for j in i:\n                if j not in self.words:\n                    self.words.append(j)\n        reverse=lambda x:dict(zip(x,range(len(x))))\n        self._word2id=reverse(self.words)\n        self.length=len(self.words)\n        self.word_vecs=np.random.uniform(-0.01,0.01,300*self.length).reshape(self.length,300)\n        self.trainY=[self._sentiment2id[i] for i in self.trainY]\n        self.testY=[self._sentiment2id[i] for i in self.testY]\n        self.trainAspect=[self.words2indices(i) for i in self.trainAspect]\n        self.testAspect=[self.words2indices(i) for i in self.testAspect]\n        self.trainSent=self.removeAspect(self.words2indices(self.trainSent),self.trainAspect)\n        self.testSent=self.removeAspect(self.words2indices(self.testSent),self.testAspect)\n    \n    def removeAspect(self,sents,aspect):\n        # TODO: the correctness of this function still has to be confirmed\n        result_sents=[]\n        for i in range(len(sents)):\n            for j in range(len(sents[i])):\n                if sents[i][j]==aspect[i][0] and sents[i][j+len(aspect[i])-1]==aspect[i][-1]:\n                    result_sents.append(sents[i][0:j]+sents[i][j+len(aspect[i]):])\n                    break\n        return result_sents\n    \n    def words2indices(self,sents):\n        if type(sents[0])==list:\n            return [[self._word2id[w] for w in s] for s in sents]\n        else:\n            return [self._word2id[w] for w in sents]\n    \n    def load_pretrained_vec(self,fname):\n        with open(fname) as fp:\n            for line in fp.readlines():\n                line=line.split(\" \")\n                word=line[0]\n                if word in self.words:\n                    self.inGlove.append(word)\n                    self.word_vecs[self._word2id[word]]=np.array([float(x) for x in line[1:]])\n\n\n# In[8]:\n\n\nfile=\"restaurant\"\ntrain_file=None\ntest_file=None\nif file==\"restaurant\":\n    train_file='../data/atsa-restaurant/atsa_train.json'\n    test_file='../data/atsa-restaurant/atsa_test.json'\nelif file==\"laptop\":\n    train_file='../data/atsa-laptop/atsa_train.json'\n    test_file='../data/atsa-laptop/atsa_test.json'\nelse:\n    print('[ERROR]file should be one of restaurant/laptop')\n\n\n# In[9]:\n\n\n'''vocab=Vocab(train_file,test_file,hasConflict=False)\nvocab.load_pretrained_vec('../../data/glove.840B.300d.txt')\nif file==\"restaurant\":\n    dump(vocab,open('vocab_restaurant.pkl','wb'))\nelse:\n    dump(vocab,open('vocab_laptop.pkl','wb'))'''\n\n\n# In[10]:\n\n\nif file==\"restaurant\":\n    vocab=load(open('vocab_restaurant.pkl','rb'))\nelse:\n    vocab=load(open('vocab_laptop.pkl','rb'))\n\n\n# In[11]:\n\n\nclass myDataset(Dataset):\n    def __init__(self,sent,aspect,y):\n        self.sent=sent\n        self.aspect=aspect\n        self.y=y\n        self.len=len(sent)\n    \n    def __len__(self):\n        return self.len\n    \n    def __getitem__(self,index):\n        return self.sent[index],self.aspect[index],self.y[index]\n    \n    @staticmethod\n    def to_input_tensor(sents):\n        return torch.nn.utils.rnn.pad_sequence([torch.tensor(input_ids) for input_ids in sents],batch_first=True,padding_value=vocab.pad)\n    \n    @staticmethod\n    def collate_fn(batch):\n        sent,aspect,y=zip(*batch)\n        return sent,aspect,y\n\n\n# In[12]:\n\n\nclass DeepMemnet(nn.Module):\n    def __init__(self,vocab):\n        super(DeepMemnet,self).__init__()\n        self.vocab=vocab\n        weight=torch.FloatTensor(vocab.word_vecs).to(device)\n        self.wordEmbedding=nn.Embedding.from_pretrained(weight)\n        self.wordEmbedding.requires_grad=True\n        self.linear=nn.Linear(hidden_size,hidden_size)\n        self.attention_layer=nn.Linear(hidden_size*2,1)\n        self.predict=nn.Linear(hidden_size,class_num)\n        self.location_embedding=nn.Embedding(512,hidden_size)\n    \n    def forward(self,sents,aspect,sequence_length):\n        #sents:[batch_size,sequence_lengths]\n        #aspects:[[aspect,...]] every list element has a different length, so an average is computed below\n        location_id=torch.LongTensor([list(range(max(sequence_length))) for i in range(len(sequence_length))]).to(device)\n        sents_embedding=self.wordEmbedding(sents).to(sents.device)#(batch_size,sequence,hidden_size)\n        #record the aspect lengths; they are needed when averaging the aspect vectors\n        aspect_length=[len(aspe) for aspe in aspect]\n        #the call below pads the aspects to equal length and converts them to a tensor\n        aspect_tensor=myDataset.to_input_tensor(aspect).to(sents.device)#[batch_size,max_length]\n        #look up the word vector of every token\n        aspect_embedding=self.wordEmbedding(aspect_tensor)#[batch_size,max_length,hidden_size]\n        #one list per batch element, each holding the word vectors that are to be averaged\n        aspect_embedding_list=[]\n        for i in range(aspect_embedding.shape[0]):\n            #for the i-th sentence, take its first aspect_length[i] word vectors\n            aspect_embedding_list.append([aspect_embedding[i][j] for j in range(aspect_length[i])])\n        #average the word vectors in every list above, then stack the results\n        aspect_embedding_average=torch.stack([torch.mean(torch.stack(i),0) for i in aspect_embedding_list])#[batch_size,hidden_size]\n        #the final embedding has shape [batch_size,hidden_size]\n        
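# --- Editor's illustrative aside (not part of the original script) ---
# The loop below implements the paper's multi-hop attention. As a self-contained
# sketch, a single hop looks like the hypothetical helper here (all names are
# stand-ins for the variables used in this forward pass):
#
#     def one_hop(context, aspect_vec, linear, attention_layer):
#         # context: [batch, seq_len, hidden]; aspect_vec: [batch, hidden]
#         aspect_lin = linear(aspect_vec)                          # [batch, hidden]
#         expanded = aspect_vec.unsqueeze(1).expand_as(context)    # [batch, seq_len, hidden]
#         g = torch.tanh(attention_layer(torch.cat((context, expanded), 2))).squeeze(2)
#         attn = torch.softmax(g, dim=1).unsqueeze(1)              # [batch, 1, seq_len]
#         attended = torch.bmm(attn, context).squeeze(1)           # [batch, hidden]
#         return attended + aspect_lin                             # fed into the next hop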
embedding_sum=aspect_embedding_average\n        for i in range(1,hops+1):\n            aspect_embedding_average=embedding_sum\n            aspect_embedding_linear=self.linear(embedding_sum)\n            if addLocation==True:\n                if location==3:\n                    v=self.location_embedding(location_id)\n                    sents_embedding=torch.add(sents_embedding,v)\n                else:\n                    v=torch.sigmoid(self.location_embedding(location_id))\n                    sents_embedding=sents_embedding*v\n            aspect_embedding_average=aspect_embedding_average.unsqueeze(1).repeat(1,sents.shape[1],1)#[batch_size,squence_length,hidden_size]\n            sent_embedding_aspect_embedding=torch.cat((sents_embedding,aspect_embedding_average),2)#[batch_size,squence_length,hidden_size*2]\n            g=torch.tanh(self.attention_layer(sent_embedding_aspect_embedding)).squeeze(2)#[batch_size,squence_length]\n            g=g.masked_fill(sents==0,-float('inf'))#[batch_size,sequence_length]\n            atten=torch.softmax(g,1).unsqueeze(1)#[batch_size,1,sequence_length]\n            context_embedding=torch.bmm(atten,sents_embedding).squeeze(1)#[batch_size,hidden_size]\n            embedding_sum=torch.add(context_embedding,aspect_embedding_linear)#[batch_size,hidden_size]\n        return self.predict(embedding_sum)\n\n\n# In[13]:\n\n\ndef cuda2cpu(pred):\n    # convert a CUDA tensor to a plain CPU list\n    if type(pred)==list:\n        return pred\n    if pred.is_cuda:\n        pred_cpu=list(pred.cpu().numpy())\n    else:\n        pred_cpu=list(pred.numpy())\n    return pred_cpu\n\ndef estimate(labels,preds):\n    labels=cuda2cpu(labels)\n    preds=cuda2cpu(preds)\n    accuracy=accuracy_score(labels,preds)\n    precision=precision_score(labels,preds,average='macro')\n    recall=recall_score(labels,preds,average='macro')\n    f1score=f1_score(labels,preds,average='macro')\n    cm=confusion_matrix(labels,preds)\n    return accuracy,precision,recall,f1score,cm\n\n\n# In[14]:\n\n\ndef evaluate(model,dataLoader):\n    model.eval()\n    with torch.no_grad():\n        allloss=[]\n        allresult=[]\n        alllabel=[]\n        crossentry=nn.CrossEntropyLoss()\n        for i,batch in enumerate(dataLoader):\n            sents,aspect,label=batch\n            sequence_length=[len(sent) for sent in sents]\n            label=torch.LongTensor(label).to(device)\n            sents=myDataset.to_input_tensor(sents).to(device)\n            pred=model(sents,aspect,sequence_length)\n            _,result=torch.max(pred,1)\n            loss=crossentry(pred,label)\n            allloss.append(loss.item())\n            alllabel.append(cuda2cpu(label))\n            allresult.append(cuda2cpu(result))\n        alllabel=list(chain.from_iterable(alllabel))\n        allresult=list(chain.from_iterable(allresult))\n        accuracy,precision,recall,f1score,_=estimate(alllabel,allresult)\n        return accuracy,precision,recall,f1score\n\n\n# In[15]:\n\n\ndef train():\n    model=DeepMemnet(vocab)\n    #model=nn.DataParallel(model)\n    model=model.to(device)\n    optimizer=torch.optim.SGD(model.parameters(),lr=0.01)\n    trainDataset=myDataset(vocab.trainSent,vocab.trainAspect,vocab.trainY)\n    trainLoader=DataLoader(trainDataset,batch_size,shuffle=True,collate_fn=myDataset.collate_fn)\n    crossentry=nn.CrossEntropyLoss()\n    max_accuracy=0\n    for i in range(epoch):\n        model.train()\n        allloss=[]\n        alllabel=[]\n        allresult=[]\n        for j,batch in enumerate(trainLoader):\n            optimizer.zero_grad()\n            sents,aspect,label=batch\n            label=torch.LongTensor(label).to(device)\n            sequence_length=[len(sent) for sent in sents]\n            sents=myDataset.to_input_tensor(sents).to(device)\n            pred=model(sents,aspect,sequence_length)\n            loss=crossentry(pred,label)\n            _,result=torch.max(pred,1)\n            allresult.append(cuda2cpu(result))\n            allloss.append(loss.item())\n            alllabel.append(cuda2cpu(label))\n            loss.backward()\n            optimizer.step()\n        alllabel=list(chain.from_iterable(alllabel))\n        allresult=list(chain.from_iterable(allresult))\n        
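# --- Editor's illustrative aside (not part of the original script) ---
# estimate() above macro-averages the per-class scores. A tiny hand-made
# example of what the sklearn calls return (labels are made up):
#
#     from sklearn.metrics import accuracy_score, f1_score, precision_score
#     y_true = [0, 1, 2, 2, 1]
#     y_pred = [0, 1, 1, 2, 1]
#     accuracy_score(y_true, y_pred)                    # 0.8
#     precision_score(y_true, y_pred, average='macro')  # unweighted mean over classes
#     f1_score(y_true, y_pred, average='macro')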
#train_accuracy,train_precision,train_recall,train_f1score,_=estimate(alllabel,allresult)\n        #print(\"train_accuracy:{} train_precision:{} train_recall:{} train_f1score:{}\".format(train_accuracy,train_precision,train_recall,train_f1score))\n        testDataset=myDataset(vocab.testSent,vocab.testAspect,vocab.testY)\n        testLoader=DataLoader(testDataset,batch_size,shuffle=False,collate_fn=myDataset.collate_fn)\n        test_accuracy,test_precision,test_recall,test_f1score=evaluate(model,testLoader)\n        print(\"Epoch:{} test_accuracy:{} test_precision:{} test_recall:{} test_f1score:{}\".format(i,test_accuracy,test_precision,test_recall,test_f1score))\n        if test_accuracy>max_accuracy:\n            torch.save(model,'model_location'+str(location)+file+'.pkl')\n            max_accuracy=test_accuracy\n\n\n# In[ ]:\n\n\ntrain()\n\n\n# In[ ]:\n\n\nmodel=torch.load('model_location'+str(location)+file+'.pkl')\ntestDataset=myDataset(vocab.testSent,vocab.testAspect,vocab.testY)\ntestLoader=DataLoader(testDataset,batch_size,shuffle=False,collate_fn=myDataset.collate_fn)\ntest_accuracy,test_precision,test_recall,test_f1score=evaluate(model,testLoader)\nprint(\"test_accuracy:{} test_precision:{} test_recall:{} test_f1score:{}\".format(test_accuracy,test_precision,test_recall,test_f1score))\n\n\n# restaurant:79.4\n# laptop:72.1\n# \n# Surprisingly large accuracy gap without the location encoding\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"loganzhang1/Sentiment-analysis-paper-implement","sub_path":"Aspect Level Sentiment Classification with Deep Memory Network.py","file_name":"Aspect Level Sentiment Classification with Deep Memory Network.py","file_ext":"py","file_size_in_byte":15183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"19117003391","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom datetime import date, time, timedelta\nimport time\nimport unittest\nimport HtmlTestRunner\n\nprint('OneShield_UWQuestions Script')\ntry:\n    class FL04(unittest.TestCase):\n        from CommonFunctions import BasicFunctions as BF, OneShieldFunctions as OSF\n        import AccessorRepository as AR\n\n        uMarketCap = myData['Option1']\n        uTotalAssets = myData['Option2']\n        uAnnualRevenues = myData['Option3']\n        uOrgCategory = myData['Option4']\n        uTicker = myData['Option5']\n        uFinCondition = myData['Option6']\n        uNoOperations = myData['Option7']\n        uAcqHistory = myData['Option8']\n        uMgtQuality = myData['Option9']\n        uLtgHistory = myData['Option10']\n        uTimeInBusiness = myData['Option11']\n        uClsBusiness = myData['Option12']\n\n        myDriver = myDriver\n        ScreenShotsDir = ScreenShotsDir\n\n        def test_FL04_01_GoToUWQuestions(self):\n            try:\n                # Go to UW Questions Tab\n                self.myDriver.find_element(By.CSS_SELECTOR, \"#ctl00__MainContent_mnuDataTabs > .rmRootGroup > .rmItem:nth-child(2) .rmText\").click()\n                time.sleep(3)\n\n            except Exception as e41:\n                print(self.BF.fncCaptureScreenshot(self.myDriver, self.ScreenShotsDir, 'GoToUWQuestions'))\n                self.fail(e41)\n\n        def test_FL04_02_AddPolicyInformation(self):\n            try:\n\n                # Market Cap\n                self.BF.fncEnterTextboxValue(self.myDriver, self.AR.idtxtUWQMarketCap, self.uMarketCap)\n\n\n                # Total Assets\n                self.BF.fncEnterTextboxValue(self.myDriver, self.AR.idtxtUWQTotalAssets, self.uTotalAssets)\n\n                # Annual Revenues\n                
self.BF.fncEnterTextboxValue(self.myDriver, self.AR.idtxtUWQAnnualRevenues, self.uAnnualRevenues)\n\n # Organization Category\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQOrgCategory, self.uOrgCategory)\n\n # Ticker Symbol\n self.BF.fncEnterTextboxValue(self.myDriver, self.AR.idtxtUWQTicker, self.uTicker)\n\n except Exception as e42:\n print(self.BF.fncCaptureScreenshot(self.myDriver, self.ScreenShotsDir, 'AddPolicyInformation'))\n self.fail(e42)\n\n def test_FL04_03_AddUnderlyingInsuranceRiskFactors(self):\n try:\n # Financial Condition\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQFinCondition, self.uFinCondition)\n\n # Nature of Operations\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQNoOperations, self.uNoOperations)\n\n # Acquisition History\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQAcqHistory, self.uAcqHistory)\n\n # Management Quality\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQMgtQuality, self.uMgtQuality)\n\n # Litigation History\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQLtgHistory, self.uLtgHistory)\n\n # Time in Business\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQTimeInBusiness, self.uTimeInBusiness)\n\n # Class of Business\n self.BF.fncSelectElementFromDropdown(self.myDriver, self.AR.idddUWQClsBusiness, self.uClsBusiness)\n\n except Exception as e43:\n print(self.BF.fncCaptureScreenshot(self.myDriver, self.ScreenShotsDir, 'AddUnderlyingInsuranceRiskFactors'))\n self.fail(e43)\n\n def test_FL04_04_SaveUWQuestions(self):\n try:\n # Click Next to save UW Questions\n self.myDriver.find_element(By.ID, self.AR.idbtnOneShieldNext).click()\n time.sleep(2)\n\n # Verify if the record has been successfully created\n assert self.myDriver.find_element(By.ID, \"ctl00__MainContent_ucMessages_lblInfoMsg\").text == \"Record has been saved\"\n\n except Exception as e44:\n print(self.BF.fncCaptureScreenshot(self.myDriver, self.ScreenShotsDir, 'SaveUWQuestions'))\n self.fail(e44)\n\nexcept Exception as e4:\n print('OneShield_UWQuestions Script did not run successfully')\n print(e4)\n\n","repo_name":"nirajamca/OMS","sub_path":"Submissions/PyScripts/OneShield_UWQuestions.py","file_name":"OneShield_UWQuestions.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19951748136","text":"if __name__ == '__main__':\n students = []\n for _ in range(int(input())):\n name = input()\n score = float(input())\n\n data = []\n data.append(name)\n data.append(score)\n students.append(data)\n\n sortedStudents = sorted(students)\n print()\n print(max(sortedStudents))\n\n for i in range(0, len(sortedStudents)):\n print(sortedStudents[i][1]) \n","repo_name":"rusiruavb/Python-Fundamentals","sub_path":"HackerRank-List.py","file_name":"HackerRank-List.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3621933582","text":"import theano.tensor as T\nfrom unidecode import unidecode\nfrom utils.network import NeuralNetwork\nfrom utils.layers import FullyConnectedLayer, LSTMCell\nfrom utils.losses import SparseCategoricalCrossentropy\nfrom utils.embedding import OneHotEmbedding\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\ndef get_X_y(df):\n X = df.context.to_list()\n y = df.label.to_list()\n\n X = 
np.array([[ohe._vocab[char] for char in x] for x in X])\n    y = np.array([[ohe._vocab[char] for char in yy] for yy in y])\n\n    return X, y\n\ndef generate_text(phrase, n=2000, temp=1.0):\n    encoded_phrase = np.array([[ohe._vocab[char]] for char in phrase])\n    encoded_phrase = ohe(encoded_phrase)\n    encoded_phrase = np.squeeze(encoded_phrase, axis=1)\n\n    generated_text = net.predict(encoded_phrase, n=n, temp=temp)\n    generated_text = [vocab[char_idx] for char_idx in generated_text]\n\n    return phrase + \"\".join(generated_text)\n\n\ndf = pd.read_parquet(\"data/next-character.parquet\")\nvocab = list(set(unidecode(\"\".join(df.context))))\nvocab.sort()\nprint(len(vocab))\nprint(df.shape)\n\nloss = SparseCategoricalCrossentropy()\nohe = OneHotEmbedding(vocab)\n\nntest = int(0.05 * df.shape[0])\ndf_train, df_test = df.head(df.shape[0] - ntest), df.tail(ntest)\n\nX_train, y_train = get_X_y(df_train)\nX_test, y_test = get_X_y(df_test)\n\nnet = NeuralNetwork(len(vocab), loss, nepochs=10, nbatch=100, alpha=0.002, lmbda=0.001, embedding=ohe)\nnet.add_layer(LSTMCell(512, 100))\nnet.add_layer(LSTMCell(512, 100))\nnet.add_layer(FullyConnectedLayer(len(vocab), T.nnet.softmax, seq2seq=True))\n\nnet.compile()\n\nloss_train, loss_test, metric_scores_train, metric_scores_test = net.train(X_train, y_train, X_test, y_test)\n\nplt.plot(loss_train, label=\"train\")\nplt.plot(loss_test, label=\"test\")\nplt.xlabel(\"epoch\")\nplt.ylabel(\"sparse categorical crossentropy\")\nplt.title(\"next-character task\")\nplt.legend()\nplt.show()\n\n","repo_name":"gekas145/char-rnn","sub_path":"theano/next_char.py","file_name":"next_char.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74390600553","text":"import torch\nimport numpy as np\nimport torchvision as tv\nfrom entry import get_parameter\n\npar = get_parameter()\n\n\ndef get_M(samples):\n    YYT = samples.dot(samples.T)\n    eigenvalue, eigenvector = np.linalg.eig(YYT)\n    sorted_indices = np.argsort(eigenvalue)\n    print('eigenvalues: ' + str(eigenvalue))\n    print('eigenvectors: ' + str(eigenvector))\n    energy = []\n    d_pie = 0\n    for i in range(len(eigenvalue)):\n        energy.append(eigenvalue[:i+1].sum() / eigenvalue.sum())\n        if energy[i] > 0.94:\n            d_pie = i+1\n            break\n\n    print('energy: '+str(energy))\n    Ud_pie = eigenvector[:, sorted_indices[:-d_pie - 1:-1]]\n    print('top '+str(d_pie)+' eigenvectors: ' + str(Ud_pie))\n    M = Ud_pie.dot(Ud_pie.T)\n    print('matrix M: '+str(M))\n    return Ud_pie, d_pie\n\n\ndef feature_decompose(samples, w, bias,channel):\n    print('weight shape='+str(w.shape))\n    samples = np.array(samples)\n    print('sample shape: '+str(samples.shape))\n    samples = np.transpose(samples, (3, 0, 1, 2))\n    samples = samples.reshape((channel, -1), order='F')\n    samples = samples-samples.mean(0)\n    Ud_pie, d_pie = get_M(samples)\n    print('matrix: '+str(Ud_pie))\n    return Ud_pie\n\n\nif __name__ == '__main__':\n    feature_decompose()  # requires (samples, w, bias, channel) arguments\n","repo_name":"caddyless/learn_pytroch","sub_path":"feature-decompose/decompose.py","file_name":"decompose.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"38915266291","text":"import hashlib\nimport os\nfrom Crypto.Cipher import AES\nimport base64\nfrom binascii import b2a_hex\nfrom Crypto import Random\nfrom Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5\nfrom Crypto.PublicKey import RSA\n\n\nclass Encrypts:\n    \"\"\"Four encryption helpers: MD5, base64, AES and RSA\"\"\"\n\n    def __init__(self):\n        self.aes_mode = AES.MODE_ECB  # AES encryption mode\n        self.aes_key_size = 256  # AES key size, a random value\n        self.rsa_count = 2048  # RSA key-pair size, a random value\n\n    @staticmethod\n    def md5_encrypt(plaintext):\n        \"\"\" MD5 digest\n        :param plaintext: the content to hash\n        :return: encrypt_str, the hex digest\n        \"\"\"\n        h1 = hashlib.md5()  # create the md5 object\n        h1.update(plaintext.encode(encoding='utf-8'))  # the encoding must be declared\n        # digest\n        encrypt_str = h1.hexdigest()\n        return encrypt_str\n\n    @staticmethod\n    def base64_encry(plaintext):\n        \"\"\"base64 encoding\"\"\"\n        base64_encry = base64.b64encode(plaintext.encode('utf-8'))\n        return base64_encry\n\n    def generate_aes_key(self):\n        \"\"\"AES key generation\"\"\"\n        # length for urandom\n        key_size = self.aes_key_size\n        u_len = int(key_size / 8 / 4 * 3)\n        aes_key = base64.b64encode(os.urandom(u_len))  # os.urandom() generates random bytes\n        return aes_key\n\n    def aes_encrypt(self, message, aes_key):\n        \"\"\"use AES to encrypt message,\n        :param message: the content to encrypt\n        :param aes_key: the key\n        :return: encrypted_message, the ciphertext\n        \"\"\"\n        mode = self.aes_mode  # encryption mode\n        if type(message) == str:\n            message = bytes(message, 'utf-8')\n        if type(aes_key) == str:\n            aes_key = bytes(aes_key, 'utf-8')\n        # the lengths of aes_key and message must be multiples of 16\n        while len(aes_key) % 16 != 0:\n            aes_key += b' '\n\n        while len(message) % 16 != 0:\n            message += b' '\n        # the AES cipher object\n        aes = AES.new(key=aes_key, mode=mode)\n        encrypt_message = aes.encrypt(plaintext=message)\n        return b2a_hex(encrypt_message)\n\n    def generate_rsa_keys(self):\n        \"\"\"RSA key-pair generation\"\"\"\n        rsa_count = self.rsa_count\n        # random number generator\n        random_generator = Random.new().read\n        # create an RSA instance\n        rsa = RSA.generate(rsa_count, random_generator)\n        # generate the master key pair\n        rsa_public_key = rsa.publickey().exportKey()\n        rsa_private_key = rsa.exportKey()\n        return rsa_public_key, rsa_private_key\n\n    @staticmethod\n    def rsa_encrypt(message, rsa_public_key):\n        \"\"\"use RSA to encrypt message,\n        :param message: the content to encrypt\n        :param rsa_public_key: the public key (bytes)\n        :return: encrypt_msg_list, a list of ciphertext chunks\n        \"\"\"\n        pub_key = RSA.importKey(rsa_public_key)\n        # cipher object\n        cipher = Cipher_pkcs1_v1_5.new(pub_key)\n        msg = message.encode('utf-8')\n        # encrypt in chunks\n        default_encrypt_length = 245\n        length = default_encrypt_length\n        msg_list = [msg[i:i + length] for i in list(range(0, len(msg), length))]\n        # list of encrypted chunks\n        encrypt_msg_list = []\n        for msg_str in msg_list:\n            cipher_text = base64.b64encode(cipher.encrypt(message=msg_str))\n            encrypt_msg_list.append(cipher_text)\n        return encrypt_msg_list\n\nif __name__ == '__main__':\n    digest = Encrypts.md5_encrypt(\"游龙\")\n    print(digest)\n","repo_name":"youlong533/Myproject","sub_path":"Tools/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37603213408","text":"import viscojapan as vj\n\nresult_file = 'nrough_06_naslip_11.h5'\nfault_file = 'fault_bott80km.h5'\n\ncpt_file = 'Blues_09.cpt'\ncpt_file = 'hot'\ncpt_reverse = True\n\nslip = vj.inv.ResultFileReader(result_file).get_slip()\n\nepochs = list(range(0,450,50))\n\nplotter = vj.inv.AfterslipPlotter(\n    slip = slip,\n    epochs = epochs,\n    fault_file = fault_file,\n    cpt_file = cpt_file,\n    cpt_reverse = cpt_reverse\n)\n\nplotter.plot()\n\nplotter.save('test_.pdf')\n\n\n","repo_name":"zy31415/viscojapan","sub_path":"agu-paper/fault_slip/new_version/afterslip_plotter.py","file_name":"afterslip_plotter.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"43130950719","text":"#!/bin/python3\nfrom all_helpers import *\nimport sys\n\npairs = get_paper_all_pairs()\nprox = int(sys.argv[1])\nalgo = 
sys.argv[2]\n\nfor gtag1, gtag2 in pairs:\n seeds, seed_metrics, extr_metrics = simplified_run_with_metrics(gtag1, gtag2, algo=algo, prox=prox, silent=True)\n to_print = [f'{gtag1}-{gtag2}', len(seeds), seed_metrics[0], seed_metrics[1], extr_metrics[0], extr_metrics[1]]\n print('\\t'.join([str(e) for e in to_print]))\n","repo_name":"wangpatrick57/plant","sub_path":"paper_result_scripts/paper_all_blant_results.py","file_name":"paper_all_blant_results.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"473347757","text":"# import os\nimport csv\nimport json\n\n\n# for file_name in [file for file in os.listdir('output_data') if os.path.isfile('output_data')]:\n# os.remove('output_data')\n\ncars = []\nbrands_list = []\n\nwith open('input.csv', 'r+') as file_in:\n csv_reader = csv.reader(file_in)\n for car in [[csv_reader.line_num, row[0], row[1], int(row[2]), int(row[3])] for row in csv_reader]:\n cars.append({key: value for(key, value) in zip(['id', 'brand', 'model', 'hp', 'price'], car)})\n brands_list.append(cars[-1].get('brand'))\n\n\nsorted_cars = {\n 'slow_cars': filter(lambda car: car['hp'] < 120, cars),\n 'fast_cars': filter(lambda car: 120 <= car['hp'] < 180, cars),\n 'sport_cars': filter(lambda car: car['hp'] >= 180 , cars),\n 'cheap_cars': filter(lambda car: car['price'] < 1000, cars),\n 'medium_cars': filter(lambda car: 1000 <= car['price'] < 5000, cars),\n 'expensive_cars': filter(lambda car: car['price'] >= 5000, cars)\n}\n\nfor brand in brands_list:\n sorted_cars[brand] = list(filter(lambda car: car['brand'] == brand, cars))\n\nfor sorted_car in sorted_cars:\n with open(f'output_data/{sorted_car}.json', 'w') as json_file:\n json.dump(list(sorted_cars[sorted_car]), json_file)\n","repo_name":"MariusTodica/ScoalaInformala","sub_path":"Homeworks/Tema masini/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16736027897","text":"from gensim import corpora, models, similarities\nfrom parse_abs import is_variable, processing_variables_with_emparser\nimport re\nimport pickle\nfrom collections import defaultdict\n\ndef search(search_word, count_top):\n\n search_word = search_word.replace(\",\", \" \")\n search_word = search_word.replace(\";\", \"\")\n input_doc = processing_variables_with_emparser(search_word.split())\n input_doc = input_doc.split()\n\n tfidf = models.TfidfModel.load('tfidf.model') \n lsi = models.LsiModel.load('lsi_topics300.model')\n dictionary = corpora.Dictionary.load('search_dictionary.dict')\n\n index = similarities.MatrixSimilarity.load('lsi_index.index')\n\n query_vector = dictionary.doc2bow(input_doc)\n\n vec_lsi = lsi[tfidf[query_vector]]\n sims = index[vec_lsi]\n \n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n result = []\n result_append = result.append\n\n with open(\"tell.pkl\", \"rb\") as f:\n tell = pickle.load(f)\n\n for idx in sims[:count_top]:\n search_result = {}\n with open(\"abs_dictionary.txt\", \"rb\") as f:\n f.seek(tell[idx[0]])\n doc_list = f.read(tell[idx[0]+1]-tell[idx[0]]).decode('utf-8').split()\n\n # doc_list[0] : theorem or definition\n # [1] : line\n # [2] : file_name\n # [3] : label\n # [4::] : text\n search_result[\"label\"] = (doc_list[3])\n search_result[\"text\"] = (\" \".join(doc_list[4::]))\n search_result[\"relevance\"] = (idx[1])\n search_result[\"filename\"] = (doc_list[2])\n 
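# --- Editor's illustrative aside (not part of the original script) ---
# The gensim query pipeline used above (dictionary -> tf-idf -> LSI ->
# similarity index), end to end on a toy corpus; the documents are made up.
#
#     from gensim import corpora, models, similarities
#     docs = [["riemann", "integral"], ["lebesgue", "measure"], ["integral", "measure"]]
#     dictionary = corpora.Dictionary(docs)
#     corpus = [dictionary.doc2bow(d) for d in docs]
#     tfidf = models.TfidfModel(corpus)
#     lsi = models.LsiModel(tfidf[corpus], id2word=dictionary, num_topics=2)
#     index = similarities.MatrixSimilarity(lsi[tfidf[corpus]])
#     query = lsi[tfidf[dictionary.doc2bow(["integral"])]]
#     sims = sorted(enumerate(index[query]), key=lambda item: -item[1])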
search_result[\"line_no\"] = (doc_list[1])\n\n result_append(search_result)\n \n return result\n ","repo_name":"mimosa-project/emsearch","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22544665857","text":"from django.test import TestCase\nfrom task_manager.statuses.models import Status\nfrom task_manager.users.models import CustomUser\nfrom django.urls import reverse_lazy\nfrom task_manager.fixture_reader import get_data\n\n\nclass TestCreateStatusView(TestCase):\n fixtures = ['users.json', 'statuses.json']\n\n @classmethod\n def setUpTestData(cls):\n cls.user = CustomUser.objects.get(pk=1)\n cls.status = Status.objects.get(pk=1)\n\n # @override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')\n def test_status_create_view_availability(self):\n self.client.force_login(self.user)\n response = self.client.get(reverse_lazy('create_status'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'common_create_update.html')\n\n def test_status_create(self):\n status_data = get_data('test_statuses.json')\n create_status_data = status_data['create']['fields']\n self.client.force_login(self.user)\n response = self.client.post(reverse_lazy('create_status'),\n create_status_data)\n status = Status.objects.get(pk=3)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(status.name, 'assigned')\n self.assertRedirects(response, reverse_lazy('status_list'))\n\n\nclass TestUpdateStatusView(TestCase):\n fixtures = ['users.json', 'statuses.json']\n\n @classmethod\n def setUpTestData(cls):\n cls.user = CustomUser.objects.get(pk=1)\n cls.status = Status.objects.get(pk=1)\n\n def test_status_update_view_availability(self):\n self.client.force_login(self.user)\n response = self.client.get(\n reverse_lazy('update_status', kwargs={'pk': self.status.pk})\n )\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response,\n template_name='common_create_update.html')\n\n def test_status_update(self):\n update_data = get_data('test_statuses.json')\n update_status_data = update_data['update']['fields']\n self.client.force_login(self.user)\n response = self.client.post(\n reverse_lazy('update_status',\n kwargs={'pk': self.status.pk}),\n update_status_data\n )\n updated_status = Status.objects.get(pk=1)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(updated_status.name, 'declined')\n\n\nclass TestDeleteStatusView(TestCase):\n fixtures = ['users.json', 'statuses.json']\n\n @classmethod\n def setUpTestData(cls):\n cls.user = CustomUser.objects.get(pk=2)\n cls.status = Status.objects.get(pk=1)\n\n def test_delete_yourself(self):\n self.client.force_login(self.user)\n response = self.client.post(\n reverse_lazy('delete_status', kwargs={'pk': self.user.pk})\n )\n self.assertEqual(response.status_code, 302)\n","repo_name":"MatveiKhmyzov/python-project-52","sub_path":"task_manager/statuses/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41436982487","text":"import pytest\nfrom mock import patch\nfrom api.exceptions import OperationFailedException\nfrom api.speech_to_text.google.helpers import google_speech_send_request, SpeechClient\n\n\n# Ensure that STT behaves correctly\n@patch.object(SpeechClient, 'recognize', 
autospec=True)\ndef test_google_speech_send_request(mock_speech_client, google_request, google_response):\n mock_speech_client.return_value = google_response\n res = google_speech_send_request(google_request['file'], google_request['language'])\n\n assert mock_speech_client.call_count == 1\n assert google_response.results[0].alternatives[0].transcript == res['text']\n assert google_response.results[0].alternatives[0].confidence == res['confidence']\n\n\n# Ensure that STT behave correctly when SpeechClient is not working properly\n@patch.object(SpeechClient, 'recognize', autospec=True)\ndef test_google_speech_client_failed(mock_speech_client, google_request, google_response):\n mock_speech_client.side_effect = Exception()\n with pytest.raises(OperationFailedException):\n google_speech_send_request(google_request['file'], google_request['language'])\n\n\n@pytest.mark.externalapi\ndef test_service_available(google_request, result):\n res = google_speech_send_request(google_request['file'], google_request['language'])\n\n assert res['text'] == result['text']\n assert res['confidence'] == result['confidence']\n","repo_name":"suricats/surirobot-api-converse","sub_path":"api/speech_to_text/google/tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35915014960","text":"#Plot a series of parallel color plots for the 1d function f(x_i, y)\n#There is ONE function, f(x_i, y), for each x_i, and they are not\n# generally assumed to be continuously related.\n#Functs must be abe to accept numpy arrays as one and only argument . \n\nimport matplotlib.pyplot as plt\nfrom cantrips import simulSortLists\nfrom cantrips import getIndexOfClosestElement\nfrom cantrips import indexMultiDimArrayWithUnknownNumberOfIndeces\nfrom cantrips import operateOnNumpyArray\nimport numpy as np\nfrom matplotlib import cm \n\n\n\ndef showParallel1DTempPlots(xs, y_range, functs, axarr = None, axarr_indeces = None, white_space = 0.0, n_ys_in_range = 100, show = 1, save = 0,\n xlabel = '', ylabel = '', title = '', \n levels = None, colormap = None, xticks = None, show_colorbar = 1, return_single_plot = 0):\n if colormap is None: colormap = cm.jet\n\n #print '[funct ([0.0001, 0.0005]) for funct in functs] = ' + str([funct ([0.0001, 0.0005]) for funct in functs])\n\n xs, functs = simulSortLists(xs, functs)\n n_xs = len(xs)\n x_bounds = [xs[0] - (xs[1] - xs[0]) / 2.0] + [(xs[i] + xs[i-1]) / 2.0 for i in range(1, n_xs)] + [xs[n_xs - 1] + (xs[n_xs - 1] - xs[n_xs - 2]) / 2.0 ]\n ys = np.linspace(y_range[0], y_range[1], n_ys_in_range)\n\n if axarr is None:\n f, axarr = plt.subplots(1,1, squeeze = False)\n if axarr_indeces is None:\n axarr_indeces = [0,0]\n axarr_to_plot = indexMultiDimArrayWithUnknownNumberOfIndeces(axarr, axarr_indeces)\n\n full_funct = lambda x, y: functs[getIndexOfClosestElement(xs, x)] (y)\n '[full_funct(x, 2.0) for x in xs] = '\n\n last_contours = ''\n\n for i in range(len(xs)):\n if i % 100 == 0: print ('Working on x ' + str(xs[i]) + ' which has i = ' + str(i) + ' of total ' + str(len(xs)) )\n x_range = (x_bounds[i] + white_space / 2.0, x_bounds[i+1] - white_space / 2.0)\n funct = functs[i] \n xmesh, ymesh = np.meshgrid(x_range, ys)\n funct_vals = funct(ys)\n #print 'funct_vals = ' + str(funct_vals) \n val_grid = np.array( [ [val, val] for val in funct_vals] ).reshape(np.shape(ymesh)) \n axarr_to_plot.set_xlim([ x_bounds[0], x_bounds[len(x_bounds)-1] ])\n 
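# --- Editor's illustrative aside (not part of the original script) ---
# Each pass through this loop paints one constant-width vertical strip with
# contourf; stripped of the bookkeeping, the trick is a 2-column meshgrid
# whose two columns carry the same value for every y:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     ys = np.linspace(0.0, 1.0, 100)
#     xmesh, ymesh = np.meshgrid((0.0, 1.0), ys)     # both have shape (100, 2)
#     vals = np.sin(2 * np.pi * ys)                  # one value per y
#     val_grid = np.array([[v, v] for v in vals]).reshape(ymesh.shape)
#     fig, ax = plt.subplots()
#     ax.contourf(xmesh, ymesh, val_grid)            # a single colored strip
#     plt.show()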
axarr_to_plot.set_ylim(y_range[0], y_range[1])\n #print 'val_grid = ' + str(val_grid) \n last_contours = axarr_to_plot.contourf(xmesh, ymesh, val_grid, levels = levels, cmap = colormap)\n #print ('!!!!!HERE1!!!!!') \n #axarr_to_plot.contourl(xmesh, ymesh, val_grid, levels = levels, cmap = 'black')\n\n if xticks is None:\n axarr_to_plot.set_xticks(xs)\n else:\n axarr_to_plot.set_xticks(xticks)\n if show_colorbar: plt.colorbar(last_contours, np.linspace(0.0, 1.0, 11))\n axarr_to_plot.set_xlabel(xlabel)\n axarr_to_plot.set_ylabel(ylabel)\n axarr_to_plot.set_title(title) \n \n if show:\n plt.show() \n\n if return_single_plot: return last_contours\n else: return 1\n","repo_name":"stubbslab/general_purpose_python_scripts","sub_path":"showParallel1DTempPlots.py","file_name":"showParallel1DTempPlots.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32329425279","text":"def main():\n x = input()\n a = ['0']* len(x)\n for i in range(len(x)):\n if x[i] == '0':\n a[i] = '1'\n ans = ''.join(a)\n print(ans)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kip-i/AtCoderLog","sub_path":"ABC289/ABC289A.py","file_name":"ABC289A.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42106023151","text":"days = 256\nwith open('input.txt') as f:\n lfish = [int(i) for i in f.readline().split(\",\")]\n\npopulation = {\n 0:0,\n 1:0,\n 2:0,\n 3:0,\n 4:0,\n 5:0,\n 6:0,\n 7:0,\n 8:0\n}\n\nfor fish in lfish:\n population[fish] += 1\n\nfor i in range(days):\n population2 = population.copy()\n for idx in range(len(population) - 1):\n if idx == 6:\n continue\n if idx == 0:\n population2[8] = population[0]\n population2[6] = population[0] + population[7]\n \n population2[idx] = population[idx+1]\n \n population = population2.copy()\n\n\nprint(sum(population.values()))\n\n","repo_name":"andrazpovse/aoc2021","sub_path":"day6/py.py","file_name":"py.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15071652179","text":"N=int(input())\r\nA=[int(input())-1 for _ in range(N)]\r\nresult=[False]*N\r\nk=0\r\nwhile 1:\r\n\tnewlight=A[k]\r\n\tif k==1:\r\n\t\tans=0\r\n\t\tbreak\r\n\telif result[k]:\r\n\t\tans=1\r\n\t\tbreak\r\n\telse:\r\n\t\tresult[k]=True\r\n\t\tk=newlight\r\nif ans:\r\n\tprint(-1)\r\nelse:print(sum(result))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc065/B/4904063.py","file_name":"4904063.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"14816548899","text":"\n\n\n\n\nfrom caffe2.python import recurrent, workspace\nfrom caffe2.python.model_helper import ModelHelper\nfrom hypothesis import given, settings\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nimport hypothesis.strategies as st\nimport numpy as np\n\nclass RecurrentNetworkTest(serial.SerializedTestCase):\n @given(T=st.integers(1, 4),\n n=st.integers(1, 5),\n d=st.integers(1, 5))\n @settings(deadline=10000)\n def test_sum_mul(self, T, n, d):\n model = ModelHelper(name='external')\n\n input_blob, initial_input_blob = model.net.AddExternalInputs(\n 'input', 'initial_input')\n\n step = ModelHelper(name='step', param_model=model)\n input_t, 
output_t_prev = step.net.AddExternalInput(\n 'input_t', 'output_t_prev')\n output_t_internal = step.net.Sum([input_t, output_t_prev])\n output_t = step.net.Mul([input_t, output_t_internal])\n step.net.AddExternalOutput(output_t)\n\n self.simple_rnn(T, n, d, model, step, input_t, output_t, output_t_prev,\n input_blob, initial_input_blob)\n\n @given(T=st.integers(1, 4),\n n=st.integers(1, 5),\n d=st.integers(1, 5))\n @settings(deadline=10000)\n def test_mul(self, T, n, d):\n model = ModelHelper(name='external')\n\n input_blob, initial_input_blob = model.net.AddExternalInputs(\n 'input', 'initial_input')\n\n step = ModelHelper(name='step', param_model=model)\n input_t, output_t_prev = step.net.AddExternalInput(\n 'input_t', 'output_t_prev')\n output_t = step.net.Mul([input_t, output_t_prev])\n step.net.AddExternalOutput(output_t)\n\n self.simple_rnn(T, n, d, model, step, input_t, output_t, output_t_prev,\n input_blob, initial_input_blob)\n\n @given(T=st.integers(1, 4),\n n=st.integers(1, 5),\n d=st.integers(1, 5))\n def test_extract(self, T, n, d):\n model = ModelHelper(name='external')\n workspace.ResetWorkspace()\n\n input_blob, initial_input_blob = model.net.AddExternalInputs(\n 'input', 'initial_input')\n\n step = ModelHelper(name='step', param_model=model)\n input_t, output_t_prev = step.net.AddExternalInput(\n 'input_t', 'output_t_prev')\n output_t = step.net.Mul([input_t, output_t_prev])\n step.net.AddExternalOutput(output_t)\n\n inputs = np.random.randn(T, n, d).astype(np.float32)\n initial_input = np.random.randn(1, n, d).astype(np.float32)\n recurrent.recurrent_net(\n net=model.net,\n cell_net=step.net,\n inputs=[(input_t, input_blob)],\n initial_cell_inputs=[(output_t_prev, initial_input_blob)],\n links={output_t_prev: output_t},\n scope=\"test_rnn_sum_mull\",\n )\n\n workspace.blobs[input_blob] = inputs\n workspace.blobs[initial_input_blob] = initial_input\n\n workspace.RunNetOnce(model.param_init_net)\n workspace.CreateNet(model.net)\n\n prefix = \"extractTest\"\n\n workspace.RunNet(model.net.Proto().name, T)\n retrieved_blobs = recurrent.retrieve_step_blobs(\n model.net, prefix\n )\n\n # needed for python3.6, which returns bytearrays instead of str\n retrieved_blobs = [x.decode() for x in retrieved_blobs]\n\n for i in range(T):\n blob_name = prefix + \"_\" + \"input_t\" + str(i)\n self.assertTrue(\n blob_name in retrieved_blobs,\n \"blob extraction failed on timestep {}\\\n . 
\\n\\n Extracted Blobs: {} \\n\\n Looking for {}\\\n .\".format(i, retrieved_blobs, blob_name)\n )\n\n def simple_rnn(self, T, n, d, model, step, input_t, output_t, output_t_prev,\n input_blob, initial_input_blob):\n\n input = np.random.randn(T, n, d).astype(np.float32)\n initial_input = np.random.randn(1, n, d).astype(np.float32)\n print(locals())\n recurrent.recurrent_net(\n net=model.net,\n cell_net=step.net,\n inputs=[(input_t, input_blob)],\n initial_cell_inputs=[(output_t_prev, initial_input_blob)],\n links={output_t_prev: output_t},\n scope=\"test_rnn_sum_mull\",\n )\n workspace.blobs[input_blob] = input\n workspace.blobs[initial_input_blob] = initial_input\n\n op = model.net._net.op[-1]\n # Just conviniently store all inputs in an array in the same\n # order as op.input\n inputs = [workspace.blobs[name] for name in op.input]\n\n def reference(input, initial_input):\n global_ws_name = workspace.CurrentWorkspace()\n input_all = workspace.blobs[input_blob]\n\n workspace.SwitchWorkspace(\"ref\", create_if_missing=True)\n workspace.blobs[input_blob] = input\n workspace.blobs[output_t_prev] = initial_input.reshape(n, d)\n res_all = np.zeros(shape=input.shape, dtype=np.float32)\n\n for t_cur in range(T):\n workspace.blobs[input_t] = input_all[t_cur]\n workspace.RunNetOnce(step.net)\n result_t = workspace.blobs[output_t]\n workspace.blobs[output_t_prev] = result_t\n res_all[t_cur] = result_t\n\n workspace.SwitchWorkspace(global_ws_name)\n\n shape = list(input.shape)\n shape[0] = 1\n return (res_all, res_all[-1].reshape(shape))\n\n self.assertReferenceChecks(\n device_option=hu.cpu_do,\n op=op,\n inputs=inputs,\n reference=reference,\n output_to_grad=op.output[0],\n outputs_to_check=[0, 1],\n )\n\n self.assertGradientChecks(\n device_option=hu.cpu_do,\n op=op,\n inputs=inputs,\n outputs_to_check=0,\n outputs_with_grads=[0],\n threshold=0.01,\n stepsize=0.005,\n )\n\n # Hacky version of 1-D convolution\n def _convolution_1d(\n self,\n model,\n inputs,\n conv_window,\n conv_filter,\n conv_bias,\n output_name,\n left_pad,\n ):\n if left_pad:\n padding_width = conv_window - 1\n else:\n padding_width = 0\n\n # [batch_size, inputs_length, state_size]\n inputs_transposed = model.net.Transpose(\n inputs,\n 'inputs_transposed',\n axes=[1, 0, 2],\n )\n # [batch_size, 1, inputs_length, state_size]\n inputs_transposed_4d = model.net.ExpandDims(\n inputs_transposed,\n 'inputs_transposed_4d',\n dims=[1],\n )\n # [batch_size, 1, inputs_length - conv_window + 1, state_size]\n output_transposed_4d = model.net.Conv(\n [inputs_transposed_4d, conv_filter, conv_bias],\n output_name + '_transposed_4d',\n kernel_h=1,\n kernel_w=conv_window,\n order='NHWC',\n pad_t=0,\n pad_l=padding_width,\n pad_b=0,\n pad_r=0,\n )\n # [batch_size, inputs_length - conv_window + 1, state_size]\n output_transposed = model.net.Squeeze(\n output_transposed_4d,\n output_name + '_transposed',\n dims=[1],\n )\n # [inputs_length - conv_window + 1, batch_size, state_size]\n output = model.net.Transpose(\n output_transposed,\n output_name,\n axes=[1, 0, 2],\n )\n return output\n\n @given(sequence_length=st.integers(3, 7),\n conv_window=st.integers(1, 3),\n batch_size=st.integers(1, 5),\n state_size=st.integers(1, 5))\n def test_stateful_convolution_forward_only(\n self,\n sequence_length,\n conv_window,\n batch_size,\n state_size,\n ):\n '''\n This unit test demonstrates another ways of using RecurrentNetwork.\n\n Imagine, that you want to compute convolution over a sequence,\n but sequence elements are not given to you from the 
beginning,\n        so you have to loop over the sequence and compute convolution\n        for each element separately. This situation can occur\n        during the inference/generation step of a neural network.\n\n        First of all, you have to provide the actual input via recurrent states,\n        since the input of RecurrentNetwork should be known in advance.\n        Here, we use `fake_inputs` as the input,\n        and it's used by the op to extract batch size and sequence length.\n        The actual input sequence is stored in the recurrent state\n        `input_state`. At every step we generate a new element via input_state_t\n        (in this example, input_state_t is generated at random, but\n        in a real situation it can be created using convolution output\n        from the previous step).\n\n        A few important differences from the regular RecurrentNetwork use case:\n\n        1. input_state_t_prev is not just the single previous element of the\n        input_state sequence. It is the last conv_window elements including (!)\n        the current one - input_state_t. We specify that using the `link_window`\n        argument of RecurrentNetwork. We need that many elements to\n        compute a single convolution step. Also, note that `link_window`\n        specifies how many elements to link starting at\n        the `timestep` + `link_offset` position.\n\n        2. The first few steps might require additional zero padding from the left,\n        since not enough elements of the input_state sequence are available.\n        So the initial_state for input_state contains several elements\n        (exactly as many pads as we need for the first step). Also, because of\n        that, all offsetting over the input_state sequence is shifted\n        by the length of initial_input_state: see the `link_offset` and `alias_offset`\n        arguments of RecurrentNetwork.\n\n        In this test, we assert that we get the same result\n        if we apply convolution over all elements simultaneously,\n        since the whole input_state sequence is available at the end.\n        '''\n        model = ModelHelper(name='model')\n        fake_inputs = model.param_init_net.UniformFill(\n            [],\n            'fake_inputs',\n            min=-1.0,\n            max=1.0,\n            shape=[sequence_length, batch_size, state_size],\n        )\n        initial_input_state = model.param_init_net.ConstantFill(\n            [],\n            'initial_input_state',\n            value=0.0,\n            shape=[conv_window - 1, batch_size, state_size],\n        )\n        initial_output_state = model.param_init_net.ConstantFill(\n            [],\n            'initial_output_state',\n            value=0.0,\n            shape=[1, batch_size, state_size],\n        )\n        step_model = ModelHelper(name='step_model', param_model=model)\n        (\n            fake_input_t,\n            timestep,\n            input_state_t_prev,\n        ) = step_model.net.AddExternalInputs(\n            'fake_input_t',\n            'timestep',\n            'input_state_t_prev',\n        )\n        conv_filter = step_model.param_init_net.XavierFill(\n            [],\n            'conv_filter',\n            shape=[state_size, 1, conv_window, state_size],\n        )\n        conv_bias = step_model.param_init_net.ConstantFill(\n            [],\n            'conv_bias',\n            shape=[state_size],\n            value=0.0,\n        )\n        step_model.params.extend([conv_filter, conv_bias])\n        input_state_t = step_model.net.UniformFill(\n            [],\n            'input_state_t',\n            min=-1.0,\n            max=1.0,\n            shape=[1, batch_size, state_size],\n        )\n        output_state_t = self._convolution_1d(\n            model=step_model,\n            inputs=input_state_t_prev,\n            conv_window=conv_window,\n            conv_filter=conv_filter,\n            conv_bias=conv_bias,\n            output_name='output_state_t',\n            left_pad=False,\n        )\n        initial_recurrent_states = [initial_input_state, initial_output_state]\n        all_inputs = (\n            [fake_inputs] + step_model.params + initial_recurrent_states\n        )\n        all_outputs = ['input_state_all', 'output_state_all']\n        recurrent_states = ['input_state', 'output_state']\n        input_state_all, output_state_all, _ = 
model.net.RecurrentNetwork(\n            all_inputs,\n            all_outputs + ['step_workspaces'],\n            param=[all_inputs.index(p) for p in step_model.params],\n            alias_src=recurrent_states,\n            alias_dst=all_outputs,\n            alias_offset=[conv_window - 1, 1],\n            recurrent_states=recurrent_states,\n            initial_recurrent_state_ids=[\n                all_inputs.index(s) for s in initial_recurrent_states\n            ],\n            link_internal=[\n                str(input_state_t_prev),\n                str(input_state_t),\n                str(output_state_t),\n            ],\n            link_external=['input_state', 'input_state', 'output_state'],\n            link_offset=[0, conv_window - 1, 1],\n            link_window=[conv_window, 1, 1],\n            backward_link_internal=[],\n            backward_link_external=[],\n            backward_link_offset=[],\n            step_net=step_model.net.Proto(),\n            timestep='timestep' if timestep is None else str(timestep),\n            outputs_with_grads=[],\n        )\n\n        output_states_2 = self._convolution_1d(\n            model=model,\n            inputs=input_state_all,\n            conv_window=conv_window,\n            conv_filter=conv_filter,\n            conv_bias=conv_bias,\n            output_name='output_states_2',\n            left_pad=True,\n        )\n\n        workspace.RunNetOnce(model.param_init_net)\n        workspace.RunNetOnce(model.net)\n\n        np.testing.assert_almost_equal(\n            workspace.FetchBlob(output_state_all),\n            workspace.FetchBlob(output_states_2),\n            decimal=3,\n        )\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/python/operator_test/recurrent_network_test.py","file_name":"recurrent_network_test.py","file_ext":"py","file_size_in_byte":14048,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}\n{"seq_id":"2737830018","text":"from words import english_words\nimport random\nimport re\n\ndef choseWord():\n    # randint is inclusive on both ends, so the index must be capped at len - 1\n    return english_words[random.randint(0, len(english_words) - 1)]\n\ndef checkCorrectInput(letter):\n    while len(letter)>1 or len(re.findall(\"[a-zA-Z]\",letter)) == 0:\n        letter = str(input(\"You must type one letter to complete the word. 
Please enter a letter : \"))\n return letter.lower()\n\ndef word_display(word,right_letters):\n display = \"\"\n for i in range(len(word)):\n if word[i] in right_letters:\n display = display + word[i]\n else :\n if i == 0 or ( i > 5 and i == len(word)) :\n display = display + word[i]\n else :\n if word[i] == \"-\":\n display = display + word[i]\n else :\n display = display + \"_\"\n \n print(display.upper().center(80,\" \"))\n\ndef wrong_letters_display(wrong_letters):\n display = \"Wrong letters : \"\n for i in range(len(wrong_letters)):\n display = display + \"|\" + wrong_letters[i] \n print(display)\n print(\"\\n\")\n\ndef start():\n print (\" 🔥 Welcome to the Hangman Game 🔥\".center(80,'*'))\n print(\"\\n\")\n right_letters = []\n wrong_letters = []\n user_in = \"\"\n tries = 0\n\n word_to_find = choseWord()\n letters_to_find = []\n for i in range(len(word_to_find)):\n if word_to_find[i] not in letters_to_find :\n if word_to_find[i] == \"-\" :\n continue\n letters_to_find.append(word_to_find[i])\n right_letters.append(word_to_find[0])\n\n\n if len(word_to_find) > 5 :\n right_letters.append(word_to_find[len(word_to_find)-1])\n\n while len(right_letters) != len(letters_to_find):\n word_display(word_to_find,right_letters)\n print(\"%s tries left.\" % ((5-tries)))\n user_in = str(input(\"Chose a letter🖊️ : \\n\"))\n user_in = checkCorrectInput(user_in)\n\n if user_in not in right_letters and user_in in word_to_find and tries != 5:\n right_letters.append(user_in)\n print (\" Well played !👍 \".center(80,'*'))\n elif user_in not in word_to_find and tries != 5:\n if user_in not in wrong_letters :\n wrong_letters.append(user_in)\n print (\" Missed !🙁 \".center(80,'*'))\n wrong_letters_display(wrong_letters)\n tries = tries +1\n\n if tries == 5:\n break\n if tries == 0 :\n print (\" 😱😱🎉🎉 INCREDIBLE ! You've found at the first attempt. 😱😱🎉🎉 \".center(80,'*') )\n elif tries == 5 :\n print (\" ⏰ GAME OVER. You've make to much tries. The word was %s. \".center(80,'*') % (word_to_find)) \n elif tries != 0 and tries != 5:\n print (\" ✨ Congratulations ! You've found the word in %s tries. ✨ \".center(80,'*') % (tries))\n \nstart()","repo_name":"jeanPcr/hi-python","sub_path":"ex04_hangman/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21388078929","text":"# Bài 5.2 Nhập mảng số nguyên từ bàn phím. 
Compute the average of the negative\n# numbers and the sum of the positive numbers, then print the results.\n\ndef main():\n    n = int(input('Nhap so nguyen duong N: '))\n    if n <= 0:\n        print('ERROR')\n        return None\n    sum_pos = 0\n    sum_neg = 0\n    count_neg = 0\n    print('Nhap n so nguyen cach nhau boi enter:')\n    mang_strform = input()\n    inp = mang_strform.split(' ')\n    for i in range(0, n):\n        tmp = int(inp[i])\n        if tmp < 0: sum_neg += tmp; count_neg += 1\n        else: sum_pos += tmp\n    if sum_pos == 0:\n        print('Mang ko co so duong!')\n        # guard against division by zero when there are no negatives either\n        if count_neg == 0: print('Mang ko co so am')\n        else: print(f'Gia tri trung binh cong cac so am: {sum_neg/count_neg}')\n    else:\n        print(f'Tong các số dương: {sum_pos}')\n        if count_neg == 0: print('Mang ko co so am')\n        else: print(f'Gia tri trung binh cong cac so am: {sum_neg/count_neg}')\n\nmain()","repo_name":"tiendat01/source-subject-hust","sub_path":"Python-learn-IT3150-ProjectI/thuc-hanh-TinDC-byPython/BaiTHso5/bai5.2.py","file_name":"bai5.2.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}\n{"seq_id":"31949465372","text":"import os\nimport platform\n\n\ndef system(command):\n    retcode = os.system(command)\n    if retcode != 0:\n        raise Exception(\"Error while executing:\\n\\t %s\" % command)\n\n\nif __name__ == \"__main__\":\n    system('conan export memsharded/testing')\n\n    if platform.system() == \"Windows\":\n        system('conan test -s compiler=\"Visual Studio\" -s compiler.version=12 -s build_type=Debug '\n               '-s compiler.runtime=MDd')\n        system('conan test -s compiler=\"Visual Studio\" -s compiler.version=12 '\n               '-s build_type=Release -s compiler.runtime=MD')\n    else:\n        system('conan test -s compiler=\"gcc\" -s build_type=Debug')\n        system('conan test -s compiler=\"gcc\" -s build_type=Release')\n","repo_name":"lasote/conan-zmq","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}\n{"seq_id":"40506549237","text":"import re\nfrom queue import Queue\nfrom typing import Optional, Set\n\nfrom aoc.util.coordinate import BoundingBox, Coordinate\nfrom aoc.util.grid import InfiniteGrid\nfrom aoc.util.inputs import Input\n\n\nclass FiringAttempt(object):\n    def __init__(self, x_velocity: int, y_velocity: int):\n        self._x = 0\n        self._y = 0\n        self._x_velocity = x_velocity\n        self._y_velocity = y_velocity\n        self._max_y = 0\n\n    @property\n    def coordinate(self) -> Coordinate:\n        return Coordinate(self._x, self._y)\n\n    @property\n    def max_y(self) -> int:\n        return self._max_y\n\n    def step(self):\n        self._x += self._x_velocity\n        self._y += self._y_velocity\n        self._max_y = max(self._max_y, self._y)\n\n        if self._x_velocity > 0:\n            self._x_velocity -= 1\n        elif self._x_velocity < 0:\n            self._x_velocity += 1\n\n        self._y_velocity -= 1\n\n    def step_until(self, bounding_box: BoundingBox) -> Optional[Coordinate]:\n        while self._y >= bounding_box.min_y:\n            if self.coordinate in bounding_box:\n                return self.coordinate\n            self.step()\n\n        return None\n\n\nclass Y2021D17(object):\n    def __init__(self, file_name):\n        line = Input(file_name).line()\n\n        target_regex = re.compile(r'target area: x=(-?\\d+)..(-?\\d+), y=(-?\\d+)..(-?\\d+)')\n        match = target_regex.match(line)\n        box = BoundingBox()\n        box = box.expand_x(int(match.group(1)))\n        box = box.expand_x(int(match.group(2)))\n        box = box.expand_y(int(match.group(3)))\n        box = box.expand_y(int(match.group(4)))\n        self.target = box\n\n        grid: InfiniteGrid[str] = InfiniteGrid[str]()\n\n        start_x = 1\n        attempt = 
FiringAttempt(start_x, 0)\n        while attempt.step_until(self.target) is None:\n            start_x += 1\n            attempt = FiringAttempt(start_x, 0)\n\n        starting_coordinate = Coordinate(start_x, 0)\n        queue: Queue = Queue()\n        queue.put(starting_coordinate)\n        seen: Set[Coordinate] = set()\n        seen.add(starting_coordinate)\n\n        max_y = 0\n\n        while not queue.empty():\n            item: Coordinate = queue.get()\n            attempt = FiringAttempt(item.x, item.y)\n            end = attempt.step_until(self.target)\n            if end is not None:\n                max_y = max(max_y, attempt.max_y)\n                grid[item] = '*'\n                for dx in range(-5, 20):\n                    for dy in range(-20, 20):\n                        neighbor = Coordinate(item.x + dx, item.y + dy)\n                        if neighbor in seen:\n                            continue\n\n                        queue.put(neighbor)\n                        seen.add(neighbor)\n\n        for x in range(self.target.min_x, self.target.max_x + 1):\n            for y in range(self.target.min_y, self.target.max_y + 1):\n                grid[Coordinate(x, y)] = '#'\n\n        grid[Coordinate(0, 0)] = '!'\n        # grid.to_grid().print(not_found='.')\n\n        self.max_y = max_y\n        self.hitting_velocity_count = len(grid.find(lambda _: _ in '*#'))\n\n    def part1(self):\n        result = self.max_y\n\n        print(\"Part 1:\", result)\n\n    def part2(self):\n        result = self.hitting_velocity_count\n\n        print(\"Part 2:\", result)\n\n\nif __name__ == '__main__':\n    code = Y2021D17(\"2021/17.txt\")\n    code.part1()\n    code.part2()\n","repo_name":"Jnesselr/AdventOfCode","sub_path":"aoc/y2021/d17.py","file_name":"d17.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}\n{"seq_id":"6772655261","text":"import dataclasses\n\nfrom typing import Iterable, List\n\n@dataclasses.dataclass\nclass DataDefinition:\n    hyper_cols: List[str]\n    seed_col: str\n    time_col: str\n    environment_col: str\n    algorithm_col: str\n\n\n_data_def: DataDefinition | None = None\n\ndef data_definition(\n    hyper_cols: Iterable[str],\n    seed_col: str = 'seed',\n    time_col: str = 'frame',\n    environment_col: str = 'environment',\n    algorithm_col: str = 'algorithm',\n\n    make_global: bool = False,\n) -> DataDefinition:\n    d = DataDefinition(\n        hyper_cols=list(sorted(hyper_cols)),\n        seed_col=seed_col,\n        time_col=time_col,\n        environment_col=environment_col,\n        algorithm_col=algorithm_col,\n    )\n\n    if make_global:\n        global _data_def\n        _data_def = d\n\n    return d\n\n\ndef get_global_data_def():\n    global _data_def\n    assert _data_def is not None\n    return _data_def\n\n\ndef maybe_global(d: DataDefinition | None):\n    if d is None:\n        return get_global_data_def()\n\n    return d\n","repo_name":"andnp/RlEvaluation","sub_path":"RlEvaluation/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}\n{"seq_id":"5396679816","text":"from django.http import JsonResponse\nfrom rest_framework.decorators import api_view\nfrom .serializers import *\nfrom datetime import datetime\n\n\n@api_view(['GET'])\ndef api_city(request):\n    # Part A\n    if request.method == 'GET':\n        city = City.objects.all()\n        serializer = CitySerializers(city, many=True)\n        return JsonResponse(serializer.data, safe=False, status=200)\n    else:\n        # JsonResponse requires a data argument, even for error responses\n        return JsonResponse({}, status=400)\n\n\n@api_view(['GET'])\ndef api_city_street(request, city_id):\n    # Part B\n    if request.method == 'GET':\n        # select the streets belonging to the given city, not a street by its own pk\n        street = Street.objects.filter(city_id=city_id)\n        serializer = StreetSerializers(street, many=True)\n        return JsonResponse(serializer.data, safe=False, status=200)\n    else:\n        return JsonResponse({}, status=400)\n\n\n@api_view(['GET', 'POST'])\ndef api_shop(request):\n    # Part 
C\n    if request.method == 'POST':\n        data = dict(request.POST)\n        shop = Shop.objects.create(name=data['name'][0], house_number=data['house_number'][0],\n                                   openTime=data['openTime'][0], closeTime=data['closeTime'][0],\n                                   city_id=data['city_id'][0], street_id=data['street_id'][0])\n        serializer = ShopCreateSerializers(shop)\n        return JsonResponse(serializer.data, safe=False, status=200)\n    # Part D\n    elif request.method == 'GET':\n        street = request.GET.get(\"street\", \"\")\n        city = request.GET.get(\"city\", \"\")\n        is_open = request.GET.get(\"open\", \"\") == \"1\"  # renamed to avoid shadowing the builtin open\n        d = datetime.now().time()\n        if is_open:\n            # a shop is open when openTime <= now < closeTime\n            shops = Shop.objects.filter(openTime__lte=d, closeTime__gt=d) \\\n                .filter(city__name=city).filter(street__name=street)\n            serializer = ShopSerializers(shops, many=True)\n            return JsonResponse(serializer.data, safe=False, status=200)\n        else:\n            shops = Shop.objects.exclude(openTime__lte=d, closeTime__gt=d) \\\n                .filter(city__name=city).filter(street__name=street)\n            serializer = ShopSerializers(shops, many=True)\n            return JsonResponse(serializer.data, safe=False, status=200)\n    else:\n        return JsonResponse({}, status=400)\n","repo_name":"shalapaichka1/mediatest2.0","sub_path":"newapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}\n{"seq_id":"7547996557","text":"from odoo import fields, models, api, _\n\n\nclass EmployeeReports(models.TransientModel):\n    _name = \"employee.head.count\"\n\n    based_on = fields.Selection([('Department', 'Department'), ('Job', 'Job'), ('Manager', 'Manager')], string='Based on')\n    department_ids = fields.Many2many('hr.department', string='Department')\n    job_ids = fields.Many2many('hr.job', string='Job')\n    manager_ids = fields.Many2many('hr.employee', string='Manager')\n\n    @api.multi\n    def print_reports(self):\n        self.ensure_one()\n        return self.env.ref('slnee_hr.action_report_hr_employee').report_action(self)\n\n    @api.onchange('based_on')\n    def onchange_based_on(self):\n        self.department_ids = False\n        self.job_ids = False\n        self.manager_ids = False\n\n    @api.multi\n    def get_dept(self):\n        data = []\n        if self.based_on == 'Department':\n            if not self.department_ids:\n                departments = self.env['hr.department'].search([])\n            if self.department_ids:\n                # 'in' is the correct operator when matching against a list of ids\n                departments = self.env['hr.department'].search([('id', 'in', self.department_ids.ids)])\n            for department in departments:\n                data.append(department.name)\n        if self.based_on == 'Job':\n            if not self.job_ids:\n                jobs = self.env['hr.job'].search([])\n            if self.job_ids:\n                jobs = self.env['hr.job'].search([('id', 'in', self.job_ids.ids)])\n            for job in jobs:\n                data.append(job.name)\n        if self.based_on == 'Manager':\n            if not self.manager_ids:\n                managers = self.env['hr.employee'].search([('manager', '=', True)])\n            if self.manager_ids:\n                managers = self.env['hr.employee'].search([('id', 'in', self.manager_ids.ids)])\n            for manager in managers:\n                data.append(manager.name)\n        return data\n\n    @api.multi\n    def get_emp(self, data_id):\n        emp = []\n        if self.based_on == 'Department':\n            dep = self.env['hr.department'].browse(data_id)\n            employee = self.env['hr.employee'].search([('department_id', '=', dep.id)])\n        if self.based_on == 'Job':\n            job = self.env['hr.job'].browse(data_id)\n            employee = self.env['hr.employee'].search([('job_id', '=', job.id)])\n        if self.based_on == 'Manager':\n            manager = self.env['hr.employee'].browse(data_id)\n            employee = self.env['hr.employee'].search([('parent_id', '=', manager.id)])\n        
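# build one plain dict per matching employee; the selection mapping below\n        # turns the stored employee_status value into its human-readable label\n        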
for rec in employee:\n            emp_dict = {'code': rec.code,\n                        'name': rec.name,\n                        'joining_date': rec.date_of_join,\n                        'status': dict(rec._fields['employee_status'].selection).get(rec.employee_status)}\n            emp.append(emp_dict)\n        return emp\n","repo_name":"slnee-it/SLNEE-MASTER","sub_path":"slnee_hr/wizard/employee_head_count_report.py","file_name":"employee_head_count_report.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}\n{"seq_id":"33581536981","text":"with open('chall.txt', 'rb') as f:\n    chall = f.read()\n\nl = [x for x in chall.decode()]\n\nl2 = [x.encode() for x in l]\n\nl3 = []\n\nl4 = b''\n\nfor c in l2:\n    acc = 0\n    if len(c) == 3:\n        # reassemble the code point from a 3-byte UTF-8 sequence\n        acc |= (c[2] & 0x3f)\n        acc |= (c[1] & 0x3f) << 6\n        acc |= (c[0] & 0xf) << 12\n    else:\n        raise Exception('bruh')\n    l3.extend([(acc & 0xff00) >> 8, (acc & 0x00ff)])\n    # l3.append(acc)\n    # l4 += bin(acc)\n\nprint(l3[:10])\n\nwith open('maybesolution', 'wb') as f:\n    f.write(bytes(l3))\n\n","repo_name":"kinshu0/google-ctf-beginners-quest-solutions","sub_path":"Quest_6try/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}\n{"seq_id":"26229285290","text":"# continue\n\nscore = [92, 86, 68, -1, 56]\n\nfor s in score:\n    if s == -1:\n        continue\n    print(s)\nprint('성적 처리 끝')\n\n# The difference: continue jumps back to the top of the loop, while break exits the loop.","repo_name":"kimvjgd/python","sub_path":"chapter06/ex06.py","file_name":"ex06.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}\n{"seq_id":"17540776462","text":"# Multiple-precision addition and subtraction\nfrom math import *\ndef convert_array(a,w,t):\n    A = []\n    for i in range(t):\n        A.append(a//(2**((t-i-1)*w)))\n        a = a % (2**((t-i-1)*w))\n    return A\n\ndef sum(A,B,w,t):\n    # parameters named A and B to match the digit arrays read below\n    C = []\n    e = 0\n    for i in range(t-1,-1,-1):\n        c = A[i] + B[i] + e\n        if 0 <= c < 2**w:\n            e = 0\n        else:\n            e = 1\n        C.append(c%(2**w))\n    return e,C[::-1]\ndef subtraction(A,B,w,t):\n    C = []\n    e = 0\n    for i in range(t-1,-1,-1):\n        c = A[i] - B[i] - e\n        if 0 <= c < 2**w:\n            e = 0\n        else:\n            e = 1\n        C.append(c%(2**w))\n    return e,C[::-1]\n\n# addition and subtraction of large integers\nw = int(input(\"Nhap W = \"))\np = 2147483647\nm = ceil(log(p,2))\nt = ceil(m/w)\nprint(\"Choose 1 to sum\")\nprint(\"Choose 2 to subtract\")\nkey = input(\"Enter: \")\nif key == \"1\":\n    print(\"Choose a to sum two arrays\")\n    print(\"Choose b to sum two numbers\")\n    newkey = input(\"Enter: \")\n    if newkey == \"a\":\n        A = [int(a) for a in input(\"Nhap A = \").split()]\n        B = [int(b) for b in input(\"Nhap B = \").split()]\n        print(sum(A,B,w,t))\n    elif newkey == \"b\":\n        a = int(input(\"Nhap a = \"))\n        b = int(input(\"Nhap b = \"))\n        A = convert_array(a,w,t)\n        B = convert_array(b,w,t)\n        print(sum(A,B,w,t))\nelif key == \"2\":\n    print(\"Choose a to subtract two arrays\")\n    print(\"Choose b to subtract two numbers\")\n    newkey = input(\"Enter: \")\n    if newkey == \"a\":\n        A = [int(a) for a in input(\"Nhap A = \").split()]\n        B = [int(b) for b in input(\"Nhap B = \").split()]\n        print(subtraction(A,B,w,t))\n    elif newkey == \"b\":\n        a = int(input(\"Nhap a = \"))\n        b = int(input(\"Nhap b = \"))\n        A = convert_array(a,w,t)\n        B = convert_array(b,w,t)\n        print(subtraction(A,B,w,t))\n","repo_name":"nguyenktrong/thuat-toan-attt-py","sub_path":"1.2 
sum-subtract.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16240468027","text":"import pandas as pd\nimport datetime \nimport re \n\n'''\nrename_columns\n\nThis function reads in files,\nextracts the year and month from the filename (e.g. transforms 202006 to 2020-jun),\nrenames the column with this date,\nand returns a new dataframe.\n'''\n\ndef rename_columns(dir_input, # directory where files are located\n filename, # name of file to process\n column, # desired column to rename eg. 'insgesamt'\n stat_type # 'wz' or 'kldb'\n ):\n\n if stat_type == 'wz':\n code = 'wz2008_code' # column name containing WZ codes\n elif stat_type == 'kldb':\n code = 'kldb2010_code'\n\n # import file\n df = pd.read_csv(f'{dir_input}/{filename}')\n \n # get year and month in filename\n yyyymm = re.search(r'[0-9]{6}', filename).group(0) \n yyyymm = str(yyyymm[:4] + '-' + yyyymm[4:])\n date = datetime.datetime.strptime(yyyymm, '%Y-%m').date() \n \n # rename column\n column_label = f\"{date.year}_{date.strftime('%b').lower()}\"\n df = df.rename(columns={column: column_label})\n df = df[[code, column_label]]\n \n return(df)","repo_name":"bertelsmannstift/OJV-distribution-analyses","sub_path":"src/data_integration_functions.py","file_name":"data_integration_functions.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"18617900790","text":"\ndef getInfo():\n var1 = input('\\nPlease provide the first numeric value: ')\n var2 = input('\\nPlease provide the second numeric value: ')\n return var1,var2\n\n\ndef compute():\n go = True\n while go:\n var1,var2 = getInfo()\n try: #try this block of code\n var3 = int(var1) + int(var2)\n go = False #if no error, exit while loop\n except ValueError as e: #if there is a ValueError, run this block of code and print origional error message\n print('{}: \\n\\nYou did not provide a numeric value!'.format(e))\n except: #if there is any other kind of error, run this block of code\n print('\\n\\nOops, you provided invalid input!')\n print('{} + {} = {}'.format(var1,var2,var3))\n \n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n compute()\n","repo_name":"rflstruthers/Small-Python-Projects","sub_path":"error_handling.py","file_name":"error_handling.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4102370271","text":"\"\"\"This module contains functions to plot the metrics of model evaluation.\"\"\"\n\n\nfrom sklearn.metrics import (\n ConfusionMatrixDisplay,\n PrecisionRecallDisplay,\n RocCurveDisplay,\n classification_report,\n)\n\n\ndef plot_confusion_matrix(model, y_test, y_pred, output_path):\n \"\"\"Plot the confusion matrix.\n\n params:\n model: model\n y_test: test target\n y_pred: predicted target\n output_path: path to the confusion matrix output\n \"\"\"\n cm = ConfusionMatrixDisplay.from_estimator(model, y_test, y_pred)\n cm.figure_.savefig(output_path)\n\n\ndef plot_precision_recall_curve(model, y_test, y_pred, output_path):\n \"\"\"Plot the precision recall curve.\n\n params:\n model: model\n y_test: test target\n y_pred: predicted target\n output_path: path to the precision recall curve output\n \"\"\"\n pr = PrecisionRecallDisplay.from_estimator(model, y_test, y_pred)\n pr.figure_.savefig(output_path)\n\n\ndef plot_roc_auc(model, y_test, y_pred, output_path):\n 
\"\"\"Plot the roc auc curve.\n\n params:\n model: model\n y_test: test target\n y_pred: predicted target\n output_path: path to the roc auc curve output\n \"\"\"\n roc = RocCurveDisplay.from_estimator(model, y_test, y_pred)\n roc.figure_.savefig(output_path)\n","repo_name":"bobcastaldeli/Customer-Churn-Prediction","sub_path":"src/models/plot_metric.py","file_name":"plot_metric.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32072637787","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nclass Route(object):\n def __init__(self, current_vertex_order, graph):\n self.vertex_order = current_vertex_order\n self.graph = graph\n self.distance_traveled = 0\n\n def __eq__(self, other):\n return self.distance_traveled == other.distance_traveled\n\n def __lt__(self, other):\n return self.distance_traveled < other.distance_traveled\n\n def __le__(self, other):\n return self.distance_traveled <= other.distance_traveled\n\n def __gt__(self, other):\n return self.distance_traveled > other.distance_traveled\n\n def __ge__(self, other):\n return self.distance_traveled >= other.distance_traveled\n\n def __str__(self):\n return str(self.vertex_order) + \"|\" + str(self.distance_traveled)\n\n def plot(self):\n x = list([])\n y = list([])\n plots = list([])\n\n # Iterate over vertices, retrieving x and y coordinates\n for vertex in self.graph.vertices:\n x.append(vertex.x)\n y.append(vertex.y)\n\n # Plot the vertices\n vertex_plot = plt.scatter(x,y, label=\"Vertices\")\n plots.append(vertex_plot)\n\n # Plot the route\n for vertex_index in range(len(self.vertex_order)-1):\n plots.append(plt.plot([self.graph.vertices[self.vertex_order[vertex_index]].x, self.graph.vertices[self.vertex_order[vertex_index+1]].x], [self.graph.vertices[self.vertex_order[vertex_index]].y, self.graph.vertices[self.vertex_order[vertex_index+1]].y], label=\"Edge {}-{}\".format(self.vertex_order[vertex_index], self.vertex_order[vertex_index+1])))\n\n # Show the graph with a legend\n plt.legend(loc=2, fontsize='small')\n plt.show()\n\n def goto(self, vertex_id):\n # If no vertex has been visisted\n if len(self.vertex_order) == 0:\n # Initialize the distance traveled to 0\n self.distance_traveled = 0\n # Add the starting vertex to the vertex_order\n self.vertex_order.append(vertex_id)\n # Mart the vertex as being visisted\n self.graph.vertices[vertex_id].visited = True\n else:\n # Find the distance between the goto vertex and the last vertex visited\n self.distance_traveled += self.graph.vertices[self.vertex_order[-1]].compute_distance(self.graph.vertices[vertex_id])\n # Add the new vertex to the vertex_order\n self.vertex_order.append(vertex_id)\n # Mark the vertex as being visisted\n self.graph.vertices[vertex_id].visited = True\n\nclass Vertex(object):\n def __init__(self, vertex_id, x, y, visited=False):\n self.vertex_id = vertex_id\n self.x = x\n self.y = y\n self.adjacent_vertices = list([])\n self.distances = list([])\n self.visited = visited\n\n def __eq__(self, other):\n return self.vertex_id == other.vertex_id\n\n def __str__(self):\n return \"(ID: \" + str(self.vertex_id) + \", X: \" + str(self.x) + \", Y: \" + str(self.y) + \", V:\" + str(self.visited) + \")\"\n\n def compute_distance(self, other_vertex):\n return ((self.x - other_vertex.x)**2 + (self.y - other_vertex.y)**2)**0.5\n\n def display(self):\n # display self\n print(self)\n # as well as every adjacent_vertex\n 
for vertex in self.adjacent_vertices:\n print(vertex)\n\n def get_unvisited_adjacent_vertex_ids(self):\n return [adjacent_vertex for adjacent_vertex in self.adjacent_vertices if adjacent_vertex.visited == False]\n\nclass Graph(object):\n def __init__(self, vertices):\n self.vertices = vertices\n self.edges = None\n\n def __str__(self):\n string = \"\"\n\n for vertex in self.vertices:\n string += str(vertex)\n string += \"\\n\"\n\n string += \"\\n\"\n\n string += str(self.edges)\n\n return string\n\n def build_graph(self):\n edge_dictionary = {}\n\n # Iterating over each vertex twice\n for index, vertex1 in enumerate(self.vertices):\n for vertex2 in self.vertices:\n # Calculate the distances for a row\n vertex2.distances.append(vertex2.compute_distance(vertex1))\n # Create a matrix of distances with pairs 0,0 , 1,1 ... all\n # having distance 0 denoting the vertex of reference for the row\n edge_dictionary[index] = vertex1.distances\n\n self.edges = pd.DataFrame(edge_dictionary)\n\n return self.edges\n\n def plot(self):\n x = list([])\n y = list([])\n plots = list([])\n\n # Iterate over vertices, retrieving x and y coordinates\n for vertex in self.vertices:\n x.append(vertex.x)\n y.append(vertex.y)\n\n # Plot the vertices\n vertex_plot = plt.scatter(x,y, label=\"Vertices\")\n plots.append(vertex_plot)\n\n # Plot the edges\n for vertex1 in self.vertices:\n for vertex2 in self.vertices:\n if vertex1 != vertex2:\n plots.append(plt.plot([vertex1.x, vertex2.x], [vertex1.y, vertex2.y], label=\"Edge {}-{}\".format(vertex1.vertex_id, vertex2.vertex_id)))\n\n # Show the graph with a legend\n plt.legend(loc=2, fontsize='small')\n plt.show()\n\n def plot_route(self, route_order):\n x = list([])\n y = list([])\n plots = list([])\n\n # Iterate over vertices, retrieving x and y coordinates\n for vertex in self.vertices:\n x.append(vertex.x)\n y.append(vertex.y)\n\n # Plot the vertices\n vertex_plot = plt.scatter(x,y, label=\"Vertices\")\n plots.append(vertex_plot)\n\n # Plot the route\n for vertex_index in range(len(route_order)-1):\n plots.append(plt.plot([self.vertices[route_order[vertex_index]].x, self.vertices[route_order[vertex_index+1]].x], [self.vertices[route_order[vertex_index]].y, self.vertices[route_order[vertex_index+1]].y], label=\"Edge {}-{}\".format(route_order[vertex_index], route_order[vertex_index+1])))\n\n # Show the graph with a legend\n plt.legend(loc=2, fontsize='small')\n plt.show()\n\n\n def get_unvisited_vertex_ids(self):\n return [vertex.vertex_id for vertex in self.vertices if not vertex.visited]\n\n def finished(self):\n if False in [vertex.visited for vertex in self.vertices]:\n return False\n else:\n return True\n","repo_name":"jtcass01/AI","sub_path":"Project 1/src/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30648920406","text":"dosya = open(file=\"metin.txt\",mode=\"r\",encoding=\"utf-8\")\r\nmetin = dosya.read()\r\ndosya.close()\r\n# print(metin)\r\ncumles=0\r\nfor krk in metin:\r\n # print(krk)\r\n if krk==\".\" or krk==\"?\" or krk==\"!\":\r\n cumles=cumles+1#cumles+=1\r\nprint(\"Metinde\",cumles,\"adet cümle var.\")\r\nheces=0\r\nfor krk in metin:\r\n if krk in \"EUIOÜAİÖeuıoüaiö\" :\r\n heces+=1\r\nprint(\"Metinde\",heces,\"adet hece 
vardır.\")","repo_name":"serkancam/byfp2-2020-2021","sub_path":"hs-S1/cumle_sayma.py","file_name":"cumle_sayma.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24163728621","text":"import os\nimport os.path as osp\nimport math\nimport random\nimport pickle\nimport warnings\nimport glob\n\nimport imageio\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\nimport torchvision.transforms as T\nimport torchvision.datasets as datasets\nfrom torchvision.datasets import UCF101\nfrom torchvision.datasets.folder import make_dataset\nfrom tools.video_utils import VideoClips\nfrom torchvision.io import read_video\n\ndata_location = '/data'\nfrom utils import set_random_seed\nfrom tools.data_utils import *\n\nimport av\n\nclass VideoFolderDataset(Dataset):\n def __init__(self,\n root,\n train,\n resolution,\n path=None,\n n_frames=16,\n skip=1,\n fold=1,\n max_size=None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip.\n use_labels=False, # Enable conditioning labels? False = label dimension is zero.\n return_vid=False, # True for evaluating FVD\n time_saliency=False,\n sub=False,\n seed=42,\n **super_kwargs, # Additional arguments for the Dataset base class.\n ):\n\n video_root = osp.join(os.path.join(root))\n if not 1 <= fold <= 3:\n raise ValueError(\"fold should be between 1 and 3, got {}\".format(fold))\n\n self.path = video_root\n name = video_root.split('/')[-1]\n self.name = name\n self.train = train\n self.fold = fold\n self.resolution = resolution\n self.nframes = n_frames\n self.annotation_path = os.path.join(video_root, 'ucfTrainTestlist')\n self.classes = list(natsorted(p for p in os.listdir(video_root) if osp.isdir(osp.join(video_root, p))))\n self.classes.remove('ucfTrainTestlist')\n class_to_idx = {self.classes[i]: i for i in range(len(self.classes))}\n self.samples = make_dataset(video_root, class_to_idx, ('avi',), is_valid_file=None)\n video_list = [x[0] for x in self.samples]\n\n self.video_list = video_list\n \"\"\"\n if train:\n self.video_list = video_list\n else:\n self.video_list = []\n for p in video_list:\n video = read_video(p)[0]\n if len(video) >= 128:\n self.video_list.append(p)\n \"\"\"\n\n\n self._use_labels = use_labels\n self._label_shape = None\n self._raw_labels = None\n self._raw_shape = [len(self.video_list)] + [3, resolution, resolution]\n self.num_channels = 3\n self.return_vid = return_vid\n\n frames_between_clips = skip\n print(root, frames_between_clips, n_frames)\n self.indices = self._select_fold(self.video_list, self.annotation_path,\n fold, train)\n\n self.size = len(self.indices)\n print(self.size)\n random.seed(seed)\n self.shuffle_indices = [i for i in range(self.size)]\n random.shuffle(self.shuffle_indices)\n\n self._need_init = True\n\n def _select_fold(self, video_list, annotation_path, fold, train):\n name = \"train\" if train else \"test\"\n name = \"{}list{:02d}.txt\".format(name, fold)\n f = os.path.join(annotation_path, name)\n selected_files = []\n with open(f, \"r\") as fid:\n data = fid.readlines()\n data = [x.strip().split(\" \") for x in data]\n data = [os.path.join(self.path, x[0]) for x in data]\n\n \"\"\"\n for p in data:\n if p in video_list == False:\n data.remove(p)\n \"\"\"\n\n selected_files.extend(data)\n\n \"\"\"\n name = \"train\" if not train else \"test\"\n name = \"{}list{:02d}.txt\".format(name, fold)\n f = 
os.path.join(annotation_path, name)\n with open(f, \"r\") as fid:\n data = fid.readlines()\n data = [x.strip().split(\" \") for x in data]\n data = [os.path.join(self.path, x[0]) for x in data]\n selected_files.extend(data)\n \"\"\"\n\n selected_files = set(selected_files)\n indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]\n return indices\n\n def __len__(self):\n return self.size\n\n def _preprocess(self, video):\n video = resize_crop(video, self.resolution)\n return video\n\n def __getitem__(self, idx):\n idx = self.shuffle_indices[idx]\n idx = self.indices[idx]\n video = read_video(self.video_list[idx])[0]\n prefix = np.random.randint(len(video)-self.nframes+1)\n video = video[prefix:prefix+self.nframes].float().permute(3,0,1,2)\n\n return self._preprocess(video), idx\n\nclass ImageFolderDataset(Dataset):\n def __init__(self,\n path, # Path to directory or zip.\n resolution=None,\n nframes=16, # number of frames for each video.\n train=True,\n interpolate=False,\n loader=default_loader, # loader for \"sequence\" of images\n return_vid=True, # True for evaluating FVD\n cond=False,\n **super_kwargs, # Additional arguments for the Dataset base class.\n ):\n\n self._path = path\n self._zipfile = None\n self.apply_resize = True\n\n # classes, class_to_idx = find_classes(path)\n if 'taichi' in path and not interpolate:\n classes, class_to_idx = find_classes(path)\n imgs = make_imagefolder_dataset(path, nframes * 4, class_to_idx, True)\n elif 'kinetics' in path or 'KINETICS' in path:\n if train:\n split = 'train'\n else:\n split = 'val'\n classes, class_to_idx = find_classes(path)\n imgs = make_imagefolder_dataset(path, nframes, class_to_idx, False, split)\n elif 'SKY' in path:\n if train:\n split = 'train'\n else:\n split = 'test'\n path = os.path.join(path, split)\n classes, class_to_idx = find_classes(path)\n if cond:\n imgs = make_imagefolder_dataset(path, nframes // 2, class_to_idx, False, split)\n else:\n imgs = make_imagefolder_dataset(path, nframes, class_to_idx, False, split)\n else:\n classes, class_to_idx = find_classes(path)\n imgs = make_imagefolder_dataset(path, nframes, class_to_idx, False)\n\n if len(imgs) == 0:\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + path + \"\\n\"\n \"Supported image extensions are: \" +\n \",\".join(IMG_EXTENSIONS)))\n\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.nframes = nframes\n self.loader = loader\n self.img_resolution = resolution\n self._path = path\n self._total_size = len(self.imgs)\n self._raw_shape = [self._total_size] + [3, resolution, resolution]\n self.xflip = False \n self.return_vid = return_vid\n self.shuffle_indices = [i for i in range(self._total_size)]\n self.to_tensor = transforms.ToTensor()\n random.shuffle(self.shuffle_indices)\n self._type = \"dir\"\n\n def _file_ext(self, fname):\n return os.path.splitext(fname)[1].lower()\n\n def _get_zipfile(self):\n assert self._type == 'zip'\n if self._zipfile is None:\n self._zipfile = zipfile.ZipFile(self._path)\n return self._zipfile\n\n def _open_file(self, fname):\n if self._type == 'dir':\n return open(os.path.join(fname), 'rb')\n if self._type == 'zip':\n return self._get_zipfile().open(fname, 'r')\n return None\n\n def close(self):\n try:\n if self._zipfile is not None:\n self._zipfile.close()\n finally:\n self._zipfile = None\n\n def _load_img_from_path(self, folder, fname):\n path = os.path.join(folder, fname)\n with self._open_file(path) as f:\n if pyspng is not None and self._file_ext(path) 
== '.png':\n img = pyspng.load(f.read())\n img = rearrange(img, 'h w c -> c h w')\n else:\n img = self.to_tensor(PIL.Image.open(f)).numpy() * 255 # c h w\n return img\n\n def __getitem__(self, index):\n index = self.shuffle_indices[index]\n path = self.imgs[index]\n\n # clip is a list of 32 frames\n video = natsorted(os.listdir(path[0]))\n\n # zero padding. only unconditional modeling\n if len(video) < self.nframes:\n prefix = np.random.randint(len(video)-self.nframes//2+1)\n clip = video[prefix:prefix+self.nframes//2]\n else:\n prefix = np.random.randint(len(video)-self.nframes+1)\n clip = video[prefix:prefix+self.nframes]\n \n assert (len(clip) == self.nframes or len(clip)*2 == self.nframes)\n\n vid = np.stack([self._load_img_from_path(folder=path[0], fname=clip[i]) for i in range(len(clip))], axis=0)\n vid = resize_crop(torch.from_numpy(vid).float(), resolution=self.img_resolution) # c t h w \n if vid.size(1) == self.nframes//2:\n vid = torch.cat([torch.zeros_like(vid).to(vid.device), vid], dim=1)\n\n return rearrange(vid, 'c t h w -> t c h w'), index\n\n\n def __len__(self):\n return self._total_size\n\ndef get_loaders(rank, imgstr, resolution, timesteps, skip, batch_size=1, n_gpus=1, seed=42, cond=False, use_train_set=False):\n \"\"\"\n Load dataloaders for an image dataset, center-cropped to a resolution.\n \"\"\"\n if imgstr == 'UCF101':\n train_dir = os.path.join(data_location, 'UCF-101')\n test_dir = os.path.join(data_location, 'UCF-101') # We use all \n if cond:\n print(\"here\")\n timesteps *= 2 # for long generation\n trainset = VideoFolderDataset(train_dir, train=True, resolution=resolution, n_frames=timesteps, skip=skip, seed=seed)\n print(len(trainset))\n testset = VideoFolderDataset(train_dir, train=False, resolution=resolution, n_frames=timesteps, skip=skip, seed=seed)\n print(len(testset))\n \n elif imgstr == 'SKY':\n train_dir = os.path.join(data_location, 'SKY')\n test_dir = os.path.join(data_location, 'SKY')\n if cond:\n print(\"here\")\n timesteps *= 2 # for long generation\n trainset = ImageFolderDataset(train_dir, train=True, resolution=resolution, nframes=timesteps, cond=cond)\n print(len(trainset))\n testset = ImageFolderDataset(test_dir, train=False, resolution=resolution, nframes=timesteps, cond=cond)\n print(len(testset))\n\n else:\n raise NotImplementedError() \n\n shuffle = False if use_train_set else True\n\n kwargs = {'pin_memory': True, 'num_workers': 3}\n\n trainset_sampler = InfiniteSampler(dataset=trainset, rank=rank, num_replicas=n_gpus, seed=seed)\n trainloader = DataLoader(trainset, sampler=trainset_sampler, batch_size=batch_size // n_gpus, pin_memory=False, num_workers=4, prefetch_factor=2)\n \n testset_sampler = InfiniteSampler(testset, num_replicas=n_gpus, rank=rank, seed=seed)\n testloader = DataLoader(testset, sampler=testset_sampler, batch_size=batch_size // n_gpus, pin_memory=False, num_workers=4, prefetch_factor=2)\n\n return trainloader, trainloader, testloader \n\n\n","repo_name":"sihyun-yu/PVDM","sub_path":"tools/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":11614,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"72"} +{"seq_id":"2872733605","text":"# File to route the web app to appropriate address\nimport flask\nfrom MainApp import app\nfrom flask import render_template, redirect, url_for, flash, request\nimport base64\nimport requests\nfrom urllib.parse import urlparse\n\nid = \"\"\nsecret = \"\"\nAPI_URL = 
'https://api.mercedes-benz.com/experimental/connectedvehicle/v2/vehicles'\nAUTH_URL = 'https://id.mercedes-benz.com/as/authorization.oauth2'\nTOKEN_URL = 'https://id.mercedes-benz.com/as/token.oauth2'\nTRY_URL = 'https://api.mercedes-benz.com/experimental/connectedvehicle_tryout/v2/vehicles'\nREDIRECT_URL = 'http://namnhatpham1995.pythonanywhere.com/'\n\n\n# Default page\n@app.route('/')\n@app.route('/index')\ndef index():\n    code = flask.request.args.get(\"code\")\n    if code is not None:\n        print(code)\n        return redirect(url_for('get_token', code=code))\n    else:\n        return redirect(url_for('login'))\n\n\n# Page to insert ID and Secret key\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n    if request.method == 'POST':\n        # reject the form if either field is missing, not only when both are\n        if not request.form.get('username') or not request.form.get('password'):\n            flash('Please enter username and password!', 'error')\n        else:\n            global id\n            global secret\n            id = request.form['username']  # take user's identity from input\n            secret = request.form['password']\n            print(id)\n            print(secret)\n            return redirect(url_for('get_auth_code'))\n    return render_template('login.html')\n\n\n# Get authentication code used for getting token\n@app.route('/get_auth_code', methods=['GET', 'POST'])\ndef get_auth_code():\n    print(id)\n    host = urlparse(request.base_url)\n    redirect_uri = \"http://\" + host.hostname + \":5000/\"\n    url = AUTH_URL + \"?response_type=code&redirect_uri=\" + redirect_uri + \"&client_id=\" + id + \"&scope=mb:vehicle:status:general mb:user:pool:reader offline_access \"\n    return redirect(url)\n\n\n# Get token\n@app.route('/get_token/<code>', methods=['GET', 'POST'])\ndef get_token(code):\n    Encoded_ID_Secret = (base64.b64encode((id + \":\" + secret).encode(\"ascii\"))).decode(\"ascii\")\n\n    headers = {\n        'Authorization': 'Basic ' + Encoded_ID_Secret,\n        'content-type': 'application/x-www-form-urlencoded',\n    }\n\n    data = {\n        'grant_type': 'authorization_code',\n        'code': code,\n        'redirect_uri': 'http://192.168.0.126:5000/'\n    }\n\n    response = requests.post(TOKEN_URL, headers=headers, data=data)\n    # contentType = response.headers.get(\"content-type\")\n    # print(contentType)\n    response_json = response.json()\n    global access_token\n    global refresh_token\n    access_token = response_json['access_token']\n    print(\"Access_Token: \" + access_token)\n    print(type(access_token))\n    refresh_token = response_json['refresh_token']\n    print(\"Refresh_Token: \" + refresh_token)\n    # return redirect(url_for('try_out'))\n    return redirect(url_for('call_api'))\n    # return response.content\n\n\n# Call API using token\n@app.route('/call_api', methods=['GET', 'POST'])\ndef call_api():\n    try:\n        headers = {\n            'Content-Type': 'application/json',\n            'Accept': 'application/json',\n            \"authorization\": \"Bearer \" + access_token\n        }\n    except NameError:\n        # no token has been fetched yet, so restart the login flow instead\n        # of falling through to an unbound headers variable\n        print('No available token')\n        return redirect(url_for('login'))\n    print(headers)\n    response = requests.get(API_URL, headers=headers)\n    response_json = response.json()\n    error_code = response_json['code']\n    return response.content\n\n\n# Test demo\n@app.route('/try_out', methods=['GET', 'POST'])\ndef try_out():\n    try:\n        headers = {\n            \"authorization\": \"Bearer \" + \"a1b2c3d4-a1b2-a1b2-a1b2-a1b2c3d4e5f6\"\n        }\n        print(headers)\n    except:\n        print('No available token')\n\n    response = requests.get(TRY_URL, headers=headers)\n    response_json = response.json()\n    # error_code = response_json['code']\n    return 
response.content\n","repo_name":"namnhatpham1995/Check_vehicles_data","sub_path":"MainApp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33156293530","text":"import os\nimport os.path\n\nfrom pyant import command, maven\nfrom pyant.app import const, __build__\nfrom pyant.builtin import __os__\n\n__all__ = ('build', )\n\nclass build(__build__):\n def __init__(self):\n artifact_repos = {\n 'snapshot' : 'umebn-snapshot-generic',\n 'alpha' : 'umebn-alpha-generic',\n 'release' : 'umebn-release-generic'\n }\n\n super().__init__(\n 'umebn',\n const.UMEBN_REPOS,\n artifact_repos\n )\n\n self.type = 'umebn'\n\n def update(self, branch = None):\n if os.path.isdir(self.path):\n if os.path.isfile(os.path.join(self.path, '.git/index.lock')):\n time.sleep(30)\n\n return True\n else:\n return git.pull(self.path, revert = True)\n else:\n return git.clone(self.repos, self.path, branch)\n\n def compile_pom(self, cmd = None):\n return super().compile_pom(cmd, 'devops/parent/build/pom.xml')\n\n def compile(self, cmd = None, clean = False, retry_cmd = None, dirname = None):\n if not dirname:\n dirname = 'build'\n\n if not super().compile(cmd, clean, retry_cmd, dirname, None, True):\n return False\n\n with __os__.chdir(self.path) as chdir:\n mvn = maven.maven()\n map = mvn.artifactid_paths(dirname)\n\n for path in map.values():\n if not self.oki(path, os.environ.get('VERSION')):\n return False\n\n return True\n\n # ------------------------------------------------------\n\n def package_home(self, version, type):\n return super().package_home(version, self.type)\n\n def metric_id(self, module_name = None):\n return const.METRIC_ID_UMEBN\n\n def oki(self, path, version):\n if os.path.isdir(os.path.join(path, 'output')):\n cmd = command.command()\n cmdline = 'python3 %s %s %s' % (const.OKI_FILE, os.path.join(path, 'output'), version)\n\n for line in cmd.command(cmdline):\n print(line)\n\n return True\n","repo_name":"pythoner-github/PyAnt","sub_path":"pyant/app/umebn/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36098903540","text":"import os\nimport re\n\n\ndef main():\n file = open(\"input\", \"r\")\n ep = sorted([int(x) for x in file.readlines()])\n file.close()\n\n l = len(ep)\n for i in range(l):\n vi = ep[i]\n for j in range(i+1, l):\n vj = ep[j]\n vij = vi + vj\n for k in range(j+1, l):\n vk = ep[k]\n v = vij + vk\n if v == 2020:\n print(vi * vj * vk)\n return\n elif v > 2020:\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"EliTheCreator/AdventOfCode","sub_path":"2020/day01/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"334385141","text":"\nfrom rest_framework import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . 
import views\n\nrouter = routers.SimpleRouter(trailing_slash=False)\nrouter.register('empresa', views.EmpresaView, basename='empresa')\nrouter.register('equipamento', views.EquipamentoView, basename='equipamento')\nrouter.register('proprietario', views.ProprietarioView, basename='proprietario')\nrouter.register('tipo_equipamento', views.TipoEquipamentoView, basename='tipo_equipamento')\nrouter.register('responsavel_tecnico', views.ResponsavelTecnicoView, basename='responsavel_tecnico')\nrouter.register('chamado_equipamento', views.ChamadoEquipamentoView, basename='chamado_equipamento')\nurlpatterns = format_suffix_patterns(router.urls)\n","repo_name":"leandrocamposcardoso/Arkmeds","sub_path":"arkmeds-api/equipamentos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8923740659","text":"import sys\nimport math\nimport numpy as np\n\ndef load_csv(input_filename, delimiter):\n input_file = open(input_filename, \"r\")\n lines = input_file.readlines()\n input_file.close()\n\n results = {}\n current_query = None\n current_results = []\n for idx, line in enumerate(lines):\n if idx == 0:\n # assuming header, skip\n continue\n\n parts = line.strip().split(delimiter)\n\n query_id = parts[0]\n\n if query_id != current_query:\n current_query = query_id\n current_results = []\n results[current_query] = current_results\n\n if len(parts) == 7:\n formula_id = parts[1]\n #doc = parts[2]\n #loc = parts[3]\n #rank = parts[4]\n #group = parts[5]\n slt = parts[6]\n\n #current_results.append((formula_id, doc, loc, rank, group, slt))\n current_results.append(slt)\n\n return results\n\n\ndef filter_unique_slt(results, max_k):\n for query_id in results:\n query_slts = results[query_id]\n\n last_slt = query_slts[0]\n filtered = [last_slt]\n\n for pos in range(1, len(query_slts)):\n if query_slts[pos] != last_slt:\n last_slt = query_slts[pos]\n filtered.append(last_slt)\n\n if len(filtered) >= max_k:\n break\n\n results[query_id] = filtered\n\n return results\n\ndef load_gold_standard(filename, max_k):\n input_file = open(filename, \"r\")\n lines = input_file.readlines()\n input_file.close()\n\n ranks = {}\n top_relevance = []\n for idx, line in enumerate(lines):\n if idx == 0:\n continue\n\n parts = line.strip().split(\"\\t\")\n\n #SLT\trank\tgroup\tscore1\tscore2\tscore3\n\n slt = parts[0]\n mss = float(parts[3])\n\n ranks[slt] = mss\n\n if len(top_relevance) < max_k:\n top_relevance.append(mss)\n\n return ranks, top_relevance\n\n\ndef DCG(values, max_k):\n result = 0.0\n if len(values) > 0:\n result = values[0]\n\n for k in range(1, min(max_k, len(values))):\n result += values[k] / math.log(k + 1, 2)\n\n return result\n\n\ndef compute_nDCG(ranks, k_values, gold_std_prefix):\n nDCG = {k: {} for k in k_values}\n\n max_k = max(k_values)\n\n for query_id in ranks:\n # load ground truth ...\n gold_std, top_gold_mss = load_gold_standard(gold_std_prefix + query_id + \".tsv\", max_k)\n\n # compute IDCGp\n IDCG = {k : DCG(top_gold_mss, k) for k in k_values}\n\n # obtain relevance values ...\n rel_values = []\n for pos in range(min(max_k, len(ranks[query_id]))):\n slt = ranks[query_id][pos]\n if slt not in gold_std and \"\\\\\" in slt:\n slt = slt.replace(\"\\\\\", \"\\\\\\\\\")\n if slt not in gold_std:\n print(\"Query \" + query_id + \", Not found in Gold Standard: \" + slt)\n rel_values.append(0.0)\n continue\n\n rel_values.append(gold_std[slt])\n\n # now, compute nDCGp\n for k in 
k_values:\n if k <= len(rel_values):\n nDCG[k][query_id] = DCG(rel_values, k) / IDCG[k]\n\n return nDCG\n\ndef show_nDCG(nDCG, condition):\n #sorted_k = [str(x) for x in sorted([int(x) for x in nDCG])]\n sorted_k = sorted(list(nDCG.keys()))\n\n row_str = condition + \"\\t{0:.2f}\\t{1:.2f}\\t{2}\"\n for k in sorted_k:\n # convert values to NP matrix and then get AVG and STD\n values = [nDCG[k][query_id] for query_id in nDCG[k]]\n tempo = np.array(values)\n\n avg = tempo.mean() * 100.0\n std = tempo.std() * 100.0\n\n print(row_str.format(avg, std, k))\n\n\ndef save_nDCG(nDCG, output_filename):\n out_file = open(output_filename, \"w\")\n out_file.write(\"queryId\\tk\\tnDCG\\n\")\n\n for k in nDCG:\n for query_id in nDCG[k]:\n out_file.write(query_id + \"\\t\" + str(k) + \"\\t\" + str(nDCG[k][query_id]) + \"\\n\")\n\n out_file.close()\n\ndef main():\n if len(sys.argv) < 5:\n print(\"Usage\")\n print(\"\\tpython nDCG_metrics.py core_input gs_prefix output_dist K_values\")\n print(\"\")\n print(\"Where:\")\n print(\"\\tcore_input\\t: Path to File with core results in tsv format\")\n print(\"\\tgs_prefix\\t: Prefix for gold standard files\")\n print(\"\\toutput_dist\\t: File where the raw nDCG values will be stored\")\n print(\"\\tK_values\\t: Maximum rank values to consider, use 0 for Infinite\")\n print(\"\")\n return\n\n core_filename = sys.argv[1]\n gold_prefix = sys.argv[2]\n output_filename = sys.argv[3]\n\n condition = core_filename.split(\"/\")[-1]\n\n #print(\"------------ Metrics for ----------\")\n #print(\"File: \" + condition)\n\n K_values = []\n for arg_val in sys.argv[4:]:\n try:\n K_values.append(int(arg_val))\n except:\n print(\"Invalid K value: \" + arg_val)\n return\n\n K_values = sorted(K_values)\n\n max_K = K_values[-1]\n\n # load input ...\n records = load_csv(core_filename, \"\\t\")\n\n records = filter_unique_slt(records, max_K)\n\n results = compute_nDCG(records, K_values, gold_prefix)\n\n # show results ...\n show_nDCG(results, condition)\n\n # save results ...\n save_nDCG(results, output_filename)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"hopper-project/tangent-v031","sub_path":"tangent/experiment_tools/nDCG_metrics.py","file_name":"nDCG_metrics.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70091761192","text":"import numpy as np\nimport random as rand\nimport time\nimport json\nfrom collections import Counter\nfrom logic.dmm_interface import dmm_interface\n\nMSR = 'KEY_MEASUREMENT'\nMSG = 'KEY_MESSAGE'\nERR = 'KEY_ERROR'\n\ndef perform_check(expected_values,channel_naming,dmm_ip=''):\n try:\n dmm = dmm_interface()\n except:\n pass\n expected_values=np.array(expected_values)\n yield_data = lambda key,value,n: {'key':key, 'value':value, 'rownum': n}\n channel_naming=np.array(channel_naming)\n print(channel_naming)\n tests = []\n for r in range(len(expected_values)):\n row = expected_values[r]\n try:\n sig1_matrix = channel_naming[\n (channel_naming[:,3]==row[0])\n &\n (channel_naming[:,4]==row[1])][0,0]\n sig2_matrix = channel_naming[\n (channel_naming[:,3]==row[2])\n & \n (channel_naming[:,4]==row[3])][0,0]\n addrow = np.hstack([row,[sig1_matrix,sig2_matrix]])\n tests.append(addrow)\n except IndexError as e:\n raise\n\n tests = np.array(tests)\n\n #split tests into arrays of disconnected and connected tests\n tests_disconnect = tests[np.where(tests[:,4]=='0')]\n tests_connect = tests[np.where(tests[:,4]=='1')]\n\n # perform connected tests\n for 
row in tests_connect:\n        mini, maxi, matrix1, matrix2 = float(row[5]), float(row[6]), row[7], row[8]\n\n        # TODO: Change to close multiple channels at once, has Toshin tested this?\n        # Otherwise, we just need to come up with a cute way to do this fast.\n        diff = 0.1 * (maxi - mini)  # placeholder spread for the simulated reading; tune as needed\n        measurement = rand.uniform(mini - diff, maxi + diff)\n        success = 0\n        if mini < measurement < maxi:\n            success = 1\n        return_row = np.hstack([row,[success,measurement]])\n        yield yield_data(key=MSR,value=return_row.tolist(),n=1)\n\n    # perform disconnected tests\n    # first, group signals by frequency (so that we find more errors per test)\n    sig_by_freq = Counter(np.array([tests_disconnect[:,8],tests_disconnect[:,7]]).flatten())\n    for s1 in sig_by_freq:\n        # for more frequent signals (s1), find every node it *isn't* connected to, and test them against s1 all at once\n        indices = np.hstack([np.where(tests_disconnect[:,7]==s1),np.where(tests_disconnect[:,8]==s1)])\n\n        if not indices.size:\n            # means there are no tests in the array for s1, so continue the loop\n            # (break would be wrong here: other signals may still have tests left)\n            continue\n        rows = tests_disconnect[indices][0]\n        for return_row,n in parallell_disconnect(s1,rows,dmm=''):\n            yield yield_data(key=MSR,value=[r.tolist() for r in return_row],n=n)\n        tests_disconnect = np.delete(tests_disconnect,indices,axis=0)\n\ndef parallell_disconnect(s1,rows,dmm):\n    s1_first = rows[np.where(rows[:,7]==s1)]\n    s1_second = rows[np.where(rows[:,8]==s1)]\n    s2_list = np.hstack([s1_first[:,8],s1_second[:,7]])\n    minima = rows[:,5]\n    mini = min([float(i) for i in minima.flatten()])\n\n    # perform check\n    # measurement = dmm.test_parallell(s1,s2_list)\n    measurement = 0\n    success = 0\n    if measurement > mini:\n        success = 1\n        return_rows = [np.hstack([r,[success,measurement]]) for r in rows]\n        yield return_rows, len(return_rows)\n    # if measurement fails, we break down into binary search to find errant connections\n    else:\n        rows_split = np.array_split(rows,2)\n        for c in rows_split:\n            success = 0\n            if len(c)>1:\n                yield from parallell_disconnect(s1,c,dmm)\n            elif len(c)==1:\n                c = c[0]\n                measurement = dmm.measure_R(c[7],c[8])\n                print([mini,measurement])\n                if mini < measurement:\n                    success = 1\n                return_row = np.hstack([c,[success,measurement]])\n                yield return_row, 1\n            else:\n                continue\n","repo_name":"ctdunc/continuity-check","sub_path":"logic/continuity.py","file_name":"continuity.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}\n{"seq_id":"21995018160","text":"# ***Roaming Romans Solution***\n# Difficulty: 1.5\n# Time Limit: 1 second, Memory Limit: 1024 MB\n# CPU Time: 0.05 s\n# Author: Steven Zeil\n# Source: 2017 ICPC Mid-Atlantic Regional Practice Contest\n# Link: https://open.kattis.com/problems/romans\n\n\nn = float(input())\nx = round(n * 1000 * 5280/4854)\nprint(x)\n","repo_name":"ahmedsiad/kattis-solutions","sub_path":"romans/romans.py","file_name":"romans.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}\n{"seq_id":"41420974204","text":"from __future__ import print_function, absolute_import, division\r\n\r\nimport time\r\n\r\nfrom numpy import array, nansum, fabs, copy, empty, linspace, isnan\r\nfrom scipy import integrate\r\nimport numpy as np\r\n\r\nimport stimator.de as de\r\nfrom stimator.dynamics import init2array, ModelSolver, solve, get_outputs_decl\r\nimport stimator.fim as fim\r\nimport 
stimator.timecourse as timecourse\r\nimport stimator.plots as plots\r\nfrom stimator.utils import _is_sequence\r\n\r\n\r\n# ----------------------------------------------------------------------------\r\n# Optimum criteria for TC similarity\r\n# ----------------------------------------------------------------------------\r\n\r\ndef get_matching_indexes(repnames, tc):\r\n # find indexers to match a solution and tc\r\n # to create compatible subsets for objetive functions\r\n # return the subsets\r\n sol_indexes, tc_indexes = [], []\r\n ntimes = tc.ntimes\r\n\r\n for i, yexp in enumerate(tc):\r\n # count NaN, keep only columns with sufficient number of points\r\n nnan = len(yexp[np.isnan(yexp)])\r\n if nnan >= ntimes - 1:\r\n continue\r\n colname = tc.names[i]\r\n if colname not in repnames:\r\n continue\r\n tc_indexes.append(i)\r\n sol_indexes.append(repnames.index(colname))\r\n\r\n tc_indexes = array(tc_indexes, int)\r\n sol_indexes = array(sol_indexes, int)\r\n return sol_indexes, tc_indexes\r\n\r\n\r\ndef getRangeVars(tcs, varnames):\r\n ranges = [0.0 for i in range(len(varnames))]\r\n for ix, x in enumerate(varnames):\r\n for tc in tcs:\r\n yexp = tc[x]\r\n tpe = (max(yexp) - min(yexp))\r\n ranges[ix] = max(ranges[ix], tpe)\r\n return ranges\r\n\r\ndef get_criterium(repnames, tc, weights=None):\r\n \"\"\"Returns a function to compute the objective function given a solution and a timecourse.\r\n\r\n The function has signature\r\n criterium(Y,i)\r\n Y is the predicted timecourse, for a given set of parameters.\r\n i is the index of the timecourse.\r\n The function returns a float.\r\n\r\n tc is a Solutions object holding ('experimental') timecourse data,\r\n each timecourse has shape (nvars, ntimes).\r\n\r\n weights can be:\r\n\r\n None : no weighting (simple LSquares, S = sum((Ypred-Yexp)**2))\r\n all others are weighted LSquares, S = (Ypred-Yexp).T * W * (Ypred-Yexp)\r\n 'demo' : demo weighting W = 1/j with j = 1,...,nvars\r\n \"\"\"\r\n sol_indexes, tc_indexes = get_matching_indexes(repnames, tc)\r\n\r\n def unweighted_criterium(sol, tc):\r\n sol_subset, tc_subset = sol[sol_indexes], tc[tc_indexes]\r\n d = (sol_subset - tc_subset)\r\n return np.sum(d * d)\r\n\r\n if weights is None:\r\n return unweighted_criterium\r\n else:\r\n return unweighted_criterium\r\n\r\n # return np.sum(d.T * W[i] * d)\r\n\r\n # TODO: weights not implemented\r\n return None\r\n\r\n\r\n# ----------------------------------------------------------------------------\r\n# Class to perform DE optimization for ODE systems\r\n# ----------------------------------------------------------------------------\r\n\r\nclass OptimumData(object):\r\n \"\"\"Object that holds optimum solution data.\"\"\"\r\n\r\n def __init__(self, optimizer):\r\n self.optimizer = optimizer\r\n\r\n def info(self):\r\n optimum = self\r\n headerformat = \"--- %-20s -----------------------------\\n\"\r\n res = \"\\n\" + (headerformat % 'PARAMETERS')\r\n res += \"\\n\".join([\"%s\\t%12g +- %g\" % i for i in optimum.parameters])\r\n res += '\\n\\n'\r\n res += headerformat % 'OPTIMIZATION'\r\n res += \"%s\\t%g\\n\" % ('Final Score', optimum.optimization_score)\r\n res += \"%s\\t%d\\n\" % ('generations', optimum.optimization_generations)\r\n\r\n res += \"%s\\t%d\\n\" % ('max generations', optimum.max_generations)\r\n res += \"%s\\t%d\\n\" % ('population size', optimum.pop_size)\r\n res += \"%s\\t%s\\n\" % ('Exit by', optimum.optimization_exit_by)\r\n res += '\\n\\n'\r\n\r\n res += headerformat % 'TIME COURSES'\r\n res += '\\t\\t'.join(['Name', 'Points', 
'Score'])+'\\n'\r\n res += \"\\n\".join([\"%s\\t%d\\t%g\" % i for i in optimum.tcdata])\r\n res += '\\n\\n'\r\n return res\r\n\r\n def print_info(self):\r\n print (self.info())\r\n \r\n def __str__(self):\r\n return self.info()\r\n\r\n def plot(self, **kwargs):\r\n return plots.plot_estim_optimum(self, **kwargs)\r\n\r\n def plot_generations(self, **kwargs):\r\n return plots.plot_generations(self, **kwargs)\r\n\r\nclass DeODEOptimizer(de.DESolver):\r\n \"\"\"Overides energy function and report functions.\r\n\r\n The energy function solves ODEs and computes a least-squares score.\r\n Ticker functions are called on completion of a generation and when\r\n optimization finishes.\r\n \"\"\"\r\n\r\n def __init__(self, model, optSettings, tcs, weights=None,\r\n aMsgTicker=None,\r\n anEndComputationTicker=None,\r\n dump_generations=None,\r\n dump_predictions=False,\r\n initial='init',\r\n max_generations=200,\r\n convergence_noimprovement=20):\r\n #self.varnames = model.varnames\r\n self.model = model.copy()\r\n self.model_solvers = []\r\n self.tc = tcs\r\n self.endTicker = anEndComputationTicker\r\n self.msgTicker = aMsgTicker\r\n self.dump_predictions = dump_predictions\r\n self.dump_generations = dump_generations\r\n\r\n # reorder variables according to model\r\n self.tc.order_by_modelvars(self.model)\r\n\r\n pars = model.with_bounds\r\n self.par_names = [p.name for p in self.model.with_bounds]\r\n self.varnames = list(self.model.varnames)\r\n mins = array([u.bounds.lower for u in pars])\r\n maxs = array([u.bounds.upper for u in pars])\r\n\r\n if optSettings.get('pop_size', None) is None:\r\n optSettings['pop_size'] = optSettings['genomesize']\r\n if optSettings.get('max_generations', None) is None:\r\n optSettings['max_generations'] = optSettings['generations']\r\n max_generations=optSettings['max_generations']\r\n\r\n de.DESolver.__init__(self, len(pars), # number of parameters\r\n int(optSettings['pop_size']), # pop size\r\n mins, maxs, # min and max parameter values\r\n \"Best2Exp\", # DE strategy\r\n 0.7, 0.6, 0.0, # DiffScale, p crossover, Cut-off S\r\n max_generations=max_generations,\r\n convergence_noimprovement=convergence_noimprovement)\r\n\r\n # cutoffEnergy is 1e-6 of deviation from data\r\n self.cutoffEnergy = 1.0e-6 * sum([nansum(fabs(tc.data)) for tc in self.tc])\r\n\r\n # create one ModelSolver per timecorse\r\n self.criterium = []\r\n\r\n # overide initial values of solver with first time-course point\r\n for tc in self.tc:\r\n X0 = []\r\n for xname in model.varnames:\r\n x0value = tc[xname][0] if xname in tc.names else self.model.get_init(xname)\r\n X0.append(x0value)\r\n X0 = array(X0, dtype=float)\r\n ms = ModelSolver(self.model,\r\n times=tc.t.copy(),\r\n initial=X0,\r\n title=tc.title,\r\n changing_pars=self.par_names)\r\n self.model_solvers.append(ms)\r\n solnames = ms.solutions_names()\r\n self.criterium.append(get_criterium(solnames, tc, weights))\r\n\r\n self.timecourse_scores = empty(len(self.tc))\r\n\r\n def computeSolution(self, i, trial, dense=None):\r\n \"\"\"Computes solution for timecourse i, given parameters trial.\"\"\"\r\n\r\n if dense is not None:\r\n npoints = 1000\r\n else:\r\n npoints = None\r\n sol = self.model_solvers[i].solve(par_values=trial, npoints=npoints)\r\n return sol\r\n\r\n def external_score_function(self, trial):\r\n # if out of bounds flag with error energy\r\n for p, minInitialValue, maxInitialValue in zip(trial, self.min_values, self.max_values):\r\n if p > maxInitialValue or p < minInitialValue:\r\n return float('inf')\r\n # set 
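The external_score_function being defined at this point rejects out-of-bounds trials with an infinite energy and otherwise sums per-timecourse least-squares scores. The same pattern in isolation, as a hedged sketch; make_score and the toy residual functions are illustrative, not stimator API:

import numpy as np

def make_score(min_values, max_values, residual_fns):
    def score(trial):
        # out-of-bounds trials are flagged with an "error energy"
        if np.any(trial < min_values) or np.any(trial > max_values):
            return float("inf")
        # otherwise the global score is the sum of per-dataset scores
        return sum(fn(trial) for fn in residual_fns)
    return score

score = make_score(np.array([0.0, 0.0]), np.array([2.0, 2.0]),
                   [lambda t: (t[0] - 1.0) ** 2, lambda t: (t[1] - 0.5) ** 2])
print(score(np.array([1.0, 0.5])))   # 0.0
print(score(np.array([3.0, 0.5])))   # inf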
parameter values from trial\r\n #self.model.set_uncertain(trial)\r\n\r\n # compute solutions and scores\r\n for i in range(len(self.tc)):\r\n sol = self.model_solvers[i].solve(par_values=trial)\r\n #sol = self.computeSolution(i, trial)\r\n if sol is not None:\r\n self.timecourse_scores[i] = self.criterium[i](sol, self.tc[i])\r\n else:\r\n return float('inf')\r\n\r\n globalscore = self.timecourse_scores.sum()\r\n return globalscore\r\n\r\n def reportInitial(self):\r\n msg = \"\\nSolving %s...\" % self.model.metadata.get('title', '')\r\n #initialize stopwatch\r\n self.start_time = time.time()\r\n if self.dump_generations is not None:\r\n self.dumpfile = open('generations.txt', 'w')\r\n if not self.msgTicker:\r\n print (msg)\r\n else:\r\n self.msgTicker(msg)\r\n\r\n def reportGeneration(self):\r\n msg = \"%-4d: %f\" % (self.generation, float(self.best_score))\r\n if not self.msgTicker:\r\n print (msg)\r\n else:\r\n self.msgTicker(msg)\r\n if self.dump_generations is not None:\r\n print (self.generation_string(self.generation), file=self.dumpfile)\r\n\r\n def reportFinal(self):\r\n if self.exitCode <= 0:\r\n outCode = -1\r\n else:\r\n outCode = self.exitCode\r\n self.generate_optimum()\r\n if not self.endTicker:\r\n de.DESolver.reportFinal(self)\r\n else:\r\n self.endTicker(outCode)\r\n if self.dump_generations is not None:\r\n print (self.generation_string(self.generation), file=self.dumpfile)\r\n self.dumpfile.close()\r\n\r\n def generation_string(self, generation):\r\n generation = str(generation)\r\n # find if objectives is iterable\r\n isiter = hasattr(self.scores[0], '__contains__')\r\n res = 'generation %s -------------------------\\n' % generation\r\n for s, o in zip(self.pop, self.scores):\r\n sstr = ' '.join([str(i) for i in s])\r\n if isiter:\r\n ostr = ' '.join([str(i) for i in o])\r\n else:\r\n ostr = str(o)\r\n res = res + '%s %s\\n' % (sstr, ostr)\r\n return res\r\n\r\n def generate_optimum(self):\r\n # compute parameter standard errors, based on FIM-1\r\n # generate TC solutions\r\n best = OptimumData(self)\r\n best.optimization_score = self.best_score\r\n best.optimization_generations = self.generation\r\n best.optimization_exit_by = self.exitCodeStrings[self.exitCode]\r\n best.max_generations = self.max_generations\r\n best.pop_size = self.pop_size\r\n\r\n # TODO: Store initial solver parameters?\r\n\r\n # generate best time-courses\r\n\r\n parameters = list(zip(self.par_names, [x for x in self.best]))\r\n\r\n sols = timecourse.Solutions()\r\n best.tcdata = []\r\n\r\n for (i, tc) in enumerate(self.tc):\r\n sol = self.computeSolution(i, self.best)\r\n if sol is not None:\r\n score = self.criterium[i](sol, tc)\r\n else:\r\n score = 1.0E300\r\n sols += sol\r\n best.tcdata.append((self.tc[i].title, tc.ntimes, score))\r\n\r\n best.optimum_tcs = sols\r\n\r\n if not (fim.SYMPY_INSTALLED):\r\n best.parameters = [(p, v, 0.0) for (p, v) in parameters]\r\n else:\r\n commonvnames = self.tc.get_common_full_vars()\r\n commonvnames = set(commonvnames).intersection(set(self.model.varnames))\r\n if len(commonvnames) == 0:\r\n commonvnames = self.model.varnames\r\n # consterror = getRangeVars(best.optimum_tcs, commonvnames)\r\n consterror = None\r\n else:\r\n commonvnames = list(commonvnames)\r\n # print(commonvnames)\r\n consterror = getRangeVars(self.tc, commonvnames)\r\n # assume 5% of range\r\n consterror = timecourse.constError_func([r * 0.05 for r in consterror])\r\n\r\n try:\r\n _, invFIM1 = fim.computeFIM(self.model,\r\n parameters,\r\n sols,\r\n consterror,\r\n commonvnames)\r\n 
best.parameters = [(self.par_names[i],\r\n value,\r\n invFIM1[i, i]**0.5)\r\n for (i, value) in enumerate(self.best)]\r\n except:\r\n best.parameters = [(p, v, 0.0) for (p, v) in parameters]\r\n\r\n sols = timecourse.Solutions()\r\n for (i, tc) in enumerate(self.tc):\r\n sol = self.computeSolution(i, self.best, dense=True)\r\n # ts = linspace(tc.t[0], tc.t[-1], 500)\r\n\r\n # sol = timecourse.SolutionTimeCourse(ts, Y.T,\r\n # self.varnames,\r\n # title=tc.title)\r\n sols += sol\r\n\r\n best.optimum_dense_tcs = sols\r\n\r\n if self.dump_generations is not None:\r\n best.generations_exist = True\r\n else:\r\n best.generations_exist = False\r\n\r\n self.optimum = best\r\n # self.generate_fitted_sols()\r\n\r\n if self.dump_predictions:\r\n fnames = ['pred_' + self.tc[i].title for i in range(len(self.tc))]\r\n best.optimum_tcs.write_to(fnames, verbose=True)\r\n\r\n\r\ndef s_timate(model, timecourses=None, opt_settings=None,\r\n tc_dir=None,\r\n names=None,\r\n verbose_readingTCs=True,\r\n **kwargs):\r\n\r\n # create a default dict of optimizer settings,\r\n # then update with .metadata['optSettings']\r\n # finally, update with argument opt_settings\r\n optSettings = {'pop_size': 80,\r\n 'max_generations': 200,\r\n 'optimizer': 'DeODEOptimizer'}\r\n if model.metadata.get('optSettings', None) is not None:\r\n optSettings.update(model.metadata['optSettings'])\r\n if opt_settings is not None:\r\n optSettings.update(opt_settings)\r\n\r\n # timecourses argument is used to indicate time-course files\r\n # if it is None, then use model.metadata['timecourses']\r\n if timecourses is None:\r\n timecourses = model # use model as source in readTCs\r\n\r\n tcs = timecourse.readTCs(timecourses,\r\n filedir=tc_dir,\r\n names=names,\r\n verbose=verbose_readingTCs)\r\n\r\n optimizer = DeODEOptimizer(model, optSettings, tcs, **kwargs)\r\n optimizer.run()\r\n return optimizer.optimum\r\n\r\n\r\ndef test():\r\n from stimator import read_model, Solution\r\n\r\n # --- example 1 --------------------\r\n\r\n example1_data = \"\"\"\r\nt x1 x2\r\n0 0 0\r\n2 1.403812093 0.48351624\r\n4 1.528870297 1.483289613\r\n6 1.917963699 2.039584833\r\n8 2.028998372 2.826410056\r\n10 1.978326655 3.106415222\r\n12 2.143692636 3.060669986\r\n14 2.289572191 3.231815374\r\n16 2.019850835 3.310127564\r\n18 1.977904321 3.098886165\r\n20 2.126776717 3.463202683\r\n\"\"\"\r\n tc = Solution.read_str(example1_data)\r\n\r\n mdl1 = \"\"\"# Example model\r\ntitle Example 1\r\n\r\nvin : -> x1 , rate = k1\r\nv2 : x1 -> x2 , rate = k2 * x1\r\nvout : x2 -> , rate = k3 * x2\r\n\r\ninit : x1=0, x2=0\r\n\r\nfind k1 in [0, 2]\r\nfind k2 in [0, 2]\r\nfind k3 in [0, 2]\r\n\r\n!! x2 x1\r\n\r\npopsize = 60 # population size in GA\r\n\"\"\"\r\n model = read_model(mdl1)\r\n best = model.estimate(timecourses=tc)\r\n\r\n print(best)\r\n best.plot()\r\n\r\n print('--- Modifying model ---')\r\n m2 = model.copy()\r\n bestpars = [(n,v) for n,v,e in best.parameters]\r\n m2.setp(bestpars)\r\n\r\n m2.solve(tf=20.0).plot()\r\n\r\n # --- example 2 --------------------\r\n\r\n m1 = read_model(\"\"\"\r\ntitle example 2: Glyoxalase system in L. 
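generate_optimum above reports each parameter together with invFIM1[i, i]**0.5, the square root of a diagonal element of the inverse Fisher information matrix. A generic numpy sketch of that step; the sensitivity matrix J and noise level sigma below are made-up inputs:

import numpy as np

# J[i, j] = d(prediction_i)/d(parameter_j), sigma = measurement noise level
J = np.array([[1.0, 0.2], [0.5, 1.0], [0.3, 0.8]])
sigma = 0.1

fim = J.T @ J / sigma**2          # Gauss-Newton approximation of the FIM
cov = np.linalg.inv(fim)          # parameter covariance estimate
std_errors = np.sqrt(np.diag(cov))  # per-parameter standard errors
print(std_errors)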
Infantum\r\n\r\nglx1 : HTA -> SDLTSH, V1*HTA/(Km1 + HTA)\r\n#glx1 : HTA -> SDLTSH, V*HTA/(Km1 + HTA), V=2.57594e-05\r\nglx2 : SDLTSH ->, V2*SDLTSH/(Km2 + SDLTSH)\r\n\r\n#find glx1.V in [0.00001, 0.0001]\r\nfind V1 in [0.00001, 0.0001]\r\n\r\nKm1 = 0.252531\r\nfind Km1 in [0.01, 1]\r\n\r\nV2 = 2.23416e-05\r\nfind V2 in [0.00001, 0.0001]\r\n\r\nKm2 = 0.0980973\r\nfind Km2 in (0.01, 1)\r\n\r\ninit : (SDLTSH = 7.69231E-05, HTA = 0.1357)\r\n\r\ntimecourse TSH2a.txt\r\ntimecourse TSH2b.txt\r\n\"\"\")\r\n\r\n # print m1\r\n # get tcdir\r\n import pathlib\r\n this_file = pathlib.Path(__file__)\r\n tcdir = pathlib.Path(this_file.parents[0], 'examples', 'timecourses')\r\n #print(tcdir)\r\n\r\n optimum = s_timate(m1, tc_dir=tcdir, #timecourses=['TSH2a.txt', 'TSH2b.txt'],\r\n names=['SDLTSH', 'HTA'],\r\n dump_generations=True) \r\n # convergence_noimprovement=40)\r\n # ... intvarsorder=(0,2,1) ...\r\n\r\n print(optimum)\r\n optimum.plot()\r\n optimum.plot_generations(pars=['V2', 'Km1'], fig_size=(9,6))\r\n\r\n\r\n # --- example 2, fitting a transformation --------------------\r\n\r\n mtransf = read_model(\"\"\"\r\ntitle example 2, fitting a transformation\r\n\r\nglx1 : HTA -> SDLTSH, V1*HTA/(Km1 + HTA)\r\n#glx1 : HTA -> SDLTSH, V*HTA/(Km1 + HTA), V=2.57594e-05\r\nglx2 : SDLTSH ->, V2*SDLTSH/(Km2 + SDLTSH)\r\n\r\n#find glx1.V in [0.00001, 0.0001]\r\nfind V1 in [0.00001, 0.0001]\r\n\r\nKm1 = 0.252531\r\nfind Km1 in [0.01, 1]\r\n\r\nV2 = 2.23416e-05\r\nfind V2 in [0.00001, 0.0001]\r\n\r\nKm2 = 0.0980973\r\nfind Km2 in (0.01, 1)\r\n\r\n~sdlx2 = 2 * SDLTSH # the transformation to fit\r\n\r\n!! sdlx2\r\n\r\ninit : (SDLTSH = 7.69231E-05, HTA = 0.1357)\r\n\r\n\"\"\")\r\n\r\n optimum = s_timate(mtransf, tc_dir=tcdir, timecourses=['tc_double.txt'],\r\n names=['sdlx2', 'SDLTSH', 'HTA']) \r\n\r\n print(optimum)\r\n optimum.plot()\r\n\r\n\r\n # --- example 2 with unknown initial values --------------------\r\n\r\n m2 = m1.copy()\r\n\r\n # Now, assume init.HTA is uncertain\r\n m2.set_bounds('init.HTA', (0.05, 0.25))\r\n # do not estimate Km1 and Km2, to help the analysis\r\n m2.reset_bounds('Km1')\r\n m2.reset_bounds('Km2')\r\n\r\n # VERY IMPORTANT:\r\n # only one time course can be used:\r\n # cannot fit one initial value using several timecourses!!!\r\n\r\n optimum = s_timate(m2, timecourses=['TSH2a.txt'], \r\n tc_dir=tcdir,\r\n opt_settings={'pop_size': 60},\r\n names=['SDLTSH', 'HTA'])\r\n\r\n print(optimum)\r\n optimum.plot(show=True)\r\n\r\nif __name__ == \"__main__\":\r\n test()\r\n","repo_name":"aeferreira/stimator","sub_path":"stimator/estimation.py","file_name":"estimation.py","file_ext":"py","file_size_in_byte":18747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73597632873","text":"from netCDF4 import Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mpl_toolkits.basemap import Basemap\nfrom scipy.interpolate import interp2d\n\n\ncount = 0\n\nprecipitationInWetDays = {'historical': np.zeros([53,113]),\n'period1': np.zeros([53,113]),\n'period2': np.zeros([53,113])\n}\n\nSDII = {'historical': np.zeros([53,113]),\n'period1': np.zeros([53,113]),\n'period2': np.zeros([53,113])\n}\n\nnumberOfWetDays = 0\n\nhistorical = 'chirps data/Annual/'\nperiod1 = 'CMIP5/0Major 4/MIROC5/Period 1/'\nperiod2 = 'CMIP5/0Major 4/MIROC5/Period 2/'\n\n# Lon: 72 -100\n# Lat: 25 - 38\n# , vmin=minmin, vmax=maxmax\n\nmodel = 'MIROC5'\n\ncolor_map = plt.cm.get_cmap('jet')\nreversed_color_map = color_map.reversed()\n\n\n\ndef 
calculateSDII(precip, time_period):\n global SDII, precipitationInWetDays, numberOfWetDays\n for i in range(53):\n for j in range(113):\n if(precip[i][j] >= 1):\n numberOfWetDays += 1\n precipitationInWetDays[time_period][i][j] += precip[i][j]\n\n\n\nfig, axes = plt.subplots(1,3)\nminmin = 0\nmaxmax = 10000\n\nprint(axes)\n\n################### Historical Plot #########################\ntime_period = 'historical'\ndata = historical\n\n\nwith os.scandir(data) as data_files:\n for file in data_files:\n fh = Dataset(data + file.name, mode='r')\n count = count + 1\n # print(fh.variables)\n lons = fh.variables['longitude'][:]\n lats = fh.variables['latitude'][:]\n time = fh.variables['time'][:]\n ppt = fh.variables['precip'][:]\n # print(ppt.shape)\n ppt_units = fh.variables['precip'].units\n\n leftLon = np.where(lons == 71.875)[0][0]\n rightLon = np.where(lons == 100.125)[0][0]\n bottomLat = np.where(lats == 24.875)[0][0]\n topLat = np.where(lats == 38.125)[0][0]\n\n print(count)\n\n for matrix in ppt:\n matrix = matrix[bottomLat:topLat,leftLon:rightLon]\n calculateSDII(matrix, time_period)\n \n\nSDII[time_period] = np.true_divide(precipitationInWetDays[time_period], numberOfWetDays)\n\nprint(SDII[time_period])\n\n\n# save to npy file\nnp.save('Calculated Data/SDII_historical.npy', SDII[time_period])\n\n","repo_name":"tejaswarathe/Precipitation-analysis-Dissertation","sub_path":"Calculations/historical_chirps/SDII_historical.py","file_name":"SDII_historical.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12707072236","text":"# Project a point from 3D to 2D using a matrix operation\n\n## Given a point in 3-space [x y z], and focal length f\n## Return : Location of the projected point on 2D image plane [u v]\n\nimport numpy as np\n\ndef p_img (p,f):\n A = [[f , 0 , 0 , 0],\n [0, f, 0, 0],\n [0, 0, 1, 0]\n ]\n # Convert p to homogeneous coordinates and transpose (size: 4x1)\n p_hom = np.append(p,[1]).reshape(4,1)\n # examle:\n # [[200]\n # [100]\n # [50]\n # [1]]\n\n # print p_hom\n p_proj = np.dot(A ,p_hom)\n # RESULT example :\n # [[10000]\n # [5000]\n # [50]]\n #\n ## convert back to non-homogeneus coordinates and return\n p_img = (int(p_proj[0]/p_proj[2]), int(p_proj[1]/p_proj[2]))\n return p_img\np = [200, 100, 50]\nf = 50\nprint (p_img(p,f))\n\np = [-350, 150, 50]\nf = 50\nprint (p_img(p,f))\n\np = [200, 100, 100]\nf = 50\nprint (p_img(p,f))","repo_name":"cristianku/car-computer-raspberry","sub_path":"projection_2d_3d.py","file_name":"projection_2d_3d.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38447042197","text":"import tkinter as tk\nfrom tkinter import*\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom turtle import bgcolor\nfrom PIL import ImageTk, Image\nfrom tkinter import PhotoImage\nfrom PIL import Image,ImageTk\nfrom hamcrest import empty\nfrom datetime import datetime\n\nglobal colorCeleste\ncolorCeleste = '#C6EBFF'\nglobal colorBlanco\ncolorBlanco = '#F7F7F7'\nglobal colorNegro\ncolorNegro = '#000000'\nglobal anchoVentana\nanchoVentana = 900\nglobal altoVentana\naltoVentana = 600\nglobal alotSubFrames\naltoSubFrames = 400\n\nposicionFrames = (0,200)\n\ndef center(win): \n win.update_idletasks() \n width = win.winfo_width() \n frm_width = win.winfo_rootx() - win.winfo_x() \n win_width = width + 2 * frm_width \n height = win.winfo_height() \n 
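The pinhole-projection record above multiplies one homogeneous point at a time by a 3x4 matrix. The same operation vectorizes over many points; a short sketch reproducing its first two test cases:

import numpy as np

def project_points(points, f):
    # points: (N, 3) array of [x, y, z]; returns (N, 2) image coordinates
    pts = np.asarray(points, dtype=float)
    A = np.array([[f, 0, 0, 0],
                  [0, f, 0, 0],
                  [0, 0, 1, 0]], dtype=float)
    hom = np.hstack([pts, np.ones((len(pts), 1))])   # (N, 4) homogeneous coords
    proj = hom @ A.T                                 # (N, 3) projected points
    return proj[:, :2] / proj[:, 2:3]                # divide by depth

print(project_points([[200, 100, 50], [-350, 150, 50]], f=50))
# [[ 200.  100.]
#  [-350.  150.]]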
titlebar_height = win.winfo_rooty() - win.winfo_y() \n win_height = height + titlebar_height + frm_width \n x = win.winfo_screenwidth() // 2 - win_width // 2 \n y = win.winfo_screenheight() // 2 - win_height // 2 \n win.geometry('{}x{}+{}+{}'.format(width, height, x, y)) \n win.deiconify()\n\ndef cargarImg(root, file, size: tuple, place: tuple):\n img=Image.open(file)\n img=img.resize(size,Image.ANTIALIAS)\n img.save(file)\n img=PhotoImage(master=root,file=file)\n panel=ttk.Label(root,image=img)\n panel.image = img\n panel.place(x=place[0],y=place[1])\n\nclass Interfaz:\n def __init__(self):\n # Frames\n self.frameRelaDeshabilitadas = None \n self.frameRelaInexistentes = None\n self.frameTriggers = None\n self.frameDatosAnomalos = None\n # Modelo bases de datos\n self.gestorDatos = None \n #self.dibujar() \n\n def setGestorDatos(self, gestorDatos):\n self.gestorDatos = gestorDatos\n\n def mostrarFrameRelDeshabilitadas(self):\n self.ocultarOtrosFrames()\n self.frameRelaDeshabilitadas.place(x=posicionFrames[0], y=posicionFrames[1])\n \n def mostrarFrameRelInexistentes(self):\n self.ocultarOtrosFrames()\n self.frameRelaInexistentes.place(x=posicionFrames[0], y=posicionFrames[1])\n \n def mostrarFrameTriggers(self):\n self.ocultarOtrosFrames()\n self.frameTriggers.place(x=posicionFrames[0], y=posicionFrames[1])\n\n def mostrarFrameDatosAnomalos(self):\n self.ocultarOtrosFrames()\n self.frameDatosAnomalos.place(x=posicionFrames[0], y=posicionFrames[1])\n \n def ocultarOtrosFrames(self): \n self.frameRelaDeshabilitadas.place_forget() \n self.frameRelaInexistentes.place_forget() \n self.frameTriggers.place_forget()\n self.frameDatosAnomalos.place_forget()\n\n def conectarBaseDatos(self, event):\n base = self.comboBases.get()\n self.gestorDatos.conectarConBase(base)\n self.gestorDatos.borrarStoredProcedures()\n self.gestorDatos.crearStoredProcedures()\n\n # Verificación de selección de base\n def sinSeleccionarBase(self):\n if self.gestorDatos.baseActual == None:\n messagebox.showerror(message=f'No se ha seleccionado ninguna base de datos', title='Error')\n return True\n else: return False\n\n def escribirLog(self, titulo, encabezado, cuerpo):\n with open('log.txt', 'a') as file:\n file.write(f'''{'*'*50}\nFecha y hora del análisis: {datetime.now()}\nBase de datos: {self.gestorDatos.baseActual}\nServidor: {self.gestorDatos.direccion_servidor}\nUsuario: {self.gestorDatos.nombre_usuario}\\n''')\n header = '-'*20 + titulo + '-'*20 + '\\n'\n file.write(header)\n # Cabecera de la tabla\n for h in encabezado:\n file.write('{0: <35}'.format(h))\n file.write('\\n')\n # Contenido de la tabla\n for fila in cuerpo:\n for dato in fila:\n file.write('{0: <35}'.format(dato))\n file.write('\\n')\n end = '-'*40 + '\\n\\n'\n file.write(end)\n messagebox.showinfo(message=f'Log generado correctamente', title='Log')\n\n\n def dibujar(self):\n ventana = tk.Tk()\n ventana.title('Auditoría Bases de Datos')\n ventana.geometry(str(anchoVentana)+'x'+str(altoVentana))\n ventana.resizable(width=True ,height =True)\n ventana.configure(background=colorBlanco)\n ventana.resizable(0,0)\n ventana.iconbitmap('../img/icono.ico')\n center(ventana)\n\n# ------- Creación Frames ------------------------\n frameGlobal = Frame(ventana, width=anchoVentana, height=altoVentana, bg='#FFFFFF',relief='sunken')\n frameGlobal.place(x=0,y=0)\n\n self.frameRelaInexistentes = Frame(frameGlobal, width = anchoVentana, height=altoSubFrames, \n bg= colorBlanco, relief ='sunken')\n self.frameRelaInexistentes.place(x=posicionFrames[0], 
y=posicionFrames[1])\n\n\n        self.frameTriggers = Frame(frameGlobal, width = anchoVentana, height=altoSubFrames, \n                            bg= colorBlanco, relief ='sunken')\n        self.frameTriggers.place(x=posicionFrames[0], y=posicionFrames[1])\n\n\n        self.frameDatosAnomalos = Frame(frameGlobal, width = anchoVentana, height=altoSubFrames, \n                            bg= colorBlanco, relief ='sunken')\n        self.frameDatosAnomalos.place(x=posicionFrames[0], y=posicionFrames[1])\n\n        self.frameRelaDeshabilitadas = Frame(frameGlobal, width = anchoVentana, height=altoSubFrames, \n                            bg= colorBlanco, relief ='sunken')\n        self.frameRelaDeshabilitadas.place(x=posicionFrames[0], y=posicionFrames[1])\n\n# ----------------------------------------------------------------------- #\n# Encabezado\n# ------------------------------------------------------------------------ #\n\n        # Variables\n        basesDatos = self.gestorDatos.getBasesExistentes()\n\n        # Elementos Interfaz\n        encabezado = Frame(ventana, width=anchoVentana, height=70, bg=colorCeleste,relief='sunken')\n        encabezado.place(x=0,y=0)\n        labelTitulo = ttk.Label(encabezado, text=\"Auditoria de Bases de Datos para SQL Server - Grupo 3\", background=colorCeleste, font=(\"Courie\",14,'bold'))\n        labelTitulo.place(x=180, y=20) \n        label1 = ttk.Label(frameGlobal, text=\"Seleccione la Base de Datos a analizar:\", background='#FFFFFF', font=(\"Courie\",10))\n        label1.place(x=40, y=90)\n        self.comboBases=tk.ttk.Combobox(frameGlobal,values=basesDatos,width=30)\n        self.comboBases.place(x=300,y=90)\n        self.comboBases.bind('<<ComboboxSelected>>', self.conectarBaseDatos)\n\n        #logoEPN\n        cargarImg(encabezado,'../img/logo-EPN.png',(50,50),(30,8))\n        #logoFIS\n        cargarImg(encabezado,'../img/logo-FIS.png',(50,50),(800,8))\n        \n        #botones\n        btnRelDeshabilitadas = Button(frameGlobal,text= 'Relaciones deshabilitadas',font=(\"Courie\",9,'bold'),command=self.mostrarFrameRelDeshabilitadas, width=22, height=2, bd= 2,bg=colorBlanco, relief=GROOVE, highlightbackground=colorNegro)\n        btnRelDeshabilitadas.place(x=50,y=140)\n\n        btnRelInexistentes = Button(frameGlobal,text= 'Relaciones que deberían existir',font=(\"Courie\",9,'bold'),command=self.mostrarFrameRelInexistentes, width=26, height=2, bd= 2,bg=colorBlanco, relief=GROOVE, highlightbackground=colorNegro)\n        btnRelInexistentes.place(x=240,y=140)\n\n        btnRelInexistentes = Button(frameGlobal,text= 'Triggers deshabilitado',font=(\"Courie\",9,'bold'),command=self.mostrarFrameTriggers, width=20, height=2, bd= 2,bg=colorBlanco, relief=GROOVE, highlightbackground=colorNegro)\n        btnRelInexistentes.place(x=460,y=140)\n\n        btnRelInexistentes = Button(frameGlobal,text= 'Datos Anómalos',font=(\"Courie\",9,'bold'),command=self.mostrarFrameDatosAnomalos, width=26, height=2, bd= 2,bg=colorBlanco, relief=GROOVE, highlightbackground=colorNegro)\n        btnRelInexistentes.place(x=640,y=140)\n\n# ----------------------------------------------------------------------- #\n# Frame Relaciones deshabilitadas\n# ------------------------------------------------------------------------ #\n\n        style = ttk.Style(ventana)\n        style.theme_use(\"clam\")\n        style.configure(\"Treeview.Heading\", background=colorCeleste, foreground=colorNegro)\n\n        #Variables\n\n        #Elementos GUI \n        label2 = ttk.Label(self.frameRelaDeshabilitadas, text=\"Relaciones existentes en la base de datos, pero que se encuentran deshabilitadas:\",\n        background=colorBlanco, font=(\"Courie\",10))\n        label2.place(x=40, y=20)\n        \n        #tabla\n        columnsT1 = ('Restricción de Clave Foránea','Nombre Tabla Hijo','Nombre Tabla Padre','¿Está deshabilitado?')\n        tabla1 = ttk.Treeview(self.frameRelaDeshabilitadas, 
height=15,columns=columnsT1,show='headings')\n tabla1.place(x=40,y=50) \n\n for heading in columnsT1: \n tabla1.heading(heading, text=heading)\n tabla1.column(heading,minwidth=0,width=180,stretch=NO)\n \n ladoy = Scrollbar(self.frameRelaDeshabilitadas, orient =VERTICAL, command = tabla1.yview)\n ladoy.place(x=765,y=50)\n ladoy.set(20,200)\n tabla1.configure(yscrollcommand=ladoy.set)\n\n def exportarLOGRelDeshabilitadas():\n data = self.gestorDatos.execRelacionesDeshabilitadas()\n if data == []:\n data = [['No se encontraron relaciones deshabilitadas']]\n self.escribirLog('Relaciones deshabilitadas',columnsT1,data)\n\n \n btnLogRelDeshabilitadas = Button(self.frameRelaDeshabilitadas,text= 'Generar LOG',font=(\"Courie\",9,'bold'),command=exportarLOGRelDeshabilitadas, width=15, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnLogRelDeshabilitadas.config(state=tk.DISABLED)\n btnLogRelDeshabilitadas.place(x=700,y=20)\n\n def llenarData():\n if self.sinSeleccionarBase() == False:\n tabla1 = ttk.Treeview(self.frameRelaDeshabilitadas, height=15,columns=columnsT1,show='headings')\n tabla1.place(x=40,y=50) \n for heading in columnsT1: \n tabla1.heading(heading, text=heading)\n tabla1.column(heading,minwidth=0,width=180,stretch=NO)\n\n data = self.gestorDatos.execRelacionesDeshabilitadas()\n if data == []:\n data = [['No se encontraron relaciones deshabilitadas']]\n messagebox.showinfo(message=f'No existen relaciones deshabilitadas', title='Enhorabuena')\n else: \n for fila in data: \n tabla1.insert('', 'end', values =fila)\n # Habilitar boton log\n btnLogRelDeshabilitadas.config(state=tk.NORMAL)\n \n \n \n\n btnExecRelDeshabilitadas = Button(self.frameRelaDeshabilitadas,text= 'Analizar',font=(\"Courie\",9,'bold'),command=llenarData, width=10, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnExecRelDeshabilitadas.place(x=600,y=20)\n\n \n\n# ----------------------------------------------------------------------- #\n# Frame Relaciones inexistentes\n# ------------------------------------------------------------------------ #\n\n #Variables\n\n #Elementos GUI \n label3 = ttk.Label(self.frameRelaInexistentes, text='Posibles anomalías entre tablas de la base de datos con relaciones no existentes:',\n background=colorBlanco, font=(\"Courie\",10))\n label3.place(x=40, y=20)\n \n #tabla\n columnsT2 = ('Tabla Analizada','Columna con Nombre Repetido','Posible Relación que Debería Crearse')\n tabla2 = ttk.Treeview(self.frameRelaInexistentes, height=15,columns=columnsT2,show='headings')\n tabla2.place(x=40,y=50)\n \n for head in columnsT2: \n tabla2.heading(head, text=head)\n tabla2.column(head,minwidth=0,width=240,stretch=NO)\n \n ladoy2 = Scrollbar(self.frameRelaInexistentes, orient =VERTICAL, command = tabla2.yview)\n ladoy2.place(x=765,y=50)\n ladoy2.set(20,200)\n tabla2.configure(yscrollcommand=ladoy2.set)\n\n def exportarLOGRelInexistentes():\n data = self.gestorDatos.execPosiblesRelaciones()\n if data == []:\n data = [['No se encontraron posibles relaciones inexistentes']]\n else:\n for fila in data:\n if fila[2] == None:\n fila[2] = 'Sin posible tabla de relación'\n self.escribirLog('Relaciones inexistentes',columnsT2,data)\n\n btnLogRelInexistentes = Button(self.frameRelaInexistentes,text= 'Generar LOG',font=(\"Courie\",9,'bold'),command=exportarLOGRelInexistentes, width=15, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnLogRelInexistentes.config(state=tk.DISABLED)\n 
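escribirLog in this GUI record lines up its audit tables with the '{0: <35}'.format(...) left-padding idiom. That idiom factors cleanly into a helper; a minimal sketch (the file name and sample row are placeholders):

def write_table(path, header, rows, width=35):
    # pad every cell to a fixed width so columns line up in a plain-text log
    with open(path, "a", encoding="utf-8") as f:
        for line in [header] + list(rows):
            f.write("".join("{0: <{1}}".format(str(cell), width) for cell in line))
            f.write("\n")

write_table("log.txt",
            ("Tabla", "Restriccion", "Dato"),
            [("clientes", "CHECK edad>=0", "-4")])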
btnLogRelInexistentes.place(x=700,y=20)\n\n def llenarData2():\n if self.sinSeleccionarBase() == False:\n tabla2 = ttk.Treeview(self.frameRelaInexistentes, height=15,columns=columnsT2,show='headings')\n tabla2.place(x=40,y=50)\n for head in columnsT2: \n tabla2.heading(head, text=head)\n tabla2.column(head,minwidth=0,width=240,stretch=NO)\n\n data = self.gestorDatos.execPosiblesRelaciones()\n if data == []:\n messagebox.showinfo(message=f'No se encontraron posibles relaciones inexistentes', title='Enhorabuena')\n for fila in data: \n dato = fila\n if dato[2] == None:\n dato[2] = 'Sin posible tabla de relación' \n tabla2.insert('', 'end', values=dato)\n btnLogRelInexistentes.config(state=tk.NORMAL)\n\n btnExecRelInexistentes = Button(self.frameRelaInexistentes,text= 'Analizar',font=(\"Courie\",9,'bold'),command=llenarData2, width=10, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnExecRelInexistentes.place(x=600,y=20)\n\n\n\n# ----------------------------------------------------------------------- #\n# Frame Triggers\n# ------------------------------------------------------------------------ #\n\n #Variables\n\n #Elementos GUI \n label4 = ttk.Label(self.frameTriggers, text='Triggers existentes en la base de datos, pero que se encuentran deshabilitados:',\n background=colorBlanco, font=(\"Courie\",10))\n label4.place(x=40, y=20)\n\n #tabla\n columnsT3 = ('Nombre del Trigger','Acción','Tabla perteneciente','¿Está Deshabilitado?')\n tabla3 = ttk.Treeview(self.frameTriggers, height=15,columns=columnsT3,show='headings')\n tabla3.place(x=40,y=50)\n \n for head in columnsT3: \n tabla3.heading(head, text=head)\n tabla3.column(head,minwidth=0,width=200,stretch=NO)\n \n ladoy3 = Scrollbar(self.frameTriggers, orient =VERTICAL, command = tabla3.yview)\n ladoy3.place(x=850,y=50)\n ladoy3.set(20,200)\n tabla3.configure(yscrollcommand=ladoy3.set)\n\n def exportarLOGTrigger():\n data = self.gestorDatos.execTriggersDeshabilitados()\n if data == []:\n data = [['No se encontró ningún trigger deshabilitado']]\n self.escribirLog('Triggers deshabilitados',columnsT3,data)\n\n btnLogTrigger = Button(self.frameTriggers,text= 'Generar LOG',font=(\"Courie\",9,'bold'),command=exportarLOGTrigger, width=15, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnLogTrigger.config(state=tk.DISABLED)\n btnLogTrigger.place(x=700,y=20)\n\n def llenarData3(): \n if self.sinSeleccionarBase() == False: \n tabla3 = ttk.Treeview(self.frameTriggers, height=15,columns=columnsT3,show='headings')\n tabla3.place(x=40,y=50)\n \n for head in columnsT3: \n tabla3.heading(head, text=head)\n tabla3.column(head,minwidth=0,width=200,stretch=NO)\n\n data = self.gestorDatos.execTriggersDeshabilitados()\n if data == []:\n messagebox.showinfo(message=f'No se encontró ningún trigger deshabilitado', title='Enhorabuena')\n for fila in data: \n dato = fila \n tabla3.insert('', 'end', values=dato)\n # Habilitar boton log\n btnLogTrigger.config(state=tk.NORMAL)\n\n btnExecTrigger = Button(self.frameTriggers,text= 'Analizar',font=(\"Courie\",9,'bold'),command=llenarData3, width=10, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnExecTrigger.place(x=600,y=20)\n\n\n\n\n# ----------------------------------------------------------------------- #\n# Frame Datos Anomalos\n# ------------------------------------------------------------------------ #\n\n #Variables\n\n #Elementos GUI \n label5 = ttk.Label(self.frameDatosAnomalos, text='Se encontraron los 
siguientes datos anómalos:',\n background=colorBlanco, font=(\"Courie\",10))\n label5.place(x=40, y=20)\n\n #tabla\n columnsT4 = ('Nombre de la Tabla','Restricción','Dato Anómalo')\n tabla4 = ttk.Treeview(self.frameDatosAnomalos, height=15,columns=columnsT4,show='headings')\n tabla4.place(x=40,y=50)\n \n for head in columnsT4: \n tabla4.heading(head, text=head)\n tabla4.column(head,minwidth=0,width=220,stretch=NO)\n \n ladoy4 = Scrollbar(self.frameDatosAnomalos, orient =VERTICAL, command = tabla4.yview)\n ladoy4.place(x=710,y=50)\n ladoy4.set(20,200)\n tabla4.configure(yscrollcommand=ladoy4.set)\n\n def exportarLOGDatosAnomalos():\n data = self.gestorDatos.execChequeoAutomatico()\n if data == []:\n data = [['No se encontraron datos anómalos']]\n self.escribirLog('Datos anómalos',columnsT4,data)\n\n btnLogDatosAnomalos = Button(self.frameDatosAnomalos,text= 'Generar LOG',font=(\"Courie\",9,'bold'),command=exportarLOGDatosAnomalos, width=15, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnLogDatosAnomalos.config(state=tk.DISABLED)\n btnLogDatosAnomalos.place(x=700,y=20)\n\n def llenarData4():\n if self.sinSeleccionarBase() == False:\n tabla4 = ttk.Treeview(self.frameDatosAnomalos, height=15,columns=columnsT4,show='headings')\n tabla4.place(x=40,y=50)\n for head in columnsT4: \n tabla4.heading(head, text=head)\n tabla4.column(head,minwidth=0,width=220,stretch=NO)\n data = self.gestorDatos.execChequeoAutomatico()\n if data == []:\n messagebox.showinfo(message=f'No se encontraron datos anómalos', title='Enhorabuena')\n for fila in data: \n dato = fila \n tabla4.insert('', 'end', values=dato)\n # Habilitar boton log\n btnLogDatosAnomalos.config(state=tk.NORMAL)\n\n btnExecTrigger = Button(self.frameDatosAnomalos,text= 'Analizar',font=(\"Courie\",9,'bold'),command=llenarData4, width=10, height=1, bd= 2,bg=colorCeleste, relief=GROOVE, highlightbackground=colorNegro)\n btnExecTrigger.place(x=600,y=20)\n\n\n\n\n# ------- Main loop --------------------# \n ventana.mainloop()\n\n","repo_name":"AndersCFR/AplicacionAuditoriaBases","sub_path":"Interfaz/interfaz.py","file_name":"interfaz.py","file_ext":"py","file_size_in_byte":19851,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22272715015","text":"import numpy as np\n\n\nclass DiscreteAxis:\n\n def __init__(self, x_min, x_max, N, scale):\n \"\"\"\n Create a new `DiscreteAxis` object.\n\n Args:\n x_min: This is the lower boundary of the discretized axis.\n x_max: This is the upper boundary of the discretized axis.\n N: This is the number of grid-points (bins) in the discretized axis.\n scale: This should be either \"lin\" (linear) or \"log\" (logarithmic).\n \"\"\"\n self.x_min = x_min\n self.x_max = x_max\n self.N = N\n self.scale = scale\n\n self.grid_cell_boundaries = self._grid_cell_boundaries()\n self.grid_cell_centers = self._grid_cell_centers()\n self.grid_cell_widths = self._grid_cell_widths()\n\n def _grid_cell_boundaries(self) -> np.ndarray:\n x_min, x_max, N = self.x_min, self.x_max, self.N\n indices = np.arange(0, N + 1, 1)\n if self.scale == \"lin\":\n x = x_min + (x_max - x_min) * (indices / N)\n assert x[0] == x_min and x[N] == x_max\n return x\n if self.scale == \"log\":\n x = x_min * (x_max / x_min) ** (indices / N)\n assert x[0] == x_min and x[N] == x_max\n return x\n raise Exception(f\"Axis scale '{self.scale}' unknown.\")\n\n def _grid_cell_centers(self) -> np.ndarray:\n xs = self.grid_cell_boundaries\n if self.scale == \"lin\":\n 
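The DiscreteAxis class starting in this record builds linear and logarithmic bin edges by hand. Those formulas can be cross-checked against numpy's linspace and geomspace, and the log-scale cell centers it computes are geometric means of adjacent edges; a quick sketch assuming the same x_min/x_max/N convention:

import numpy as np

x_min, x_max, N = 1.0, 100.0, 4
indices = np.arange(0, N + 1)

lin_edges = x_min + (x_max - x_min) * (indices / N)
log_edges = x_min * (x_max / x_min) ** (indices / N)

# the hand-rolled formulas agree with numpy's built-in spacings
assert np.allclose(lin_edges, np.linspace(x_min, x_max, N + 1))
assert np.allclose(log_edges, np.geomspace(x_min, x_max, N + 1))

# log-scale cell centers are geometric means of neighbouring edges
centers = np.sqrt(log_edges[:-1] * log_edges[1:])
print(centers)  # [ 1.77827941  5.62341325 17.7827941  56.23413252]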
return (xs[:-1] + xs[1:]) / 2\n if self.scale == \"log\":\n return np.sqrt(xs[:-1] * xs[1:])\n raise Exception(f\"Axis scale '{self.scale}' unknown.\")\n\n def _grid_cell_widths(self) -> np.ndarray:\n grid_cell_boundaries = self.grid_cell_boundaries\n return grid_cell_boundaries[1:] - grid_cell_boundaries[:-1]\n\n def index_from_value(self, x) -> int:\n xs = self.grid_cell_centers\n i = np.where((xs - x) < 0)[0][-1]\n return i\n\n def value_from_index(self, i) -> np.float64:\n xs = self.grid_cell_centers\n x = xs[i]\n return np.float64(x)\n","repo_name":"vincentmader/msc-thesis.py","sub_path":"src/utils/axis.py","file_name":"axis.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20443437098","text":"from http import HTTPStatus\nimport datetime\nimport requests\n\nfrom sqlalchemy import and_, func\n\nfrom constants import InvoiceStatus\nimport constants\nfrom database import db\nfrom error import get_http_error_resp\nfrom models import Invoice\nfrom order_helpers import maybe_mark_order_as_expired, maybe_mark_order_as_paid\nfrom utils import hmac_sha256_digest\n\n\ndef new_invoice(order, bid):\n \"\"\"Generate a lightning invoice\n\n Args:\n order: The order for which this invoice is being generated.\n bid: Bid amount.\n Returns:\n A pair whose first element is a boolean indicating whether\n the invoice generation was successful or not. If False, then\n the second element is the error key. If True, then the second\n element is the newly generated invoice.\n \"\"\"\n try:\n bid = int(bid)\n # generate Lightning invoice\n charged_response = requests.post(f\"{constants.CHARGE_ROOT}/invoice\",\n json={\n 'msatoshi': bid,\n 'description':\n constants.LN_INVOICE_DESCRIPTION,\n 'expiry':\n constants.LN_INVOICE_EXPIRY,\n 'metadata': {\n 'uuid':\n order.uuid,\n 'sha256_message_digest':\n order.message_digest\n }\n },\n timeout=(constants.CONNECTION_TIMEOUT,\n constants.RESPONSE_TIMEOUT))\n except requests.exceptions.RequestException:\n return False, get_http_error_resp('LIGHTNING_CHARGE_INVOICE_ERROR')\n except ValueError:\n return False, get_http_error_resp('PARAM_COERCION', 'bid')\n\n if charged_response.status_code != HTTPStatus.CREATED:\n return False, get_http_error_resp('LIGHTNING_CHARGE_INVOICE_ERROR')\n\n lightning_invoice = charged_response.json()\n if 'id' not in lightning_invoice:\n return False, get_http_error_resp('LIGHTNING_CHARGE_INVOICE_ERROR')\n\n invoice = Invoice(order_id=order.id,\n amount=bid,\n lid=lightning_invoice['id'],\n invoice=charged_response.content,\n status=InvoiceStatus.pending.value,\n expires_at=datetime.datetime.utcnow() +\n datetime.timedelta(seconds=constants.LN_INVOICE_EXPIRY))\n\n try:\n # register the webhook\n charged_auth_token = hmac_sha256_digest(\n constants.LIGHTNING_WEBHOOK_KEY, invoice.lid)\n callback_url = (f\"{constants.CALLBACK_URI_ROOT}/callback\"\n f\"/{invoice.lid}/{charged_auth_token}\")\n\n webhook_registration_response = requests.post(\n f\"{constants.CHARGE_ROOT}/invoice/{invoice.lid}/webhook\",\n json={'url': callback_url},\n timeout=(constants.CONNECTION_TIMEOUT, constants.RESPONSE_TIMEOUT))\n\n if webhook_registration_response.status_code != HTTPStatus.CREATED:\n return False, get_http_error_resp(\n 'LIGHTNING_CHARGE_WEBHOOK_REGISTRATION_ERROR')\n\n except requests.exceptions.RequestException:\n return False, get_http_error_resp(\n 'LIGHTNING_CHARGE_WEBHOOK_REGISTRATION_ERROR')\n\n return True, invoice\n\n\ndef 
get_and_authenticate_invoice(lid, charged_auth_token):\n invoice = Invoice.query.filter_by(lid=lid).first()\n\n if invoice is None:\n return False, get_http_error_resp('INVOICE_ID_NOT_FOUND_ERROR', lid)\n\n db_invoice_charged_auth_token = hmac_sha256_digest(\n constants.LIGHTNING_WEBHOOK_KEY, invoice.lid)\n\n if db_invoice_charged_auth_token != charged_auth_token:\n return False, get_http_error_resp('INVALID_AUTH_TOKEN')\n\n return True, invoice\n\n\ndef pay_invoice(invoice):\n if invoice.status == InvoiceStatus.paid.value:\n return get_http_error_resp('INVOICE_ALREADY_PAID')\n if invoice.status == InvoiceStatus.expired.value:\n return get_http_error_resp('INVOICE_ALREADY_EXPIRED')\n\n invoice.status = InvoiceStatus.paid.value\n invoice.paid_at = datetime.datetime.utcnow()\n db.session.commit()\n maybe_mark_order_as_paid(invoice.order_id)\n return None\n\n\ndef get_pending_invoices(order_id):\n return Invoice.query.filter(\n and_(Invoice.order_id == order_id,\n Invoice.status == constants.InvoiceStatus.pending.value)).all()\n\n\ndef expire_invoice(invoice):\n if invoice.status == InvoiceStatus.pending.value:\n invoice.status = constants.InvoiceStatus.expired.value\n db.session.commit()\n\n\ndef expire_unpaid_invoices():\n \"\"\"Expire unpaid invoices\n\n Expire any unpaid invoice that has reached its expiration time. The\n corresponding order may be auto-expired as a result.\n\n Returns:\n Tuple with the list of invoices and the list of orders that got expired\n by this function.\n\n \"\"\"\n invoices_to_expire = Invoice.query.filter(\n and_(Invoice.status == constants.InvoiceStatus.pending.value,\n func.datetime(Invoice.expires_at)\n < datetime.datetime.utcnow())).all()\n expired_orders = []\n for invoice in invoices_to_expire:\n expire_invoice(invoice)\n expired_order = maybe_mark_order_as_expired(invoice.order_id)\n if (expired_order is not None):\n expired_orders.append(expired_order)\n return invoices_to_expire, expired_orders\n","repo_name":"Blockstream/satellite-api","sub_path":"server/invoice_helpers.py","file_name":"invoice_helpers.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"28457237528","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"chelodina\",\n version=\"0.5.5\",\n description=\"A Logo to Python transpiler\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"http://github.com/pablasso/chelodina\",\n author=\"Juan Pablo Ortiz\",\n author_email=\"me@pablasso.com\",\n license=\"MIT\",\n packages=find_packages(),\n install_requires=[\"ply\", \"astor\"],\n zip_safe=False,\n entry_points={\"console_scripts\": [\"chelodina = chelodina.__main__:main\"]},\n)\n","repo_name":"pablasso/chelodina","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"30864472889","text":"class Solution:\n def calculateTime(self, keyboard: str, word: str) -> int:\n keyMap = dict()\n for i, c in enumerate(keyboard):\n keyMap[c] = i\n \n result = keyMap[word[0]]\n for i in range(len(word) - 1):\n result += abs(keyMap[word[i]] - keyMap[word[i+1]])\n \n return result\n 
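new_invoice and get_and_authenticate_invoice in the satellite-api record above authenticate webhook callbacks by HMAC-SHA256-ing the invoice id with a server-side key. A generic sketch of that scheme; the key and invoice ids are placeholders:

import hmac
import hashlib

SECRET_KEY = b"server-side-webhook-key"  # placeholder key

def sign(invoice_id: str) -> str:
    return hmac.new(SECRET_KEY, invoice_id.encode(), hashlib.sha256).hexdigest()

def verify(invoice_id: str, token: str) -> bool:
    # compare_digest avoids timing side channels on the token check
    return hmac.compare_digest(sign(invoice_id), token)

token = sign("invoice-123")
print(verify("invoice-123", token))  # True
print(verify("invoice-456", token))  # False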
","repo_name":"Maxwell-Yang-2001/maxwell-yang-leetcode","sub_path":"1165-single-row-keyboard/1165-single-row-keyboard.py","file_name":"1165-single-row-keyboard.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17640874045","text":"################\n#\n# test.py\n#\n# Test driver for api.py\n#\n################\n\nfrom api import *\n\ndef main():\n\n image_path = '/path/to/image.jpg'\n\n classification_model_path = '/path/to/model.pth.tar'\n detection_model_path = '/path/to/model'\n useGPU = True \n\n createLabeledImage = True\n\n model = DetectionClassificationAPI(classification_model_path, detection_model_path, useGPU) \n\n print('Working on image {}'.format(image_path))\n\n ret = model.predict_image(image_path, topK=5)\n\n print('\\nSpecies \\n')\n\n for i in range(0, len(ret.species)):\n print('%d) %s likelihood: %f' % (i+1, ret.species[i], ret.species_scores[i]))\n\n if (ret.bboxes is not None):\n print('\\nBounding box \\n')\n\n for i in range(0, len(ret.bboxes)):\n print('%d) %f %f %f %f: conf %f' % \n (i+1, ret.bboxes[i][0], ret.bboxes[i][1], ret.bboxes[i][2], ret.bboxes[i][3], ret.bboxes_scores[i]))\n\n # if viz draw box on image\n if (createLabeledImage):\n test_image = PIL.Image.open(image_path).convert('RGB')\n labeled_image = visdom_bbox(np.array(test_image).transpose((2, 0, 1)),\n at.tonumpy(ret.bboxes[:,[1,0,3,2]]),\n at.tonumpy([1 for _ in ret.bboxes]),\n at.tonumpy(ret.bboxes_scores),\n label_names=['Animal', 'BG'])\n\n labeled_image = PIL.Image.fromarray((255*labeled_image).transpose((1,2,0)).astype(np.uint8))\n labeled_image.save('output.jpg')\n\n print('\\n')\n\nif __name__ == '__main__':\n main()\n","repo_name":"rbavery/animal_detector","sub_path":"SpeciesClassification/DetectionClassificationAPI/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"20579868519","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom utils import get_keyboard, SMILE, check_name\nfrom telegram import ReplyKeyboardRemove, ReplyKeyboardMarkup, ParseMode, File, Bot\nfrom telegram.ext import ConversationHandler, Filters\nimport glob\nfrom random import choice\nfrom emoji import emojize\nfrom mongodb import mdb, search_or_save_user, save_user_form\nimport pandas as pd\nfrom settings import TG_TOKEN, FILES_PATH\nimport os\n\ndef sms(bot, update):\n \"\"\"Функция вызывается при отправке команды /start\"\"\"\n user = search_or_save_user(mdb, bot.effective_user, bot.message)\n smile = emojize(choice(SMILE), use_aliases=True)\n bot.message.reply_text('Привет {} {}'.format(bot.message.chat.first_name, smile), reply_markup=get_keyboard())\n\ndef send_pics(bot, update):\n lists = glob.glob('images/*')\n picture = choice(lists)\n update.bot.send_photo(chat_id=bot.message.chat.id, photo=open(picture, 'rb'))\n\n\ndef get_joke(bot, update):\n \"\"\"Функция вызывается при отправке сообщения Анекдот\"\"\"\n receive = requests.get('http://anekdotme.ru/random') #отправляем запрос к странице\n page = BeautifulSoup(receive.text, 'html.parser') #подключаем html парсер, получаем текст страницы\n find = page.select('.anekdot_text') #из страницы html получаем class='anekdot_text'\n for text in find:\n page = (text.getText().strip()) #из class='anekdot_text' получаем текст и убираем пробелы по сторонам\n bot.message.reply_text(page)\n\ndef get_contact(bot, update):\n \"\"\"Функция 
отвечает пользователю о получении контактов\"\"\"\n print(bot.message.contact)\n bot.message.reply_text('{}, мы получили ваш номер телефона'.format(bot.message.chat.first_name))\n\ndef get_location(bot, update):\n \"\"\"Функция отвечает пользователю о получении местоположения\"\"\"\n print(bot.message.location)\n bot.message.reply_text('{}, мы получили ваше местоположение'.format(bot.message.chat.first_name))\n\ndef parrot(bot, update):\n \"\"\"Функция отвечает пользователю тем же сообщением, что пользователь отправил\"\"\"\n print(bot.message.text)\n bot.message.reply_text(bot.message.text)\n\ndef form_start(bot, update):\n bot.message.reply_text('Как вас зовут?', reply_markup=ReplyKeyboardRemove())\n return \"user_name\"\n\ndef form_get_name(bot, update):\n update.user_data['name'] = bot.message.text #временно сохраняем ответ\n bot.message.reply_text('Сколько вам лет?')\n return \"user_age\"\n\ndef form_get_age(bot, update):\n update.user_data['age'] = bot.message.text # временно сохраняем ответ\n reply_keyboard = [['1', '2', '3', '4', '5']] #создаем клавиатуру\n bot.message.reply_text('Насколько вам понравился опрос?', reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True, resize_keyboard=True))\n return \"evaluation\"\n\ndef form_get_evaluation(bot, update):\n update.user_data['evaluation'] = bot.message.text # временно сохраняем ответ\n reply_keyboard = [['пропустить']] # создаем клавиатуру\n bot.message.reply_text('Напишите отзыв или пропустите шаг?',\n reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True, resize_keyboard=True))\n return \"comment\"\n\ndef form_exit_comment(bot, update):\n update.user_data['comment'] = bot.message.text # временно сохраняем ответ\n user = search_or_save_user(mdb, bot.effective_user, bot.message) # получаем данные из БД\n save_user_form(mdb, user, update.user_data) # передаем результаты анкеты\n text = \"\"\"Результат опроса:\n Имя: {name}\n Возраст: {age}\n Оценка: {evaluation}\n \"\"\".format(**update.user_data)\n bot.message.reply_text(text, parse_mode=ParseMode.HTML)\n bot.message.reply_text('Спасибо за комментарии', reply_markup=get_keyboard())\n return ConversationHandler.END\n\ndef form_comment(bot, update):\n update.user_data['comment'] = bot.message.text # временно сохраняем ответ\n user = search_or_save_user(mdb, bot.effective_user, bot.message) #получаем данные из БД\n form = save_user_form(mdb, user, update.user_data) #передаем и получаем результаты анкеты\n print(form)\n text = \"\"\"Результат опроса:\n Имя: {name}\n Возраст: {age}\n Оценка: {evaluation}\n Комментарий: {comment}\n \"\"\".format(**update.user_data)\n bot.message.reply_text(text, parse_mode=ParseMode.HTML)\n bot.message.reply_text('Спасибо за комментарии', reply_markup=get_keyboard())\n return ConversationHandler.END\n\ndef dontknow1(bot, update):\n bot.message.reply_text('Я вас не понимаю, выберите оценку на клавиатуре')\n\ndef dontknow2(bot, update):\n bot.message.reply_text('Я вас не понимаю, введите название региона')\n\ndef dontknow3(bot, update):\n bot.message.reply_text('Я вас не понимаю, вышлите фото или документ')\n\ndef get_sales(bot, update):\n bot.message.reply_text('Какой регион вас интересует?', reply_markup=ReplyKeyboardRemove())\n return \"region\"\n\ndef get_region(bot, update):\n update.user_data['region'] = bot.message.text # временно сохраняем ответ\n df = pd.read_excel(r'C:\\Users\\Админ\\PycharmProjects\\MafiaZadrotBot\\docs\\sales.xlsx', sheet_name='Sheet1')\n text = f'Регион: {update.user_data[\"region\"]}\\n' \\\n 
f'Объем продаж: {df.Sales_volume[df[\"Region\"] == update.user_data[\"region\"]].tolist()[0]}'\n bot.message.reply_text(text, reply_markup=get_keyboard())\n return ConversationHandler.END\n\ndef save_doc_start(bot, update):\n if bot.message.text == 'Загрузить фото':\n bot.message.reply_text('Я готов сохранить фото. Вышлите фото в ответ на это сообщение', reply_markup=ReplyKeyboardRemove())\n return 'save_doc_photo_true'\n else:\n bot.message.reply_text('Я готов сохранить документ. Вышлите документ в ответ на это сообщение', reply_markup=ReplyKeyboardRemove())\n return 'save_doc_document_true'\n\ndef save_doc_photo_end(bot, update):\n Bot.get_file(Bot(TG_TOKEN), bot.message.photo[0].file_id).download(os.path.join(FILES_PATH, check_name('new.jpg')))\n bot.message.reply_text('Ваше фото сохранено', reply_markup=get_keyboard())\n return ConversationHandler.END\n\ndef save_doc_document_end(bot, update):\n Bot.get_file(Bot(TG_TOKEN), bot.message.document.file_id).download(os.path.join(FILES_PATH, check_name(bot.message.document.file_name)))\n bot.message.reply_text('Ваш документ сохранен', reply_markup=get_keyboard())\n return ConversationHandler.END\n","repo_name":"Oliv1989/Telegram_bot","sub_path":"MafiaZadrotBot/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42932614416","text":"#!/usr/bin/python3\n\nimport numpy as np\n\nfrom pydrake.common import FindResourceOrThrow\nfrom pydrake.math import RigidTransform\nfrom pydrake.multibody.parsing import Parser\nfrom pydrake.systems.analysis import Simulator\nfrom pydrake.all import MultibodyPlant\nfrom pydrake.systems.framework import DiagramBuilder\nfrom pydrake.all import ConnectDrakeVisualizer, SceneGraph, HalfSpace, Box, ConnectContactResultsToDrakeVisualizer, Simulator\nfrom pydrake.multibody.plant import MultibodyPlant, AddMultibodyPlantSceneGraph, CoulombFriction\nfrom pydrake.systems.analysis import Simulator\nfrom pydrake.math import RollPitchYaw\nfrom typing import NamedTuple\nimport time\n\n'''\nFloating base is attached at the pelvis link\nFor generalized positions, first 7 values are 4 quaternion + 3 x,y,z of floating base\nFor generalized velocities, first 6 values are 3 rotational velocities + 3 xd, yd, zd\nHence generalized velocities are not strictly the derivative of generalized positions\n'''\n\nclass Atlas():\n # Atlas is 175kg according to drake/share/drake/examples/atlas/urdf/atlas_convex_hull.urdf\n M = 175.0\n g = -9.81\n PELVIS_HEIGHT = 0.93845\n\n class JointLimit(NamedTuple):\n effort: float\n lower: float\n upper: float\n velocity: float\n\n JOINT_LIMITS = {\n \"back_bkx\" : JointLimit(300, -0.523599, 0.523599, 12),\n \"back_bky\" : JointLimit(445, -0.219388, 0.538783, 9),\n \"back_bkz\" : JointLimit(106, -0.663225, 0.663225, 12),\n \"l_arm_elx\": JointLimit(112, 0, 2.35619, 12),\n \"l_arm_ely\": JointLimit(63, 0, 3.14159, 12),\n \"l_arm_shx\": JointLimit(99, -1.5708, 1.5708, 12),\n \"l_arm_shz\": JointLimit(87, -1.5708, 0.785398, 12),\n \"l_arm_mwx\": JointLimit(25, -1.7628, 1.7628, 10),\n \"l_arm_uwy\": JointLimit(25, -3.011, 3.011, 10),\n \"l_arm_lwy\": JointLimit(25, -2.9671, 2.9671, 10),\n \"l_leg_akx\": JointLimit(360, -0.8, 0.8, 12),\n \"l_leg_aky\": JointLimit(740, -1, 0.7, 12),\n \"l_leg_hpx\": JointLimit(530, -0.523599, 0.523599, 12),\n \"l_leg_hpy\": JointLimit(840, -1.61234, 0.65764, 12),\n \"l_leg_hpz\": JointLimit(275, -0.174358, 0.786794, 12),\n \"l_leg_kny\": 
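get_region in the Telegram-bot record above extracts one sales figure by boolean masking a DataFrame and calling .tolist()[0], which raises IndexError for unknown regions. A safer .loc-based lookup, sketched with an in-memory frame instead of the hard-coded Excel path:

import pandas as pd

df = pd.DataFrame({"Region": ["Norte", "Sur"], "Sales_volume": [120, 95]})

region = "Sur"
match = df.loc[df["Region"] == region, "Sales_volume"]
volume = match.iloc[0] if not match.empty else None  # guard against missing regions
print(volume)  # 95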
JointLimit(890, 0, 2.35637, 12),\n \"neck_ay\" : JointLimit(25, -0.602139, 1.14319, 6.28),\n \"r_arm_elx\": JointLimit(112, -2.35619, 0, 12),\n \"r_arm_ely\": JointLimit(63, 0, 3.14159, 12),\n \"r_arm_shx\": JointLimit(99, -1.5708, 1.5708, 12),\n \"r_arm_shz\": JointLimit(87, -0.785398, 1.5708, 12),\n \"r_arm_mwx\": JointLimit(25, -1.7628, 1.7628, 10),\n \"r_arm_uwy\": JointLimit(25, -3.011, 3.011, 10),\n \"r_arm_lwy\": JointLimit(25, -2.9671, 2.9671, 10),\n \"r_leg_akx\": JointLimit(360, -0.8, 0.8, 12),\n \"r_leg_aky\": JointLimit(740, -1, 0.7, 12),\n \"r_leg_hpx\": JointLimit(530, -0.523599, 0.523599, 12),\n \"r_leg_hpy\": JointLimit(840, -1.61234, 0.65764, 12),\n \"r_leg_hpz\": JointLimit(275, -0.786794, 0.174358, 12),\n \"r_leg_kny\": JointLimit(890, 0, 2.35637, 12)\n }\n\n # Taken from drake/drake-build/install/share/drake/examples/atlas/urdf/atlas_minimal_contact.urdf\n CONTACTS_PER_FRAME = {\n \"l_foot\": np.array([\n [-0.0876,0.066,-0.07645], # left heel\n [-0.0876,-0.0626,-0.07645], # right heel\n [0.1728,0.066,-0.07645], # left toe\n [0.1728,-0.0626,-0.07645], # right toe\n [0.086,0.066,-0.07645], # left midfoot_front\n [0.086,-0.0626,-0.07645], # right midfoot_front\n [-0.0008,0.066,-0.07645], # left midfoot_rear\n [-0.0008,-0.0626,-0.07645] # right midfoot_rear\n ]).T,\n \"r_foot\": np.array([\n [-0.0876,0.0626,-0.07645], # left heel\n [-0.0876,-0.066,-0.07645], # right heel\n [0.1728,0.0626,-0.07645], # left toe\n [0.1728,-0.066,-0.07645], # right toe\n [0.086,0.0626,-0.07645], # left midfoot_front\n [0.086,-0.066,-0.07645], # right midfoot_front\n [-0.0008,0.0626,-0.07645], # left midfoot_rear\n [-0.0008,-0.066,-0.07645] # right midfoot_rear\n ]).T\n }\n\n FOOT_OFFSET = np.array([0.0, 0.0, -0.07645])\n HEEL_OFFSET = np.array([-0.0876, 0.0, -0.07645])\n TOE_OFFSET = np.array([0.1728, 0.0, -0.07645])\n\n NUM_CONTACTS = sum([contacts.shape[1] for contacts in CONTACTS_PER_FRAME.values()])\n FLOATING_BASE_DOF = 6\n FLOATING_BASE_QUAT_DOF = 7 # Start index of actuated joints in generalized positions\n NUM_ACTUATED_DOF = 30\n TOTAL_DOF = FLOATING_BASE_DOF + NUM_ACTUATED_DOF\n POS_QW_IDX = 0\n POS_QX_IDX = 1\n POS_QY_IDX = 2\n POS_QZ_IDX = 3\n POS_X_IDX = 4\n POS_Y_IDX = 5\n POS_Z_IDX = 6\n\ndef getAllJointIndicesInGeneralizedPositions(plant):\n for joint_limit in Atlas.JOINT_LIMITS.items():\n index = getJointIndexInGeneralizedPositions(plant, joint_limit[0])\n print(f\"{joint_limit[0]}: {index}\")\n\ndef getActuatorIndex(plant, joint_name):\n return int(plant.GetJointActuatorByName(joint_name + \"_motor\").index())\n\ndef getJointLimitsSortedByActuator(plant):\n return sorted(Atlas.JOINT_LIMITS.items(), key=lambda entry : getActuatorIndex(plant, entry[0]))\n\ndef getJointIndexInGeneralizedPositions(plant, joint_name):\n return int(plant.GetJointByName(joint_name).position_start())\n\ndef getJointIndexInGeneralizedVelocities(plant, joint_name):\n return getJointIndexInGeneralizedPositions(plant, joint_name) - 1\n\n'''\nReturns the joint limits sorted by their position in the generalized positions\n'''\ndef getJointLimitsSortedByPosition(plant):\n return sorted(Atlas.JOINT_LIMITS.items(),\n key=lambda entry : getJointIndexInGeneralizedPositions(plant, entry[0]))\n\ndef getJointValues(plant, joint_names, context):\n ret = []\n for name in joint_names:\n ret.append(plant.GetJointByName(name).get_angle(context))\n return ret\n\n\ndef setJointValues(plant, joint_values, context):\n for i in range(len(joint_values)):\n plant.GetJointByIndex(i).set_angle(context, joint_values[i])\n\n# plant 
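JOINT_LIMITS in the Atlas record stores per-joint effort/position/velocity bounds as NamedTuples. A common use is clamping a commanded position vector to those bounds; a plain-numpy sketch (the alphabetical joint ordering here is for illustration only, not Drake's actuator order):

import numpy as np
from typing import NamedTuple

class JointLimit(NamedTuple):
    effort: float
    lower: float
    upper: float
    velocity: float

LIMITS = {"l_leg_kny": JointLimit(890, 0.0, 2.35637, 12),
          "l_leg_aky": JointLimit(740, -1.0, 0.7, 12)}

names = sorted(LIMITS)                       # fixed ordering for the vectors
lower = np.array([LIMITS[n].lower for n in names])
upper = np.array([LIMITS[n].upper for n in names])

q_cmd = np.array([-1.5, 3.0])                # aky, kny commands (out of range)
print(np.clip(q_cmd, lower, upper))          # [-1.       2.35637]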
is modified in place\ndef load_atlas(plant, add_ground=False):\n atlas_file = FindResourceOrThrow(\"drake/examples/atlas/urdf/atlas_minimal_contact.urdf\")\n # atlas_file = FindResourceOrThrow(\"drake/examples/atlas/urdf/atlas_convex_hull.urdf\")\n atlas = Parser(plant).AddModelFromFile(atlas_file)\n\n if add_ground:\n static_friction = 1.0\n green = np.array([0.5, 1.0, 0.5, 1.0])\n\n # plant.RegisterVisualGeometry(plant.world_body(), RigidTransform(), HalfSpace(),\n # \"GroundVisuaGeometry\", green)\n\n ground_friction = CoulombFriction(1.0, 1.0)\n plant.RegisterCollisionGeometry(plant.world_body(), RigidTransform(), HalfSpace(),\n \"GroundCollisionGeometry\", ground_friction)\n\n plant.Finalize()\n plant.set_penetration_allowance(1.0e-3)\n plant.set_stiction_tolerance(1.0e-3)\n\ndef set_atlas_initial_pose(plant, plant_context):\n pelvis = plant.GetBodyByName(\"pelvis\")\n X_WP = RigidTransform(RollPitchYaw(0.0, 0.0, 0.0), np.array([0.0, 0.0, 0.94]))\n plant.SetFreeBodyPose(plant_context, pelvis, X_WP)\n\ndef set_null_input(plant, plant_context):\n tau = np.zeros(plant.num_actuated_dofs())\n plant.get_actuation_input_port().FixValue(plant_context, tau)\n\ndef visualize(q, dt=None):\n builder = DiagramBuilder()\n plant, scene_graph = AddMultibodyPlantSceneGraph(builder, MultibodyPlant(0.001))\n load_atlas(plant, add_ground=True)\n plant_context = plant.CreateDefaultContext()\n ConnectContactResultsToDrakeVisualizer(builder=builder, plant=plant)\n ConnectDrakeVisualizer(builder=builder, scene_graph=scene_graph)\n diagram = builder.Build()\n\n if len(q.shape) == 1:\n q = np.reshape(q, (1, -1))\n\n for i in range(q.shape[0]):\n print(f\"knot point: {i}\")\n diagram_context = diagram.CreateDefaultContext()\n plant_context = diagram.GetMutableSubsystemContext(plant, diagram_context)\n set_null_input(plant, plant_context)\n\n plant.SetPositions(plant_context, q[i])\n simulator = Simulator(diagram, diagram_context)\n simulator.set_target_realtime_rate(0.0)\n simulator.AdvanceTo(0.0001)\n if not dt is None:\n time.sleep(5/(np.sum(dt))*dt[i])\n else:\n time.sleep(0.5)\n\nif __name__ == \"__main__\":\n builder = DiagramBuilder()\n plant, scene_graph = AddMultibodyPlantSceneGraph(builder, MultibodyPlant(1.0e-3))\n load_atlas(plant, add_ground=True)\n ConnectDrakeVisualizer(builder=builder, scene_graph=scene_graph)\n diagram = builder.Build()\n\n diagram_context = diagram.CreateDefaultContext()\n plant_context = diagram.GetMutableSubsystemContext(plant, diagram_context)\n\n set_atlas_initial_pose(plant, plant_context)\n set_null_input(plant, plant_context)\n\n simulator = Simulator(diagram, diagram_context)\n simulator.set_target_realtime_rate(0.2)\n simulator.AdvanceTo(2.0)\n","repo_name":"rcywongaa/humanoid","sub_path":"attic/old_Atlas.py","file_name":"old_Atlas.py","file_ext":"py","file_size_in_byte":9126,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"72"} +{"seq_id":"15614811779","text":"with open('data.txt', 'r') as file:\n data = file.read().split('\\n')\n\n# cut trailing newline\ndata.pop(-1)\n\n# Further data processing\n\n# fishes = []\n# fishes.append([int(e) for e in data[0].split(',')])\n# fishes.append([int(e) for e in data[0].split(',')])\n\n# part 2 data processing\nfishes = [int(e) for e in data[0].split(',')]\n\n# breakpoint()\n\n# cycle = 7\ncycle_num = 6\nbirth_num = 8\n\n# breakpoint()\n\n# Part 1\n\n# just need a back buffer\n\ncurrent = 0\nback = 1\n\ndays = 256\n\n# for _ in range(days):\n# for i in range(len(fishes[current])):\n# 
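NOTE: part 1 (below, commented out) stores every fish individually in a\n# double-buffered list, so memory grows with the population; part 2 further\n# down replaces it with a fixed table of 9 timer counts.\n# 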
fish = fishes[current][i] - 1\n# if fish == -1:\n# fish += cycle\n# fishes[back].append(8)\n# fishes[current].append(8)\n\n# # breakpoint()\n# fishes[back][i] = fish\n\n# # swap the buffer\n# temp = current\n# current = back\n# back = temp\n\n# print(len(fishes[current]))\n\n# Part 2\n\n# create a table of how many fish in each number of days\n# update that table\n\ntable = dict()\n\n# lazy init\ntable[0] = 0\ntable[1] = 0\ntable[2] = 0\ntable[3] = 0\ntable[4] = 0\ntable[5] = 0\ntable[6] = 0\ntable[7] = 0\ntable[8] = 0\n\nfor fish in fishes:\n table[fish] += 1\n\n# print(table)\n\nfor _ in range(days):\n zero = table[0]\n # print(zero)\n for i in range(1, 9):\n table[i-1] = table[i]\n\n table[birth_num] = zero\n table[cycle_num] += zero\n\n # print(table)\n # breakpoint()\n\n\n\ncount = 0\n\nfor key, value in table.items():\n count += value\n\nprint(count)\n","repo_name":"advent-of-codes/2021","sub_path":"06/lanternfish.py","file_name":"lanternfish.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42258565067","text":"import re\n\nclass CFToken:\n\n def __init__(self, value, line, col):\n self.value = value\n self.line = line \n self.col = col\n\n def __repr__(self):\n return f'{self.__class__.__name__}(value={self.value!r}, line={self.line}, col={self.col})'\n\nclass CFTagToken(CFToken):\n \n TAG_REGEX = re.compile(r'<\\/?\\w.*?>')\n\n def __init__(self, value, line, col):\n super().__init__(value, line, col)\n self.name = value[1:-1] \n\nclass CFStringToken(CFToken):\n\n STRING_REGEX = re.compile(r'(\".*?\")|(\\'.*?\\')')\n LITERAL = 'literal'\n SINGLE_QUOTE = 'single quote'\n\n def __init__(self, value, line, col):\n super().__init__(value, line, col)\n self.type = self.LITERAL if value.startswith('\"') else self.SINGLE_QUOTE\n\nclass CFOperatorToken(CFToken):\n\n OPERATORS = {'=': 'ASSIGN', '==': 'EQ', '!=': 'NEQ', \n '<': 'LT', '>': 'GT', '<=': 'LTE', '>=': 'GTE',\n '&&': 'AND', '||': 'OR', '+': 'ADD', '-': 'SUB',\n '*': 'MUL', '/': 'DIV'}\n\n def __init__(self, value, line, col):\n super().__init__(value, line, col)\n self.type = self.OPERATORS.get(value, 'UNKNOWN')\n\nclass CFKeywordToken(CFToken):\n \n KEYWORDS = { \n 'if': 'if',\n 'else': 'else',\n 'while': 'while',\n 'for': 'for',\n 'function': 'function',\n 'component': 'component',\n 'property': 'property',\n 'return': 'return',\n 'include': 'include',\n 'import': 'import',\n 'param': 'param',\n 'try': 'try',\n 'catch': 'catch',\n 'interface': 'interface',\n 'implements': 'implements'\n }\n\n def __init__(self, value, line, col):\n super().__init__(value, line, col)\n self.keyword = self.KEYWORDS.get(value)\n \n def __str__(self):\n return f'CFKeywordToken({self.keyword})' if self.keyword else 'CFKeywordToken(UNKNOWN)'\n \nclass CFFunctionToken(CFToken):\n\n FUNCTIONS = {\n 'Abs': 'Abs',\n 'ACos': 'ACos',\n 'AddSOAPRequestHeader': 'AddSOAPRequestHeader',\n 'AddSOAPResponseHeader': 'AddSOAPResponseHeader',\n 'ArrayAppend': 'ArrayAppend',\n 'ArrayAvg': 'ArrayAvg',\n 'ArrayClear': 'ArrayClear',\n 'ArrayDeleteAt': 'ArrayDeleteAt',\n 'ArrayInsertAt': 'ArrayInsertAt',\n 'ArrayIsDefined': 'ArrayIsDefined',\n 'ArrayIsEmpty': 'ArrayIsEmpty',\n 'ArrayLen': 'ArrayLen',\n 'ArrayMax': 'ArrayMax',\n 'ArrayMin': 'ArrayMin',\n 'ArrayNew': 'ArrayNew',\n 'ArrayPrepend': 'ArrayPrepend',\n 'ArrayResize': 'ArrayResize',\n 'ArraySet': 'ArraySet',\n 'ArraySort': 'ArraySort',\n 'ArraySum': 'ArraySum',\n 'ArraySwap': 'ArraySwap',\n 'ArrayToList': 
'ArrayToList',\n 'Asc': 'Asc',\n 'ASin': 'ASin',\n 'ATan': 'ATan',\n 'Beat': 'Beat',\n 'BinaryDecode': 'BinaryDecode',\n 'BinaryEncode': 'BinaryEncode',\n 'BitAnd': 'BitAnd',\n 'BitMaskClear': 'BitMaskClear',\n 'BitMaskRead': 'BitMaskRead',\n 'BitMaskSet': 'BitMaskSet',\n 'BitNot': 'BitNot',\n 'BitOr': 'BitOr',\n 'BitSHLN': 'BitSHLN',\n 'BitSHRN': 'BitSHRN',\n 'BitXOr': 'BitXOr',\n 'Ceiling': 'Ceiling',\n 'CFusion_decrypt': 'CFusion_decrypt',\n 'CFusion_encrypt': 'CFusion_encrypt',\n 'CharsetDecode': 'CharsetDecode',\n 'CharsetEncode': 'CharsetEncode',\n 'Chr': 'Chr',\n 'CJustify': 'CJustify',\n 'Compare': 'Compare',\n 'CompareNoCase': 'CompareNoCase',\n 'CompileRest': 'CompileRest',\n 'ComponentInfo': 'ComponentInfo',\n 'CompoundHash': 'CompoundHash',\n 'ContractPath': 'ContractPath',\n 'Cos': 'Cos',\n 'CreateDecoder': 'CreateDecoder',\n 'CreateObject': 'CreateObject',\n 'CreateUUId': 'CreateUUId',\n 'CreateODBCDate': 'CreateODBCDate',\n 'CreateODBCDateTime': 'CreateODBCDateTime',\n 'CreateODBCTime': 'CreateODBCTime',\n 'CreateTime': 'CreateTime',\n 'CreateTimeSpan': 'CreateTimeSpan',\n 'CreateUniqueString': 'CreateUniqueString',\n 'DateAdd': 'DateAdd',\n 'DateCompare': 'DateCompare',\n 'DateConvert': 'DateConvert',\n 'DateDiff': 'DateDiff',\n 'DateFormat': 'DateFormat',\n 'DatePart': 'DatePart',\n 'DateTimeFormat': 'DateTimeFormat',\n 'Day': 'Day',\n 'DayOfWeek': 'DayOfWeek',\n 'DayOfWeekAsString': 'DayOfWeekAsString',\n 'DayOfYear': 'DayOfYear',\n 'DaysInMonth': 'DaysInMonth',\n 'DaysInYear': 'DaysInYear',\n 'DecrementValue': 'DecrementValue',\n 'Decrypt': 'Decrypt',\n 'DecryptBinary': 'DecryptBinary',\n 'DeleteClientVariable': 'DeleteClientVariable',\n 'DESDecrypt': 'DESDecrypt',\n 'DESEncrypt': 'DESEncrypt',\n 'DirectoryExists': 'DirectoryExists',\n 'DollarFormat': 'DollarFormat',\n 'Duplicate': 'Duplicate',\n 'Each': 'Each',\n 'Encrypt': 'Encrypt',\n 'EncryptBinary': 'EncryptBinary',\n 'EntityDecode': 'EntityDecode',\n 'EntityEncode': 'EntityEncode',\n 'Exp': 'Exp',\n 'ExpandPath': 'ExpandPath',\n 'Extract': 'Extract',\n 'FileExists': 'FileExists',\n 'Find': 'Find',\n 'FindNoCase': 'FindNoCase',\n 'FindOneOf': 'FindOneOf',\n 'FirstDayOfMonth': 'FirstDayOfMonth',\n 'Fix': 'Fix',\n 'FormatBaseN': 'FormatBaseN',\n 'GetApplicationSettings': 'GetApplicationSettings',\n 'GetApplicationMetaData': 'GetApplicationMetaData',\n 'GetAuthUser': 'GetAuthUser',\n 'GetBaseTagData': 'GetBaseTagData',\n 'GetBaseTagList': 'GetBaseTagList',\n 'GetBaseTemplatePath': 'GetBaseTemplatePath',\n 'GetClientVariablesList': 'GetClientVariablesList',\n 'GetComponentMetaData': 'GetComponentMetaData',\n 'GetContextRoot': 'GetContextRoot',\n 'GetCurrentTemplatePath': 'GetCurrentTemplatePath',\n 'GetDirectoryFromPath': 'GetDirectoryFromPath',\n 'GetEncoding': 'GetEncoding',\n 'GetException': 'GetException',\n 'GetFileFromPath': 'GetFileFromPath',\n 'GetFileInfo': 'GetFileInfo',\n 'GetFunctionList': 'GetFunctionList',\n 'GetGatewayHelper': 'GetGatewayHelper',\n 'GetHttpTimeString': 'GetHttpTimeString',\n 'GetK2ServerDocCount': 'GetK2ServerDocCount',\n 'GetK2ServerDocCountLimit': 'GetK2ServerDocCountLimit',\n 'GetLocale': 'GetLocale',\n 'GetLocaleDisplayName': 'GetLocaleDisplayName',\n 'GetMetaData': 'GetMetaData',\n 'GetMetricData': 'GetMetricData',\n 'GetPageContext': 'GetPageContext',\n 'GetProfileSections': 'GetProfileSections',\n 'GetProfileString': 'GetProfileString',\n 'GetRequestData': 'GetRequestData',\n 'GetScriptProtectNumLines': 'GetScriptProtectNumLines',\n 'GetSOAPRequest': 'GetSOAPRequest',\n 
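# NOTE: values simply mirror the keys; the dict serves both as the\n # membership set scanned in tokenize_functions and as the\n # FUNCTIONS.get(value) lookup used by __init__ below.\n 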
'GetSOAPRequestHeader': 'GetSOAPRequestHeader',\n 'GetSOAPResponse': 'GetSOAPResponse',\n 'GetSOAPResponseHeader': 'GetSOAPResponseHeader',\n 'GetTempDirectory': 'GetTempDirectory',\n 'GetTempFile': 'GetTempFile',\n 'GetTickCount': 'GetTickCount',\n 'GetTimeZoneInfo': 'GetTimeZoneInfo',\n 'GetToken': 'GetToken',\n 'GetTotalSpace': 'GetTotalSpace',\n 'GetUserRoles': 'GetUserRoles',\n 'GetWriteableImageFormats': 'GetWriteableImageFormats',\n 'Hash': 'Hash',\n 'Hash40': 'Hash40',\n 'Hour': 'Hour',\n 'HTMLCodeFormat': 'HTMLCodeFormat',\n 'HTMLEditFormat': 'HTMLEditFormat',\n 'IIf': 'IIf',\n 'ImageAddBorder': 'ImageAddBorder',\n 'ImageBlur': 'ImageBlur',\n 'ImageClearRect': 'ImageClearRect',\n 'ImageCopy': 'ImageCopy',\n 'ImageCreateCaptcha': 'ImageCreateCaptcha',\n 'ImageCrop': 'ImageCrop',\n 'ImageDilate': 'ImageDilate',\n 'ImageDrawArc': 'ImageDrawArc',\n 'ImageDrawBeveledRect': 'ImageDrawBeveledRect',\n 'ImageDrawCubicCurve': 'ImageDrawCubicCurve',\n 'ImageDrawLine': 'ImageDrawLine',\n 'ImageDrawLines': 'ImageDrawLines',\n 'ImageDrawOval': 'ImageDrawOval',\n 'ImageDrawPoint': 'ImageDrawPoint',\n 'ImageDrawQuadraticCurve': 'ImageDrawQuadraticCurve',\n 'ImageDrawRect': 'ImageDrawRect',\n 'ImageDrawRoundRect': 'ImageDrawRoundRect',\n 'ImageDrawText': 'ImageDrawText',\n 'ImageFlip': 'ImageFlip',\n 'ImageGetBlob': 'ImageGetBlob',\n 'ImageGetBufferedImage': 'ImageGetBufferedImage',\n 'ImageGetEXIFTag': 'ImageGetEXIFTag',\n 'ImageGetHeight': 'ImageGetHeight',\n 'ImageGetIPTCTag': 'ImageGetIPTCTag',\n 'ImageGetWidth': 'ImageGetWidth',\n 'ImageGrayScale': 'ImageGrayScale',\n 'ImageInfo': 'ImageInfo',\n 'ImageNegative': 'ImageNegative',\n 'ImageNew': 'ImageNew',\n 'ImageOverlay': 'ImageOverlay',\n 'ImagePaste': 'ImagePaste',\n 'ImageRead': 'ImageRead',\n 'ImageReadBase64': 'ImageReadBase64',\n 'ImageResize': 'ImageResize',\n 'ImageRotate': 'ImageRotate',\n 'ImageRotateDrawingAxis': 'ImageRotateDrawingAxis',\n 'ImageScaleToFit': 'ImageScaleToFit',\n 'ImageSetBackgroundColor': 'ImageSetBackgroundColor',\n 'ImageSetDrawingColor': 'ImageSetDrawingColor',\n 'ImageSetDrawingStroke': 'ImageSetDrawingStroke',\n 'ImageSetDrawingTransparency': 'ImageSetDrawingTransparency',\n 'ImageSharpen': 'ImageSharpen',\n 'ImageShear': 'ImageShear',\n 'ImageShearDrawingAxis': 'ImageShearDrawingAxis',\n 'ImageTranslate': 'ImageTranslate',\n 'ImageTranslateDrawingAxis': 'ImageTranslateDrawingAxis',\n 'ImageWrite': 'ImageWrite',\n 'ImageWriteBase64': 'ImageWriteBase64',\n 'IncrementValue': 'IncrementValue',\n 'InputBaseN': 'InputBaseN',\n 'Insert': 'Insert',\n 'Int': 'Int',\n 'InvalidateOAuthaccesstoken': 'InvalidateOAuthaccesstoken',\n 'IsArray': 'IsArray',\n 'IsBinary': 'IsBinary',\n 'IsBoolean': 'IsBoolean',\n 'IsCustomFunction': 'IsCustomFunction',\n 'IsDate': 'IsDate',\n 'IsDDX': 'IsDDX',\n 'IsDebugMode': 'IsDebugMode',\n 'IsImage': 'IsImage',\n 'IsImageFile': 'IsImageFile',\n 'IsInstanceOf': 'IsInstanceOf',\n 'IsJSON': 'IsJSON',\n 'IsLeapYear': 'IsLeapYear',\n 'IsNumeric': 'IsNumeric',\n 'IsNumericDate': 'IsNumericDate',\n 'IsObject': 'IsObject',\n 'IsPDFFile': 'IsPDFFile',\n 'IsPDFObject': 'IsPDFObject',\n 'IsQuery': 'IsQuery',\n 'IsSimpleValue': 'IsSimpleValue',\n 'IsSOAPRequest': 'IsSOAPRequest',\n 'IsStruct': 'IsStruct',\n 'IsUserInAnyRole': 'IsUserInAnyRole',\n 'IsUserInRole': 'IsUserInRole',\n 'IsUserLoggedIn': 'IsUserLoggedIn',\n 'IsValid': 'IsValid',\n 'IsValidOauthaccesstoken': 'IsValidOauthaccesstoken',\n 'IsWDDX': 'IsWDDX',\n 'IsXML': 'IsXML',\n 'IsXMLAttribute': 'IsXMLAttribute',\n 'IsXMLElement': 
'IsXMLElement',\n 'IsXMLNode': 'IsXMLNode',\n 'IsXMLRoot': 'IsXMLRoot',\n 'IsXSSFFile': 'IsXSSFFile',\n 'IsXSSFPicture': 'IsXSSFPicture',\n 'IsXSSFShape': 'IsXSSFShape',\n 'IsXSSFSheet': 'IsXSSFSheet',\n 'IsXSSFWorkbook': 'IsXSSFWorkbook',\n 'JSStringFormat': 'JSStringFormat',\n 'LCase': 'LCase',\n 'Left': 'Left',\n 'Len': 'Len',\n 'ListAppend': 'ListAppend',\n 'ListChangeDelims': 'ListChangeDelims',\n 'ListContains': 'ListContains',\n 'ListContainsNoCase': 'ListContainsNoCase',\n 'ListDeleteAt': 'ListDeleteAt',\n 'ListFilter': 'ListFilter',\n 'ListFind': 'ListFind',\n 'ListFindNoCase': 'ListFindNoCase',\n 'ListFirst': 'ListFirst',\n 'ListGetAt': 'ListGetAt',\n 'ListInsertAt': 'ListInsertAt',\n 'ListLast': 'ListLast',\n 'ListLen': 'ListLen',\n 'ListPrepend': 'ListPrepend',\n 'ListQualify': 'ListQualify',\n 'ListReduce': 'ListReduce',\n 'ListRemoveDuplicates': 'ListRemoveDuplicates',\n 'ListRest': 'ListRest',\n 'ListSetAt': 'ListSetAt',\n 'ListSort': 'ListSort',\n 'ListToArray': 'ListToArray',\n 'ListValueCount': 'ListValueCount',\n 'ListValueCountNoCase': 'ListValueCountNoCase',\n 'LJustify': 'LJustify',\n 'LSParseCurrency': 'LSParseCurrency',\n 'LSParseDateTime': 'LSParseDateTime',\n 'LSParseEuroCurrency': 'LSParseEuroCurrency',\n 'LSParseNumber': 'LSParseNumber',\n 'Max': 'Max',\n 'Metaphone': 'Metaphone',\n 'Mid': 'Mid',\n 'Min': 'Min',\n 'Minute': 'Minute',\n 'Month': 'Month',\n 'MonthAsString': 'MonthAsString',\n 'Now': 'Now',\n 'NumberFormat': 'NumberFormat',\n 'ObjectEquals': 'ObjectEquals',\n 'ObjectLoad': 'ObjectLoad',\n 'ObjectSave': 'ObjectSave',\n 'OnApplicationEnd': 'OnApplicationEnd',\n 'OnApplicationStart': 'OnApplicationStart',\n 'OnError': 'OnError',\n 'OnMissingMethod': 'OnMissingMethod',\n 'OnMissingTemplate': 'OnMissingTemplate',\n 'OnRequest': 'OnRequest',\n 'OnRequestEnd': 'OnRequestEnd',\n 'OnRequestStart': 'OnRequestStart',\n 'OnSessionEnd': 'OnSessionEnd',\n 'OnSessionStart': 'OnSessionStart',\n 'ParagraphFormat': 'ParagraphFormat',\n 'ParseDateTime': 'ParseDateTime',\n 'ParseNumber': 'ParseNumber',\n 'Pi': 'Pi',\n 'PreserveSingleQuotes': 'PreserveSingleQuotes',\n 'Quarter': 'Quarter',\n 'QueryAddColumn': 'QueryAddColumn',\n 'QueryAddRow': 'QueryAddRow',\n 'QueryColumnArray': 'QueryColumnArray',\n 'QueryColumnCount': 'QueryColumnCount',\n 'QueryColumnExists': 'QueryColumnExists',\n 'QueryColumnList': 'QueryColumnList',\n 'QueryConvertForGrid': 'QueryConvertForGrid',\n 'QueryCurrentRow': 'QueryCurrentRow',\n 'QueryDeleteColumn': 'QueryDeleteColumn',\n 'QueryDeleteRow': 'QueryDeleteRow',\n 'QueryGetCell': 'QueryGetCell',\n 'QueryGetRow': 'QueryGetRow',\n 'QueryMap': 'QueryMap',\n 'QueryNew': 'QueryNew',\n 'QueryRecordCount': 'QueryRecordCount',\n 'QueryReduce': 'QueryReduce',\n 'QueryRowData': 'QueryRowData',\n 'QuerySetCell': 'QuerySetCell',\n 'Rand': 'Rand',\n 'Randomize': 'Randomize',\n 'RandRange': 'RandRange',\n 'REFind': 'REFind',\n 'REFindNoCase': 'REFindNoCase',\n 'ReleaseComObject': 'ReleaseComObject',\n 'RemoveChars': 'RemoveChars',\n 'RemoveNumeric': 'RemoveNumeric',\n 'RepeatString': 'RepeatString',\n 'Replace': 'Replace',\n 'ReplaceList': 'ReplaceList',\n 'ReplaceNoCase': 'ReplaceNoCase',\n 'REReplace': 'REReplace',\n 'REReplaceNoCase': 'REReplaceNoCase',\n 'RESplit': 'RESplit',\n 'RESplitNoCase': 'RESplitNoCase',\n 'Reverse': 'Reverse',\n 'RTrim': 'RTrim',\n 'Second': 'Second',\n 'SendGatewayMessage': 'SendGatewayMessage',\n 'Serialize': 'Serialize',\n 'SerializeJSON': 'SerializeJSON',\n 'SerializeXML': 'SerializeXML',\n 'SessionId': 'SessionId',\n 
'SessionInvalidate': 'SessionInvalidate',\n 'SessionRotate': 'SessionRotate',\n 'SessionTimeout': 'SessionTimeout',\n 'SetEncoding': 'SetEncoding',\n 'SetLocale': 'SetLocale',\n 'SetProfileString': 'SetProfileString',\n 'SetVariable': 'SetVariable',\n 'Sgn': 'Sgn',\n 'Sin': 'Sin',\n 'SpanExcluding': 'SpanExcluding',\n 'SpanIncluding': 'SpanIncluding',\n 'SpreadsheetAddAutoFilter': 'SpreadsheetAddAutoFilter',\n 'SpreadsheetAddColumn': 'SpreadsheetAddColumn',\n 'SpreadsheetAddFreezePane': 'SpreadsheetAddFreezePane',\n 'SpreadsheetAddImage': 'SpreadsheetAddImage',\n 'SpreadsheetAddInfo': 'SpreadsheetAddInfo',\n 'SpreadsheetAddRow': 'SpreadsheetAddRow',\n 'SpreadsheetAddRows': 'SpreadsheetAddRows',\n 'SpreadsheetAddSplitPane': 'SpreadsheetAddSplitPane',\n 'SpreadsheetCreateSheet': 'SpreadsheetCreateSheet',\n 'SpreadsheetDeleteColumn': 'SpreadsheetDeleteColumn',\n 'SpreadsheetDeleteColumns': 'SpreadsheetDeleteColumns',\n 'SpreadsheetDeleteRow': 'SpreadsheetDeleteRow',\n 'SpreadsheetDeleteRows': 'SpreadsheetDeleteRows',\n 'SpreadsheetFormatCell': 'SpreadsheetFormatCell',\n 'SpreadsheetFormatCellRange': 'SpreadsheetFormatCellRange',\n 'SpreadsheetFormatColumn': 'SpreadsheetFormatColumn',\n 'SpreadsheetFormatColumns': 'SpreadsheetFormatColumns',\n 'SpreadsheetFormatRow': 'SpreadsheetFormatRow',\n 'SpreadsheetFormatRows': 'SpreadsheetFormatRows',\n 'SpreadsheetGetCellComment': 'SpreadsheetGetCellComment',\n 'SpreadsheetGetCellFormula': 'SpreadsheetGetCellFormula',\n 'SpreadsheetGetCellValue': 'SpreadsheetGetCellValue',\n 'SpreadsheetInfo': 'SpreadsheetInfo',\n 'SpreadsheetMergeCells': 'SpreadsheetMergeCells',\n 'SpreadsheetNew': 'SpreadsheetNew',\n 'SpreadsheetRead': 'SpreadsheetRead',\n 'SpreadsheetReadBinary': 'SpreadsheetReadBinary',\n 'SpreadsheetRemoveSheet': 'SpreadsheetRemoveSheet',\n 'SpreadsheetSetActiveSheet': 'SpreadsheetSetActiveSheet',\n 'SpreadsheetSetActiveSheetNumber': 'SpreadsheetSetActiveSheetNumber',\n 'SpreadsheetSetCellComment': 'SpreadsheetSetCellComment',\n 'SpreadsheetSetCellFormula': 'SpreadsheetSetCellFormula',\n 'SpreadsheetSetCellValue': 'SpreadsheetSetCellValue',\n 'SpreadsheetSetColumnWidth': 'SpreadsheetSetColumnWidth',\n 'SpreadsheetSetFooter': 'SpreadsheetSetFooter',\n 'SpreadsheetSetHeader': 'SpreadsheetSetHeader',\n 'SpreadsheetSetRowHeight': 'SpreadsheetSetRowHeight',\n 'SpreadsheetShiftColumns': 'SpreadsheetShiftColumns',\n 'SpreadsheetShiftRows': 'SpreadsheetShiftRows',\n 'SpreadsheetWrite': 'SpreadsheetWrite',\n 'Sqrt': 'Sqrt',\n 'StripCR': 'StripCR',\n 'StructAppend': 'StructAppend',\n 'StructClear': 'StructClear',\n 'StructCopy': 'StructCopy',\n 'StructCount': 'StructCount',\n 'StructDelete': 'StructDelete',\n 'StructEach': 'StructEach',\n 'StructFilter': 'StructFilter',\n 'StructFind': 'StructFind',\n 'StructFindKey': 'StructFindKey',\n 'StructFindValue': 'StructFindValue',\n 'StructGet': 'StructGet',\n 'StructInsert': 'StructInsert',\n 'StructIsEmpty': 'StructIsEmpty',\n 'StructKeyArray': 'StructKeyArray',\n 'StructKeyExists': 'StructKeyExists',\n 'StructKeyList': 'StructKeyList',\n 'StructNew': 'StructNew',\n 'StructSort': 'StructSort',\n 'StructUpdate': 'StructUpdate',\n 'Tan': 'Tan',\n 'Throw': 'Throw',\n 'TimeFormat': 'TimeFormat',\n 'ToBase64': 'ToBase64',\n 'ToBinary': 'ToBinary',\n 'ToScript': 'ToScript',\n 'ToString': 'ToString',\n 'Trace': 'Trace',\n 'TransactionCommit': 'TransactionCommit',\n 'TransactionRollback': 'TransactionRollback',\n 'Trim': 'Trim',\n 'UCase': 'UCase',\n 'URLDecode': 'URLDecode',\n 'URLEncodedFormat': 'URLEncodedFormat',\n 
'URLSessionFormat': 'URLSessionFormat',\n 'Val': 'Val',\n 'ValueArray': 'ValueArray',\n 'ValueList': 'ValueList',\n 'VerifyClient': 'VerifyClient',\n 'Week': 'Week',\n 'Wrap': 'Wrap',\n 'WriteBody': 'WriteBody',\n 'WriteDump': 'WriteDump',\n 'WriteLog': 'WriteLog',\n 'WriteOutput': 'WriteOutput',\n 'WSGetAllChannels': 'WSGetAllChannels',\n 'WSGetSubscribers': 'WSGetSubscribers',\n 'WSPublish': 'WSPublish',\n 'WSSendMessage': 'WSSendMessage',\n 'WSSubscribe': 'WSSubscribe',\n 'XMLChildPos': 'XMLChildPos',\n 'XMLElemNew': 'XMLElemNew',\n 'XMLFormat': 'XMLFormat',\n 'XMLGetNodeType': 'XMLGetNodeType',\n 'XMLNew': 'XMLNew',\n 'XMLParse': 'XMLParse',\n 'XMLSearch': 'XMLSearch',\n 'XMLTransform': 'XMLTransform',\n 'XMLValidate': 'XMLValidate',\n 'Year': 'Year',\n 'YesNoFormat': 'YesNoFormat'\n }\n\n def __init__(self, value, line, col):\n super().__init__(value, line, col)\n self.function = self.FUNCTIONS.get(value)\n \n def __str__(self):\n return f'CFFunctionToken({self.function})' if self.function else 'CFFunctionToken(UNKNOWN)'\n\nclass CFScriptToken(CFToken):\n\n def __init__(self, value, line, col):\n # Validate script token format\n if not value.startswith('<cfscript>') or not value.endswith('</cfscript>'):\n raise ValueError('Invalid script token format')\n\n super().__init__(value, line, col)\n # strip the wrapping tags: 10 == len('<cfscript>'), 11 == len('</cfscript>')\n self.script = value[10:-11].strip()\n\n def _get_script(self, value):\n if value.startswith('<cfscript>') and value.endswith('</cfscript>'):\n return value[10:-11].strip()\n else:\n return None\n \n def __str__(self):\n script = self.script[:20] if self.script else 'INVALID'\n return f'CFScriptToken({script}...)'\n\nclass CFTokenizer:\n\n def __init__(self, code):\n self.code = code\n self.tokens = []\n\n def tokenize(self):\n lines = self.code.split('\\n')\n \n for line_num, line in enumerate(lines, start=1):\n self.tokenize_line(line, line_num)\n\n return self.tokens\n\n def tokenize_line(self, text, line_num):\n self.tokenize_tags(text, line_num)\n self.tokenize_strings(text, line_num)\n self.tokenize_keywords(text, line_num)\n self.tokenize_operators(text, line_num)\n self.tokenize_functions(text, line_num)\n self.tokenize_script(text, line_num)\n\n def tokenize_tags(self, text, line):\n for match in CFTagToken.TAG_REGEX.finditer(text):\n token = CFTagToken(match.group(), line, match.start()+1)\n self.tokens.append(token)\n\n def tokenize_strings(self, text, line):\n for match in CFStringToken.STRING_REGEX.finditer(text):\n token = CFStringToken(match.group(), line, match.start()+1)\n self.tokens.append(token)\n\n def tokenize_keywords(self, text, line):\n for word in CFKeywordToken.KEYWORDS:\n idx = text.find(word)\n if idx != -1:\n token = CFKeywordToken(word, line, idx+1)\n self.tokens.append(token)\n\n def tokenize_operators(self, text, line):\n for op in CFOperatorToken.OPERATORS:\n idx = text.find(op)\n if idx != -1:\n token = CFOperatorToken(op, line, idx+1)\n self.tokens.append(token)\n \n def tokenize_functions(self, text, line):\n if not isinstance(text, str):\n return\n \n for func in CFFunctionToken.FUNCTIONS:\n idx = text.find(func)\n if idx != -1:\n token = CFFunctionToken(func, line, idx+1)\n self.tokens.append(token)\n\n def tokenize_script(self, text, line):\n idx = text.find('<cfscript>')\n end = text.find('</cfscript>', idx)\n if idx != -1 and end != -1:\n # keep the full <cfscript>...</cfscript> span (11 == len('</cfscript>'))\n # so the format check in CFScriptToken.__init__ passes\n script = text[idx:end + 11]\n token = CFScriptToken(script, line, idx+1)\n 
self.tokens.append(token)","repo_name":"BangRocket/ColdfusionTokenizer","sub_path":"cftokenizer.py","file_name":"cftokenizer.py","file_ext":"py","file_size_in_byte":21120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9063843928","text":"import numpy as np\nfrom pint.models import get_model\nimport pywt\nimport tempfile\n\ndef get_pint_model(ds):\n with tempfile.NamedTemporaryFile('w+') as tp:\n tp.write(ds.model)\n tp.flush()\n model = get_model(tp.name)\n return model\n\n# Modified from code written by E. Fonseca for PulsePortraiture\ndef wavelet_smooth(prof, wavelet='db8', nlevel=5, threshtype='hard', fact=1.0):\n nbin = prof.shape[-1]\n # Translation-invariant (stationary) wavelet transform/denoising\n coeffs = np.array(pywt.swt(prof, wavelet, level=nlevel, start_level=0, axis=-1))\n # Get threshold value\n lopt = fact * (np.median(np.abs(coeffs[0])) / 0.6745) * np.sqrt(2 * np.log(nbin))\n # Do wavelet thresholding\n coeffs = pywt.threshold(coeffs, lopt, mode=threshtype, substitute=0.0)\n # Reconstruct data\n smooth_prof = pywt.iswt(list(map(tuple, coeffs)), wavelet)\n return smooth_prof\n","repo_name":"rossjjennings/psrfits","sub_path":"psrfits/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14454789629","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 20 11:18:26 2019\n@author: chiappisil\n\"\"\"\nfrom math import radians, exp\nfrom scipy.special import gamma\nfrom scipy.stats import linregress\nimport os as os\nimport numpy as np\nimport sys\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as mpatches\nfrom lmfit import minimize, Parameters, fit_report\nimport SLS_DLS2 as dls\n\n\nRR = 1.35e-5 #cm-1, Rayleigh ratio of toluene, the standard used for renormalizing. 
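\n\n# A hypothetical one-file smoke test (folder and filename are placeholders;\n# expects an ALV-style .ASC file with the sections parsed below):\n#   rec = extract_data_file('data', 'run0001.ASC')\n#   print(rec['T'], rec['Angle'], rec['mean_CR0'], rec['mean_CR1'])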
\n\ndef extract_data_file(data_path, filename):\n ''' Extracts from each file the values of temperature, angle, countrates and monitor intensity'''\n # T = '' #Sample temperature in Kelvin\n # Angle = '' #Measurement angle in degree\n # CR0 = '' #Mean countrate of channel 0\n # CR1 = '' #Mean countrate of channel 1\n # Imon = '' #Value of the monitor diode\n # print(sample_info)\n # data_path = sample_info['data_path']\n with open(os.path.join(data_path, filename), \"r\", encoding='latin1') as f:\n for index, line in enumerate(f):\n if line.startswith('Temp'):\n T = float(line.split(':')[1])\n if line.startswith('Date'):\n Date = line.split(':')[1].split('\"')[1].split('\"')[0]\n if line.startswith('Time'):\n Time = line.split(' :')[1].split('\"')[1].split('\"')[0]\n if line.startswith('Angle'):\n Angle = float(line.split(':')[1]) \n if line.startswith('MeanCR0'):\n CR0 = float(line.split(':')[1])\n if line.startswith('MeanCR1'): \n CR1 = float(line.split(':')[1])\n if line.startswith('Wavelength'): \n Wavelength = float(line.split(':')[1])\n if line.startswith('Monitor'):\n Imon = float(line.split('Diode')[1])\n Monitor_diode_line = index\n if line.startswith('\"Count Rate\"'):\n Count_rate_line = index \n if line.startswith('\"Correlation\"'):\n Correlation_line = index\n \n CRs = np.loadtxt(os.path.join(data_path, filename), skiprows=Count_rate_line+2, encoding='latin1', usecols=(0,1,2), max_rows=Monitor_diode_line-Count_rate_line-2)\n mean_CR0 = np.average(CRs[:,1])\n mean_CR1 = np.average(CRs[:,2])\n temp = np.loadtxt(os.path.join(data_path, filename), skiprows=Correlation_line+1, encoding='latin1', usecols=(0,1,2), max_rows=Count_rate_line-Correlation_line-2)\n g2s = pd.DataFrame(temp, columns=['tau', 'g2_1','g2_2']).set_index('tau')\n g2s['g2_average'] = np.average([g2s['g2_1'].values, g2s['g2_2'].values], axis=0)\n g2s['g2_std'] = np.std([g2s['g2_1'].values, g2s['g2_2'].values], axis=0)\n \n \n return {'T': T, \n 'Angle': Angle,\n 'Imon': Imon, \n 'CR0': CR0,\n 'CR1':CR1,\n 'Date_Time': Date + ' ' + Time,\n# 'Time': Time,\n# 'Date': Date,\n 'Wavelength': Wavelength,\n 'mean_CR0': mean_CR0,\n 'mean_CR1': mean_CR1,\n 'CRs': CRs,\n 'g2s':g2s}\n\ndef extract_data(sample_info):\n ''' Extracts all information from the datafiles contained in the folder data_path, and returns\n a dictionary where each element is itself a dictionary with all the information contained in each\n file in the folder. 
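Each per-file dictionary holds the keys T, Angle, Imon, CR0, CR1,\n Date_Time, Wavelength, mean_CR0, mean_CR1, CRs and g2s. 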
'''\n \n data_path = sample_info['data_path']\n data = {}\n for file in sorted(os.listdir(data_path)):\n if file.endswith(\".ASC\"):\n data[file] = extract_data_file(data_path, file) \n \n parameters = ['T', 'Angle', 'Imon', 'CR0', 'CR1', 'mean_CR0', 'mean_CR1', 'Date_Time', 'Wavelength'] #parameters found in the summary pandas dataframe\n data_summary = pd.DataFrame(columns=parameters)\n for key in data:\n temp_dict = {requested_value : data[key][requested_value] for requested_value in parameters}\n data_summary.loc[key] = temp_dict\n \n data_summary['q'] = 4*np.pi/data_summary['Wavelength']*sample_info['refractive_index']*np.sin(np.radians(data_summary['Angle']/2))\n \n data_summary['Date_Time']= pd.to_datetime(data_summary['Date_Time']) \n data_average = data_summary.groupby(['Angle']).agg([np.mean, np.std])\n print('Data from {} imported correctly.'.format(data_path))\n \n \n # data_solvent = {}\n # if sample_info['data_path_solvent']: #if the solvent data path has been provided\n # solvent_path = sample_info['data_path_solvent']\n # for file in sorted(os.listdir(solvent_path)):\n # if file.endswith(\".ASC\"):\n # data_solvent[file] = extract_data_file(solvent_path, file) \n # parameters = ['T', 'Angle', 'Imon', 'CR0', 'CR1', 'mean_CR0', 'mean_CR1', 'Date_Time', 'Wavelength'] #parameters found in the summary pandas dataframe\n # solvent_summary = pd.DataFrame(columns=parameters)\n # for key in data_solvent:\n # temp_dict = {requested_value : data_solvent[key][requested_value] for requested_value in parameters}\n # solvent_summary.loc[key] = temp_dict\n \n # solvent_summary['q'] = 4*np.pi/data_summary['Wavelength']*sample_info['refractive_index']*np.sin(np.radians(data_summary['Angle']/2))\n # solvent_summary['Date_Time']= pd.to_datetime(solvent_summary['Date_Time']) \n # print('Data from {} imported correctly.'.format(solvent_path))\n\n \n \n return data, data_summary, data_average\n\n\n\n\n\ndef plot_raw_intensity(data, title, path):\n ''' Here all count rate traces for each file are plotted. The main goal is \n to be able to rapidly spot problems in the measurements. \n '''\n def applyPlotStyle(title, ax):\n ax.set_xlabel('time / s')\n ax.set_ylabel('Count rate / a.u.')\n ax.set_title(title)\n # ax.legend(loc='upper left')\n \n \n def plot(ax, data, file):\n ax.plot(data[file]['CRs'][:,0], data[file]['CRs'][:,1], '-', linewidth=1, label='CR0')\n ax.plot(data[file]['CRs'][:,0], data[file]['CRs'][:,2], '-', linewidth=1, label='CR1')\n \n N = len(data)\n rows = int(np.ceil(np.sqrt(N/2)))\n cols = int(np.ceil(N/rows)) if rows > 0 else 1\n gs = gridspec.GridSpec(rows, cols)\n \n fig = plt.figure(figsize=(cols*3.5, rows*3.5))\n\n i = 0\n for file in sorted(data):\n ax = fig.add_subplot(gs[i])\n applyPlotStyle(file, ax)\n plot(ax, data, file)\n i += 1\n \n gs.tight_layout(fig)\n plt.savefig(os.path.join(path, '{}_raw_CR.pdf'.format(title)))\n plt.close(fig)\n print('Countrates of {} correctly plotted.'.format(title))\n return None\n\n\ndef plot_all_g2s(data, title, path):\n ''' Here all intensity correlation functions for each file are plotted. The main goal is \n to be able to rapidly spot problems in the measurements. 
\n '''\n def applyPlotStyle(title, ax):\n ax.set_xlabel('$\\tau$/ ms')\n ax.set_ylabel('g$^{(\\tau)}$ / a.u.')\n ax.set_xscale('log')\n ax.set_title(title)\n \n def plot(ax, data, file):\n ax.plot(data[file]['g2s'].index, data[file]['g2s']['g2_1'], '-', linewidth=0.5)\n ax.plot(data[file]['g2s'].index, data[file]['g2s']['g2_2'], '-', linewidth=0.5)\n ax.plot(data[file]['g2s'].index, data[file]['g2s']['g2_average'], '-', linewidth=1.5)\n # ax.plot(data[file]['g2s'][:,0], data[file]['g2s'][:,2], '-', linewidth=0.5)\n # ax.plot(data[file]['g2s'][:,0], data[file]['g2s'][:,3], '-', linewidth=1.5)\n \n \n N = len(data)\n rows = int(np.ceil(np.sqrt(N/2)))\n cols = int(np.ceil(N/rows)) if rows > 0 else 1\n gs = gridspec.GridSpec(rows, cols)\n \n fig = plt.figure(figsize=(cols*3.5, rows*3.5))\n\n i = 0\n for file in sorted(data):\n ax = fig.add_subplot(gs[i])\n applyPlotStyle(file, ax)\n plot(ax, data, file)\n i+= 1\n \n gs.tight_layout(fig)\n plt.savefig(os.path.join(path, '{}_raw_g2s.pdf'.format(title)))\n print('Correlation functions of {} correctly plotted.'.format(title))\n plt.close(fig)\n \n return None\n\ndef toluene_normalization(static_tol, sample, path):\n '''In this function, the toluene static intensities are plotted. The intensity\n is calculated as the average of the countrates CR0 and CR1, and normalized by the\n monitor intensity. The angle dependent intensity is fitted with the function:\n I = A/sin(angle). \n This function is used to normalize the scattering intensity of the sample measurements. \n '''\n \n def model(x, A):\n return A/np.sin(np.radians(x))\n \n Inten = ((static_tol['CR0']['mean'] + static_tol['CR1']['mean'])/static_tol['Imon']['mean']).tolist() #intensity calculated from the angle averaged values of CR0, CR1, and Imon\n Inten_std = (static_tol['CR0']['std'] + static_tol['CR1']['std'])/(static_tol['CR0']['mean'] + static_tol['CR1']['mean']) #standard deviation from error propagation.\n Inten_std += static_tol['Imon']['std']/static_tol['Imon']['mean']#standard deviation from error propagation.\n Inten_std *= Inten #standard deviation from error propagation.\n Inten_std = Inten_std.tolist()\n A, Aerr = opt.curve_fit(model, static_tol.index.tolist(), Inten, p0 = [1.5e-5])\n \n #opt_Inten = model(static_tol['Angle'].tolist(), A)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.errorbar(static_tol.index.tolist(), Inten, yerr=Inten_std, fmt='o')\n ax.set_xlabel('Angle / deg')\n ax.set_ylabel('CR0+CR1 / Imon / a.u.')\n ax.plot(static_tol.index.tolist(), model(static_tol.index.tolist(), A))\n \n fig.savefig(os.path.join(path, 'toluene.pdf'))\n plt.close(fig)\n \n for key in sample:\n sample[key]['Tol_int'] = A[0]\n sample[key]['Tol_int_err'] = Aerr[0][0]\n return A, Aerr\n\ndef solvent_intensity(sample_info):\n return None\n\n\n\n\ndef sample_intensity(sample_info):\n sample = sample_info['sample_summary']\n sample['Inten'] = (sample['CR0']+sample['CR1'])/sample['Imon']\n I_tol = sample_info['Tol_int']/np.sin(np.radians(sample['Angle'].tolist()))\n sample['I_q'] = sample['Inten']/I_tol*RR #cm-1\n # sample['I_q'] = sample['Inten']/I_tol*RR #cm-1\n # print(sample_info)\n NA = 6.022e23 #mol-1\n wl = sample_info['sample_summary']['Wavelength'].mean()\n KL = 4*np.pi**2*sample_info['refractive_index']**2*sample_info['dndc']**2/NA/(wl*1e-7)**4 #in cm2/g2/mol\n sample['KcR'] = sample_info['conc']*KL/sample['I_q']\n sample['KcR'] = sample['I_q']/sample['I_q']*sample['KcR']\n sample_info['sample_average']['I_q', 'mean'] = sample.groupby(['Angle']).mean()['I_q'] \n \n 
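# run-to-run standard deviations at each angle, stored next to the means:\n 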
sample_info['sample_average']['I_q', 'std'] = sample.groupby(['Angle']).std()['I_q'] \n sample_info['sample_average']['KcR', 'mean'] = sample.groupby(['Angle']).mean()['KcR']\n sample_info['sample_average']['KcR', 'std'] = sample.groupby(['Angle']).std()['KcR']\n# print(sample_info['sample_average'])\n return None\n\ndef export_intensity(sample_info):\n sample = sample_info['sample_average']\n# print(sample)\n static_to_be_exported = pd.DataFrame()\n static_to_be_exported['T'] = sample['T','mean']\n static_to_be_exported['q'] = sample['q','mean']\n static_to_be_exported['Imon_mean'] = sample['Imon','mean']\n static_to_be_exported['Imon_std'] = sample['Imon','std']\n static_to_be_exported['Iq_mean'] = sample['I_q','mean']\n static_to_be_exported['Iq_std'] = sample['I_q','std']\n static_to_be_exported['KcR_mean'] = sample['KcR','mean']\n static_to_be_exported['KcR_std'] = sample['KcR','std']\n try:\n Iq = sample_info['I0']*np.exp(-1/3*sample_info['Rg']**2*sample['q', 'mean']**2)\n static_to_be_exported['Guinier_fit'] = Iq\n except:\n pass\n# print(static_to_be_exported)\n# static_to_be_exported['Angle'] = sample.index.tolist()\n# columns_to_be_exported = \n filename = os.path.join(sample_info['data_path'], 'SLS_params.csv')\n with open(filename, 'w+') as f:\n header = '#Units are K for the Temperature, cm^-1 for the intensity, 1/nm for the scattering vector, and mol/g for KcR\\n'\n f.write(header)\n \n static_to_be_exported.to_csv(filename, mode='a')\n \n return None\n\ndef plot_intensity(sample_info):\n ''' Function where the static intensities are plotted. Two plots will be generated:\n in the first, the static intensity is reported as a function of the\n scattering vector q, in a log-log representation.\n in the second, K_L*c/R is reported as a function of the sine of half\n the scattering angle.'''\n \n sample = sample_info['sample_average']\n \n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9.0,4.5))\n ax1.set_xlabel('q / nm$^{-1}$')\n ax1.set_ylabel('I(q) / cm$^{-1}$')\n ax1.set_xscale('log')\n ax1.set_yscale('log')\n \n #defines the limits to plot the data analyzed by the Guinier fit. 
\n try:\n minq = 0.0 if sample_info['qmin'] is None else sample_info['qmin']\n except:\n minq = 0.0\n try:\n maxq = 10.0 if sample_info['qmax'] is None else sample_info['qmax']\n except:\n maxq = 10.0\n \n \n mask_1 = sample['q', 'mean'].to_numpy() > minq\n mask_2 = sample['q', 'mean'].to_numpy() < maxq\n mask = np.logical_and(mask_1, mask_2)\n \n scatter = ax1.errorbar(sample['q', 'mean'][mask], sample['I_q', 'mean'][mask], color='C0', marker='s', yerr=sample['I_q', 'std'][mask], label=sample_info['name'])\n mask = np.logical_not(mask) # the points excluded from the fit range, plotted semi-transparent\n ax1.errorbar(sample['q', 'mean'][mask], sample['I_q', 'mean'][mask], color='C0', marker='s', yerr=sample['I_q', 'std'][mask], alpha=0.5)\n ax1.set_ylim(ax1.get_ylim())\n\n try:\n Iq = sample_info['I0']*np.exp(-1/3*sample_info['Rg']**2*sample['q', 'mean']**2)\n fit = ax1.plot(sample['q', 'mean'], Iq, label='Guinier fit', color='C2')\n s1 = 'I(0) = {:.2f} $\pm$ {:.2f} $cm^{{-1}}$'.format(sample_info['I0'], sample_info['I0_err'])\n s2 = 'R$_g$ = {:.0f} $\pm$ {:.0f} nm'.format(sample_info['Rg'], sample_info['Rg_err'])\n except:\n s1, s2 = '', ''\n print('Could not plot Guinier fit')\n\n \n # extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n \n handles, labels = ax1.get_legend_handles_labels()\n handles.append(mpatches.Patch(color='none', label=s1))\n handles.append(mpatches.Patch(color='none', label=s2))\n ax1.legend(handles=handles)\n \n \n # plt.legend([scatter, fit, extra, extra],[scatter.get_label(), fit.get_label(), s1, s2], loc='best')\n \n ax2.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n ax2.set_xlabel(r'$sin(\theta/2)$')\n ax2.set_ylabel('K$_L$c/R / mol g$^{-1}$') # \n ax2.errorbar(np.sin(np.radians(sample.index/2)), sample['KcR', 'mean'], marker='s', yerr=sample['KcR', 'std'])\n \n plt.tight_layout()\n plt.savefig(os.path.join(sample_info['data_path'], str(sample_info['name'])+'_static.pdf'))\n # plt.close()\n return None\n\n \ndef analyze_static_intensity(sample_info):\n ''' Fits the angle-averaged static intensity with a Guinier model and stores\n I(0) and Rg (with their errors) in sample_info.'''\n sample = sample_info['sample_average']\n \n def Guinier(sample):\n ''' In this function, the scattering intensity is described as:\n I(q) = I(0)exp(-q^2Rg^2/3). '''\n \n #initialization of the fit routine for the Guinier fit.\n fit_params = Parameters()\n fit_params.add('I0', value = sample['I_q', 'mean'].to_numpy()[0], min=1e-6, vary=True)\n fit_params.add('Rg', value = 15.0, min=5.0, max=400, vary=True)\n \n \n def f2min(params):\n vals = params.valuesdict()\n Iq = np.log(vals['I0']) - 1/3*(vals['Rg']**2)*(sample['q', 'mean']**2)\n\n #the function is evaluated only between qmin and qmax, if provided. 
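The residuals\n #are computed in log space, ln I(q) = ln I(0) - (Rg*q)**2/3, and are weighted\n #by the relative error of I(q) whenever it is available.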
\n try:\n minq = 0.0 if sample_info['qmin'] is None else sample_info['qmin']\n except:\n minq = 0.0\n try:\n maxq = 10.0 if sample_info['qmax'] is None else sample_info['qmax']\n except:\n maxq = 10.0\n \n \n mask_1 = sample['q', 'mean'].to_numpy() > minq\n mask_2 = sample['q', 'mean'].to_numpy() < maxq\n mask = np.logical_and(mask_1, mask_2)\n \n if np.isnan(sample['I_q', 'std'].to_numpy()).any():\n res = (np.log(sample['I_q', 'mean']) - Iq)\n else:\n res = ( (np.log(sample['I_q', 'mean']) - Iq) / (sample['I_q', 'std']/sample['I_q', 'mean'])).to_numpy()\n\n return res[mask]\n \n out = minimize(f2min, fit_params, xtol=1e-6)\n print(fit_report(out))\n return out.params['I0'].value, out.params['I0'].stderr, out.params['Rg'].value, out.params['Rg'].stderr\n \n sample_info['I0'], sample_info['I0_err'], sample_info['Rg'], sample_info['Rg_err'] = Guinier(sample)\n plot_intensity(sample_info)\n print('Guinier analysis of {} sample performed.'.format(sample_info['name']))\n return None\n\n\n\ndef plot_analyzed_correlations_functions(sample_info, dls_methods):\n ''' Function where all correlation functions are plotted, together with the\n best fitting functions and the residuals. '''\n\n\n \n num_true = sum(1 for condition in dls_methods.values() if condition is True) #counts the number of analyses performed.\n \n if num_true == 0: #if no analysis is performed, the function is terminated here. \n return None\n else:\n for sample in sample_info:\n data = sample_info[sample] \n for run in data['sample_data']:\n fig = plt.figure(figsize=(3.5*num_true, 5.5))\n plt.suptitle(run)\n outer = gridspec.GridSpec(1, num_true, wspace=0.2, hspace=0.1)\n counter = 0\n \n if dls_methods['Cumulant'] is True:\n gs0 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[counter], hspace=0.0, height_ratios=[2,1])\n \n ax1 = fig.add_subplot(gs0[0])\n ax2 = fig.add_subplot(gs0[1], sharex=ax1)\n \n ax1.set_xscale(\"log\", nonpositive='clip')\n ax2.set_xscale(\"log\", nonpositive='clip')\n \n ax1.set_ylabel('$g^{(2)}(\\\\tau$)')\n ax2.set_xlabel('$\\\\tau$ / ms')\n ax2.set_ylabel('Residual') \n\n \n decay = dls_methods['Cumulant_decay']*data['sample_summary'].loc[run,'CM_A']\n \n tau = data['sample_data'][run]['g2s'].index.to_numpy()\n g2_cumulant = data['sample_data'][run]['g2s']['g2_Cumulant'].to_numpy()\n g2s = data['sample_data'][run]['g2s']['g2_average'].to_numpy()\n \n tau = tau[g2s>decay]\n g2_cumulant = g2_cumulant[g2s>decay]\n g2s = g2s[g2s>decay]\n residual = g2s - g2_cumulant\n \n ax1.set_title('Cumulant Analysis')\n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_average'].to_list(), 'o')\n ax1.plot(tau, g2_cumulant)\n ax2.plot(tau, residual)\n counter += 1\n \n \n if dls_methods['Frisken'] is True:\n gs1 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[counter], hspace=0.0, height_ratios=[2,1])\n \n ax1 = fig.add_subplot(gs1[0])\n ax2 = fig.add_subplot(gs1[1], sharex=ax1)\n \n ax1.set_xscale(\"log\", nonpositive='clip')\n ax2.set_xscale(\"log\", nonpositive='clip')\n \n ax1.set_title('Frisken Analysis')\n ax1.set_ylabel('$g^{(2)}(\\\\tau$)')\n ax2.set_xlabel('$\\\\tau$ / ms')\n ax2.set_ylabel('Residual') \n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_average'].to_list(), 'o')\n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_Frisken'].to_list())\n residual = data['sample_data'][run]['g2s']['g2_average'].to_numpy() - 
data['sample_data'][run]['g2s']['g2_Frisken'].to_numpy()\n ax2.plot( data['sample_data'][run]['g2s'].index.to_list(), residual)\n \n counter += 1 \n \n if dls_methods['Double_exponential'] is True:\n gs1 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[counter], hspace=0.0, height_ratios=[2,1])\n \n ax1 = fig.add_subplot(gs1[0])\n ax2 = fig.add_subplot(gs1[1], sharex=ax1)\n \n ax1.set_xscale(\"log\", nonpositive='clip')\n ax2.set_xscale(\"log\", nonpositive='clip')\n \n ax1.set_title('Double_exponential')\n ax1.set_ylabel('$g^{(2)}(\\\\tau$)')\n ax2.set_xlabel('$\\\\tau$ / ms')\n ax2.set_ylabel('Residual') \n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_average'].to_list(), 'o')\n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_Double_Exponential'].to_list())\n residual = data['sample_data'][run]['g2s']['g2_average'].to_numpy() - data['sample_data'][run]['g2s']['g2_Double_Exponential'].to_numpy()\n ax2.plot( data['sample_data'][run]['g2s'].index.to_list(), residual)\n \n counter += 1 \n \n if dls_methods['Stretched_exponential'] is True:\n gs1 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[counter], hspace=0.0, height_ratios=[2,1])\n \n ax1 = fig.add_subplot(gs1[0])\n ax2 = fig.add_subplot(gs1[1], sharex=ax1)\n \n ax1.set_xscale(\"log\", nonpositive='clip')\n ax2.set_xscale(\"log\", nonpositive='clip')\n \n ax1.set_title('Stretched exponential Analysis')\n ax1.set_ylabel('$g^{(2)}(\\\\tau$)')\n ax2.set_xlabel('$\\\\tau$ / ms')\n ax2.set_ylabel('Residual') \n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_average'].to_list(), 'o')\n ax1.plot( data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_Stretched_exp'].to_list())\n residual = data['sample_data'][run]['g2s']['g2_average'].to_numpy() - data['sample_data'][run]['g2s']['g2_Stretched_exp'].to_numpy()\n ax2.plot( data['sample_data'][run]['g2s'].index.to_list(), residual)\n \n counter += 1 \n \n #plt.tight_layout()\n plt.savefig(os.path.join(data['data_path'], str(run).split('.ASC')[0] + '.png'))\n plt.close()\n \n if dls_methods['Cumulant'] is True:\n print('Cumulant analysis on sample {} performed'.format(sample))\n if dls_methods['Stretched_exponential'] is True:\n print('Stretched exponential fit on sample {} performed'.format(sample))\n if dls_methods['Frisken'] is True:\n print('Frisken fit on sample {} performed'.format(sample))\n if dls_methods['Double_exponential'] is True:\n print('Double Exponential fit on sample {} performed'.format(sample)) \n return None\n \ndef plot_dls_results(sample_info, dls_methods):\n ''' Description here'''\n for sample in sample_info:\n data = sample_info[sample]['sample_summary']\n \n \n if sample_info[sample]['time_series'] is True:\n plt.figure()\n plt.ylabel('D$_{app}$ = $\\Gamma$/q$^2$ / $\\mu$m^2 s$^{-1}$')\n plt.xlabel('Date and Time')\n \n if dls_methods['Frisken'] is True:\n D_app_Frisken = data['FR_Gamma']/(data['q'])**2 * 1000 / 1e6 #from nm^2/ms to mu2/s\n plt.plot(data['Date_Time'], D_app_Frisken, 'bo', alpha=0.75, label='Frisken fit')\n \n \n if dls_methods['Cumulant'] is True:\n D_app_cumulant = data['CM_Gamma']/(data['q'])**2 * 1000 / 1e6 #from nm^2/ms to mu2/s\n plt.plot(data['Date_Time'], D_app_cumulant, 'rs', alpha=0.75, label='Cumulant fit')\n \n if dls_methods['Stretched_exponential'] is True:\n D_app_stretched = 
data['SE_Gamma']/data['SE_beta']*gamma(1/data['SE_beta'])/(data['q'])**2 * 1000 / 1e6 #from nm^2/ms to mu2/s\n plt.plot(data['Date_Time'], D_app_stretched, 'mv', alpha=0.75, label='Stretched exp. fit') \n \n \n else:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.ylabel('D$_{app}$ = $\\Gamma$/q$^2$ / $\\mu$m^2 s$^{-1}$')\n plt.xlabel('q$^2$ / 10$^{-4}$ nm$^{-2}$')\n \n if dls_methods['Frisken'] is True:\n D_app_Frisken = data['FR_Gamma']/(data['q'])**2 * 1000 / 1e6 #from nm^2/ms to mu2/s\n ax.plot(data['q']**2*1e4, D_app_Frisken, 'bo', alpha=0.75, label='Frisken fit')\n\n linfit = linregress(data['q']**2*1e4, D_app_Frisken)\n Dapp_fit = data['q']**2*1e4*linfit.slope + linfit.intercept\n ax.plot(data['q']**2*1e4, Dapp_fit)\n try:\n sample_info[sample]['D0_Frisken'], sample_info[sample]['D0_Frisken_err'] = linfit.intercept, linfit.intercept_stderr\n s1 = 'D(0)$_{{Frisken}}$ = {:.2f} $\\pm$ {:.2f} $\\mu$m^2 s$^{{-1}}$'.format(sample_info[sample]['D0_Frisken'], sample_info[sample]['D0_Frisken_err'])\n except:\n sample_info[sample]['D0_Frisken'], sample_info[sample]['D0_Frisken_err'] = linfit.intercept, np.nan\n s1 = 'D(0)$_{{Frisken}}$ = {:.2f} $\\mu$m^2 s$^{{-1}}$'.format(sample_info[sample]['D0_Frisken'])\n \n ax.plot([],[], ' ', label=s1)\n \n if dls_methods['Cumulant'] is True:\n D_app_cumulant = data['CM_Gamma']/(data['q'])**2 * 1000 / 1e6 #from nm^2/ms to mu2/s\n ax.plot(data['q']**2*1e4, D_app_cumulant, 'rs', alpha=0.75, label='Cumulant fit')\n \n linfit = linregress(data['q']**2*1e4, D_app_cumulant)\n Dapp_fit = data['q']**2*1e4*linfit.slope + linfit.intercept\n ax.plot(data['q']**2*1e4, Dapp_fit, color='red')\n try:\n sample_info[sample]['D0_Cumulant'], sample_info[sample]['D0_Cumulant_err'] = linfit.intercept, linfit.intercept_stderr\n s1 = 'D(0)$_{{Cumulant}}$ = {:.2f} $\\pm$ {:.2f} $\\mu$m^2 s$^{{-1}}$'.format(sample_info[sample]['D0_Cumulant'], sample_info[sample]['D0_Cumulant_err'])\n except:\n sample_info[sample]['D0_Cumulant'], sample_info[sample]['D0_Cumulant_err'] = linfit.intercept, np.nan\n s1 = 'D(0)$_{{Cumulant}}$ = {:.2f} $\\mu$m^2 s$^{{-1}}$'.format(sample_info[sample]['D0_Cumulant'])\n \n ax.plot([],[], ' ', label=s1)\n \n if dls_methods['Stretched_exponential'] is True:\n D_app_stretched = data['SE_Gamma']/data['SE_beta']*gamma(1/data['SE_beta'])/(data['q'])**2 * 1000 / 1e6 #from nm^2/ms to mu2/s\n plt.plot(data['q']**2*1e4, D_app_stretched, 'mv', alpha=0.75, label='Stretched exp. 
fit') \n\n linfit = linregress(data['q']**2*1e4, D_app_stretched)\n Dapp_fit = data['q']**2*1e4*linfit.slope + linfit.intercept\n ax.plot(data['q']**2*1e4, Dapp_fit, color='magenta')\n try:\n sample_info[sample]['D0_stretched'], sample_info[sample]['D0_stretched_err'] = linfit.intercept, linfit.intercept_stderr\n s1 = 'D(0)$_{{stretched}}$ = {:.2f} $\\pm$ {:.2f} $\\mu$m^2 s$^{{-1}}$'.format(sample_info[sample]['D0_stretched'], sample_info[sample]['D0_stretched_err'])\n except:\n sample_info[sample]['D0_stretched'], sample_info[sample]['D0_stretched_err'] = linfit.intercept, np.nan\n s1 = 'D(0)$_{{stretched}}$ = {:.2f} $\\mu$m^2 s$^{{-1}}$'.format(sample_info[sample]['D0_stretched'])\n \n ax.plot([],[], ' ', label=s1)\n \n\n\n\n ax.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(sample_info[sample]['data_path'], str(sample) + 'Dapp.pdf'))\n plt.show()\n \n return None\n\ndef analyze_correlation_function(sample_info, dls_methods):\n ''' Description here'''\n #sample = sample_info['sample_average'] \n for sample in sample_info:\n# sys.stdout.write('*'); sys.stdout.flush(); \n data = sample_info[sample] \n #analyse non-averaged curves. \n if dls_methods['Frisken'] is True:\n frisken_temp = pd.DataFrame(columns=['run', 'FR_A', 'FR_A_err', 'FR_Gamma', 'FR_Gamma_err', 'FR_mu2', 'FR_mu2_err'])\n if dls_methods['Cumulant'] is True:\n cumulant_temp = pd.DataFrame(columns=['run', 'CM_A', 'CM_A_err', 'CM_Gamma', 'CM_Gamma_err', 'CM_mu2', 'CM_mu2_err'])\n if dls_methods['Stretched_exponential'] is True:\n stretched_temp = pd.DataFrame(columns=['run', 'SE_A', 'SE_A_err', 'SE_Gamma', 'SE_Gamma_err', 'SE_beta', 'SE_beta_err'])\n if dls_methods['Double_exponential'] is True:\n double_exp_temp = pd.DataFrame(columns=['run', 'DE_A', 'DE_A_err', 'DE_Gamma_1', 'DE_Gamma_1_err', 'DE_Gamma_2', 'DE_Gamma_2_err', 'DE_alpha', 'DE_alpha_err'])\n \n for run in data['sample_data']:\n g2_tau = np.array([data['sample_data'][run]['g2s'].index.to_list(), data['sample_data'][run]['g2s']['g2_average'].to_list()])\n \n if dls_methods['Frisken'] is True:\n Frisken_params, data['sample_data'][run]['g2s']['g2_Frisken'] = dls.Frisken(g2_tau)\n frisken_pars = {'run':run,\n 'FR_A': Frisken_params.params['A'].value,\n 'FR_A_err': Frisken_params.params['A'].stderr, \n 'FR_Gamma': Frisken_params.params['Gamma'].value, \n 'FR_Gamma_err': Frisken_params.params['Gamma'].stderr, \n 'FR_mu2':Frisken_params.params['mu2'].value, \n 'FR_mu2_err':Frisken_params.params['mu2'].stderr}\n \n frisken_temp = frisken_temp.append(frisken_pars, ignore_index=True)\n\n if dls_methods['Cumulant'] is True: \n Cumulant_params, data['sample_data'][run]['g2s']['g2_Cumulant'] = dls.Cumulant(g2_tau, dls_methods['Cumulant_decay'])\n cumulant_pars = {'run':run,\n 'CM_A': Cumulant_params.params['A'].value,\n 'CM_A_err': Cumulant_params.params['A'].stderr,\n 'CM_Gamma': Cumulant_params.params['Gamma'].value, \n 'CM_Gamma_err': Cumulant_params.params['Gamma'].stderr, \n 'CM_mu2': Cumulant_params.params['mu2'].value, \n 'CM_mu2_err': Cumulant_params.params['mu2'].stderr}\n cumulant_temp = cumulant_temp.append(cumulant_pars, ignore_index=True)\n\n if dls_methods['Stretched_exponential'] is True: \n Stretched_params, data['sample_data'][run]['g2s']['g2_Stretched_exp'] = dls.Stretched_exponential(g2_tau)\n stretched_pars = {'run':run,\n 'SE_A': Stretched_params.params['A'].value,\n 'SE_A_err': Stretched_params.params['A'].stderr, \n 'SE_Gamma': Stretched_params.params['Gamma'].value, \n 'SE_Gamma_err': Stretched_params.params['Gamma'].stderr, \n 'SE_beta': 
Stretched_params.params['beta'].value, \n 'SE_beta_err': Stretched_params.params['beta'].stderr}\n stretched_temp = stretched_temp.append(stretched_pars, ignore_index=True)\n \n if dls_methods['Double_exponential'] is True: \n Double_exp_params, data['sample_data'][run]['g2s']['g2_Double_Exponential'] = dls.Double_Exponential(g2_tau)\n double_exponential_pars = {'run':run,\n 'DE_A': Double_exp_params.params['A'].value,\n 'DE_A_err': Double_exp_params.params['A'].stderr, \n 'DE_Gamma_1': Double_exp_params.params['Gamma_1'].value, \n 'DE_Gamma_1_err': Double_exp_params.params['Gamma_1'].stderr, \n 'DE_Gamma_2': Double_exp_params.params['Gamma_2'].value, \n 'DE_Gamma_2_err': Double_exp_params.params['Gamma_2'].stderr, \n 'DE_alpha': Double_exp_params.params['alpha'].value, \n 'DE_alpha_err':Double_exp_params.params['alpha'].stderr}\n \n double_exp_temp = double_exp_temp.append(double_exponential_pars, ignore_index=True)\n \n \n \n if dls_methods['Contin'] is True:\n # data['sample_data'][run]['g2s']['g2_Contin'] = dls.Contin(g2_tau)\n alldata =dls.Contin(g2_tau, dls_methods['Contin_pars'])\n distr_function = [alldata[-1][1][:, 2], alldata[-1][1][:, 0], alldata[-1][1][:, 1]] #distribution function of decay times\n # print(alldata[-1][0])\n \n testxdata = alldata[-1][1][:, 2]\n testydata = alldata[-1][1][:, 0]\n testyerr = alldata[-1][1][:, 1]\n \n plt.xscale('log')\n plt.errorbar(testxdata, testydata, yerr=testyerr, fmt='rs')\n\n plt.show()\n \n if dls_methods['Frisken'] is True:\n frisken_temp.set_index('run',inplace=True) \n data['sample_summary'] = pd.concat([data['sample_summary'], frisken_temp], axis=1)\n \n if dls_methods['Cumulant'] is True:\n cumulant_temp.set_index('run',inplace=True) \n data['sample_summary'] = pd.concat([data['sample_summary'], cumulant_temp], axis=1)\n \n if dls_methods['Stretched_exponential'] is True:\n stretched_temp.set_index('run',inplace=True) \n data['sample_summary'] = pd.concat([data['sample_summary'], stretched_temp], axis=1)\n \n if dls_methods['Double_exponential'] is True:\n double_exp_temp.set_index('run',inplace=True) \n data['sample_summary'] = pd.concat([data['sample_summary'], double_exp_temp], axis=1)\n \n\n # print(frisken_temp)\n # print(data['sample_summary'])\n\n \ndef export_DLS_parameters(sample_info, dls_methods):\n for sample in sample_info:\n data = sample_info[sample]\n# print(data['sample_summary'])\n# print(data['data_path'])\n filename = os.path.join(data['data_path'], 'exp_SLS_DLS_params.csv')\n with open(filename, 'w') as f:\n s = '# The units are: q -- 1/nm; Gamma -- 1/ms\\n'\n f.write(s)\n \n data['sample_summary'].sort_index().to_csv(filename, mode='a')\n\n \n \n \n return None\n\ndef export_results(sample_info):\n try:\n results = pd.read_csv('results.csv')\n except:\n results = pd.DataFrame(columns=['Sample', 'Tol_int','Tol_int_err','I0','I0_err','Rg','Rg_err','D0_Frisken','D0_Frisken_err','D0_Cumulant','D0_Cumulant_err','D0_stretched','D0_stretched_err'])\n \n \n for sample in sample_info:\n s = sample_info[sample]\n params = {'Sample': sample}\n for pars in results:\n if pars == 'Sample':\n None\n else:\n try:\n params[pars] = s[pars]\n except:\n params[pars] = np.nan\n # print(params)\n results = results.append(params, ignore_index=True)\n results.set_index('Sample', inplace=True)\n \n results.to_csv('results.csv')\n \n \n \n \n 
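\n# A rough, hypothetical driver for this module (dictionary keys assumed from\n# the functions above; toluene_normalization must have set info['Tol_int']\n# before sample_intensity is called):\n#   info = {'name': 'S1', 'data_path': 'data', 'refractive_index': 1.33,\n#           'dndc': 0.17, 'conc': 1e-3, 'qmin': None, 'qmax': None}\n#   _, info['sample_summary'], info['sample_average'] = extract_data(info)\n#   sample_intensity(info)\n#   analyze_static_intensity(info)\n#   export_intensity(info)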
\n","repo_name":"leonardo-chiappisi/pyALV","sub_path":"SLS_DLS1.py","file_name":"SLS_DLS1.py","file_ext":"py","file_size_in_byte":37037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30431643639","text":"import logging\nfrom typing import List\n\nimport torch\nfrom transformers import AutoTokenizer\nfrom transformers.models.bert.tokenization_bert import BasicTokenizer\nfrom transformers.models.bert.tokenization_bert import BertTokenizer\nfrom transformers.models.xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer\n\nfrom machamp.data.machamp_instance import MachampInstance\nfrom machamp.data.machamp_vocabulary import MachampVocabulary\nfrom machamp.utils import myutils\nfrom machamp.utils import tok_utils\nfrom machamp.utils.lemma_edit import gen_lemma_rule\n\nlogger = logging.getLogger(__name__)\n\n\ndef seqs2data(tabular_file: str, skip_first_line: bool = False):\n \"\"\"\n Reads a conll-like file. We do not base the comment identification on\n the starting character being a '#' , as in some of the datasets we used\n the words where in column 0, and could start with a `#'. Instead we start\n at the back, and see how many columns (tabs) the file has. Then we judge\n any sentences at the start which do not have this amount of columns (tabs)\n as comments. Returns both the read column data as well as the full data.\n\n Parameters\n ----------\n tabular_file: str\n The path to the file to read.\n skip_first_line: bool\n In some csv/tsv files, the heads are included in the first row.\n This option let you skip these.\n\n Returns\n -------\n full_data: List[List[str]]\n A list with an instance for each token, which is represented as \n a list of strings (split by '\\t'). This variable includes the \n comments in the beginning of the instance.\n instance_str: List[List[str]]\n A list with an instance for each token, which is represented as \n a list of strings (split by '\\t'). This variable does not include\n the comments in the beginning of the instance.\n \"\"\"\n sent = []\n for line in open(tabular_file, mode=\"r\", encoding=\"utf-8\"):\n if skip_first_line:\n skip_first_line = False\n continue\n # because people use paste command, which includes empty tabs\n if len(line) < 2 or line.replace('\\t', '') in ['' '\\n']:\n if len(sent) == 0:\n continue\n num_cols = len(sent[-1])\n beg_idx = 0\n for i in range(len(sent)):\n back_idx = len(sent) - 1 - i\n if len(sent[back_idx]) == num_cols:\n beg_idx = len(sent) - 1 - i\n yield sent[beg_idx:], sent\n sent = []\n else:\n if line.startswith('# text'): # because tab in UD_Munduruku-TuDeT\n line = line.replace('\\t', ' ')\n sent.append([token for token in line.rstrip(\"\\n\").split('\\t')])\n\n # adds the last sentence when there is no empty line\n if len(sent) != 0 and sent != ['']:\n num_cols = len(sent[-1])\n beg_idx = 0\n for i in range(len(sent)):\n back_idx = len(sent) - 1 - i\n if len(sent[back_idx]) == num_cols:\n beg_idx = len(sent) - 1 - i\n yield sent[beg_idx:], sent\n\n\ndef tokenize_simple(tokenizer: AutoTokenizer, sent: List[List[str]], word_col_idx: int, num_special_tokens: int,\n has_unk: bool):\n \"\"\"\n A tokenizer that tokenizes each token separately (over gold tokenization). 
\ndef tokenize_simple(tokenizer: AutoTokenizer, sent: List[List[str]], word_col_idx: int, num_special_tokens: int,\n                    has_unk: bool):\n    \"\"\"\n    A tokenizer that tokenizes each token separately (over gold tokenization).\n    We found that this is the most robust method to tokenize overall (handling\n    of special characters, whitespaces etc.).\n\n    Parameters\n    ----------\n    tokenizer: AutoTokenizer\n        The tokenizer to use (that should match the used MLM).\n    sent: List[List[str]]\n        Contains all information of the tokens (also annotation), hence a list\n        of lists.\n    word_col_idx: int\n        The column index that contains the input words.\n    num_special_tokens: int\n        Number of special tokens: 2 (start and end token), 1 (only an end\n        token) or 0.\n    has_unk: bool\n        Whether the tokenizer has an unk token.\n\n    Returns\n    -------\n    token_ids: List[int]\n        The full list of token ids (one per subword; note that this can\n        be longer than the annotation lists).\n    offsets: torch.Tensor\n        The index of the last subword for every gold token. Should have\n        the same length as the annotation for sequence labeling tasks.\n    \"\"\"\n    token_ids = []\n    offsets = []\n    for token_idx in range(len(sent)):\n        # we do not use return_tensors='pt' because we do not know the length beforehand\n        if num_special_tokens == 2:\n            tokked = tokenizer.encode(sent[token_idx][word_col_idx])[1:-1]\n        elif num_special_tokens == 1:\n            # We assume that if there is only one special token, it is the end token\n            tokked = tokenizer.encode(sent[token_idx][word_col_idx])[:-1]\n        elif num_special_tokens == 0:\n            tokked = tokenizer.encode(sent[token_idx][word_col_idx])\n        else:\n            logger.error('Number of special tokens is currently not handled: ' + str(num_special_tokens))\n            exit(1)\n        if len(tokked) == 0 and has_unk:\n            # the token has no subwords of its own (e.g. an exotic character): map it to unk\n            tokked = [tokenizer.unk_token_id]\n        token_ids.extend(tokked)\n        offsets.append(len(token_ids) - 1)\n    offsets = torch.tensor(offsets, dtype=torch.long)\n\n    return token_ids, offsets\n\n
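# Illustrative usage sketch, not part of the original MaChAmp code. The model
# name 'bert-base-cased' and the example sentence are ours; any tokenizer with
# two special tokens ([CLS]/[SEP]) behaves analogously.
def _demo_tokenize_simple():
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    sent = [['The'], ['warrior'], ['unbelievably']]  # a single column: the word itself
    token_ids, offsets = tokenize_simple(tokenizer, sent, 0, 2, True)
    # 'unbelievably' splits into several subwords, but offsets keeps exactly
    # one index per gold token (its last subword), so sequence-labeling gold
    # labels stay aligned one-to-one with the gold tokens.
    print(len(token_ids), offsets.tolist())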
def read_sequence(\n        dataset: str,\n        config: dict,\n        tokenizer: AutoTokenizer,\n        vocabulary: MachampVocabulary,\n        data_path: str,\n        is_train: bool,\n        max_sents: int,\n        max_words: int,\n        max_input_length: int):\n    \"\"\"\n    Reads conllu-like files. It relies heavily on seqs2data for the reading\n    logic. Can also read sentence classification tasks, for which the labels\n    should be specified in the comments.\n\n    Parameters\n    ----------\n    dataset: str\n        The (unique) name of the dataset.\n    config: dict\n        The dataset configuration, with all defined parameters we need to\n        read the file properly.\n    tokenizer: AutoTokenizer\n        The tokenizer to use (that should match the used MLM).\n    vocabulary: MachampVocabulary\n        The vocabularies for all tasks.\n    data_path: str\n        The path to read the data from.\n    is_train: bool\n        Whether we are currently training; important to know so that we can\n        expand the label vocabulary or not.\n    max_sents: int\n        The maximum number of sentences to read.\n    max_words: int\n        The maximum amount of words to read, rounds down.\n    max_input_length: int\n        The maximum amount of subwords to input to the encoder, not used here.\n\n    Returns\n    -------\n    data: List[Machamp.data.MachampInstance]\n        A list of instances, including tokenization, annotation etc.\n    \"\"\"\n    data = []\n    word_col_idx = config['word_idx']\n    sent_counter = 0\n    word_counter = 0\n    unk_counter = 0\n    subword_counter = 0\n    has_unk = tokenizer.unk_token_id is not None\n    has_tok_task = 'tok' in [config['tasks'][task]['task_type'] for task in config['tasks']]\n    num_special_tokens = len(tokenizer.prepare_for_model([])['input_ids'])\n    if has_tok_task:\n        pre_tokenizer = BasicTokenizer(strip_accents=False, do_lower_case=False, tokenize_chinese_chars=True)\n        tokenizer.do_basic_tokenize = False\n        script_finder = tok_utils.ScriptFinder()\n    type_tokenizer = myutils.identify_tokenizer(tokenizer)\n\n    all_sents = list(seqs2data(data_path))\n    do_splits = False\n    if has_tok_task:\n        for task in config['tasks']:\n            if config['tasks'][task]['task_type'] == 'tok':\n                do_splits = config['tasks'][task]['pre_split']\n\n    # the 'in config' is essential: a bare string literal is always truthy\n    if is_train and 'dataset_embed_idx' in config:\n        vocabulary.create_vocab('dataset_embeds', True)\n\n    learn_splits = do_splits and is_train\n    for sent, full_data in all_sents:\n        # sent is a list of lists, of shape sentenceLength, numColumns\n        if max_sents != -1 and sent_counter >= max_sents and is_train:\n            break\n        sent_counter += 1\n\n        for token in sent:\n            if len(token) <= word_col_idx:\n                logger.error(\"Sentence (\" + str(sent_counter) + \") does not have input words in column \" + str(\n                    word_col_idx) + ':\n ' + ' '.join(['\n'.join(x) for x in sent]) + '\t' + data_path)\n                exit(1)\n\n        if has_tok_task:\n            gold_tokens = [line[word_col_idx] for line in sent]\n            token_ids, offsets, tok_labels, no_unk_subwords, new_splits = tok_utils.tokenize_and_annotate(\n                full_data, gold_tokens, pre_tokenizer, tokenizer, vocabulary.pre_splits, learn_splits,\n                script_finder, do_splits, type_tokenizer)\n            # Note that the splits are not per dataset as of now; this might be sub-optimal for performance,\n            # but is more generalizable.
\n            # They are also not picked in a smart way; we just keep the last one for each.\n            if new_splits != {}:\n                vocabulary.pre_splits = new_splits\n\n        else:\n            # We assume that if there is only one special token, it is the end token\n            token_ids, offsets = tokenize_simple(tokenizer, sent, word_col_idx, num_special_tokens, has_unk)\n            no_unk_subwords = None\n        token_ids = tokenizer.prepare_for_model(token_ids, return_tensors='pt')['input_ids']\n\n        dataset_ids_subwords = []\n        if 'dataset_embed_idx' in config:\n            if config['dataset_embed_idx'] == -1:\n                # no dedicated column: every token gets the id of the dataset name itself\n                dataset_ids_words = [vocabulary.token2id(dataset, 'dataset_embeds', is_train) for token in sent]\n            else:\n                dataset_ids_words = [vocabulary.token2id(token[config['dataset_embed_idx']], 'dataset_embeds', is_train)\n                                     for token in sent]\n            dataset_ids_subwords = torch.zeros(len(token_ids), dtype=torch.long)\n\n            for word_idx in range(len(offsets)):\n                beg = 0 if word_idx == 0 else offsets[word_idx - 1]\n                end = offsets[word_idx]\n                for subword_idx in range(beg, end + 1):  # end + 1 because the offsets are inclusive\n                    if num_special_tokens == 2:\n                        # 1+ to skip the leading special (CLS) token\n                        dataset_ids_subwords[1 + subword_idx] = dataset_ids_words[word_idx]\n                    else:\n                        # no start token, so no shift is needed (both branches used 1+subword_idx before)\n                        dataset_ids_subwords[subword_idx] = dataset_ids_words[word_idx]\n\n        col_idxs = {}\n        golds = {}\n        for task in config['tasks']:\n            if is_train:\n                vocabulary.create_vocab(task, True)\n            task_type = config['tasks'][task]['task_type']\n            task_idx = config['tasks'][task]['column_idx']\n            col_idxs[task] = task_idx\n\n            # Read sequence labeling tasks\n            if task_type in ['seq', 'seq_bio', 'multiseq', 'string2string']:\n                for word_data in sent:\n                    if len(word_data) <= task_idx:\n                        logger.error(\"Sentence (\" + str(\n                            sent_counter) + \") does not have annotation for task \" + task + ': column ' + str(\n                            task_idx) + ' is missing:\n' + ' '.join(['\n'.join(x) for x in sent]) + '\t' + data_path)\n                        exit(1)\n                    if len(word_data[task_idx]) == 0:\n                        logger.error(\"Sentence (\" + str(\n                            sent_counter) + \") does not have annotation for task \" + task + ': column ' + str(\n                            task_idx) + ' is empty\n' + ' '.join(['\n'.join(x) for x in sent]) + '\t' + data_path)\n                        exit(1)\n\n                # adapt labels for string2string: the gold label becomes an edit\n                # rule that rewrites the word form into its lemma\n                if task_type == 'string2string':\n                    golds[task] = torch.tensor([vocabulary.token2id(\n                        gen_lemma_rule(token_info[word_col_idx], token_info[task_idx]), task, is_train) for token_info\n                        in sent], dtype=torch.long)\n\n                # Special handling for multiseq, as it requires a different labelfield\n                elif task_type == 'multiseq':\n                    label_sequence = []\n                    for token_info in sent:\n                        label_list = token_info[task_idx].split(\"|\")\n                        label_sequence.append([vocabulary.token2id(label, task, is_train) for label in label_list])\n                    max_labels = max([len(label) for label in label_sequence])\n                    padded_label_sequence = [labels + [vocabulary.UNK_ID] * (max_labels - len(labels)) for labels in\n                                             label_sequence]\n                    golds[task] = torch.tensor(padded_label_sequence, dtype=torch.long)\n                else:\n                    golds[task] = torch.tensor(\n                        [vocabulary.token2id(token_info[task_idx], task, is_train) for token_info in sent],\n                        dtype=torch.long)\n\n            elif task_type == 'dependency':\n                heads = []\n                try:\n                    for wordIdx, word in enumerate(sent):\n                        if word[task_idx] == '_':\n                            # missing head: attach the first word to the root, all others to word 1\n                            head = 0 if wordIdx == 0 else 1\n                        else:\n                            head = int(word[task_idx])\n                        heads.append(head)\n                except ValueError:\n                    logger.error(\n                        \"Your dependency file \" + data_path + \" seems to contain invalid structures; sentence \" +\n                        str(sent_counter) + \" contains a non-integer head: \" + word[\n                            task_idx] + \"\nIf you directly used UD data, this could be due to \" +\n                        \"multiword tokens, which we currently do not support, you can clean your \" +\n                        \"conllu file by using scripts/misc/cleanconl.py\")\n                    exit(1)\n                golds[task + '-heads'] = torch.tensor(heads, dtype=torch.long)\n\n                if len(sent[0]) <= task_idx + 1:\n                    logger.error(\"Sentence (\" + str(\n                        sent_counter) + \") does not have annotation for task \" + task + ': column ' + str(\n                        task_idx + 1) + ' is missing:\n' + ' '.join(['\n'.join(x) for x in sent]))\n                    exit(1)\n                elif len(sent[0][task_idx + 1]) == 0:\n                    logger.error(\"Sentence (\" + str(\n                        sent_counter) + \") does not have annotation for task \" + task + ': column ' + str(\n                        task_idx + 1) + ' is empty\n' + ' '.join(['\n'.join(x) for x in sent]))\n                else:\n                    golds[task + '-rels'] = torch.tensor(\n                        [vocabulary.token2id(token_info[task_idx + 1], task, is_train) for token_info in sent],\n                        dtype=torch.long)\n
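            # For clarity (example added by us; the expected format is the one
            # named in the error message below): a classification task called
            # e.g. 'topic' is annotated as a comment line above the token rows:
            #
            #     # topic: sports
            #     1   The    ...
            #     2   match  ...
            #
            # Such comment rows are kept in full_data but stripped from sent.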
\n            # Read sentence classification tasks from the comments\n            elif task_type == 'classification' and task_idx == -1:\n                start = '# ' + task + ': '\n                label = ''\n                for line in full_data:\n                    if line[0].startswith(start):\n                        label = line[0][len(start):]\n                if label != '':\n                    golds[task] = vocabulary.token2id(label, task, is_train)\n                else:\n                    logger.error(\n                        \"Classification label \" + task + \" not found. Make sure that every sentence has a comment \"\n                        \"looking like:\n# \" + task + \":